Diffstat (limited to 'drivers')
-rw-r--r--drivers/accessibility/speakup/devsynth.c149
-rw-r--r--drivers/accessibility/speakup/synth.c4
-rw-r--r--drivers/acpi/Kconfig3
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/riscv/Makefile4
-rw-r--r--drivers/acpi/riscv/cppc.c157
-rw-r--r--drivers/acpi/riscv/cpuidle.c81
-rw-r--r--drivers/acpi/sleep.c24
-rw-r--r--drivers/android/binder_alloc.c2
-rw-r--r--drivers/ata/ahci.c13
-rw-r--r--drivers/base/component.c4
-rw-r--r--drivers/base/core.c72
-rw-r--r--drivers/base/cpu.c2
-rw-r--r--drivers/base/dd.c32
-rw-r--r--drivers/base/firmware_loader/main.c16
-rw-r--r--drivers/base/platform-msi.c6
-rw-r--r--drivers/base/property.c67
-rw-r--r--drivers/base/swnode.c13
-rw-r--r--drivers/block/floppy.c1
-rw-r--r--drivers/bluetooth/btmtkuart.c4
-rw-r--r--drivers/bluetooth/btnxpuart.c4
-rw-r--r--drivers/bluetooth/hci_serdev.c4
-rw-r--r--drivers/bus/brcmstb_gisb.c15
-rw-r--r--drivers/bus/mhi/common.h38
-rw-r--r--drivers/bus/mhi/ep/main.c7
-rw-r--r--drivers/bus/mhi/host/boot.c11
-rw-r--r--drivers/bus/mhi/host/init.c91
-rw-r--r--drivers/bus/mhi/host/internal.h56
-rw-r--r--drivers/bus/mhi/host/main.c19
-rw-r--r--drivers/bus/mhi/host/pci_generic.c2
-rw-r--r--drivers/bus/mhi/host/pm.c27
-rw-r--r--drivers/bus/mhi/host/trace.h282
-rw-r--r--drivers/bus/ts-nbus.c81
-rw-r--r--drivers/cdx/Makefile4
-rw-r--r--drivers/cdx/cdx.c20
-rw-r--r--drivers/cdx/cdx.h12
-rw-r--r--drivers/cdx/cdx_msi.c192
-rw-r--r--drivers/cdx/controller/Kconfig1
-rw-r--r--drivers/cdx/controller/cdx_controller.c25
-rw-r--r--drivers/cdx/controller/mc_cdx_pcol.h64
-rw-r--r--drivers/cdx/controller/mcdi_functions.c33
-rw-r--r--drivers/cdx/controller/mcdi_functions.h33
-rw-r--r--drivers/char/hpet.c1
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c6
-rw-r--r--drivers/char/xillybus/xillybus_of.c6
-rw-r--r--drivers/clk/ti/apll.c11
-rw-r--r--drivers/clk/ti/clk.c71
-rw-r--r--drivers/clk/ti/clock.h1
-rw-r--r--drivers/clk/ti/divider.c5
-rw-r--r--drivers/clk/ti/gate.c9
-rw-r--r--drivers/clk/ti/interface.c4
-rw-r--r--drivers/clk/ti/mux.c6
-rw-r--r--drivers/clocksource/arm_global_timer.c35
-rw-r--r--drivers/clocksource/hyperv_timer.c26
-rw-r--r--drivers/clocksource/timer-clint.c2
-rw-r--r--drivers/clocksource/timer-imx-gpt.c3
-rw-r--r--drivers/clocksource/timer-imx-sysctr.c117
-rw-r--r--drivers/clocksource/timer-riscv.c5
-rw-r--r--drivers/clocksource/timer-stm32.c4
-rw-r--r--drivers/clocksource/timer-ti-32k.c2
-rw-r--r--drivers/comedi/drivers/das08.c1
-rw-r--r--drivers/cpufreq/Kconfig29
-rw-r--r--drivers/cpufreq/Kconfig.arm26
-rw-r--r--drivers/cpufreq/cpufreq-dt.c2
-rw-r--r--drivers/cpufreq/cpufreq.c18
-rw-r--r--drivers/cpufreq/freq_table.c2
-rw-r--r--drivers/cpufreq/scmi-cpufreq.c20
-rw-r--r--drivers/cpuidle/cpuidle-riscv-sbi.c49
-rw-r--r--drivers/cxl/core/trace.h14
-rw-r--r--drivers/dio/dio-driver.c2
-rw-r--r--drivers/firewire/core-device.c18
-rw-r--r--drivers/firmware/arm_scmi/perf.c3
-rw-r--r--drivers/firmware/efi/earlycon.c2
-rw-r--r--drivers/firmware/efi/sysfb_efi.c2
-rw-r--r--drivers/firmware/xilinx/zynqmp.c25
-rw-r--r--drivers/fpga/dfl.c2
-rw-r--r--drivers/fpga/fpga-bridge.c8
-rw-r--r--drivers/gnss/serial.c2
-rw-r--r--drivers/gnss/sirf.c2
-rw-r--r--drivers/gpu/drm/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c71
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_state.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c28
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c45
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c30
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/opp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c3
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c2
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h2
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c28
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c19
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c31
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c18
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c18
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt8912b.c16
-rw-r--r--drivers/gpu/drm/bridge/lontium-lt9611uxc.c2
-rw-r--r--drivers/gpu/drm/drm_panel.c17
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_trace.h6
-rw-r--r--drivers/gpu/drm/imx/ipuv3/parallel-display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c9
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c2
-rw-r--r--drivers/gpu/drm/xe/xe_exec.c41
-rw-r--r--drivers/gpu/drm/xe/xe_gt_pagefault.c4
-rw-r--r--drivers/gpu/drm/xe/xe_trace.h2
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c32
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h7
-rw-r--r--drivers/gpu/drm/xe/xe_vram_freq.c4
-rw-r--r--drivers/greybus/bundle.c2
-rw-r--r--drivers/greybus/control.c2
-rw-r--r--drivers/greybus/core.c32
-rw-r--r--drivers/greybus/es2.c8
-rw-r--r--drivers/greybus/gb-beagleplay.c6
-rw-r--r--drivers/greybus/hd.c18
-rw-r--r--drivers/greybus/interface.c11
-rw-r--r--drivers/greybus/module.c2
-rw-r--r--drivers/greybus/svc.c2
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/hv.c36
-rw-r--r--drivers/hv/hv_common.c99
-rw-r--r--drivers/hv/vmbus_drv.c5
-rw-r--r--drivers/hwspinlock/omap_hwspinlock.c57
-rw-r--r--drivers/hwtracing/coresight/Makefile20
-rw-r--r--drivers/hwtracing/coresight/coresight-cfg-afdo.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-core.c496
-rw-r--r--drivers/hwtracing/coresight/coresight-cti-core.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c31
-rw-r--r--drivers/hwtracing/coresight/coresight-etm-perf.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-core.c27
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-core.c38
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h1
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h9
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c32
-rw-r--r--drivers/hwtracing/coresight/coresight-sysfs.c391
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-core.c4
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c46
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h2
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.c145
-rw-r--r--drivers/hwtracing/coresight/coresight-tpda.h6
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.c457
-rw-r--r--drivers/hwtracing/coresight/coresight-tpdm.h114
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c16
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.c24
-rw-r--r--drivers/hwtracing/coresight/ultrasoc-smb.h2
-rw-r--r--drivers/hwtracing/ptt/hisi_ptt.c6
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c740
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c46
-rw-r--r--drivers/iio/Kconfig9
-rw-r--r--drivers/iio/Makefile1
-rw-r--r--drivers/iio/accel/Kconfig8
-rw-r--r--drivers/iio/accel/Makefile1
-rw-r--r--drivers/iio/accel/adxl367.c297
-rw-r--r--drivers/iio/accel/adxl372_spi.c2
-rw-r--r--drivers/iio/accel/bma180.c2
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c15
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c3
-rw-r--r--drivers/iio/accel/bmi088-accel-i2c.c70
-rw-r--r--drivers/iio/accel/da280.c66
-rw-r--r--drivers/iio/accel/kxcjk-1013.c120
-rw-r--r--drivers/iio/accel/kxsd9-spi.c2
-rw-r--r--drivers/iio/accel/mma9551.c4
-rw-r--r--drivers/iio/accel/mma9553.c4
-rw-r--r--drivers/iio/accel/mxc4005.c5
-rw-r--r--drivers/iio/accel/mxc6255.c4
-rw-r--r--drivers/iio/accel/st_accel_i2c.c5
-rw-r--r--drivers/iio/accel/stk8ba50.c4
-rw-r--r--drivers/iio/adc/Kconfig26
-rw-r--r--drivers/iio/adc/Makefile2
-rw-r--r--drivers/iio/adc/ad4130.c131
-rw-r--r--drivers/iio/adc/ad7091r-base.c25
-rw-r--r--drivers/iio/adc/ad9467.c267
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c7
-rw-r--r--drivers/iio/adc/adi-axi-adc.c385
-rw-r--r--drivers/iio/adc/max1363.c171
-rw-r--r--drivers/iio/adc/mcp320x.c29
-rw-r--r--drivers/iio/adc/pac1934.c1636
-rw-r--r--drivers/iio/adc/qcom-pm8xxx-xoadc.c1
-rw-r--r--drivers/iio/adc/rockchip_saradc.c17
-rw-r--r--drivers/iio/adc/rtq6056.c275
-rw-r--r--drivers/iio/adc/ti-adc108s102.c4
-rw-r--r--drivers/iio/adc/ti-ads1015.c2
-rw-r--r--drivers/iio/adc/ti-ads1298.c771
-rw-r--r--drivers/iio/adc/ti-ads8688.c2
-rw-r--r--drivers/iio/amplifiers/hmc425a.c274
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c11
-rw-r--r--drivers/iio/chemical/pms7003.c4
-rw-r--r--drivers/iio/chemical/scd30_serial.c4
-rw-r--r--drivers/iio/chemical/sps30_serial.c4
-rw-r--r--drivers/iio/common/inv_sensors/inv_sensors_timestamp.c2
-rw-r--r--drivers/iio/dac/mcp4821.c2
-rw-r--r--drivers/iio/dummy/iio_dummy_evgen.c2
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c185
-rw-r--r--drivers/iio/frequency/Kconfig10
-rw-r--r--drivers/iio/frequency/Makefile1
-rw-r--r--drivers/iio/frequency/admfm2000.c282
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c4
-rw-r--r--drivers/iio/health/afe4403.c65
-rw-r--r--drivers/iio/health/afe4404.c65
-rw-r--r--drivers/iio/humidity/hdc3020.c445
-rw-r--r--drivers/iio/humidity/hts221_i2c.c4
-rw-r--r--drivers/iio/imu/adis16475.c8
-rw-r--r--drivers/iio/imu/adis16480.c9
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c9
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c78
-rw-r--r--drivers/iio/imu/bmi323/bmi323_i2c.c21
-rw-r--r--drivers/iio/imu/bno055/bno055_ser_core.c4
-rw-r--r--drivers/iio/imu/fxos8700_i2c.c3
-rw-r--r--drivers/iio/imu/fxos8700_spi.c3
-rw-r--r--drivers/iio/imu/kmx61.c2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/Kconfig31
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h2
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c28
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c33
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c5
-rw-r--r--drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c5
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h5
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c21
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c6
-rw-r--r--drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c4
-rw-r--r--drivers/iio/industrialio-backend.c418
-rw-r--r--drivers/iio/industrialio-core.c6
-rw-r--r--drivers/iio/industrialio-gts-helper.c15
-rw-r--r--drivers/iio/light/Kconfig5
-rw-r--r--drivers/iio/light/al3010.c2
-rw-r--r--drivers/iio/light/al3320a.c1
-rw-r--r--drivers/iio/light/as73211.c142
-rw-r--r--drivers/iio/light/hid-sensor-als.c122
-rw-r--r--drivers/iio/light/jsa1212.c4
-rw-r--r--drivers/iio/light/ltr501.c3
-rw-r--r--drivers/iio/light/max44000.c6
-rw-r--r--drivers/iio/light/rpr0521.c4
-rw-r--r--drivers/iio/light/stk3310.c4
-rw-r--r--drivers/iio/light/us5182d.c4
-rw-r--r--drivers/iio/light/vcnl4000.c36
-rw-r--r--drivers/iio/light/vl6180.c1
-rw-r--r--drivers/iio/magnetometer/Kconfig12
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/af8133j.c528
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c3
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c3
-rw-r--r--drivers/iio/magnetometer/mmc35240.c4
-rw-r--r--drivers/iio/potentiometer/max5487.c4
-rw-r--r--drivers/iio/pressure/Kconfig16
-rw-r--r--drivers/iio/pressure/Makefile2
-rw-r--r--drivers/iio/pressure/hp206c.c6
-rw-r--r--drivers/iio/pressure/hsc030pa.c49
-rw-r--r--drivers/iio/pressure/hsc030pa.h7
-rw-r--r--drivers/iio/pressure/hsc030pa_i2c.c9
-rw-r--r--drivers/iio/pressure/hsc030pa_spi.c7
-rw-r--r--drivers/iio/pressure/mprls0025pa.c313
-rw-r--r--drivers/iio/pressure/mprls0025pa.h102
-rw-r--r--drivers/iio/pressure/mprls0025pa_i2c.c100
-rw-r--r--drivers/iio/pressure/mprls0025pa_spi.c92
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c5
-rw-r--r--drivers/iio/proximity/isl29501.c3
-rw-r--r--drivers/iio/proximity/sx9310.c114
-rw-r--r--drivers/iio/proximity/sx9324.c178
-rw-r--r--drivers/iio/proximity/sx9360.c115
-rw-r--r--drivers/iio/temperature/ltc2983.c28
-rw-r--r--drivers/iio/temperature/tmp117.c9
-rw-r--r--drivers/iio/test/Kconfig14
-rw-r--r--drivers/iio/test/Makefile1
-rw-r--r--drivers/iio/test/iio-test-gts.c513
-rw-r--r--drivers/infiniband/core/cm.c20
-rw-r--r--drivers/infiniband/core/device.c37
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c16
-rw-r--r--drivers/infiniband/core/uverbs_ioctl.c78
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/efa/efa.h1
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c32
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c25
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cmd.h3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_cq.c11
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h35
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.c95
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hem.h56
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c154
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h9
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c3
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c339
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_qp.c60
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_restrack.c23
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c3
-rw-r--r--drivers/infiniband/hw/mana/cq.c29
-rw-r--r--drivers/infiniband/hw/mana/main.c82
-rw-r--r--drivers/infiniband/hw/mana/mana_ib.h27
-rw-r--r--drivers/infiniband/hw/mana/mr.c17
-rw-r--r--drivers/infiniband/hw/mana/qp.c94
-rw-r--r--drivers/infiniband/hw/mana/wq.c4
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1
-rw-r--r--drivers/infiniband/sw/rxe/rxe.c6
-rw-r--r--drivers/infiniband/sw/rxe/rxe.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c18
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c8
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c12
-rw-r--r--drivers/infiniband/sw/rxe/rxe_task.c4
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c218
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c3
-rw-r--r--drivers/input/keyboard/amikbd.c6
-rw-r--r--drivers/interconnect/core.c4
-rw-r--r--drivers/interconnect/qcom/Kconfig18
-rw-r--r--drivers/interconnect/qcom/Makefile4
-rw-r--r--drivers/interconnect/qcom/icc-common.c3
-rw-r--r--drivers/interconnect/qcom/icc-common.h3
-rw-r--r--drivers/interconnect/qcom/msm8909.c1329
-rw-r--r--drivers/interconnect/qcom/sa8775p.c56
-rw-r--r--drivers/interconnect/qcom/sm6115.c12
-rw-r--r--drivers/interconnect/qcom/sm7150.c1754
-rw-r--r--drivers/interconnect/qcom/sm7150.h140
-rw-r--r--drivers/interconnect/qcom/sm8250.c2
-rw-r--r--drivers/interconnect/qcom/sm8550.c574
-rw-r--r--drivers/interconnect/qcom/sm8550.h284
-rw-r--r--drivers/interconnect/qcom/x1e80100.c327
-rw-r--r--drivers/interconnect/samsung/exynos.c2
-rw-r--r--drivers/iommu/dma-iommu.c9
-rw-r--r--drivers/iommu/intel/Kconfig2
-rw-r--r--drivers/ipack/ipack.c2
-rw-r--r--drivers/irqchip/irq-renesas-rzg2l.c72
-rw-r--r--drivers/mcb/mcb-core.c4
-rw-r--r--drivers/md/Kconfig1
-rw-r--r--drivers/md/dm-integrity.c18
-rw-r--r--drivers/md/dm-snap.c4
-rw-r--r--drivers/memory/tegra/mc.c2
-rw-r--r--drivers/memory/tegra/tegra124-emc.c2
-rw-r--r--drivers/memory/tegra/tegra124.c2
-rw-r--r--drivers/memory/tegra/tegra186-emc.c2
-rw-r--r--drivers/memory/tegra/tegra20-emc.c2
-rw-r--r--drivers/memory/tegra/tegra20.c2
-rw-r--r--drivers/memory/tegra/tegra30-emc.c2
-rw-r--r--drivers/memory/tegra/tegra30.c2
-rw-r--r--drivers/mfd/rave-sp.c4
-rw-r--r--drivers/misc/atmel-ssc.c6
-rw-r--r--drivers/misc/cxl/of.c5
-rw-r--r--drivers/misc/eeprom/eeprom_93xx46.c1
-rw-r--r--drivers/misc/eeprom/idt_89hpesx.c6
-rw-r--r--drivers/misc/fastrpc.c6
-rw-r--r--drivers/misc/hi6421v600-irq.c1
-rw-r--r--drivers/misc/hisi_hikey_usb.c7
-rw-r--r--drivers/misc/hpilo.c8
-rw-r--r--drivers/misc/lkdtm/bugs.c2
-rw-r--r--drivers/misc/mei/gsc-me.c22
-rw-r--r--drivers/misc/mei/hdcp/Kconfig2
-rw-r--r--drivers/misc/mei/hdcp/mei_hdcp.c14
-rw-r--r--drivers/misc/mei/pci-me.c38
-rw-r--r--drivers/misc/mei/pci-txe.c40
-rw-r--r--drivers/misc/mei/platform-vsc.c6
-rw-r--r--drivers/misc/mei/pxp/Kconfig2
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c14
-rw-r--r--drivers/misc/mei/vsc-tp.c33
-rw-r--r--drivers/misc/open-dice.c5
-rw-r--r--drivers/misc/sram.c6
-rw-r--r--drivers/misc/ti-st/st_kim.c5
-rw-r--r--drivers/misc/tifm_core.c2
-rw-r--r--drivers/misc/vcpu_stall_detector.c6
-rw-r--r--drivers/misc/xilinx_sdfec.c5
-rw-r--r--drivers/misc/xilinx_tmr_inject.c5
-rw-r--r--drivers/most/core.c2
-rw-r--r--drivers/mtd/ubi/Kconfig13
-rw-r--r--drivers/mtd/ubi/Makefile1
-rw-r--r--drivers/mtd/ubi/block.c136
-rw-r--r--drivers/mtd/ubi/build.c154
-rw-r--r--drivers/mtd/ubi/eba.c7
-rw-r--r--drivers/mtd/ubi/fastmap.c7
-rw-r--r--drivers/mtd/ubi/kapi.c56
-rw-r--r--drivers/mtd/ubi/nvmem.c191
-rw-r--r--drivers/mtd/ubi/ubi.h3
-rw-r--r--drivers/mtd/ubi/vmt.c75
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/net/can/kvaser_pciefd.c4
-rw-r--r--drivers/net/dsa/mt7530.c66
-rw-r--r--drivers/net/dsa/mt7530.h22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c25
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h1
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h8
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ddp.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c10
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c31
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c119
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c71
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c7
-rw-r--r--drivers/net/ethernet/mediatek/mtk_ppe.c18
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c2
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/veth.c18
-rw-r--r--drivers/net/virtio_net.c151
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c6
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c2
-rw-r--r--drivers/net/wireguard/device.c11
-rw-r--r--drivers/net/wireguard/netlink.c10
-rw-r--r--drivers/net/wireguard/receive.c6
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/s3fwrn5/uart.c4
-rw-r--r--drivers/nvme/host/apple.c6
-rw-r--r--drivers/nvme/host/core.c11
-rw-r--r--drivers/nvme/host/fabrics.h7
-rw-r--r--drivers/nvme/host/pci.c3
-rw-r--r--drivers/nvme/host/pr.c3
-rw-r--r--drivers/nvme/host/sysfs.c3
-rw-r--r--drivers/nvme/host/tcp.c21
-rw-r--r--drivers/nvme/host/trace.c105
-rw-r--r--drivers/nvme/target/rdma.c8
-rw-r--r--drivers/nvme/target/tcp.c1
-rw-r--r--drivers/nvme/target/trace.c98
-rw-r--r--drivers/nvmem/core.c5
-rw-r--r--drivers/nvmem/layouts.c2
-rw-r--r--drivers/nvmem/meson-efuse.c25
-rw-r--r--drivers/nvmem/mtk-efuse.c20
-rw-r--r--drivers/nvmem/zynqmp_nvmem.c215
-rw-r--r--drivers/of/property.c17
-rw-r--r--drivers/parport/parport_amiga.c5
-rw-r--r--drivers/parport/parport_sunbpp.c6
-rw-r--r--drivers/perf/Kconfig17
-rw-r--r--drivers/perf/riscv_pmu_sbi.c37
-rw-r--r--drivers/phy/Kconfig1
-rw-r--r--drivers/phy/Makefile1
-rw-r--r--drivers/phy/phy-core.c47
-rw-r--r--drivers/phy/realtek/Kconfig32
-rw-r--r--drivers/phy/realtek/Makefile3
-rw-r--r--drivers/phy/realtek/phy-rtk-usb2.c1312
-rw-r--r--drivers/phy/realtek/phy-rtk-usb3.c748
-rw-r--r--drivers/phy/tegra/xusb.c13
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c19
-rw-r--r--drivers/platform/chrome/cros_ec_uart.c4
-rw-r--r--drivers/platform/goldfish/Kconfig1
-rw-r--r--drivers/platform/surface/aggregator/core.c4
-rw-r--r--drivers/powercap/intel_rapl_msr.c5
-rw-r--r--drivers/pps/generators/Makefile4
-rw-r--r--drivers/regulator/core.c11
-rw-r--r--drivers/remoteproc/imx_dsp_rproc.c11
-rw-r--r--drivers/remoteproc/imx_rproc.c16
-rw-r--r--drivers/remoteproc/qcom_q6v5_adsp.c14
-rw-r--r--drivers/remoteproc/qcom_q6v5_mss.c28
-rw-r--r--drivers/remoteproc/qcom_q6v5_pas.c326
-rw-r--r--drivers/remoteproc/qcom_q6v5_wcss.c24
-rw-r--r--drivers/remoteproc/qcom_wcnss.c17
-rw-r--r--drivers/remoteproc/remoteproc_core.c29
-rw-r--r--drivers/remoteproc/remoteproc_virtio.c6
-rw-r--r--drivers/remoteproc/st_remoteproc.c15
-rw-r--r--drivers/remoteproc/stm32_rproc.c10
-rw-r--r--drivers/remoteproc/ti_k3_dsp_remoteproc.c156
-rw-r--r--drivers/reset/Kconfig12
-rw-r--r--drivers/reset/Makefile1
-rw-r--r--drivers/reset/core.c224
-rw-r--r--drivers/reset/reset-gpio.c119
-rw-r--r--drivers/reset/reset-simple.c2
-rw-r--r--drivers/rpmsg/rpmsg_char.c12
-rw-r--r--drivers/rpmsg/rpmsg_core.c2
-rw-r--r--drivers/rpmsg/rpmsg_ctrl.c12
-rw-r--r--drivers/rtc/Kconfig3
-rw-r--r--drivers/rtc/class.c21
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c340
-rw-r--r--drivers/rtc/rtc-m41t80.c5
-rw-r--r--drivers/rtc/rtc-max31335.c2
-rw-r--r--drivers/rtc/rtc-nct3018y.c6
-rw-r--r--drivers/rtc/rtc-pcf8523.c25
-rw-r--r--drivers/s390/block/dasd.c4
-rw-r--r--drivers/s390/block/dasd_3990_erp.c14
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c118
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/s390/block/dasd_fba.c32
-rw-r--r--drivers/s390/block/dcssblk.c2
-rw-r--r--drivers/s390/block/scm_blk.c6
-rw-r--r--drivers/s390/char/con3215.c4
-rw-r--r--drivers/s390/char/fs3270.c14
-rw-r--r--drivers/s390/char/raw3270.c42
-rw-r--r--drivers/s390/char/raw3270.h2
-rw-r--r--drivers/s390/char/tape.h12
-rw-r--r--drivers/s390/char/tape_class.c17
-rw-r--r--drivers/s390/char/vmlogrdr.c18
-rw-r--r--drivers/s390/char/vmur.c22
-rw-r--r--drivers/s390/cio/ccwgroup.c4
-rw-r--r--drivers/s390/cio/chsc.c12
-rw-r--r--drivers/s390/cio/chsc.h6
-rw-r--r--drivers/s390/cio/cio.c4
-rw-r--r--drivers/s390/cio/css.c25
-rw-r--r--drivers/s390/cio/device_fsm.c8
-rw-r--r--drivers/s390/cio/device_id.c2
-rw-r--r--drivers/s390/cio/device_ops.c5
-rw-r--r--drivers/s390/cio/device_pgid.c8
-rw-r--r--drivers/s390/cio/device_status.c2
-rw-r--r--drivers/s390/cio/eadm_sch.c4
-rw-r--r--drivers/s390/cio/fcx.c22
-rw-r--r--drivers/s390/cio/orb.h9
-rw-r--r--drivers/s390/cio/qdio_main.c12
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/cio/qdio_thinint.c6
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c82
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c39
-rw-r--r--drivers/s390/net/ctcm_fsms.c4
-rw-r--r--drivers/s390/net/ctcm_main.c2
-rw-r--r--drivers/s390/net/ctcm_mpc.c20
-rw-r--r--drivers/s390/net/lcs.c12
-rw-r--r--drivers/s390/net/qeth_core_main.c24
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c4
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h6
-rw-r--r--drivers/s390/virtio/virtio_ccw.c170
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/Makefile2
-rw-r--r--drivers/scsi/scsi.c2
-rw-r--r--drivers/scsi/scsi_debug.c293
-rw-r--r--drivers/scsi/scsi_proto_test.c56
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/sd.c111
-rw-r--r--drivers/scsi/sd.h3
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/internals.h2
-rw-r--r--drivers/siox/siox-bus-gpio.c62
-rw-r--r--drivers/siox/siox-core.c52
-rw-r--r--drivers/siox/siox.h4
-rw-r--r--drivers/slimbus/core.c6
-rw-r--r--drivers/slimbus/qcom-ngd-ctrl.c8
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c2
-rw-r--r--drivers/soc/fsl/qbman/qman.c25
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c22
-rw-r--r--drivers/spi/spi-fsl-lpspi.c8
-rw-r--r--drivers/spi/spi-imx.c4
-rw-r--r--drivers/spi/spi-lm70llp.c4
-rw-r--r--drivers/spi/spi-mem.c2
-rw-r--r--drivers/spi/spi-mt65xx.c22
-rw-r--r--drivers/spi/spi.c24
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/Makefile2
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c7
-rw-r--r--drivers/staging/board/Kconfig12
-rw-r--r--drivers/staging/board/Makefile4
-rw-r--r--drivers/staging/board/TODO2
-rw-r--r--drivers/staging/board/armadillo800eva.c88
-rw-r--r--drivers/staging/board/board.c204
-rw-r--r--drivers/staging/board/board.h46
-rw-r--r--drivers/staging/board/kzm9d.c26
-rw-r--r--drivers/staging/emxx_udc/Kconfig11
-rw-r--r--drivers/staging/emxx_udc/Makefile2
-rw-r--r--drivers/staging/emxx_udc/TODO6
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c3223
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h554
-rw-r--r--drivers/staging/fbtft/fbtft-core.c2
-rw-r--r--drivers/staging/fieldbus/anybuss/arcx-anybus.c6
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c2
-rw-r--r--drivers/staging/fieldbus/dev_core.c8
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c2
-rw-r--r--drivers/staging/greybus/audio_apbridgea.h1
-rw-r--r--drivers/staging/greybus/audio_manager.c8
-rw-r--r--drivers/staging/greybus/audio_topology.c3
-rw-r--r--drivers/staging/greybus/authentication.c6
-rw-r--r--drivers/staging/greybus/bootrom.c8
-rw-r--r--drivers/staging/greybus/fw-download.c15
-rw-r--r--drivers/staging/greybus/fw-management.c20
-rw-r--r--drivers/staging/greybus/gbphy.c8
-rw-r--r--drivers/staging/greybus/greybus_authentication.h6
-rw-r--r--drivers/staging/greybus/greybus_firmware.h8
-rw-r--r--drivers/staging/greybus/light.c8
-rw-r--r--drivers/staging/greybus/loopback.c6
-rw-r--r--drivers/staging/greybus/raw.c6
-rw-r--r--drivers/staging/greybus/vibrator.c6
-rw-r--r--drivers/staging/nvec/TODO7
-rw-r--r--drivers/staging/nvec/nvec.c7
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c2
-rw-r--r--drivers/staging/octeon/octeon-stubs.h2
-rw-r--r--drivers/staging/pi433/pi433_if.c1
-rw-r--r--drivers/staging/pi433/rf69.c4
-rw-r--r--drivers/staging/pi433/rf69.h4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c103
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c64
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c37
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c4
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c70
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HT.h6
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c44
-rw-r--r--drivers/staging/rtl8192e/rtl819x_Qos.h2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c30
-rw-r--r--drivers/staging/rtl8192e/rtllib.h98
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c50
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c278
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c8
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c18
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c9
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c3
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c14
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c3
-rw-r--r--drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c61
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c2
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h2
-rw-r--r--drivers/staging/vme_user/vme.c2
-rw-r--r--drivers/staging/vme_user/vme.h2
-rw-r--r--drivers/staging/vme_user/vme_tsi148.h6
-rw-r--r--drivers/staging/vt6655/card.c74
-rw-r--r--drivers/staging/vt6655/rxtx.h1
-rw-r--r--drivers/thermal/Makefile2
-rw-r--r--drivers/thermal/mediatek/auxadc_thermal.c3
-rw-r--r--drivers/thermal/mediatek/lvts_thermal.c4
-rw-r--r--drivers/thermal/qoriq_thermal.c12
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c4
-rw-r--r--drivers/thermal/st/st_thermal.h18
-rw-r--r--drivers/thermal/st/st_thermal_memmap.c2
-rw-r--r--drivers/thermal/sun8i_thermal.c139
-rw-r--r--drivers/thermal/thermal_of.c14
-rw-r--r--drivers/thunderbolt/Makefile1
-rw-r--r--drivers/thunderbolt/ctl.c19
-rw-r--r--drivers/thunderbolt/ctl.h4
-rw-r--r--drivers/thunderbolt/domain.c19
-rw-r--r--drivers/thunderbolt/icm.c2
-rw-r--r--drivers/thunderbolt/lc.c45
-rw-r--r--drivers/thunderbolt/nhi.c11
-rw-r--r--drivers/thunderbolt/nvm.c4
-rw-r--r--drivers/thunderbolt/path.c13
-rw-r--r--drivers/thunderbolt/quirks.c14
-rw-r--r--drivers/thunderbolt/retimer.c2
-rw-r--r--drivers/thunderbolt/switch.c140
-rw-r--r--drivers/thunderbolt/tb.c890
-rw-r--r--drivers/thunderbolt/tb.h29
-rw-r--r--drivers/thunderbolt/tb_regs.h6
-rw-r--r--drivers/thunderbolt/trace.h188
-rw-r--r--drivers/thunderbolt/tunnel.c96
-rw-r--r--drivers/thunderbolt/tunnel.h6
-rw-r--r--drivers/thunderbolt/usb4.c43
-rw-r--r--drivers/thunderbolt/usb4_port.c2
-rw-r--r--drivers/thunderbolt/xdomain.c16
-rw-r--r--drivers/tty/Kconfig7
-rw-r--r--drivers/tty/amiserial.c6
-rw-r--r--drivers/tty/goldfish.c5
-rw-r--r--drivers/tty/hvc/hvc_iucv.c6
-rw-r--r--drivers/tty/serdev/core.c2
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c10
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c50
-rw-r--r--drivers/tty/serial/8250/8250_bcm2835aux.c94
-rw-r--r--drivers/tty/serial/8250/8250_bcm7271.c73
-rw-r--r--drivers/tty/serial/8250/8250_dw.c121
-rw-r--r--drivers/tty/serial/8250/8250_exar.c52
-rw-r--r--drivers/tty/serial/8250/8250_ingenic.c20
-rw-r--r--drivers/tty/serial/8250/8250_lpc18xx.c20
-rw-r--r--drivers/tty/serial/8250/8250_of.c143
-rw-r--r--drivers/tty/serial/8250/8250_omap.c29
-rw-r--r--drivers/tty/serial/8250/8250_pci1xxxx.c175
-rw-r--r--drivers/tty/serial/8250/8250_port.c50
-rw-r--r--drivers/tty/serial/8250/8250_pxa.c22
-rw-r--r--drivers/tty/serial/8250/8250_tegra.c26
-rw-r--r--drivers/tty/serial/8250/8250_uniphier.c17
-rw-r--r--drivers/tty/serial/8250/Kconfig1
-rw-r--r--drivers/tty/serial/amba-pl011.c24
-rw-r--r--drivers/tty/serial/ar933x_uart.c18
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c24
-rw-r--r--drivers/tty/serial/fsl_linflexuart.c1
-rw-r--r--drivers/tty/serial/jsm/jsm_cls.c1
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c19
-rw-r--r--drivers/tty/serial/max310x.c329
-rw-r--r--drivers/tty/serial/mcf.c27
-rw-r--r--drivers/tty/serial/meson_uart.c22
-rw-r--r--drivers/tty/serial/msm_serial.c33
-rw-r--r--drivers/tty/serial/omap-serial.c16
-rw-r--r--drivers/tty/serial/owl-uart.c30
-rw-r--r--drivers/tty/serial/pch_uart.c70
-rw-r--r--drivers/tty/serial/pmac_zilog.c9
-rw-r--r--drivers/tty/serial/pxa.c17
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c27
-rw-r--r--drivers/tty/serial/rda-uart.c28
-rw-r--r--drivers/tty/serial/samsung_tty.c270
-rw-r--r--drivers/tty/serial/serial_base_bus.c2
-rw-r--r--drivers/tty/serial/serial_core.c12
-rw-r--r--drivers/tty/serial/serial_port.c145
-rw-r--r--drivers/tty/serial/serial_txx9.c3
-rw-r--r--drivers/tty/serial/sh-sci.c245
-rw-r--r--drivers/tty/serial/sifive.c17
-rw-r--r--drivers/tty/serial/st-asc.c40
-rw-r--r--drivers/tty/serial/stm32-usart.c223
-rw-r--r--drivers/tty/serial/stm32-usart.h38
-rw-r--r--drivers/tty/serial/sunplus-uart.c18
-rw-r--r--drivers/tty/serial/xilinx_uartps.c236
-rw-r--r--drivers/tty/tty_buffer.c1
-rw-r--r--drivers/tty/vt/Makefile4
-rw-r--r--drivers/tty/vt/selection.c43
-rw-r--r--drivers/tty/vt/vt.c1531
-rw-r--r--drivers/tty/vt/vt_ioctl.c6
-rw-r--r--drivers/uio/uio.c47
-rw-r--r--drivers/uio/uio_dmem_genirq.c22
-rw-r--r--drivers/uio/uio_pruss.c6
-rw-r--r--drivers/usb/cdns3/drd.c2
-rw-r--r--drivers/usb/core/Kconfig27
-rw-r--r--drivers/usb/core/driver.c8
-rw-r--r--drivers/usb/core/endpoint.c2
-rw-r--r--drivers/usb/core/hcd.c20
-rw-r--r--drivers/usb/core/hub.c29
-rw-r--r--drivers/usb/core/message.c7
-rw-r--r--drivers/usb/core/of.c71
-rw-r--r--drivers/usb/core/phy.c120
-rw-r--r--drivers/usb/core/phy.h3
-rw-r--r--drivers/usb/core/port.c4
-rw-r--r--drivers/usb/core/sysfs.c103
-rw-r--r--drivers/usb/core/usb-acpi.c46
-rw-r--r--drivers/usb/core/usb.c2
-rw-r--r--drivers/usb/core/usb.h8
-rw-r--r--drivers/usb/dwc3/Kconfig2
-rw-r--r--drivers/usb/dwc3/core.h2
-rw-r--r--drivers/usb/dwc3/dwc3-am62.c42
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c4
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c276
-rw-r--r--drivers/usb/dwc3/ep0.c1
-rw-r--r--drivers/usb/dwc3/gadget.c91
-rw-r--r--drivers/usb/dwc3/gadget.h1
-rw-r--r--drivers/usb/dwc3/host.c50
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/function/f_fs.c533
-rw-r--r--drivers/usb/gadget/function/u_ether.c2
-rw-r--r--drivers/usb/gadget/function/uvc_video.c115
-rw-r--r--drivers/usb/gadget/udc/core.c7
-rw-r--r--drivers/usb/gadget/udc/fsl_udc_core.c127
-rw-r--r--drivers/usb/gadget/udc/fsl_usb2_udc.h47
-rw-r--r--drivers/usb/gadget/udc/net2272.c2
-rw-r--r--drivers/usb/gadget/udc/pxa27x_udc.c1
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c1
-rw-r--r--drivers/usb/gadget/udc/tegra-xudc.c39
-rw-r--r--drivers/usb/host/ehci-orion.c18
-rw-r--r--drivers/usb/host/ohci-pxa27x.c1
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/xhci-caps.h85
-rw-r--r--drivers/usb/host/xhci-dbgcap.c13
-rw-r--r--drivers/usb/host/xhci-dbgcap.h2
-rw-r--r--drivers/usb/host/xhci-hub.c69
-rw-r--r--drivers/usb/host/xhci-mem.c95
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c14
-rw-r--r--drivers/usb/host/xhci-pci.c15
-rw-r--r--drivers/usb/host/xhci-port.h176
-rw-r--r--drivers/usb/host/xhci-ring.c227
-rw-r--r--drivers/usb/host/xhci-trace.h12
-rw-r--r--drivers/usb/host/xhci.c56
-rw-r--r--drivers/usb/host/xhci.h272
-rw-r--r--drivers/usb/image/mdc800.c1
-rw-r--r--drivers/usb/misc/onboard_usb_hub.c4
-rw-r--r--drivers/usb/misc/onboard_usb_hub.h7
-rw-r--r--drivers/usb/mtu3/mtu3_host.c30
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/phy/phy-generic.c55
-rw-r--r--drivers/usb/phy/phy.c2
-rw-r--r--drivers/usb/roles/class.c43
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/ftdi_sio.c4
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h6
-rw-r--r--drivers/usb/serial/keyspan.c1
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/oti6858.c1
-rw-r--r--drivers/usb/storage/freecom.c1
-rw-r--r--drivers/usb/storage/sddr55.c4
-rw-r--r--drivers/usb/typec/altmodes/displayport.c165
-rw-r--r--drivers/usb/typec/bus.c102
-rw-r--r--drivers/usb/typec/class.c61
-rw-r--r--drivers/usb/typec/class.h7
-rw-r--r--drivers/usb/typec/mux.c2
-rw-r--r--drivers/usb/typec/mux/Kconfig10
-rw-r--r--drivers/usb/typec/mux/Makefile1
-rw-r--r--drivers/usb/typec/mux/it5205.c294
-rw-r--r--drivers/usb/typec/pd.c30
-rw-r--r--drivers/usb/typec/retimer.c2
-rw-r--r--drivers/usb/typec/tcpm/fusb302.c2
-rw-r--r--drivers/usb/typec/tcpm/qcom/Makefile3
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c254
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h27
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c159
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h94
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c80
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c290
-rw-r--r--drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h172
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c27
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim.h1
-rw-r--r--drivers/usb/typec/tcpm/tcpci_maxim_core.c38
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c1052
-rw-r--r--drivers/usb/typec/tcpm/wcove.c2
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c294
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h107
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c92
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c8
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.c11
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_base.h2
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c15
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c13
-rw-r--r--drivers/vdpa/pds/aux_drv.c2
-rw-r--r--drivers/vdpa/pds/vdpa_dev.c20
-rw-r--r--drivers/vdpa/pds/vdpa_dev.h1
-rw-r--r--drivers/vdpa/vdpa.c214
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c15
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.c27
-rw-r--r--drivers/vdpa/vdpa_user/iova_domain.h8
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c34
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c8
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/vdpa.c14
-rw-r--r--drivers/video/console/dummycon.c38
-rw-r--r--drivers/video/console/mdacon.c43
-rw-r--r--drivers/video/console/newport_con.c69
-rw-r--r--drivers/video/console/sticon.c79
-rw-r--r--drivers/video/console/vgacon.c152
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/arkfb.c15
-rw-r--r--drivers/video/fbdev/core/bitblit.c13
-rw-r--r--drivers/video/fbdev/core/fbcon.c139
-rw-r--r--drivers/video/fbdev/core/fbcon.h4
-rw-r--r--drivers/video/fbdev/core/fbcon_ccw.c13
-rw-r--r--drivers/video/fbdev/core/fbcon_cw.c13
-rw-r--r--drivers/video/fbdev/core/fbcon_ud.c13
-rw-r--r--drivers/video/fbdev/core/fbmem.c12
-rw-r--r--drivers/video/fbdev/core/fbmon.c7
-rw-r--r--drivers/video/fbdev/core/svgalib.c15
-rw-r--r--drivers/video/fbdev/core/tileblit.c4
-rw-r--r--drivers/video/fbdev/mb862xx/mb862xxfbdrv.c18
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c13
-rw-r--r--drivers/video/fbdev/s3fb.c15
-rw-r--r--drivers/video/fbdev/tgafb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/video/fbdev/vga16fb.c6
-rw-r--r--drivers/video/fbdev/via/accel.c4
-rw-r--r--drivers/video/fbdev/vt8623fb.c15
-rw-r--r--drivers/video/sticore.c2
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_ring.c6
-rw-r--r--drivers/virtio/virtio_vdpa.c5
-rw-r--r--drivers/w1/masters/Kconfig10
-rw-r--r--drivers/w1/masters/Makefile1
-rw-r--r--drivers/w1/masters/mxc_w1.c6
-rw-r--r--drivers/w1/masters/omap_hdq.c6
-rw-r--r--drivers/w1/masters/sgi_w1.c6
-rw-r--r--drivers/w1/masters/w1-gpio.c6
-rw-r--r--drivers/w1/masters/w1-uart.c415
-rw-r--r--drivers/w1/w1.c2
-rw-r--r--drivers/xen/balloon.c2
-rw-r--r--drivers/xen/events/events_base.c22
-rw-r--r--drivers/xen/evtchn.c6
-rw-r--r--drivers/xen/grant-dma-iommu.c6
918 files changed, 31237 insertions, 17111 deletions
diff --git a/drivers/accessibility/speakup/devsynth.c b/drivers/accessibility/speakup/devsynth.c
index d30571663585..cb7e1114e8eb 100644
--- a/drivers/accessibility/speakup/devsynth.c
+++ b/drivers/accessibility/speakup/devsynth.c
@@ -7,9 +7,10 @@
#include "speakup.h"
#include "spk_priv.h"
-static int misc_registered;
+static int synth_registered, synthu_registered;
static int dev_opened;
+/* Latin1 version */
static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
size_t nbytes, loff_t *ppos)
{
@@ -34,6 +35,98 @@ static ssize_t speakup_file_write(struct file *fp, const char __user *buffer,
return (ssize_t)nbytes;
}
+/* UTF-8 version */
+static ssize_t speakup_file_writeu(struct file *fp, const char __user *buffer,
+ size_t nbytes, loff_t *ppos)
+{
+ size_t count = nbytes, want;
+ const char __user *ptr = buffer;
+ size_t bytes;
+ unsigned long flags;
+ unsigned char buf[256];
+ u16 ubuf[256];
+ size_t in, in2, out;
+
+ if (!synth)
+ return -ENODEV;
+
+ want = 1;
+ while (count >= want) {
+ /* Copy some UTF-8 piece from userland */
+ bytes = min(count, sizeof(buf));
+ if (copy_from_user(buf, ptr, bytes))
+ return -EFAULT;
+
+ /* Convert to u16 */
+ for (in = 0, out = 0; in < bytes; in++) {
+ unsigned char c = buf[in];
+ int nbytes = 8 - fls(c ^ 0xff);
+ u32 value;
+
+ switch (nbytes) {
+ case 8: /* 0xff */
+ case 7: /* 0xfe */
+ case 1: /* 0x80 */
+ /* Invalid, drop */
+ goto drop;
+
+ case 0:
+ /* ASCII, copy */
+ ubuf[out++] = c;
+ continue;
+
+ default:
+ /* 2..6-byte UTF-8 */
+
+ if (bytes - in < nbytes) {
+ /* We don't have it all yet, stop here
+ * and wait for the rest
+ */
+ bytes = in;
+ want = nbytes;
+ continue;
+ }
+
+ /* First byte */
+ value = c & ((1u << (7 - nbytes)) - 1);
+
+ /* Other bytes */
+ for (in2 = 2; in2 <= nbytes; in2++) {
+ c = buf[in + 1];
+ if ((c & 0xc0) != 0x80) {
+ /* Invalid, drop the head */
+ want = 1;
+ goto drop;
+ }
+ value = (value << 6) | (c & 0x3f);
+ in++;
+ }
+
+ if (value < 0x10000)
+ ubuf[out++] = value;
+ want = 1;
+ break;
+ }
+drop:
+ /* empty statement */;
+ }
+
+ count -= bytes;
+ ptr += bytes;
+
+ /* And speak this up */
+ if (out) {
+ spin_lock_irqsave(&speakup_info.spinlock, flags);
+ for (in = 0; in < out; in++)
+ synth_buffer_add(ubuf[in]);
+ synth_start();
+ spin_unlock_irqrestore(&speakup_info.spinlock, flags);
+ }
+ }
+
+ return (ssize_t)(nbytes - count);
+}
+
static ssize_t speakup_file_read(struct file *fp, char __user *buf,
size_t nbytes, loff_t *ppos)
{
@@ -62,31 +155,57 @@ static const struct file_operations synth_fops = {
.release = speakup_file_release,
};
+static const struct file_operations synthu_fops = {
+ .read = speakup_file_read,
+ .write = speakup_file_writeu,
+ .open = speakup_file_open,
+ .release = speakup_file_release,
+};
+
static struct miscdevice synth_device = {
.minor = MISC_DYNAMIC_MINOR,
.name = "synth",
.fops = &synth_fops,
};
+static struct miscdevice synthu_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "synthu",
+ .fops = &synthu_fops,
+};
+
void speakup_register_devsynth(void)
{
- if (misc_registered != 0)
- return;
-/* zero it so if register fails, deregister will not ref invalid ptrs */
- if (misc_register(&synth_device)) {
- pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
- } else {
- pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
- MISC_MAJOR, synth_device.minor);
- misc_registered = 1;
+ if (!synth_registered) {
+ if (misc_register(&synth_device)) {
+ pr_warn("Couldn't initialize miscdevice /dev/synth.\n");
+ } else {
+ pr_info("initialized device: /dev/synth, node (MAJOR %d, MINOR %d)\n",
+ MISC_MAJOR, synth_device.minor);
+ synth_registered = 1;
+ }
+ }
+ if (!synthu_registered) {
+ if (misc_register(&synthu_device)) {
+ pr_warn("Couldn't initialize miscdevice /dev/synthu.\n");
+ } else {
+ pr_info("initialized device: /dev/synthu, node (MAJOR %d, MINOR %d)\n",
+ MISC_MAJOR, synthu_device.minor);
+ synthu_registered = 1;
+ }
}
}
void speakup_unregister_devsynth(void)
{
- if (!misc_registered)
- return;
- pr_info("speakup: unregistering synth device /dev/synth\n");
- misc_deregister(&synth_device);
- misc_registered = 0;
+ if (synth_registered) {
+ pr_info("speakup: unregistering synth device /dev/synth\n");
+ misc_deregister(&synth_device);
+ synth_registered = 0;
+ }
+ if (synthu_registered) {
+ pr_info("speakup: unregistering synth device /dev/synthu\n");
+ misc_deregister(&synthu_device);
+ synthu_registered = 0;
+ }
}
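
The /dev/synthu write path added above classifies each UTF-8 lead byte with 8 - fls(c ^ 0xff) and folds the continuation bytes into a u16 for synth_buffer_add(). Below is a userspace sketch of that decode, restricted to the 2..4-byte forms and compilable on its own; fls_u8() and the sample string are stand-ins introduced for the demo, not part of the patch.

/*
 * Standalone illustration (not the kernel code) of the lead-byte trick used in
 * speakup_file_writeu(): for a UTF-8 lead byte c, the sequence length is
 * 8 - fls(c ^ 0xff), where fls() returns the 1-based index of the highest set
 * bit, as in the kernel's <linux/bitops.h>.
 */
#include <stdio.h>
#include <stdint.h>

/* Minimal userspace stand-in for the kernel fls(), using a GCC/Clang builtin. */
static int fls_u8(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* "A", U+00E9 and U+20AC encoded in UTF-8, purely as demo input. */
	const unsigned char *s = (const unsigned char *)"A\xc3\xa9\xe2\x82\xac";

	for (size_t i = 0; s[i]; ) {
		unsigned char c = s[i];
		int nbytes = 8 - fls_u8(c ^ 0xff); /* 0: ASCII, 2..4: lead byte, else invalid */
		uint32_t value;
		int ok = 1;

		if (nbytes == 0) {
			value = c;				/* plain ASCII byte */
			i++;
		} else if (nbytes >= 2 && nbytes <= 4) {
			value = c & ((1u << (7 - nbytes)) - 1);	/* payload bits of the lead byte */
			for (int k = 1; k < nbytes; k++) {
				if ((s[i + k] & 0xc0) != 0x80) {	/* bad continuation byte */
					ok = 0;
					break;
				}
				value = (value << 6) | (s[i + k] & 0x3f);
			}
			if (!ok) {				/* drop the lead byte and resync */
				i++;
				continue;
			}
			i += nbytes;
		} else {					/* 0x80, 0xfe, 0xff: drop, as the driver does */
			i++;
			continue;
		}
		printf("U+%04X\n", (unsigned int)value);
	}
	return 0;
}
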
diff --git a/drivers/accessibility/speakup/synth.c b/drivers/accessibility/speakup/synth.c
index eea2a2fa4f01..45f906103133 100644
--- a/drivers/accessibility/speakup/synth.c
+++ b/drivers/accessibility/speakup/synth.c
@@ -208,8 +208,10 @@ void spk_do_flush(void)
wake_up_process(speakup_task);
}
-void synth_write(const char *buf, size_t count)
+void synth_write(const char *_buf, size_t count)
{
+ const unsigned char *buf = (const unsigned char *) _buf;
+
while (count--)
synth_buffer_add(*buf++);
synth_start();
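
The synth_write() change above only re-types the buffer: walking it as plain char would sign-extend bytes above 0x7f on platforms where char is signed, before they reach synth_buffer_add(). A minimal userspace demonstration of that promotion, with a made-up Latin-1 byte:

/*
 * Demonstration (not kernel code) of why synth_write() now walks the buffer as
 * unsigned char: with a signed plain char, 0xE9 is promoted to a negative int.
 */
#include <stdio.h>

int main(void)
{
	const char *buf = "\xe9";                        /* Latin-1 'e acute' */
	const unsigned char *ubuf = (const unsigned char *)buf;

	printf("as char:          %d\n", (int)buf[0]);   /* typically -23 */
	printf("as unsigned char: %d\n", (int)ubuf[0]);  /* 233 */
	return 0;
}
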
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index c645bb453f3b..ff1689bb3124 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -286,7 +286,7 @@ config ACPI_CPPC_LIB
config ACPI_PROCESSOR
tristate "Processor"
- depends on X86 || ARM64 || LOONGARCH
+ depends on X86 || ARM64 || LOONGARCH || RISCV
select ACPI_PROCESSOR_IDLE
select ACPI_CPU_FREQ_PSS if X86 || LOONGARCH
select THERMAL
@@ -460,7 +460,6 @@ config ACPI_BGRT
config ACPI_REDUCED_HARDWARE_ONLY
bool "Hardware-reduced ACPI support only" if EXPERT
- def_bool n
help
This config item changes the way the ACPI code is built. When this
option is selected, the kernel will use a specialized version of
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index a89bdbe00184..a7c00ef78086 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -380,6 +380,8 @@ static int dock_in_progress(struct dock_station *ds)
/**
* handle_eject_request - handle an undock request checking for error conditions
+ * @ds: The dock station to undock.
+ * @event: The ACPI event number associated with the undock request.
*
* Check to make sure the dock device is still present, then undock and
* hotremove all the devices that may need removing.
diff --git a/drivers/acpi/riscv/Makefile b/drivers/acpi/riscv/Makefile
index 8b3b126e0b94..86b0925f612d 100644
--- a/drivers/acpi/riscv/Makefile
+++ b/drivers/acpi/riscv/Makefile
@@ -1,2 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-y += rhct.o
+obj-y += rhct.o
+obj-$(CONFIG_ACPI_PROCESSOR_IDLE) += cpuidle.o
+obj-$(CONFIG_ACPI_CPPC_LIB) += cppc.o
diff --git a/drivers/acpi/riscv/cppc.c b/drivers/acpi/riscv/cppc.c
new file mode 100644
index 000000000000..4cdff387deff
--- /dev/null
+++ b/drivers/acpi/riscv/cppc.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Implement CPPC FFH helper routines for RISC-V.
+ *
+ * Copyright (C) 2024 Ventana Micro Systems Inc.
+ */
+
+#include <acpi/cppc_acpi.h>
+#include <asm/csr.h>
+#include <asm/sbi.h>
+
+#define SBI_EXT_CPPC 0x43505043
+
+/* CPPC interfaces defined in SBI spec */
+#define SBI_CPPC_PROBE 0x0
+#define SBI_CPPC_READ 0x1
+#define SBI_CPPC_READ_HI 0x2
+#define SBI_CPPC_WRITE 0x3
+
+/* RISC-V FFH definitions from RISC-V FFH spec */
+#define FFH_CPPC_TYPE(r) (((r) & GENMASK_ULL(63, 60)) >> 60)
+#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK(31, 0))
+#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK(11, 0))
+
+#define FFH_CPPC_SBI 0x1
+#define FFH_CPPC_CSR 0x2
+
+struct sbi_cppc_data {
+ u64 val;
+ u32 reg;
+ struct sbiret ret;
+};
+
+static bool cppc_ext_present;
+
+static int __init sbi_cppc_init(void)
+{
+ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+ sbi_probe_extension(SBI_EXT_CPPC) > 0) {
+ pr_info("SBI CPPC extension detected\n");
+ cppc_ext_present = true;
+ } else {
+ pr_info("SBI CPPC extension NOT detected!!\n");
+ cppc_ext_present = false;
+ }
+
+ return 0;
+}
+device_initcall(sbi_cppc_init);
+
+static void sbi_cppc_read(void *read_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
+
+ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_READ,
+ data->reg, 0, 0, 0, 0, 0);
+}
+
+static void sbi_cppc_write(void *write_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
+
+ data->ret = sbi_ecall(SBI_EXT_CPPC, SBI_CPPC_WRITE,
+ data->reg, data->val, 0, 0, 0, 0);
+}
+
+static void cppc_ffh_csr_read(void *read_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)read_data;
+
+ switch (data->reg) {
+ /* Support only TIME CSR for now */
+ case CSR_TIME:
+ data->ret.value = csr_read(CSR_TIME);
+ data->ret.error = 0;
+ break;
+ default:
+ data->ret.error = -EINVAL;
+ break;
+ }
+}
+
+static void cppc_ffh_csr_write(void *write_data)
+{
+ struct sbi_cppc_data *data = (struct sbi_cppc_data *)write_data;
+
+ data->ret.error = -EINVAL;
+}
+
+/*
+ * Refer to drivers/acpi/cppc_acpi.c for the description of the functions
+ * below.
+ */
+bool cpc_ffh_supported(void)
+{
+ return true;
+}
+
+int cpc_read_ffh(int cpu, struct cpc_reg *reg, u64 *val)
+{
+ struct sbi_cppc_data data;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
+ if (!cppc_ext_present)
+ return -EINVAL;
+
+ data.reg = FFH_CPPC_SBI_REG(reg->address);
+
+ smp_call_function_single(cpu, sbi_cppc_read, &data, 1);
+
+ *val = data.ret.value;
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
+ data.reg = FFH_CPPC_CSR_NUM(reg->address);
+
+ smp_call_function_single(cpu, cppc_ffh_csr_read, &data, 1);
+
+ *val = data.ret.value;
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ }
+
+ return -EINVAL;
+}
+
+int cpc_write_ffh(int cpu, struct cpc_reg *reg, u64 val)
+{
+ struct sbi_cppc_data data;
+
+ if (WARN_ON_ONCE(irqs_disabled()))
+ return -EPERM;
+
+ if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_SBI) {
+ if (!cppc_ext_present)
+ return -EINVAL;
+
+ data.reg = FFH_CPPC_SBI_REG(reg->address);
+ data.val = val;
+
+ smp_call_function_single(cpu, sbi_cppc_write, &data, 1);
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ } else if (FFH_CPPC_TYPE(reg->address) == FFH_CPPC_CSR) {
+ data.reg = FFH_CPPC_CSR_NUM(reg->address);
+ data.val = val;
+
+ smp_call_function_single(cpu, cppc_ffh_csr_write, &data, 1);
+
+ return (data.ret.error) ? sbi_err_map_linux_errno(data.ret.error) : 0;
+ }
+
+ return -EINVAL;
+}
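
cpc_read_ffh() and cpc_write_ffh() above dispatch on the FFH register address: bits [63:60] select SBI (0x1) or CSR (0x2) access, and the low bits carry the SBI CPPC register number or the CSR number. The userspace sketch below only exercises that decode; the local GENMASK_ULL() and the example addresses are stand-ins for the demo (CSR_TIME is 0xC01 in the RISC-V privileged spec).

/*
 * Illustration (userspace, not kernel code) of how the RISC-V FFH "address"
 * used above packs the access type into bits [63:60] and the register
 * identifier into the low bits.
 */
#include <stdio.h>
#include <stdint.h>

#define GENMASK_ULL(h, l)   (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

#define FFH_CPPC_TYPE(r)    (((r) & GENMASK_ULL(63, 60)) >> 60)
#define FFH_CPPC_SBI_REG(r) ((r) & GENMASK_ULL(31, 0))
#define FFH_CPPC_CSR_NUM(r) ((r) & GENMASK_ULL(11, 0))

#define FFH_CPPC_SBI 0x1
#define FFH_CPPC_CSR 0x2

int main(void)
{
	/* Hypothetical addresses, just to exercise the decode path. */
	uint64_t sbi_addr = (1ULL << 60) | 0x00000006;   /* type SBI, SBI CPPC register 6 */
	uint64_t csr_addr = (2ULL << 60) | 0xC01;        /* type CSR, CSR_TIME */

	printf("type=%llu sbi_reg=%llu\n",
	       (unsigned long long)FFH_CPPC_TYPE(sbi_addr),
	       (unsigned long long)FFH_CPPC_SBI_REG(sbi_addr));
	printf("type=%llu csr_num=0x%llx\n",
	       (unsigned long long)FFH_CPPC_TYPE(csr_addr),
	       (unsigned long long)FFH_CPPC_CSR_NUM(csr_addr));
	return 0;
}
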
diff --git a/drivers/acpi/riscv/cpuidle.c b/drivers/acpi/riscv/cpuidle.c
new file mode 100644
index 000000000000..624f9bbdb58c
--- /dev/null
+++ b/drivers/acpi/riscv/cpuidle.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024, Ventana Micro Systems Inc
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ *
+ */
+
+#include <linux/acpi.h>
+#include <acpi/processor.h>
+#include <linux/cpu_pm.h>
+#include <linux/cpuidle.h>
+#include <linux/suspend.h>
+#include <asm/cpuidle.h>
+#include <asm/sbi.h>
+#include <asm/suspend.h>
+
+#define RISCV_FFH_LPI_TYPE_MASK GENMASK_ULL(63, 60)
+#define RISCV_FFH_LPI_RSVD_MASK GENMASK_ULL(59, 32)
+
+#define RISCV_FFH_LPI_TYPE_SBI BIT_ULL(60)
+
+static int acpi_cpu_init_idle(unsigned int cpu)
+{
+ int i;
+ struct acpi_lpi_state *lpi;
+ struct acpi_processor *pr = per_cpu(processors, cpu);
+
+ if (unlikely(!pr || !pr->flags.has_lpi))
+ return -EINVAL;
+
+ if (!riscv_sbi_hsm_is_supported())
+ return -ENODEV;
+
+ if (pr->power.count <= 1)
+ return -ENODEV;
+
+ for (i = 1; i < pr->power.count; i++) {
+ u32 state;
+
+ lpi = &pr->power.lpi_states[i];
+
+ /*
+ * Validate Entry Method as per FFH spec.
+ * bits[63:60] should be 0x1
+ * bits[59:32] should be 0x0
+ * bits[31:0] represent a SBI power_state
+ */
+ if (((lpi->address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI) ||
+ (lpi->address & RISCV_FFH_LPI_RSVD_MASK)) {
+ pr_warn("Invalid LPI entry method %#llx\n", lpi->address);
+ return -EINVAL;
+ }
+
+ state = lpi->address;
+ if (!riscv_sbi_suspend_state_is_valid(state)) {
+ pr_warn("Invalid SBI power state %#x\n", state);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+int acpi_processor_ffh_lpi_probe(unsigned int cpu)
+{
+ return acpi_cpu_init_idle(cpu);
+}
+
+int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
+{
+ u32 state = lpi->address;
+
+ if (state & SBI_HSM_SUSP_NON_RET_BIT)
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend,
+ lpi->index,
+ state);
+ else
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
+ lpi->index,
+ state);
+}
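
acpi_cpu_init_idle() above accepts an LPI entry method only when bits [63:60] equal 0x1, bits [59:32] are zero, and the low 32 bits hold an SBI HSM power_state. A self-contained sketch of just that address check; the mask values mirror the patch, while the sample values in main() are invented for the demo.

/*
 * Userspace sketch (not kernel code) of the LPI entry-method validation done
 * by acpi_cpu_init_idle(): type nibble 0x1, reserved field zero, low 32 bits
 * carry the SBI power_state.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

#define RISCV_FFH_LPI_TYPE_MASK  (0xFULL << 60)
#define RISCV_FFH_LPI_RSVD_MASK  (0x0FFFFFFFULL << 32)
#define RISCV_FFH_LPI_TYPE_SBI   (1ULL << 60)

static bool lpi_address_is_valid(uint64_t address)
{
	if ((address & RISCV_FFH_LPI_TYPE_MASK) != RISCV_FFH_LPI_TYPE_SBI)
		return false;                   /* wrong entry-method type */
	if (address & RISCV_FFH_LPI_RSVD_MASK)
		return false;                   /* reserved bits must be zero */
	return true;                            /* low 32 bits are the SBI power_state */
}

int main(void)
{
	uint64_t good = (1ULL << 60) | 0x80000000;  /* non-retentive suspend bit set */
	uint64_t bad  = (2ULL << 60) | 0x00000000;  /* type nibble is not SBI */

	printf("good: %d\n", lpi_address_is_valid(good));
	printf("bad:  %d\n", lpi_address_is_valid(bad));
	return 0;
}
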
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 728acfeb774d..889f1c1a1fa9 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -502,6 +502,7 @@ static void acpi_pm_finish(void)
/**
* acpi_pm_start - Start system PM transition.
+ * @acpi_state: The target ACPI power state to transition to.
*/
static void acpi_pm_start(u32 acpi_state)
{
@@ -540,8 +541,9 @@ static u32 acpi_suspend_states[] = {
};
/**
- * acpi_suspend_begin - Set the target system sleep state to the state
- * associated with given @pm_state, if supported.
+ * acpi_suspend_begin - Set the target system sleep state to the state
+ * associated with given @pm_state, if supported.
+ * @pm_state: The target system power management state.
*/
static int acpi_suspend_begin(suspend_state_t pm_state)
{
@@ -671,10 +673,11 @@ static const struct platform_suspend_ops acpi_suspend_ops = {
};
/**
- * acpi_suspend_begin_old - Set the target system sleep state to the
- * state associated with given @pm_state, if supported, and
- * execute the _PTS control method. This function is used if the
- * pre-ACPI 2.0 suspend ordering has been requested.
+ * acpi_suspend_begin_old - Set the target system sleep state to the
+ * state associated with given @pm_state, if supported, and
+ * execute the _PTS control method. This function is used if the
+ * pre-ACPI 2.0 suspend ordering has been requested.
+ * @pm_state: The target suspend state for the system.
*/
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
@@ -967,10 +970,11 @@ static const struct platform_hibernation_ops acpi_hibernation_ops = {
};
/**
- * acpi_hibernation_begin_old - Set the target system sleep state to
- * ACPI_STATE_S4 and execute the _PTS control method. This
- * function is used if the pre-ACPI 2.0 suspend ordering has been
- * requested.
+ * acpi_hibernation_begin_old - Set the target system sleep state to
+ * ACPI_STATE_S4 and execute the _PTS control method. This
+ * function is used if the pre-ACPI 2.0 suspend ordering has been
+ * requested.
+ * @stage: The power management event message.
*/
static int acpi_hibernation_begin_old(pm_message_t stage)
{
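
The dock.c and sleep.c hunks above only add the @-parameter lines that scripts/kernel-doc flags as missing. For reference, a complete kernel-doc block has a one-line summary, one "@name:" line per parameter, and an optional Return: section; the helper below is hypothetical and exists only to carry such a comment.

/* Shape of a kernel-doc comment as checked by scripts/kernel-doc; the helper
 * and its types are made up for illustration. */
struct example_dev { int id; };

/**
 * example_set_state - Switch a hypothetical device to a new power state.
 * @dev: Device being updated.
 * @state: Target state; every parameter gets its own "@name:" line.
 *
 * Return: 0 on success or a negative errno value on failure.
 */
static int example_set_state(struct example_dev *dev, unsigned int state)
{
	(void)dev;
	(void)state;
	return 0;
}

int main(void)
{
	struct example_dev d = { .id = 0 };

	return example_set_state(&d, 1);
}
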
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e0e4dc38b692..2e1f261ec5c8 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -925,7 +925,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
int i;
for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- unsigned long page_addr;
bool on_lru;
if (!alloc->pages[i].page_ptr)
@@ -933,7 +932,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc)
on_lru = list_lru_del_obj(&binder_freelist,
&alloc->pages[i].lru);
- page_addr = alloc->buffer + i * PAGE_SIZE;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%s: %d: page %d %s\n",
__func__, alloc->pid, i,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 78570684ff68..562302e2e57c 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -669,19 +669,6 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
static void ahci_pci_save_initial_config(struct pci_dev *pdev,
struct ahci_host_priv *hpriv)
{
- if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA) {
- switch (pdev->device) {
- case 0x1166:
- dev_info(&pdev->dev, "ASM1166 has only six ports\n");
- hpriv->saved_port_map = 0x3f;
- break;
- case 0x1064:
- dev_info(&pdev->dev, "ASM1064 has only four ports\n");
- hpriv->saved_port_map = 0xf;
- break;
- }
- }
-
if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
dev_info(&pdev->dev, "JMB361 has only one port\n");
hpriv->saved_port_map = 1;
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 7dbf14a1d915..741497324d78 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -751,7 +751,7 @@ static int __component_add(struct device *dev, const struct component_ops *ops,
* component_bind_all(). See also &struct component_ops.
*
* @subcomponent must be nonzero and is used to differentiate between multiple
- * components registerd on the same device @dev. These components are match
+ * components registered on the same device @dev. These components are matched
* using component_match_add_typed().
*
* The component needs to be unregistered at driver unload/disconnect by
@@ -781,7 +781,7 @@ EXPORT_SYMBOL_GPL(component_add_typed);
* The component needs to be unregistered at driver unload/disconnect by
* calling component_del().
*
- * See also component_add_typed() for a variant that allows multipled different
+ * See also component_add_typed() for a variant that allows multiple different
* components on the same device.
*/
int component_add(struct device *dev, const struct component_ops *ops)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 9828da9b933c..b93f3c5716ae 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -92,12 +92,13 @@ static int __fwnode_link_add(struct fwnode_handle *con,
return 0;
}
-int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup)
+int fwnode_link_add(struct fwnode_handle *con, struct fwnode_handle *sup,
+ u8 flags)
{
int ret;
mutex_lock(&fwnode_link_lock);
- ret = __fwnode_link_add(con, sup, 0);
+ ret = __fwnode_link_add(con, sup, flags);
mutex_unlock(&fwnode_link_lock);
return ret;
}
@@ -1011,7 +1012,8 @@ static struct fwnode_handle *fwnode_links_check_suppliers(
return NULL;
list_for_each_entry(link, &fwnode->suppliers, c_hook)
- if (!(link->flags & FWLINK_FLAG_CYCLE))
+ if (!(link->flags &
+ (FWLINK_FLAG_CYCLE | FWLINK_FLAG_IGNORE)))
return link->supplier;
return NULL;
@@ -1871,6 +1873,7 @@ static void fw_devlink_unblock_consumers(struct device *dev)
device_links_write_unlock();
}
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
static bool fwnode_init_without_drv(struct fwnode_handle *fwnode)
{
@@ -1902,6 +1905,63 @@ static bool fwnode_ancestor_init_without_drv(struct fwnode_handle *fwnode)
}
/**
+ * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
+ * @ancestor: Firmware which is tested for being an ancestor
+ * @child: Firmware which is tested for being the child
+ *
+ * A node is considered an ancestor of itself too.
+ *
+ * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
+ */
+static bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor,
+ const struct fwnode_handle *child)
+{
+ struct fwnode_handle *parent;
+
+ if (IS_ERR_OR_NULL(ancestor))
+ return false;
+
+ if (child == ancestor)
+ return true;
+
+ fwnode_for_each_parent_node(child, parent) {
+ if (parent == ancestor) {
+ fwnode_handle_put(parent);
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
+ * @fwnode: firmware node
+ *
+ * Given a firmware node (@fwnode), this function finds its closest ancestor
+ * firmware node that has a corresponding struct device and returns that struct
+ * device.
+ *
+ * The caller is responsible for calling put_device() on the returned device
+ * pointer.
+ *
+ * Return: a pointer to the device of the @fwnode's closest ancestor.
+ */
+static struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
+{
+ struct fwnode_handle *parent;
+ struct device *dev;
+
+ fwnode_for_each_parent_node(fwnode, parent) {
+ dev = get_dev_from_fwnode(parent);
+ if (dev) {
+ fwnode_handle_put(parent);
+ return dev;
+ }
+ }
+ return NULL;
+}
+
+/**
* __fw_devlink_relax_cycles - Relax and mark dependency cycles.
* @con: Potential consumer device.
* @sup_handle: Potential supplier's fwnode.
@@ -1962,6 +2022,9 @@ static bool __fw_devlink_relax_cycles(struct device *con,
}
list_for_each_entry(link, &sup_handle->suppliers, c_hook) {
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ continue;
+
if (__fw_devlink_relax_cycles(con, link->supplier)) {
__fwnode_link_cycle(link);
ret = true;
@@ -2040,6 +2103,9 @@ static int fw_devlink_create_devlink(struct device *con,
int ret = 0;
u32 flags;
+ if (link->flags & FWLINK_FLAG_IGNORE)
+ return 0;
+
if (con->fwnode == link->consumer)
flags = fw_devlink_get_flags(link->flags);
else
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index f5a6bffce518..56fba44ba391 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -366,7 +366,7 @@ static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
}
#endif
-struct bus_type cpu_subsys = {
+const struct bus_type cpu_subsys = {
.name = "cpu",
.dev_name = "cpu",
.match = cpu_subsys_match,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 85152537dbf1..83d352394fdf 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -313,7 +313,7 @@ static void deferred_probe_timeout_work_func(struct work_struct *work)
mutex_lock(&deferred_probe_mutex);
list_for_each_entry(p, &deferred_probe_pending_list, deferred_probe)
- dev_info(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
+ dev_warn(p->device, "deferred probe pending: %s", p->deferred_probe_reason ?: "(reason unknown)\n");
mutex_unlock(&deferred_probe_mutex);
fw_devlink_probing_done();
@@ -397,13 +397,12 @@ bool device_is_bound(struct device *dev)
static void driver_bound(struct device *dev)
{
if (device_is_bound(dev)) {
- pr_warn("%s: device %s already bound\n",
- __func__, kobject_name(&dev->kobj));
+ dev_warn(dev, "%s: device already bound\n", __func__);
return;
}
- pr_debug("driver: '%s': %s: bound to device '%s'\n", dev->driver->name,
- __func__, dev_name(dev));
+ dev_dbg(dev, "driver: '%s': %s: bound to device\n", dev->driver->name,
+ __func__);
klist_add_tail(&dev->p->knode_driver, &dev->driver->p->klist_devices);
device_links_driver_bound(dev);
@@ -587,13 +586,13 @@ static int call_driver_probe(struct device *dev, struct device_driver *drv)
break;
case -ENODEV:
case -ENXIO:
- pr_debug("%s: probe of %s rejects match %d\n",
- drv->name, dev_name(dev), ret);
+ dev_dbg(dev, "probe with driver %s rejects match %d\n",
+ drv->name, ret);
break;
default:
/* driver matched but the probe failed */
- pr_warn("%s: probe of %s failed with error %d\n",
- drv->name, dev_name(dev), ret);
+ dev_err(dev, "probe with driver %s failed with error %d\n",
+ drv->name, ret);
break;
}
@@ -620,8 +619,8 @@ static int really_probe(struct device *dev, struct device_driver *drv)
if (link_ret == -EPROBE_DEFER)
return link_ret;
- pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
- drv->bus->name, __func__, drv->name, dev_name(dev));
+ dev_dbg(dev, "bus: '%s': %s: probing driver %s with device\n",
+ drv->bus->name, __func__, drv->name);
if (!list_empty(&dev->devres_head)) {
dev_crit(dev, "Resources present before probing\n");
ret = -EBUSY;
@@ -644,8 +643,7 @@ re_probe:
ret = driver_sysfs_add(dev);
if (ret) {
- pr_err("%s: driver_sysfs_add(%s) failed\n",
- __func__, dev_name(dev));
+ dev_err(dev, "%s: driver_sysfs_add failed\n", __func__);
goto sysfs_failed;
}
@@ -706,8 +704,8 @@ re_probe:
dev->pm_domain->sync(dev);
driver_bound(dev);
- pr_debug("bus: '%s': %s: bound device %s to driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: bound device to driver %s\n",
+ drv->bus->name, __func__, drv->name);
goto done;
dev_sysfs_state_synced_failed:
@@ -786,8 +784,8 @@ static int __driver_probe_device(struct device_driver *drv, struct device *dev)
return -EBUSY;
dev->can_match = true;
- pr_debug("bus: '%s': %s: matched device %s with driver %s\n",
- drv->bus->name, __func__, dev_name(dev), drv->name);
+ dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n",
+ drv->bus->name, __func__, drv->name);
pm_runtime_get_suppliers(dev);
if (dev->parent)
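
The dd.c hunks above switch the probe-path messages from pr_*() to dev_*() so each line is automatically prefixed with the driver and device names instead of having them pasted into the format string. A small illustrative sketch of the difference (hypothetical driver context, not from this patch):

#include <linux/device.h>
#include <linux/printk.h>

/* Sketch: the same message without and with device context. */
static void report_probe_failure(struct device *dev, int err)
{
	/* No automatic context; the caller must name the device itself. */
	pr_warn("probe of %s failed with error %d\n", dev_name(dev), err);

	/* Prefixed with the driver and device name by the driver core. */
	dev_err(dev, "probe failed with error %d\n", err);
}
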
diff --git a/drivers/base/firmware_loader/main.c b/drivers/base/firmware_loader/main.c
index ea28102d421e..da8ca01d011c 100644
--- a/drivers/base/firmware_loader/main.c
+++ b/drivers/base/firmware_loader/main.c
@@ -551,12 +551,16 @@ fw_get_filesystem_firmware(struct device *device, struct fw_priv *fw_priv,
file_size_ptr,
READING_FIRMWARE);
if (rc < 0) {
- if (rc != -ENOENT)
- dev_warn(device, "loading %s failed with error %d\n",
- path, rc);
- else
- dev_dbg(device, "loading %s failed for no such file or directory.\n",
- path);
+ if (!(fw_priv->opt_flags & FW_OPT_NO_WARN)) {
+ if (rc != -ENOENT)
+ dev_warn(device,
+ "loading %s failed with error %d\n",
+ path, rc);
+ else
+ dev_dbg(device,
+ "loading %s failed for no such file or directory.\n",
+ path);
+ }
continue;
}
size = rc;
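
The hunk above makes the direct-filesystem lookup honour FW_OPT_NO_WARN, so requests made through the quiet firmware API no longer log a warning for every search path that lacks the file. A hedged driver-side sketch (firmware_request_nowarn() is the interface that sets this flag; the image name is a placeholder):

#include <linux/firmware.h>
#include <linux/device.h>

/* Sketch: probe an optional firmware image without polluting the log. */
static int load_optional_fw_sketch(struct device *dev)
{
	const struct firmware *fw;
	int ret;

	ret = firmware_request_nowarn(&fw, "vendor/optional-patch.bin", dev);
	if (ret)
		return 0;	/* image is optional, continue silently */

	/* ... download fw->data / fw->size to the device ... */
	release_firmware(fw);
	return 0;
}
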
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 0d01890160f3..11f5fdf65b9e 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -174,8 +174,8 @@ static int platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
if (!datap)
return -ENOMEM;
- datap->devid = ida_simple_get(&platform_msi_devid_ida,
- 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
+ datap->devid = ida_alloc_max(&platform_msi_devid_ida,
+ (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
if (datap->devid < 0) {
err = datap->devid;
kfree(datap);
@@ -193,7 +193,7 @@ static void platform_msi_free_priv_data(struct device *dev)
struct platform_msi_priv_data *data = dev->msi.data->platform_data;
dev->msi.data->platform_data = NULL;
- ida_simple_remove(&platform_msi_devid_ida, data->devid);
+ ida_free(&platform_msi_devid_ida, data->devid);
kfree(data);
}
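
The platform-msi hunks above are part of the tree-wide retirement of the deprecated ida_simple_*() wrappers. The only subtlety is that ida_simple_get() takes an exclusive upper bound while ida_alloc_max() takes an inclusive one, hence the new "- 1". A small equivalence sketch (the DEV_ID_SHIFT value is assumed from platform-msi.c):

#include <linux/idr.h>

#define DEV_ID_SHIFT	21	/* assumed; matches platform-msi.c */

static DEFINE_IDA(example_ida);

/* Sketch: allocate an ID in [0, (1 << DEV_ID_SHIFT) - 1], old vs. new API. */
static int alloc_devid_sketch(void)
{
	/* Deprecated form -- the 'end' argument is exclusive:
	 *   ida_simple_get(&example_ida, 0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
	 */

	/* Preferred form -- 'max' is inclusive, so subtract one. */
	return ida_alloc_max(&example_ida, (1 << DEV_ID_SHIFT) - 1, GFP_KERNEL);
}

static void free_devid_sketch(int id)
{
	ida_free(&example_ida, id);	/* replaces ida_simple_remove() */
}
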
diff --git a/drivers/base/property.c b/drivers/base/property.c
index a1b01ab42052..7324a704a9a1 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -7,15 +7,16 @@
* Mika Westerberg <mika.westerberg@linux.intel.com>
*/
-#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
#include <linux/export.h>
-#include <linux/kernel.h>
+#include <linux/kconfig.h>
#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_graph.h>
-#include <linux/of_irq.h>
#include <linux/property.h>
#include <linux/phy.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
struct fwnode_handle *__dev_fwnode(struct device *dev)
{
@@ -700,34 +701,6 @@ struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode)
EXPORT_SYMBOL_GPL(fwnode_get_next_parent);
/**
- * fwnode_get_next_parent_dev - Find device of closest ancestor fwnode
- * @fwnode: firmware node
- *
- * Given a firmware node (@fwnode), this function finds its closest ancestor
- * firmware node that has a corresponding struct device and returns that struct
- * device.
- *
- * The caller is responsible for calling put_device() on the returned device
- * pointer.
- *
- * Return: a pointer to the device of the @fwnode's closest ancestor.
- */
-struct device *fwnode_get_next_parent_dev(const struct fwnode_handle *fwnode)
-{
- struct fwnode_handle *parent;
- struct device *dev;
-
- fwnode_for_each_parent_node(fwnode, parent) {
- dev = get_dev_from_fwnode(parent);
- if (dev) {
- fwnode_handle_put(parent);
- return dev;
- }
- }
- return NULL;
-}
-
-/**
* fwnode_count_parents - Return the number of parents a node has
* @fwnode: The node the parents of which are to be counted
*
@@ -774,34 +747,6 @@ struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwnode,
EXPORT_SYMBOL_GPL(fwnode_get_nth_parent);
/**
- * fwnode_is_ancestor_of - Test if @ancestor is ancestor of @child
- * @ancestor: Firmware which is tested for being an ancestor
- * @child: Firmware which is tested for being the child
- *
- * A node is considered an ancestor of itself too.
- *
- * Return: true if @ancestor is an ancestor of @child. Otherwise, returns false.
- */
-bool fwnode_is_ancestor_of(const struct fwnode_handle *ancestor, const struct fwnode_handle *child)
-{
- struct fwnode_handle *parent;
-
- if (IS_ERR_OR_NULL(ancestor))
- return false;
-
- if (child == ancestor)
- return true;
-
- fwnode_for_each_parent_node(child, parent) {
- if (parent == ancestor) {
- fwnode_handle_put(parent);
- return true;
- }
- }
- return false;
-}
-
-/**
* fwnode_get_next_child_node - Return the next child node handle for a node
* @fwnode: Firmware node to find the next child node for.
* @child: Handle to one of the node's child nodes or a %NULL handle.
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 36512fb75a20..eb6eb25b343b 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -6,10 +6,21 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/kstrtox.h>
+#include <linux/list.h>
#include <linux/property.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
#include "base.h"
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 1b399ec8c07d..25c9d85667f1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2787,7 +2787,6 @@ do_request:
pending = set_next_request();
spin_unlock_irq(&floppy_lock);
if (!pending) {
- do_floppy = NULL;
unlock_fdc();
return;
}
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index 3c84fcbda01a..e6bc4a73c9fc 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -383,8 +383,8 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
}
}
-static ssize_t btmtkuart_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t btmtkuart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev);
diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c
index 0b93c2ff29e4..9d0c7e278114 100644
--- a/drivers/bluetooth/btnxpuart.c
+++ b/drivers/bluetooth/btnxpuart.c
@@ -1285,8 +1285,8 @@ static const struct h4_recv_pkt nxp_recv_pkts[] = {
{ NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 },
};
-static ssize_t btnxpuart_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t btnxpuart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index 214fff876eae..85c0d9b68f5f 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -271,8 +271,8 @@ static void hci_uart_write_wakeup(struct serdev_device *serdev)
*
* Return: number of processed bytes
*/
-static ssize_t hci_uart_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t hci_uart_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct hci_uart *hu = serdev_device_get_drvdata(serdev);
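
The three Bluetooth hunks above track a serdev core change: the receive_buf() callback now returns size_t, reflecting that a client can only consume a non-negative number of the bytes it was handed. A minimal hedged sketch of a client built against the updated prototype (names are illustrative):

#include <linux/serdev.h>

/* Sketch: consume everything we are given; report how much was used. */
static size_t demo_receive_buf(struct serdev_device *serdev,
			       const u8 *data, size_t count)
{
	/* Parse 'count' bytes from 'data' here. */
	return count;
}

static void demo_write_wakeup(struct serdev_device *serdev)
{
	/* Resume a previously throttled transmit, if any. */
}

static const struct serdev_device_ops demo_serdev_ops = {
	.receive_buf  = demo_receive_buf,
	.write_wakeup = demo_write_wakeup,
};
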
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index b6dfe4340da2..65ae758f3194 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -96,6 +96,20 @@ static const int gisb_offsets_bcm7400[] = {
[ARB_ERR_CAP_MASTER] = 0x0d8,
};
+static const int gisb_offsets_bcm74165[] = {
+ [ARB_TIMER] = 0x008,
+ [ARB_BP_CAP_CLR] = 0x044,
+ [ARB_BP_CAP_HI_ADDR] = -1,
+ [ARB_BP_CAP_ADDR] = 0x048,
+ [ARB_BP_CAP_STATUS] = 0x058,
+ [ARB_BP_CAP_MASTER] = 0x05c,
+ [ARB_ERR_CAP_CLR] = 0x038,
+ [ARB_ERR_CAP_HI_ADDR] = -1,
+ [ARB_ERR_CAP_ADDR] = 0x020,
+ [ARB_ERR_CAP_STATUS] = 0x030,
+ [ARB_ERR_CAP_MASTER] = 0x034,
+};
+
static const int gisb_offsets_bcm7435[] = {
[ARB_TIMER] = 0x00c,
[ARB_BP_CAP_CLR] = 0x014,
@@ -393,6 +407,7 @@ static const struct of_device_id brcmstb_gisb_arb_of_match[] = {
{ .compatible = "brcm,bcm7400-gisb-arb", .data = gisb_offsets_bcm7400 },
{ .compatible = "brcm,bcm7278-gisb-arb", .data = gisb_offsets_bcm7278 },
{ .compatible = "brcm,bcm7038-gisb-arb", .data = gisb_offsets_bcm7038 },
+ { .compatible = "brcm,bcm74165-gisb-arb", .data = gisb_offsets_bcm74165 },
{ },
};
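
The new BCM74165 support above reuses the driver's existing pattern: one register-offset table per SoC generation, with -1 marking registers the block does not implement, selected at probe time through the of_device_id .data pointer. A minimal sketch of that selection (probe shape assumed, not copied from the driver):

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Sketch: pick the per-SoC offset table matched via the compatible string. */
static int gisb_probe_sketch(struct platform_device *pdev)
{
	const int *offsets = of_device_get_match_data(&pdev->dev);

	if (!offsets)
		return -EINVAL;

	/*
	 * offsets[ARB_TIMER] etc. index the SoC-specific register layout;
	 * a value of -1 means "register not present on this SoC".
	 */
	return 0;
}
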
diff --git a/drivers/bus/mhi/common.h b/drivers/bus/mhi/common.h
index f794b9c8049e..dda340aaed95 100644
--- a/drivers/bus/mhi/common.h
+++ b/drivers/bus/mhi/common.h
@@ -297,30 +297,30 @@ struct mhi_ring_element {
__le32 dword[2];
};
+#define MHI_STATE_LIST \
+ mhi_state(RESET, "RESET") \
+ mhi_state(READY, "READY") \
+ mhi_state(M0, "M0") \
+ mhi_state(M1, "M1") \
+ mhi_state(M2, "M2") \
+ mhi_state(M3, "M3") \
+ mhi_state(M3_FAST, "M3_FAST") \
+ mhi_state(BHI, "BHI") \
+ mhi_state_end(SYS_ERR, "SYS ERROR")
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) case MHI_STATE_##a: return b;
+#define mhi_state_end(a, b) case MHI_STATE_##a: return b;
+
static inline const char *mhi_state_str(enum mhi_state state)
{
switch (state) {
- case MHI_STATE_RESET:
- return "RESET";
- case MHI_STATE_READY:
- return "READY";
- case MHI_STATE_M0:
- return "M0";
- case MHI_STATE_M1:
- return "M1";
- case MHI_STATE_M2:
- return "M2";
- case MHI_STATE_M3:
- return "M3";
- case MHI_STATE_M3_FAST:
- return "M3 FAST";
- case MHI_STATE_BHI:
- return "BHI";
- case MHI_STATE_SYS_ERR:
- return "SYS ERROR";
+ MHI_STATE_LIST
default:
return "Unknown state";
}
-};
+}
#endif /* _MHI_COMMON_H */
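
The MHI_STATE_LIST conversion above is an X-macro list: the state/name pairs are written once, and every consumer redefines mhi_state()/mhi_state_end() to expand them into what it needs — switch cases here, enum-to-string tables and __print_symbolic() maps in the new trace header. A self-contained userspace sketch of the same idea, with hypothetical names:

#include <stdio.h>

/* Sketch: one list, two expansions (enum and string table). */
#define COLOR_LIST \
	color(RED,   "red")   \
	color(GREEN, "green") \
	color_end(BLUE, "blue")

#define color(a, b)	COLOR_##a,
#define color_end(a, b)	COLOR_##a
enum color { COLOR_LIST };
#undef color
#undef color_end

#define color(a, b)	[COLOR_##a] = b,
#define color_end(a, b)	[COLOR_##a] = b
static const char * const color_str[] = { COLOR_LIST };
#undef color
#undef color_end

int main(void)
{
	printf("%s\n", color_str[COLOR_GREEN]);	/* prints "green" */
	return 0;
}
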
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index 65fc1d738bec..f8f674adf1d4 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -1149,8 +1149,9 @@ int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
mhi_ep_mmio_mask_interrupts(mhi_cntrl);
mhi_ep_mmio_init(mhi_cntrl);
- mhi_cntrl->mhi_event = kzalloc(mhi_cntrl->event_rings * (sizeof(*mhi_cntrl->mhi_event)),
- GFP_KERNEL);
+ mhi_cntrl->mhi_event = kcalloc(mhi_cntrl->event_rings,
+ sizeof(*mhi_cntrl->mhi_event),
+ GFP_KERNEL);
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
@@ -1496,7 +1497,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item",
sizeof(struct mhi_ep_ring_item), 0,
0, NULL);
- if (!mhi_cntrl->ev_ring_el_cache) {
+ if (!mhi_cntrl->ring_item_cache) {
ret = -ENOMEM;
goto err_destroy_tre_buf_cache;
}
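
The kcalloc() change above is about overflow safety: kzalloc(n * size, ...) can silently wrap the multiplication, whereas kcalloc(n, size, ...) detects the overflow and returns NULL. A short sketch of the preferred form:

#include <linux/slab.h>

/* Sketch: allocate a zeroed array of 'nr' ring structures, overflow-safely. */
static void *alloc_event_rings_sketch(size_t nr, size_t elem_size)
{
	/*
	 * kcalloc() fails cleanly if nr * elem_size would overflow; an
	 * open-coded kzalloc(nr * elem_size, ...) would wrap around and
	 * hand back a buffer that is too small.
	 */
	return kcalloc(nr, elem_size, GFP_KERNEL);
}
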
diff --git a/drivers/bus/mhi/host/boot.c b/drivers/bus/mhi/host/boot.c
index edc0ec5a0933..dedd29ca8db3 100644
--- a/drivers/bus/mhi/host/boot.c
+++ b/drivers/bus/mhi/host/boot.c
@@ -395,7 +395,7 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
void *buf;
dma_addr_t dma_addr;
size_t size, fw_sz;
- int i, ret;
+ int ret;
if (MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)) {
dev_err(dev, "Device MHI is not in valid state\n");
@@ -408,15 +408,6 @@ void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
if (ret)
dev_err(dev, "Could not capture serial number via BHI\n");
- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++) {
- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i),
- &mhi_cntrl->oem_pk_hash[i]);
- if (ret) {
- dev_err(dev, "Could not capture OEM PK HASH via BHI\n");
- break;
- }
- }
-
/* wait for ready on pass through or any other execution environment */
if (!MHI_FW_LOAD_CAPABLE(mhi_cntrl->ee))
goto fw_load_ready_state;
diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c
index 65ceac1837f9..44f934981de8 100644
--- a/drivers/bus/mhi/host/init.c
+++ b/drivers/bus/mhi/host/init.c
@@ -20,50 +20,49 @@
#include <linux/wait.h>
#include "internal.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
static DEFINE_IDA(mhi_controller_ida);
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) [MHI_EE_##a] = b,
+#define mhi_ee_end(a, b) [MHI_EE_##a] = b,
+
const char * const mhi_ee_str[MHI_EE_MAX] = {
- [MHI_EE_PBL] = "PRIMARY BOOTLOADER",
- [MHI_EE_SBL] = "SECONDARY BOOTLOADER",
- [MHI_EE_AMSS] = "MISSION MODE",
- [MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
- [MHI_EE_WFW] = "WLAN FIRMWARE",
- [MHI_EE_PTHRU] = "PASS THROUGH",
- [MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
- [MHI_EE_FP] = "FLASH PROGRAMMER",
- [MHI_EE_DISABLE_TRANSITION] = "DISABLE",
- [MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
+ MHI_EE_LIST
};
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) [DEV_ST_TRANSITION_##a] = b,
+#define dev_st_trans_end(a, b) [DEV_ST_TRANSITION_##a] = b,
+
const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
- [DEV_ST_TRANSITION_PBL] = "PBL",
- [DEV_ST_TRANSITION_READY] = "READY",
- [DEV_ST_TRANSITION_SBL] = "SBL",
- [DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
- [DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
- [DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
- [DEV_ST_TRANSITION_DISABLE] = "DISABLE",
+ DEV_ST_TRANSITION_LIST
};
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) [MHI_CH_STATE_TYPE_##a] = b,
+#define ch_state_type_end(a, b) [MHI_CH_STATE_TYPE_##a] = b,
+
const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
- [MHI_CH_STATE_TYPE_RESET] = "RESET",
- [MHI_CH_STATE_TYPE_STOP] = "STOP",
- [MHI_CH_STATE_TYPE_START] = "START",
+ MHI_CH_STATE_TYPE_LIST
};
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) [MHI_PM_STATE_##a] = b,
+#define mhi_pm_state_end(a, b) [MHI_PM_STATE_##a] = b,
+
static const char * const mhi_pm_state_str[] = {
- [MHI_PM_STATE_DISABLE] = "DISABLE",
- [MHI_PM_STATE_POR] = "POWER ON RESET",
- [MHI_PM_STATE_M0] = "M0",
- [MHI_PM_STATE_M2] = "M2",
- [MHI_PM_STATE_M3_ENTER] = "M?->M3",
- [MHI_PM_STATE_M3] = "M3",
- [MHI_PM_STATE_M3_EXIT] = "M3->M0",
- [MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
- [MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
- [MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
- [MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
- [MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
+ MHI_PM_STATE_LIST
};
const char *to_mhi_pm_state_str(u32 state)
@@ -97,11 +96,19 @@ static ssize_t oem_pk_hash_show(struct device *dev,
{
struct mhi_device *mhi_dev = to_mhi_device(dev);
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
- int i, cnt = 0;
+ u32 hash_segment[MHI_MAX_OEM_PK_HASH_SEGMENTS];
+ int i, cnt = 0, ret;
- for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
- cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n",
- i, mhi_cntrl->oem_pk_hash[i]);
+ for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++) {
+ ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_OEMPKHASH(i), &hash_segment[i]);
+ if (ret) {
+ dev_err(dev, "Could not capture OEM PK HASH\n");
+ return ret;
+ }
+ }
+
+ for (i = 0; i < MHI_MAX_OEM_PK_HASH_SEGMENTS; i++)
+ cnt += sysfs_emit_at(buf, cnt, "OEMPKHASH[%d]: 0x%x\n", i, hash_segment[i]);
return cnt;
}
@@ -907,7 +914,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan;
struct mhi_cmd *mhi_cmd;
struct mhi_device *mhi_dev;
- u32 soc_info;
int ret, i;
if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
@@ -982,17 +988,6 @@ int mhi_register_controller(struct mhi_controller *mhi_cntrl,
mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
}
- /* Read the MHI device info */
- ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
- SOC_HW_VERSION_OFFS, &soc_info);
- if (ret)
- goto err_destroy_wq;
-
- mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
- mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
- mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
- mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);
-
mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
if (mhi_cntrl->index < 0) {
ret = mhi_cntrl->index;
diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h
index 30ac415a3000..5fe49311b8eb 100644
--- a/drivers/bus/mhi/host/internal.h
+++ b/drivers/bus/mhi/host/internal.h
@@ -15,12 +15,6 @@ extern struct bus_type mhi_bus_type;
#define MHI_SOC_RESET_REQ_OFFSET 0xb0
#define MHI_SOC_RESET_REQ BIT(0)
-#define SOC_HW_VERSION_OFFS 0x224
-#define SOC_HW_VERSION_FAM_NUM_BMSK GENMASK(31, 28)
-#define SOC_HW_VERSION_DEV_NUM_BMSK GENMASK(27, 16)
-#define SOC_HW_VERSION_MAJOR_VER_BMSK GENMASK(15, 8)
-#define SOC_HW_VERSION_MINOR_VER_BMSK GENMASK(7, 0)
-
struct mhi_ctxt {
struct mhi_event_ctxt *er_ctxt;
struct mhi_chan_ctxt *chan_ctxt;
@@ -42,6 +36,11 @@ enum mhi_ch_state_type {
MHI_CH_STATE_TYPE_MAX,
};
+#define MHI_CH_STATE_TYPE_LIST \
+ ch_state_type(RESET, "RESET") \
+ ch_state_type(STOP, "STOP") \
+ ch_state_type_end(START, "START")
+
extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define TO_CH_STATE_TYPE_STR(state) (((state) >= MHI_CH_STATE_TYPE_MAX) ? \
"INVALID_STATE" : \
@@ -50,6 +49,18 @@ extern const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX];
#define MHI_INVALID_BRSTMODE(mode) (mode != MHI_DB_BRST_DISABLE && \
mode != MHI_DB_BRST_ENABLE)
+#define MHI_EE_LIST \
+ mhi_ee(PBL, "PRIMARY BOOTLOADER") \
+ mhi_ee(SBL, "SECONDARY BOOTLOADER") \
+ mhi_ee(AMSS, "MISSION MODE") \
+ mhi_ee(RDDM, "RAMDUMP DOWNLOAD MODE")\
+ mhi_ee(WFW, "WLAN FIRMWARE") \
+ mhi_ee(PTHRU, "PASS THROUGH") \
+ mhi_ee(EDL, "EMERGENCY DOWNLOAD") \
+ mhi_ee(FP, "FLASH PROGRAMMER") \
+ mhi_ee(DISABLE_TRANSITION, "DISABLE") \
+ mhi_ee_end(NOT_SUPPORTED, "NOT SUPPORTED")
+
extern const char * const mhi_ee_str[MHI_EE_MAX];
#define TO_MHI_EXEC_STR(ee) (((ee) >= MHI_EE_MAX) ? \
"INVALID_EE" : mhi_ee_str[ee])
@@ -72,6 +83,15 @@ enum dev_st_transition {
DEV_ST_TRANSITION_MAX,
};
+#define DEV_ST_TRANSITION_LIST \
+ dev_st_trans(PBL, "PBL") \
+ dev_st_trans(READY, "READY") \
+ dev_st_trans(SBL, "SBL") \
+ dev_st_trans(MISSION_MODE, "MISSION MODE") \
+ dev_st_trans(FP, "FLASH PROGRAMMER") \
+ dev_st_trans(SYS_ERR, "SYS ERROR") \
+ dev_st_trans_end(DISABLE, "DISABLE")
+
extern const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX];
#define TO_DEV_STATE_TRANS_STR(state) (((state) >= DEV_ST_TRANSITION_MAX) ? \
"INVALID_STATE" : dev_state_tran_str[state])
@@ -88,11 +108,27 @@ enum mhi_pm_state {
MHI_PM_STATE_FW_DL_ERR,
MHI_PM_STATE_SYS_ERR_DETECT,
MHI_PM_STATE_SYS_ERR_PROCESS,
+ MHI_PM_STATE_SYS_ERR_FAIL,
MHI_PM_STATE_SHUTDOWN_PROCESS,
MHI_PM_STATE_LD_ERR_FATAL_DETECT,
MHI_PM_STATE_MAX
};
+#define MHI_PM_STATE_LIST \
+ mhi_pm_state(DISABLE, "DISABLE") \
+ mhi_pm_state(POR, "POWER ON RESET") \
+ mhi_pm_state(M0, "M0") \
+ mhi_pm_state(M2, "M2") \
+ mhi_pm_state(M3_ENTER, "M?->M3") \
+ mhi_pm_state(M3, "M3") \
+ mhi_pm_state(M3_EXIT, "M3->M0") \
+ mhi_pm_state(FW_DL_ERR, "Firmware Download Error") \
+ mhi_pm_state(SYS_ERR_DETECT, "SYS ERROR Detect") \
+ mhi_pm_state(SYS_ERR_PROCESS, "SYS ERROR Process") \
+ mhi_pm_state(SYS_ERR_FAIL, "SYS ERROR Failure") \
+ mhi_pm_state(SHUTDOWN_PROCESS, "SHUTDOWN Process") \
+ mhi_pm_state_end(LD_ERR_FATAL_DETECT, "Linkdown or Error Fatal Detect")
+
#define MHI_PM_DISABLE BIT(0)
#define MHI_PM_POR BIT(1)
#define MHI_PM_M0 BIT(2)
@@ -104,14 +140,16 @@ enum mhi_pm_state {
#define MHI_PM_FW_DL_ERR BIT(7)
#define MHI_PM_SYS_ERR_DETECT BIT(8)
#define MHI_PM_SYS_ERR_PROCESS BIT(9)
-#define MHI_PM_SHUTDOWN_PROCESS BIT(10)
+#define MHI_PM_SYS_ERR_FAIL BIT(10)
+#define MHI_PM_SHUTDOWN_PROCESS BIT(11)
/* link not accessible */
-#define MHI_PM_LD_ERR_FATAL_DETECT BIT(11)
+#define MHI_PM_LD_ERR_FATAL_DETECT BIT(12)
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
MHI_PM_M2 | MHI_PM_M3_ENTER | MHI_PM_M3_EXIT | \
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
+ MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS | \
+ MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & mhi_cntrl->db_access)
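
Because each MHI PM state is a single BIT(), adding MHI_PM_SYS_ERR_FAIL shifts SHUTDOWN_PROCESS and LD_ERR_FATAL_DETECT up by one bit, while set-membership checks such as MHI_REG_ACCESS_VALID() stay a single mask test. A minimal sketch of such a check, assuming the definitions above:

#include "internal.h"	/* MHI_PM_* bit definitions shown above */

/* Sketch: "is register access allowed in this PM state?" as one AND. */
static bool mhi_reg_access_ok_sketch(u32 pm_state)
{
	const u32 allowed = MHI_PM_POR | MHI_PM_M0 | MHI_PM_M2 |
			    MHI_PM_M3_ENTER | MHI_PM_M3_EXIT |
			    MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS |
			    MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
			    MHI_PM_FW_DL_ERR;

	return pm_state & allowed;
}
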
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index abb561db9ae1..15d657af9b5b 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -15,6 +15,7 @@
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"
+#include "trace.h"
int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
void __iomem *base, u32 offset, u32 *out)
@@ -493,11 +494,8 @@ irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv)
state = mhi_get_mhi_state(mhi_cntrl);
ee = mhi_get_exec_env(mhi_cntrl);
- dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n",
- TO_MHI_EXEC_STR(mhi_cntrl->ee),
- mhi_state_str(mhi_cntrl->dev_state),
- TO_MHI_EXEC_STR(ee), mhi_state_str(state));
+ trace_mhi_intvec_states(mhi_cntrl, ee, state);
if (state == MHI_STATE_SYS_ERR) {
dev_dbg(dev, "System error detected\n");
pm_state = mhi_tryset_pm_state(mhi_cntrl,
@@ -838,6 +836,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+ trace_mhi_ctrl_event(mhi_cntrl, local_rp);
+
switch (type) {
case MHI_PKT_TYPE_BW_REQ_EVENT:
{
@@ -1003,6 +1003,8 @@ int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl,
while (dev_rp != local_rp && event_quota > 0) {
enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp);
+ trace_mhi_data_event(mhi_cntrl, local_rp);
+
chan = MHI_TRE_GET_EV_CHID(local_rp);
WARN_ON(chan >= mhi_cntrl->max_chan);
@@ -1243,6 +1245,7 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len);
mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain);
+ trace_mhi_gen_tre(mhi_cntrl, mhi_chan, mhi_tre);
/* increment WP */
mhi_add_ring_element(mhi_cntrl, tre_ring);
mhi_add_ring_element(mhi_cntrl, buf_ring);
@@ -1337,9 +1340,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
enum mhi_cmd_type cmd = MHI_CMD_NOP;
int ret;
- dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan,
- TO_CH_STATE_TYPE_STR(to_state));
-
+ trace_mhi_channel_command_start(mhi_cntrl, mhi_chan, to_state, TPS("Updating"));
switch (to_state) {
case MHI_CH_STATE_TYPE_RESET:
write_lock_irq(&mhi_chan->lock);
@@ -1406,9 +1407,7 @@ static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl,
write_unlock_irq(&mhi_chan->lock);
}
- dev_dbg(dev, "%d: Channel state change to %s successful\n",
- mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state));
-
+ trace_mhi_channel_command_end(mhi_cntrl, mhi_chan, to_state, TPS("Updated"));
exit_channel_update:
mhi_cntrl->runtime_put(mhi_cntrl);
mhi_device_put(mhi_cntrl->mhi_dev);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index cd6cd14b3d29..51639bfcfec7 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -538,7 +538,7 @@ static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = {
MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101)
};
-static struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
+static const struct mhi_controller_config modem_telit_fn980_hw_v1_config = {
.max_channels = 128,
.timeout_ms = 20000,
.num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels),
diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c
index a2f2feef1476..8b40d3f01acc 100644
--- a/drivers/bus/mhi/host/pm.c
+++ b/drivers/bus/mhi/host/pm.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include "internal.h"
+#include "trace.h"
/*
* Not all MHI state transitions are synchronous. Transitions like Linkdown,
@@ -36,7 +37,10 @@
* M0 <--> M0
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
- * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
+ * L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS
+ * SYS_ERR_PROCESS -> SYS_ERR_FAIL
+ * SYS_ERR_FAIL -> SYS_ERR_DETECT
+ * SYS_ERR_PROCESS --> POR
* L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
* SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
@@ -93,7 +97,12 @@ static const struct mhi_pm_transitions dev_state_transitions[] = {
},
{
MHI_PM_SYS_ERR_PROCESS,
- MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_POR | MHI_PM_SYS_ERR_FAIL | MHI_PM_SHUTDOWN_PROCESS |
+ MHI_PM_LD_ERR_FATAL_DETECT
+ },
+ {
+ MHI_PM_SYS_ERR_FAIL,
+ MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
MHI_PM_LD_ERR_FATAL_DETECT
},
/* L2 States */
@@ -123,6 +132,7 @@ enum mhi_pm_state __must_check mhi_tryset_pm_state(struct mhi_controller *mhi_cn
if (unlikely(!(dev_state_transitions[index].to_states & state)))
return cur_state;
+ trace_mhi_tryset_pm_state(mhi_cntrl, state);
mhi_cntrl->pm_state = state;
return mhi_cntrl->pm_state;
}
@@ -629,7 +639,13 @@ static void mhi_pm_sys_error_transition(struct mhi_controller *mhi_cntrl)
!in_reset, timeout);
if (!ret || in_reset) {
dev_err(dev, "Device failed to exit MHI Reset state\n");
- goto exit_sys_error_transition;
+ write_lock_irq(&mhi_cntrl->pm_lock);
+ cur_state = mhi_tryset_pm_state(mhi_cntrl,
+ MHI_PM_SYS_ERR_FAIL);
+ write_unlock_irq(&mhi_cntrl->pm_lock);
+ /* Shutdown may have occurred, otherwise cleanup now */
+ if (cur_state != MHI_PM_SYS_ERR_FAIL)
+ goto exit_sys_error_transition;
}
/*
@@ -758,7 +774,6 @@ void mhi_pm_st_worker(struct work_struct *work)
struct mhi_controller *mhi_cntrl = container_of(work,
struct mhi_controller,
st_worker);
- struct device *dev = &mhi_cntrl->mhi_dev->dev;
spin_lock_irq(&mhi_cntrl->transition_lock);
list_splice_tail_init(&mhi_cntrl->transition_list, &head);
@@ -766,8 +781,8 @@ void mhi_pm_st_worker(struct work_struct *work)
list_for_each_entry_safe(itr, tmp, &head, node) {
list_del(&itr->node);
- dev_dbg(dev, "Handling state transition: %s\n",
- TO_DEV_STATE_TRANS_STR(itr->state));
+
+ trace_mhi_pm_st_transition(mhi_cntrl, itr->state);
switch (itr->state) {
case DEV_ST_TRANSITION_PBL:
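
The new MHI_PM_SYS_ERR_FAIL entries above give the SYS_ERR handler a legal state to fall back to when the device never leaves reset, while every move is still validated against the dev_state_transitions[] table. A generic sketch of that kind of table-driven check (simplified; the real mhi_tryset_pm_state() indexes the table rather than scanning it):

#include <linux/types.h>

struct pm_transition_sketch {
	u32 from_state;
	u32 to_states;			/* bitmask of legal destinations */
};

/* Sketch: accept the new state only if the table allows it from cur_state. */
static u32 tryset_pm_state_sketch(const struct pm_transition_sketch *table,
				  unsigned int nr, u32 cur_state, u32 new_state)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		if (table[i].from_state != cur_state)
			continue;
		if (table[i].to_states & new_state)
			return new_state;	/* transition accepted */
		break;
	}

	return cur_state;			/* transition rejected, stay put */
}
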
diff --git a/drivers/bus/mhi/host/trace.h b/drivers/bus/mhi/host/trace.h
new file mode 100644
index 000000000000..368515dcb22d
--- /dev/null
+++ b/drivers/bus/mhi/host/trace.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mhi_host
+
+#if !defined(_TRACE_EVENT_MHI_HOST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EVENT_MHI_HOST_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include "../common.h"
+#include "internal.h"
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a);
+#define mhi_state_end(a, b) TRACE_DEFINE_ENUM(MHI_STATE_##a);
+
+MHI_STATE_LIST
+
+#undef mhi_state
+#undef mhi_state_end
+
+#define mhi_state(a, b) { MHI_STATE_##a, b },
+#define mhi_state_end(a, b) { MHI_STATE_##a, b }
+
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a);
+#define mhi_pm_state_end(a, b) TRACE_DEFINE_ENUM(MHI_PM_STATE_##a);
+
+MHI_PM_STATE_LIST
+
+#undef mhi_pm_state
+#undef mhi_pm_state_end
+
+#define mhi_pm_state(a, b) { MHI_PM_STATE_##a, b },
+#define mhi_pm_state_end(a, b) { MHI_PM_STATE_##a, b }
+
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a);
+#define mhi_ee_end(a, b) TRACE_DEFINE_ENUM(MHI_EE_##a);
+
+MHI_EE_LIST
+
+#undef mhi_ee
+#undef mhi_ee_end
+
+#define mhi_ee(a, b) { MHI_EE_##a, b },
+#define mhi_ee_end(a, b) { MHI_EE_##a, b }
+
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a);
+#define ch_state_type_end(a, b) TRACE_DEFINE_ENUM(MHI_CH_STATE_TYPE_##a);
+
+MHI_CH_STATE_TYPE_LIST
+
+#undef ch_state_type
+#undef ch_state_type_end
+
+#define ch_state_type(a, b) { MHI_CH_STATE_TYPE_##a, b },
+#define ch_state_type_end(a, b) { MHI_CH_STATE_TYPE_##a, b }
+
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a);
+#define dev_st_trans_end(a, b) TRACE_DEFINE_ENUM(DEV_ST_TRANSITION_##a);
+
+DEV_ST_TRANSITION_LIST
+
+#undef dev_st_trans
+#undef dev_st_trans_end
+
+#define dev_st_trans(a, b) { DEV_ST_TRANSITION_##a, b },
+#define dev_st_trans_end(a, b) { DEV_ST_TRANSITION_##a, b }
+
+#define TPS(x) tracepoint_string(x)
+
+TRACE_EVENT(mhi_gen_tre,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
+ struct mhi_ring_element *mhi_tre),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, mhi_tre),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, ch_num)
+ __field(void *, wp)
+ __field(__le64, tre_ptr)
+ __field(__le32, dword0)
+ __field(__le32, dword1)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->ch_num = mhi_chan->chan;
+ __entry->wp = mhi_tre;
+ __entry->tre_ptr = mhi_tre->ptr;
+ __entry->dword0 = mhi_tre->dword[0];
+ __entry->dword1 = mhi_tre->dword[1];
+ ),
+
+ TP_printk("%s: Chan: %d TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x\n",
+ __get_str(name), __entry->ch_num, __entry->wp, __entry->tre_ptr,
+ __entry->dword0, __entry->dword1)
+);
+
+TRACE_EVENT(mhi_intvec_states,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int dev_ee, int dev_state),
+
+ TP_ARGS(mhi_cntrl, dev_ee, dev_state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, local_ee)
+ __field(int, state)
+ __field(int, dev_ee)
+ __field(int, dev_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->local_ee = mhi_cntrl->ee;
+ __entry->state = mhi_cntrl->dev_state;
+ __entry->dev_ee = dev_ee;
+ __entry->dev_state = dev_state;
+ ),
+
+ TP_printk("%s: Local EE: %s State: %s Device EE: %s Dev State: %s\n",
+ __get_str(name),
+ __print_symbolic(__entry->local_ee, MHI_EE_LIST),
+ __print_symbolic(__entry->state, MHI_STATE_LIST),
+ __print_symbolic(__entry->dev_ee, MHI_EE_LIST),
+ __print_symbolic(__entry->dev_state, MHI_STATE_LIST))
+);
+
+TRACE_EVENT(mhi_tryset_pm_state,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int pm_state),
+
+ TP_ARGS(mhi_cntrl, pm_state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, pm_state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ if (pm_state)
+ pm_state = __fls(pm_state);
+ __entry->pm_state = pm_state;
+ ),
+
+ TP_printk("%s: PM state: %s\n", __get_str(name),
+ __print_symbolic(__entry->pm_state, MHI_PM_STATE_LIST))
+);
+
+DECLARE_EVENT_CLASS(mhi_process_event_ring,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(__le32, dword0)
+ __field(__le32, dword1)
+ __field(int, state)
+ __field(__le64, ptr)
+ __field(void *, rp)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->rp = rp;
+ __entry->ptr = rp->ptr;
+ __entry->dword0 = rp->dword[0];
+ __entry->dword1 = rp->dword[1];
+ __entry->state = MHI_TRE_GET_EV_STATE(rp);
+ ),
+
+ TP_printk("%s: TRE: 0x%p TRE buf: 0x%llx DWORD0: 0x%08x DWORD1: 0x%08x State: %s\n",
+ __get_str(name), __entry->rp, __entry->ptr, __entry->dword0,
+ __entry->dword1, __print_symbolic(__entry->state, MHI_STATE_LIST))
+);
+
+DEFINE_EVENT(mhi_process_event_ring, mhi_data_event,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp)
+);
+
+DEFINE_EVENT(mhi_process_event_ring, mhi_ctrl_event,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_ring_element *rp),
+
+ TP_ARGS(mhi_cntrl, rp)
+);
+
+DECLARE_EVENT_CLASS(mhi_update_channel_state,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, ch_num)
+ __field(int, state)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->ch_num = mhi_chan->chan;
+ __entry->state = state;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("%s: chan%d: %s state to: %s\n",
+ __get_str(name), __entry->ch_num, __entry->reason,
+ __print_symbolic(__entry->state, MHI_CH_STATE_TYPE_LIST))
+);
+
+DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_start,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason)
+);
+
+DEFINE_EVENT(mhi_update_channel_state, mhi_channel_command_end,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int state,
+ const char *reason),
+
+ TP_ARGS(mhi_cntrl, mhi_chan, state, reason)
+);
+
+TRACE_EVENT(mhi_pm_st_transition,
+
+ TP_PROTO(struct mhi_controller *mhi_cntrl, int state),
+
+ TP_ARGS(mhi_cntrl, state),
+
+ TP_STRUCT__entry(
+ __string(name, mhi_cntrl->mhi_dev->name)
+ __field(int, state)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, mhi_cntrl->mhi_dev->name);
+ __entry->state = state;
+ ),
+
+ TP_printk("%s: Handling state transition: %s\n", __get_str(name),
+ __print_symbolic(__entry->state, DEV_ST_TRANSITION_LIST))
+);
+
+#endif
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/bus/mhi/host
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+#include <trace/define_trace.h>
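
The trailer above (TRACE_INCLUDE_PATH, TRACE_INCLUDE_FILE and the final define_trace.h include) is what lets a single translation unit — init.c in this series — define CREATE_TRACE_POINTS and instantiate the event bodies, while every other file just includes the header and calls the trace_*() helpers. A condensed sketch of the consumer side, using an event declared above:

#include "internal.h"

/*
 * Exactly one .c file (init.c above) does:
 *     #define CREATE_TRACE_POINTS
 *     #include "trace.h"
 * to emit the tracepoint definitions; every other user only needs:
 */
#include "trace.h"

static void emit_transition_sketch(struct mhi_controller *mhi_cntrl, int state)
{
	/* Compiles down to a static-branch no-op unless the event is enabled. */
	trace_mhi_pm_st_transition(mhi_cntrl, state);
}
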
diff --git a/drivers/bus/ts-nbus.c b/drivers/bus/ts-nbus.c
index 4fa932cb0915..baf22a82c47a 100644
--- a/drivers/bus/ts-nbus.c
+++ b/drivers/bus/ts-nbus.c
@@ -39,45 +39,39 @@ struct ts_nbus {
/*
* request all gpios required by the bus.
*/
-static int ts_nbus_init_pdata(struct platform_device *pdev, struct ts_nbus
- *ts_nbus)
+static int ts_nbus_init_pdata(struct platform_device *pdev,
+ struct ts_nbus *ts_nbus)
{
ts_nbus->data = devm_gpiod_get_array(&pdev->dev, "ts,data",
GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->data)) {
- dev_err(&pdev->dev, "failed to retrieve ts,data-gpio from dts\n");
- return PTR_ERR(ts_nbus->data);
- }
+ if (IS_ERR(ts_nbus->data))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->data),
+ "failed to retrieve ts,data-gpio from dts\n");
ts_nbus->csn = devm_gpiod_get(&pdev->dev, "ts,csn", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->csn)) {
- dev_err(&pdev->dev, "failed to retrieve ts,csn-gpio from dts\n");
- return PTR_ERR(ts_nbus->csn);
- }
+ if (IS_ERR(ts_nbus->csn))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->csn),
+ "failed to retrieve ts,csn-gpio from dts\n");
ts_nbus->txrx = devm_gpiod_get(&pdev->dev, "ts,txrx", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->txrx)) {
- dev_err(&pdev->dev, "failed to retrieve ts,txrx-gpio from dts\n");
- return PTR_ERR(ts_nbus->txrx);
- }
+ if (IS_ERR(ts_nbus->txrx))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->txrx),
+ "failed to retrieve ts,txrx-gpio from dts\n");
ts_nbus->strobe = devm_gpiod_get(&pdev->dev, "ts,strobe", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->strobe)) {
- dev_err(&pdev->dev, "failed to retrieve ts,strobe-gpio from dts\n");
- return PTR_ERR(ts_nbus->strobe);
- }
+ if (IS_ERR(ts_nbus->strobe))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->strobe),
+ "failed to retrieve ts,strobe-gpio from dts\n");
ts_nbus->ale = devm_gpiod_get(&pdev->dev, "ts,ale", GPIOD_OUT_HIGH);
- if (IS_ERR(ts_nbus->ale)) {
- dev_err(&pdev->dev, "failed to retrieve ts,ale-gpio from dts\n");
- return PTR_ERR(ts_nbus->ale);
- }
+ if (IS_ERR(ts_nbus->ale))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->ale),
+ "failed to retrieve ts,ale-gpio from dts\n");
ts_nbus->rdy = devm_gpiod_get(&pdev->dev, "ts,rdy", GPIOD_IN);
- if (IS_ERR(ts_nbus->rdy)) {
- dev_err(&pdev->dev, "failed to retrieve ts,rdy-gpio from dts\n");
- return PTR_ERR(ts_nbus->rdy);
- }
+ if (IS_ERR(ts_nbus->rdy))
+ return dev_err_probe(&pdev->dev, PTR_ERR(ts_nbus->rdy),
+ "failed to retrieve ts,rdy-gpio from dts\n");
return 0;
}
@@ -273,7 +267,7 @@ EXPORT_SYMBOL_GPL(ts_nbus_write);
static int ts_nbus_probe(struct platform_device *pdev)
{
struct pwm_device *pwm;
- struct pwm_args pargs;
+ struct pwm_state state;
struct device *dev = &pdev->dev;
struct ts_nbus *ts_nbus;
int ret;
@@ -289,32 +283,24 @@ static int ts_nbus_probe(struct platform_device *pdev)
return ret;
pwm = devm_pwm_get(dev, NULL);
- if (IS_ERR(pwm)) {
- ret = PTR_ERR(pwm);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "unable to request PWM\n");
- return ret;
- }
+ if (IS_ERR(pwm))
+ return dev_err_probe(dev, PTR_ERR(pwm),
+ "unable to request PWM\n");
- pwm_get_args(pwm, &pargs);
- if (!pargs.period) {
- dev_err(&pdev->dev, "invalid PWM period\n");
- return -EINVAL;
- }
+ pwm_init_state(pwm, &state);
+ if (!state.period)
+ return dev_err_probe(dev, -EINVAL, "invalid PWM period\n");
- /*
- * FIXME: pwm_apply_args() should be removed when switching to
- * the atomic PWM API.
- */
- pwm_apply_args(pwm);
- ret = pwm_config(pwm, pargs.period, pargs.period);
+ state.duty_cycle = state.period;
+ state.enabled = true;
+
+ ret = pwm_apply_state(pwm, &state);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret, "failed to configure PWM\n");
/*
* we can now start the FPGA and populate the peripherals.
*/
- pwm_enable(pwm);
ts_nbus->pwm = pwm;
/*
@@ -324,7 +310,8 @@ static int ts_nbus_probe(struct platform_device *pdev)
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret < 0)
- return ret;
+ return dev_err_probe(dev, ret,
+ "failed to populate platform devices on bus\n");
dev_info(dev, "initialized\n");
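
The ts-nbus rework above folds two cleanups together: every error path now goes through dev_err_probe(), which also keeps -EPROBE_DEFER quiet, and the legacy pwm_apply_args()/pwm_config()/pwm_enable() sequence becomes one atomic pwm_apply_state() on a state seeded by pwm_init_state(). A condensed sketch of the resulting probe flow:

#include <linux/pwm.h>
#include <linux/platform_device.h>

/* Sketch: request a PWM and run it at 100% duty in one atomic update. */
static int demo_pwm_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pwm_state state;
	struct pwm_device *pwm;
	int ret;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return dev_err_probe(dev, PTR_ERR(pwm), "unable to request PWM\n");

	pwm_init_state(pwm, &state);	/* period/polarity from the reference args */
	if (!state.period)
		return dev_err_probe(dev, -EINVAL, "invalid PWM period\n");

	state.duty_cycle = state.period;
	state.enabled = true;

	ret = pwm_apply_state(pwm, &state);
	if (ret < 0)
		return dev_err_probe(dev, ret, "failed to configure PWM\n");

	return 0;
}
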
diff --git a/drivers/cdx/Makefile b/drivers/cdx/Makefile
index 5d1ea482419f..749a3295c2bd 100644
--- a/drivers/cdx/Makefile
+++ b/drivers/cdx/Makefile
@@ -8,3 +8,7 @@
ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CDX_BUS
obj-$(CONFIG_CDX_BUS) += cdx.o controller/
+
+ifdef CONFIG_GENERIC_MSI_IRQ
+obj-$(CONFIG_CDX_BUS) += cdx_msi.o
+endif
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index b74d76afccb6..236d381dc5f7 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -56,6 +56,7 @@
*/
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -302,8 +303,19 @@ static int cdx_probe(struct device *dev)
{
struct cdx_driver *cdx_drv = to_cdx_driver(dev->driver);
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
int error;
+ /*
+ * Setup MSI device data so that generic MSI alloc/free can
+ * be used by the device driver.
+ */
+ if (cdx->msi_domain) {
+ error = msi_setup_device_data(&cdx_dev->dev);
+ if (error)
+ return error;
+ }
+
error = cdx_drv->probe(cdx_dev);
if (error) {
dev_err_probe(dev, error, "%s failed\n", __func__);
@@ -787,6 +799,7 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
/* Populate CDX dev params */
cdx_dev->req_id = dev_params->req_id;
+ cdx_dev->msi_dev_id = dev_params->msi_dev_id;
cdx_dev->vendor = dev_params->vendor;
cdx_dev->device = dev_params->device;
cdx_dev->subsystem_vendor = dev_params->subsys_vendor;
@@ -804,12 +817,19 @@ int cdx_device_add(struct cdx_dev_params *dev_params)
cdx_dev->dev.bus = &cdx_bus_type;
cdx_dev->dev.dma_mask = &cdx_dev->dma_mask;
cdx_dev->dev.release = cdx_device_release;
+ cdx_dev->msi_write_pending = false;
+ mutex_init(&cdx_dev->irqchip_lock);
/* Set Name */
dev_set_name(&cdx_dev->dev, "cdx-%02x:%02x",
((cdx->id << CDX_CONTROLLER_ID_SHIFT) | (cdx_dev->bus_num & CDX_BUS_NUM_MASK)),
cdx_dev->dev_num);
+ if (cdx->msi_domain) {
+ cdx_dev->num_msi = dev_params->num_msi;
+ dev_set_msi_domain(&cdx_dev->dev, cdx->msi_domain);
+ }
+
ret = device_add(&cdx_dev->dev);
if (ret) {
dev_err(&cdx_dev->dev,
diff --git a/drivers/cdx/cdx.h b/drivers/cdx/cdx.h
index 300ad8be7a34..9c60c04dcf87 100644
--- a/drivers/cdx/cdx.h
+++ b/drivers/cdx/cdx.h
@@ -25,6 +25,8 @@
* @req_id: Requestor ID associated with CDX device
* @class: Class of the CDX Device
* @revision: Revision of the CDX device
+ * @msi_dev_id: MSI device ID associated with CDX device
+ * @num_msi: Number of MSI's supported by the device
*/
struct cdx_dev_params {
struct cdx_controller *cdx;
@@ -40,6 +42,8 @@ struct cdx_dev_params {
u32 req_id;
u32 class;
u8 revision;
+ u32 msi_dev_id;
+ u32 num_msi;
};
/**
@@ -79,4 +83,12 @@ int cdx_device_add(struct cdx_dev_params *dev_params);
*/
struct device *cdx_bus_add(struct cdx_controller *cdx, u8 bus_num);
+/**
+ * cdx_msi_domain_init - Init the CDX bus MSI domain.
+ * @dev: Device of the CDX bus controller
+ *
+ * Return: CDX MSI domain, NULL on failure
+ */
+struct irq_domain *cdx_msi_domain_init(struct device *dev);
+
#endif /* _CDX_H_ */
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
new file mode 100644
index 000000000000..e55f1716cfcb
--- /dev/null
+++ b/drivers/cdx/cdx_msi.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * AMD CDX bus driver MSI support
+ *
+ * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/msi.h>
+#include <linux/cdx/cdx_bus.h>
+
+#include "cdx.h"
+
+static void cdx_msi_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+
+ /*
+  * Do not act on the message here; cache it and let irq_bus_sync_unlock(),
+  * which runs in preemptible task context, write it out.
+  */
+ msi_desc->msg = *msg;
+ cdx_dev->msi_write_pending = true;
+}
+
+static void cdx_msi_write_irq_lock(struct irq_data *irq_data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+
+ mutex_lock(&cdx_dev->irqchip_lock);
+}
+
+static void cdx_msi_write_irq_unlock(struct irq_data *irq_data)
+{
+ struct msi_desc *msi_desc = irq_data_get_msi_desc(irq_data);
+ struct cdx_device *cdx_dev = to_cdx_device(msi_desc->dev);
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ if (!cdx_dev->msi_write_pending) {
+ mutex_unlock(&cdx_dev->irqchip_lock);
+ return;
+ }
+
+ cdx_dev->msi_write_pending = false;
+ mutex_unlock(&cdx_dev->irqchip_lock);
+
+ dev_config.msi.msi_index = msi_desc->msi_index;
+ dev_config.msi.data = msi_desc->msg.data;
+ dev_config.msi.addr = ((u64)(msi_desc->msg.address_hi) << 32) | msi_desc->msg.address_lo;
+
+ /*
+ * dev_configure() is a controller callback which can interact with
+ * firmware or other entities, and can sleep, so invoke this function
+ * outside of the mutex-held region.
+ */
+ dev_config.type = CDX_DEV_MSI_CONF;
+ if (cdx->ops->dev_configure)
+ cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num, &dev_config);
+}
+
+int cdx_enable_msi(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ dev_config.type = CDX_DEV_MSI_ENABLE;
+ dev_config.msi_enable = true;
+ if (cdx->ops->dev_configure) {
+ return cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num,
+ &dev_config);
+ }
+
+ return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL_GPL(cdx_enable_msi);
+
+void cdx_disable_msi(struct cdx_device *cdx_dev)
+{
+ struct cdx_controller *cdx = cdx_dev->cdx;
+ struct cdx_device_config dev_config;
+
+ dev_config.type = CDX_DEV_MSI_ENABLE;
+ dev_config.msi_enable = false;
+ if (cdx->ops->dev_configure)
+ cdx->ops->dev_configure(cdx, cdx_dev->bus_num, cdx_dev->dev_num, &dev_config);
+}
+EXPORT_SYMBOL_GPL(cdx_disable_msi);
+
+static struct irq_chip cdx_msi_irq_chip = {
+ .name = "CDX-MSI",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = msi_domain_set_affinity,
+ .irq_write_msi_msg = cdx_msi_write_msg,
+ .irq_bus_lock = cdx_msi_write_irq_lock,
+ .irq_bus_sync_unlock = cdx_msi_write_irq_unlock
+};
+
+/* Convert an msi_desc to a unique identifier within the domain. */
+static irq_hw_number_t cdx_domain_calc_hwirq(struct cdx_device *dev,
+ struct msi_desc *desc)
+{
+ return ((irq_hw_number_t)dev->msi_dev_id << 10) | desc->msi_index;
+}
+
+static void cdx_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+ arg->desc = desc;
+ arg->hwirq = cdx_domain_calc_hwirq(to_cdx_device(desc->dev), desc);
+}
+
+static int cdx_msi_prepare(struct irq_domain *msi_domain,
+ struct device *dev,
+ int nvec, msi_alloc_info_t *info)
+{
+ struct cdx_device *cdx_dev = to_cdx_device(dev);
+ struct device *parent = cdx_dev->cdx->dev;
+ struct msi_domain_info *msi_info;
+ u32 dev_id;
+ int ret;
+
+ /* Retrieve device ID from requestor ID using parent device */
+ ret = of_map_id(parent->of_node, cdx_dev->msi_dev_id, "msi-map", "msi-map-mask",
+ NULL, &dev_id);
+ if (ret) {
+ dev_err(dev, "of_map_id failed for MSI: %d\n", ret);
+ return ret;
+ }
+
+#ifdef GENERIC_MSI_DOMAIN_OPS
+ /* Set the device Id to be passed to the GIC-ITS */
+ info->scratchpad[0].ul = dev_id;
+#endif
+
+ msi_info = msi_get_domain_info(msi_domain->parent);
+
+ return msi_info->ops->msi_prepare(msi_domain->parent, dev, nvec, info);
+}
+
+static struct msi_domain_ops cdx_msi_ops = {
+ .msi_prepare = cdx_msi_prepare,
+ .set_desc = cdx_msi_set_desc
+};
+
+static struct msi_domain_info cdx_msi_domain_info = {
+ .ops = &cdx_msi_ops,
+ .chip = &cdx_msi_irq_chip,
+ .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+ MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS | MSI_FLAG_FREE_MSI_DESCS
+};
+
+struct irq_domain *cdx_msi_domain_init(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct fwnode_handle *fwnode_handle;
+ struct irq_domain *cdx_msi_domain;
+ struct device_node *parent_node;
+ struct irq_domain *parent;
+
+ fwnode_handle = of_node_to_fwnode(np);
+
+ parent_node = of_parse_phandle(np, "msi-map", 1);
+ if (!parent_node) {
+ dev_err(dev, "msi-map not present on cdx controller\n");
+ return NULL;
+ }
+
+ parent = irq_find_matching_fwnode(of_node_to_fwnode(parent_node), DOMAIN_BUS_NEXUS);
+ if (!parent || !msi_get_domain_info(parent)) {
+ dev_err(dev, "unable to locate ITS domain\n");
+ return NULL;
+ }
+
+ cdx_msi_domain = msi_create_irq_domain(fwnode_handle, &cdx_msi_domain_info, parent);
+ if (!cdx_msi_domain) {
+ dev_err(dev, "unable to create CDX-MSI domain\n");
+ return NULL;
+ }
+
+ dev_dbg(dev, "CDX-MSI domain created\n");
+
+ return cdx_msi_domain;
+}
+EXPORT_SYMBOL_NS_GPL(cdx_msi_domain_init, CDX_BUS_CONTROLLER);
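
cdx_msi.c above relies on the irq_bus_lock()/irq_bus_sync_unlock() pair: irq_write_msi_msg() may run in atomic context, so it only caches the message and sets msi_write_pending, and the sleepable dev_configure() firmware call happens after the bus lock is dropped from task context. A generic sketch of that deferral pattern, with hypothetical names:

#include <linux/msi.h>
#include <linux/mutex.h>

struct demo_dev {
	struct mutex lock;		/* taken in irq_bus_lock() */
	struct msi_msg pending;
	bool dirty;
};

/* Called with demo_dev->lock held, possibly from atomic context: only record. */
static void demo_write_msg(struct demo_dev *d, const struct msi_msg *msg)
{
	d->pending = *msg;
	d->dirty = true;
}

/* irq_bus_sync_unlock(): runs in preemptible context, may sleep afterwards. */
static void demo_sync_unlock(struct demo_dev *d)
{
	bool flush = d->dirty;

	d->dirty = false;
	mutex_unlock(&d->lock);

	if (flush) {
		/* Sleeping firmware/controller call goes here, outside the mutex. */
	}
}
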
diff --git a/drivers/cdx/controller/Kconfig b/drivers/cdx/controller/Kconfig
index 61bf17fbe433..f8e729761aee 100644
--- a/drivers/cdx/controller/Kconfig
+++ b/drivers/cdx/controller/Kconfig
@@ -9,6 +9,7 @@ if CDX_BUS
config CDX_CONTROLLER
tristate "CDX bus controller"
+ select GENERIC_MSI_IRQ
select REMOTEPROC
select RPMSG
help
diff --git a/drivers/cdx/controller/cdx_controller.c b/drivers/cdx/controller/cdx_controller.c
index 85fe4b1c4e5e..112a1541de6d 100644
--- a/drivers/cdx/controller/cdx_controller.c
+++ b/drivers/cdx/controller/cdx_controller.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/cdx/cdx_bus.h>
+#include <linux/irqdomain.h>
#include "cdx_controller.h"
#include "../cdx.h"
@@ -60,9 +61,19 @@ static int cdx_configure_device(struct cdx_controller *cdx,
u8 bus_num, u8 dev_num,
struct cdx_device_config *dev_config)
{
+ u16 msi_index;
int ret = 0;
+ u32 data;
+ u64 addr;
switch (dev_config->type) {
+ case CDX_DEV_MSI_CONF:
+ msi_index = dev_config->msi.msi_index;
+ data = dev_config->msi.data;
+ addr = dev_config->msi.addr;
+
+ ret = cdx_mcdi_write_msi(cdx->priv, bus_num, dev_num, msi_index, addr, data);
+ break;
case CDX_DEV_RESET_CONF:
ret = cdx_mcdi_reset_device(cdx->priv, bus_num, dev_num);
break;
@@ -70,6 +81,9 @@ static int cdx_configure_device(struct cdx_controller *cdx,
ret = cdx_mcdi_bus_master_enable(cdx->priv, bus_num, dev_num,
dev_config->bus_master_enable);
break;
+ case CDX_DEV_MSI_ENABLE:
+ ret = cdx_mcdi_msi_enable(cdx->priv, bus_num, dev_num, dev_config->msi_enable);
+ break;
default:
ret = -EINVAL;
}
@@ -178,6 +192,14 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
cdx->priv = cdx_mcdi;
cdx->ops = &cdx_ops;
+ /* Create MSI domain */
+ cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
+ if (!cdx->msi_domain) {
+ dev_err(&pdev->dev, "cdx_msi_domain_init() failed");
+ ret = -ENODEV;
+ goto cdx_msi_fail;
+ }
+
ret = cdx_setup_rpmsg(pdev);
if (ret) {
if (ret != -EPROBE_DEFER)
@@ -189,6 +211,8 @@ static int xlnx_cdx_probe(struct platform_device *pdev)
return 0;
cdx_rpmsg_fail:
+ irq_domain_remove(cdx->msi_domain);
+cdx_msi_fail:
kfree(cdx);
cdx_alloc_fail:
cdx_mcdi_finish(cdx_mcdi);
@@ -205,6 +229,7 @@ static int xlnx_cdx_remove(struct platform_device *pdev)
cdx_destroy_rpmsg(pdev);
+ irq_domain_remove(cdx->msi_domain);
kfree(cdx);
cdx_mcdi_finish(cdx_mcdi);
diff --git a/drivers/cdx/controller/mc_cdx_pcol.h b/drivers/cdx/controller/mc_cdx_pcol.h
index 2de019406b57..832a44af963e 100644
--- a/drivers/cdx/controller/mc_cdx_pcol.h
+++ b/drivers/cdx/controller/mc_cdx_pcol.h
@@ -455,6 +455,12 @@
#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_OFST 84
#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID_LEN 4
+/* MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2 msgresponse */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN 92
+/* Requester ID used by device for GIC ITS DeviceID */
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID_OFST 88
+#define MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID_LEN 4
+
/***********************************/
/*
* MC_CMD_CDX_BUS_DOWN
@@ -617,6 +623,64 @@
#define MC_CMD_CDX_DEVICE_CONTROL_GET_OUT_MMIO_REGIONS_ENABLE_WIDTH 1
/***********************************/
+/*
+ * MC_CMD_CDX_DEVICE_WRITE_MSI_MSG
+ * Populates the MSI message to be used by the hardware to raise the specified
+ * interrupt vector. Versal-net implementation-specific limitations are that
+ * only 4 CDX devices with MSI interrupt capability are supported, and all
+ * vectors within a device must use the same write address. The command
+ * returns EINVAL if either of these limitations is violated.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG 0x9
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_MSGSET 0x9
+#undef MC_CMD_0x9_PRIVILEGE_CTG
+
+#define MC_CMD_0x9_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN msgrequest */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN 28
+/* Device bus number, in range 0 to BUS_COUNT-1 */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_BUS_OFST 0
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_BUS_LEN 4
+/* Device number relative to the bus, in range 0 to DEVICE_COUNT-1 for that bus */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE_OFST 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE_LEN 4
+/*
+ * Device-relative MSI vector number. Must be < MSI_COUNT reported for the
+ * device.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR_OFST 8
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR_LEN 4
+/* Reserved (alignment) */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_RESERVED_OFST 12
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_RESERVED_LEN 4
+/*
+ * MSI address to be used by the hardware. Typically, on ARM systems this
+ * address is translated by the IOMMU (if enabled) and it is the responsibility
+ * of the entity managing the IOMMU (APU kernel) to supply the correct IOVA
+ * here.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_OFST 16
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LEN 8
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_OFST 16
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_LEN 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_LBN 128
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_LO_WIDTH 32
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_OFST 20
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_LEN 4
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_LBN 160
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS_HI_WIDTH 32
+/*
+ * MSI data to be used by the hardware. On Versal-net, only the lower 16 bits
+ * are used; the remaining bits are ignored and should be set to zero.
+ */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA_OFST 24
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA_LEN 4
+
+/* MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_OUT msgresponse */
+#define MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_OUT_LEN 0
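For orientation, the request layout implied by the OFST/LEN definitions above can be pictured as the packed structure below. This is an illustrative sketch only (the driver fills the buffer with the MCDI_SET_* macros, and little-endian encoding is assumed here), not a definition from the patch:

/* Conceptual wire layout of MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN,
 * reconstructed from the offsets/lengths above. */
struct cdx_write_msi_msg_in {
	__le32 bus;		/* offset 0 */
	__le32 device;		/* offset 4 */
	__le32 msi_vector;	/* offset 8 */
	__le32 reserved;	/* offset 12, alignment */
	__le64 msi_address;	/* offset 16 */
	__le32 msi_data;	/* offset 24 */
} __packed;			/* 28 bytes total, matching ..._IN_LEN */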
+
+/***********************************/
/* MC_CMD_V2_EXTN - Encapsulation for a v2 extended command */
#define MC_CMD_V2_EXTN 0x7f
diff --git a/drivers/cdx/controller/mcdi_functions.c b/drivers/cdx/controller/mcdi_functions.c
index b1f530946389..885c69e6ebe5 100644
--- a/drivers/cdx/controller/mcdi_functions.c
+++ b/drivers/cdx/controller/mcdi_functions.c
@@ -49,7 +49,7 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
u8 bus_num, u8 dev_num,
struct cdx_dev_params *dev_params)
{
- MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN);
MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_IN_LEN);
struct resource *res = &dev_params->res[0];
size_t outlen;
@@ -64,7 +64,7 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
if (ret)
return ret;
- if (outlen != MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_LEN)
+ if (outlen != MC_CMD_CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_LEN)
return -EIO;
dev_params->bus_num = bus_num;
@@ -73,6 +73,9 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
req_id = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_REQUESTER_ID);
dev_params->req_id = req_id;
+ dev_params->msi_dev_id = MCDI_DWORD(outbuf,
+ CDX_BUS_GET_DEVICE_CONFIG_OUT_V2_REQUESTER_DEVICE_ID);
+
dev_params->res_count = 0;
if (MCDI_QWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MMIO_REGION0_SIZE) != 0) {
res[dev_params->res_count].start =
@@ -127,6 +130,7 @@ int cdx_mcdi_get_dev_config(struct cdx_mcdi *cdx,
dev_params->class = MCDI_DWORD(outbuf,
CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_CLASS) & 0xFFFFFF;
dev_params->revision = MCDI_BYTE(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_DEVICE_REVISION);
+ dev_params->num_msi = MCDI_DWORD(outbuf, CDX_BUS_GET_DEVICE_CONFIG_OUT_MSI_COUNT);
return 0;
}
@@ -155,6 +159,24 @@ int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num)
return ret;
}
+int cdx_mcdi_write_msi(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num,
+ u32 msi_vector, u64 msi_address, u32 msi_data)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG_IN_LEN);
+ int ret;
+
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_BUS, bus_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_DEVICE, dev_num);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_VECTOR, msi_vector);
+ MCDI_SET_QWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_ADDRESS, msi_address);
+ MCDI_SET_DWORD(inbuf, CDX_DEVICE_WRITE_MSI_MSG_IN_MSI_DATA, msi_data);
+
+ ret = cdx_mcdi_rpc(cdx, MC_CMD_CDX_DEVICE_WRITE_MSI_MSG, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ return ret;
+}
+
int cdx_mcdi_reset_device(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_CDX_DEVICE_RESET_IN_LEN);
@@ -226,3 +248,10 @@ int cdx_mcdi_bus_master_enable(struct cdx_mcdi *cdx, u8 bus_num,
return cdx_mcdi_ctrl_flag_set(cdx, bus_num, dev_num, enable,
MC_CMD_CDX_DEVICE_CONTROL_SET_IN_BUS_MASTER_ENABLE_LBN);
}
+
+int cdx_mcdi_msi_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable)
+{
+ return cdx_mcdi_ctrl_flag_set(cdx, bus_num, dev_num, enable,
+ MC_CMD_CDX_DEVICE_CONTROL_SET_IN_MSI_ENABLE_LBN);
+}
diff --git a/drivers/cdx/controller/mcdi_functions.h b/drivers/cdx/controller/mcdi_functions.h
index 258a5462fbe3..b9942affdc6b 100644
--- a/drivers/cdx/controller/mcdi_functions.h
+++ b/drivers/cdx/controller/mcdi_functions.h
@@ -66,6 +66,26 @@ int cdx_mcdi_bus_enable(struct cdx_mcdi *cdx, u8 bus_num);
int cdx_mcdi_bus_disable(struct cdx_mcdi *cdx, u8 bus_num);
/**
+ * cdx_mcdi_write_msi - Write MSI configuration for CDX device
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @msi_vector: Device-relative MSI vector number.
+ * Must be < MSI_COUNT reported for the device.
+ * @msi_address: MSI address to be used by the hardware. Typically, on ARM
+ * systems this address is translated by the IOMMU (if enabled) and
+ * it is the responsibility of the entity managing the IOMMU (APU kernel)
+ * to supply the correct IOVA here.
+ * @msi_data: MSI data to be used by the hardware. On Versal-net, only the
+ * lower 16 bits are used; the remaining bits are ignored and should be
+ * set to zero.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_write_msi(struct cdx_mcdi *cdx, u8 bus_num, u8 dev_num,
+ u32 msi_vector, u64 msi_address, u32 msi_data);
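A brief caller sketch (hypothetical values; msg is assumed to be a struct msi_msg already composed by the parent MSI domain):

	u64 addr = ((u64)msg->address_hi << 32) | msg->address_lo;
	int err;

	/* Program vector 0 of device 0:0 with the composed address/data pair */
	err = cdx_mcdi_write_msi(cdx, 0, 0, 0, addr, msg->data);
	if (err)
		dev_err(dev, "writing MSI message failed: %d\n", err);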
+
+/**
* cdx_mcdi_reset_device - Reset cdx device represented by bus_num:dev_num
* @cdx: pointer to MCDI interface.
* @bus_num: Bus number.
@@ -89,4 +109,17 @@ int cdx_mcdi_reset_device(struct cdx_mcdi *cdx,
int cdx_mcdi_bus_master_enable(struct cdx_mcdi *cdx, u8 bus_num,
u8 dev_num, bool enable);
+/**
+ * cdx_mcdi_msi_enable - Enable/Disable MSIs for cdx device represented
+ * by bus_num:dev_num
+ * @cdx: pointer to MCDI interface.
+ * @bus_num: Bus number.
+ * @dev_num: Device number.
+ * @enable: Enable MSIs if set, disable otherwise.
+ *
+ * Return: 0 on success, <0 on failure
+ */
+int cdx_mcdi_msi_enable(struct cdx_mcdi *cdx, u8 bus_num,
+ u8 dev_num, bool enable);
+
#endif /* CDX_MCDI_FUNCTIONS_H */
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9c90b1d2c036..d51fc8321d41 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -87,7 +87,6 @@ struct hpets {
struct hpets *hp_next;
struct hpet __iomem *hp_hpet;
unsigned long hp_hpet_phys;
- struct clocksource *hp_clocksource;
unsigned long long hp_tick_freq;
unsigned long hp_delta;
unsigned int hp_ntimer;
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index 019cf6079cec..4f6c3cb8aa41 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -636,11 +636,11 @@ static int hwicap_setup(struct platform_device *pdev, int id,
retval = -ENOMEM;
goto failed;
}
- dev_set_drvdata(dev, (void *)drvdata);
+ dev_set_drvdata(dev, drvdata);
drvdata->base_address = devm_platform_ioremap_resource(pdev, 0);
- if (!drvdata->base_address) {
- retval = -ENODEV;
+ if (IS_ERR(drvdata->base_address)) {
+ retval = PTR_ERR(drvdata->base_address);
goto failed;
}
diff --git a/drivers/char/xillybus/xillybus_of.c b/drivers/char/xillybus/xillybus_of.c
index e5372e45d211..8802e2a6fd20 100644
--- a/drivers/char/xillybus/xillybus_of.c
+++ b/drivers/char/xillybus/xillybus_of.c
@@ -64,19 +64,17 @@ static int xilly_drv_probe(struct platform_device *op)
return xillybus_endpoint_discovery(endpoint);
}
-static int xilly_drv_remove(struct platform_device *op)
+static void xilly_drv_remove(struct platform_device *op)
{
struct device *dev = &op->dev;
struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
xillybus_endpoint_remove(endpoint);
-
- return 0;
}
static struct platform_driver xillybus_platform_driver = {
.probe = xilly_drv_probe,
- .remove = xilly_drv_remove,
+ .remove_new = xilly_drv_remove,
.driver = {
.name = xillyname,
.of_match_table = xillybus_of_match,
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 93183287c58d..43514e6f3b78 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -376,14 +376,9 @@ static void __init of_omap2_apll_setup(struct device_node *node)
}
clk_hw->fixed_rate = val;
- if (of_property_read_u32(node, "ti,bit-shift", &val)) {
- pr_err("%pOFn missing bit-shift\n", node);
- goto cleanup;
- }
-
- clk_hw->enable_bit = val;
- ad->enable_mask = 0x3 << val;
- ad->autoidle_mask = 0x3 << val;
+ clk_hw->enable_bit = ti_clk_get_legacy_bit_shift(node);
+ ad->enable_mask = 0x3 << clk_hw->enable_bit;
+ ad->autoidle_mask = 0x3 << clk_hw->enable_bit;
if (of_property_read_u32(node, "ti,idlest-shift", &val)) {
pr_err("%pOFn missing idlest-shift\n", node);
diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c
index 1862958ab412..f2117fef7c7d 100644
--- a/drivers/clk/ti/clk.c
+++ b/drivers/clk/ti/clk.c
@@ -7,6 +7,7 @@
* Tero Kristo <t-kristo@ti.com>
*/
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -15,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/list.h>
+#include <linux/minmax.h>
#include <linux/regmap.h>
#include <linux/string_helpers.h>
#include <linux/memblock.h>
@@ -114,20 +116,26 @@ int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops)
/*
* Eventually we could standardize to using '_' for clk-*.c files to follow the
- * TRM naming and leave out the tmp name here.
+ * TRM naming.
*/
static struct device_node *ti_find_clock_provider(struct device_node *from,
const char *name)
{
+ char *tmp __free(kfree) = NULL;
struct device_node *np;
bool found = false;
const char *n;
- char *tmp;
+ char *p;
tmp = kstrdup_and_replace(name, '-', '_', GFP_KERNEL);
if (!tmp)
return NULL;
+ /* Ignore a possible address for the node name */
+ p = strchr(tmp, '@');
+ if (p)
+ *p = '\0';
+
/* Node named "clock" with "clock-output-names" */
for_each_of_allnodes_from(from, np) {
if (of_property_read_string_index(np, "clock-output-names",
@@ -140,7 +148,6 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
break;
}
}
- kfree(tmp);
if (found) {
of_node_put(from);
@@ -148,7 +155,7 @@ static struct device_node *ti_find_clock_provider(struct device_node *from,
}
/* Fall back to using old node name base provider name */
- return of_find_node_by_name(from, name);
+ return of_find_node_by_name(from, tmp);
}
/**
@@ -301,8 +308,9 @@ int __init ti_clk_retry_init(struct device_node *node, void *user,
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg)
{
- u32 val;
- int i;
+ u32 clksel_addr, val;
+ bool is_clksel = false;
+ int i, err;
for (i = 0; i < CLK_MAX_MEMMAPS; i++) {
if (clocks_node_ptr[i] == node->parent)
@@ -318,21 +326,62 @@ int ti_clk_get_reg_addr(struct device_node *node, int index,
reg->index = i;
- if (of_property_read_u32_index(node, "reg", index, &val)) {
- if (of_property_read_u32_index(node->parent, "reg",
- index, &val)) {
- pr_err("%pOFn or parent must have reg[%d]!\n",
- node, index);
+ if (of_device_is_compatible(node->parent, "ti,clksel")) {
+ err = of_property_read_u32_index(node->parent, "reg", index, &clksel_addr);
+ if (err) {
+ pr_err("%pOFn parent clksel must have reg[%d]!\n", node, index);
return -EINVAL;
}
+ is_clksel = true;
}
+ err = of_property_read_u32_index(node, "reg", index, &val);
+ if (err && is_clksel) {
+ /* Legacy clksel with no reg and a possible ti,bit-shift property */
+ reg->offset = clksel_addr;
+ reg->bit = ti_clk_get_legacy_bit_shift(node);
+ reg->ptr = NULL;
+
+ return 0;
+ }
+
+ /* Updated clksel clock with a proper reg property */
+ if (is_clksel) {
+ reg->offset = clksel_addr;
+ reg->bit = val;
+ reg->ptr = NULL;
+ return 0;
+ }
+
+ /* Other clocks that may or may not have ti,bit-shift property */
reg->offset = val;
+ reg->bit = ti_clk_get_legacy_bit_shift(node);
reg->ptr = NULL;
return 0;
}
+/**
+ * ti_clk_get_legacy_bit_shift - get bit shift for a clock register
+ * @node: device node for the clock
+ *
+ * Gets the clock register bit shift using the legacy ti,bit-shift
+ * property. Only needed for legacy clocks, and can eventually be
+ * dropped once all the composite clocks use a clksel node with a
+ * proper reg property.
+ *
+ * Return: the ti,bit-shift value if present and within 0..31, 0 otherwise.
+ */
+int ti_clk_get_legacy_bit_shift(struct device_node *node)
+{
+ int err;
+ u32 val;
+
+ err = of_property_read_u32(node, "ti,bit-shift", &val);
+ if (!err && in_range(val, 0, 32))
+ return val;
+
+ return 0;
+}
+
void ti_clk_latch(struct clk_omap_reg *reg, s8 shift)
{
u32 latch;
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
index 16a9f7c2280a..2de7acea1ea0 100644
--- a/drivers/clk/ti/clock.h
+++ b/drivers/clk/ti/clock.h
@@ -216,6 +216,7 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
int ti_clk_get_reg_addr(struct device_node *node, int index,
struct clk_omap_reg *reg);
+int ti_clk_get_legacy_bit_shift(struct device_node *node);
void ti_dt_clocks_register(struct ti_dt_clk *oclks);
int ti_clk_retry_init(struct device_node *node, void *user,
ti_of_clk_init_cb_t func);
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index 5d5bb123ba94..ade99ab6cfa9 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -477,10 +477,7 @@ static int __init ti_clk_divider_populate(struct device_node *node,
if (ret)
return ret;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- div->shift = val;
- else
- div->shift = 0;
+ div->shift = div->reg.bit;
if (!of_property_read_u32(node, "ti,latch-bit", &val))
div->latch = val;
diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c
index 8e477d50d0fd..a9febd6356b8 100644
--- a/drivers/clk/ti/gate.c
+++ b/drivers/clk/ti/gate.c
@@ -132,7 +132,6 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
struct clk_omap_reg reg;
const char *name;
u8 enable_bit = 0;
- u32 val;
u32 flags = 0;
u8 clk_gate_flags = 0;
@@ -140,8 +139,7 @@ static void __init _of_ti_gate_clk_setup(struct device_node *node,
if (ti_clk_get_reg_addr(node, 0, &reg))
return;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- enable_bit = val;
+ enable_bit = reg.bit;
}
if (of_clk_get_parent_count(node) != 1) {
@@ -170,7 +168,6 @@ _of_ti_composite_gate_clk_setup(struct device_node *node,
const struct clk_hw_omap_ops *hw_ops)
{
struct clk_hw_omap *gate;
- u32 val = 0;
gate = kzalloc(sizeof(*gate), GFP_KERNEL);
if (!gate)
@@ -179,9 +176,7 @@ _of_ti_composite_gate_clk_setup(struct device_node *node,
if (ti_clk_get_reg_addr(node, 0, &gate->enable_reg))
goto cleanup;
- of_property_read_u32(node, "ti,bit-shift", &val);
-
- gate->enable_bit = val;
+ gate->enable_bit = gate->enable_reg.bit;
gate->ops = hw_ops;
if (!ti_clk_add_component(node, &gate->hw, CLK_COMPONENT_TYPE_GATE))
diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c
index 172301c646f8..3eb35c87c0ed 100644
--- a/drivers/clk/ti/interface.c
+++ b/drivers/clk/ti/interface.c
@@ -66,13 +66,11 @@ static void __init _of_ti_interface_clk_setup(struct device_node *node,
struct clk_omap_reg reg;
u8 enable_bit = 0;
const char *name;
- u32 val;
if (ti_clk_get_reg_addr(node, 0, &reg))
return;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- enable_bit = val;
+ enable_bit = reg.bit;
parent_name = of_clk_get_parent_name(node, 0);
if (!parent_name) {
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 1ebafa386be6..216d85d6aac6 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -189,7 +189,7 @@ static void of_mux_clk_setup(struct device_node *node)
if (ti_clk_get_reg_addr(node, 0, &reg))
goto cleanup;
- of_property_read_u32(node, "ti,bit-shift", &shift);
+ shift = reg.bit;
of_property_read_u32(node, "ti,latch-bit", &latch);
@@ -252,7 +252,6 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
{
struct clk_omap_mux *mux;
unsigned int num_parents;
- u32 val;
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
if (!mux)
@@ -261,8 +260,7 @@ static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
if (ti_clk_get_reg_addr(node, 0, &mux->reg))
goto cleanup;
- if (!of_property_read_u32(node, "ti,bit-shift", &val))
- mux->shift = val;
+ mux->shift = mux->reg.bit;
if (of_property_read_bool(node, "ti,index-starts-at-one"))
mux->flags |= CLK_MUX_INDEX_ONE;
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 44a61dc6f932..ab1c8c2b66b8 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
@@ -31,10 +32,7 @@
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
-#define GT_CONTROL_PRESCALER_SHIFT 8
-#define GT_CONTROL_PRESCALER_MAX 0xF
-#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
- GT_CONTROL_PRESCALER_SHIFT)
+#define GT_CONTROL_PRESCALER_MASK GENMASK(15, 8)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
@@ -52,7 +50,8 @@
*/
static void __iomem *gt_base;
static struct notifier_block gt_clk_rate_change_nb;
-static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
+static u32 gt_psv_new, gt_psv_bck;
+static unsigned long gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
@@ -88,7 +87,7 @@ static u64 gt_counter_read(void)
return _gt_counter_read();
}
-/**
+/*
* To ensure that updates to comparator value register do not set the
* Interrupt Status Register proceed as follows:
* 1. Clear the Comp Enable bit in the Timer Control Register.
@@ -247,7 +246,7 @@ static void gt_write_presc(u32 psv)
reg = readl(gt_base + GT_CONTROL);
reg &= ~GT_CONTROL_PRESCALER_MASK;
- reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
+ reg |= FIELD_PREP(GT_CONTROL_PRESCALER_MASK, psv);
writel(reg, gt_base + GT_CONTROL);
}
@@ -256,8 +255,7 @@ static u32 gt_read_presc(void)
u32 reg;
reg = readl(gt_base + GT_CONTROL);
- reg &= GT_CONTROL_PRESCALER_MASK;
- return reg >> GT_CONTROL_PRESCALER_SHIFT;
+ return FIELD_GET(GT_CONTROL_PRESCALER_MASK, reg);
}
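As a stand-alone illustration of the <linux/bitfield.h> idiom the patch converts to (not part of the driver; PRESCALER_MASK is just a local name for the same GENMASK(15, 8) field):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define PRESCALER_MASK	GENMASK(15, 8)

static u32 set_prescaler(u32 reg, u32 psv)
{
	reg &= ~PRESCALER_MASK;				/* clear bits 15:8 */
	return reg | FIELD_PREP(PRESCALER_MASK, psv);	/* shift psv into place */
}

static u32 get_prescaler(u32 reg)
{
	return FIELD_GET(PRESCALER_MASK, reg);		/* extract and shift down */
}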
static void __init gt_delay_timer_init(void)
@@ -272,9 +270,9 @@ static int __init gt_clocksource_init(void)
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
/* set prescaler and enable timer on all the cores */
- writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
- GT_CONTROL_PRESCALER_SHIFT)
- | GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+ writel(FIELD_PREP(GT_CONTROL_PRESCALER_MASK,
+ CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) |
+ GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
@@ -290,18 +288,17 @@ static int gt_clk_rate_change_cb(struct notifier_block *nb,
switch (event) {
case PRE_RATE_CHANGE:
{
- int psv;
-
- psv = DIV_ROUND_CLOSEST(ndata->new_rate,
- gt_target_rate);
+ unsigned long psv;
- if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ psv = DIV_ROUND_CLOSEST(ndata->new_rate, gt_target_rate);
+ if (!psv ||
+ abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
return NOTIFY_BAD;
psv--;
/* prescaler within legal range? */
- if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+ if (!FIELD_FIT(GT_CONTROL_PRESCALER_MASK, psv))
return NOTIFY_BAD;
/*
@@ -411,7 +408,7 @@ static int __init global_timer_of_register(struct device_node *np)
err = gt_clocksource_init();
if (err)
goto out_irq;
-
+
err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
"clockevents/arm/global_timer:starting",
gt_starting_cpu, gt_dying_cpu);
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 8ff7cd4e20bb..b2a080647e41 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -81,14 +81,14 @@ static int hv_ce_set_next_event(unsigned long delta,
current_tick = hv_read_reference_counter();
current_tick += delta;
- hv_set_register(HV_REGISTER_STIMER0_COUNT, current_tick);
+ hv_set_msr(HV_MSR_STIMER0_COUNT, current_tick);
return 0;
}
static int hv_ce_shutdown(struct clock_event_device *evt)
{
- hv_set_register(HV_REGISTER_STIMER0_COUNT, 0);
- hv_set_register(HV_REGISTER_STIMER0_CONFIG, 0);
+ hv_set_msr(HV_MSR_STIMER0_COUNT, 0);
+ hv_set_msr(HV_MSR_STIMER0_CONFIG, 0);
if (direct_mode_enabled && stimer0_irq >= 0)
disable_percpu_irq(stimer0_irq);
@@ -119,7 +119,7 @@ static int hv_ce_set_oneshot(struct clock_event_device *evt)
timer_cfg.direct_mode = 0;
timer_cfg.sintx = stimer0_message_sint;
}
- hv_set_register(HV_REGISTER_STIMER0_CONFIG, timer_cfg.as_uint64);
+ hv_set_msr(HV_MSR_STIMER0_CONFIG, timer_cfg.as_uint64);
return 0;
}
@@ -372,11 +372,11 @@ static __always_inline u64 read_hv_clock_msr(void)
* is set to 0 when the partition is created and is incremented in 100
* nanosecond units.
*
- * Use hv_raw_get_register() because this function is used from
- * noinstr. Notable; while HV_REGISTER_TIME_REF_COUNT is a synthetic
+ * Use hv_raw_get_msr() because this function is used from
+ * noinstr. Notably, while HV_MSR_TIME_REF_COUNT is a synthetic
* register it doesn't need the GHCB path.
*/
- return hv_raw_get_register(HV_REGISTER_TIME_REF_COUNT);
+ return hv_raw_get_msr(HV_MSR_TIME_REF_COUNT);
}
/*
@@ -439,9 +439,9 @@ static void suspend_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Disable the TSC page */
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 0;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
@@ -450,10 +450,10 @@ static void resume_hv_clock_tsc(struct clocksource *arg)
union hv_reference_tsc_msr tsc_msr;
/* Re-enable the TSC page */
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
}
#ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK
@@ -555,14 +555,14 @@ static void __init hv_init_tsc_clocksource(void)
* thus TSC clocksource will work even without the real TSC page
* mapped.
*/
- tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC);
+ tsc_msr.as_uint64 = hv_get_msr(HV_MSR_REFERENCE_TSC);
if (hv_root_partition)
tsc_pfn = tsc_msr.pfn;
else
tsc_pfn = HVPFN_DOWN(virt_to_phys(tsc_page));
tsc_msr.enable = 1;
tsc_msr.pfn = tsc_pfn;
- hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64);
+ hv_set_msr(HV_MSR_REFERENCE_TSC, tsc_msr.as_uint64);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c
index 9a55e733ae99..09fd292eb83d 100644
--- a/drivers/clocksource/timer-clint.c
+++ b/drivers/clocksource/timer-clint.c
@@ -131,7 +131,7 @@ static int clint_timer_starting_cpu(unsigned int cpu)
struct clock_event_device *ce = per_cpu_ptr(&clint_clock_event, cpu);
ce->cpumask = cpumask_of(cpu);
- clockevents_config_and_register(ce, clint_timer_freq, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, clint_timer_freq, 100, ULONG_MAX);
enable_percpu_irq(clint_timer_irq,
irq_get_trigger_type(clint_timer_irq));
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 6a878d227a13..489e69169ed4 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -258,9 +258,8 @@ static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *ced = dev_id;
struct imx_timer *imxtm = to_imx_timer(ced);
- uint32_t tstat;
- tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
+ readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);
imxtm->gpt->gpt_irq_acknowledge(imxtm);
diff --git a/drivers/clocksource/timer-imx-sysctr.c b/drivers/clocksource/timer-imx-sysctr.c
index 5a7a951c4efc..44525813be1e 100644
--- a/drivers/clocksource/timer-imx-sysctr.c
+++ b/drivers/clocksource/timer-imx-sysctr.c
@@ -4,48 +4,62 @@
#include <linux/interrupt.h>
#include <linux/clockchips.h>
+#include <linux/slab.h>
#include "timer-of.h"
#define CMP_OFFSET 0x10000
+#define RD_OFFSET 0x20000
#define CNTCV_LO 0x8
#define CNTCV_HI 0xc
#define CMPCV_LO (CMP_OFFSET + 0x20)
#define CMPCV_HI (CMP_OFFSET + 0x24)
#define CMPCR (CMP_OFFSET + 0x2c)
+#define CNTCV_LO_IMX95 (RD_OFFSET + 0x8)
+#define CNTCV_HI_IMX95 (RD_OFFSET + 0xc)
#define SYS_CTR_EN 0x1
#define SYS_CTR_IRQ_MASK 0x2
#define SYS_CTR_CLK_DIV 0x3
-static void __iomem *sys_ctr_base __ro_after_init;
-static u32 cmpcr __ro_after_init;
+struct sysctr_private {
+ u32 cmpcr;
+ u32 lo_off;
+ u32 hi_off;
+};
-static void sysctr_timer_enable(bool enable)
+static void sysctr_timer_enable(struct clock_event_device *evt, bool enable)
{
- writel(enable ? cmpcr | SYS_CTR_EN : cmpcr, sys_ctr_base + CMPCR);
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
+
+ writel(enable ? priv->cmpcr | SYS_CTR_EN : priv->cmpcr, base + CMPCR);
}
-static void sysctr_irq_acknowledge(void)
+static void sysctr_irq_acknowledge(struct clock_event_device *evt)
{
/*
* clear the enable bit(EN =0) will clear
* the status bit(ISTAT = 0), then the interrupt
* signal will be negated(acknowledged).
*/
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
}
-static inline u64 sysctr_read_counter(void)
+static inline u64 sysctr_read_counter(struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ struct sysctr_private *priv = to->private_data;
+ void __iomem *base = timer_of_base(to);
u32 cnt_hi, tmp_hi, cnt_lo;
do {
- cnt_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
- cnt_lo = readl_relaxed(sys_ctr_base + CNTCV_LO);
- tmp_hi = readl_relaxed(sys_ctr_base + CNTCV_HI);
+ cnt_hi = readl_relaxed(base + priv->hi_off);
+ cnt_lo = readl_relaxed(base + priv->lo_off);
+ tmp_hi = readl_relaxed(base + priv->hi_off);
} while (tmp_hi != cnt_hi);
return ((u64) cnt_hi << 32) | cnt_lo;
@@ -54,22 +68,24 @@ static inline u64 sysctr_read_counter(void)
static int sysctr_set_next_event(unsigned long delta,
struct clock_event_device *evt)
{
+ struct timer_of *to = to_timer_of(evt);
+ void __iomem *base = timer_of_base(to);
u32 cmp_hi, cmp_lo;
u64 next;
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
- next = sysctr_read_counter();
+ next = sysctr_read_counter(evt);
next += delta;
cmp_hi = (next >> 32) & 0x00fffff;
cmp_lo = next & 0xffffffff;
- writel_relaxed(cmp_hi, sys_ctr_base + CMPCV_HI);
- writel_relaxed(cmp_lo, sys_ctr_base + CMPCV_LO);
+ writel_relaxed(cmp_hi, base + CMPCV_HI);
+ writel_relaxed(cmp_lo, base + CMPCV_LO);
- sysctr_timer_enable(true);
+ sysctr_timer_enable(evt, true);
return 0;
}
@@ -81,7 +97,7 @@ static int sysctr_set_state_oneshot(struct clock_event_device *evt)
static int sysctr_set_state_shutdown(struct clock_event_device *evt)
{
- sysctr_timer_enable(false);
+ sysctr_timer_enable(evt, false);
return 0;
}
@@ -90,7 +106,7 @@ static irqreturn_t sysctr_timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- sysctr_irq_acknowledge();
+ sysctr_irq_acknowledge(evt);
evt->event_handler(evt);
@@ -117,34 +133,75 @@ static struct timer_of to_sysctr = {
},
};
-static void __init sysctr_clockevent_init(void)
+static int __init __sysctr_timer_init(struct device_node *np)
{
+ struct sysctr_private *priv;
+ void __iomem *base;
+ int ret;
+
+ priv = kzalloc(sizeof(struct sysctr_private), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = timer_of_init(np, &to_sysctr);
+ if (ret) {
+ kfree(priv);
+ return ret;
+ }
+
+ if (!of_property_read_bool(np, "nxp,no-divider")) {
+ /* system counter clock is divided by 3 internally */
+ to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
+ }
+
to_sysctr.clkevt.cpumask = cpu_possible_mask;
+ to_sysctr.private_data = priv;
+
+ base = timer_of_base(&to_sysctr);
+ priv->cmpcr = readl(base + CMPCR) & ~SYS_CTR_EN;
+
+ return 0;
+}
+
+static int __init sysctr_timer_init(struct device_node *np)
+{
+ struct sysctr_private *priv;
+ int ret;
+
+ ret = __sysctr_timer_init(np);
+ if (ret)
+ return ret;
+
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO;
+ priv->hi_off = CNTCV_HI;
clockevents_config_and_register(&to_sysctr.clkevt,
timer_of_rate(&to_sysctr),
0xff, 0x7fffffff);
+
+ return 0;
}
-static int __init sysctr_timer_init(struct device_node *np)
+static int __init sysctr_timer_imx95_init(struct device_node *np)
{
- int ret = 0;
+ struct sysctr_private *priv;
+ int ret;
- ret = timer_of_init(np, &to_sysctr);
+ ret = __sysctr_timer_init(np);
if (ret)
return ret;
- if (!of_property_read_bool(np, "nxp,no-divider")) {
- /* system counter clock is divided by 3 internally */
- to_sysctr.of_clk.rate /= SYS_CTR_CLK_DIV;
- }
-
- sys_ctr_base = timer_of_base(&to_sysctr);
- cmpcr = readl(sys_ctr_base + CMPCR);
- cmpcr &= ~SYS_CTR_EN;
+ priv = to_sysctr.private_data;
+ priv->lo_off = CNTCV_LO_IMX95;
+ priv->hi_off = CNTCV_HI_IMX95;
- sysctr_clockevent_init();
+ clockevents_config_and_register(&to_sysctr.clkevt,
+ timer_of_rate(&to_sysctr),
+ 0xff, 0x7fffffff);
return 0;
}
+
TIMER_OF_DECLARE(sysctr_timer, "nxp,sysctr-timer", sysctr_timer_init);
+TIMER_OF_DECLARE(sysctr_timer_imx95, "nxp,imx95-sysctr-timer", sysctr_timer_imx95_init);
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index e66dcbd66566..48ce50c5f5e6 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -108,13 +108,16 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
{
struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);
+ /* Clear timer interrupt */
+ riscv_clock_event_stop();
+
ce->cpumask = cpumask_of(cpu);
ce->irq = riscv_clock_event_irq;
if (riscv_timer_cannot_wake_cpu)
ce->features |= CLOCK_EVT_FEAT_C3STOP;
if (static_branch_likely(&riscv_sstc_available))
ce->rating = 450;
- clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);
+ clockevents_config_and_register(ce, riscv_timebase, 100, ULONG_MAX);
enable_percpu_irq(riscv_clock_event_irq,
irq_get_trigger_type(riscv_clock_event_irq));
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index c9a753f96ba1..0a4ea3288bfb 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -73,7 +73,7 @@ static void stm32_timer_of_bits_set(struct timer_of *to, int bits)
* Accessor helper to get the number of bits in the timer-of private
* structure.
*
- * Returns an integer corresponding to the number of bits.
+ * Returns: an integer corresponding to the number of bits.
*/
static int stm32_timer_of_bits_get(struct timer_of *to)
{
@@ -177,7 +177,7 @@ static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
}
/**
- * stm32_timer_width - Sort out the timer width (32/16)
+ * stm32_timer_set_width - Sort out the timer width (32/16)
* @to: a pointer to a timer-of structure
*
* Write the 32-bit max value and read/return the result. If the timer
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index 59b0be482f32..a86529a70737 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/**
+/*
* timer-ti-32k.c - OMAP2 32k Timer Support
*
* Copyright (C) 2009 Nokia Corporation
diff --git a/drivers/comedi/drivers/das08.c b/drivers/comedi/drivers/das08.c
index 5d5b9174f88a..49944ce1f813 100644
--- a/drivers/comedi/drivers/das08.c
+++ b/drivers/comedi/drivers/das08.c
@@ -177,7 +177,6 @@ static int das08_ai_insn_read(struct comedi_device *dev,
int ret;
chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
/* clear crap */
inb(dev->iobase + DAS08_AI_LSB_REG);
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 35efb53d5492..94e55c40970a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -302,4 +302,33 @@ config QORIQ_CPUFREQ
which are capable of changing the CPU's frequency dynamically.
endif
+
+config ACPI_CPPC_CPUFREQ
+ tristate "CPUFreq driver based on the ACPI CPPC spec"
+ depends on ACPI_PROCESSOR
+ depends on ARM || ARM64 || RISCV
+ select ACPI_CPPC_LIB
+ help
+ This adds a CPUFreq driver which uses CPPC methods
+ as described in the ACPIv5.1 spec. CPPC stands for
+ Collaborative Processor Performance Controls. It
+ is based on an abstract continuous scale of CPU
+ performance values which allows the remote power
+ processor to flexibly optimize for power and
+ performance. CPPC relies on power management firmware
+ support for its operation.
+
+ If in doubt, say N.
+
+config ACPI_CPPC_CPUFREQ_FIE
+ bool "Frequency Invariance support for CPPC cpufreq driver"
+ depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
+ depends on ARM || ARM64 || RISCV
+ default y
+ help
+ This extends frequency invariance support in the CPPC cpufreq driver,
+ by using CPPC delivered and reference performance counters.
+
+ If in doubt, say N.
+
endmenu
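A minimal sketch, assuming the usual IS_ENABLED() pattern, of how C code can key off these options (illustrative only, not taken from the cppc_cpufreq driver):

#include <linux/kconfig.h>

static inline bool cppc_fie_configured(void)
{
	/* ACPI_CPPC_CPUFREQ_FIE is a bool option; IS_ENABLED() is 1 when it is set */
	return IS_ENABLED(CONFIG_ACPI_CPPC_CPUFREQ_FIE);
}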
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0ebad77666e..96b404ce829f 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,32 +3,6 @@
# ARM CPU Frequency scaling drivers
#
-config ACPI_CPPC_CPUFREQ
- tristate "CPUFreq driver based on the ACPI CPPC spec"
- depends on ACPI_PROCESSOR
- select ACPI_CPPC_LIB
- help
- This adds a CPUFreq driver which uses CPPC methods
- as described in the ACPIv5.1 spec. CPPC stands for
- Collaborative Processor Performance Controls. It
- is based on an abstract continuous scale of CPU
- performance values which allows the remote power
- processor to flexibly optimize for power and
- performance. CPPC relies on power management firmware
- support for its operation.
-
- If in doubt, say N.
-
-config ACPI_CPPC_CPUFREQ_FIE
- bool "Frequency Invariance support for CPPC cpufreq driver"
- depends on ACPI_CPPC_CPUFREQ && GENERIC_ARCH_TOPOLOGY
- default y
- help
- This extends frequency invariance support in the CPPC cpufreq driver,
- by using CPPC delivered and reference performance counters.
-
- If in doubt, say N.
-
config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
tristate "Allwinner nvmem based SUN50I CPUFreq driver"
depends on ARCH_SUNXI
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 8bd6e5e8f121..2d83bbc65dd0 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -208,7 +208,7 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu)
if (!priv)
return -ENOMEM;
- if (!alloc_cpumask_var(&priv->cpus, GFP_KERNEL))
+ if (!zalloc_cpumask_var(&priv->cpus, GFP_KERNEL))
return -ENOMEM;
cpumask_set_cpu(cpu, priv->cpus);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index f6f8d7f450e7..66e10a19d76a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -653,14 +653,16 @@ static ssize_t store_local_boost(struct cpufreq_policy *policy,
if (policy->boost_enabled == enable)
return count;
+ policy->boost_enabled = enable;
+
cpus_read_lock();
ret = cpufreq_driver->set_boost(policy, enable);
cpus_read_unlock();
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
return ret;
-
- policy->boost_enabled = enable;
+ }
return count;
}
@@ -1428,6 +1430,9 @@ static int cpufreq_online(unsigned int cpu)
goto out_free_policy;
}
+ /* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
+ policy->boost_enabled = cpufreq_boost_enabled() && policy_has_boost_freq(policy);
+
/*
* The initialization has succeeded and the policy is online.
* If there is a problem with its frequency table, take it
@@ -2769,11 +2774,12 @@ int cpufreq_boost_trigger_state(int state)
cpus_read_lock();
for_each_active_policy(policy) {
+ policy->boost_enabled = state;
ret = cpufreq_driver->set_boost(policy, state);
- if (ret)
+ if (ret) {
+ policy->boost_enabled = !policy->boost_enabled;
goto err_reset_state;
-
- policy->boost_enabled = state;
+ }
}
cpus_read_unlock();
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index c4d4643b6ca6..c17dc51a5a02 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -40,7 +40,7 @@ int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
cpufreq_for_each_valid_entry(pos, table) {
freq = pos->frequency;
- if (!cpufreq_boost_enabled()
+ if ((!cpufreq_boost_enabled() || !policy->boost_enabled)
&& (pos->flags & CPUFREQ_BOOST_FREQ))
continue;
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c
index 0b483bd0d3ca..3b4f6bfb2f4c 100644
--- a/drivers/cpufreq/scmi-cpufreq.c
+++ b/drivers/cpufreq/scmi-cpufreq.c
@@ -30,6 +30,7 @@ struct scmi_data {
static struct scmi_protocol_handle *ph;
static const struct scmi_perf_proto_ops *perf_ops;
+static struct cpufreq_driver scmi_cpufreq_driver;
static unsigned int scmi_cpufreq_get_rate(unsigned int cpu)
{
@@ -167,6 +168,12 @@ scmi_get_rate_limit(u32 domain, bool has_fast_switch)
return rate_limit;
}
+static struct freq_attr *scmi_cpufreq_hw_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+ NULL,
+};
+
static int scmi_cpufreq_init(struct cpufreq_policy *policy)
{
int ret, nr_opp, domain;
@@ -276,6 +283,17 @@ static int scmi_cpufreq_init(struct cpufreq_policy *policy)
policy->transition_delay_us =
scmi_get_rate_limit(domain, policy->fast_switch_possible);
+ if (policy_has_boost_freq(policy)) {
+ ret = cpufreq_enable_boost_support();
+ if (ret) {
+ dev_warn(cpu_dev, "failed to enable boost: %d\n", ret);
+ goto out_free_opp;
+ } else {
+ scmi_cpufreq_hw_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+ scmi_cpufreq_driver.boost_enabled = true;
+ }
+ }
+
return 0;
out_free_opp:
@@ -334,7 +352,7 @@ static struct cpufreq_driver scmi_cpufreq_driver = {
CPUFREQ_NEED_INITIAL_FREQ_CHECK |
CPUFREQ_IS_COOLING_DEV,
.verify = cpufreq_generic_frequency_table_verify,
- .attr = cpufreq_generic_attr,
+ .attr = scmi_cpufreq_hw_attr,
.target_index = scmi_cpufreq_set_target,
.fast_switch = scmi_cpufreq_fast_switch,
.get = scmi_cpufreq_get_rate,
diff --git a/drivers/cpuidle/cpuidle-riscv-sbi.c b/drivers/cpuidle/cpuidle-riscv-sbi.c
index e8094fc92491..a6e123dfe394 100644
--- a/drivers/cpuidle/cpuidle-riscv-sbi.c
+++ b/drivers/cpuidle/cpuidle-riscv-sbi.c
@@ -73,26 +73,6 @@ static inline bool sbi_is_domain_state_available(void)
return data->available;
}
-static int sbi_suspend_finisher(unsigned long suspend_type,
- unsigned long resume_addr,
- unsigned long opaque)
-{
- struct sbiret ret;
-
- ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
- suspend_type, resume_addr, opaque, 0, 0, 0);
-
- return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
-}
-
-static int sbi_suspend(u32 state)
-{
- if (state & SBI_HSM_SUSP_NON_RET_BIT)
- return cpu_suspend(state, sbi_suspend_finisher);
- else
- return sbi_suspend_finisher(state, 0, 0);
-}
-
static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
@@ -100,9 +80,9 @@ static __cpuidle int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
u32 state = states[idx];
if (state & SBI_HSM_SUSP_NON_RET_BIT)
- return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(riscv_sbi_hart_suspend, idx, state);
else
- return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
+ return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(riscv_sbi_hart_suspend,
idx, state);
}
@@ -133,7 +113,7 @@ static __cpuidle int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,
else
state = states[idx];
- ret = sbi_suspend(state) ? -1 : idx;
+ ret = riscv_sbi_hart_suspend(state) ? -1 : idx;
ct_cpuidle_exit();
@@ -206,17 +186,6 @@ static const struct of_device_id sbi_cpuidle_state_match[] = {
{ },
};
-static bool sbi_suspend_state_is_valid(u32 state)
-{
- if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
- state < SBI_HSM_SUSPEND_RET_PLATFORM)
- return false;
- if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
- state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
- return false;
- return true;
-}
-
static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
{
int err = of_property_read_u32(np, "riscv,sbi-suspend-param", state);
@@ -226,7 +195,7 @@ static int sbi_dt_parse_state_node(struct device_node *np, u32 *state)
return err;
}
- if (!sbi_suspend_state_is_valid(*state)) {
+ if (!riscv_sbi_suspend_state_is_valid(*state)) {
pr_warn("Invalid SBI suspend state %#x\n", *state);
return -EINVAL;
}
@@ -607,16 +576,8 @@ static int __init sbi_cpuidle_init(void)
int ret;
struct platform_device *pdev;
- /*
- * The SBI HSM suspend function is only available when:
- * 1) SBI version is 0.3 or higher
- * 2) SBI HSM extension is available
- */
- if ((sbi_spec_version < sbi_mk_version(0, 3)) ||
- !sbi_probe_extension(SBI_EXT_HSM)) {
- pr_info("HSM suspend not available\n");
+ if (!riscv_sbi_hsm_is_supported())
return 0;
- }
ret = platform_driver_register(&sbi_cpuidle_driver);
if (ret)
diff --git a/drivers/cxl/core/trace.h b/drivers/cxl/core/trace.h
index bdf117a33744..e5f13260fc52 100644
--- a/drivers/cxl/core/trace.h
+++ b/drivers/cxl/core/trace.h
@@ -646,18 +646,18 @@ u64 cxl_trace_hpa(struct cxl_region *cxlr, struct cxl_memdev *memdev, u64 dpa);
TRACE_EVENT(cxl_poison,
- TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *region,
+ TP_PROTO(struct cxl_memdev *cxlmd, struct cxl_region *cxlr,
const struct cxl_poison_record *record, u8 flags,
__le64 overflow_ts, enum cxl_poison_trace_type trace_type),
- TP_ARGS(cxlmd, region, record, flags, overflow_ts, trace_type),
+ TP_ARGS(cxlmd, cxlr, record, flags, overflow_ts, trace_type),
TP_STRUCT__entry(
__string(memdev, dev_name(&cxlmd->dev))
__string(host, dev_name(cxlmd->dev.parent))
__field(u64, serial)
__field(u8, trace_type)
- __string(region, region)
+ __string(region, cxlr ? dev_name(&cxlr->dev) : "")
__field(u64, overflow_ts)
__field(u64, hpa)
__field(u64, dpa)
@@ -677,10 +677,10 @@ TRACE_EVENT(cxl_poison,
__entry->source = cxl_poison_record_source(record);
__entry->trace_type = trace_type;
__entry->flags = flags;
- if (region) {
- __assign_str(region, dev_name(&region->dev));
- memcpy(__entry->uuid, &region->params.uuid, 16);
- __entry->hpa = cxl_trace_hpa(region, cxlmd,
+ if (cxlr) {
+ __assign_str(region, dev_name(&cxlr->dev));
+ memcpy(__entry->uuid, &cxlr->params.uuid, 16);
+ __entry->hpa = cxl_trace_hpa(cxlr, cxlmd,
__entry->dpa);
} else {
__assign_str(region, "");
diff --git a/drivers/dio/dio-driver.c b/drivers/dio/dio-driver.c
index 69c46935ffc7..2d9fa6011945 100644
--- a/drivers/dio/dio-driver.c
+++ b/drivers/dio/dio-driver.c
@@ -123,7 +123,7 @@ static int dio_bus_match(struct device *dev, struct device_driver *drv)
}
-struct bus_type dio_bus_type = {
+const struct bus_type dio_bus_type = {
.name = "dio",
.match = dio_bus_match,
.probe = dio_device_probe,
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index c0976f6268d3..e6cdb905eeac 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -322,7 +322,8 @@ static ssize_t show_immediate(struct device *dev,
if (value < 0)
return -ENOENT;
- return sysfs_emit(buf, "0x%06x\n", value);
+ // Note that this function is also called by init_fw_attribute_group() with a NULL pointer.
+ return buf ? sysfs_emit(buf, "0x%06x\n", value) : 0;
}
#define IMMEDIATE_ATTR(name, key) \
@@ -334,6 +335,8 @@ static ssize_t show_text_leaf(struct device *dev,
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
const u32 *directories[] = {NULL, NULL};
+ size_t bufsize;
+ char dummy_buf[2];
int i, ret = -ENOENT;
down_read(&fw_device_rwsem);
@@ -355,9 +358,16 @@ static ssize_t show_text_leaf(struct device *dev,
}
}
+ // Note that this function is also called by init_fw_attribute_group() with a NULL pointer.
+ if (buf) {
+ bufsize = PAGE_SIZE - 1;
+ } else {
+ buf = dummy_buf;
+ bufsize = 1;
+ }
+
for (i = 0; i < ARRAY_SIZE(directories) && !!directories[i]; ++i) {
- int result = fw_csr_string(directories[i], attr->key, buf,
- PAGE_SIZE - 1);
+ int result = fw_csr_string(directories[i], attr->key, buf, bufsize);
// Detected.
if (result >= 0) {
ret = result;
@@ -366,7 +376,7 @@ static ssize_t show_text_leaf(struct device *dev,
// in the root directory follows to the directory entry for vendor ID
// instead of the immediate value for vendor ID.
result = fw_csr_string(directories[i], CSR_DIRECTORY | attr->key, buf,
- PAGE_SIZE - 1);
+ bufsize);
if (result >= 0)
ret = result;
}
diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c
index 8e832d1ad825..345fff167b52 100644
--- a/drivers/firmware/arm_scmi/perf.c
+++ b/drivers/firmware/arm_scmi/perf.c
@@ -871,6 +871,9 @@ static int scmi_dvfs_device_opps_add(const struct scmi_protocol_handle *ph,
else
freq = dom->opp[idx].indicative_freq * dom->mult_factor;
+ /* All OPPs above the sustained frequency are treated as turbo */
+ data.turbo = freq > dom->sustained_freq_khz * 1000;
+
data.level = dom->opp[idx].perf;
data.freq = freq;
diff --git a/drivers/firmware/efi/earlycon.c b/drivers/firmware/efi/earlycon.c
index f80a9af3d16e..d18a1a5de144 100644
--- a/drivers/firmware/efi/earlycon.c
+++ b/drivers/firmware/efi/earlycon.c
@@ -252,7 +252,7 @@ static int __init efi_earlycon_setup(struct earlycon_device *device,
if (si->lfb_depth != 32)
return -ENODEV;
- font = get_default_font(xres, yres, -1, -1);
+ font = get_default_font(xres, yres, NULL, NULL);
if (!font)
return -ENODEV;
diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c
index 456d0e5eaf78..cc807ed35aed 100644
--- a/drivers/firmware/efi/sysfb_efi.c
+++ b/drivers/firmware/efi/sysfb_efi.c
@@ -336,7 +336,7 @@ static int efifb_add_links(struct fwnode_handle *fwnode)
if (!sup_np)
return 0;
- fwnode_link_add(fwnode, of_fwnode_handle(sup_np));
+ fwnode_link_add(fwnode, of_fwnode_handle(sup_np), 0);
of_node_put(sup_np);
return 0;
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index 79789f0563f6..9bc45357e1a8 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -3,6 +3,7 @@
* Xilinx Zynq MPSoC Firmware layer
*
* Copyright (C) 2014-2022 Xilinx, Inc.
+ * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
*
* Michal Simek <michal.simek@amd.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -1385,6 +1386,30 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out)
EXPORT_SYMBOL_GPL(zynqmp_pm_aes_engine);
/**
+ * zynqmp_pm_efuse_access - Provides access to efuse memory.
+ * @address: Address of the efuse params structure
+ * @out: Returned output value
+ *
+ * Return: 0 on success, or an error code on failure.
+ */
+int zynqmp_pm_efuse_access(const u64 address, u32 *out)
+{
+ u32 ret_payload[PAYLOAD_ARG_CNT];
+ int ret;
+
+ if (!out)
+ return -EINVAL;
+
+ ret = zynqmp_pm_invoke_fn(PM_EFUSE_ACCESS, ret_payload, 2,
+ upper_32_bits(address),
+ lower_32_bits(address));
+ *out = ret_payload[1];
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_efuse_access);
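A short caller sketch (hypothetical; params_addr is assumed to be the physical/DMA address of an efuse params structure shared with the PMU firmware):

	u32 status;
	int err;

	err = zynqmp_pm_efuse_access(params_addr, &status);
	if (err)
		pr_err("efuse access failed: %d\n", err);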
+
+/**
* zynqmp_pm_sha_hash - Access the SHA engine to calculate the hash
* @address: Address of the data/ Address of output buffer where
* hash should be stored.
diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c
index e6d12fbab653..094ee97ea26c 100644
--- a/drivers/fpga/dfl.c
+++ b/drivers/fpga/dfl.c
@@ -327,7 +327,7 @@ static struct attribute *dfl_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(dfl_dev);
-static struct bus_type dfl_bus_type = {
+static const struct bus_type dfl_bus_type = {
.name = "dfl",
.match = dfl_bus_match,
.probe = dfl_bus_probe,
diff --git a/drivers/fpga/fpga-bridge.c b/drivers/fpga/fpga-bridge.c
index a024be2b84e2..79c473b3c7c3 100644
--- a/drivers/fpga/fpga-bridge.c
+++ b/drivers/fpga/fpga-bridge.c
@@ -30,7 +30,7 @@ int fpga_bridge_enable(struct fpga_bridge *bridge)
{
dev_dbg(&bridge->dev, "enable\n");
- if (bridge->br_ops && bridge->br_ops->enable_set)
+ if (bridge->br_ops->enable_set)
return bridge->br_ops->enable_set(bridge, 1);
return 0;
@@ -48,7 +48,7 @@ int fpga_bridge_disable(struct fpga_bridge *bridge)
{
dev_dbg(&bridge->dev, "disable\n");
- if (bridge->br_ops && bridge->br_ops->enable_set)
+ if (bridge->br_ops->enable_set)
return bridge->br_ops->enable_set(bridge, 0);
return 0;
@@ -296,7 +296,7 @@ static ssize_t state_show(struct device *dev,
struct fpga_bridge *bridge = to_fpga_bridge(dev);
int state = 1;
- if (bridge->br_ops && bridge->br_ops->enable_show) {
+ if (bridge->br_ops->enable_show) {
state = bridge->br_ops->enable_show(bridge);
if (state < 0)
return state;
@@ -401,7 +401,7 @@ void fpga_bridge_unregister(struct fpga_bridge *bridge)
* If the low level driver provides a method for putting bridge into
* a desired state upon unregister, do it.
*/
- if (bridge->br_ops && bridge->br_ops->fpga_bridge_remove)
+ if (bridge->br_ops->fpga_bridge_remove)
bridge->br_ops->fpga_bridge_remove(bridge);
device_unregister(&bridge->dev);
diff --git a/drivers/gnss/serial.c b/drivers/gnss/serial.c
index baa956494e79..0e43bf6294f8 100644
--- a/drivers/gnss/serial.c
+++ b/drivers/gnss/serial.c
@@ -80,7 +80,7 @@ static const struct gnss_operations gnss_serial_gnss_ops = {
.write_raw = gnss_serial_write_raw,
};
-static ssize_t gnss_serial_receive_buf(struct serdev_device *serdev,
+static size_t gnss_serial_receive_buf(struct serdev_device *serdev,
const u8 *buf, size_t count)
{
struct gnss_serial *gserial = serdev_device_get_drvdata(serdev);
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 6801a8fb2040..79375d14bbb6 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -160,7 +160,7 @@ static const struct gnss_operations sirf_gnss_ops = {
.write_raw = sirf_write_raw,
};
-static ssize_t sirf_receive_buf(struct serdev_device *serdev,
+static size_t sirf_receive_buf(struct serdev_device *serdev,
const u8 *buf, size_t count)
{
struct sirf_data *data = serdev_device_get_drvdata(serdev);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 182ed8f67850..5a0c476361c3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -68,6 +68,7 @@ config DRM_USE_DYNAMIC_DEBUG
config DRM_KUNIT_TEST_HELPERS
tristate
depends on DRM && KUNIT
+ select DRM_KMS_HELPER
help
KUnit Helpers for KMS drivers.
@@ -80,7 +81,6 @@ config DRM_KUNIT_TEST
select DRM_EXEC
select DRM_EXPORT_FOR_TESTS if m
select DRM_GEM_SHMEM_HELPER
- select DRM_KMS_HELPER
select DRM_KUNIT_TEST_HELPERS
select DRM_LIB_RANDOM
select PRIME_NUMBERS
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index f5f2945711be..35dd6effa9a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -146,7 +146,7 @@ int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev)
{
int ret;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete || adev->kfd.client.dev)
return 0;
ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 14dc9d2d8d53..df58a6a1a67e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -2869,14 +2869,16 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
mutex_lock(&process_info->lock);
- drm_exec_init(&exec, 0, 0);
+ drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
drm_exec_until_all_locked(&exec) {
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {
ret = amdgpu_vm_lock_pd(peer_vm, &exec, 2);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("Locking VM PD failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
/* Reserve all BOs and page tables/directory. Add all BOs from
@@ -2889,8 +2891,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
gobj = &mem->bo->tbo.base;
ret = drm_exec_prepare_obj(&exec, gobj, 1);
drm_exec_retry_on_contention(&exec);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ pr_err("drm_exec_prepare_obj failed, ret: %d\n", ret);
goto ttm_reserve_fail;
+ }
}
}
@@ -2950,8 +2954,10 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
- if (ret)
+ if (ret) {
+ pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
+ }
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 1e9454e6e4cb..5dc24c971b41 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4040,10 +4040,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
* early on during init and before calling to RREG32.
*/
adev->reset_domain = amdgpu_reset_create_reset_domain(SINGLE_DEVICE, "amdgpu-reset-dev");
- if (!adev->reset_domain) {
- r = -ENOMEM;
- goto unmap_memory;
- }
+ if (!adev->reset_domain)
+ return -ENOMEM;
/* detect hw virtualization here */
amdgpu_detect_virtualization(adev);
@@ -4053,7 +4051,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
r = amdgpu_device_get_job_timeout_settings(adev);
if (r) {
dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
- goto unmap_memory;
+ return r;
}
amdgpu_device_set_mcbp(adev);
@@ -4061,12 +4059,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
/* early init functions */
r = amdgpu_device_ip_early_init(adev);
if (r)
- goto unmap_memory;
+ return r;
/* Get rid of things like offb */
r = drm_aperture_remove_conflicting_pci_framebuffers(adev->pdev, &amdgpu_kms_driver);
if (r)
- goto unmap_memory;
+ return r;
/* Enable TMZ based on IP_VERSION */
amdgpu_gmc_tmz_set(adev);
@@ -4076,7 +4074,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (adev->gmc.xgmi.supported) {
r = adev->gfxhub.funcs->get_xgmi_info(adev);
if (r)
- goto unmap_memory;
+ return r;
}
/* enable PCIE atomic ops */
@@ -4345,8 +4343,6 @@ release_ras_con:
failed:
amdgpu_vf_error_trans_all(adev);
-unmap_memory:
- iounmap(adev->rmmio);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 15b188aaf681..80b9642f2bc4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2479,8 +2479,11 @@ static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
}
for (i = 0; i < mgpu_info.num_dgpu; i++) {
adev = mgpu_info.gpu_ins[i].adev;
- if (!adev->kfd.init_complete)
+ if (!adev->kfd.init_complete) {
+ kgd2kfd_init_zone_device(adev);
amdgpu_amdkfd_device_init(adev);
+ amdgpu_amdkfd_drm_client_create(adev);
+ }
amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index f8b48fd93108..55d5508987ff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -687,7 +687,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
r = amdgpu_ring_test_helper(kiq_ring);
spin_unlock(&kiq->ring_lock);
if (r)
- DRM_ERROR("KCQ enable failed\n");
+ DRM_ERROR("KGQ enable failed\n");
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
index 55b65fc04b65..431ec72655ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c
@@ -129,13 +129,25 @@ static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = {
*/
int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr)
{
+ int r;
+
if (bo->kfd_bo)
- return mmu_interval_notifier_insert(&bo->notifier, current->mm,
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm,
addr, amdgpu_bo_size(bo),
&amdgpu_hmm_hsa_ops);
- return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
- amdgpu_bo_size(bo),
- &amdgpu_hmm_gfx_ops);
+ else
+ r = mmu_interval_notifier_insert(&bo->notifier, current->mm, addr,
+ amdgpu_bo_size(bo),
+ &amdgpu_hmm_gfx_ops);
+ if (r)
+ /*
+ * Make sure amdgpu_hmm_unregister() doesn't call
+ * mmu_interval_notifier_remove() when the notifier isn't properly
+ * initialized.
+ */
+ bo->notifier.mm = NULL;
+
+ return r;
}
/**
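For illustration: a minimal, compilable C sketch of the error-path pattern the amdgpu_hmm hunk above adopts -- register against one of two ops tables, then clear the back-reference on failure so a later unregister can tell the notifier was never set up. All types and helpers here (struct ops, struct notifier, notifier_insert, hmm_register) are hypothetical stand-ins, not amdgpu or mmu-notifier APIs.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for the notifier machinery; not kernel APIs. */
struct ops { const char *name; };
struct notifier { void *mm; const struct ops *ops; };

static int notifier_insert(struct notifier *n, void *mm, const struct ops *ops)
{
	n->mm = mm;
	n->ops = ops;
	return 0; /* pretend the insert succeeded */
}

/*
 * Register against one of two ops tables depending on the BO type; on
 * failure, clear ->mm so a later unregister can tell that the notifier
 * was never set up.
 */
static int hmm_register(struct notifier *n, void *mm, int is_kfd,
			const struct ops *hsa_ops, const struct ops *gfx_ops)
{
	int r;

	if (is_kfd)
		r = notifier_insert(n, mm, hsa_ops);
	else
		r = notifier_insert(n, mm, gfx_ops);
	if (r)
		n->mm = NULL;
	return r;
}

int main(void)
{
	static const struct ops hsa = { "hsa" }, gfx = { "gfx" };
	struct notifier n = { NULL, NULL };
	int mm = 0;

	printf("r=%d ops=%s\n", hmm_register(&n, &mm, 1, &hsa, &gfx), n.ops->name);
	return 0;
}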
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3c2b1413058b..94b310fdb719 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -1830,6 +1830,10 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass hdcp initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->hdcp_context.context.bin_desc.size_bytes ||
!psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
@@ -1862,6 +1866,9 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->hdcp_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
@@ -1897,6 +1904,10 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass dtm initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->dtm_context.context.bin_desc.size_bytes ||
!psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
@@ -1929,6 +1940,9 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ if (!psp->dtm_context.context.initialized)
+ return 0;
+
return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
@@ -2063,6 +2077,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
+ /* bypass securedisplay initialization if dmu is harvested */
+ if (!amdgpu_device_has_display_hardware(psp->adev))
+ return 0;
+
if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
!psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 8722beba494e..fc418e670fda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -864,6 +864,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
gtt->ttm.dma_address, flags);
}
+ gtt->bound = true;
}
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index b2535023764f..9c514a606a2f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -60,6 +60,7 @@
#define FIRMWARE_VCN4_0_4 "amdgpu/vcn_4_0_4.bin"
#define FIRMWARE_VCN4_0_5 "amdgpu/vcn_4_0_5.bin"
#define FIRMWARE_VCN4_0_6 "amdgpu/vcn_4_0_6.bin"
+#define FIRMWARE_VCN4_0_6_1 "amdgpu/vcn_4_0_6_1.bin"
#define FIRMWARE_VCN5_0_0 "amdgpu/vcn_5_0_0.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN);
@@ -85,6 +86,7 @@ MODULE_FIRMWARE(FIRMWARE_VCN4_0_3);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_4);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_5);
MODULE_FIRMWARE(FIRMWARE_VCN4_0_6);
+MODULE_FIRMWARE(FIRMWARE_VCN4_0_6_1);
MODULE_FIRMWARE(FIRMWARE_VCN5_0_0);
static void amdgpu_vcn_idle_work_handler(struct work_struct *work);
@@ -93,14 +95,22 @@ int amdgpu_vcn_early_init(struct amdgpu_device *adev)
{
char ucode_prefix[30];
char fw_name[40];
- int r;
+ int r, i;
- amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
- snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
- r = amdgpu_ucode_request(adev, &adev->vcn.fw, fw_name);
- if (r)
- amdgpu_ucode_release(&adev->vcn.fw);
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ amdgpu_ucode_ip_version_decode(adev, UVD_HWIP, ucode_prefix, sizeof(ucode_prefix));
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);
+ if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(4, 0, 6) &&
+ i == 1) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_%d.bin", ucode_prefix, i);
+ }
+ r = amdgpu_ucode_request(adev, &adev->vcn.fw[i], fw_name);
+ if (r) {
+ amdgpu_ucode_release(&adev->vcn.fw[i]);
+ return r;
+ }
+ }
return r;
}
@@ -141,7 +151,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
}
}
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[0]->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
/* Bit 20-23, it is encode major and non-zero for new naming convention.
@@ -256,9 +266,10 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->vcn.inst[j].ring_enc[i]);
+
+ amdgpu_ucode_release(&adev->vcn.fw[j]);
}
- amdgpu_ucode_release(&adev->vcn.fw);
mutex_destroy(&adev->vcn.vcn1_jpeg1_workaround);
mutex_destroy(&adev->vcn.vcn_pg_lock);
@@ -354,11 +365,12 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
const struct common_firmware_header *hdr;
unsigned int offset;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
if (drm_dev_enter(adev_to_drm(adev), &idx)) {
- memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
+ memcpy_toio(adev->vcn.inst[i].cpu_addr,
+ adev->vcn.fw[i]->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
}
@@ -1043,11 +1055,11 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
-
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[i]->data;
/* currently only support 2 FW instances */
if (i >= 2) {
dev_info(adev->dev, "More then 2 VCN FW instances!\n");
@@ -1055,7 +1067,7 @@ void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
}
idx = AMDGPU_UCODE_ID_VCN + i;
adev->firmware.ucode[idx].ucode_id = idx;
- adev->firmware.ucode[idx].fw = adev->vcn.fw;
+ adev->firmware.ucode[idx].fw = adev->vcn.fw[i];
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index 1985f71b4373..a418393d89ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -306,7 +306,7 @@ struct amdgpu_vcn_ras {
struct amdgpu_vcn {
unsigned fw_version;
struct delayed_work idle_work;
- const struct firmware *fw; /* VCN firmware */
+ const struct firmware *fw[AMDGPU_MAX_VCN_INSTANCES]; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
bool indirect_sram;
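For illustration: a minimal, compilable C sketch of the per-instance firmware handling the VCN hunks above switch to -- one handle per instance, an optional per-instance file name, and per-instance release on teardown. NUM_INST, struct fw, fw_request and fw_release are hypothetical stand-ins, not the amdgpu_ucode_* API.

#include <stdio.h>
#include <stdlib.h>

#define NUM_INST 2 /* stand-in for adev->vcn.num_vcn_inst */

/* Hypothetical firmware handle and request/release helpers. */
struct fw { char name[48]; };

static int fw_request(struct fw **out, const char *name)
{
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	snprintf((*out)->name, sizeof((*out)->name), "%s", name);
	return 0;
}

static void fw_release(struct fw **f)
{
	free(*f);
	*f = NULL;
}

int main(void)
{
	struct fw *fw[NUM_INST] = { NULL };
	char name[48];
	int i, r = 0;

	/* One image per instance; a specific instance may use a suffixed file name. */
	for (i = 0; i < NUM_INST; i++) {
		if (i == 1)
			snprintf(name, sizeof(name), "amdgpu/vcn_%d.bin", i);
		else
			snprintf(name, sizeof(name), "amdgpu/vcn.bin");
		r = fw_request(&fw[i], name);
		if (r)
			break;
	}

	/* Per-instance release, mirroring the teardown loop in sw_fini. */
	for (i = 0; i < NUM_INST; i++)
		fw_release(&fw[i]);

	printf("r=%d\n", r);
	return 0;
}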
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
index 70c5cc80ecdc..7a65a2b128ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vpe.c
@@ -575,9 +575,6 @@ static unsigned int vpe_ring_init_cond_exec(struct amdgpu_ring *ring,
{
unsigned int ret;
- if (ring->adev->vpe.collaborate_mode)
- return ~0;
-
amdgpu_ring_write(ring, VPE_CMD_HEADER(VPE_CMD_OPCODE_COND_EXE, 0));
amdgpu_ring_write(ring, lower_32_bits(addr));
amdgpu_ring_write(ring, upper_32_bits(addr));
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 904b9ff5ead2..f90905ef32c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3657,6 +3657,9 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
+ if (amdgpu_sriov_vf(adev))
+ return;
+
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
@@ -4982,7 +4985,8 @@ static void gfx_v10_0_constants_init(struct amdgpu_device *adev)
u32 tmp;
int i;
- WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
+ if (!amdgpu_sriov_vf(adev))
+ WREG32_FIELD15(GC, 0, GRBM_CNTL, READ_TIMEOUT, 0xff);
gfx_v10_0_setup_rb(adev);
gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info);
@@ -7163,7 +7167,7 @@ static int gfx_v10_0_hw_init(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(10, 3, 0))
gfx_v10_3_program_pbb_mode(adev);
- if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0))
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(10, 3, 0) && !amdgpu_sriov_vf(adev))
gfx_v10_3_set_power_brake_sequence(adev);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index cd0e8a321e46..17509f32f61a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -155,6 +155,9 @@ static void gfxhub_v2_1_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Program the AGP BAR */
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 16fe428c0722..7aed96fa10a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -418,6 +418,12 @@ static u32 ih_v7_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
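For illustration: a compilable C sketch of the pulse-the-clear-bit pattern the ih_v7_0 hunk adds -- set WPTR_OVERFLOW_CLEAR to acknowledge the overflow, then clear it again so the next overflow stays detectable. The register image, bit position and accessors below are hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>

#define WPTR_OVERFLOW_CLEAR (1u << 5) /* hypothetical bit position */

/* Stand-in for the memory-mapped IH_RB_CNTL register. */
static uint32_t ih_rb_cntl;

static uint32_t reg_read(void) { return ih_rb_cntl; }
static void reg_write(uint32_t val) { ih_rb_cntl = val; }

/*
 * Pulse the overflow-clear field: set it to acknowledge the current
 * overflow, then clear it again so the next overflow stays detectable,
 * mirroring the two writes in the hunk above.
 */
static void ack_overflow(void)
{
	uint32_t tmp = reg_read();

	reg_write(tmp | WPTR_OVERFLOW_CLEAR);
	reg_write(tmp & ~WPTR_OVERFLOW_CLEAR);
}

int main(void)
{
	ack_overflow();
	printf("IH_RB_CNTL = 0x%08x\n", (unsigned int)reg_read());
	return 0;
}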
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
index b3961968c10c..238ea40c2450 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c
@@ -99,16 +99,15 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
case IP_VERSION(3, 3, 0):
case IP_VERSION(3, 3, 1):
- mmhub_cid = mmhub_client_ids_v3_3[cid][rw];
+ mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ?
+ mmhub_client_ids_v3_3[cid][rw] :
+ cid == 0x140 ? "UMSCH" : NULL;
break;
default:
mmhub_cid = NULL;
break;
}
- if (!mmhub_cid && cid == 0x140)
- mmhub_cid = "UMSCH";
-
dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
mmhub_cid ? mmhub_cid : "unknown", cid);
dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
index 2d904ee72701..34237a1b1f2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c
@@ -431,16 +431,11 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev,
struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 doorbell_offset, doorbell;
u32 rb_cntl, ib_cntl;
- int i, unset = 0;
+ int i;
for_each_inst(i, inst_mask) {
sdma[i] = &adev->sdma.instance[i].ring;
- if ((adev->mman.buffer_funcs_ring == sdma[i]) && unset != 1) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = 1;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_GFX_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_GFX_RB_CNTL, RB_ENABLE, 0);
WREG32_SDMA(i, regSDMA_GFX_RB_CNTL, rb_cntl);
@@ -487,20 +482,10 @@ static void sdma_v4_4_2_inst_rlc_stop(struct amdgpu_device *adev,
static void sdma_v4_4_2_inst_page_stop(struct amdgpu_device *adev,
uint32_t inst_mask)
{
- struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES];
u32 rb_cntl, ib_cntl;
int i;
- bool unset = false;
for_each_inst(i, inst_mask) {
- sdma[i] = &adev->sdma.instance[i].page;
-
- if ((adev->mman.buffer_funcs_ring == sdma[i]) &&
- (!unset)) {
- amdgpu_ttm_set_buffer_funcs_status(adev, false);
- unset = true;
- }
-
rb_cntl = RREG32_SDMA(i, regSDMA_PAGE_RB_CNTL);
rb_cntl = REG_SET_FIELD(rb_cntl, SDMA_PAGE_RB_CNTL,
RB_ENABLE, 0);
@@ -950,13 +935,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev,
r = amdgpu_ring_test_helper(page);
if (r)
return r;
-
- if (adev->mman.buffer_funcs_ring == page)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
-
- if (adev->mman.buffer_funcs_ring == ring)
- amdgpu_ttm_set_buffer_funcs_status(adev, true);
}
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 25ba27151ac0..aaceecd558cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -304,7 +304,7 @@ static int vcn_v1_0_resume(void *handle)
*/
static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -371,7 +371,7 @@ static void vcn_v1_0_mc_resume_spg_mode(struct amdgpu_device *adev)
static void vcn_v1_0_mc_resume_dpg_mode(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 18794394c5a0..e357d8cf0c01 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -330,7 +330,7 @@ static int vcn_v2_0_resume(void *handle)
*/
static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
if (amdgpu_sriov_vf(adev))
@@ -386,7 +386,7 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_0_mc_resume_dpg_mode(struct amdgpu_device *adev, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1878,7 +1878,7 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
init_table += header->vcn_table_offset;
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[0]->size + 4);
MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index aba403d71806..1cd8a94b0fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -414,13 +414,15 @@ static int vcn_v2_5_resume(void *handle)
*/
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size;
uint32_t offset;
int i;
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -469,7 +471,7 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1240,7 +1242,7 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V1_0_INSERT_DIRECT_WT(
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index e02af4de521c..8f82fb887e9c 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -449,7 +449,7 @@ static int vcn_v3_0_resume(void *handle)
*/
static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -499,7 +499,7 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
- uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
uint32_t offset;
/* cache window 0: fw */
@@ -1332,7 +1332,7 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
mmUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
index 8ab01ae919d2..832d15f7b5f6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
@@ -382,7 +382,7 @@ static void vcn_v4_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -442,7 +442,7 @@ static void vcn_v4_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
{
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -1289,7 +1289,7 @@ static int vcn_v4_0_start_sriov(struct amdgpu_device *adev)
regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
index 810bbfccd6f2..203fa988322b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
@@ -332,7 +332,7 @@ static void vcn_v4_0_3_mc_resume(struct amdgpu_device *adev, int inst_idx)
uint32_t offset, size, vcn_inst;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
vcn_inst = GET_INST(VCN, inst_idx);
@@ -407,7 +407,7 @@ static void vcn_v4_0_3_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -894,7 +894,7 @@ static int vcn_v4_0_3_start_sriov(struct amdgpu_device *adev)
MMSCH_V4_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS),
~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);
- cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+ cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
MMSCH_V4_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
index 0468955338b7..501e53e69f2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
@@ -45,7 +45,7 @@
#define mmUVD_DPG_LMA_DATA_BASE_IDX regUVD_DPG_LMA_DATA_BASE_IDX
#define VCN_VID_SOC_ADDRESS_2_0 0x1fb00
-#define VCN1_VID_SOC_ADDRESS_3_0 0x48300
+#define VCN1_VID_SOC_ADDRESS_3_0 (0x48300 + 0x38000)
#define VCN_HARVEST_MMSCH 0
@@ -329,7 +329,7 @@ static void vcn_v4_0_5_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -390,7 +390,7 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -486,7 +486,8 @@ static void vcn_v4_0_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
/* VCN global tiling registers */
WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
- VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
+ VCN, inst_idx, regUVD_GFX10_ADDR_CONFIG),
+ adev->gfx.config.gb_addr_config, 0, indirect);
}
/**
@@ -911,7 +912,6 @@ static int vcn_v4_0_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, b
VCN, inst_idx, regUVD_MASTINT_EN),
UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);
-
if (indirect)
amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
index d6ee9958ba5f..bc60c554eb32 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
@@ -290,7 +290,7 @@ static void vcn_v5_0_0_mc_resume(struct amdgpu_device *adev, int inst)
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
@@ -351,7 +351,7 @@ static void vcn_v5_0_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_i
uint32_t offset, size;
const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw[inst_idx]->data;
size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
/* cache window 0: fw */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1c9c6096e28f..2851719d7121 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1767,6 +1767,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
adev->dm.dc->debug.force_subvp_mclk_switch = true;
+ if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+ adev->dm.dc->debug.using_dml2 = true;
+
adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
/* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -11271,18 +11274,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
if (!adev->dm.freesync_module)
goto update;
- if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ sink->sink_signal == SIGNAL_TYPE_EDP)) {
bool edid_check_required = false;
- if (edid) {
- edid_check_required = is_dp_capable_without_timing_msa(
- adev->dm.dc,
- amdgpu_dm_connector);
+ if (is_dp_capable_without_timing_msa(adev->dm.dc,
+ amdgpu_dm_connector)) {
+ if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+ freesync_capable = true;
+ amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+ amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+ } else {
+ edid_check_required = edid->version > 1 ||
+ (edid->version == 1 &&
+ edid->revision > 1);
+ }
}
- if (edid_check_required == true && (edid->version > 1 ||
- (edid->version == 1 && edid->revision > 1))) {
+ if (edid_check_required) {
for (i = 0; i < 4; i++) {
timing = &edid->detailed_timings[i];
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 668f05c8654e..bec252e1dd27 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
+ /* DPPCLK */
+ dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+ &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+ &num_entries_per_clk->num_dppclk_levels);
+ num_levels = num_entries_per_clk->num_dppclk_levels;
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+ //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+ if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
if (num_entries_per_clk->num_dcfclk_levels &&
num_entries_per_clk->num_dtbclk_levels &&
num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
= khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
}
+ for (i = 0; i < num_levels; i++)
+ if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+ clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
/* Get UCLK, update bounding box */
clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5211c1c0f3c0..e7dc128f6284 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require to reprogram blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * The optimized dispclk depends on the ODM setup. Wait for pending ODM
+ * updates to complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -3270,6 +3323,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
if (stream->link->replay_settings.config.replay_supported)
return true;
+ if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+ return true;
+
return false;
}
@@ -3493,7 +3549,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3587,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4901,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
- uint32_t idle_state = 0;
-
if (dc->debug.disable_idle_power_optimizations)
return false;
if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
return false;
- if (dc->hwss.get_idle_state)
- idle_state = dc->hwss.get_idle_state(dc);
-
- if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
- !(idle_state & DMUB_IPS2_ALLOW_MASK))
- return true;
+ if (!dc->ctx->dmub_srv)
+ return false;
- return false;
+ return dc->ctx->dmub_srv->idle_allowed;
}
/* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 180ac47868c2..5cc7f8da209c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)
void dc_state_release(struct dc_state *state)
{
- kref_put(&state->refcount, dc_state_free);
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
}
/*
* dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
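For illustration: a tiny compilable C sketch of the NULL-tolerant release that the dc_state_release() hunk introduces; struct state and state_release are hypothetical stand-ins for the kref-based original.

#include <stdio.h>
#include <stdlib.h>

struct state { int refcount; };

/* Release that tolerates a NULL state, like the guarded kref_put() above. */
static void state_release(struct state *s)
{
	if (s == NULL)
		return;
	if (--s->refcount == 0)
		free(s);
}

int main(void)
{
	struct state *s = calloc(1, sizeof(*s));

	s->refcount = 1;
	state_release(s);    /* drops the last reference and frees */
	state_release(NULL); /* safely ignored */
	printf("done\n");
	return 0;
}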
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9900dda2eef5..be2ac5c442a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1085,9 +1085,9 @@ struct replay_settings {
/* SMU optimization is enabled */
bool replay_smu_opt_enable;
/* Current Coasting vtotal */
- uint16_t coasting_vtotal;
+ uint32_t coasting_vtotal;
/* Coasting vtotal table */
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
/* Maximum link off frame count */
enum replay_link_off_frame_count_level link_off_frame_count_level;
/* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 48a40dcc7050..5838a11efd00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d0198661..fbf1b6370eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
(double_buffer_pending == 0);
}
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd27d..8f186abd558d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a409..6a71ba3dfc63 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 87760600e154..f98def6c8c2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
pipe_cnt++;
}
}
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+ if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b49e1dc9d8ba..a0a65e099104 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
* - Not TMZ surface
*/
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+ !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 1ba6933d2b36..17a58f41fc6a 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -824,13 +824,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
{
+ dml_uint_t width, height;
+
+ if (in->timing.h_addressable > 3840)
+ width = 3840;
+ else
+ width = in->timing.h_addressable; // 4K max
+
+ if (in->timing.v_addressable > 2160)
+ height = 2160;
+ else
+ height = in->timing.v_addressable; // 4K max
+
out->CursorBPP[location] = dml_cur_32bit;
out->CursorWidth[location] = 256;
out->GPUVMMinPageSizeKBytes[location] = 256;
- out->ViewportWidth[location] = in->timing.h_addressable;
- out->ViewportHeight[location] = in->timing.v_addressable;
+ out->ViewportWidth[location] = width;
+ out->ViewportHeight[location] = height;
out->ViewportStationary[location] = false;
out->ViewportWidthChroma[location] = 0;
out->ViewportHeightChroma[location] = 0;
@@ -849,7 +861,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
out->HTapsChroma[location] = 0;
out->VTapsChroma[location] = 0;
out->SourceScan[location] = dml_rotation_0;
- out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+ out->ScalerRecoutWidth[location] = width;
out->LBBitPerPixel[location] = 57;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 2a58a7687bdb..72cca367062e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
}
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
- // Allocate Mode Lib Ctx
- *dml2 = dml2_allocate_memory();
-
- if (!(*dml2))
- return false;
// Store config options
(*dml2)->config = *config;
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+ // Allocate Mode Lib Ctx
+ *dml2 = dml2_allocate_memory();
+
+ if (!(*dml2))
+ return false;
+
+ dml2_init(in_dc, config, dml2);
- /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
- //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
return true;
}
@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
return true;
}
+
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2)
+{
+
+ dml2_init(in_dc, config, dml2);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index ee0eb184eb6d..cc662d682fd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
struct dml2_context *src_dml2);
bool dml2_create_copy(struct dml2_context **dst_dml2,
struct dml2_context *src_dml2);
+void dml2_reinit(const struct dc *in_dc,
+ const struct dml2_configuration_options *config,
+ struct dml2_context **dml2);
/*
* dml2_validate - Determines if a display configuration is supported or not.
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c55d5155ecb9..8b3536c380b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
return;
}
+ if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+ resource_is_odm_topology_changed(new_pipe, old_pipe))
+ /* Detect odm changes */
+ new_pipe->update_flags.bits.odm = 1;
+
/* Exit on unchanged, unused pipe */
if (!old_pipe->plane_state && !new_pipe->plane_state)
return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
/* Detect top pipe only changes */
if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
- /* Detect odm changes */
- if (resource_is_odm_topology_changed(new_pipe, old_pipe))
- new_pipe->update_flags.bits.odm = 1;
-
/* Detect global sync changes */
if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
|| old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
DC_LOGGER_INIT(dc->ctx->logger);
unsigned int prev_hubp_count = 0;
unsigned int hubp_count = 0;
+ struct pipe_ctx *pipe;
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
- ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+ ASSERT(!pipe->plane_state->triplebuffer_flips);
/*turn off triple buffer for full update*/
dc->hwss.program_triplebuffer(
- dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ dc, pipe, pipe->plane_state->triplebuffer_flips);
}
}
}
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
+ /* update ODM for blanked OTG master pipes */
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+ !resource_is_pipe_type(pipe, DPP_PIPE) &&
+ pipe->update_flags.bits.odm &&
+ hws->funcs.update_odm)
+ hws->funcs.update_odm(dc, context, pipe);
+ }
+
/*
* Program all updated pipes, order matters for mpcc setup. Start with
* top pipe and program all pipes that follow in order
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe = &context->res_ctx.pipe_ctx[i];
if (pipe->plane_state && !pipe->top_pipe) {
while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
context->stream_status[0].plane_count > 1) {
pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
}
-
- /* when dynamic ODM is active, pipes must be reconfigured when all planes are
- * disabled, as some transitions will leave software and hardware state
- * mismatched.
- */
- if (dc->debug.enable_single_display_2to1_odm_policy &&
- pipe->stream &&
- pipe->update_flags.bits.disable &&
- !pipe->prev_odm_pipe &&
- hws->funcs.update_odm)
- hws->funcs.update_odm(dc, context, pipe);
}
}
@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
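For illustration: a compilable C sketch of the reworked wait loop above -- poll until the double-buffered blank update is no longer pending, then report whether the generator actually ended up blanked. The callbacks and poll budget below are hypothetical stand-ins for the OPP funcs and udelay().

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical status callbacks standing in for dpg_is_pending()/dpg_is_blanked(). */
static int polls_left = 3;
static bool dpg_is_pending(void) { return polls_left-- > 0; }
static bool dpg_is_blanked(void) { return true; }

/*
 * Poll until the pending flag deasserts, then confirm the blank state,
 * the same shape as the reworked dcn20_wait_for_blank_complete() above.
 */
static bool wait_for_blank_complete(void)
{
	int counter;

	for (counter = 0; counter < 1000; counter++) {
		if (!dpg_is_pending())
			break;
		/* udelay(100) in the real sequence */
	}
	if (counter == 1000)
		return false;

	return dpg_is_blanked();
}

int main(void)
{
	printf("blanked=%d\n", wait_for_blank_complete());
	return 0;
}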
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 7e6b7f2a6dc9..8bc3d01537bb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
if (pipe_ctx == NULL)
return;
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
pipe_ctx->stream_res.stream_enc,
enable);
+
+ /* Wait for two frames to make sure AV mute is sent out */
+ if (enable) {
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+ }
+ }
}
void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index aa36d7a56ca8..c0b526cf1786 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -1156,6 +1156,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
dsc->funcs->dsc_disconnect(dsc);
}
}
+
+ if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+ /*
+ * blank pattern is generated by OPP, reprogram blank pattern
+ * due to OPP count change
+ */
+ dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
}
unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1785,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index 069e20bc87c0..f55c11fc56ec 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 2b073123d3ed..67d661dbd5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index aee5372e292c..d89c92370d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -337,6 +337,9 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index d98d72f35be5..ffad8fe16c54 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -331,6 +331,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index 26fe81f213da..bf29fc58ea6a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -285,12 +285,12 @@ struct link_service {
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index acfbbc638cc6..3baa2bdd6dd6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link,
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link,
}
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal)
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index 34e521af7bb4..a158c6234d42 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link,
bool edp_send_replay_cmd(struct dc_link *link,
enum replay_FW_Message_type msg,
union dmub_replay_cmd_set *cmd_data);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
- const unsigned int *power_opts, uint16_t coasting_vtotal);
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index ab81594a7fad..6c2e84d3967f 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -557,7 +557,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 823493543325..f07a4c7e48bc 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
}
}
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -345,6 +352,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 8ce3b178cab0..0c2c14695561 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index 3f3951f3ba98..ce1754cc1f46 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
dcn32_override_min_req_memclk(dc, context);
+ dcn32_override_min_req_dcfclk(dc, context);
BW_VAL_TRACE_END_WATERMARKS();
@@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw
{
DC_FP_START();
dcn32_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 0c87b0fabba7..2258c5c7212d 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -42,6 +42,7 @@
#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
+#define MIN_SUBVP_DCFCLK_KHZ 400000
#define TO_DCN32_RES_POOL(pool)\
container_of(pool, struct dcn32_resource_pool, base)
@@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes);
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context);
+
/* definitions for run time init of reg offsets */
/* CLK SRC */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index b356fed1726d..296a0a8e7145 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b
{
DC_FP_START();
dcn321_update_bw_bounding_box_fpu(dc, bw_params);
+ if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2)
+ dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2);
DC_FP_END();
}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index a529e369b2ac..af3fe8bb0728 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data {
* Currently the support is only for 0 or 1
*/
uint8_t panel_inst;
+ /**
+ * 16-bit value dictated by driver that indicates the upper 16 bits of the coasting vtotal.
+ */
+ uint16_t coasting_vtotal_high;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 8c137d7c032e..7c9805705fd3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+ if (!display)
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
+
hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index;
if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0)
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e304e8435fb8..2a3698fd2dc2 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link,
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal)
+ uint32_t vtotal)
{
link->replay_settings.coasting_vtotal_table[type] = vtotal;
}
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index bef4815e1703..ff7e6f3cd6be 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
void set_replay_coasting_vtotal(struct dc_link *link,
enum replay_coasting_vtotal_type type,
- uint16_t vtotal);
+ uint32_t vtotal);
void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
void calculate_replay_link_off_frame_count(struct dc_link *link,
uint16_t vtotal, uint16_t htotal);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 1d96eb274d72..0c2d04f978ac 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -1283,10 +1283,8 @@ static int arcturus_get_power_limit(struct smu_context *smu,
uint32_t *max_power_limit,
uint32_t *min_power_limit)
{
- struct smu_11_0_powerplay_table *powerplay_table =
- (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -1302,26 +1300,10 @@ static int arcturus_get_power_limit(struct smu_context *smu,
*current_power_limit = power_limit;
if (default_power_limit)
*default_power_limit = power_limit;
-
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
-
- dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
- od_percent_upper, od_percent_lower, power_limit);
-
- if (max_power_limit) {
- *max_power_limit = power_limit * (100 + od_percent_upper);
- *max_power_limit /= 100;
- }
-
- if (min_power_limit) {
- *min_power_limit = power_limit * (100 - od_percent_lower);
- *min_power_limit /= 100;
- }
+ if (max_power_limit)
+ *max_power_limit = power_limit;
+ if (min_power_limit)
+ *min_power_limit = power_limit;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index ed189a3878eb..836b1df79928 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -2339,7 +2339,7 @@ static int navi10_get_power_limit(struct smu_context *smu,
(struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
PPTable_t *pptable = smu->smu_table.driver_pptable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
if (smu_v11_0_get_current_power_limit(smu, &power_limit)) {
/* the last hope to figure out the ppt limit */
@@ -2356,13 +2356,16 @@ static int navi10_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled &&
- navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT))
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index e2ad2b972ab0..1f18b61884f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -617,6 +617,12 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
return throttler_status;
}
+static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
+ enum SMU_11_0_7_ODFEATURE_CAP cap)
+{
+ return od_table->cap[cap];
+}
+
static int sienna_cichlid_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -625,7 +631,8 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
{
struct smu_11_0_7_powerplay_table *powerplay_table =
(struct smu_11_0_7_powerplay_table *)smu->smu_table.power_play_table;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ struct smu_11_0_7_overdrive_table *od_settings = smu->od_settings;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint16_t *table_member;
GET_PPTABLE_MEMBER(SocketPowerLimitAc, &table_member);
@@ -640,12 +647,16 @@ static int sienna_cichlid_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if ((sienna_cichlid_is_od_feature_supported(od_settings, SMU_11_0_7_ODCAP_POWER_LIMIT))) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_11_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
@@ -1250,12 +1261,6 @@ static bool sienna_cichlid_is_support_fine_grained_dpm(struct smu_context *smu,
return dpm_desc->SnapToDiscrete == 0;
}
-static bool sienna_cichlid_is_od_feature_supported(struct smu_11_0_7_overdrive_table *od_table,
- enum SMU_11_0_7_ODFEATURE_CAP cap)
-{
- return od_table->cap[cap];
-}
-
static void sienna_cichlid_get_od_setting_range(struct smu_11_0_7_overdrive_table *od_table,
enum SMU_11_0_7_ODSETTING_ID setting,
uint32_t *min, uint32_t *max)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 9b80f18ea6c3..9c03296f92cd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2356,7 +2356,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
(struct smu_13_0_0_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2369,12 +2369,16 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_0_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_0_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 3dc7b60cb075..7318964f1f14 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2320,7 +2320,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
(struct smu_13_0_7_powerplay_table *)table_context->power_play_table;
PPTable_t *pptable = table_context->driver_pptable;
SkuTable_t *skutable = &pptable->SkuTable;
- uint32_t power_limit, od_percent_upper, od_percent_lower;
+ uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0;
uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
if (smu_v13_0_get_current_power_limit(smu, &power_limit))
@@ -2333,12 +2333,16 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
if (default_power_limit)
*default_power_limit = power_limit;
- if (smu->od_enabled)
- od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
- else
- od_percent_upper = 0;
-
- od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ if (powerplay_table) {
+ if (smu->od_enabled &&
+ (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT))) {
+ od_percent_upper = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ } else if (smu_v13_0_7_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) {
+ od_percent_upper = 0;
+ od_percent_lower = le32_to_cpu(powerplay_table->overdrive_table.min[SMU_13_0_7_ODSETTING_POWERPERCENTAGE]);
+ }
+ }
dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n",
od_percent_upper, od_percent_lower, power_limit);
diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c
index e7c4bef74aa4..4b2ae27f0a57 100644
--- a/drivers/gpu/drm/bridge/lontium-lt8912b.c
+++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c
@@ -441,23 +441,21 @@ lt8912_connector_mode_valid(struct drm_connector *connector,
static int lt8912_connector_get_modes(struct drm_connector *connector)
{
const struct drm_edid *drm_edid;
- int ret = -1;
- int num = 0;
struct lt8912 *lt = connector_to_lt8912(connector);
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ int ret, num;
drm_edid = drm_bridge_edid_read(lt->hdmi_port, connector);
drm_edid_connector_update(connector, drm_edid);
- if (drm_edid) {
- num = drm_edid_connector_add_modes(connector);
- } else {
- return ret;
- }
+ if (!drm_edid)
+ return 0;
+
+ num = drm_edid_connector_add_modes(connector);
ret = drm_display_info_set_bus_formats(&connector->display_info,
&bus_format, 1);
- if (ret)
- num = ret;
+ if (ret < 0)
+ num = 0;
drm_edid_free(drm_edid);
return num;
diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
index bcf8bccd86d6..f4f593ad8f79 100644
--- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c
@@ -294,8 +294,8 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc,
static int lt9611uxc_connector_get_modes(struct drm_connector *connector)
{
struct lt9611uxc *lt9611uxc = connector_to_lt9611uxc(connector);
- unsigned int count;
const struct drm_edid *drm_edid;
+ int count;
drm_edid = drm_bridge_edid_read(&lt9611uxc->bridge, connector);
drm_edid_connector_update(connector, drm_edid);
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index e814020bbcd3..cfbe020de54e 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -274,19 +274,24 @@ EXPORT_SYMBOL(drm_panel_disable);
* The modes probed from the panel are automatically added to the connector
* that the panel is attached to.
*
- * Return: The number of modes available from the panel on success or a
- * negative error code on failure.
+ * Return: The number of modes available from the panel on success, or 0 on
+ * failure (no modes).
*/
int drm_panel_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
if (!panel)
- return -EINVAL;
+ return 0;
- if (panel->funcs && panel->funcs->get_modes)
- return panel->funcs->get_modes(panel, connector);
+ if (panel->funcs && panel->funcs->get_modes) {
+ int num;
- return -EOPNOTSUPP;
+ num = panel->funcs->get_modes(panel, connector);
+ if (num > 0)
+ return num;
+ }
+
+ return 0;
}
EXPORT_SYMBOL(drm_panel_get_modes);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 4d60cc810b57..bf2dd1f46b6c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -422,6 +422,13 @@ static int drm_helper_probe_get_modes(struct drm_connector *connector)
count = connector_funcs->get_modes(connector);
+ /* The .get_modes() callback should not return negative values. */
+ if (count < 0) {
+ drm_err(connector->dev, ".get_modes() returned %pe\n",
+ ERR_PTR(count));
+ count = 0;
+ }
+
/*
* Fallback for when DDC probe failed in drm_get_edid() and thus skipped
* override/firmware EDID.
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index ca31bad6c576..f48c4343f469 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -74,16 +74,15 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
{
struct exynos_dp_device *dp = to_dp(plat_data);
struct drm_display_mode *mode;
- int num_modes = 0;
if (dp->plat_data.panel)
- return num_modes;
+ return 0;
mode = drm_mode_create(connector->dev);
if (!mode) {
DRM_DEV_ERROR(dp->dev,
"failed to create a new display mode.\n");
- return num_modes;
+ return 0;
}
drm_display_mode_from_videomode(&dp->vm, mode);
@@ -94,7 +93,7 @@ static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
drm_mode_set_name(mode);
drm_mode_probed_add(connector, mode);
- return num_modes + 1;
+ return 1;
}
static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 00382f28748a..f5bbba9ad225 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -316,14 +316,14 @@ static int vidi_get_modes(struct drm_connector *connector)
*/
if (!ctx->raw_edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "raw_edid is null.\n");
- return -EFAULT;
+ return 0;
}
edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH;
edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL);
if (!edid) {
DRM_DEV_DEBUG_KMS(ctx->dev, "failed to allocate edid\n");
- return -ENOMEM;
+ return 0;
}
drm_connector_update_edid_property(connector, edid);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 43bed6cbaaea..b1d02dec3774 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -887,11 +887,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
int ret;
if (!hdata->ddc_adpt)
- return -ENODEV;
+ return 0;
edid = drm_get_edid(connector, hdata->ddc_adpt);
if (!edid)
- return -ENODEV;
+ return 0;
hdata->dvi_mode = !connector->display_info.is_hdmi;
DRM_DEV_DEBUG_KMS(hdata->dev, "%s : width[%d] x height[%d]\n",
diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h
index 99bdb833591c..7862e7cefe02 100644
--- a/drivers/gpu/drm/i915/display/intel_display_trace.h
+++ b/drivers/gpu/drm/i915/display/intel_display_trace.h
@@ -411,7 +411,7 @@ TRACE_EVENT(intel_fbc_activate,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -438,7 +438,7 @@ TRACE_EVENT(intel_fbc_deactivate,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
@@ -465,7 +465,7 @@ TRACE_EVENT(intel_fbc_nuke,
struct intel_crtc *crtc = intel_crtc_for_pipe(to_i915(plane->base.dev),
plane->pipe);
__assign_str(dev, __dev_name_kms(plane));
- __assign_str(name, plane->base.name)
+ __assign_str(name, plane->base.name);
__entry->pipe = crtc->pipe;
__entry->frame = intel_crtc_get_vblank_counter(crtc);
__entry->scanline = intel_get_crtc_scanline(crtc);
diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
index 70349739dd89..55dedd73f528 100644
--- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c
+++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c
@@ -72,14 +72,14 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
int ret;
if (!mode)
- return -EINVAL;
+ return 0;
ret = of_get_drm_display_mode(np, &imxpd->mode,
&imxpd->bus_flags,
OF_USE_NATIVE_MODE);
if (ret) {
drm_mode_destroy(connector->dev, mode);
- return ret;
+ return 0;
}
drm_mode_copy(mode, &imxpd->mode);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 56dcd25db1ce..db8cbf615112 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1256,6 +1256,8 @@ out:
drm_vma_node_unmap(&nvbo->bo.base.vma_node,
bdev->dev_mapping);
nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
+ nvbo->bo.resource->bus.offset = 0;
+ nvbo->bo.resource->bus.addr = NULL;
goto retry;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
index 666eb93b1742..11b4c9c274a1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/r535.c
@@ -41,7 +41,6 @@ r535_devinit_new(const struct nvkm_devinit_func *hw,
rm->dtor = r535_devinit_dtor;
rm->post = hw->post;
- rm->disable = hw->disable;
ret = nv50_devinit_new_(rm, device, type, inst, pdevinit);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
index a73a5b589790..9994cbd6f1c4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c
@@ -1430,6 +1430,10 @@ r535_gsp_msg_post_event(void *priv, u32 fn, void *repv, u32 repc)
/**
* r535_gsp_msg_run_cpu_sequencer() -- process I/O commands from the GSP
+ * @priv: gsp pointer
+ * @fn: function number (ignored)
+ * @repv: pointer to libos print RPC
+ * @repc: message size
*
* The GSP sequencer is a list of I/O commands that the GSP can send to
* the driver to perform for various purposes. The most common usage is to
@@ -1781,6 +1785,7 @@ static void create_pte_array(u64 *ptes, dma_addr_t addr, size_t size)
/**
* r535_gsp_libos_init() -- create the libos arguments structure
+ * @gsp: gsp pointer
*
* The logging buffers are byte queues that contain encoded printf-like
* messages from GSP-RM. They need to be decoded by a special application
@@ -1920,6 +1925,10 @@ nvkm_gsp_radix3_dtor(struct nvkm_gsp *gsp, struct nvkm_gsp_radix3 *rx3)
/**
* nvkm_gsp_radix3_sg - build a radix3 table from a S/G list
+ * @gsp: gsp pointer
+ * @sgt: S/G list to traverse
+ * @size: size of the image, in bytes
+ * @rx3: radix3 array to update
*
* The GSP uses a three-level page table, called radix3, to map the firmware.
* Each 64-bit "pointer" in the table is either the bus address of an entry in
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 69001a3dc0df..2d1880c61b50 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -166,7 +166,7 @@ sun4i_hdmi_connector_clock_valid(const struct drm_connector *connector,
unsigned long long clock)
{
const struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
- unsigned long diff = clock / 200; /* +-0.5% allowed by HDMI spec */
+ unsigned long diff = div_u64(clock, 200); /* +-0.5% allowed by HDMI spec */
long rounded_rate;
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 34f807ed1c31..d8751ea20303 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -509,7 +509,7 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
edid = drm_get_edid(connector, vc4_hdmi->ddc);
cec_s_phys_addr_from_edid(vc4_hdmi->cec_adap, edid);
if (!edid)
- return -ENODEV;
+ return 0;
drm_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 952496c6260d..826c8b389672 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -235,6 +235,29 @@ retry:
goto err_unlock_list;
}
+ if (!args->num_batch_buffer) {
+ err = xe_vm_lock(vm, true);
+ if (err)
+ goto err_unlock_list;
+
+ if (!xe_vm_in_lr_mode(vm)) {
+ struct dma_fence *fence;
+
+ fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ goto err_unlock_list;
+ }
+ for (i = 0; i < num_syncs; i++)
+ xe_sync_entry_signal(&syncs[i], NULL, fence);
+ xe_exec_queue_last_fence_set(q, vm, fence);
+ dma_fence_put(fence);
+ }
+
+ xe_vm_unlock(vm);
+ goto err_unlock_list;
+ }
+
vm_exec.vm = &vm->gpuvm;
vm_exec.flags = DRM_EXEC_INTERRUPTIBLE_WAIT;
if (xe_vm_in_lr_mode(vm)) {
@@ -254,24 +277,6 @@ retry:
goto err_exec;
}
- if (!args->num_batch_buffer) {
- if (!xe_vm_in_lr_mode(vm)) {
- struct dma_fence *fence;
-
- fence = xe_sync_in_fence_get(syncs, num_syncs, q, vm);
- if (IS_ERR(fence)) {
- err = PTR_ERR(fence);
- goto err_exec;
- }
- for (i = 0; i < num_syncs; i++)
- xe_sync_entry_signal(&syncs[i], NULL, fence);
- xe_exec_queue_last_fence_set(q, vm, fence);
- dma_fence_put(fence);
- }
-
- goto err_exec;
- }
-
if (xe_exec_queue_is_lr(q) && xe_exec_queue_ring_full(q)) {
err = -EWOULDBLOCK; /* Aliased to -EAGAIN */
skip_retry = true;
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 73c535193a98..241c294270d9 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -69,7 +69,7 @@ static bool access_is_atomic(enum access_type access_type)
static bool vma_is_valid(struct xe_tile *tile, struct xe_vma *vma)
{
return BIT(tile->id) & vma->tile_present &&
- !(BIT(tile->id) & vma->usm.tile_invalidated);
+ !(BIT(tile->id) & vma->tile_invalidated);
}
static bool vma_matches(struct xe_vma *vma, u64 page_addr)
@@ -226,7 +226,7 @@ retry_userptr:
if (xe_vma_is_userptr(vma))
ret = xe_vma_userptr_check_repin(to_userptr_vma(vma));
- vma->usm.tile_invalidated &= ~BIT(tile->id);
+ vma->tile_invalidated &= ~BIT(tile->id);
unlock_dma_resv:
drm_exec_fini(&exec);
diff --git a/drivers/gpu/drm/xe/xe_trace.h b/drivers/gpu/drm/xe/xe_trace.h
index 4ddc55527f9a..846f14507d5f 100644
--- a/drivers/gpu/drm/xe/xe_trace.h
+++ b/drivers/gpu/drm/xe/xe_trace.h
@@ -468,7 +468,7 @@ DEFINE_EVENT(xe_vma, xe_vma_userptr_invalidate,
TP_ARGS(vma)
);
-DEFINE_EVENT(xe_vma, xe_vma_usm_invalidate,
+DEFINE_EVENT(xe_vma, xe_vma_invalidate,
TP_PROTO(struct xe_vma *vma),
TP_ARGS(vma)
);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index d28260351af2..f88faef4142b 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -708,6 +708,7 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
int err = 0;
LIST_HEAD(tmp_evict);
+ xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));
lockdep_assert_held_write(&vm->lock);
/* Collect invalidated userptrs */
@@ -724,11 +725,27 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
- if (err < 0)
- return err;
+ if (err == -EFAULT) {
+ list_del_init(&uvma->userptr.repin_link);
- list_del_init(&uvma->userptr.repin_link);
- list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
+ /* Wait for pending binds */
+ xe_vm_lock(vm, false);
+ dma_resv_wait_timeout(xe_vm_resv(vm),
+ DMA_RESV_USAGE_BOOKKEEP,
+ false, MAX_SCHEDULE_TIMEOUT);
+
+ err = xe_vm_invalidate_vma(&uvma->vma);
+ xe_vm_unlock(vm);
+ if (err)
+ return err;
+ } else {
+ if (err < 0)
+ return err;
+
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->vma.combined_links.rebind,
+ &vm->rebind_list);
+ }
}
return 0;
@@ -2024,7 +2041,7 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
return err;
}
- if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) {
+ if (vma->tile_mask != (vma->tile_present & ~vma->tile_invalidated)) {
return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, num_syncs,
true, first_op, last_op);
} else {
@@ -3214,9 +3231,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
u8 id;
int ret;
- xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma)));
xe_assert(xe, !xe_vma_is_null(vma));
- trace_xe_vma_usm_invalidate(vma);
+ trace_xe_vma_invalidate(vma);
/* Check that we don't race with page-table updates */
if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
@@ -3254,7 +3270,7 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
}
}
- vma->usm.tile_invalidated = vma->tile_mask;
+ vma->tile_invalidated = vma->tile_mask;
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 79b5cab57711..ae5fb565f6bf 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -84,11 +84,8 @@ struct xe_vma {
struct work_struct destroy_work;
};
- /** @usm: unified shared memory state */
- struct {
- /** @tile_invalidated: VMA has been invalidated */
- u8 tile_invalidated;
- } usm;
+ /** @tile_invalidated: VMA has been invalidated */
+ u8 tile_invalidated;
/** @tile_mask: Tile mask of where to create binding for this VMA */
u8 tile_mask;
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index 079cc283a186..c5f6b5a5d117 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -111,8 +111,10 @@ void xe_vram_freq_sysfs_init(struct xe_tile *tile)
return;
kobj = kobject_create_and_add("memory", tile->sysfs);
- if (!kobj)
+ if (!kobj) {
drm_warn(&xe->drm, "failed to add memory directory, err: %d\n", -ENOMEM);
+ return;
+ }
err = sysfs_create_group(kobj, &freq_group_attrs);
if (err) {
diff --git a/drivers/greybus/bundle.c b/drivers/greybus/bundle.c
index 84660729538b..a6e1cca06172 100644
--- a/drivers/greybus/bundle.c
+++ b/drivers/greybus/bundle.c
@@ -166,7 +166,7 @@ static const struct dev_pm_ops gb_bundle_pm_ops = {
SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle)
};
-struct device_type greybus_bundle_type = {
+const struct device_type greybus_bundle_type = {
.name = "greybus_bundle",
.release = gb_bundle_release,
.pm = &gb_bundle_pm_ops,
diff --git a/drivers/greybus/control.c b/drivers/greybus/control.c
index 359a25841973..b5cf49d09df2 100644
--- a/drivers/greybus/control.c
+++ b/drivers/greybus/control.c
@@ -436,7 +436,7 @@ static void gb_control_release(struct device *dev)
kfree(control);
}
-struct device_type greybus_control_type = {
+const struct device_type greybus_control_type = {
.name = "greybus_control",
.release = gb_control_release,
};
diff --git a/drivers/greybus/core.c b/drivers/greybus/core.c
index 5714be740470..95c09d4f3a86 100644
--- a/drivers/greybus/core.c
+++ b/drivers/greybus/core.c
@@ -27,6 +27,36 @@ int greybus_disabled(void)
}
EXPORT_SYMBOL_GPL(greybus_disabled);
+static int is_gb_host_device(const struct device *dev)
+{
+ return dev->type == &greybus_hd_type;
+}
+
+static int is_gb_module(const struct device *dev)
+{
+ return dev->type == &greybus_module_type;
+}
+
+static int is_gb_interface(const struct device *dev)
+{
+ return dev->type == &greybus_interface_type;
+}
+
+static int is_gb_control(const struct device *dev)
+{
+ return dev->type == &greybus_control_type;
+}
+
+static int is_gb_bundle(const struct device *dev)
+{
+ return dev->type == &greybus_bundle_type;
+}
+
+static int is_gb_svc(const struct device *dev)
+{
+ return dev->type == &greybus_svc_type;
+}
+
static bool greybus_match_one_id(struct gb_bundle *bundle,
const struct greybus_bundle_id *id)
{
@@ -155,7 +185,7 @@ static void greybus_shutdown(struct device *dev)
}
}
-struct bus_type greybus_bus_type = {
+const struct bus_type greybus_bus_type = {
.name = "greybus",
.match = greybus_match_device,
.uevent = greybus_uevent,
diff --git a/drivers/greybus/es2.c b/drivers/greybus/es2.c
index e89cca015095..1ee78d0d90b4 100644
--- a/drivers/greybus/es2.c
+++ b/drivers/greybus/es2.c
@@ -513,16 +513,16 @@ static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
if (cport_id < 0) {
ida_start = 0;
- ida_end = hd->num_cports;
+ ida_end = hd->num_cports - 1;
} else if (cport_id < hd->num_cports) {
ida_start = cport_id;
- ida_end = cport_id + 1;
+ ida_end = cport_id;
} else {
dev_err(&hd->dev, "cport %d not available\n", cport_id);
return -EINVAL;
}
- return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+ return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}
static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
@@ -535,7 +535,7 @@ static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
return;
}
- ida_simple_remove(&hd->cport_id_map, cport_id);
+ ida_free(&hd->cport_id_map, cport_id);
}
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index c3e90025064b..33f8fad70260 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -271,7 +271,7 @@ static void hdlc_rx_frame(struct gb_beagleplay *bg)
}
}
-static ssize_t hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
+static size_t hdlc_rx(struct gb_beagleplay *bg, const u8 *data, size_t count)
{
size_t i;
u8 c;
@@ -331,8 +331,8 @@ static void hdlc_deinit(struct gb_beagleplay *bg)
flush_work(&bg->tx_work);
}
-static ssize_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
- size_t count)
+static size_t gb_tty_receive(struct serdev_device *sd, const u8 *data,
+ size_t count)
{
struct gb_beagleplay *bg = serdev_device_get_drvdata(sd);
diff --git a/drivers/greybus/hd.c b/drivers/greybus/hd.c
index 72b21bf2d7d3..5de98d9177f1 100644
--- a/drivers/greybus/hd.c
+++ b/drivers/greybus/hd.c
@@ -50,7 +50,7 @@ int gb_hd_cport_reserve(struct gb_host_device *hd, u16 cport_id)
struct ida *id_map = &hd->cport_id_map;
int ret;
- ret = ida_simple_get(id_map, cport_id, cport_id + 1, GFP_KERNEL);
+ ret = ida_alloc_range(id_map, cport_id, cport_id, GFP_KERNEL);
if (ret < 0) {
dev_err(&hd->dev, "failed to reserve cport %u\n", cport_id);
return ret;
@@ -64,7 +64,7 @@ void gb_hd_cport_release_reserved(struct gb_host_device *hd, u16 cport_id)
{
struct ida *id_map = &hd->cport_id_map;
- ida_simple_remove(id_map, cport_id);
+ ida_free(id_map, cport_id);
}
EXPORT_SYMBOL_GPL(gb_hd_cport_release_reserved);
@@ -80,16 +80,16 @@ int gb_hd_cport_allocate(struct gb_host_device *hd, int cport_id,
if (cport_id < 0) {
ida_start = 0;
- ida_end = hd->num_cports;
+ ida_end = hd->num_cports - 1;
} else if (cport_id < hd->num_cports) {
ida_start = cport_id;
- ida_end = cport_id + 1;
+ ida_end = cport_id;
} else {
dev_err(&hd->dev, "cport %d not available\n", cport_id);
return -EINVAL;
}
- return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
+ return ida_alloc_range(id_map, ida_start, ida_end, GFP_KERNEL);
}
/* Locking: Caller guarantees serialisation */
@@ -100,7 +100,7 @@ void gb_hd_cport_release(struct gb_host_device *hd, u16 cport_id)
return;
}
- ida_simple_remove(&hd->cport_id_map, cport_id);
+ ida_free(&hd->cport_id_map, cport_id);
}
static void gb_hd_release(struct device *dev)
@@ -111,12 +111,12 @@ static void gb_hd_release(struct device *dev)
if (hd->svc)
gb_svc_put(hd->svc);
- ida_simple_remove(&gb_hd_bus_id_map, hd->bus_id);
+ ida_free(&gb_hd_bus_id_map, hd->bus_id);
ida_destroy(&hd->cport_id_map);
kfree(hd);
}
-struct device_type greybus_hd_type = {
+const struct device_type greybus_hd_type = {
.name = "greybus_host_device",
.release = gb_hd_release,
};
@@ -162,7 +162,7 @@ struct gb_host_device *gb_hd_create(struct gb_hd_driver *driver,
if (!hd)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&gb_hd_bus_id_map, 1, 0, GFP_KERNEL);
+ ret = ida_alloc_min(&gb_hd_bus_id_map, 1, GFP_KERNEL);
if (ret < 0) {
kfree(hd);
return ERR_PTR(ret);
diff --git a/drivers/greybus/interface.c b/drivers/greybus/interface.c
index 9ec949a438ef..fd58a86b0888 100644
--- a/drivers/greybus/interface.c
+++ b/drivers/greybus/interface.c
@@ -131,9 +131,8 @@ static int gb_interface_route_create(struct gb_interface *intf)
int ret;
/* Allocate an interface device id. */
- ret = ida_simple_get(&svc->device_id_map,
- GB_SVC_DEVICE_ID_MIN, GB_SVC_DEVICE_ID_MAX + 1,
- GFP_KERNEL);
+ ret = ida_alloc_range(&svc->device_id_map, GB_SVC_DEVICE_ID_MIN,
+ GB_SVC_DEVICE_ID_MAX, GFP_KERNEL);
if (ret < 0) {
dev_err(&intf->dev, "failed to allocate device id: %d\n", ret);
return ret;
@@ -165,7 +164,7 @@ err_svc_id_free:
* XXX anymore.
*/
err_ida_remove:
- ida_simple_remove(&svc->device_id_map, device_id);
+ ida_free(&svc->device_id_map, device_id);
return ret;
}
@@ -178,7 +177,7 @@ static void gb_interface_route_destroy(struct gb_interface *intf)
return;
gb_svc_route_destroy(svc, svc->ap_intf_id, intf->interface_id);
- ida_simple_remove(&svc->device_id_map, intf->device_id);
+ ida_free(&svc->device_id_map, intf->device_id);
intf->device_id = GB_INTERFACE_DEVICE_ID_BAD;
}
@@ -765,7 +764,7 @@ static const struct dev_pm_ops gb_interface_pm_ops = {
gb_interface_runtime_idle)
};
-struct device_type greybus_interface_type = {
+const struct device_type greybus_interface_type = {
.name = "greybus_interface",
.release = gb_interface_release,
.pm = &gb_interface_pm_ops,
diff --git a/drivers/greybus/module.c b/drivers/greybus/module.c
index 36f77f9e1d74..7f7153a1dd60 100644
--- a/drivers/greybus/module.c
+++ b/drivers/greybus/module.c
@@ -81,7 +81,7 @@ static void gb_module_release(struct device *dev)
kfree(module);
}
-struct device_type greybus_module_type = {
+const struct device_type greybus_module_type = {
.name = "greybus_module",
.release = gb_module_release,
};
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index 0d7e749174a4..4256467fcd35 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -1305,7 +1305,7 @@ static void gb_svc_release(struct device *dev)
kfree(svc);
}
-struct device_type greybus_svc_type = {
+const struct device_type greybus_svc_type = {
.name = "greybus_svc",
.release = gb_svc_release,
};
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 00242107d62e..862c47b191af 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -16,6 +16,7 @@ config HYPERV
config HYPERV_VTL_MODE
bool "Enable Linux to boot in VTL context"
depends on X86_64 && HYPERV
+ depends on SMP
default n
help
Virtual Secure Mode (VSM) is a set of hypervisor capabilities and
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 51e5018ac9b2..a8ad728354cb 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -270,7 +270,7 @@ void hv_synic_enable_regs(unsigned int cpu)
union hv_synic_scontrol sctrl;
/* Setup the Synic's message page */
- simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
simp.simp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
@@ -286,10 +286,10 @@ void hv_synic_enable_regs(unsigned int cpu)
>> HV_HYP_PAGE_SHIFT;
}
- hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+ hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
- siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 1;
if (ms_hyperv.paravisor_present || hv_root_partition) {
@@ -305,13 +305,12 @@ void hv_synic_enable_regs(unsigned int cpu)
>> HV_HYP_PAGE_SHIFT;
}
- hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+ hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
/* Setup the shared SINT. */
if (vmbus_irq != -1)
enable_percpu_irq(vmbus_irq, 0);
- shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
- VMBUS_MESSAGE_SINT);
+ shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.vector = vmbus_interrupt;
shared_sint.masked = false;
@@ -326,14 +325,13 @@ void hv_synic_enable_regs(unsigned int cpu)
#else
shared_sint.auto_eoi = 0;
#endif
- hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
/* Enable the global synic bit */
- sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 1;
- hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+ hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
}
int hv_synic_init(unsigned int cpu)
@@ -357,17 +355,15 @@ void hv_synic_disable_regs(unsigned int cpu)
union hv_synic_siefp siefp;
union hv_synic_scontrol sctrl;
- shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
- VMBUS_MESSAGE_SINT);
+ shared_sint.as_uint64 = hv_get_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT);
shared_sint.masked = 1;
/* Need to correctly cleanup in the case of SMP!!! */
/* Disable the interrupt */
- hv_set_register(HV_REGISTER_SINT0 + VMBUS_MESSAGE_SINT,
- shared_sint.as_uint64);
+ hv_set_msr(HV_MSR_SINT0 + VMBUS_MESSAGE_SINT, shared_sint.as_uint64);
- simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ simp.as_uint64 = hv_get_msr(HV_MSR_SIMP);
/*
* In Isolation VM, sim and sief pages are allocated by
* paravisor. These pages also will be used by kdump
@@ -382,9 +378,9 @@ void hv_synic_disable_regs(unsigned int cpu)
simp.base_simp_gpa = 0;
}
- hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
+ hv_set_msr(HV_MSR_SIMP, simp.as_uint64);
- siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
+ siefp.as_uint64 = hv_get_msr(HV_MSR_SIEFP);
siefp.siefp_enabled = 0;
if (ms_hyperv.paravisor_present || hv_root_partition) {
@@ -394,12 +390,12 @@ void hv_synic_disable_regs(unsigned int cpu)
siefp.base_siefp_gpa = 0;
}
- hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
+ hv_set_msr(HV_MSR_SIEFP, siefp.as_uint64);
/* Disable the global synic bit */
- sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
+ sctrl.as_uint64 = hv_get_msr(HV_MSR_SCONTROL);
sctrl.enable = 0;
- hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+ hv_set_msr(HV_MSR_SCONTROL, sctrl.as_uint64);
if (vmbus_irq != -1)
disable_percpu_irq(vmbus_irq);
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index 0285a74363b3..dde3f9b6871a 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -20,8 +20,11 @@
#include <linux/sched/task_stack.h>
#include <linux/panic_notifier.h>
#include <linux/ptrace.h>
+#include <linux/random.h>
+#include <linux/efi.h>
#include <linux/kdebug.h>
#include <linux/kmsg_dump.h>
+#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/set_memory.h>
@@ -227,19 +230,19 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* contain the size of the panic data in that page. Rest of the
* registers are no-op when the NOTIFY_MSG flag is set.
*/
- hv_set_register(HV_REGISTER_CRASH_P0, 0);
- hv_set_register(HV_REGISTER_CRASH_P1, 0);
- hv_set_register(HV_REGISTER_CRASH_P2, 0);
- hv_set_register(HV_REGISTER_CRASH_P3, virt_to_phys(hv_panic_page));
- hv_set_register(HV_REGISTER_CRASH_P4, bytes_written);
+ hv_set_msr(HV_MSR_CRASH_P0, 0);
+ hv_set_msr(HV_MSR_CRASH_P1, 0);
+ hv_set_msr(HV_MSR_CRASH_P2, 0);
+ hv_set_msr(HV_MSR_CRASH_P3, virt_to_phys(hv_panic_page));
+ hv_set_msr(HV_MSR_CRASH_P4, bytes_written);
/*
* Let Hyper-V know there is crash data available along with
* the panic message.
*/
- hv_set_register(HV_REGISTER_CRASH_CTL,
- (HV_CRASH_CTL_CRASH_NOTIFY |
- HV_CRASH_CTL_CRASH_NOTIFY_MSG));
+ hv_set_msr(HV_MSR_CRASH_CTL,
+ (HV_CRASH_CTL_CRASH_NOTIFY |
+ HV_CRASH_CTL_CRASH_NOTIFY_MSG));
}
static struct kmsg_dumper hv_kmsg_dumper = {
@@ -278,6 +281,14 @@ static void hv_kmsg_dump_register(void)
int __init hv_common_init(void)
{
int i;
+ union hv_hypervisor_version_info version;
+
+ /* Get information about the Hyper-V host version */
+ if (!hv_get_hypervisor_version(&version))
+ pr_info("Hyper-V: Host Build %d.%d.%d.%d-%d-%d\n",
+ version.major_version, version.minor_version,
+ version.build_number, version.service_number,
+ version.service_pack, version.service_branch);
if (hv_is_isolation_supported())
sysctl_record_panic_msg = 0;
@@ -310,7 +321,7 @@ int __init hv_common_init(void)
* Register for panic kmsg callback only if the right
* capability is supported by the hypervisor.
*/
- hyperv_crash_ctl = hv_get_register(HV_REGISTER_CRASH_CTL);
+ hyperv_crash_ctl = hv_get_msr(HV_MSR_CRASH_CTL);
if (hyperv_crash_ctl & HV_CRASH_CTL_CRASH_NOTIFY_MSG)
hv_kmsg_dump_register();
@@ -347,6 +358,72 @@ int __init hv_common_init(void)
return 0;
}
+void __init ms_hyperv_late_init(void)
+{
+ struct acpi_table_header *header;
+ acpi_status status;
+ u8 *randomdata;
+ u32 length, i;
+
+ /*
+ * Seed the Linux random number generator with entropy provided by
+ * the Hyper-V host in ACPI table OEM0.
+ */
+ if (!IS_ENABLED(CONFIG_ACPI))
+ return;
+
+ status = acpi_get_table("OEM0", 0, &header);
+ if (ACPI_FAILURE(status) || !header)
+ return;
+
+ /*
+ * Since the "OEM0" table name is for OEM specific usage, verify
+ * that what we're seeing purports to be from Microsoft.
+ */
+ if (strncmp(header->oem_table_id, "MICROSFT", 8))
+ goto error;
+
+ /*
+ * Ensure the length is reasonable. Requiring at least 8 bytes and
+ * no more than 4K bytes is somewhat arbitrary and just protects
+ * against a malformed table. Hyper-V currently provides 64 bytes,
+ * but allow for a change in a later version.
+ */
+ if (header->length < sizeof(*header) + 8 ||
+ header->length > sizeof(*header) + SZ_4K)
+ goto error;
+
+ length = header->length - sizeof(*header);
+ randomdata = (u8 *)(header + 1);
+
+ pr_debug("Hyper-V: Seeding rng with %d random bytes from ACPI table OEM0\n",
+ length);
+
+ add_bootloader_randomness(randomdata, length);
+
+ /*
+ * To prevent the seed data from being visible in /sys/firmware/acpi,
+ * zero out the random data in the ACPI table and fix up the checksum.
+ * The zeroing is done out of an abundance of caution to avoid
+ * potential security risks to the rng. Similarly, reset the table
+ * length to just the header size so that a subsequent kexec doesn't
+ * try to use the zeroed-out random data.
+ */
+ for (i = 0; i < length; i++) {
+ header->checksum += randomdata[i];
+ randomdata[i] = 0;
+ }
+
+ for (i = 0; i < sizeof(header->length); i++)
+ header->checksum += ((u8 *)&header->length)[i];
+ header->length = sizeof(*header);
+ for (i = 0; i < sizeof(header->length); i++)
+ header->checksum -= ((u8 *)&header->length)[i];
+
+error:
+ acpi_put_table(header);
+}
+
/*
* Hyper-V specific initialization and die code for
* individual CPUs that is common across all architectures.
@@ -409,7 +486,7 @@ int hv_common_cpu_init(unsigned int cpu)
*inputarg = mem;
}
- msr_vp_index = hv_get_register(HV_REGISTER_VP_INDEX);
+ msr_vp_index = hv_get_msr(HV_MSR_VP_INDEX);
hv_vp_index[cpu] = msr_vp_index;
@@ -506,7 +583,7 @@ EXPORT_SYMBOL_GPL(hv_is_hibernation_supported);
*/
static u64 __hv_read_ref_counter(void)
{
- return hv_get_register(HV_REGISTER_TIME_REF_COUNT);
+ return hv_get_msr(HV_MSR_TIME_REF_COUNT);
}
u64 (*hv_read_reference_counter)(void) = __hv_read_ref_counter;
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 7f7965f3d187..4cb17603a828 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2359,10 +2359,9 @@ static int vmbus_platform_driver_probe(struct platform_device *pdev)
return vmbus_acpi_add(pdev);
}
-static int vmbus_platform_driver_remove(struct platform_device *pdev)
+static void vmbus_platform_driver_remove(struct platform_device *pdev)
{
vmbus_mmio_remove();
- return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -2542,7 +2541,7 @@ static const struct dev_pm_ops vmbus_bus_pm = {
static struct platform_driver vmbus_platform_driver = {
.probe = vmbus_platform_driver_probe,
- .remove = vmbus_platform_driver_remove,
+ .remove_new = vmbus_platform_driver_remove,
.driver = {
.name = "vmbus",
.acpi_match_table = ACPI_PTR(vmbus_acpi_device_ids),
diff --git a/drivers/hwspinlock/omap_hwspinlock.c b/drivers/hwspinlock/omap_hwspinlock.c
index a9fd9ca45f2a..27b47b8623c0 100644
--- a/drivers/hwspinlock/omap_hwspinlock.c
+++ b/drivers/hwspinlock/omap_hwspinlock.c
@@ -74,17 +74,12 @@ static const struct hwspinlock_ops omap_hwspinlock_ops = {
static int omap_hwspinlock_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct hwspinlock_device *bank;
- struct hwspinlock *hwlock;
void __iomem *io_base;
int num_locks, i, ret;
/* Only a single hwspinlock block device is supported */
int base_id = 0;
- if (!node)
- return -ENODEV;
-
io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
@@ -93,10 +88,10 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
* make sure the module is enabled and clocked before reading
* the module SYSSTATUS register
*/
- pm_runtime_enable(&pdev->dev);
+ devm_pm_runtime_enable(&pdev->dev);
ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0)
- goto runtime_err;
+ return ret;
/* Determine number of locks */
i = readl(io_base + SYSSTATUS_OFFSET);
@@ -108,55 +103,24 @@ static int omap_hwspinlock_probe(struct platform_device *pdev)
*/
ret = pm_runtime_put(&pdev->dev);
if (ret < 0)
- goto runtime_err;
+ return ret;
/* one of the four lsb's must be set, and nothing else */
- if (hweight_long(i & 0xf) != 1 || i > 8) {
- ret = -EINVAL;
- goto runtime_err;
- }
+ if (hweight_long(i & 0xf) != 1 || i > 8)
+ return -EINVAL;
num_locks = i * 32; /* actual number of locks in this device */
bank = devm_kzalloc(&pdev->dev, struct_size(bank, lock, num_locks),
GFP_KERNEL);
- if (!bank) {
- ret = -ENOMEM;
- goto runtime_err;
- }
-
- platform_set_drvdata(pdev, bank);
+ if (!bank)
+ return -ENOMEM;
- for (i = 0, hwlock = &bank->lock[0]; i < num_locks; i++, hwlock++)
- hwlock->priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
+ for (i = 0; i < num_locks; i++)
+ bank->lock[i].priv = io_base + LOCK_BASE_OFFSET + sizeof(u32) * i;
- ret = hwspin_lock_register(bank, &pdev->dev, &omap_hwspinlock_ops,
+ return devm_hwspin_lock_register(&pdev->dev, bank, &omap_hwspinlock_ops,
base_id, num_locks);
- if (ret)
- goto runtime_err;
-
- dev_dbg(&pdev->dev, "Registered %d locks with HwSpinlock core\n",
- num_locks);
-
- return 0;
-
-runtime_err:
- pm_runtime_disable(&pdev->dev);
- return ret;
-}
-
-static void omap_hwspinlock_remove(struct platform_device *pdev)
-{
- struct hwspinlock_device *bank = platform_get_drvdata(pdev);
- int ret;
-
- ret = hwspin_lock_unregister(bank);
- if (ret) {
- dev_err(&pdev->dev, "%s failed: %d\n", __func__, ret);
- return;
- }
-
- pm_runtime_disable(&pdev->dev);
}
static const struct of_device_id omap_hwspinlock_of_match[] = {
@@ -169,7 +133,6 @@ MODULE_DEVICE_TABLE(of, omap_hwspinlock_of_match);
static struct platform_driver omap_hwspinlock_driver = {
.probe = omap_hwspinlock_probe,
- .remove_new = omap_hwspinlock_remove,
.driver = {
.name = "omap_hwspinlock",
.of_match_table = omap_hwspinlock_of_match,
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index 995d3b2c76df..4ba478211b31 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -2,6 +2,26 @@
#
# Makefile for CoreSight drivers.
#
+
+# Current W=1 warnings
+subdir-ccflags-y += -Wextra -Wunused -Wno-unused-parameter
+subdir-ccflags-y += -Wmissing-declarations
+subdir-ccflags-y += -Wmissing-format-attribute
+subdir-ccflags-y += -Wmissing-prototypes
+subdir-ccflags-y += -Wold-style-definition
+subdir-ccflags-y += -Wmissing-include-dirs
+subdir-ccflags-y += -Wno-sign-compare
+condflags := \
+ $(call cc-option, -Wrestrict) \
+ $(call cc-option, -Wunused-but-set-variable) \
+ $(call cc-option, -Wunused-const-variable) \
+ $(call cc-option, -Wpacked-not-aligned) \
+ $(call cc-option, -Wformat-overflow) \
+ $(call cc-option, -Wformat-truncation) \
+ $(call cc-option, -Wstringop-overflow) \
+ $(call cc-option, -Wstringop-truncation)
+subdir-ccflags-y += $(condflags)
+
obj-$(CONFIG_CORESIGHT) += coresight.o
coresight-y := coresight-core.o coresight-etm-perf.o coresight-platform.o \
coresight-sysfs.o coresight-syscfg.o coresight-config.o \
diff --git a/drivers/hwtracing/coresight/coresight-cfg-afdo.c b/drivers/hwtracing/coresight/coresight-cfg-afdo.c
index 84b31184252b..e794f2e145fa 100644
--- a/drivers/hwtracing/coresight/coresight-cfg-afdo.c
+++ b/drivers/hwtracing/coresight/coresight-cfg-afdo.c
@@ -9,6 +9,7 @@
/* ETMv4 includes and features */
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
#include "coresight-etm4x-cfg.h"
+#include "coresight-cfg-preload.h"
/* preload configurations and features */
diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c
index d7f0e231feb9..b83613e34289 100644
--- a/drivers/hwtracing/coresight/coresight-core.c
+++ b/drivers/hwtracing/coresight/coresight-core.c
@@ -9,7 +9,6 @@
#include <linux/types.h>
#include <linux/device.h>
#include <linux/io.h>
-#include <linux/idr.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -25,15 +24,12 @@
#include "coresight-priv.h"
#include "coresight-syscfg.h"
-static DEFINE_MUTEX(coresight_mutex);
-static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
-
/*
- * Use IDR to map the hash of the source's device name
- * to the pointer of path for the source. The idr is for
- * the sources which aren't associated with CPU.
+ * Mutex used to lock all sysfs enable and disable actions and the
+ * loading and unloading of devices by the Coresight core.
*/
-static DEFINE_IDR(path_idr);
+DEFINE_MUTEX(coresight_mutex);
+static DEFINE_PER_CPU(struct coresight_device *, csdev_sink);
/**
* struct coresight_node - elements of a path, from source to sink
@@ -46,12 +42,6 @@ struct coresight_node {
};
/*
- * When operating Coresight drivers from the sysFS interface, only a single
- * path can exist from a tracer (associated to a CPU) to a sink.
- */
-static DEFINE_PER_CPU(struct list_head *, tracer_path);
-
-/*
* When losing synchronisation a new barrier packet needs to be inserted at the
* beginning of the data collected in a buffer. That way the decoder knows that
* it needs to look for another sync sequence.
@@ -61,34 +51,6 @@ EXPORT_SYMBOL_GPL(coresight_barrier_pkt);
static const struct cti_assoc_op *cti_assoc_ops;
-ssize_t coresight_simple_show_pair(struct device *_dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
- struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
- u64 val;
-
- pm_runtime_get_sync(_dev->parent);
- val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
- pm_runtime_put_sync(_dev->parent);
- return sysfs_emit(buf, "0x%llx\n", val);
-}
-EXPORT_SYMBOL_GPL(coresight_simple_show_pair);
-
-ssize_t coresight_simple_show32(struct device *_dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
- struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
- u64 val;
-
- pm_runtime_get_sync(_dev->parent);
- val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
- pm_runtime_put_sync(_dev->parent);
- return sysfs_emit(buf, "0x%llx\n", val);
-}
-EXPORT_SYMBOL_GPL(coresight_simple_show32);
-
void coresight_set_cti_ops(const struct cti_assoc_op *cti_op)
{
cti_assoc_ops = cti_op;
@@ -279,42 +241,18 @@ EXPORT_SYMBOL_GPL(coresight_add_helper);
static int coresight_enable_sink(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
- int ret;
-
- /*
- * We need to make sure the "new" session is compatible with the
- * existing "mode" of operation.
- */
- if (!sink_ops(csdev)->enable)
- return -EINVAL;
-
- ret = sink_ops(csdev)->enable(csdev, mode, data);
- if (ret)
- return ret;
-
- csdev->enable = true;
-
- return 0;
+ return sink_ops(csdev)->enable(csdev, mode, data);
}
static void coresight_disable_sink(struct coresight_device *csdev)
{
- int ret;
-
- if (!sink_ops(csdev)->disable)
- return;
-
- ret = sink_ops(csdev)->disable(csdev);
- if (ret)
- return;
- csdev->enable = false;
+ sink_ops(csdev)->disable(csdev);
}
static int coresight_enable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int ret = 0;
int link_subtype;
struct coresight_connection *inconn, *outconn;
@@ -330,21 +268,13 @@ static int coresight_enable_link(struct coresight_device *csdev,
if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT && IS_ERR(outconn))
return PTR_ERR(outconn);
- if (link_ops(csdev)->enable) {
- ret = link_ops(csdev)->enable(csdev, inconn, outconn);
- if (!ret)
- csdev->enable = true;
- }
-
- return ret;
+ return link_ops(csdev)->enable(csdev, inconn, outconn);
}
static void coresight_disable_link(struct coresight_device *csdev,
struct coresight_device *parent,
struct coresight_device *child)
{
- int i;
- int link_subtype;
struct coresight_connection *inconn, *outconn;
if (!parent || !child)
@@ -352,49 +282,9 @@ static void coresight_disable_link(struct coresight_device *csdev,
inconn = coresight_find_out_connection(parent, csdev);
outconn = coresight_find_out_connection(csdev, child);
- link_subtype = csdev->subtype.link_subtype;
- if (link_ops(csdev)->disable) {
- link_ops(csdev)->disable(csdev, inconn, outconn);
- }
-
- if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
- for (i = 0; i < csdev->pdata->nr_inconns; i++)
- if (atomic_read(&csdev->pdata->in_conns[i]->dest_refcnt) !=
- 0)
- return;
- } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
- for (i = 0; i < csdev->pdata->nr_outconns; i++)
- if (atomic_read(&csdev->pdata->out_conns[i]->src_refcnt) !=
- 0)
- return;
- } else {
- if (atomic_read(&csdev->refcnt) != 0)
- return;
- }
-
- csdev->enable = false;
-}
-
-int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
- void *data)
-{
- int ret;
-
- if (!csdev->enable) {
- if (source_ops(csdev)->enable) {
- ret = source_ops(csdev)->enable(csdev, data, mode);
- if (ret)
- return ret;
- }
- csdev->enable = true;
- }
-
- atomic_inc(&csdev->refcnt);
-
- return 0;
+ link_ops(csdev)->disable(csdev, inconn, outconn);
}
-EXPORT_SYMBOL_GPL(coresight_enable_source);
static bool coresight_is_helper(struct coresight_device *csdev)
{
@@ -404,29 +294,12 @@ static bool coresight_is_helper(struct coresight_device *csdev)
static int coresight_enable_helper(struct coresight_device *csdev,
enum cs_mode mode, void *data)
{
- int ret;
-
- if (!helper_ops(csdev)->enable)
- return 0;
- ret = helper_ops(csdev)->enable(csdev, mode, data);
- if (ret)
- return ret;
-
- csdev->enable = true;
- return 0;
+ return helper_ops(csdev)->enable(csdev, mode, data);
}
static void coresight_disable_helper(struct coresight_device *csdev)
{
- int ret;
-
- if (!helper_ops(csdev)->disable)
- return;
-
- ret = helper_ops(csdev)->disable(csdev, NULL);
- if (ret)
- return;
- csdev->enable = false;
+ helper_ops(csdev)->disable(csdev, NULL);
}
static void coresight_disable_helpers(struct coresight_device *csdev)
@@ -441,25 +314,20 @@ static void coresight_disable_helpers(struct coresight_device *csdev)
}
}
-/**
- * coresight_disable_source - Drop the reference count by 1 and disable
- * the device if there are no users left.
- *
- * @csdev: The coresight device to disable
- * @data: Opaque data to pass on to the disable function of the source device.
- * For example in perf mode this is a pointer to the struct perf_event.
+/*
+ * Helper function to call source_ops(csdev)->disable and also disable the
+ * helpers.
*
- * Returns true if the device has been disabled.
+ * There is an imbalance between coresight_enable_path() and
+ * coresight_disable_path(). Enabling also enables the source's helpers as part
+ * of the path, but disabling always skips the first item in the path (which is
+ * the source), so sources and their helpers don't get disabled as part of that
+ * function and we need the extra step here.
*/
-bool coresight_disable_source(struct coresight_device *csdev, void *data)
+void coresight_disable_source(struct coresight_device *csdev, void *data)
{
- if (atomic_dec_return(&csdev->refcnt) == 0) {
- if (source_ops(csdev)->disable)
- source_ops(csdev)->disable(csdev, data);
- coresight_disable_helpers(csdev);
- csdev->enable = false;
- }
- return !csdev->enable;
+ source_ops(csdev)->disable(csdev, data);
+ coresight_disable_helpers(csdev);
}
EXPORT_SYMBOL_GPL(coresight_disable_source);
@@ -484,7 +352,7 @@ static void coresight_disable_path_from(struct list_head *path,
/*
* ETF devices are tricky... They can be a link or a sink,
* depending on how they are configured. If an ETF has been
- * "activated" it will be configured as a sink, otherwise
+ * selected as a sink it will be configured as a sink, otherwise
* go ahead with the link configuration.
*/
if (type == CORESIGHT_DEV_TYPE_LINKSINK)
@@ -562,7 +430,7 @@ int coresight_enable_path(struct list_head *path, enum cs_mode mode,
/*
* ETF devices are tricky... They can be a link or a sink,
* depending on how they are configured. If an ETF has been
- * "activated" it will be configured as a sink, otherwise
+ * selected as a sink it will be configured as a sink, otherwise
* go ahead with the link configuration.
*/
if (type == CORESIGHT_DEV_TYPE_LINKSINK)
@@ -619,48 +487,6 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
-static struct coresight_device *
-coresight_find_enabled_sink(struct coresight_device *csdev)
-{
- int i;
- struct coresight_device *sink = NULL;
-
- if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
- csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
- csdev->activated)
- return csdev;
-
- /*
- * Recursively explore each port found on this element.
- */
- for (i = 0; i < csdev->pdata->nr_outconns; i++) {
- struct coresight_device *child_dev;
-
- child_dev = csdev->pdata->out_conns[i]->dest_dev;
- if (child_dev)
- sink = coresight_find_enabled_sink(child_dev);
- if (sink)
- return sink;
- }
-
- return NULL;
-}
-
-/**
- * coresight_get_enabled_sink - returns the first enabled sink using
- * connection based search starting from the source reference
- *
- * @source: Coresight source device reference
- */
-struct coresight_device *
-coresight_get_enabled_sink(struct coresight_device *source)
-{
- if (!source)
- return NULL;
-
- return coresight_find_enabled_sink(source);
-}
-
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -794,11 +620,10 @@ static void coresight_drop_device(struct coresight_device *csdev)
* @sink: The final sink we want in this path.
* @path: The list to add devices to.
*
- * The tree of Coresight device is traversed until an activated sink is
- * found. From there the sink is added to the list along with all the
- * devices that led to that point - the end result is a list from source
- * to sink. In that list the source is the first device and the sink the
- * last one.
+ * The tree of Coresight devices is traversed until @sink is found.
+ * From there the sink is added to the list along with all the devices that led
+ * to that point - the end result is a list from source to sink. In that list
+ * the source is the first device and the sink the last one.
*/
static int _coresight_build_path(struct coresight_device *csdev,
struct coresight_device *sink,
@@ -808,7 +633,7 @@ static int _coresight_build_path(struct coresight_device *csdev,
bool found = false;
struct coresight_node *node;
- /* An activated sink has been found. Enqueue the element */
+ /* The sink has been found. Enqueue the element */
if (csdev == sink)
goto out;
@@ -1072,269 +897,6 @@ static void coresight_clear_default_sink(struct coresight_device *csdev)
}
}
-/** coresight_validate_source - make sure a source has the right credentials
- * @csdev: the device structure for a source.
- * @function: the function this was called from.
- *
- * Assumes the coresight_mutex is held.
- */
-static int coresight_validate_source(struct coresight_device *csdev,
- const char *function)
-{
- u32 type, subtype;
-
- type = csdev->type;
- subtype = csdev->subtype.source_subtype;
-
- if (type != CORESIGHT_DEV_TYPE_SOURCE) {
- dev_err(&csdev->dev, "wrong device type in %s\n", function);
- return -EINVAL;
- }
-
- if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM &&
- subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
- dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
- return -EINVAL;
- }
-
- return 0;
-}
-
-int coresight_enable(struct coresight_device *csdev)
-{
- int cpu, ret = 0;
- struct coresight_device *sink;
- struct list_head *path;
- enum coresight_dev_subtype_source subtype;
- u32 hash;
-
- subtype = csdev->subtype.source_subtype;
-
- mutex_lock(&coresight_mutex);
-
- ret = coresight_validate_source(csdev, __func__);
- if (ret)
- goto out;
-
- if (csdev->enable) {
- /*
- * There could be multiple applications driving the software
- * source. So keep the refcount for each such user when the
- * source is already enabled.
- */
- if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
- atomic_inc(&csdev->refcnt);
- goto out;
- }
-
- sink = coresight_get_enabled_sink(csdev);
- if (!sink) {
- ret = -EINVAL;
- goto out;
- }
-
- path = coresight_build_path(csdev, sink);
- if (IS_ERR(path)) {
- pr_err("building path(s) failed\n");
- ret = PTR_ERR(path);
- goto out;
- }
-
- ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
- if (ret)
- goto err_path;
-
- ret = coresight_enable_source(csdev, CS_MODE_SYSFS, NULL);
- if (ret)
- goto err_source;
-
- switch (subtype) {
- case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
- /*
- * When working from sysFS it is important to keep track
- * of the paths that were created so that they can be
- * undone in 'coresight_disable()'. Since there can only
- * be a single session per tracer (when working from sysFS)
- * a per-cpu variable will do just fine.
- */
- cpu = source_ops(csdev)->cpu_id(csdev);
- per_cpu(tracer_path, cpu) = path;
- break;
- case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
- /*
- * Use the hash of source's device name as ID
- * and map the ID to the pointer of the path.
- */
- hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
- ret = idr_alloc_u32(&path_idr, path, &hash, hash, GFP_KERNEL);
- if (ret)
- goto err_source;
- break;
- default:
- /* We can't be here */
- break;
- }
-
-out:
- mutex_unlock(&coresight_mutex);
- return ret;
-
-err_source:
- coresight_disable_path(path);
-
-err_path:
- coresight_release_path(path);
- goto out;
-}
-EXPORT_SYMBOL_GPL(coresight_enable);
-
-void coresight_disable(struct coresight_device *csdev)
-{
- int cpu, ret;
- struct list_head *path = NULL;
- u32 hash;
-
- mutex_lock(&coresight_mutex);
-
- ret = coresight_validate_source(csdev, __func__);
- if (ret)
- goto out;
-
- if (!csdev->enable || !coresight_disable_source(csdev, NULL))
- goto out;
-
- switch (csdev->subtype.source_subtype) {
- case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
- cpu = source_ops(csdev)->cpu_id(csdev);
- path = per_cpu(tracer_path, cpu);
- per_cpu(tracer_path, cpu) = NULL;
- break;
- case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
- case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
- hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
- /* Find the path by the hash. */
- path = idr_find(&path_idr, hash);
- if (path == NULL) {
- pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
- goto out;
- }
- idr_remove(&path_idr, hash);
- break;
- default:
- /* We can't be here */
- break;
- }
-
- coresight_disable_path(path);
- coresight_release_path(path);
-
-out:
- mutex_unlock(&coresight_mutex);
-}
-EXPORT_SYMBOL_GPL(coresight_disable);
-
-static ssize_t enable_sink_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = to_coresight_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated);
-}
-
-static ssize_t enable_sink_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret;
- unsigned long val;
- struct coresight_device *csdev = to_coresight_device(dev);
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- if (val)
- csdev->activated = true;
- else
- csdev->activated = false;
-
- return size;
-
-}
-static DEVICE_ATTR_RW(enable_sink);
-
-static ssize_t enable_source_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct coresight_device *csdev = to_coresight_device(dev);
-
- return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable);
-}
-
-static ssize_t enable_source_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- int ret = 0;
- unsigned long val;
- struct coresight_device *csdev = to_coresight_device(dev);
-
- ret = kstrtoul(buf, 10, &val);
- if (ret)
- return ret;
-
- if (val) {
- ret = coresight_enable(csdev);
- if (ret)
- return ret;
- } else {
- coresight_disable(csdev);
- }
-
- return size;
-}
-static DEVICE_ATTR_RW(enable_source);
-
-static struct attribute *coresight_sink_attrs[] = {
- &dev_attr_enable_sink.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(coresight_sink);
-
-static struct attribute *coresight_source_attrs[] = {
- &dev_attr_enable_source.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(coresight_source);
-
-static struct device_type coresight_dev_type[] = {
- {
- .name = "sink",
- .groups = coresight_sink_groups,
- },
- {
- .name = "link",
- },
- {
- .name = "linksink",
- .groups = coresight_sink_groups,
- },
- {
- .name = "source",
- .groups = coresight_source_groups,
- },
- {
- .name = "helper",
- }
-};
-/* Ensure the enum matches the names and groups */
-static_assert(ARRAY_SIZE(coresight_dev_type) == CORESIGHT_DEV_TYPE_MAX);
-
static void coresight_device_release(struct device *dev)
{
struct coresight_device *csdev = to_coresight_device(dev);
@@ -1799,7 +1361,7 @@ done:
}
EXPORT_SYMBOL_GPL(coresight_alloc_device_name);
-struct bus_type coresight_bustype = {
+const struct bus_type coresight_bustype = {
.name = "coresight",
};
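
The hunks above drop the per-call NULL checks on the sink/link/helper enable and disable callbacks, on the premise that those callbacks are guaranteed to exist by the time a device is used (presumably verified when the device is registered, which is outside this excerpt). A minimal standalone C sketch of that design choice - validate the op table once at registration so the fast-path wrappers can forward unconditionally - using invented names (sink_ops_t, register_sink, sink_enable) rather than the kernel's API:

#include <stdio.h>

/* Hypothetical op table: both callbacks are treated as mandatory. */
typedef struct {
	int  (*enable)(void *dev);
	void (*disable)(void *dev);
} sink_ops_t;

static int demo_enable(void *dev)   { printf("enable  %p\n", dev); return 0; }
static void demo_disable(void *dev) { printf("disable %p\n", dev); }

/* Validate the table once, at registration time... */
static int register_sink(const sink_ops_t *ops)
{
	if (!ops || !ops->enable || !ops->disable)
		return -1;	/* reject incomplete op tables up front */
	return 0;
}

/* ...so the hot-path wrappers can forward without NULL checks. */
static int sink_enable(const sink_ops_t *ops, void *dev)
{
	return ops->enable(dev);
}

static void sink_disable(const sink_ops_t *ops, void *dev)
{
	ops->disable(dev);
}

int main(void)
{
	sink_ops_t ops = { .enable = demo_enable, .disable = demo_disable };
	int dummy;

	if (register_sink(&ops) == 0 && sink_enable(&ops, &dummy) == 0)
		sink_disable(&ops, &dummy);
	return 0;
}

The trade-off is that a missing callback becomes a registration-time error instead of a silent no-op on the enable path.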
diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c
index 3999d0a2cb60..e805617020d0 100644
--- a/drivers/hwtracing/coresight/coresight-cti-core.c
+++ b/drivers/hwtracing/coresight/coresight-cti-core.c
@@ -974,7 +974,7 @@ static const struct amba_id cti_ids[] = {
CS_AMBA_ID(0x000bb9aa), /* CTI - C-A73 */
CS_AMBA_UCI_ID(0x000bb9da, uci_id_cti), /* CTI - C-A35 */
CS_AMBA_UCI_ID(0x000bb9ed, uci_id_cti), /* Coresight CTI (SoC 600) */
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, cti_ids);
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index fa80039e0821..3aab182b562f 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -76,7 +76,6 @@ DEFINE_CORESIGHT_DEVLIST(etb_devs, "etb");
* @pid: Process ID of the process being monitored by the session
* that is using this component.
* @buf: area of memory where ETB buffer content gets sent.
- * @mode: this ETB is being used.
* @buffer_depth: size of @buf.
* @trigger_cntr: amount of words to store after a trigger.
*/
@@ -89,7 +88,6 @@ struct etb_drvdata {
local_t reading;
pid_t pid;
u8 *buf;
- u32 mode;
u32 buffer_depth;
u32 trigger_cntr;
};
@@ -150,20 +148,20 @@ static int etb_enable_sysfs(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't mess up perf sessions. */
- if (drvdata->mode == CS_MODE_PERF) {
+ if (coresight_get_mode(csdev) == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
- if (drvdata->mode == CS_MODE_DISABLED) {
+ if (coresight_get_mode(csdev) == CS_MODE_DISABLED) {
ret = etb_enable_hw(drvdata);
if (ret)
goto out;
- drvdata->mode = CS_MODE_SYSFS;
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
}
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
out:
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return ret;
@@ -181,7 +179,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* No need to continue if the component is already in use by sysFS. */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
ret = -EBUSY;
goto out;
}
@@ -199,7 +197,7 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
goto out;
}
@@ -216,8 +214,8 @@ static int etb_enable_perf(struct coresight_device *csdev, void *data)
if (!ret) {
/* Associate with monitored process. */
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(drvdata->csdev, CS_MODE_PERF);
+ csdev->refcnt++;
}
out:
@@ -356,17 +354,18 @@ static int etb_disable(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (atomic_dec_return(&csdev->refcnt)) {
+ csdev->refcnt--;
+ if (csdev->refcnt) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
dev_dbg(&csdev->dev, "ETB disabled\n");
@@ -447,7 +446,7 @@ static unsigned long etb_update_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(&csdev->refcnt) != 1)
+ if (csdev->refcnt != 1)
goto out;
__etb_disable_hw(drvdata);
@@ -589,7 +588,7 @@ static void etb_dump(struct etb_drvdata *drvdata)
unsigned long flags;
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
__etb_disable_hw(drvdata);
etb_dump_hw(drvdata);
__etb_enable_hw(drvdata);
@@ -837,7 +836,7 @@ static const struct amba_id etb_ids[] = {
.id = 0x000bb907,
.mask = 0x000fffff,
},
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, etb_ids);
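
A recurring change in this ETB hunk (and in the TMC hunks further down) is replacing atomic_inc()/atomic_dec_return() on csdev->refcnt with a plain integer, which is safe only because every access already runs under the driver's spinlock. A rough user-space sketch of the same enable-once/disable-at-zero pattern, with a pthread mutex standing in for the spinlock and invented names (sink_ctx, sink_get, sink_put):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sink_ctx {
	pthread_mutex_t lock;
	int refcnt;		/* plain int: only touched under the lock */
	bool hw_enabled;
};

static void hw_enable(struct sink_ctx *s)  { s->hw_enabled = true; }
static void hw_disable(struct sink_ctx *s) { s->hw_enabled = false; }

/* First user enables the hardware; later users only bump the count. */
static void sink_get(struct sink_ctx *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->refcnt++ == 0)
		hw_enable(s);
	pthread_mutex_unlock(&s->lock);
}

/* Last user disables the hardware. */
static void sink_put(struct sink_ctx *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->refcnt == 0)
		hw_disable(s);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct sink_ctx s = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	sink_get(&s);
	sink_get(&s);
	sink_put(&s);
	printf("enabled=%d refcnt=%d\n", s.hw_enabled, s.refcnt); /* still enabled */
	sink_put(&s);
	printf("enabled=%d refcnt=%d\n", s.hw_enabled, s.refcnt); /* now disabled */
	return 0;
}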
diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index a52cfcce25d6..c0c60e6a1703 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -589,7 +589,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
return;
/* stop tracer */
- source_ops(csdev)->disable(csdev, event);
+ coresight_disable_source(csdev, event);
/* tell the core */
event->hw.state = PERF_HES_STOPPED;
diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 9a0d08b092ae..e02c3ea972c9 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -215,7 +215,6 @@ struct etm_config {
* @port_size: port size as reported by ETMCR bit 4-6 and 21.
* @arch: ETM/PTM version number.
* @use_cpu14: true if management registers need to be accessed via CP14.
- * @mode: this tracer's mode, i.e sysFS, Perf or disabled.
* @sticky_enable: true if ETM base configuration has been done.
* @boot_enable:true if we should start tracing at boot time.
* @os_unlock: true if access to management registers is allowed.
@@ -238,7 +237,6 @@ struct etm_drvdata {
int port_size;
u8 arch;
bool use_cp14;
- local_t mode;
bool sticky_enable;
bool boot_enable;
bool os_unlock;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 116a91d90ac2..9d5c1391ffb1 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -115,7 +115,7 @@ static void etm_clr_pwrup(struct etm_drvdata *drvdata)
*
* Basically the same as @coresight_timeout except for the register access
* method where we have to account for CP14 configurations.
-
+ *
* Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
 * TIMEOUT_US has elapsed, whichever happens first.
*/
@@ -556,14 +556,12 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
int ret;
- u32 val;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
-
- /* Someone is already using the tracer */
- if (val)
+ if (!coresight_take_mode(csdev, mode)) {
+ /* Someone is already using the tracer */
return -EBUSY;
+ }
switch (mode) {
case CS_MODE_SYSFS:
@@ -578,7 +576,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
/* The tracer didn't start */
if (ret)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(drvdata->csdev, CS_MODE_DISABLED);
return ret;
}
@@ -672,14 +670,13 @@ static void etm_disable(struct coresight_device *csdev,
struct perf_event *event)
{
enum cs_mode mode;
- struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- mode = local_read(&drvdata->mode);
+ mode = coresight_get_mode(csdev);
switch (mode) {
case CS_MODE_DISABLED:
@@ -696,7 +693,7 @@ static void etm_disable(struct coresight_device *csdev,
}
if (mode)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static const struct coresight_ops_source etm_source_ops = {
@@ -715,7 +712,7 @@ static int etm_online_cpu(unsigned int cpu)
return 0;
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
+ coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
return 0;
}
@@ -730,7 +727,7 @@ static int etm_starting_cpu(unsigned int cpu)
etmdrvdata[cpu]->os_unlock = true;
}
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -742,7 +739,7 @@ static int etm_dying_cpu(unsigned int cpu)
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -925,7 +922,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
dev_info(&drvdata->csdev->dev,
"%s initialized\n", (char *)coresight_get_uci_data(id));
if (boot_enable) {
- coresight_enable(drvdata->csdev);
+ coresight_enable_sysfs(drvdata->csdev);
drvdata->boot_enable = true;
}
@@ -1003,7 +1000,7 @@ static const struct amba_id etm_ids[] = {
CS_AMBA_ID_DATA(0x000bb95f, "PTM 1.1"),
/* PTM 1.1 Qualcomm */
CS_AMBA_ID_DATA(0x000b006f, "PTM 1.1"),
- { 0, 0},
+ { 0, 0, NULL},
};
MODULE_DEVICE_TABLE(amba, etm_ids);
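
coresight_take_mode() itself is not defined in this hunk, but the code it replaces shows the idea: local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode) claims the tracer only when it is currently disabled, and the caller returns -EBUSY otherwise. A small user-space sketch of that claim/release pattern with C11 atomics (take_mode and release_mode are illustrative names, not the kernel helpers):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum cs_mode { MODE_DISABLED, MODE_SYSFS, MODE_PERF };

/* Claim the device for 'mode' only if nobody else owns it. */
static bool take_mode(atomic_int *cur, int mode)
{
	int expected = MODE_DISABLED;

	/* Succeeds only when the device is currently disabled. */
	return atomic_compare_exchange_strong(cur, &expected, mode);
}

static void release_mode(atomic_int *cur)
{
	atomic_store(cur, MODE_DISABLED);
}

int main(void)
{
	atomic_int mode = MODE_DISABLED;

	printf("sysfs claim: %s\n", take_mode(&mode, MODE_SYSFS) ? "ok" : "busy");
	printf("perf claim:  %s\n", take_mode(&mode, MODE_PERF) ? "ok" : "busy");
	release_mode(&mode);
	printf("perf retry:  %s\n", take_mode(&mode, MODE_PERF) ? "ok" : "busy");
	return 0;
}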
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 2f271b7fb048..68c644be9813 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -722,7 +722,7 @@ static ssize_t cntr_val_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
spin_lock(&drvdata->spinlock);
for (i = 0; i < drvdata->nr_cntr; i++)
ret += sprintf(buf, "counter %d: %x\n",
@@ -941,7 +941,7 @@ static ssize_t seq_curr_state_show(struct device *dev,
struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct etm_config *config = &drvdata->config;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
val = config->seq_curr_state;
goto out;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c
index ce1995a2827f..c2ca4a02dfce 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c
@@ -840,14 +840,11 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
int ret;
- u32 val;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
-
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
- /* Someone is already using the tracer */
- if (val)
+ if (!coresight_take_mode(csdev, mode)) {
+ /* Someone is already using the tracer */
return -EBUSY;
+ }
switch (mode) {
case CS_MODE_SYSFS:
@@ -862,7 +859,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
/* The tracer didn't start */
if (ret)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
return ret;
}
@@ -1004,14 +1001,13 @@ static void etm4_disable(struct coresight_device *csdev,
struct perf_event *event)
{
enum cs_mode mode;
- struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
/*
* For as long as the tracer isn't disabled another entity can't
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- mode = local_read(&drvdata->mode);
+ mode = coresight_get_mode(csdev);
switch (mode) {
case CS_MODE_DISABLED:
@@ -1025,7 +1021,7 @@ static void etm4_disable(struct coresight_device *csdev,
}
if (mode)
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
}
static const struct coresight_ops_source etm4_source_ops = {
@@ -1200,6 +1196,7 @@ static void etm4_init_arch_data(void *info)
struct etm4_init_arg *init_arg = info;
struct etmv4_drvdata *drvdata;
struct csdev_access *csa;
+ struct device *dev = init_arg->dev;
int i;
drvdata = dev_get_drvdata(init_arg->dev);
@@ -1213,6 +1210,10 @@ static void etm4_init_arch_data(void *info)
if (!etm4_init_csdev_access(drvdata, csa))
return;
+ if (!csa->io_mem ||
+ fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
+ drvdata->skip_power_up = true;
+
/* Detect the support for OS Lock before we actually use it */
etm_detect_os_lock(drvdata, csa);
@@ -1650,7 +1651,7 @@ static int etm4_online_cpu(unsigned int cpu)
return etm4_probe_cpu(cpu);
if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable)
- coresight_enable(etmdrvdata[cpu]->csdev);
+ coresight_enable_sysfs(etmdrvdata[cpu]->csdev);
return 0;
}
@@ -1663,7 +1664,7 @@ static int etm4_starting_cpu(unsigned int cpu)
if (!etmdrvdata[cpu]->os_unlock)
etm4_os_unlock(etmdrvdata[cpu]);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_enable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -1675,7 +1676,7 @@ static int etm4_dying_cpu(unsigned int cpu)
return 0;
spin_lock(&etmdrvdata[cpu]->spinlock);
- if (local_read(&etmdrvdata[cpu]->mode))
+ if (coresight_get_mode(etmdrvdata[cpu]->csdev))
etm4_disable_hw(etmdrvdata[cpu]);
spin_unlock(&etmdrvdata[cpu]->spinlock);
return 0;
@@ -1833,7 +1834,7 @@ static int etm4_cpu_save(struct etmv4_drvdata *drvdata)
* Save and restore the ETM Trace registers only if
* the ETM is active.
*/
- if (local_read(&drvdata->mode) && drvdata->save_state)
+ if (coresight_get_mode(drvdata->csdev) && drvdata->save_state)
ret = __etm4_cpu_save(drvdata);
return ret;
}
@@ -2040,11 +2041,6 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
if (!drvdata->arch)
return -EINVAL;
- /* TRCPDCR is not accessible with system instructions. */
- if (!desc.access.io_mem ||
- fwnode_property_present(dev_fwnode(dev), "qcom,skip-power-up"))
- drvdata->skip_power_up = true;
-
major = ETM_ARCH_MAJOR_VERSION(drvdata->arch);
minor = ETM_ARCH_MINOR_VERSION(drvdata->arch);
@@ -2098,7 +2094,7 @@ static int etm4_add_coresight_dev(struct etm4_init_arg *init_arg)
drvdata->cpu, type_name, major, minor);
if (boot_enable) {
- coresight_enable(drvdata->csdev);
+ coresight_enable_sysfs(drvdata->csdev);
drvdata->boot_enable = true;
}
@@ -2390,7 +2386,7 @@ static const struct of_device_id etm4_sysreg_match[] = {
#ifdef CONFIG_ACPI
static const struct acpi_device_id etm4x_acpi_ids[] = {
- {"ARMHC500", 0}, /* ARM CoreSight ETM4x */
+ {"ARMHC500", 0, 0, 0}, /* ARM CoreSight ETM4x */
{}
};
MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index da17b6c49b0f..9ea678bc2e8e 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -1016,7 +1016,6 @@ struct etmv4_drvdata {
void __iomem *base;
struct coresight_device *csdev;
spinlock_t spinlock;
- local_t mode;
int cpu;
u8 arch;
u8 nr_pe;
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index a5b1fc787766..ef1a0abfee4e 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -350,7 +350,7 @@ MODULE_DEVICE_TABLE(of, static_funnel_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_funnel_ids[] = {
- {"ARMHC9FE", 0},
+ {"ARMHC9FE", 0, 0, 0},
{},
};
@@ -391,7 +391,7 @@ static const struct amba_id dynamic_funnel_ids[] = {
.id = 0x000bb9eb,
.mask = 0x000fffff,
},
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, dynamic_funnel_ids);
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 767076e07970..eb365236f9a9 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -12,6 +12,9 @@
#include <linux/coresight.h>
#include <linux/pm_runtime.h>
+extern struct mutex coresight_mutex;
+extern struct device_type coresight_dev_type[];
+
/*
* Coresight management registers (0xf00-0xfcc)
* 0xfa0 - 0xfa4: Management registers in PFTv1.0
@@ -130,8 +133,6 @@ void coresight_disable_path(struct list_head *path);
int coresight_enable_path(struct list_head *path, enum cs_mode mode,
void *sink_data);
struct coresight_device *coresight_get_sink(struct list_head *path);
-struct coresight_device *
-coresight_get_enabled_sink(struct coresight_device *source);
struct coresight_device *coresight_get_sink_by_id(u32 id);
struct coresight_device *
coresight_find_default_sink(struct coresight_device *csdev);
@@ -231,8 +232,6 @@ void coresight_add_helper(struct coresight_device *csdev,
void coresight_set_percpu_sink(int cpu, struct coresight_device *csdev);
struct coresight_device *coresight_get_percpu_sink(int cpu);
-int coresight_enable_source(struct coresight_device *csdev, enum cs_mode mode,
- void *data);
-bool coresight_disable_source(struct coresight_device *csdev, void *data);
+void coresight_disable_source(struct coresight_device *csdev, void *data);
#endif
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 91d93060dda5..73452d9dc13b 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -363,7 +363,7 @@ MODULE_DEVICE_TABLE(of, static_replicator_match);
#ifdef CONFIG_ACPI
static const struct acpi_device_id static_replicator_acpi_ids[] = {
- {"ARMHC985", 0}, /* ARM CoreSight Static Replicator */
+ {"ARMHC985", 0, 0, 0}, /* ARM CoreSight Static Replicator */
{}
};
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index a1c27c901ad1..974d37e5f94c 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -119,7 +119,6 @@ DEFINE_CORESIGHT_DEVLIST(stm_devs, "stm");
* @spinlock: only one at a time pls.
 * @chs: the channels associated with this STM.
* @stm: structure associated to the generic STM interface.
- * @mode: this tracer's mode (enum cs_mode), i.e sysFS, or disabled.
* @traceid: value of the current ID for this component.
 * @write_bytes: Maximum bytes this STM can write at a time.
* @stmsper: settings for register STMSPER.
@@ -136,7 +135,6 @@ struct stm_drvdata {
spinlock_t spinlock;
struct channel_space chs;
struct stm_data stm;
- local_t mode;
u8 traceid;
u32 write_bytes;
u32 stmsper;
@@ -195,17 +193,15 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
{
- u32 val;
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
if (mode != CS_MODE_SYSFS)
return -EINVAL;
- val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
-
- /* Someone is already using the tracer */
- if (val)
+ if (!coresight_take_mode(csdev, mode)) {
+ /* Someone is already using the tracer */
return -EBUSY;
+ }
pm_runtime_get_sync(csdev->dev.parent);
@@ -266,7 +262,7 @@ static void stm_disable(struct coresight_device *csdev,
* change its status. As such we can read the status here without
* fearing it will change under us.
*/
- if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
spin_lock(&drvdata->spinlock);
stm_disable_hw(drvdata);
spin_unlock(&drvdata->spinlock);
@@ -276,7 +272,7 @@ static void stm_disable(struct coresight_device *csdev,
pm_runtime_put(csdev->dev.parent);
- local_set(&drvdata->mode, CS_MODE_DISABLED);
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(&csdev->dev, "STM tracing disabled\n");
}
}
@@ -334,7 +330,7 @@ static int stm_generic_link(struct stm_data *stm_data,
if (!drvdata || !drvdata->csdev)
return -EINVAL;
- return coresight_enable(drvdata->csdev);
+ return coresight_enable_sysfs(drvdata->csdev);
}
static void stm_generic_unlink(struct stm_data *stm_data,
@@ -345,7 +341,7 @@ static void stm_generic_unlink(struct stm_data *stm_data,
if (!drvdata || !drvdata->csdev)
return;
- coresight_disable(drvdata->csdev);
+ coresight_disable_sysfs(drvdata->csdev);
}
static phys_addr_t
@@ -373,7 +369,7 @@ static long stm_generic_set_options(struct stm_data *stm_data,
{
struct stm_drvdata *drvdata = container_of(stm_data,
struct stm_drvdata, stm);
- if (!(drvdata && local_read(&drvdata->mode)))
+ if (!(drvdata && coresight_get_mode(drvdata->csdev)))
return -EINVAL;
if (channel >= drvdata->numsp)
@@ -408,7 +404,7 @@ static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
struct stm_drvdata, stm);
unsigned int stm_flags;
- if (!(drvdata && local_read(&drvdata->mode)))
+ if (!(drvdata && coresight_get_mode(drvdata->csdev)))
return -EACCES;
if (channel >= drvdata->numsp)
@@ -515,7 +511,7 @@ static ssize_t port_select_show(struct device *dev,
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
val = drvdata->stmspscr;
} else {
spin_lock(&drvdata->spinlock);
@@ -541,7 +537,7 @@ static ssize_t port_select_store(struct device *dev,
spin_lock(&drvdata->spinlock);
drvdata->stmspscr = val;
- if (local_read(&drvdata->mode)) {
+ if (coresight_get_mode(drvdata->csdev)) {
CS_UNLOCK(drvdata->base);
/* Process as per ARM's TRM recommendation */
stmsper = readl_relaxed(drvdata->base + STMSPER);
@@ -562,7 +558,7 @@ static ssize_t port_enable_show(struct device *dev,
struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
unsigned long val;
- if (!local_read(&drvdata->mode)) {
+ if (!coresight_get_mode(drvdata->csdev)) {
val = drvdata->stmsper;
} else {
spin_lock(&drvdata->spinlock);
@@ -588,7 +584,7 @@ static ssize_t port_enable_store(struct device *dev,
spin_lock(&drvdata->spinlock);
drvdata->stmsper = val;
- if (local_read(&drvdata->mode)) {
+ if (coresight_get_mode(drvdata->csdev)) {
CS_UNLOCK(drvdata->base);
writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
CS_LOCK(drvdata->base);
@@ -950,7 +946,7 @@ static const struct dev_pm_ops stm_dev_pm_ops = {
static const struct amba_id stm_ids[] = {
CS_AMBA_ID_DATA(0x000bb962, "STM32"),
CS_AMBA_ID_DATA(0x000bb963, "STM500"),
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, stm_ids);
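
Several of the STM sysfs handlers above share one pattern: while the device is disabled they read and write a cached copy of a register (drvdata->stmspscr, drvdata->stmsper), and they only touch the hardware while the device is enabled. A standalone sketch of that cached-versus-live register access, under the assumption that one lock protects both the cache and the "register" (all names below are invented for illustration):

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct stm_state {
	pthread_mutex_t lock;
	bool enabled;		/* stands in for coresight_get_mode() != DISABLED */
	uint32_t cached_sper;	/* software copy of the port-enable register */
	uint32_t hw_sper;	/* stands in for the memory-mapped register */
};

/* Read: cached value while disabled, live register while enabled. */
static uint32_t sper_show(struct stm_state *s)
{
	uint32_t val;

	pthread_mutex_lock(&s->lock);
	val = s->enabled ? s->hw_sper : s->cached_sper;
	pthread_mutex_unlock(&s->lock);
	return val;
}

/* Write: always update the cache, and the hardware only when enabled. */
static void sper_store(struct stm_state *s, uint32_t val)
{
	pthread_mutex_lock(&s->lock);
	s->cached_sper = val;
	if (s->enabled)
		s->hw_sper = val;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct stm_state s = { PTHREAD_MUTEX_INITIALIZER, false, 0, 0 };

	sper_store(&s, 0xff);			/* disabled: cache only */
	printf("0x%x\n", sper_show(&s));	/* 0xff from the cache */
	s.enabled = true;
	sper_store(&s, 0x0f);			/* enabled: cache + "hardware" */
	printf("0x%x\n", sper_show(&s));	/* 0xf from the "register" */
	return 0;
}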
diff --git a/drivers/hwtracing/coresight/coresight-sysfs.c b/drivers/hwtracing/coresight/coresight-sysfs.c
index dd78e9fcfc4d..f9444e2cb1d9 100644
--- a/drivers/hwtracing/coresight/coresight-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-sysfs.c
@@ -5,11 +5,402 @@
*/
#include <linux/device.h>
+#include <linux/idr.h>
#include <linux/kernel.h>
#include "coresight-priv.h"
/*
+ * Use IDR to map the hash of the source's device name
+ * to the pointer of the path for the source. The IDR is for
+ * the sources which aren't associated with a CPU.
+ */
+static DEFINE_IDR(path_idr);
+
+/*
+ * When operating Coresight drivers from the sysFS interface, only a single
+ * path can exist from a tracer (associated with a CPU) to a sink.
+ */
+static DEFINE_PER_CPU(struct list_head *, tracer_path);
+
+ssize_t coresight_simple_show_pair(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_pair_attribute *cs_attr = container_of(attr, struct cs_pair_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read_pair(&csdev->access, cs_attr->lo_off, cs_attr->hi_off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show_pair);
+
+ssize_t coresight_simple_show32(struct device *_dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = container_of(_dev, struct coresight_device, dev);
+ struct cs_off_attribute *cs_attr = container_of(attr, struct cs_off_attribute, attr);
+ u64 val;
+
+ pm_runtime_get_sync(_dev->parent);
+ val = csdev_access_relaxed_read32(&csdev->access, cs_attr->off);
+ pm_runtime_put_sync(_dev->parent);
+ return sysfs_emit(buf, "0x%llx\n", val);
+}
+EXPORT_SYMBOL_GPL(coresight_simple_show32);
+
+static int coresight_enable_source_sysfs(struct coresight_device *csdev,
+ enum cs_mode mode, void *data)
+{
+ int ret;
+
+ /*
+ * Comparison with CS_MODE_SYSFS works without taking any device
+ * specific spinlock because the truthiness of that comparison can only
+ * change with coresight_mutex held, which we already have here.
+ */
+ lockdep_assert_held(&coresight_mutex);
+ if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
+ ret = source_ops(csdev)->enable(csdev, data, mode);
+ if (ret)
+ return ret;
+ }
+
+ csdev->refcnt++;
+
+ return 0;
+}
+
+/**
+ * coresight_disable_source_sysfs - Drop the reference count by 1 and disable
+ * the device if there are no users left.
+ *
+ * @csdev: The coresight device to disable
+ * @data: Opaque data to pass on to the disable function of the source device.
+ * For example in perf mode this is a pointer to the struct perf_event.
+ *
+ * Returns true if the device has been disabled.
+ */
+static bool coresight_disable_source_sysfs(struct coresight_device *csdev,
+ void *data)
+{
+ lockdep_assert_held(&coresight_mutex);
+ if (coresight_get_mode(csdev) != CS_MODE_SYSFS)
+ return false;
+
+ csdev->refcnt--;
+ if (csdev->refcnt == 0) {
+ coresight_disable_source(csdev, data);
+ return true;
+ }
+ return false;
+}
+
+/**
+ * coresight_find_activated_sysfs_sink - returns the first sink activated via
+ * sysfs using a connection-based search starting from the source reference.
+ *
+ * @csdev: Coresight source device reference
+ */
+static struct coresight_device *
+coresight_find_activated_sysfs_sink(struct coresight_device *csdev)
+{
+ int i;
+ struct coresight_device *sink = NULL;
+
+ if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
+ csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) &&
+ csdev->sysfs_sink_activated)
+ return csdev;
+
+ /*
+ * Recursively explore each port found on this element.
+ */
+ for (i = 0; i < csdev->pdata->nr_outconns; i++) {
+ struct coresight_device *child_dev;
+
+ child_dev = csdev->pdata->out_conns[i]->dest_dev;
+ if (child_dev)
+ sink = coresight_find_activated_sysfs_sink(child_dev);
+ if (sink)
+ return sink;
+ }
+
+ return NULL;
+}
+
+/** coresight_validate_source_sysfs - make sure a source has the right credentials to
+ * be used via sysfs.
+ * @csdev: the device structure for a source.
+ * @function: the function this was called from.
+ *
+ * Assumes the coresight_mutex is held.
+ */
+static int coresight_validate_source_sysfs(struct coresight_device *csdev,
+ const char *function)
+{
+ u32 type, subtype;
+
+ type = csdev->type;
+ subtype = csdev->subtype.source_subtype;
+
+ if (type != CORESIGHT_DEV_TYPE_SOURCE) {
+ dev_err(&csdev->dev, "wrong device type in %s\n", function);
+ return -EINVAL;
+ }
+
+ if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM &&
+ subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) {
+ dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int coresight_enable_sysfs(struct coresight_device *csdev)
+{
+ int cpu, ret = 0;
+ struct coresight_device *sink;
+ struct list_head *path;
+ enum coresight_dev_subtype_source subtype;
+ u32 hash;
+
+ subtype = csdev->subtype.source_subtype;
+
+ mutex_lock(&coresight_mutex);
+
+ ret = coresight_validate_source_sysfs(csdev, __func__);
+ if (ret)
+ goto out;
+
+ /*
+ * mode == SYSFS implies that it's already enabled. Don't look at the
+ * refcount to determine this because we don't claim the source until
+ * coresight_enable_source(), so it can still race with Perf mode, which
+ * doesn't hold coresight_mutex.
+ */
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+ /*
+ * There could be multiple applications driving the software
+ * source. So keep the refcount for each such user when the
+ * source is already enabled.
+ */
+ if (subtype == CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE)
+ csdev->refcnt++;
+ goto out;
+ }
+
+ sink = coresight_find_activated_sysfs_sink(csdev);
+ if (!sink) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ path = coresight_build_path(csdev, sink);
+ if (IS_ERR(path)) {
+ pr_err("building path(s) failed\n");
+ ret = PTR_ERR(path);
+ goto out;
+ }
+
+ ret = coresight_enable_path(path, CS_MODE_SYSFS, NULL);
+ if (ret)
+ goto err_path;
+
+ ret = coresight_enable_source_sysfs(csdev, CS_MODE_SYSFS, NULL);
+ if (ret)
+ goto err_source;
+
+ switch (subtype) {
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+ /*
+ * When working from sysFS it is important to keep track
+ * of the paths that were created so that they can be
+ * undone in 'coresight_disable_sysfs()'. Since there can only
+ * be a single session per tracer (when working from sysFS)
+ * a per-cpu variable will do just fine.
+ */
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ per_cpu(tracer_path, cpu) = path;
+ break;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
+ /*
+ * Use the hash of the source's device name as the ID
+ * and map the ID to the pointer of the path.
+ */
+ hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
+ ret = idr_alloc_u32(&path_idr, path, &hash, hash, GFP_KERNEL);
+ if (ret)
+ goto err_source;
+ break;
+ default:
+ /* We can't be here */
+ break;
+ }
+
+out:
+ mutex_unlock(&coresight_mutex);
+ return ret;
+
+err_source:
+ coresight_disable_path(path);
+
+err_path:
+ coresight_release_path(path);
+ goto out;
+}
+EXPORT_SYMBOL_GPL(coresight_enable_sysfs);
+
+void coresight_disable_sysfs(struct coresight_device *csdev)
+{
+ int cpu, ret;
+ struct list_head *path = NULL;
+ u32 hash;
+
+ mutex_lock(&coresight_mutex);
+
+ ret = coresight_validate_source_sysfs(csdev, __func__);
+ if (ret)
+ goto out;
+
+ if (!coresight_disable_source_sysfs(csdev, NULL))
+ goto out;
+
+ switch (csdev->subtype.source_subtype) {
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
+ cpu = source_ops(csdev)->cpu_id(csdev);
+ path = per_cpu(tracer_path, cpu);
+ per_cpu(tracer_path, cpu) = NULL;
+ break;
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM:
+ case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS:
+ hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev)));
+ /* Find the path by the hash. */
+ path = idr_find(&path_idr, hash);
+ if (path == NULL) {
+ pr_err("Path is not found for %s\n", dev_name(&csdev->dev));
+ goto out;
+ }
+ idr_remove(&path_idr, hash);
+ break;
+ default:
+ /* We can't be here */
+ break;
+ }
+
+ coresight_disable_path(path);
+ coresight_release_path(path);
+
+out:
+ mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_disable_sysfs);
+
+static ssize_t enable_sink_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->sysfs_sink_activated);
+}
+
+static ssize_t enable_sink_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ csdev->sysfs_sink_activated = !!val;
+
+ return size;
+
+}
+static DEVICE_ATTR_RW(enable_sink);
+
+static ssize_t enable_source_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ guard(mutex)(&coresight_mutex);
+ return scnprintf(buf, PAGE_SIZE, "%u\n",
+ coresight_get_mode(csdev) == CS_MODE_SYSFS);
+}
+
+static ssize_t enable_source_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ int ret = 0;
+ unsigned long val;
+ struct coresight_device *csdev = to_coresight_device(dev);
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret)
+ return ret;
+
+ if (val) {
+ ret = coresight_enable_sysfs(csdev);
+ if (ret)
+ return ret;
+ } else {
+ coresight_disable_sysfs(csdev);
+ }
+
+ return size;
+}
+static DEVICE_ATTR_RW(enable_source);
+
+static struct attribute *coresight_sink_attrs[] = {
+ &dev_attr_enable_sink.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_sink);
+
+static struct attribute *coresight_source_attrs[] = {
+ &dev_attr_enable_source.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(coresight_source);
+
+struct device_type coresight_dev_type[] = {
+ [CORESIGHT_DEV_TYPE_SINK] = {
+ .name = "sink",
+ .groups = coresight_sink_groups,
+ },
+ [CORESIGHT_DEV_TYPE_LINK] = {
+ .name = "link",
+ },
+ [CORESIGHT_DEV_TYPE_LINKSINK] = {
+ .name = "linksink",
+ .groups = coresight_sink_groups,
+ },
+ [CORESIGHT_DEV_TYPE_SOURCE] = {
+ .name = "source",
+ .groups = coresight_source_groups,
+ },
+ [CORESIGHT_DEV_TYPE_HELPER] = {
+ .name = "helper",
+ }
+};
+/* Ensure the enum matches the names and groups */
+static_assert(ARRAY_SIZE(coresight_dev_type) == CORESIGHT_DEV_TYPE_MAX);
+
+/*
* Connections group - links attribute.
* Count of created links between coresight components in the group.
*/
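
The sysfs path bookkeeping moved into this file keeps two stores: a per-CPU slot for CPU-bound tracers and an IDR keyed by the hash of the source's device name for everything else, so coresight_disable_sysfs() can recover the path built in coresight_enable_sysfs(). A user-space sketch of the non-CPU case - hash the name, save the path under that key, and take it back on disable - with a fixed-size table standing in for the IDR (the hash and table code below is illustrative, not the kernel's hashlen/IDR implementation):

#include <stdint.h>
#include <stdio.h>

#define NSLOTS 64

struct path { int id; };	/* stands in for the kernel's path list head */
struct slot { uint32_t key; struct path *path; int used; };
static struct slot table[NSLOTS];

/* FNV-1a: a simple stand-in for hashlen_hash(hashlen_string(...)). */
static uint32_t name_hash(const char *name)
{
	uint32_t h = 2166136261u;

	while (*name)
		h = (h ^ (uint8_t)*name++) * 16777619u;
	return h;
}

/* Save a path under the hash of the source's name (like idr_alloc_u32). */
static int path_save(const char *name, struct path *p)
{
	uint32_t key = name_hash(name);

	for (int i = 0; i < NSLOTS; i++) {
		struct slot *s = &table[(key + i) % NSLOTS];

		if (!s->used) {
			*s = (struct slot){ key, p, 1 };
			return 0;
		}
	}
	return -1;			/* table full */
}

/* Look the path up by the same hash and drop it (like idr_find + idr_remove). */
static struct path *path_take(const char *name)
{
	uint32_t key = name_hash(name);

	for (int i = 0; i < NSLOTS; i++) {
		struct slot *s = &table[(key + i) % NSLOTS];

		if (s->used && s->key == key) {
			s->used = 0;
			return s->path;
		}
	}
	return NULL;
}

int main(void)
{
	struct path p = { 42 };

	path_save("stm0", &p);
	printf("found: %d\n", path_take("stm0") == &p);		/* 1 */
	printf("gone:  %d\n", path_take("stm0") == NULL);	/* 1 */
	return 0;
}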
diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c
index 7ec5365e2b64..72005b0c633e 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-core.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-core.c
@@ -558,7 +558,7 @@ static void tmc_shutdown(struct amba_device *adev)
spin_lock_irqsave(&drvdata->spinlock, flags);
- if (drvdata->mode == CS_MODE_DISABLED)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_DISABLED)
goto out;
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
@@ -594,7 +594,7 @@ static const struct amba_id tmc_ids[] = {
CS_AMBA_ID(0x000bb9e9),
/* Coresight SoC 600 TMC-ETF */
CS_AMBA_ID(0x000bb9ea),
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, tmc_ids);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
index 7406b65e2cdd..d4f641cd9de6 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etf.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -89,7 +89,7 @@ static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
tmc_etb_dump_hw(drvdata);
tmc_disable_hw(drvdata);
@@ -205,8 +205,8 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched.
*/
- if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(&csdev->refcnt);
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+ csdev->refcnt++;
goto out;
}
@@ -228,8 +228,8 @@ static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
ret = tmc_etb_enable_hw(drvdata);
if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
+ csdev->refcnt++;
} else {
/* Free up the buffer if we failed to enable */
used = false;
@@ -262,7 +262,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
* No need to continue if the ETB/ETF is already operated
* from sysFS.
*/
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
ret = -EBUSY;
break;
}
@@ -284,7 +284,7 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
break;
}
@@ -292,8 +292,8 @@ static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
if (!ret) {
/* Associate with monitored process. */
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(csdev, CS_MODE_PERF);
+ csdev->refcnt++;
}
} while (0);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -338,17 +338,18 @@ static int tmc_disable_etf_sink(struct coresight_device *csdev)
return -EBUSY;
}
- if (atomic_dec_return(&csdev->refcnt)) {
+ csdev->refcnt--;
+ if (csdev->refcnt) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
tmc_etb_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -371,15 +372,15 @@ static int tmc_enable_etf_link(struct coresight_device *csdev,
return -EBUSY;
}
- if (atomic_read(&csdev->refcnt) == 0) {
+ if (csdev->refcnt == 0) {
ret = tmc_etf_enable_hw(drvdata);
if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
first_enable = true;
}
}
if (!ret)
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
spin_unlock_irqrestore(&drvdata->spinlock, flags);
if (first_enable)
@@ -401,9 +402,10 @@ static void tmc_disable_etf_link(struct coresight_device *csdev,
return;
}
- if (atomic_dec_return(&csdev->refcnt) == 0) {
+ csdev->refcnt--;
+ if (csdev->refcnt == 0) {
tmc_etf_disable_hw(drvdata);
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
last_disable = true;
}
spin_unlock_irqrestore(&drvdata->spinlock, flags);
@@ -483,13 +485,13 @@ static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
return 0;
/* This shouldn't happen */
- if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
+ if (WARN_ON_ONCE(coresight_get_mode(csdev) != CS_MODE_PERF))
return 0;
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(&csdev->refcnt) != 1)
+ if (csdev->refcnt != 1)
goto out;
CS_UNLOCK(drvdata->base);
@@ -629,7 +631,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Don't interfere if operated from Perf */
- if (drvdata->mode == CS_MODE_PERF) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_PERF) {
ret = -EINVAL;
goto out;
}
@@ -641,7 +643,7 @@ int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
@@ -673,7 +675,7 @@ int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Re-enable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/* There is no point in reading a TMC in HW FIFO mode */
mode = readl_relaxed(drvdata->base + TMC_MODE);
if (mode != TMC_MODE_CIRCULAR_BUFFER) {
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index af02ba5d5f15..e75428fa1592 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1143,7 +1143,7 @@ static void __tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
*/
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
tmc_etr_sync_sysfs_buf(drvdata);
tmc_disable_hw(drvdata);
@@ -1189,7 +1189,7 @@ static struct etr_buf *tmc_etr_get_sysfs_buffer(struct coresight_device *csdev)
spin_lock_irqsave(&drvdata->spinlock, flags);
}
- if (drvdata->reading || drvdata->mode == CS_MODE_PERF) {
+ if (drvdata->reading || coresight_get_mode(csdev) == CS_MODE_PERF) {
ret = -EBUSY;
goto out;
}
@@ -1230,15 +1230,15 @@ static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev)
* sink is already enabled no memory is needed and the HW need not be
* touched, even if the buffer size has changed.
*/
- if (drvdata->mode == CS_MODE_SYSFS) {
- atomic_inc(&csdev->refcnt);
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
+ csdev->refcnt++;
goto out;
}
ret = tmc_etr_enable_hw(drvdata, sysfs_buf);
if (!ret) {
- drvdata->mode = CS_MODE_SYSFS;
- atomic_inc(&csdev->refcnt);
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
+ csdev->refcnt++;
}
out:
@@ -1564,7 +1564,7 @@ tmc_update_etr_buffer(struct coresight_device *csdev,
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't do anything if another tracer is using this sink */
- if (atomic_read(&csdev->refcnt) != 1) {
+ if (csdev->refcnt != 1) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
goto out;
}
@@ -1652,7 +1652,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* Don't use this sink if it is already claimed by sysFS */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(csdev) == CS_MODE_SYSFS) {
rc = -EBUSY;
goto unlock_out;
}
@@ -1676,7 +1676,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
* use for this session.
*/
if (drvdata->pid == pid) {
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
goto unlock_out;
}
@@ -1684,9 +1684,9 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
if (!rc) {
/* Associate with monitored process. */
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
+ coresight_set_mode(csdev, CS_MODE_PERF);
drvdata->perf_buf = etr_perf->etr_buf;
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
}
unlock_out:
@@ -1719,17 +1719,18 @@ static int tmc_disable_etr_sink(struct coresight_device *csdev)
return -EBUSY;
}
- if (atomic_dec_return(&csdev->refcnt)) {
+ csdev->refcnt--;
+ if (csdev->refcnt) {
spin_unlock_irqrestore(&drvdata->spinlock, flags);
return -EBUSY;
}
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
tmc_etr_disable_hw(drvdata);
/* Dissociate from monitored process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
/* Reset perf specific data */
drvdata->perf_buf = NULL;
@@ -1777,7 +1778,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
}
/* Disable the TMC if we are trying to read from a running session. */
- if (drvdata->mode == CS_MODE_SYSFS)
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS)
__tmc_etr_disable_hw(drvdata);
drvdata->reading = true;
@@ -1799,7 +1800,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
spin_lock_irqsave(&drvdata->spinlock, flags);
/* RE-enable the TMC if need be */
- if (drvdata->mode == CS_MODE_SYSFS) {
+ if (coresight_get_mode(drvdata->csdev) == CS_MODE_SYSFS) {
/*
* The trace run will continue with the same allocated trace
* buffer. Since the tracer is still enabled drvdata::buf can't
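The coresight_get_mode()/coresight_set_mode() helpers used throughout these hunks are not defined in this diff. Purely as an illustration of the expected shape, assuming struct coresight_device now carries the mode that used to live in each driver's drvdata, they could look roughly like:

	/* Illustrative only: the real helpers live in the CoreSight core headers. */
	static inline enum cs_mode example_get_mode(struct coresight_device *csdev)
	{
		return READ_ONCE(csdev->mode);	/* assumes csdev->mode exists */
	}

	static inline void example_set_mode(struct coresight_device *csdev,
					    enum cs_mode new_mode)
	{
		WRITE_ONCE(csdev->mode, new_mode);
	}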
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 8dcb426ac3e7..cef979c897e6 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -178,7 +178,6 @@ struct etr_buf {
* @size: trace buffer size for this TMC (common for all modes).
* @max_burst_size: The maximum burst size that can be initiated by
* TMC-ETR on AXI bus.
- * @mode: how this TMC is being used.
* @config_type: TMC variant, must be of type @tmc_config_type.
* @memwidth: width of the memory interface databus, in bytes.
* @trigger_cntr: amount of words to store after a trigger.
@@ -203,7 +202,6 @@ struct tmc_drvdata {
u32 len;
u32 size;
u32 max_burst_size;
- u32 mode;
enum tmc_config_type config_type;
enum tmc_mem_intf_width memwidth;
u32 trigger_cntr;
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
index 5f82737c37bb..7739bc7adc44 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.c
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -18,6 +18,7 @@
#include "coresight-priv.h"
#include "coresight-tpda.h"
#include "coresight-trace-id.h"
+#include "coresight-tpdm.h"
DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda");
@@ -28,24 +29,59 @@ static bool coresight_device_is_tpdm(struct coresight_device *csdev)
CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM);
}
+static void tpda_clear_element_size(struct coresight_device *csdev)
+{
+ struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ drvdata->dsb_esize = 0;
+ drvdata->cmb_esize = 0;
+}
+
+static void tpda_set_element_size(struct tpda_drvdata *drvdata, u32 *val)
+{
+ /* Clear all relevant fields */
+ *val &= ~(TPDA_Pn_CR_DSBSIZE | TPDA_Pn_CR_CMBSIZE);
+
+ if (drvdata->dsb_esize == 64)
+ *val |= TPDA_Pn_CR_DSBSIZE;
+ else if (drvdata->dsb_esize == 32)
+ *val &= ~TPDA_Pn_CR_DSBSIZE;
+
+ if (drvdata->cmb_esize == 64)
+ *val |= FIELD_PREP(TPDA_Pn_CR_CMBSIZE, 0x2);
+ else if (drvdata->cmb_esize == 32)
+ *val |= FIELD_PREP(TPDA_Pn_CR_CMBSIZE, 0x1);
+ else if (drvdata->cmb_esize == 8)
+ *val &= ~TPDA_Pn_CR_CMBSIZE;
+}
+
/*
- * Read the DSB element size from the TPDM device
+ * Read the element size from the TPDM device. A TPDM must have at least one of the
+ * element-size properties.
* Returns
- * The dsb element size read from the devicetree if available.
- * 0 - Otherwise, with a warning once.
+ * 0 - The element size property was read successfully
+ * Other values - The element size property could not be read
*/
-static int tpdm_read_dsb_element_size(struct coresight_device *csdev)
+static int tpdm_read_element_size(struct tpda_drvdata *drvdata,
+ struct coresight_device *csdev)
{
- int rc = 0;
- u8 size = 0;
+ int rc = -EINVAL;
+ struct tpdm_drvdata *tpdm_data = dev_get_drvdata(csdev->dev.parent);
+
+ if (tpdm_has_dsb_dataset(tpdm_data)) {
+ rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
+ "qcom,dsb-element-bits", &drvdata->dsb_esize);
+ }
+ if (tpdm_has_cmb_dataset(tpdm_data)) {
+ rc = fwnode_property_read_u32(dev_fwnode(csdev->dev.parent),
+ "qcom,cmb-element-bits", &drvdata->cmb_esize);
+ }
- rc = fwnode_property_read_u8(dev_fwnode(csdev->dev.parent),
- "qcom,dsb-element-size", &size);
if (rc)
dev_warn_once(&csdev->dev,
- "Failed to read TPDM DSB Element size: %d\n", rc);
+ "Failed to read TPDM Element size: %d\n", rc);
- return size;
+ return rc;
}
/*
@@ -56,11 +92,12 @@ static int tpdm_read_dsb_element_size(struct coresight_device *csdev)
* Parameter "inport" is used to pass in the input port number
 * of TPDA, and it is set to -1 in the recursive call.
*/
-static int tpda_get_element_size(struct coresight_device *csdev,
+static int tpda_get_element_size(struct tpda_drvdata *drvdata,
+ struct coresight_device *csdev,
int inport)
{
- int dsb_size = -ENOENT;
- int i, size;
+ int rc = 0;
+ int i;
struct coresight_device *in;
for (i = 0; i < csdev->pdata->nr_inconns; i++) {
@@ -69,30 +106,26 @@ static int tpda_get_element_size(struct coresight_device *csdev,
continue;
/* Ignore the paths that do not match port */
- if (inport > 0 &&
+ if (inport >= 0 &&
csdev->pdata->in_conns[i]->dest_port != inport)
continue;
if (coresight_device_is_tpdm(in)) {
- size = tpdm_read_dsb_element_size(in);
+ if (drvdata->dsb_esize || drvdata->cmb_esize)
+ return -EEXIST;
+ rc = tpdm_read_element_size(drvdata, in);
+ if (rc)
+ return rc;
} else {
/* Recurse down the path */
- size = tpda_get_element_size(in, -1);
- }
-
- if (size < 0)
- return size;
-
- if (dsb_size < 0) {
- /* Found a size, save it. */
- dsb_size = size;
- } else {
- /* Found duplicate TPDMs */
- return -EEXIST;
+ rc = tpda_get_element_size(drvdata, in, -1);
+ if (rc)
+ return rc;
}
}
- return dsb_size;
+
+ return rc;
}
/* Settings pre enabling port control register */
@@ -109,37 +142,24 @@ static void tpda_enable_pre_port(struct tpda_drvdata *drvdata)
static int tpda_enable_port(struct tpda_drvdata *drvdata, int port)
{
u32 val;
- int size;
+ int rc;
val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port));
- /*
- * Configure aggregator port n DSB data set element size
- * Set the bit to 0 if the size is 32
- * Set the bit to 1 if the size is 64
- */
- size = tpda_get_element_size(drvdata->csdev, port);
- switch (size) {
- case 32:
- val &= ~TPDA_Pn_CR_DSBSIZE;
- break;
- case 64:
- val |= TPDA_Pn_CR_DSBSIZE;
- break;
- case 0:
- return -EEXIST;
- case -EEXIST:
+ tpda_clear_element_size(drvdata->csdev);
+ rc = tpda_get_element_size(drvdata, drvdata->csdev, port);
+ if (!rc && (drvdata->dsb_esize || drvdata->cmb_esize)) {
+ tpda_set_element_size(drvdata, &val);
+ /* Enable the port */
+ val |= TPDA_Pn_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+ } else if (rc == -EEXIST)
dev_warn_once(&drvdata->csdev->dev,
- "Detected multiple TPDMs on port %d", -EEXIST);
- return -EEXIST;
- default:
- return -EINVAL;
- }
-
- /* Enable the port */
- val |= TPDA_Pn_CR_ENA;
- writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port));
+ "Detected multiple TPDMs on port %d", port);
+ else
+ dev_warn_once(&drvdata->csdev->dev,
+ "Didn't find TPDM element size");
- return 0;
+ return rc;
}
static int __tpda_enable(struct tpda_drvdata *drvdata, int port)
@@ -148,7 +168,12 @@ static int __tpda_enable(struct tpda_drvdata *drvdata, int port)
CS_UNLOCK(drvdata->base);
- if (!drvdata->csdev->enable)
+ /*
+ * Only do the pre-port enable for the first port that calls enable, i.e.
+ * while the device's main refcount is still 0.
+ */
+ lockdep_assert_held(&drvdata->spinlock);
+ if (!drvdata->csdev->refcnt)
tpda_enable_pre_port(drvdata);
ret = tpda_enable_port(drvdata, port);
@@ -169,6 +194,7 @@ static int tpda_enable(struct coresight_device *csdev,
ret = __tpda_enable(drvdata, in->dest_port);
if (!ret) {
atomic_inc(&in->dest_refcnt);
+ csdev->refcnt++;
dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port);
}
}
@@ -197,9 +223,10 @@ static void tpda_disable(struct coresight_device *csdev,
struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
spin_lock(&drvdata->spinlock);
- if (atomic_dec_return(&in->dest_refcnt) == 0)
+ if (atomic_dec_return(&in->dest_refcnt) == 0) {
__tpda_disable(drvdata, in->dest_port);
-
+ csdev->refcnt--;
+ }
spin_unlock(&drvdata->spinlock);
dev_dbg(drvdata->dev, "TPDA inport %d disabled\n", in->dest_port);
@@ -300,7 +327,7 @@ static struct amba_id tpda_ids[] = {
.id = 0x000f0f00,
.mask = 0x000fff00,
},
- { 0, 0},
+ { 0, 0, NULL },
};
static struct amba_driver tpda_driver = {
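tpdm_read_element_size() above pulls the element widths from firmware properties. A condensed sketch of that lookup pattern follows; the wrapper function is illustrative, the property names are the ones used in the hunk, and the real code only reads each property when the corresponding dataset is present:

	static int example_read_esize(struct coresight_device *csdev,
				      u32 *dsb_bits, u32 *cmb_bits)
	{
		struct fwnode_handle *fwnode = dev_fwnode(csdev->dev.parent);
		int rc;

		/* "qcom,dsb-element-bits" / "qcom,cmb-element-bits" as in the hunk */
		rc = fwnode_property_read_u32(fwnode, "qcom,dsb-element-bits", dsb_bits);
		if (rc)
			return rc;

		return fwnode_property_read_u32(fwnode, "qcom,cmb-element-bits", cmb_bits);
	}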
diff --git a/drivers/hwtracing/coresight/coresight-tpda.h b/drivers/hwtracing/coresight/coresight-tpda.h
index b3b38fd41b64..c6af3d2da3ef 100644
--- a/drivers/hwtracing/coresight/coresight-tpda.h
+++ b/drivers/hwtracing/coresight/coresight-tpda.h
@@ -10,6 +10,8 @@
#define TPDA_Pn_CR(n) (0x004 + (n * 4))
/* Aggregator port enable bit */
#define TPDA_Pn_CR_ENA BIT(0)
+/* Aggregator port CMB data set element size bit */
+#define TPDA_Pn_CR_CMBSIZE GENMASK(7, 6)
/* Aggregator port DSB data set element size bit */
#define TPDA_Pn_CR_DSBSIZE BIT(8)
@@ -25,6 +27,8 @@
* @csdev: component vitals needed by the framework.
* @spinlock: lock for the drvdata value.
* @enable: enable status of the component.
+ * @dsb_esize: Record the DSB element size.
+ * @cmb_esize: Record the CMB element size.
*/
struct tpda_drvdata {
void __iomem *base;
@@ -32,6 +36,8 @@ struct tpda_drvdata {
struct coresight_device *csdev;
spinlock_t spinlock;
u8 atid;
+ u32 dsb_esize;
+ u32 cmb_esize;
};
#endif /* _CORESIGHT_CORESIGHT_TPDA_H */
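The new TPDA_Pn_CR_CMBSIZE/DSBSIZE fields are programmed with FIELD_PREP(), mirroring tpda_set_element_size() earlier in this diff. A stand-alone sketch of that encoding, with illustrative macro and helper names that copy the bit layout above:

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_Pn_CR_CMBSIZE	GENMASK(7, 6)	/* same layout as TPDA_Pn_CR_CMBSIZE */
	#define EXAMPLE_Pn_CR_DSBSIZE	BIT(8)		/* same layout as TPDA_Pn_CR_DSBSIZE */

	static u32 example_encode_esize(u32 val, u32 dsb_esize, u32 cmb_esize)
	{
		val &= ~(EXAMPLE_Pn_CR_DSBSIZE | EXAMPLE_Pn_CR_CMBSIZE);

		if (dsb_esize == 64)
			val |= EXAMPLE_Pn_CR_DSBSIZE;	/* 32-bit elements leave the bit clear */

		if (cmb_esize == 64)
			val |= FIELD_PREP(EXAMPLE_Pn_CR_CMBSIZE, 0x2);
		else if (cmb_esize == 32)
			val |= FIELD_PREP(EXAMPLE_Pn_CR_CMBSIZE, 0x1);
		/* 8-bit CMB elements leave the field at 0 */

		return val;
	}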
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
index 97654aa4b772..a9708ab0d488 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.c
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -66,6 +66,31 @@ static ssize_t tpdm_simple_dataset_show(struct device *dev,
return -EINVAL;
return sysfs_emit(buf, "0x%x\n",
drvdata->dsb->msr[tpdm_attr->idx]);
+ case CMB_TRIG_PATT:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->trig_patt[tpdm_attr->idx]);
+ case CMB_TRIG_PATT_MASK:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->trig_patt_mask[tpdm_attr->idx]);
+ case CMB_PATT:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->patt_val[tpdm_attr->idx]);
+ case CMB_PATT_MASK:
+ if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->patt_mask[tpdm_attr->idx]);
+ case CMB_MSR:
+ if (tpdm_attr->idx >= drvdata->cmb_msr_num)
+ return -EINVAL;
+ return sysfs_emit(buf, "0x%x\n",
+ drvdata->cmb->msr[tpdm_attr->idx]);
}
return -EINVAL;
}
@@ -77,67 +102,103 @@ static ssize_t tpdm_simple_dataset_store(struct device *dev,
size_t size)
{
unsigned long val;
- ssize_t ret = size;
+ ssize_t ret = -EINVAL;
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
struct tpdm_dataset_attribute *tpdm_attr =
container_of(attr, struct tpdm_dataset_attribute, attr);
if (kstrtoul(buf, 0, &val))
- return -EINVAL;
+ return ret;
- spin_lock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
switch (tpdm_attr->mem) {
case DSB_TRIG_PATT:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->trig_patt[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_TRIG_PATT_MASK:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_PATT:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->patt_val[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_PATT_MASK:
- if (tpdm_attr->idx < TPDM_DSB_MAX_PATT)
+ if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
drvdata->dsb->patt_mask[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
break;
case DSB_MSR:
- if (tpdm_attr->idx < drvdata->dsb_msr_num)
+ if (tpdm_attr->idx < drvdata->dsb_msr_num) {
drvdata->dsb->msr[tpdm_attr->idx] = val;
- else
- ret = -EINVAL;
+ ret = size;
+ }
+ break;
+ case CMB_TRIG_PATT:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->trig_patt[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_TRIG_PATT_MASK:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->trig_patt_mask[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_PATT:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->patt_val[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_PATT_MASK:
+ if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
+ drvdata->cmb->patt_mask[tpdm_attr->idx] = val;
+ ret = size;
+ }
+ break;
+ case CMB_MSR:
+ if (tpdm_attr->idx < drvdata->cmb_msr_num) {
+ drvdata->cmb->msr[tpdm_attr->idx] = val;
+ ret = size;
+ }
break;
default:
- ret = -EINVAL;
+ break;
}
- spin_unlock(&drvdata->spinlock);
return ret;
}
-static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (drvdata && tpdm_has_dsb_dataset(drvdata))
+ return attr->mode;
+
+ return 0;
}
-static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
+static umode_t tpdm_cmb_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = kobj_to_dev(kobj);
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
- if (drvdata && tpdm_has_dsb_dataset(drvdata))
+ if (drvdata && tpdm_has_cmb_dataset(drvdata))
return attr->mode;
return 0;
@@ -159,6 +220,23 @@ static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj,
return 0;
}
+static umode_t tpdm_cmb_msr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ struct device_attribute *dev_attr =
+ container_of(attr, struct device_attribute, attr);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(dev_attr, struct tpdm_dataset_attribute, attr);
+
+ if (tpdm_attr->idx < drvdata->cmb_msr_num)
+ return attr->mode;
+
+ return 0;
+}
+
static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
{
if (tpdm_has_dsb_dataset(drvdata)) {
@@ -167,6 +245,9 @@ static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
drvdata->dsb->trig_ts = true;
drvdata->dsb->trig_type = false;
}
+
+ if (drvdata->cmb)
+ memset(drvdata->cmb, 0, sizeof(struct cmb_dataset));
}
static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val)
@@ -233,25 +314,27 @@ static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
{
u32 val, i;
+ if (!tpdm_has_dsb_dataset(drvdata))
+ return;
+
for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
writel_relaxed(drvdata->dsb->edge_ctrl[i],
- drvdata->base + TPDM_DSB_EDCR(i));
+ drvdata->base + TPDM_DSB_EDCR(i));
for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++)
writel_relaxed(drvdata->dsb->edge_ctrl_mask[i],
- drvdata->base + TPDM_DSB_EDCMR(i));
+ drvdata->base + TPDM_DSB_EDCMR(i));
for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
writel_relaxed(drvdata->dsb->patt_val[i],
- drvdata->base + TPDM_DSB_TPR(i));
+ drvdata->base + TPDM_DSB_TPR(i));
writel_relaxed(drvdata->dsb->patt_mask[i],
- drvdata->base + TPDM_DSB_TPMR(i));
+ drvdata->base + TPDM_DSB_TPMR(i));
writel_relaxed(drvdata->dsb->trig_patt[i],
- drvdata->base + TPDM_DSB_XPR(i));
+ drvdata->base + TPDM_DSB_XPR(i));
writel_relaxed(drvdata->dsb->trig_patt_mask[i],
- drvdata->base + TPDM_DSB_XPMR(i));
+ drvdata->base + TPDM_DSB_XPMR(i));
}
set_dsb_tier(drvdata);
-
set_dsb_msr(drvdata);
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
@@ -267,6 +350,76 @@ static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
+static void set_cmb_tier(struct tpdm_drvdata *drvdata)
+{
+ u32 val;
+
+ val = readl_relaxed(drvdata->base + TPDM_CMB_TIER);
+
+ /* Clear all relevant fields */
+ val &= ~(TPDM_CMB_TIER_PATT_TSENAB | TPDM_CMB_TIER_TS_ALL |
+ TPDM_CMB_TIER_XTRIG_TSENAB);
+
+ /* Set pattern timestamp type and enablement */
+ if (drvdata->cmb->patt_ts)
+ val |= TPDM_CMB_TIER_PATT_TSENAB;
+
+ /* Set trigger timestamp */
+ if (drvdata->cmb->trig_ts)
+ val |= TPDM_CMB_TIER_XTRIG_TSENAB;
+
+ /* Set timestamp enablement for all trace */
+ if (drvdata->cmb->ts_all)
+ val |= TPDM_CMB_TIER_TS_ALL;
+
+ writel_relaxed(val, drvdata->base + TPDM_CMB_TIER);
+}
+
+static void set_cmb_msr(struct tpdm_drvdata *drvdata)
+{
+ int i;
+
+ for (i = 0; i < drvdata->cmb_msr_num; i++)
+ writel_relaxed(drvdata->cmb->msr[i],
+ drvdata->base + TPDM_CMB_MSR(i));
+}
+
+static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
+{
+ u32 val, i;
+
+ if (!tpdm_has_cmb_dataset(drvdata))
+ return;
+
+ /* Configure pattern registers */
+ for (i = 0; i < TPDM_CMB_MAX_PATT; i++) {
+ writel_relaxed(drvdata->cmb->patt_val[i],
+ drvdata->base + TPDM_CMB_TPR(i));
+ writel_relaxed(drvdata->cmb->patt_mask[i],
+ drvdata->base + TPDM_CMB_TPMR(i));
+ writel_relaxed(drvdata->cmb->trig_patt[i],
+ drvdata->base + TPDM_CMB_XPR(i));
+ writel_relaxed(drvdata->cmb->trig_patt_mask[i],
+ drvdata->base + TPDM_CMB_XPMR(i));
+ }
+
+ set_cmb_tier(drvdata);
+ set_cmb_msr(drvdata);
+
+ val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
+ /*
+ * Set to 0 for continuous CMB collection mode,
+ * 1 for trace-on-change CMB collection mode.
+ */
+ if (drvdata->cmb->trace_mode)
+ val |= TPDM_CMB_CR_MODE;
+ else
+ val &= ~TPDM_CMB_CR_MODE;
+ /* Set the enable bit of CMB control register to 1 */
+ val |= TPDM_CMB_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
+}
+
/*
* TPDM enable operations
* The TPDM or Monitor serves as data collection component for various
@@ -279,8 +432,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- if (tpdm_has_dsb_dataset(drvdata))
- tpdm_enable_dsb(drvdata);
+ tpdm_enable_dsb(drvdata);
+ tpdm_enable_cmb(drvdata);
CS_LOCK(drvdata->base);
}
@@ -308,19 +461,35 @@ static void tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
{
u32 val;
+ if (!tpdm_has_dsb_dataset(drvdata))
+ return;
+
/* Set the enable bit of DSB control register to 0 */
val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
val &= ~TPDM_DSB_CR_ENA;
writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
}
+static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
+{
+ u32 val;
+
+ if (!tpdm_has_cmb_dataset(drvdata))
+ return;
+
+ val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
+ /* Set the enable bit of CMB control register to 0 */
+ val &= ~TPDM_CMB_CR_ENA;
+ writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
+}
+
/* TPDM disable operations */
static void __tpdm_disable(struct tpdm_drvdata *drvdata)
{
CS_UNLOCK(drvdata->base);
- if (tpdm_has_dsb_dataset(drvdata))
- tpdm_disable_dsb(drvdata);
+ tpdm_disable_dsb(drvdata);
+ tpdm_disable_cmb(drvdata);
CS_LOCK(drvdata->base);
}
@@ -366,6 +535,12 @@ static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
if (!drvdata->dsb)
return -ENOMEM;
}
+ if (tpdm_has_cmb_dataset(drvdata) && (!drvdata->cmb)) {
+ drvdata->cmb = devm_kzalloc(drvdata->dev,
+ sizeof(*drvdata->cmb), GFP_KERNEL);
+ if (!drvdata->cmb)
+ return -ENOMEM;
+ }
tpdm_reset_datasets(drvdata);
return 0;
@@ -577,9 +752,18 @@ static ssize_t enable_ts_show(struct device *dev,
char *buf)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(attr, struct tpdm_dataset_attribute, attr);
+ ssize_t size = -EINVAL;
- return sysfs_emit(buf, "%u\n",
- (unsigned int)drvdata->dsb->patt_ts);
+ if (tpdm_attr->mem == DSB_PATT)
+ size = sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->dsb->patt_ts);
+ else if (tpdm_attr->mem == CMB_PATT)
+ size = sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->patt_ts);
+
+ return size;
}
/*
@@ -591,17 +775,23 @@ static ssize_t enable_ts_store(struct device *dev,
size_t size)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ struct tpdm_dataset_attribute *tpdm_attr =
+ container_of(attr, struct tpdm_dataset_attribute, attr);
unsigned long val;
if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
return -EINVAL;
- spin_lock(&drvdata->spinlock);
- drvdata->dsb->patt_ts = !!val;
- spin_unlock(&drvdata->spinlock);
+ guard(spinlock)(&drvdata->spinlock);
+ if (tpdm_attr->mem == DSB_PATT)
+ drvdata->dsb->patt_ts = !!val;
+ else if (tpdm_attr->mem == CMB_PATT)
+ drvdata->cmb->patt_ts = !!val;
+ else
+ return -EINVAL;
+
return size;
}
-static DEVICE_ATTR_RW(enable_ts);
static ssize_t set_type_show(struct device *dev,
struct device_attribute *attr,
@@ -704,6 +894,96 @@ static ssize_t dsb_trig_ts_store(struct device *dev,
}
static DEVICE_ATTR_RW(dsb_trig_ts);
+static ssize_t cmb_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%x\n", drvdata->cmb->trace_mode);
+
+}
+
+static ssize_t cmb_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long trace_mode;
+
+ if (kstrtoul(buf, 0, &trace_mode) || (trace_mode & ~1UL))
+ return -EINVAL;
+
+ spin_lock(&drvdata->spinlock);
+ drvdata->cmb->trace_mode = trace_mode;
+ spin_unlock(&drvdata->spinlock);
+ return size;
+}
+static DEVICE_ATTR_RW(cmb_mode);
+
+static ssize_t cmb_ts_all_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->ts_all);
+}
+
+static ssize_t cmb_ts_all_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ guard(spinlock)(&drvdata->spinlock);
+ if (val)
+ drvdata->cmb->ts_all = true;
+ else
+ drvdata->cmb->ts_all = false;
+
+ return size;
+}
+static DEVICE_ATTR_RW(cmb_ts_all);
+
+static ssize_t cmb_trig_ts_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ return sysfs_emit(buf, "%u\n",
+ (unsigned int)drvdata->cmb->trig_ts);
+}
+
+static ssize_t cmb_trig_ts_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+ unsigned long val;
+
+ if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
+ return -EINVAL;
+
+ guard(spinlock)(&drvdata->spinlock);
+ if (val)
+ drvdata->cmb->trig_ts = true;
+ else
+ drvdata->cmb->trig_ts = false;
+
+ return size;
+}
+static DEVICE_ATTR_RW(cmb_trig_ts);
+
static struct attribute *tpdm_dsb_edge_attrs[] = {
&dev_attr_ctrl_idx.attr,
&dev_attr_ctrl_val.attr,
@@ -772,7 +1052,7 @@ static struct attribute *tpdm_dsb_patt_attrs[] = {
DSB_PATT_MASK_ATTR(5),
DSB_PATT_MASK_ATTR(6),
DSB_PATT_MASK_ATTR(7),
- &dev_attr_enable_ts.attr,
+ DSB_PATT_ENABLE_TS,
&dev_attr_set_type.attr,
NULL,
};
@@ -813,6 +1093,59 @@ static struct attribute *tpdm_dsb_msr_attrs[] = {
NULL,
};
+static struct attribute *tpdm_cmb_trig_patt_attrs[] = {
+ CMB_TRIG_PATT_ATTR(0),
+ CMB_TRIG_PATT_ATTR(1),
+ CMB_TRIG_PATT_MASK_ATTR(0),
+ CMB_TRIG_PATT_MASK_ATTR(1),
+ NULL,
+};
+
+static struct attribute *tpdm_cmb_patt_attrs[] = {
+ CMB_PATT_ATTR(0),
+ CMB_PATT_ATTR(1),
+ CMB_PATT_MASK_ATTR(0),
+ CMB_PATT_MASK_ATTR(1),
+ CMB_PATT_ENABLE_TS,
+ NULL,
+};
+
+static struct attribute *tpdm_cmb_msr_attrs[] = {
+ CMB_MSR_ATTR(0),
+ CMB_MSR_ATTR(1),
+ CMB_MSR_ATTR(2),
+ CMB_MSR_ATTR(3),
+ CMB_MSR_ATTR(4),
+ CMB_MSR_ATTR(5),
+ CMB_MSR_ATTR(6),
+ CMB_MSR_ATTR(7),
+ CMB_MSR_ATTR(8),
+ CMB_MSR_ATTR(9),
+ CMB_MSR_ATTR(10),
+ CMB_MSR_ATTR(11),
+ CMB_MSR_ATTR(12),
+ CMB_MSR_ATTR(13),
+ CMB_MSR_ATTR(14),
+ CMB_MSR_ATTR(15),
+ CMB_MSR_ATTR(16),
+ CMB_MSR_ATTR(17),
+ CMB_MSR_ATTR(18),
+ CMB_MSR_ATTR(19),
+ CMB_MSR_ATTR(20),
+ CMB_MSR_ATTR(21),
+ CMB_MSR_ATTR(22),
+ CMB_MSR_ATTR(23),
+ CMB_MSR_ATTR(24),
+ CMB_MSR_ATTR(25),
+ CMB_MSR_ATTR(26),
+ CMB_MSR_ATTR(27),
+ CMB_MSR_ATTR(28),
+ CMB_MSR_ATTR(29),
+ CMB_MSR_ATTR(30),
+ CMB_MSR_ATTR(31),
+ NULL,
+};
+
static struct attribute *tpdm_dsb_attrs[] = {
&dev_attr_dsb_mode.attr,
&dev_attr_dsb_trig_ts.attr,
@@ -820,6 +1153,13 @@ static struct attribute *tpdm_dsb_attrs[] = {
NULL,
};
+static struct attribute *tpdm_cmb_attrs[] = {
+ &dev_attr_cmb_mode.attr,
+ &dev_attr_cmb_ts_all.attr,
+ &dev_attr_cmb_trig_ts.attr,
+ NULL,
+};
+
static struct attribute_group tpdm_dsb_attr_grp = {
.attrs = tpdm_dsb_attrs,
.is_visible = tpdm_dsb_is_visible,
@@ -849,6 +1189,29 @@ static struct attribute_group tpdm_dsb_msr_grp = {
.name = "dsb_msr",
};
+static struct attribute_group tpdm_cmb_attr_grp = {
+ .attrs = tpdm_cmb_attrs,
+ .is_visible = tpdm_cmb_is_visible,
+};
+
+static struct attribute_group tpdm_cmb_trig_patt_grp = {
+ .attrs = tpdm_cmb_trig_patt_attrs,
+ .is_visible = tpdm_cmb_is_visible,
+ .name = "cmb_trig_patt",
+};
+
+static struct attribute_group tpdm_cmb_patt_grp = {
+ .attrs = tpdm_cmb_patt_attrs,
+ .is_visible = tpdm_cmb_is_visible,
+ .name = "cmb_patt",
+};
+
+static struct attribute_group tpdm_cmb_msr_grp = {
+ .attrs = tpdm_cmb_msr_attrs,
+ .is_visible = tpdm_cmb_msr_is_visible,
+ .name = "cmb_msr",
+};
+
static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_attr_grp,
&tpdm_dsb_attr_grp,
@@ -856,6 +1219,10 @@ static const struct attribute_group *tpdm_attr_grps[] = {
&tpdm_dsb_trig_patt_grp,
&tpdm_dsb_patt_grp,
&tpdm_dsb_msr_grp,
+ &tpdm_cmb_attr_grp,
+ &tpdm_cmb_trig_patt_grp,
+ &tpdm_cmb_patt_grp,
+ &tpdm_cmb_msr_grp,
NULL,
};
@@ -894,6 +1261,10 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
of_property_read_u32(drvdata->dev->of_node,
"qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
+ if (drvdata && tpdm_has_cmb_dataset(drvdata))
+ of_property_read_u32(drvdata->dev->of_node,
+ "qcom,cmb-msrs-num", &drvdata->cmb_msr_num);
+
/* Set up coresight component description */
desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
if (!desc.name)
@@ -933,7 +1304,7 @@ static struct amba_id tpdm_ids[] = {
.id = 0x000f0e00,
.mask = 0x000fff00,
},
- { 0, 0},
+ { 0, 0, NULL },
};
static struct amba_driver tpdm_driver = {
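The CMB sysfs groups added above are gated with an .is_visible callback so they simply disappear on TPDMs without a CMB dataset. The general shape, with placeholder names for the callback and group:

	static umode_t example_cmb_is_visible(struct kobject *kobj,
					      struct attribute *attr, int n)
	{
		struct device *dev = kobj_to_dev(kobj);
		struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);

		/* Hide the whole group when the TPDM has no CMB dataset */
		return (drvdata && tpdm_has_cmb_dataset(drvdata)) ? attr->mode : 0;
	}

	static struct attribute_group example_cmb_grp = {
		.attrs		= tpdm_cmb_attrs,
		.is_visible	= example_cmb_is_visible,
		.name		= "cmb_example",	/* illustrative group name */
	};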
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h
index 4115b2a17b8d..e08d212642e3 100644
--- a/drivers/hwtracing/coresight/coresight-tpdm.h
+++ b/drivers/hwtracing/coresight/coresight-tpdm.h
@@ -9,6 +9,38 @@
/* The max number of the datasets that TPDM supports */
#define TPDM_DATASETS 7
+/* CMB Subunit Registers */
+#define TPDM_CMB_CR (0xA00)
+/* CMB subunit timestamp insertion enable register */
+#define TPDM_CMB_TIER (0xA04)
+/* CMB subunit timestamp pattern registers */
+#define TPDM_CMB_TPR(n) (0xA08 + (n * 4))
+/* CMB subunit timestamp pattern mask registers */
+#define TPDM_CMB_TPMR(n) (0xA10 + (n * 4))
+/* CMB subunit trigger pattern registers */
+#define TPDM_CMB_XPR(n) (0xA18 + (n * 4))
+/* CMB subunit trigger pattern mask registers */
+#define TPDM_CMB_XPMR(n) (0xA20 + (n * 4))
+/* CMB MSR register */
+#define TPDM_CMB_MSR(n) (0xA80 + (n * 4))
+
+/* Enable bit for CMB subunit */
+#define TPDM_CMB_CR_ENA BIT(0)
+/* Trace collection mode for CMB subunit */
+#define TPDM_CMB_CR_MODE BIT(1)
+/* Timestamp control for pattern match */
+#define TPDM_CMB_TIER_PATT_TSENAB BIT(0)
+/* CMB CTI timestamp request */
+#define TPDM_CMB_TIER_XTRIG_TSENAB BIT(1)
+/* Timestamp enable for all trace */
+#define TPDM_CMB_TIER_TS_ALL BIT(2)
+
+/* Number of pattern registers */
+#define TPDM_CMB_MAX_PATT 2
+
+/* Maximum number of CMB MSRs */
+#define TPDM_CMB_MAX_MSR 32
+
/* DSB Subunit Registers */
#define TPDM_DSB_CR (0x780)
#define TPDM_DSB_TIER (0x784)
@@ -79,10 +111,12 @@
*
* PERIPHIDR0[0] : Fix to 1 if ImplDef subunit present, else 0
* PERIPHIDR0[1] : Fix to 1 if DSB subunit present, else 0
+ * PERIPHIDR0[2] : Fix to 1 if CMB subunit present, else 0
*/
#define TPDM_PIDR0_DS_IMPDEF BIT(0)
#define TPDM_PIDR0_DS_DSB BIT(1)
+#define TPDM_PIDR0_DS_CMB BIT(2)
#define TPDM_DSB_MAX_LINES 256
/* MAX number of EDCR registers */
@@ -113,6 +147,16 @@
} \
})[0].attr.attr)
+#define tpdm_patt_enable_ts(name, mem) \
+ (&((struct tpdm_dataset_attribute[]) { \
+ { \
+ __ATTR(name, 0644, enable_ts_show, \
+ enable_ts_store), \
+ mem, \
+ 0, \
+ } \
+ })[0].attr.attr)
+
#define DSB_EDGE_CTRL_ATTR(nr) \
tpdm_simple_dataset_ro(edcr##nr, \
DSB_EDGE_CTRL, nr)
@@ -137,10 +181,38 @@
tpdm_simple_dataset_rw(tpmr##nr, \
DSB_PATT_MASK, nr)
+#define DSB_PATT_ENABLE_TS \
+ tpdm_patt_enable_ts(enable_ts, \
+ DSB_PATT)
+
#define DSB_MSR_ATTR(nr) \
tpdm_simple_dataset_rw(msr##nr, \
DSB_MSR, nr)
+#define CMB_TRIG_PATT_ATTR(nr) \
+ tpdm_simple_dataset_rw(xpr##nr, \
+ CMB_TRIG_PATT, nr)
+
+#define CMB_TRIG_PATT_MASK_ATTR(nr) \
+ tpdm_simple_dataset_rw(xpmr##nr, \
+ CMB_TRIG_PATT_MASK, nr)
+
+#define CMB_PATT_ATTR(nr) \
+ tpdm_simple_dataset_rw(tpr##nr, \
+ CMB_PATT, nr)
+
+#define CMB_PATT_MASK_ATTR(nr) \
+ tpdm_simple_dataset_rw(tpmr##nr, \
+ CMB_PATT_MASK, nr)
+
+#define CMB_PATT_ENABLE_TS \
+ tpdm_patt_enable_ts(enable_ts, \
+ CMB_PATT)
+
+#define CMB_MSR_ATTR(nr) \
+ tpdm_simple_dataset_rw(msr##nr, \
+ CMB_MSR, nr)
+
/**
* struct dsb_dataset - specifics associated to dsb dataset
* @mode: DSB programming mode
@@ -174,6 +246,30 @@ struct dsb_dataset {
};
/**
+ * struct cmb_dataset - specifics associated to cmb dataset
+ * @trace_mode: Dataset collection mode
+ * @patt_val: Save value for pattern
+ * @patt_mask: Save value for pattern mask
+ * @trig_patt: Save value for trigger pattern
+ * @trig_patt_mask: Save value for trigger pattern mask
+ * @msr: Save value for MSR
+ * @patt_ts: Indicates if pattern match for timestamp is enabled.
+ * @trig_ts: Indicates if CTI trigger for timestamp is enabled.
+ * @ts_all: Indicates if timestamp is enabled for all packets.
+ */
+struct cmb_dataset {
+ u32 trace_mode;
+ u32 patt_val[TPDM_CMB_MAX_PATT];
+ u32 patt_mask[TPDM_CMB_MAX_PATT];
+ u32 trig_patt[TPDM_CMB_MAX_PATT];
+ u32 trig_patt_mask[TPDM_CMB_MAX_PATT];
+ u32 msr[TPDM_CMB_MAX_MSR];
+ bool patt_ts;
+ bool trig_ts;
+ bool ts_all;
+};
+
+/**
* struct tpdm_drvdata - specifics associated to an TPDM component
* @base: memory mapped base address for this component.
* @dev: The device entity associated to this component.
@@ -182,7 +278,9 @@ struct dsb_dataset {
* @enable: enable status of the component.
* @datasets: The datasets types present of the TPDM.
* @dsb Specifics associated to TPDM DSB.
+ * @cmb Specifics associated to TPDM CMB.
* @dsb_msr_num Number of MSR supported by DSB TPDM
+ * @cmb_msr_num Number of MSR supported by CMB TPDM
*/
struct tpdm_drvdata {
@@ -193,7 +291,9 @@ struct tpdm_drvdata {
bool enable;
unsigned long datasets;
struct dsb_dataset *dsb;
+ struct cmb_dataset *cmb;
u32 dsb_msr_num;
+ u32 cmb_msr_num;
};
/* Enumerate members of various datasets */
@@ -205,6 +305,11 @@ enum dataset_mem {
DSB_PATT,
DSB_PATT_MASK,
DSB_MSR,
+ CMB_TRIG_PATT,
+ CMB_TRIG_PATT_MASK,
+ CMB_PATT,
+ CMB_PATT_MASK,
+ CMB_MSR
};
/**
@@ -220,4 +325,13 @@ struct tpdm_dataset_attribute {
u32 idx;
};
+static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_DSB);
+}
+
+static bool tpdm_has_cmb_dataset(struct tpdm_drvdata *drvdata)
+{
+ return (drvdata->datasets & TPDM_PIDR0_DS_CMB);
+}
#endif /* _CORESIGHT_CORESIGHT_TPDM_H */
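Several sysfs store paths in this series switch from explicit spin_lock()/spin_unlock() pairs to scope-based guard(spinlock)(). A minimal sketch of that pattern; the helper below is illustrative and not taken from the patch:

	#include <linux/cleanup.h>
	#include <linux/kstrtox.h>
	#include <linux/spinlock.h>

	static ssize_t example_bool_store(spinlock_t *lock, bool *field,
					  const char *buf, size_t size)
	{
		unsigned long val;

		if (kstrtoul(buf, 0, &val) || (val & ~1UL))
			return -EINVAL;

		guard(spinlock)(lock);	/* dropped automatically on every return path */
		*field = !!val;

		return size;
	}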
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 59eac93fd6bb..29024f880fda 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -58,6 +58,7 @@ struct tpiu_drvdata {
void __iomem *base;
struct clk *atclk;
struct coresight_device *csdev;
+ spinlock_t spinlock;
};
static void tpiu_enable_hw(struct csdev_access *csa)
@@ -72,8 +73,11 @@ static void tpiu_enable_hw(struct csdev_access *csa)
static int tpiu_enable(struct coresight_device *csdev, enum cs_mode mode,
void *__unused)
{
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ guard(spinlock)(&drvdata->spinlock);
tpiu_enable_hw(&csdev->access);
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
dev_dbg(&csdev->dev, "TPIU enabled\n");
return 0;
}
@@ -96,7 +100,11 @@ static void tpiu_disable_hw(struct csdev_access *csa)
static int tpiu_disable(struct coresight_device *csdev)
{
- if (atomic_dec_return(&csdev->refcnt))
+ struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+ guard(spinlock)(&drvdata->spinlock);
+ csdev->refcnt--;
+ if (csdev->refcnt)
return -EBUSY;
tpiu_disable_hw(&csdev->access);
@@ -132,6 +140,8 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
if (!drvdata)
return -ENOMEM;
+ spin_lock_init(&drvdata->spinlock);
+
drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
if (!IS_ERR(drvdata->atclk)) {
ret = clk_prepare_enable(drvdata->atclk);
@@ -218,7 +228,7 @@ static const struct amba_id tpiu_ids[] = {
.id = 0x000bb9e7,
.mask = 0x000fffff,
},
- { 0, 0},
+ { 0, 0, NULL },
};
MODULE_DEVICE_TABLE(amba, tpiu_ids);
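The ID-table terminators in this series change from { 0, 0 } to { 0, 0, NULL } so that every member of struct amba_id is spelled out. The same sentinel style in a stand-alone table, with the ID value copied from the tpiu table above purely for illustration:

	static const struct amba_id example_ids[] = {
		{
			.id	= 0x000bb9e7,
			.mask	= 0x000fffff,
		},
		{ 0, 0, NULL },		/* explicit sentinel: id, mask, data */
	};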
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c
index 10e886455b8b..f9ebf20c91e6 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.c
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.c
@@ -103,7 +103,7 @@ static int smb_open(struct inode *inode, struct file *file)
if (drvdata->reading)
return -EBUSY;
- if (atomic_read(&drvdata->csdev->refcnt))
+ if (drvdata->csdev->refcnt)
return -EBUSY;
smb_update_data_size(drvdata);
@@ -207,11 +207,11 @@ static void smb_enable_sysfs(struct coresight_device *csdev)
{
struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent);
- if (drvdata->mode != CS_MODE_DISABLED)
+ if (coresight_get_mode(csdev) != CS_MODE_DISABLED)
return;
smb_enable_hw(drvdata);
- drvdata->mode = CS_MODE_SYSFS;
+ coresight_set_mode(csdev, CS_MODE_SYSFS);
}
static int smb_enable_perf(struct coresight_device *csdev, void *data)
@@ -234,7 +234,7 @@ static int smb_enable_perf(struct coresight_device *csdev, void *data)
if (drvdata->pid == -1) {
smb_enable_hw(drvdata);
drvdata->pid = pid;
- drvdata->mode = CS_MODE_PERF;
+ coresight_set_mode(csdev, CS_MODE_PERF);
}
return 0;
@@ -253,7 +253,8 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
return -EBUSY;
/* Do nothing, the SMB is already enabled as other mode */
- if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode)
+ if (coresight_get_mode(csdev) != CS_MODE_DISABLED &&
+ coresight_get_mode(csdev) != mode)
return -EBUSY;
switch (mode) {
@@ -270,7 +271,7 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode,
if (ret)
return ret;
- atomic_inc(&csdev->refcnt);
+ csdev->refcnt++;
dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n");
return ret;
@@ -285,17 +286,18 @@ static int smb_disable(struct coresight_device *csdev)
if (drvdata->reading)
return -EBUSY;
- if (atomic_dec_return(&csdev->refcnt))
+ csdev->refcnt--;
+ if (csdev->refcnt)
return -EBUSY;
/* Complain if we (somehow) got out of sync */
- WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
+ WARN_ON_ONCE(coresight_get_mode(csdev) == CS_MODE_DISABLED);
smb_disable_hw(drvdata);
/* Dissociate from the target process. */
drvdata->pid = -1;
- drvdata->mode = CS_MODE_DISABLED;
+ coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n");
return 0;
@@ -380,7 +382,7 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev,
guard(spinlock)(&drvdata->spinlock);
/* Don't do anything if another tracer is using this sink. */
- if (atomic_read(&csdev->refcnt) != 1)
+ if (csdev->refcnt != 1)
return 0;
smb_disable_hw(drvdata);
@@ -586,7 +588,7 @@ static void smb_remove(struct platform_device *pdev)
#ifdef CONFIG_ACPI
static const struct acpi_device_id ultrasoc_smb_acpi_match[] = {
- {"HISI03A1", 0},
+ {"HISI03A1", 0, 0, 0},
{}
};
MODULE_DEVICE_TABLE(acpi, ultrasoc_smb_acpi_match);
diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.h b/drivers/hwtracing/coresight/ultrasoc-smb.h
index 82a44c14a882..a91d39cfccb8 100644
--- a/drivers/hwtracing/coresight/ultrasoc-smb.h
+++ b/drivers/hwtracing/coresight/ultrasoc-smb.h
@@ -109,7 +109,6 @@ struct smb_data_buffer {
* @reading: Synchronise user space access to SMB buffer.
* @pid: Process ID of the process being monitored by the
* session that is using this component.
- * @mode: How this SMB is being used, perf mode or sysfs mode.
*/
struct smb_drv_data {
void __iomem *base;
@@ -119,7 +118,6 @@ struct smb_drv_data {
spinlock_t spinlock;
bool reading;
pid_t pid;
- enum cs_mode mode;
};
#endif
diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c
index c1b5fd2b8974..4bf04a977840 100644
--- a/drivers/hwtracing/ptt/hisi_ptt.c
+++ b/drivers/hwtracing/ptt/hisi_ptt.c
@@ -998,6 +998,9 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
int ret;
u32 val;
+ if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
+ return -ENOENT;
+
if (event->cpu < 0) {
dev_dbg(event->pmu->dev, "Per-task mode not supported\n");
return -EOPNOTSUPP;
@@ -1006,9 +1009,6 @@ static int hisi_ptt_pmu_event_init(struct perf_event *event)
if (event->attach_state & PERF_ATTACH_TASK)
return -EOPNOTSUPP;
- if (event->attr.type != hisi_ptt->hisi_ptt_pmu.type)
- return -ENOENT;
-
ret = hisi_ptt_trace_valid_filter(hisi_ptt, event->attr.config);
if (ret < 0)
return ret;
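The hisi_ptt hunk moves the event type check to the top of event_init so that events belonging to other PMUs are rejected with -ENOENT before any capability checks run. A schematic version of that ordering, with illustrative names:

	static struct pmu example_pmu;	/* stands in for the registered PMU */

	static int example_pmu_event_init(struct perf_event *event)
	{
		/* Not ours: return -ENOENT so the core keeps probing other PMUs */
		if (event->attr.type != example_pmu.type)
			return -ENOENT;

		/* Only now reject configurations this PMU cannot handle */
		if (event->cpu < 0)
			return -EOPNOTSUPP;

		return 0;
	}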
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index b10574d42b7a..4f41a3c7824d 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -6,21 +6,30 @@
* I2C master mode controller driver, used in Nomadik 8815
* and Ux500 platforms.
*
+ * The Mobileye EyeQ5 platform is also supported; it uses
+ * the same Ux500/DB8500 IP block with two quirks:
+ * - The memory bus only supports 32-bit accesses.
+ * - A register must be configured for the I2C speed mode;
+ * it is located in a shared register region called OLB.
+ *
* Author: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
* Author: Sachin Verma <sachin.verma@st.com>
*/
-#include <linux/init.h>
-#include <linux/module.h>
#include <linux/amba/bus.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/err.h>
+#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/pm_runtime.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
#define DRIVER_NAME "nmk-i2c"
@@ -42,61 +51,63 @@
#define I2C_ICR (0x038)
/* Control registers */
-#define I2C_CR_PE (0x1 << 0) /* Peripheral Enable */
-#define I2C_CR_OM (0x3 << 1) /* Operating mode */
-#define I2C_CR_SAM (0x1 << 3) /* Slave addressing mode */
-#define I2C_CR_SM (0x3 << 4) /* Speed mode */
-#define I2C_CR_SGCM (0x1 << 6) /* Slave general call mode */
-#define I2C_CR_FTX (0x1 << 7) /* Flush Transmit */
-#define I2C_CR_FRX (0x1 << 8) /* Flush Receive */
-#define I2C_CR_DMA_TX_EN (0x1 << 9) /* DMA Tx enable */
-#define I2C_CR_DMA_RX_EN (0x1 << 10) /* DMA Rx Enable */
-#define I2C_CR_DMA_SLE (0x1 << 11) /* DMA sync. logic enable */
-#define I2C_CR_LM (0x1 << 12) /* Loopback mode */
-#define I2C_CR_FON (0x3 << 13) /* Filtering on */
-#define I2C_CR_FS (0x3 << 15) /* Force stop enable */
+#define I2C_CR_PE BIT(0) /* Peripheral Enable */
+#define I2C_CR_OM GENMASK(2, 1) /* Operating mode */
+#define I2C_CR_SAM BIT(3) /* Slave addressing mode */
+#define I2C_CR_SM GENMASK(5, 4) /* Speed mode */
+#define I2C_CR_SGCM BIT(6) /* Slave general call mode */
+#define I2C_CR_FTX BIT(7) /* Flush Transmit */
+#define I2C_CR_FRX BIT(8) /* Flush Receive */
+#define I2C_CR_DMA_TX_EN BIT(9) /* DMA Tx enable */
+#define I2C_CR_DMA_RX_EN BIT(10) /* DMA Rx Enable */
+#define I2C_CR_DMA_SLE BIT(11) /* DMA sync. logic enable */
+#define I2C_CR_LM BIT(12) /* Loopback mode */
+#define I2C_CR_FON GENMASK(14, 13) /* Filtering on */
+#define I2C_CR_FS GENMASK(16, 15) /* Force stop enable */
+
+/* Slave control register (SCR) */
+#define I2C_SCR_SLSU GENMASK(31, 16) /* Slave data setup time */
/* Master controller (MCR) register */
-#define I2C_MCR_OP (0x1 << 0) /* Operation */
-#define I2C_MCR_A7 (0x7f << 1) /* 7-bit address */
-#define I2C_MCR_EA10 (0x7 << 8) /* 10-bit Extended address */
-#define I2C_MCR_SB (0x1 << 11) /* Extended address */
-#define I2C_MCR_AM (0x3 << 12) /* Address type */
-#define I2C_MCR_STOP (0x1 << 14) /* Stop condition */
-#define I2C_MCR_LENGTH (0x7ff << 15) /* Transaction length */
+#define I2C_MCR_OP BIT(0) /* Operation */
+#define I2C_MCR_A7 GENMASK(7, 1) /* 7-bit address */
+#define I2C_MCR_EA10 GENMASK(10, 8) /* 10-bit Extended address */
+#define I2C_MCR_SB BIT(11) /* Extended address */
+#define I2C_MCR_AM GENMASK(13, 12) /* Address type */
+#define I2C_MCR_STOP BIT(14) /* Stop condition */
+#define I2C_MCR_LENGTH GENMASK(25, 15) /* Transaction length */
/* Status register (SR) */
-#define I2C_SR_OP (0x3 << 0) /* Operation */
-#define I2C_SR_STATUS (0x3 << 2) /* controller status */
-#define I2C_SR_CAUSE (0x7 << 4) /* Abort cause */
-#define I2C_SR_TYPE (0x3 << 7) /* Receive type */
-#define I2C_SR_LENGTH (0x7ff << 9) /* Transfer length */
+#define I2C_SR_OP GENMASK(1, 0) /* Operation */
+#define I2C_SR_STATUS GENMASK(3, 2) /* controller status */
+#define I2C_SR_CAUSE GENMASK(6, 4) /* Abort cause */
+#define I2C_SR_TYPE GENMASK(8, 7) /* Receive type */
+#define I2C_SR_LENGTH GENMASK(19, 9) /* Transfer length */
+
+/* Baud-rate counter register (BRCR) */
+#define I2C_BRCR_BRCNT1 GENMASK(31, 16) /* Baud-rate counter 1 */
+#define I2C_BRCR_BRCNT2 GENMASK(15, 0) /* Baud-rate counter 2 */
/* Interrupt mask set/clear (IMSCR) bits */
-#define I2C_IT_TXFE (0x1 << 0)
-#define I2C_IT_TXFNE (0x1 << 1)
-#define I2C_IT_TXFF (0x1 << 2)
-#define I2C_IT_TXFOVR (0x1 << 3)
-#define I2C_IT_RXFE (0x1 << 4)
-#define I2C_IT_RXFNF (0x1 << 5)
-#define I2C_IT_RXFF (0x1 << 6)
-#define I2C_IT_RFSR (0x1 << 16)
-#define I2C_IT_RFSE (0x1 << 17)
-#define I2C_IT_WTSR (0x1 << 18)
-#define I2C_IT_MTD (0x1 << 19)
-#define I2C_IT_STD (0x1 << 20)
-#define I2C_IT_MAL (0x1 << 24)
-#define I2C_IT_BERR (0x1 << 25)
-#define I2C_IT_MTDWS (0x1 << 28)
-
-#define GEN_MASK(val, mask, sb) (((val) << (sb)) & (mask))
+#define I2C_IT_TXFE BIT(0)
+#define I2C_IT_TXFNE BIT(1)
+#define I2C_IT_TXFF BIT(2)
+#define I2C_IT_TXFOVR BIT(3)
+#define I2C_IT_RXFE BIT(4)
+#define I2C_IT_RXFNF BIT(5)
+#define I2C_IT_RXFF BIT(6)
+#define I2C_IT_RFSR BIT(16)
+#define I2C_IT_RFSE BIT(17)
+#define I2C_IT_WTSR BIT(18)
+#define I2C_IT_MTD BIT(19)
+#define I2C_IT_STD BIT(20)
+#define I2C_IT_MAL BIT(24)
+#define I2C_IT_BERR BIT(25)
+#define I2C_IT_MTDWS BIT(28)
/* some bits in ICR are reserved */
#define I2C_CLEAR_ALL_INTS 0x131f007f
-/* first three msb bits are reserved */
-#define IRQ_MASK(mask) (mask & 0x1fffffff)
-
/* maximum threshold value */
#define MAX_I2C_FIFO_THRESHOLD 15
@@ -107,6 +118,15 @@ enum i2c_freq_mode {
I2C_FREQ_MODE_FAST_PLUS, /* up to 1 Mb/s */
};
+/* Mobileye EyeQ5 offset into a shared register region (called OLB) */
+#define NMK_I2C_EYEQ5_OLB_IOCR2 0x0B8
+
+enum i2c_eyeq5_speed {
+ I2C_EYEQ5_SPEED_FAST,
+ I2C_EYEQ5_SPEED_FAST_PLUS,
+ I2C_EYEQ5_SPEED_HIGH_SPEED,
+};
+
/**
* struct i2c_vendor_data - per-vendor variations
* @has_mtdws: variant has the MTDWS bit
@@ -131,6 +151,12 @@ enum i2c_operation {
I2C_READ = 0x01
};
+enum i2c_operating_mode {
+ I2C_OM_SLAVE,
+ I2C_OM_MASTER,
+ I2C_OM_MASTER_OR_SLAVE,
+};
+
/**
* struct i2c_nmk_client - client specific data
* @slave_adr: 7-bit slave address
@@ -159,11 +185,13 @@ struct i2c_nmk_client {
* @clk_freq: clock frequency for the operation mode
* @tft: Tx FIFO Threshold in bytes
* @rft: Rx FIFO Threshold in bytes
- * @timeout: Slave response timeout (ms)
+ * @timeout_usecs: Slave response timeout
* @sm: speed mode
* @stop: stop condition.
- * @xfer_complete: acknowledge completion for a I2C message.
+ * @xfer_wq: xfer done wait queue.
+ * @xfer_done: xfer done boolean.
 * @result: controller propagated result.
+ * @has_32b_bus: controller is on a bus that only supports 32-bit accesses.
*/
struct nmk_i2c_dev {
struct i2c_vendor_data *vendor;
@@ -176,11 +204,13 @@ struct nmk_i2c_dev {
u32 clk_freq;
unsigned char tft;
unsigned char rft;
- int timeout;
+ u32 timeout_usecs;
enum i2c_freq_mode sm;
int stop;
- struct completion xfer_complete;
+ struct wait_queue_head xfer_wq;
+ bool xfer_done;
int result;
+ bool has_32b_bus;
};
/* controller's abort causes */
@@ -204,18 +234,36 @@ static inline void i2c_clr_bit(void __iomem *reg, u32 mask)
writel(readl(reg) & ~mask, reg);
}
+static inline u8 nmk_i2c_readb(const struct nmk_i2c_dev *priv,
+ unsigned long reg)
+{
+ if (priv->has_32b_bus)
+ return readl(priv->virtbase + reg);
+ else
+ return readb(priv->virtbase + reg);
+}
+
+static inline void nmk_i2c_writeb(const struct nmk_i2c_dev *priv, u32 val,
+ unsigned long reg)
+{
+ if (priv->has_32b_bus)
+ writel(val, priv->virtbase + reg);
+ else
+ writeb(val, priv->virtbase + reg);
+}
+
/**
* flush_i2c_fifo() - This function flushes the I2C FIFO
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*
* This function flushes the I2C Tx and Rx FIFOs. It returns
* 0 on successful flushing of FIFO
*/
-static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
+static int flush_i2c_fifo(struct nmk_i2c_dev *priv)
{
#define LOOP_ATTEMPTS 10
+ ktime_t timeout;
int i;
- unsigned long timeout;
/*
* flush the transmit and receive FIFO. The flushing
@@ -224,19 +272,19 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
* bits, until then no one must access Tx, Rx FIFO and
* should poll on these bits waiting for the completion.
*/
- writel((I2C_CR_FTX | I2C_CR_FRX), dev->virtbase + I2C_CR);
+ writel((I2C_CR_FTX | I2C_CR_FRX), priv->virtbase + I2C_CR);
for (i = 0; i < LOOP_ATTEMPTS; i++) {
- timeout = jiffies + dev->adap.timeout;
+ timeout = ktime_add_us(ktime_get(), priv->timeout_usecs);
- while (!time_after(jiffies, timeout)) {
- if ((readl(dev->virtbase + I2C_CR) &
+ while (ktime_after(timeout, ktime_get())) {
+ if ((readl(priv->virtbase + I2C_CR) &
(I2C_CR_FTX | I2C_CR_FRX)) == 0)
- return 0;
+ return 0;
}
}
- dev_err(&dev->adev->dev,
+ dev_err(&priv->adev->dev,
"flushing operation timed out giving up after %d attempts",
LOOP_ATTEMPTS);
@@ -245,120 +293,121 @@ static int flush_i2c_fifo(struct nmk_i2c_dev *dev)
/**
* disable_all_interrupts() - Disable all interrupts of this I2c Bus
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static void disable_all_interrupts(struct nmk_i2c_dev *dev)
+static void disable_all_interrupts(struct nmk_i2c_dev *priv)
{
- u32 mask = IRQ_MASK(0);
- writel(mask, dev->virtbase + I2C_IMSCR);
+ writel(0, priv->virtbase + I2C_IMSCR);
}
/**
* clear_all_interrupts() - Clear all interrupts of I2C Controller
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static void clear_all_interrupts(struct nmk_i2c_dev *dev)
+static void clear_all_interrupts(struct nmk_i2c_dev *priv)
{
- u32 mask;
- mask = IRQ_MASK(I2C_CLEAR_ALL_INTS);
- writel(mask, dev->virtbase + I2C_ICR);
+ writel(I2C_CLEAR_ALL_INTS, priv->virtbase + I2C_ICR);
}
/**
* init_hw() - initialize the I2C hardware
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
*/
-static int init_hw(struct nmk_i2c_dev *dev)
+static int init_hw(struct nmk_i2c_dev *priv)
{
int stat;
- stat = flush_i2c_fifo(dev);
+ stat = flush_i2c_fifo(priv);
if (stat)
goto exit;
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_clr_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- disable_all_interrupts(dev);
+ disable_all_interrupts(priv);
- clear_all_interrupts(dev);
+ clear_all_interrupts(priv);
- dev->cli.operation = I2C_NO_OPERATION;
+ priv->cli.operation = I2C_NO_OPERATION;
exit:
return stat;
}
/* enable peripheral, master mode operation */
-#define DEFAULT_I2C_REG_CR ((1 << 1) | I2C_CR_PE)
+#define DEFAULT_I2C_REG_CR (FIELD_PREP(I2C_CR_OM, I2C_OM_MASTER) | I2C_CR_PE)
+
+/* grab top three bits from extended I2C addresses */
+#define ADR_3MSB_BITS GENMASK(9, 7)
/**
* load_i2c_mcr_reg() - load the MCR register
- * @dev: private data of controller
+ * @priv: private data of controller
* @flags: message flags
*/
-static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *dev, u16 flags)
+static u32 load_i2c_mcr_reg(struct nmk_i2c_dev *priv, u16 flags)
{
u32 mcr = 0;
unsigned short slave_adr_3msb_bits;
- mcr |= GEN_MASK(dev->cli.slave_adr, I2C_MCR_A7, 1);
+ mcr |= FIELD_PREP(I2C_MCR_A7, priv->cli.slave_adr);
if (unlikely(flags & I2C_M_TEN)) {
/* 10-bit address transaction */
- mcr |= GEN_MASK(2, I2C_MCR_AM, 12);
+ mcr |= FIELD_PREP(I2C_MCR_AM, 2);
/*
* Get the top 3 bits.
* EA10 represents extended address in MCR. This includes
* the extension (MSB bits) of the 7 bit address loaded
* in A7
*/
- slave_adr_3msb_bits = (dev->cli.slave_adr >> 7) & 0x7;
+ slave_adr_3msb_bits = FIELD_GET(ADR_3MSB_BITS,
+ priv->cli.slave_adr);
- mcr |= GEN_MASK(slave_adr_3msb_bits, I2C_MCR_EA10, 8);
+ mcr |= FIELD_PREP(I2C_MCR_EA10, slave_adr_3msb_bits);
} else {
/* 7-bit address transaction */
- mcr |= GEN_MASK(1, I2C_MCR_AM, 12);
+ mcr |= FIELD_PREP(I2C_MCR_AM, 1);
}
/* start byte procedure not applied */
- mcr |= GEN_MASK(0, I2C_MCR_SB, 11);
+ mcr |= FIELD_PREP(I2C_MCR_SB, 0);
/* check the operation, master read/write? */
- if (dev->cli.operation == I2C_WRITE)
- mcr |= GEN_MASK(I2C_WRITE, I2C_MCR_OP, 0);
+ if (priv->cli.operation == I2C_WRITE)
+ mcr |= FIELD_PREP(I2C_MCR_OP, I2C_WRITE);
else
- mcr |= GEN_MASK(I2C_READ, I2C_MCR_OP, 0);
+ mcr |= FIELD_PREP(I2C_MCR_OP, I2C_READ);
/* stop or repeated start? */
- if (dev->stop)
- mcr |= GEN_MASK(1, I2C_MCR_STOP, 14);
+ if (priv->stop)
+ mcr |= FIELD_PREP(I2C_MCR_STOP, 1);
else
- mcr &= ~(GEN_MASK(1, I2C_MCR_STOP, 14));
+ mcr &= ~FIELD_PREP(I2C_MCR_STOP, 1);
- mcr |= GEN_MASK(dev->cli.count, I2C_MCR_LENGTH, 15);
+ mcr |= FIELD_PREP(I2C_MCR_LENGTH, priv->cli.count);
return mcr;
}
/**
* setup_i2c_controller() - setup the controller
- * @dev: private data of controller
+ * @priv: private data of controller
*/
-static void setup_i2c_controller(struct nmk_i2c_dev *dev)
+static void setup_i2c_controller(struct nmk_i2c_dev *priv)
{
u32 brcr1, brcr2;
u32 i2c_clk, div;
u32 ns;
u16 slsu;
- writel(0x0, dev->virtbase + I2C_CR);
- writel(0x0, dev->virtbase + I2C_HSMCR);
- writel(0x0, dev->virtbase + I2C_TFTR);
- writel(0x0, dev->virtbase + I2C_RFTR);
- writel(0x0, dev->virtbase + I2C_DMAR);
+ writel(0x0, priv->virtbase + I2C_CR);
+ writel(0x0, priv->virtbase + I2C_HSMCR);
+ writel(0x0, priv->virtbase + I2C_TFTR);
+ writel(0x0, priv->virtbase + I2C_RFTR);
+ writel(0x0, priv->virtbase + I2C_DMAR);
- i2c_clk = clk_get_rate(dev->clk);
+ i2c_clk = clk_get_rate(priv->clk);
/*
* set the slsu:
@@ -373,7 +422,7 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* slsu = cycles / (1000000000 / f) + 1
*/
ns = DIV_ROUND_UP_ULL(1000000000ULL, i2c_clk);
- switch (dev->sm) {
+ switch (priv->sm) {
case I2C_FREQ_MODE_FAST:
case I2C_FREQ_MODE_FAST_PLUS:
slsu = DIV_ROUND_UP(100, ns); /* Fast */
@@ -388,15 +437,15 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
}
slsu += 1;
- dev_dbg(&dev->adev->dev, "calculated SLSU = %04x\n", slsu);
- writel(slsu << 16, dev->virtbase + I2C_SCR);
+ dev_dbg(&priv->adev->dev, "calculated SLSU = %04x\n", slsu);
+ writel(FIELD_PREP(I2C_SCR_SLSU, slsu), priv->virtbase + I2C_SCR);
/*
* The spec says, in case of std. mode the divider is
* 2 whereas it is 3 for fast and fastplus mode of
* operation. TODO - high speed support.
*/
- div = (dev->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
+ div = (priv->clk_freq > I2C_MAX_STANDARD_MODE_FREQ) ? 3 : 2;
/*
* generate the mask for baud rate counters. The controller
@@ -405,11 +454,11 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
 * plus operation. Currently we do not support high speed mode
* so set brcr1 to 0.
*/
- brcr1 = 0 << 16;
- brcr2 = (i2c_clk/(dev->clk_freq * div)) & 0xffff;
+ brcr1 = FIELD_PREP(I2C_BRCR_BRCNT1, 0);
+ brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2, i2c_clk / (priv->clk_freq * div));
/* set the baud rate counter register */
- writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
+ writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
/*
* set the speed mode. Currently we support
@@ -417,125 +466,142 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev)
* TODO - support for fast mode plus (up to 1Mb/s)
* and high speed (up to 3.4 Mb/s)
*/
- if (dev->sm > I2C_FREQ_MODE_FAST) {
- dev_err(&dev->adev->dev,
+ if (priv->sm > I2C_FREQ_MODE_FAST) {
+ dev_err(&priv->adev->dev,
"do not support this mode defaulting to std. mode\n");
- brcr2 = i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2) & 0xffff;
- writel((brcr1 | brcr2), dev->virtbase + I2C_BRCR);
- writel(I2C_FREQ_MODE_STANDARD << 4,
- dev->virtbase + I2C_CR);
+ brcr2 = FIELD_PREP(I2C_BRCR_BRCNT2,
+ i2c_clk / (I2C_MAX_STANDARD_MODE_FREQ * 2));
+ writel((brcr1 | brcr2), priv->virtbase + I2C_BRCR);
+ writel(FIELD_PREP(I2C_CR_SM, I2C_FREQ_MODE_STANDARD),
+ priv->virtbase + I2C_CR);
}
- writel(dev->sm << 4, dev->virtbase + I2C_CR);
+ writel(FIELD_PREP(I2C_CR_SM, priv->sm), priv->virtbase + I2C_CR);
/* set the Tx and Rx FIFO threshold */
- writel(dev->tft, dev->virtbase + I2C_TFTR);
- writel(dev->rft, dev->virtbase + I2C_RFTR);
+ writel(priv->tft, priv->virtbase + I2C_TFTR);
+ writel(priv->rft, priv->virtbase + I2C_RFTR);
+}
+
+static bool nmk_i2c_wait_xfer_done(struct nmk_i2c_dev *priv)
+{
+ if (priv->timeout_usecs < jiffies_to_usecs(1)) {
+ unsigned long timeout_usecs = priv->timeout_usecs;
+ ktime_t timeout = ktime_set(0, timeout_usecs * NSEC_PER_USEC);
+
+ wait_event_hrtimeout(priv->xfer_wq, priv->xfer_done, timeout);
+ } else {
+ unsigned long timeout = usecs_to_jiffies(priv->timeout_usecs);
+
+ wait_event_timeout(priv->xfer_wq, priv->xfer_done, timeout);
+ }
+
+ return priv->xfer_done;
}
/**
* read_i2c() - Read from I2C client device
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
* @flags: message flags
*
* This function reads from i2c client device when controller is in
* master mode. There is a completion timeout. If there is no transfer
* before timeout error is returned.
*/
-static int read_i2c(struct nmk_i2c_dev *dev, u16 flags)
+static int read_i2c(struct nmk_i2c_dev *priv, u16 flags)
{
- int status = 0;
u32 mcr, irq_mask;
- unsigned long timeout;
+ int status = 0;
+ bool xfer_done;
- mcr = load_i2c_mcr_reg(dev, flags);
- writel(mcr, dev->virtbase + I2C_MCR);
+ mcr = load_i2c_mcr_reg(priv, flags);
+ writel(mcr, priv->virtbase + I2C_MCR);
/* load the current CR value */
- writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
- dev->virtbase + I2C_CR);
+ writel(readl(priv->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
+ priv->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_set_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- init_completion(&dev->xfer_complete);
+ init_waitqueue_head(&priv->xfer_wq);
+ priv->xfer_done = false;
/* enable interrupts by setting the mask */
irq_mask = (I2C_IT_RXFNF | I2C_IT_RXFF |
I2C_IT_MAL | I2C_IT_BERR);
- if (dev->stop || !dev->vendor->has_mtdws)
+ if (priv->stop || !priv->vendor->has_mtdws)
irq_mask |= I2C_IT_MTD;
else
irq_mask |= I2C_IT_MTDWS;
- irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
+ irq_mask &= I2C_CLEAR_ALL_INTS;
- writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
- dev->virtbase + I2C_IMSCR);
+ writel(readl(priv->virtbase + I2C_IMSCR) | irq_mask,
+ priv->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_timeout(
- &dev->xfer_complete, dev->adap.timeout);
+ xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (timeout == 0) {
+ if (!xfer_done) {
/* Controller timed out */
- dev_err(&dev->adev->dev, "read from slave 0x%x timed out\n",
- dev->cli.slave_adr);
+ dev_err(&priv->adev->dev, "read from slave 0x%x timed out\n",
+ priv->cli.slave_adr);
status = -ETIMEDOUT;
}
return status;
}
-static void fill_tx_fifo(struct nmk_i2c_dev *dev, int no_bytes)
+static void fill_tx_fifo(struct nmk_i2c_dev *priv, int no_bytes)
{
int count;
for (count = (no_bytes - 2);
(count > 0) &&
- (dev->cli.count != 0);
+ (priv->cli.count != 0);
count--) {
/* write to the Tx FIFO */
- writeb(*dev->cli.buffer,
- dev->virtbase + I2C_TFR);
- dev->cli.buffer++;
- dev->cli.count--;
- dev->cli.xfer_bytes++;
+ nmk_i2c_writeb(priv, *priv->cli.buffer, I2C_TFR);
+ priv->cli.buffer++;
+ priv->cli.count--;
+ priv->cli.xfer_bytes++;
}
}
/**
* write_i2c() - Write data to I2C client.
- * @dev: private data of I2C Driver
+ * @priv: private data of I2C Driver
* @flags: message flags
*
* This function writes data to I2C client
*/
-static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
+static int write_i2c(struct nmk_i2c_dev *priv, u16 flags)
{
- u32 status = 0;
u32 mcr, irq_mask;
- unsigned long timeout;
+ u32 status = 0;
+ bool xfer_done;
- mcr = load_i2c_mcr_reg(dev, flags);
+ mcr = load_i2c_mcr_reg(priv, flags);
- writel(mcr, dev->virtbase + I2C_MCR);
+ writel(mcr, priv->virtbase + I2C_MCR);
/* load the current CR value */
- writel(readl(dev->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
- dev->virtbase + I2C_CR);
+ writel(readl(priv->virtbase + I2C_CR) | DEFAULT_I2C_REG_CR,
+ priv->virtbase + I2C_CR);
/* enable the controller */
- i2c_set_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_set_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
- init_completion(&dev->xfer_complete);
+ init_waitqueue_head(&priv->xfer_wq);
+ priv->xfer_done = false;
/* enable interrupts by setting the masks */
irq_mask = (I2C_IT_TXFOVR | I2C_IT_MAL | I2C_IT_BERR);
/* Fill the TX FIFO with transmit data */
- fill_tx_fifo(dev, MAX_I2C_FIFO_THRESHOLD);
+ fill_tx_fifo(priv, MAX_I2C_FIFO_THRESHOLD);
- if (dev->cli.count != 0)
+ if (priv->cli.count != 0)
irq_mask |= I2C_IT_TXFNE;
/*
@@ -543,23 +609,22 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
* set the MTDWS bit (Master Transaction Done Without Stop)
* to start repeated start operation
*/
- if (dev->stop || !dev->vendor->has_mtdws)
+ if (priv->stop || !priv->vendor->has_mtdws)
irq_mask |= I2C_IT_MTD;
else
irq_mask |= I2C_IT_MTDWS;
- irq_mask = I2C_CLEAR_ALL_INTS & IRQ_MASK(irq_mask);
+ irq_mask &= I2C_CLEAR_ALL_INTS;
- writel(readl(dev->virtbase + I2C_IMSCR) | irq_mask,
- dev->virtbase + I2C_IMSCR);
+ writel(readl(priv->virtbase + I2C_IMSCR) | irq_mask,
+ priv->virtbase + I2C_IMSCR);
- timeout = wait_for_completion_timeout(
- &dev->xfer_complete, dev->adap.timeout);
+ xfer_done = nmk_i2c_wait_xfer_done(priv);
- if (timeout == 0) {
+ if (!xfer_done) {
/* Controller timed out */
- dev_err(&dev->adev->dev, "write to slave 0x%x timed out\n",
- dev->cli.slave_adr);
+ dev_err(&priv->adev->dev, "write to slave 0x%x timed out\n",
+ priv->cli.slave_adr);
status = -ETIMEDOUT;
}
@@ -568,44 +633,39 @@ static int write_i2c(struct nmk_i2c_dev *dev, u16 flags)
/**
* nmk_i2c_xfer_one() - transmit a single I2C message
- * @dev: device with a message encoded into it
+ * @priv: device with a message encoded into it
* @flags: message flags
*/
-static int nmk_i2c_xfer_one(struct nmk_i2c_dev *dev, u16 flags)
+static int nmk_i2c_xfer_one(struct nmk_i2c_dev *priv, u16 flags)
{
int status;
if (flags & I2C_M_RD) {
/* read operation */
- dev->cli.operation = I2C_READ;
- status = read_i2c(dev, flags);
+ priv->cli.operation = I2C_READ;
+ status = read_i2c(priv, flags);
} else {
/* write operation */
- dev->cli.operation = I2C_WRITE;
- status = write_i2c(dev, flags);
+ priv->cli.operation = I2C_WRITE;
+ status = write_i2c(priv, flags);
}
- if (status || (dev->result)) {
+ if (status || priv->result) {
u32 i2c_sr;
u32 cause;
- i2c_sr = readl(dev->virtbase + I2C_SR);
- /*
- * Check if the controller I2C operation status
- * is set to ABORT(11b).
- */
- if (((i2c_sr >> 2) & 0x3) == 0x3) {
- /* get the abort cause */
- cause = (i2c_sr >> 4) & 0x7;
- dev_err(&dev->adev->dev, "%s\n",
+ i2c_sr = readl(priv->virtbase + I2C_SR);
+ if (FIELD_GET(I2C_SR_STATUS, i2c_sr) == I2C_ABORT) {
+ cause = FIELD_GET(I2C_SR_CAUSE, i2c_sr);
+ dev_err(&priv->adev->dev, "%s\n",
cause >= ARRAY_SIZE(abort_causes) ?
"unknown reason" :
abort_causes[cause]);
}
- (void) init_hw(dev);
+ init_hw(priv);
- status = status ? status : dev->result;
+ status = status ? status : priv->result;
}
return status;
@@ -663,24 +723,24 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
{
int status = 0;
int i;
- struct nmk_i2c_dev *dev = i2c_get_adapdata(i2c_adap);
+ struct nmk_i2c_dev *priv = i2c_get_adapdata(i2c_adap);
int j;
- pm_runtime_get_sync(&dev->adev->dev);
+ pm_runtime_get_sync(&priv->adev->dev);
/* Attempt three times to send the message queue */
for (j = 0; j < 3; j++) {
/* setup the i2c controller */
- setup_i2c_controller(dev);
+ setup_i2c_controller(priv);
for (i = 0; i < num_msgs; i++) {
- dev->cli.slave_adr = msgs[i].addr;
- dev->cli.buffer = msgs[i].buf;
- dev->cli.count = msgs[i].len;
- dev->stop = (i < (num_msgs - 1)) ? 0 : 1;
- dev->result = 0;
+ priv->cli.slave_adr = msgs[i].addr;
+ priv->cli.buffer = msgs[i].buf;
+ priv->cli.count = msgs[i].len;
+ priv->stop = (i < (num_msgs - 1)) ? 0 : 1;
+ priv->result = 0;
- status = nmk_i2c_xfer_one(dev, msgs[i].flags);
+ status = nmk_i2c_xfer_one(priv, msgs[i].flags);
if (status != 0)
break;
}
@@ -688,7 +748,7 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
break;
}
- pm_runtime_put_sync(&dev->adev->dev);
+ pm_runtime_put_sync(&priv->adev->dev);
/* return the no. of messages processed */
if (status)
@@ -699,14 +759,14 @@ static int nmk_i2c_xfer(struct i2c_adapter *i2c_adap,
/**
* disable_interrupts() - disable the interrupts
- * @dev: private data of controller
+ * @priv: private data of controller
* @irq: interrupt number
*/
-static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
+static int disable_interrupts(struct nmk_i2c_dev *priv, u32 irq)
{
- irq = IRQ_MASK(irq);
- writel(readl(dev->virtbase + I2C_IMSCR) & ~(I2C_CLEAR_ALL_INTS & irq),
- dev->virtbase + I2C_IMSCR);
+ irq &= I2C_CLEAR_ALL_INTS;
+ writel(readl(priv->virtbase + I2C_IMSCR) & ~irq,
+ priv->virtbase + I2C_IMSCR);
return 0;
}
@@ -723,38 +783,39 @@ static int disable_interrupts(struct nmk_i2c_dev *dev, u32 irq)
*/
static irqreturn_t i2c_irq_handler(int irq, void *arg)
{
- struct nmk_i2c_dev *dev = arg;
+ struct nmk_i2c_dev *priv = arg;
+ struct device *dev = &priv->adev->dev;
u32 tft, rft;
u32 count;
u32 misr, src;
/* load Tx FIFO and Rx FIFO threshold values */
- tft = readl(dev->virtbase + I2C_TFTR);
- rft = readl(dev->virtbase + I2C_RFTR);
+ tft = readl(priv->virtbase + I2C_TFTR);
+ rft = readl(priv->virtbase + I2C_RFTR);
/* read interrupt status register */
- misr = readl(dev->virtbase + I2C_MISR);
+ misr = readl(priv->virtbase + I2C_MISR);
src = __ffs(misr);
- switch ((1 << src)) {
+ switch (BIT(src)) {
/* Transmit FIFO nearly empty interrupt */
case I2C_IT_TXFNE:
{
- if (dev->cli.operation == I2C_READ) {
+ if (priv->cli.operation == I2C_READ) {
/*
* in read operation why do we care for writing?
* so disable the Transmit FIFO interrupt
*/
- disable_interrupts(dev, I2C_IT_TXFNE);
+ disable_interrupts(priv, I2C_IT_TXFNE);
} else {
- fill_tx_fifo(dev, (MAX_I2C_FIFO_THRESHOLD - tft));
+ fill_tx_fifo(priv, (MAX_I2C_FIFO_THRESHOLD - tft));
/*
* if done, close the transfer by disabling the
* corresponding TXFNE interrupt
*/
- if (dev->cli.count == 0)
- disable_interrupts(dev, I2C_IT_TXFNE);
+ if (priv->cli.count == 0)
+ disable_interrupts(priv, I2C_IT_TXFNE);
}
}
break;
@@ -768,60 +829,63 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RXFNF:
for (count = rft; count > 0; count--) {
/* Read the Rx FIFO */
- *dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
+ *priv->cli.buffer = nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
}
- dev->cli.count -= rft;
- dev->cli.xfer_bytes += rft;
+ priv->cli.count -= rft;
+ priv->cli.xfer_bytes += rft;
break;
/* Rx FIFO full */
case I2C_IT_RXFF:
for (count = MAX_I2C_FIFO_THRESHOLD; count > 0; count--) {
- *dev->cli.buffer = readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
+ *priv->cli.buffer = nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
}
- dev->cli.count -= MAX_I2C_FIFO_THRESHOLD;
- dev->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
+ priv->cli.count -= MAX_I2C_FIFO_THRESHOLD;
+ priv->cli.xfer_bytes += MAX_I2C_FIFO_THRESHOLD;
break;
/* Master Transaction Done with/without stop */
case I2C_IT_MTD:
case I2C_IT_MTDWS:
- if (dev->cli.operation == I2C_READ) {
- while (!(readl(dev->virtbase + I2C_RISR)
+ if (priv->cli.operation == I2C_READ) {
+ while (!(readl(priv->virtbase + I2C_RISR)
& I2C_IT_RXFE)) {
- if (dev->cli.count == 0)
+ if (priv->cli.count == 0)
break;
- *dev->cli.buffer =
- readb(dev->virtbase + I2C_RFR);
- dev->cli.buffer++;
- dev->cli.count--;
- dev->cli.xfer_bytes++;
+ *priv->cli.buffer =
+ nmk_i2c_readb(priv, I2C_RFR);
+ priv->cli.buffer++;
+ priv->cli.count--;
+ priv->cli.xfer_bytes++;
}
}
- disable_all_interrupts(dev);
- clear_all_interrupts(dev);
+ disable_all_interrupts(priv);
+ clear_all_interrupts(priv);
- if (dev->cli.count) {
- dev->result = -EIO;
- dev_err(&dev->adev->dev,
- "%lu bytes still remain to be xfered\n",
- dev->cli.count);
- (void) init_hw(dev);
+ if (priv->cli.count) {
+ priv->result = -EIO;
+ dev_err(dev, "%lu bytes still remain to be xfered\n",
+ priv->cli.count);
+ init_hw(priv);
}
- complete(&dev->xfer_complete);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
+
break;
/* Master Arbitration lost interrupt */
case I2C_IT_MAL:
- dev->result = -EIO;
- (void) init_hw(dev);
+ priv->result = -EIO;
+ init_hw(priv);
+
+ i2c_set_bit(priv->virtbase + I2C_ICR, I2C_IT_MAL);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
- i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_MAL);
- complete(&dev->xfer_complete);
break;
@@ -831,15 +895,20 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
* during the transaction.
*/
case I2C_IT_BERR:
- dev->result = -EIO;
- /* get the status */
- if (((readl(dev->virtbase + I2C_SR) >> 2) & 0x3) == I2C_ABORT)
- (void) init_hw(dev);
+ {
+ u32 sr;
- i2c_set_bit(dev->virtbase + I2C_ICR, I2C_IT_BERR);
- complete(&dev->xfer_complete);
+ sr = readl(priv->virtbase + I2C_SR);
+ priv->result = -EIO;
+ if (FIELD_GET(I2C_SR_STATUS, sr) == I2C_ABORT)
+ init_hw(priv);
- break;
+ i2c_set_bit(priv->virtbase + I2C_ICR, I2C_IT_BERR);
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
+
+ }
+ break;
/*
* Tx FIFO overrun interrupt.
@@ -847,11 +916,13 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
* the Tx FIFO is full.
*/
case I2C_IT_TXFOVR:
- dev->result = -EIO;
- (void) init_hw(dev);
+ priv->result = -EIO;
+ init_hw(priv);
+
+ dev_err(dev, "Tx Fifo Over run\n");
+ priv->xfer_done = true;
+ wake_up(&priv->xfer_wq);
- dev_err(&dev->adev->dev, "Tx Fifo Over run\n");
- complete(&dev->xfer_complete);
break;
@@ -863,10 +934,10 @@ static irqreturn_t i2c_irq_handler(int irq, void *arg)
case I2C_IT_RFSE:
case I2C_IT_WTSR:
case I2C_IT_STD:
- dev_err(&dev->adev->dev, "unhandled Interrupt\n");
+ dev_err(dev, "unhandled Interrupt\n");
break;
default:
- dev_err(&dev->adev->dev, "spurious Interrupt..\n");
+ dev_err(dev, "spurious Interrupt..\n");
break;
}
@@ -893,9 +964,9 @@ static int nmk_i2c_resume_early(struct device *dev)
static int nmk_i2c_runtime_suspend(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
- clk_disable_unprepare(nmk_i2c->clk);
+ clk_disable_unprepare(priv->clk);
pinctrl_pm_select_idle_state(dev);
return 0;
}
@@ -903,10 +974,10 @@ static int nmk_i2c_runtime_suspend(struct device *dev)
static int nmk_i2c_runtime_resume(struct device *dev)
{
struct amba_device *adev = to_amba_device(dev);
- struct nmk_i2c_dev *nmk_i2c = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
int ret;
- ret = clk_prepare_enable(nmk_i2c->clk);
+ ret = clk_prepare_enable(priv->clk);
if (ret) {
dev_err(dev, "can't prepare_enable clock\n");
return ret;
@@ -914,9 +985,9 @@ static int nmk_i2c_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = init_hw(nmk_i2c);
+ ret = init_hw(priv);
if (ret) {
- clk_disable_unprepare(nmk_i2c->clk);
+ clk_disable_unprepare(priv->clk);
pinctrl_pm_select_idle_state(dev);
}
@@ -939,107 +1010,160 @@ static const struct i2c_algorithm nmk_i2c_algo = {
};
static void nmk_i2c_of_probe(struct device_node *np,
- struct nmk_i2c_dev *nmk)
+ struct nmk_i2c_dev *priv)
{
+ u32 timeout_usecs;
+
/* Default to 100 kHz if no frequency is given in the node */
- if (of_property_read_u32(np, "clock-frequency", &nmk->clk_freq))
- nmk->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
+ if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
+ priv->clk_freq = I2C_MAX_STANDARD_MODE_FREQ;
/* This driver only supports 'standard' and 'fast' modes of operation. */
- if (nmk->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
- nmk->sm = I2C_FREQ_MODE_STANDARD;
+ if (priv->clk_freq <= I2C_MAX_STANDARD_MODE_FREQ)
+ priv->sm = I2C_FREQ_MODE_STANDARD;
+ else
+ priv->sm = I2C_FREQ_MODE_FAST;
+ priv->tft = 1; /* Tx FIFO threshold */
+ priv->rft = 8; /* Rx FIFO threshold */
+
+ /* Slave response timeout */
+ if (!of_property_read_u32(np, "i2c-transfer-timeout-us", &timeout_usecs))
+ priv->timeout_usecs = timeout_usecs;
+ else
+ priv->timeout_usecs = 200 * USEC_PER_MSEC;
+}
+
+static const unsigned int nmk_i2c_eyeq5_masks[] = {
+ GENMASK(5, 4),
+ GENMASK(7, 6),
+ GENMASK(9, 8),
+ GENMASK(11, 10),
+ GENMASK(13, 12),
+};
+
+static int nmk_i2c_eyeq5_probe(struct nmk_i2c_dev *priv)
+{
+ struct device *dev = &priv->adev->dev;
+ struct device_node *np = dev->of_node;
+ unsigned int mask, speed_mode;
+ struct regmap *olb;
+ unsigned int id;
+
+ priv->has_32b_bus = true;
+
+ olb = syscon_regmap_lookup_by_phandle_args(np, "mobileye,olb", 1, &id);
+ if (IS_ERR(olb))
+ return PTR_ERR(olb);
+ if (id >= ARRAY_SIZE(nmk_i2c_eyeq5_masks))
+ return -ENOENT;
+
+ if (priv->clk_freq <= 400000)
+ speed_mode = I2C_EYEQ5_SPEED_FAST;
+ else if (priv->clk_freq <= 1000000)
+ speed_mode = I2C_EYEQ5_SPEED_FAST_PLUS;
else
- nmk->sm = I2C_FREQ_MODE_FAST;
- nmk->tft = 1; /* Tx FIFO threshold */
- nmk->rft = 8; /* Rx FIFO threshold */
- nmk->timeout = 200; /* Slave response timeout(ms) */
+ speed_mode = I2C_EYEQ5_SPEED_HIGH_SPEED;
+
+ mask = nmk_i2c_eyeq5_masks[id];
+ regmap_update_bits(olb, NMK_I2C_EYEQ5_OLB_IOCR2,
+ mask, speed_mode << __fls(mask));
+
+ return 0;
}
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
+ struct nmk_i2c_dev *priv;
struct device_node *np = adev->dev.of_node;
- struct nmk_i2c_dev *dev;
+ struct device *dev = &adev->dev;
struct i2c_adapter *adap;
struct i2c_vendor_data *vendor = id->data;
u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1;
- dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL);
- if (!dev)
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- dev->vendor = vendor;
- dev->adev = adev;
- nmk_i2c_of_probe(np, dev);
+ priv->vendor = vendor;
+ priv->adev = adev;
+ priv->has_32b_bus = false;
+ nmk_i2c_of_probe(np, priv);
+
+ if (of_device_is_compatible(np, "mobileye,eyeq5-i2c")) {
+ ret = nmk_i2c_eyeq5_probe(priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed OLB lookup\n");
+ }
- if (dev->tft > max_fifo_threshold) {
- dev_warn(&adev->dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
- dev->tft, max_fifo_threshold);
- dev->tft = max_fifo_threshold;
+ if (priv->tft > max_fifo_threshold) {
+ dev_warn(dev, "requested TX FIFO threshold %u, adjusted down to %u\n",
+ priv->tft, max_fifo_threshold);
+ priv->tft = max_fifo_threshold;
}
- if (dev->rft > max_fifo_threshold) {
- dev_warn(&adev->dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
- dev->rft, max_fifo_threshold);
- dev->rft = max_fifo_threshold;
+ if (priv->rft > max_fifo_threshold) {
+ dev_warn(dev, "requested RX FIFO threshold %u, adjusted down to %u\n",
+ priv->rft, max_fifo_threshold);
+ priv->rft = max_fifo_threshold;
}
- amba_set_drvdata(adev, dev);
+ amba_set_drvdata(adev, priv);
- dev->virtbase = devm_ioremap(&adev->dev, adev->res.start,
- resource_size(&adev->res));
- if (!dev->virtbase)
+ priv->virtbase = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (!priv->virtbase)
return -ENOMEM;
- dev->irq = adev->irq[0];
- ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0,
- DRIVER_NAME, dev);
+ priv->irq = adev->irq[0];
+ ret = devm_request_irq(dev, priv->irq, i2c_irq_handler, 0,
+ DRIVER_NAME, priv);
if (ret)
- return dev_err_probe(&adev->dev, ret,
- "cannot claim the irq %d\n", dev->irq);
+ return dev_err_probe(dev, ret,
+ "cannot claim the irq %d\n", priv->irq);
- dev->clk = devm_clk_get_enabled(&adev->dev, NULL);
- if (IS_ERR(dev->clk))
- return dev_err_probe(&adev->dev, PTR_ERR(dev->clk),
+ priv->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
"could enable i2c clock\n");
- init_hw(dev);
+ init_hw(priv);
- adap = &dev->adap;
+ adap = &priv->adap;
adap->dev.of_node = np;
- adap->dev.parent = &adev->dev;
+ adap->dev.parent = dev;
adap->owner = THIS_MODULE;
adap->class = I2C_CLASS_DEPRECATED;
adap->algo = &nmk_i2c_algo;
- adap->timeout = msecs_to_jiffies(dev->timeout);
+ adap->timeout = usecs_to_jiffies(priv->timeout_usecs);
snprintf(adap->name, sizeof(adap->name),
"Nomadik I2C at %pR", &adev->res);
- i2c_set_adapdata(adap, dev);
+ i2c_set_adapdata(adap, priv);
- dev_info(&adev->dev,
+ dev_info(dev,
"initialize %s on virtual base %p\n",
- adap->name, dev->virtbase);
+ adap->name, priv->virtbase);
ret = i2c_add_adapter(adap);
if (ret)
return ret;
- pm_runtime_put(&adev->dev);
+ pm_runtime_put(dev);
return 0;
}
static void nmk_i2c_remove(struct amba_device *adev)
{
- struct nmk_i2c_dev *dev = amba_get_drvdata(adev);
+ struct nmk_i2c_dev *priv = amba_get_drvdata(adev);
- i2c_del_adapter(&dev->adap);
- flush_i2c_fifo(dev);
- disable_all_interrupts(dev);
- clear_all_interrupts(dev);
+ i2c_del_adapter(&priv->adap);
+ flush_i2c_fifo(priv);
+ disable_all_interrupts(priv);
+ clear_all_interrupts(priv);
/* disable the controller */
- i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE);
+ i2c_clr_bit(priv->virtbase + I2C_CR, I2C_CR_PE);
}
static struct i2c_vendor_data vendor_stn8815 = {
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index f5dfc33b97c0..c3f4ff08ac38 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -49,6 +49,7 @@
#include <linux/pm.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <dt-bindings/mux/mux.h>
@@ -116,6 +117,9 @@ struct pca954x {
unsigned int irq_mask;
raw_spinlock_t lock;
struct regulator *supply;
+
+ struct gpio_desc *reset_gpio;
+ struct reset_control *reset_cont;
};
/* Provide specs for the MAX735x, PCA954x and PCA984x types we know about */
@@ -518,6 +522,35 @@ static int pca954x_init(struct i2c_client *client, struct pca954x *data)
return ret;
}
+static int pca954x_get_reset(struct device *dev, struct pca954x *data)
+{
+ data->reset_cont = devm_reset_control_get_optional_shared(dev, NULL);
+ if (IS_ERR(data->reset_cont))
+ return dev_err_probe(dev, PTR_ERR(data->reset_cont),
+ "Failed to get reset\n");
+ else if (data->reset_cont)
+ return 0;
+
+ /* Fall back to legacy reset-gpios */
+ data->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpio)) {
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpio),
+ "Failed to get reset gpio");
+ }
+
+ return 0;
+}
+
+static void pca954x_reset_deassert(struct pca954x *data)
+{
+ if (data->reset_cont)
+ reset_control_deassert(data->reset_cont);
+ else
+ gpiod_set_value_cansleep(data->reset_gpio, 0);
+}
+
/*
* I2C init/probing/exit functions
*/
@@ -526,7 +559,6 @@ static int pca954x_probe(struct i2c_client *client)
const struct i2c_device_id *id = i2c_client_get_device_id(client);
struct i2c_adapter *adap = client->adapter;
struct device *dev = &client->dev;
- struct gpio_desc *gpio;
struct i2c_mux_core *muxc;
struct pca954x *data;
int num;
@@ -554,15 +586,13 @@ static int pca954x_probe(struct i2c_client *client)
return dev_err_probe(dev, ret,
"Failed to enable vdd supply\n");
- /* Reset the mux if a reset GPIO is specified. */
- gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(gpio)) {
- ret = PTR_ERR(gpio);
+ ret = pca954x_get_reset(dev, data);
+ if (ret)
goto fail_cleanup;
- }
- if (gpio) {
+
+ if (data->reset_cont || data->reset_gpio) {
udelay(1);
- gpiod_set_value_cansleep(gpio, 0);
+ pca954x_reset_deassert(data);
/* Give the chip some time to recover. */
udelay(1);
}
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 52eb46ef84c1..9c351ffc7bed 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -71,6 +71,15 @@ config IIO_TRIGGERED_EVENT
help
Provides helper functions for setting up triggered events.
+config IIO_BACKEND
+ tristate
+ help
+ Framework to handle complex IIO aggregate devices. The typical
+ architecture using this framework has one frontend device that is
+ "linked" against one or more backend devices. The framework then
+ makes it easy to get and control such backend devices.
+
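
As a rough illustration of the frontend/backend split described above: a frontend driver looks up the backend device referenced by its firmware node and enables it. This is only a sketch, not code from this series; demo_frontend_probe() is made up, and devm_iio_backend_get()/iio_backend_enable() are assumed to be the helpers the new framework exposes.

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/iio/backend.h>

    /* Hypothetical frontend probe: fetch the backend (e.g. via an
     * "io-backends" reference) and bring it up. */
    static int demo_frontend_probe(struct device *dev)
    {
    	struct iio_backend *back;

    	back = devm_iio_backend_get(dev, NULL);
    	if (IS_ERR(back))
    		return PTR_ERR(back);

    	return iio_backend_enable(back);
    }
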
source "drivers/iio/accel/Kconfig"
source "drivers/iio/adc/Kconfig"
source "drivers/iio/addac/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 9622347a1c1b..0ba0e1521ba4 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_IIO_GTS_HELPER) += industrialio-gts-helper.o
obj-$(CONFIG_IIO_SW_DEVICE) += industrialio-sw-device.o
obj-$(CONFIG_IIO_SW_TRIGGER) += industrialio-sw-trigger.o
obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o
+obj-$(CONFIG_IIO_BACKEND) += industrialio-backend.o
obj-y += accel/
obj-y += adc/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index c9d7afe489e8..c2da5066e9a7 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -256,11 +256,11 @@ config BMC150_ACCEL_SPI
config BMI088_ACCEL
tristate "Bosch BMI088 Accelerometer Driver"
- depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
select REGMAP
- select BMI088_ACCEL_SPI
+ select BMI088_ACCEL_SPI if SPI
+ select BMI088_ACCEL_I2C if I2C
help
Say yes here to build support for the following Bosch accelerometers:
BMI088, BMI085, BMI090L. Note that all of these are combo modules that
@@ -269,6 +269,10 @@ config BMI088_ACCEL
This driver only implements the accelerometer part, which has its own
address and register map. BMG160 provides the gyroscope driver.
+config BMI088_ACCEL_I2C
+ tristate
+ select REGMAP_I2C
+
config BMI088_ACCEL_SPI
tristate
select REGMAP_SPI
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 311ead9c3ef1..db90532ba24a 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_BMC150_ACCEL) += bmc150-accel-core.o
obj-$(CONFIG_BMC150_ACCEL_I2C) += bmc150-accel-i2c.o
obj-$(CONFIG_BMC150_ACCEL_SPI) += bmc150-accel-spi.o
obj-$(CONFIG_BMI088_ACCEL) += bmi088-accel-core.o
+obj-$(CONFIG_BMI088_ACCEL_I2C) += bmi088-accel-i2c.o
obj-$(CONFIG_BMI088_ACCEL_SPI) += bmi088-accel-spi.o
obj-$(CONFIG_DA280) += da280.o
obj-$(CONFIG_DA311) += da311.o
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index 484fe2e9fb17..210228affb80 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -339,22 +339,17 @@ static int adxl367_set_act_threshold(struct adxl367_state *st,
{
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = _adxl367_set_act_threshold(st, act, threshold);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
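
The adxl367 conversions in this and the following hunks lean on the scope-based cleanup helpers: guard(mutex)(&lock) acquires the mutex and releases it automatically when the scope is left, which is why the goto/unlock pairs can become plain early returns. A minimal standalone sketch of the pattern (demo_lock and demo_update() are made-up names):

    #include <linux/cleanup.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);

    static int demo_update(int val)
    {
    	guard(mutex)(&demo_lock);	/* dropped automatically on any return */

    	if (val < 0)
    		return -EINVAL;		/* early return, no explicit unlock */

    	/* ...protected update... */
    	return 0;
    }
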
static int adxl367_set_act_proc_mode(struct adxl367_state *st,
@@ -482,51 +477,45 @@ static int adxl367_set_fifo_watermark(struct adxl367_state *st,
static int adxl367_set_range(struct iio_dev *indio_dev,
enum adxl367_range range)
{
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
-
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- goto out;
+ guard(mutex)(&st->lock);
- ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
- ADXL367_FILTER_CTL_RANGE_MASK,
- FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
- range));
- if (ret)
- goto out;
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- adxl367_scale_act_thresholds(st, st->range, range);
+ ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
+ ADXL367_FILTER_CTL_RANGE_MASK,
+ FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
+ range));
+ if (ret)
+ return ret;
- /* Activity thresholds depend on range */
- ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
- st->act_threshold);
- if (ret)
- goto out;
+ adxl367_scale_act_thresholds(st, st->range, range);
- ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
- st->inact_threshold);
- if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
- if (ret)
- goto out;
+ /* Activity thresholds depend on range */
+ ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
+ st->act_threshold);
+ if (ret)
+ return ret;
- st->range = range;
+ ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
+ st->inact_threshold);
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&st->lock);
+ ret = adxl367_set_measure_en(st, true);
+ if (ret)
+ return ret;
- iio_device_release_direct_mode(indio_dev);
+ st->range = range;
- return ret;
+ return 0;
+ }
+ unreachable();
}
static int adxl367_time_ms_to_samples(struct adxl367_state *st, unsigned int ms)
@@ -587,11 +576,11 @@ static int adxl367_set_act_time_ms(struct adxl367_state *st,
{
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
if (act == ADXL367_ACTIVITY)
ret = _adxl367_set_act_time_ms(st, ms);
@@ -599,14 +588,9 @@ static int adxl367_set_act_time_ms(struct adxl367_state *st,
ret = _adxl367_set_inact_time_ms(st, ms);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
@@ -636,31 +620,23 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
{
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ guard(mutex)(&st->lock);
- mutex_lock(&st->lock);
-
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- goto out;
-
- ret = _adxl367_set_odr(st, odr);
- if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- iio_device_release_direct_mode(indio_dev);
+ ret = _adxl367_set_odr(st, odr);
+ if (ret)
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
+ }
+ unreachable();
}
static int adxl367_set_temp_adc_en(struct adxl367_state *st, unsigned int reg,
@@ -749,36 +725,32 @@ static int adxl367_read_sample(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
- struct adxl367_state *st = iio_priv(indio_dev);
- u16 sample;
- int ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ u16 sample;
+ int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ guard(mutex)(&st->lock);
- mutex_lock(&st->lock);
-
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
- if (ret)
- goto out;
-
- ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
- sizeof(st->sample_buf));
- if (ret)
- goto out;
-
- sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
- *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
+ if (ret)
+ return ret;
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
+ ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
+ sizeof(st->sample_buf));
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&st->lock);
+ sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
- iio_device_release_direct_mode(indio_dev);
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
+ if (ret)
+ return ret;
- return ret ?: IIO_VAL_INT;
+ return IIO_VAL_INT;
+ }
+ unreachable();
}
static int adxl367_get_status(struct adxl367_state *st, u8 *status,
@@ -886,12 +858,12 @@ static int adxl367_read_raw(struct iio_dev *indio_dev,
return adxl367_read_sample(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
- case IIO_ACCEL:
- mutex_lock(&st->lock);
+ case IIO_ACCEL: {
+ guard(mutex)(&st->lock);
*val = adxl367_range_scale_tbl[st->range][0];
*val2 = adxl367_range_scale_tbl[st->range][1];
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
+ }
case IIO_TEMP:
*val = 1000;
*val2 = ADXL367_TEMP_PER_C;
@@ -914,12 +886,12 @@ static int adxl367_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ guard(mutex)(&st->lock);
*val = adxl367_samp_freq_tbl[st->odr][0];
*val2 = adxl367_samp_freq_tbl[st->odr][1];
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_MICRO;
+ }
default:
return -EINVAL;
}
@@ -1004,18 +976,15 @@ static int adxl367_read_event_value(struct iio_dev *indio_dev,
{
struct adxl367_state *st = iio_priv(indio_dev);
+ guard(mutex)(&st->lock);
switch (info) {
case IIO_EV_INFO_VALUE: {
switch (dir) {
case IIO_EV_DIR_RISING:
- mutex_lock(&st->lock);
*val = st->act_threshold;
- mutex_unlock(&st->lock);
return IIO_VAL_INT;
case IIO_EV_DIR_FALLING:
- mutex_lock(&st->lock);
*val = st->inact_threshold;
- mutex_unlock(&st->lock);
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -1024,15 +993,11 @@ static int adxl367_read_event_value(struct iio_dev *indio_dev,
case IIO_EV_INFO_PERIOD:
switch (dir) {
case IIO_EV_DIR_RISING:
- mutex_lock(&st->lock);
*val = st->act_time_ms;
- mutex_unlock(&st->lock);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
case IIO_EV_DIR_FALLING:
- mutex_lock(&st->lock);
*val = st->inact_time_ms;
- mutex_unlock(&st->lock);
*val2 = 1000;
return IIO_VAL_FRACTIONAL;
default:
@@ -1110,9 +1075,7 @@ static int adxl367_write_event_config(struct iio_dev *indio_dev,
enum iio_event_direction dir,
int state)
{
- struct adxl367_state *st = iio_priv(indio_dev);
enum adxl367_activity_type act;
- int ret;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1125,33 +1088,28 @@ static int adxl367_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- goto out;
+ guard(mutex)(&st->lock);
- ret = adxl367_set_act_interrupt_en(st, act, state);
- if (ret)
- goto out;
-
- ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
- : ADXL367_ACT_DISABLED);
- if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&st->lock);
+ ret = adxl367_set_act_interrupt_en(st, act, state);
+ if (ret)
+ return ret;
- iio_device_release_direct_mode(indio_dev);
+ ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
+ : ADXL367_ACT_DISABLED);
+ if (ret)
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
+ }
+ unreachable();
}
static ssize_t adxl367_get_fifo_enabled(struct device *dev,
@@ -1176,9 +1134,8 @@ static ssize_t adxl367_get_fifo_watermark(struct device *dev,
struct adxl367_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned int fifo_watermark;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
fifo_watermark = st->fifo_watermark;
- mutex_unlock(&st->lock);
return sysfs_emit(buf, "%d\n", fifo_watermark);
}
@@ -1207,22 +1164,17 @@ static int adxl367_set_watermark(struct iio_dev *indio_dev, unsigned int val)
if (val > ADXL367_FIFO_MAX_WATERMARK)
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_watermark(st, val);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static bool adxl367_find_mask_fifo_format(const unsigned long *scan_mask,
@@ -1253,27 +1205,24 @@ static int adxl367_update_scan_mode(struct iio_dev *indio_dev,
if (!adxl367_find_mask_fifo_format(active_scan_mask, &fifo_format))
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_format(st, fifo_format);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_measure_en(st, true);
if (ret)
- goto out;
+ return ret;
st->fifo_set_size = bitmap_weight(active_scan_mask,
indio_dev->masklength);
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int adxl367_buffer_postenable(struct iio_dev *indio_dev)
@@ -1281,31 +1230,26 @@ static int adxl367_buffer_postenable(struct iio_dev *indio_dev)
struct adxl367_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_temp_adc_mask_en(st, indio_dev->active_scan_mask,
true);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_watermark_interrupt_en(st, true);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_mode(st, ADXL367_FIFO_MODE_STREAM);
if (ret)
- goto out;
-
- ret = adxl367_set_measure_en(st, true);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_measure_en(st, true);
}
static int adxl367_buffer_predisable(struct iio_dev *indio_dev)
@@ -1313,31 +1257,26 @@ static int adxl367_buffer_predisable(struct iio_dev *indio_dev)
struct adxl367_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = adxl367_set_measure_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_mode(st, ADXL367_FIFO_MODE_DISABLED);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_fifo_watermark_interrupt_en(st, false);
if (ret)
- goto out;
+ return ret;
ret = adxl367_set_measure_en(st, true);
if (ret)
- goto out;
-
- ret = adxl367_set_temp_adc_mask_en(st, indio_dev->active_scan_mask,
- false);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return adxl367_set_temp_adc_mask_en(st, indio_dev->active_scan_mask,
+ false);
}
static const struct iio_buffer_setup_ops adxl367_buffer_ops = {
diff --git a/drivers/iio/accel/adxl372_spi.c b/drivers/iio/accel/adxl372_spi.c
index 75a88f16c6c9..787699773f96 100644
--- a/drivers/iio/accel/adxl372_spi.c
+++ b/drivers/iio/accel/adxl372_spi.c
@@ -6,8 +6,8 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
-#include <linux/of.h>
#include <linux/spi/spi.h>
#include "adxl372.h"
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index ab4fccb24b6c..6581772cb0c4 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -13,10 +13,10 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/of.h>
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index ee1ba134ad42..1c2e40369839 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -224,6 +224,19 @@ static const struct acpi_device_id bmc150_accel_acpi_match[] = {
{"BMA250E"},
{"BMC150A"},
{"BMI055A"},
+ /*
+ * The "BOSC0200" identifier used here is not unique to devices using
+ * bmc150. The same "BOSC0200" identifier is found in the ACPI tables
+ * of the ASUS ROG ALLY and Ayaneo AIR Plus, which both use a Bosch
+ * BMI323 chip. This creates a conflict with duplicate ACPI identifiers
+ * which multiple drivers want to use. Fortunately, when the bmc150
+ * driver starts to load on the ASUS ROG ALLY, the chip ID check
+ * portion fails (correctly) because the chip IDs received (via i2c)
+ * are unique between bmc150 and bmi323 and a dmesg output similar to
+ * this: "bmc150_accel_i2c i2c-BOSC0200:00: Invalid chip 0" can be
+ * seen. This allows the bmi323 driver to take over for the ASUS ROG ALLY
+ * and other devices using the bmi323 chip.
+ */
{"BOSC0200"},
{"BSBA0150"},
{"DUAL250E"},
@@ -266,7 +279,7 @@ static struct i2c_driver bmc150_accel_driver = {
.driver = {
.name = "bmc150_accel_i2c",
.of_match_table = bmc150_accel_of_match,
- .acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
+ .acpi_match_table = bmc150_accel_acpi_match,
.pm = &bmc150_accel_pm_ops,
},
.probe = bmc150_accel_probe,
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 921fb46be0b8..a6b9f599eb7b 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -7,7 +7,6 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
@@ -70,7 +69,7 @@ MODULE_DEVICE_TABLE(spi, bmc150_accel_id);
static struct spi_driver bmc150_accel_driver = {
.driver = {
.name = "bmc150_accel_spi",
- .acpi_match_table = ACPI_PTR(bmc150_accel_acpi_match),
+ .acpi_match_table = bmc150_accel_acpi_match,
.pm = &bmc150_accel_pm_ops,
},
.probe = bmc150_accel_probe,
diff --git a/drivers/iio/accel/bmi088-accel-i2c.c b/drivers/iio/accel/bmi088-accel-i2c.c
new file mode 100644
index 000000000000..17e9156bbe89
--- /dev/null
+++ b/drivers/iio/accel/bmi088-accel-i2c.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
+ * - BMI088
+ * - BMI085
+ * - BMI090L
+ *
+ * Copyright 2023 Jun Yan <jerrysteve1101@gmail.com>
+ */
+
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#include "bmi088-accel.h"
+
+static int bmi088_accel_probe(struct i2c_client *i2c)
+{
+ struct regmap *regmap;
+ const struct i2c_device_id *id = i2c_client_get_device_id(i2c);
+
+ regmap = devm_regmap_init_i2c(i2c, &bmi088_regmap_conf);
+ if (IS_ERR(regmap)) {
+ dev_err(&i2c->dev, "Failed to initialize i2c regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ return bmi088_accel_core_probe(&i2c->dev, regmap, i2c->irq,
+ id->driver_data);
+}
+
+static void bmi088_accel_remove(struct i2c_client *i2c)
+{
+ bmi088_accel_core_remove(&i2c->dev);
+}
+
+static const struct of_device_id bmi088_of_match[] = {
+ { .compatible = "bosch,bmi085-accel" },
+ { .compatible = "bosch,bmi088-accel" },
+ { .compatible = "bosch,bmi090l-accel" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bmi088_of_match);
+
+static const struct i2c_device_id bmi088_accel_id[] = {
+ { "bmi085-accel", BOSCH_BMI085 },
+ { "bmi088-accel", BOSCH_BMI088 },
+ { "bmi090l-accel", BOSCH_BMI090L },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bmi088_accel_id);
+
+static struct i2c_driver bmi088_accel_driver = {
+ .driver = {
+ .name = "bmi088_accel_i2c",
+ .pm = pm_ptr(&bmi088_accel_pm_ops),
+ .of_match_table = bmi088_of_match,
+ },
+ .probe = bmi088_accel_probe,
+ .remove = bmi088_accel_remove,
+ .id_table = bmi088_accel_id,
+};
+module_i2c_driver(bmi088_accel_driver);
+
+MODULE_AUTHOR("Jun Yan <jerrysteve1101@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("BMI088 accelerometer driver (I2C)");
+MODULE_IMPORT_NS(IIO_BMI088);
diff --git a/drivers/iio/accel/da280.c b/drivers/iio/accel/da280.c
index 572bfe9694b0..992286828844 100644
--- a/drivers/iio/accel/da280.c
+++ b/drivers/iio/accel/da280.c
@@ -23,8 +23,6 @@
#define DA280_MODE_ENABLE 0x1e
#define DA280_MODE_DISABLE 0x9e
-enum da280_chipset { da217, da226, da280 };
-
/*
* a value of + or -4096 corresponds to + or - 1G
* scale = 9.81 / 4096 = 0.002395019
@@ -47,6 +45,11 @@ static const struct iio_chan_spec da280_channels[] = {
DA280_CHANNEL(DA280_REG_ACC_Z_LSB, Z),
};
+struct da280_match_data {
+ const char *name;
+ int num_channels;
+};
+
struct da280_data {
struct i2c_client *client;
};
@@ -89,17 +92,6 @@ static const struct iio_info da280_info = {
.read_raw = da280_read_raw,
};
-static enum da280_chipset da280_match_acpi_device(struct device *dev)
-{
- const struct acpi_device_id *id;
-
- id = acpi_match_device(dev->driver->acpi_match_table, dev);
- if (!id)
- return -EINVAL;
-
- return (enum da280_chipset) id->driver_data;
-}
-
static void da280_disable(void *client)
{
da280_enable(client, false);
@@ -107,16 +99,21 @@ static void da280_disable(void *client)
static int da280_probe(struct i2c_client *client)
{
- const struct i2c_device_id *id = i2c_client_get_device_id(client);
- int ret;
+ const struct da280_match_data *match_data;
struct iio_dev *indio_dev;
struct da280_data *data;
- enum da280_chipset chip;
+ int ret;
ret = i2c_smbus_read_byte_data(client, DA280_REG_CHIP_ID);
if (ret != DA280_CHIP_ID)
return (ret < 0) ? ret : -ENODEV;
+ match_data = i2c_get_match_data(client);
+ if (!match_data) {
+ dev_err(&client->dev, "Error match-data not set\n");
+ return -EINVAL;
+ }
+
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -127,23 +124,8 @@ static int da280_probe(struct i2c_client *client)
indio_dev->info = &da280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = da280_channels;
-
- if (ACPI_HANDLE(&client->dev)) {
- chip = da280_match_acpi_device(&client->dev);
- } else {
- chip = id->driver_data;
- }
-
- if (chip == da217) {
- indio_dev->name = "da217";
- indio_dev->num_channels = 3;
- } else if (chip == da226) {
- indio_dev->name = "da226";
- indio_dev->num_channels = 2;
- } else {
- indio_dev->name = "da280";
- indio_dev->num_channels = 3;
- }
+ indio_dev->num_channels = match_data->num_channels;
+ indio_dev->name = match_data->name;
ret = da280_enable(client, true);
if (ret < 0)
@@ -168,17 +150,21 @@ static int da280_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(da280_pm_ops, da280_suspend, da280_resume);
+static const struct da280_match_data da217_match_data = { "da217", 3 };
+static const struct da280_match_data da226_match_data = { "da226", 2 };
+static const struct da280_match_data da280_match_data = { "da280", 3 };
+
static const struct acpi_device_id da280_acpi_match[] = {
- {"NSA2513", da217},
- {"MIRAACC", da280},
- {},
+ { "NSA2513", (kernel_ulong_t)&da217_match_data },
+ { "MIRAACC", (kernel_ulong_t)&da280_match_data },
+ {}
};
MODULE_DEVICE_TABLE(acpi, da280_acpi_match);
static const struct i2c_device_id da280_i2c_id[] = {
- { "da217", da217 },
- { "da226", da226 },
- { "da280", da280 },
+ { "da217", (kernel_ulong_t)&da217_match_data },
+ { "da226", (kernel_ulong_t)&da226_match_data },
+ { "da280", (kernel_ulong_t)&da280_match_data },
{}
};
MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
@@ -186,7 +172,7 @@ MODULE_DEVICE_TABLE(i2c, da280_i2c_id);
static struct i2c_driver da280_driver = {
.driver = {
.name = "da280",
- .acpi_match_table = ACPI_PTR(da280_acpi_match),
+ .acpi_match_table = da280_acpi_match,
.pm = pm_sleep_ptr(&da280_pm_ops),
},
.probe = da280_probe,
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 894709286b0c..126e8bdd6d0e 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -422,6 +422,23 @@ static int kiox010a_dsm(struct device *dev, int fn_index)
ACPI_FREE(obj);
return 0;
}
+
+static const struct acpi_device_id kx_acpi_match[] = {
+ {"KXCJ1013", KXCJK1013},
+ {"KXCJ1008", KXCJ91008},
+ {"KXCJ9000", KXCJ91008},
+ {"KIOX0008", KXCJ91008},
+ {"KIOX0009", KXTJ21009},
+ {"KIOX000A", KXCJ91008},
+ {"KIOX010A", KXCJ91008}, /* KXCJ91008 in the display of a yoga 2-in-1 */
+ {"KIOX020A", KXCJ91008}, /* KXCJ91008 in the base of a yoga 2-in-1 */
+ {"KXTJ1009", KXTJ21009},
+ {"KXJ2109", KXTJ21009},
+ {"SMO8500", KXCJ91008},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, kx_acpi_match);
+
#endif
static int kxcjk1013_set_mode(struct kxcjk1013_data *data,
@@ -619,6 +636,84 @@ static int kxcjk1013_set_power_state(struct kxcjk1013_data *data, bool on)
return 0;
}
+#ifdef CONFIG_ACPI
+static bool kxj_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ char *str;
+ union acpi_object *obj, *elements;
+ acpi_status status;
+ int i, j, val[3];
+ bool ret = false;
+
+ if (!acpi_has_method(adev->handle, "ROTM"))
+ return false;
+
+ status = acpi_evaluate_object(adev->handle, "ROTM", NULL, &buffer);
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Failed to get ACPI mount matrix: %d\n", status);
+ return false;
+ }
+
+ obj = buffer.pointer;
+ if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count != 3) {
+ dev_err(dev, "Unknown ACPI mount matrix package format\n");
+ goto out_free_buffer;
+ }
+
+ elements = obj->package.elements;
+ for (i = 0; i < 3; i++) {
+ if (elements[i].type != ACPI_TYPE_STRING) {
+ dev_err(dev, "Unknown ACPI mount matrix element format\n");
+ goto out_free_buffer;
+ }
+
+ str = elements[i].string.pointer;
+ if (sscanf(str, "%d %d %d", &val[0], &val[1], &val[2]) != 3) {
+ dev_err(dev, "Incorrect ACPI mount matrix string format\n");
+ goto out_free_buffer;
+ }
+
+ for (j = 0; j < 3; j++) {
+ switch (val[j]) {
+ case -1: str = "-1"; break;
+ case 0: str = "0"; break;
+ case 1: str = "1"; break;
+ default:
+ dev_err(dev, "Invalid value in ACPI mount matrix: %d\n", val[j]);
+ goto out_free_buffer;
+ }
+ orientation->rotation[i * 3 + j] = str;
+ }
+ }
+
+ ret = true;
+
+out_free_buffer:
+ kfree(buffer.pointer);
+ return ret;
+}
+
+static bool kxj1009_apply_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (adev && acpi_dev_hid_uid_match(adev, "KIOX000A", NULL))
+ return kxj_acpi_orientation(dev, orientation);
+
+ return false;
+}
+#else
+static bool kxj1009_apply_acpi_orientation(struct device *dev,
+ struct iio_mount_matrix *orientation)
+{
+ return false;
+}
+#endif
+
static int kxcjk1013_chip_update_thresholds(struct kxcjk1013_data *data)
{
int ret;
@@ -1449,9 +1544,12 @@ static int kxcjk1013_probe(struct i2c_client *client)
} else {
data->active_high_intr = true; /* default polarity */
- ret = iio_read_mount_matrix(&client->dev, &data->orientation);
- if (ret)
- return ret;
+ if (!kxj1009_apply_acpi_orientation(&client->dev, &data->orientation)) {
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
+ if (ret)
+ return ret;
+ }
+
}
ret = devm_regulator_bulk_get_enable(&client->dev,
@@ -1687,22 +1785,6 @@ static const struct dev_pm_ops kxcjk1013_pm_ops = {
kxcjk1013_runtime_resume, NULL)
};
-static const struct acpi_device_id kx_acpi_match[] = {
- {"KXCJ1013", KXCJK1013},
- {"KXCJ1008", KXCJ91008},
- {"KXCJ9000", KXCJ91008},
- {"KIOX0008", KXCJ91008},
- {"KIOX0009", KXTJ21009},
- {"KIOX000A", KXCJ91008},
- {"KIOX010A", KXCJ91008}, /* KXCJ91008 in the display of a yoga 2-in-1 */
- {"KIOX020A", KXCJ91008}, /* KXCJ91008 in the base of a yoga 2-in-1 */
- {"KXTJ1009", KXTJ21009},
- {"KXJ2109", KXTJ21009},
- {"SMO8500", KXCJ91008},
- { },
-};
-MODULE_DEVICE_TABLE(acpi, kx_acpi_match);
-
static const struct i2c_device_id kxcjk1013_id[] = {
{"kxcjk1013", KXCJK1013},
{"kxcj91008", KXCJ91008},
diff --git a/drivers/iio/accel/kxsd9-spi.c b/drivers/iio/accel/kxsd9-spi.c
index 1719a9f1d90a..4414670dfb43 100644
--- a/drivers/iio/accel/kxsd9-spi.c
+++ b/drivers/iio/accel/kxsd9-spi.c
@@ -1,9 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/of.h>
#include <linux/spi/spi.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/accel/mma9551.c b/drivers/iio/accel/mma9551.c
index d823f2edc6d4..083c08f65baf 100644
--- a/drivers/iio/accel/mma9551.c
+++ b/drivers/iio/accel/mma9551.c
@@ -604,9 +604,9 @@ MODULE_DEVICE_TABLE(i2c, mma9551_id);
static struct i2c_driver mma9551_driver = {
.driver = {
.name = MMA9551_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mma9551_acpi_match),
+ .acpi_match_table = mma9551_acpi_match,
.pm = pm_ptr(&mma9551_pm_ops),
- },
+ },
.probe = mma9551_probe,
.remove = mma9551_remove,
.id_table = mma9551_id,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index d01aba4aecba..3cbd0fd4e624 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -1243,9 +1243,9 @@ MODULE_DEVICE_TABLE(i2c, mma9553_id);
static struct i2c_driver mma9553_driver = {
.driver = {
.name = MMA9553_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mma9553_acpi_match),
+ .acpi_match_table = mma9553_acpi_match,
.pm = pm_ptr(&mma9553_pm_ops),
- },
+ },
.probe = mma9553_probe,
.remove = mma9553_remove,
.id_table = mma9553_id,
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index 82e8d0b39049..61839be501c2 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -8,7 +8,7 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -472,6 +472,7 @@ static int mxc4005_probe(struct i2c_client *client)
static const struct acpi_device_id mxc4005_acpi_match[] = {
{"MXC4005", 0},
{"MXC6655", 0},
+ {"MDA6655", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, mxc4005_acpi_match);
@@ -493,7 +494,7 @@ MODULE_DEVICE_TABLE(i2c, mxc4005_id);
static struct i2c_driver mxc4005_driver = {
.driver = {
.name = MXC4005_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mxc4005_acpi_match),
+ .acpi_match_table = mxc4005_acpi_match,
.of_match_table = mxc4005_of_match,
},
.probe = mxc4005_probe,
diff --git a/drivers/iio/accel/mxc6255.c b/drivers/iio/accel/mxc6255.c
index 33c2253561e6..ac228128c4f9 100644
--- a/drivers/iio/accel/mxc6255.c
+++ b/drivers/iio/accel/mxc6255.c
@@ -12,7 +12,7 @@
#include <linux/init.h>
#include <linux/iio/iio.h>
#include <linux/delay.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
@@ -181,7 +181,7 @@ MODULE_DEVICE_TABLE(i2c, mxc6255_id);
static struct i2c_driver mxc6255_driver = {
.driver = {
.name = MXC6255_DRV_NAME,
- .acpi_match_table = ACPI_PTR(mxc6255_acpi_match),
+ .acpi_match_table = mxc6255_acpi_match,
},
.probe = mxc6255_probe,
.id_table = mxc6255_id,
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 71ee861b2980..fd3749871121 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -10,7 +10,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
@@ -127,14 +126,12 @@ static const struct of_device_id st_accel_of_match[] = {
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id st_accel_acpi_match[] = {
{"SMO8840", (kernel_ulong_t)LIS2DH12_ACCEL_DEV_NAME},
{"SMO8A90", (kernel_ulong_t)LNG2DM_ACCEL_DEV_NAME},
{ },
};
MODULE_DEVICE_TABLE(acpi, st_accel_acpi_match);
-#endif
static const struct i2c_device_id st_accel_id_table[] = {
{ LSM303DLH_ACCEL_DEV_NAME },
@@ -204,7 +201,7 @@ static struct i2c_driver st_accel_driver = {
.driver = {
.name = "st-accel-i2c",
.of_match_table = st_accel_of_match,
- .acpi_match_table = ACPI_PTR(st_accel_acpi_match),
+ .acpi_match_table = st_accel_acpi_match,
},
.probe = st_accel_i2c_probe,
.id_table = st_accel_id_table,
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 3415ac1b4495..668edc88c89d 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -7,11 +7,11 @@
* STK8BA50 7-bit I2C address: 0x18.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -541,7 +541,7 @@ static struct i2c_driver stk8ba50_driver = {
.driver = {
.name = "stk8ba50",
.pm = pm_sleep_ptr(&stk8ba50_pm_ops),
- .acpi_match_table = ACPI_PTR(stk8ba50_acpi_id),
+ .acpi_match_table = stk8ba50_acpi_id,
},
.probe = stk8ba50_probe,
.remove = stk8ba50_remove,
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 3b73c509bd68..0d9282fa67f5 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -291,7 +291,7 @@ config AD799X
config AD9467
tristate "Analog Devices AD9467 High Speed ADC driver"
depends on SPI
- depends on ADI_AXI_ADC
+ select IIO_BACKEND
help
Say yes here to build support for Analog Devices:
* AD9467 16-Bit, 200 MSPS/250 MSPS Analog-to-Digital Converter
@@ -309,7 +309,7 @@ config ADI_AXI_ADC
select IIO_BUFFER_HW_CONSUMER
select IIO_BUFFER_DMAENGINE
select REGMAP_MMIO
- depends on OF
+ select IIO_BACKEND
help
Say yes here to build support for Analog Devices Generic
AXI ADC IP core. The IP core is used for interfacing with
@@ -930,6 +930,17 @@ config NPCM_ADC
This driver can also be built as a module. If so, the module
will be called npcm_adc.
+config PAC1934
+ tristate "Microchip Technology PAC1934 driver"
+ depends on I2C
+ help
+ Say yes here to build support for Microchip Technology's PAC1931,
+ PAC1932, PAC1933, PAC1934 Single/Multi-Channel Power Monitor with
+ Accumulator.
+
+ This driver can also be built as a module. If so, the module
+ will be called pac1934.
+
config PALMAS_GPADC
tristate "TI Palmas General Purpose ADC"
depends on MFD_PALMAS
@@ -1312,6 +1323,17 @@ config TI_ADS1100
This driver can also be built as a module. If so, the module will be
called ti-ads1100.
+config TI_ADS1298
+ tristate "Texas Instruments ADS1298"
+ depends on SPI
+ select IIO_BUFFER
+ help
+ If you say yes here you get support for Texas Instruments ADS1298
+ medical ADC chips.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads1298.
+
config TI_ADS7950
tristate "Texas Instruments ADS7950 ADC driver"
depends on SPI && GPIOLIB
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index d2fda54a3259..b3c434722364 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -86,6 +86,7 @@ obj-$(CONFIG_MP2629_ADC) += mp2629_adc.o
obj-$(CONFIG_MXS_LRADC_ADC) += mxs-lradc-adc.o
obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_NPCM_ADC) += npcm_adc.o
+obj-$(CONFIG_PAC1934) += pac1934.o
obj-$(CONFIG_PALMAS_GPADC) += palmas_gpadc.o
obj-$(CONFIG_QCOM_SPMI_ADC5) += qcom-spmi-adc5.o
obj-$(CONFIG_QCOM_SPMI_IADC) += qcom-spmi-iadc.o
@@ -116,6 +117,7 @@ obj-$(CONFIG_TI_ADC128S052) += ti-adc128s052.o
obj-$(CONFIG_TI_ADC161S626) += ti-adc161s626.o
obj-$(CONFIG_TI_ADS1015) += ti-ads1015.o
obj-$(CONFIG_TI_ADS1100) += ti-ads1100.o
+obj-$(CONFIG_TI_ADS1298) += ti-ads1298.o
obj-$(CONFIG_TI_ADS7924) += ti-ads7924.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index 62490424b6ae..febb64e67955 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -887,9 +887,9 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
unsigned int old_fs;
int ret = 0;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (setup_info->filter_mode == val)
- goto out;
+ return 0;
old_fs = setup_info->fs;
old_filter_mode = setup_info->filter_mode;
@@ -911,12 +911,10 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
if (ret) {
setup_info->fs = old_fs;
setup_info->filter_mode = old_filter_mode;
+ return ret;
}
- out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int ad4130_get_filter_mode(struct iio_dev *indio_dev,
@@ -927,9 +925,8 @@ static int ad4130_get_filter_mode(struct iio_dev *indio_dev,
struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup;
enum ad4130_filter_mode filter_mode;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
filter_mode = setup_info->filter_mode;
- mutex_unlock(&st->lock);
return filter_mode;
}
@@ -971,7 +968,7 @@ static int ad4130_set_channel_pga(struct ad4130_state *st, unsigned int channel,
struct ad4130_chan_info *chan_info = &st->chans_info[channel];
struct ad4130_setup_info *setup_info = &chan_info->setup;
unsigned int pga, old_pga;
- int ret = 0;
+ int ret;
for (pga = 0; pga < AD4130_MAX_PGA; pga++)
if (val == st->scale_tbls[setup_info->ref_sel][pga][0] &&
@@ -981,21 +978,20 @@ static int ad4130_set_channel_pga(struct ad4130_state *st, unsigned int channel,
if (pga == AD4130_MAX_PGA)
return -EINVAL;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (pga == setup_info->pga)
- goto out;
+ return 0;
old_pga = setup_info->pga;
setup_info->pga = pga;
ret = ad4130_write_channel_setup(st, channel, false);
- if (ret)
+ if (ret) {
setup_info->pga = old_pga;
+ return ret;
+ }
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int ad4130_set_channel_freq(struct ad4130_state *st,
@@ -1004,26 +1000,25 @@ static int ad4130_set_channel_freq(struct ad4130_state *st,
struct ad4130_chan_info *chan_info = &st->chans_info[channel];
struct ad4130_setup_info *setup_info = &chan_info->setup;
unsigned int fs, old_fs;
- int ret = 0;
+ int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
old_fs = setup_info->fs;
ad4130_freq_to_fs(setup_info->filter_mode, val, val2, &fs);
if (fs == setup_info->fs)
- goto out;
+ return 0;
setup_info->fs = fs;
ret = ad4130_write_channel_setup(st, channel, false);
- if (ret)
+ if (ret) {
setup_info->fs = old_fs;
+ return ret;
+ }
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
@@ -1065,20 +1060,13 @@ static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
static int ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
int *val)
{
- struct ad4130_state *st = iio_priv(indio_dev);
- int ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ struct ad4130_state *st = iio_priv(indio_dev);
- mutex_lock(&st->lock);
- ret = _ad4130_read_sample(indio_dev, channel, val);
- mutex_unlock(&st->lock);
-
- iio_device_release_direct_mode(indio_dev);
-
- return ret;
+ guard(mutex)(&st->lock);
+ return _ad4130_read_sample(indio_dev, channel, val);
+ }
+ unreachable();
}
static int ad4130_read_raw(struct iio_dev *indio_dev,
@@ -1092,24 +1080,24 @@ static int ad4130_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
return ad4130_read_sample(indio_dev, channel, val);
- case IIO_CHAN_INFO_SCALE:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_SCALE: {
+ guard(mutex)(&st->lock);
*val = st->scale_tbls[setup_info->ref_sel][setup_info->pga][0];
*val2 = st->scale_tbls[setup_info->ref_sel][setup_info->pga][1];
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
+ }
case IIO_CHAN_INFO_OFFSET:
*val = st->bipolar ? -BIT(chan->scan_type.realbits - 1) : 0;
return IIO_VAL_INT;
- case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ guard(mutex)(&st->lock);
ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs,
val, val2);
- mutex_unlock(&st->lock);
return IIO_VAL_INT_PLUS_NANO;
+ }
default:
return -EINVAL;
}
@@ -1134,9 +1122,9 @@ static int ad4130_read_avail(struct iio_dev *indio_dev,
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SAMP_FREQ:
- mutex_lock(&st->lock);
- filter_config = &ad4130_filter_configs[setup_info->filter_mode];
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ filter_config = &ad4130_filter_configs[setup_info->filter_mode];
+ }
*vals = (int *)filter_config->samp_freq_avail;
*length = filter_config->samp_freq_avail_len * 2;
@@ -1197,21 +1185,18 @@ static int ad4130_update_scan_mode(struct iio_dev *indio_dev,
unsigned int val = 0;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
for_each_set_bit(channel, scan_mask, indio_dev->num_channels) {
ret = ad4130_set_channel_enable(st, channel, true);
if (ret)
- goto out;
+ return ret;
val++;
}
st->num_enabled_channels = val;
-out:
- mutex_unlock(&st->lock);
-
return 0;
}
@@ -1232,22 +1217,19 @@ static int ad4130_set_fifo_watermark(struct iio_dev *indio_dev, unsigned int val
*/
eff = rounddown(AD4130_FIFO_SIZE, st->num_enabled_channels);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = regmap_update_bits(st->regmap, AD4130_FIFO_CONTROL_REG,
AD4130_FIFO_CONTROL_WM_MASK,
FIELD_PREP(AD4130_FIFO_CONTROL_WM_MASK,
ad4130_watermark_reg_val(eff)));
if (ret)
- goto out;
+ return ret;
st->effective_watermark = eff;
st->watermark = val;
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static const struct iio_info ad4130_info = {
@@ -1265,26 +1247,21 @@ static int ad4130_buffer_postenable(struct iio_dev *indio_dev)
struct ad4130_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = ad4130_set_watermark_interrupt_en(st, true);
if (ret)
- goto out;
+ return ret;
ret = irq_set_irq_type(st->spi->irq, st->inv_irq_trigger);
if (ret)
- goto out;
+ return ret;
ret = ad4130_set_fifo_mode(st, AD4130_FIFO_MODE_WM);
if (ret)
- goto out;
-
- ret = ad4130_set_mode(st, AD4130_MODE_CONTINUOUS);
-
-out:
- mutex_unlock(&st->lock);
+ return ret;
- return ret;
+ return ad4130_set_mode(st, AD4130_MODE_CONTINUOUS);
}
static int ad4130_buffer_predisable(struct iio_dev *indio_dev)
@@ -1293,23 +1270,23 @@ static int ad4130_buffer_predisable(struct iio_dev *indio_dev)
unsigned int i;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = ad4130_set_mode(st, AD4130_MODE_IDLE);
if (ret)
- goto out;
+ return ret;
ret = irq_set_irq_type(st->spi->irq, st->irq_trigger);
if (ret)
- goto out;
+ return ret;
ret = ad4130_set_fifo_mode(st, AD4130_FIFO_MODE_DISABLED);
if (ret)
- goto out;
+ return ret;
ret = ad4130_set_watermark_interrupt_en(st, false);
if (ret)
- goto out;
+ return ret;
/*
* update_scan_mode() is not called in the disable path, disable all
@@ -1318,13 +1295,10 @@ static int ad4130_buffer_predisable(struct iio_dev *indio_dev)
for (i = 0; i < indio_dev->num_channels; i++) {
ret = ad4130_set_channel_enable(st, i, false);
if (ret)
- goto out;
+ return ret;
}
-out:
- mutex_unlock(&st->lock);
-
- return ret;
+ return 0;
}
static const struct iio_buffer_setup_ops ad4130_buffer_ops = {
@@ -1338,9 +1312,8 @@ static ssize_t hwfifo_watermark_show(struct device *dev,
struct ad4130_state *st = iio_priv(dev_to_iio_dev(dev));
unsigned int val;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
val = st->watermark;
- mutex_unlock(&st->lock);
return sysfs_emit(buf, "%d\n", val);
}
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index f4255b91acfc..d6876259ad14 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -86,28 +86,25 @@ static int ad7091r_read_raw(struct iio_dev *iio_dev,
unsigned int read_val;
int ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
switch (m) {
case IIO_CHAN_INFO_RAW:
- if (st->mode != AD7091R_MODE_COMMAND) {
- ret = -EBUSY;
- goto unlock;
- }
+ if (st->mode != AD7091R_MODE_COMMAND)
+ return -EBUSY;
ret = ad7091r_read_one(iio_dev, chan->channel, &read_val);
if (ret)
- goto unlock;
+ return ret;
*val = read_val;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if (st->vref) {
ret = regulator_get_voltage(st->vref);
if (ret < 0)
- goto unlock;
+ return ret;
*val = ret / 1000;
} else {
@@ -115,17 +112,11 @@ static int ad7091r_read_raw(struct iio_dev *iio_dev,
}
*val2 = chan->scan_type.realbits;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
-unlock:
- mutex_unlock(&st->lock);
- return ret;
}
static int ad7091r_read_event_config(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index 6581fce4ba95..7475ec2a56c7 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -17,13 +17,12 @@
#include <linux/of.h>
+#include <linux/iio/backend.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/clk.h>
-#include <linux/iio/adc/adi-axi-adc.h>
-
/*
* ADI High-Speed ADC common spi interface registers
* See Application-Note AN-877:
@@ -102,15 +101,20 @@
#define AD9467_REG_VREF_MASK 0x0F
struct ad9467_chip_info {
- struct adi_axi_adc_chip_info axi_adc_info;
- unsigned int default_output_mode;
- unsigned int vref_mask;
+ const char *name;
+ unsigned int id;
+ const struct iio_chan_spec *channels;
+ unsigned int num_channels;
+ const unsigned int (*scale_table)[2];
+ int num_scales;
+ unsigned long max_rate;
+ unsigned int default_output_mode;
+ unsigned int vref_mask;
};
-#define to_ad9467_chip_info(_info) \
- container_of(_info, struct ad9467_chip_info, axi_adc_info)
-
struct ad9467_state {
+ const struct ad9467_chip_info *info;
+ struct iio_backend *back;
struct spi_device *spi;
struct clk *clk;
unsigned int output_mode;
@@ -151,10 +155,10 @@ static int ad9467_spi_write(struct spi_device *spi, unsigned int reg,
return spi_write(spi, buf, ARRAY_SIZE(buf));
}
-static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg,
+static int ad9467_reg_access(struct iio_dev *indio_dev, unsigned int reg,
unsigned int writeval, unsigned int *readval)
{
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
struct spi_device *spi = st->spi;
int ret;
@@ -191,10 +195,10 @@ static const unsigned int ad9467_scale_table[][2] = {
{2300, 8}, {2400, 9}, {2500, 10},
};
-static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index,
+static void __ad9467_get_scale(struct ad9467_state *st, int index,
unsigned int *val, unsigned int *val2)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
+ const struct ad9467_chip_info *info = st->info;
const struct iio_chan_spec *chan = &info->channels[0];
unsigned int tmp;
@@ -229,52 +233,44 @@ static const struct iio_chan_spec ad9467_channels[] = {
};
static const struct ad9467_chip_info ad9467_chip_tbl = {
- .axi_adc_info = {
- .name = "ad9467",
- .id = CHIPID_AD9467,
- .max_rate = 250000000UL,
- .scale_table = ad9467_scale_table,
- .num_scales = ARRAY_SIZE(ad9467_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
- },
+ .name = "ad9467",
+ .id = CHIPID_AD9467,
+ .max_rate = 250000000UL,
+ .scale_table = ad9467_scale_table,
+ .num_scales = ARRAY_SIZE(ad9467_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
.default_output_mode = AD9467_DEF_OUTPUT_MODE,
.vref_mask = AD9467_REG_VREF_MASK,
};
static const struct ad9467_chip_info ad9434_chip_tbl = {
- .axi_adc_info = {
- .name = "ad9434",
- .id = CHIPID_AD9434,
- .max_rate = 500000000UL,
- .scale_table = ad9434_scale_table,
- .num_scales = ARRAY_SIZE(ad9434_scale_table),
- .channels = ad9434_channels,
- .num_channels = ARRAY_SIZE(ad9434_channels),
- },
+ .name = "ad9434",
+ .id = CHIPID_AD9434,
+ .max_rate = 500000000UL,
+ .scale_table = ad9434_scale_table,
+ .num_scales = ARRAY_SIZE(ad9434_scale_table),
+ .channels = ad9434_channels,
+ .num_channels = ARRAY_SIZE(ad9434_channels),
.default_output_mode = AD9434_DEF_OUTPUT_MODE,
.vref_mask = AD9434_REG_VREF_MASK,
};
static const struct ad9467_chip_info ad9265_chip_tbl = {
- .axi_adc_info = {
- .name = "ad9265",
- .id = CHIPID_AD9265,
- .max_rate = 125000000UL,
- .scale_table = ad9265_scale_table,
- .num_scales = ARRAY_SIZE(ad9265_scale_table),
- .channels = ad9467_channels,
- .num_channels = ARRAY_SIZE(ad9467_channels),
- },
+ .name = "ad9265",
+ .id = CHIPID_AD9265,
+ .max_rate = 125000000UL,
+ .scale_table = ad9265_scale_table,
+ .num_scales = ARRAY_SIZE(ad9265_scale_table),
+ .channels = ad9467_channels,
+ .num_channels = ARRAY_SIZE(ad9467_channels),
.default_output_mode = AD9265_DEF_OUTPUT_MODE,
.vref_mask = AD9265_REG_VREF_MASK,
};
-static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
+static int ad9467_get_scale(struct ad9467_state *st, int *val, int *val2)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info);
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ const struct ad9467_chip_info *info = st->info;
unsigned int i, vref_val;
int ret;
@@ -282,7 +278,7 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
if (ret < 0)
return ret;
- vref_val = ret & info1->vref_mask;
+ vref_val = ret & info->vref_mask;
for (i = 0; i < info->num_scales; i++) {
if (vref_val == info->scale_table[i][1])
@@ -292,15 +288,14 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2)
if (i == info->num_scales)
return -ERANGE;
- __ad9467_get_scale(conv, i, val, val2);
+ __ad9467_get_scale(st, i, val, val2);
return IIO_VAL_INT_PLUS_MICRO;
}
-static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
+static int ad9467_set_scale(struct ad9467_state *st, int val, int val2)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ const struct ad9467_chip_info *info = st->info;
unsigned int scale_val[2];
unsigned int i;
int ret;
@@ -309,7 +304,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
return -EINVAL;
for (i = 0; i < info->num_scales; i++) {
- __ad9467_get_scale(conv, i, &scale_val[0], &scale_val[1]);
+ __ad9467_get_scale(st, i, &scale_val[0], &scale_val[1]);
if (scale_val[0] != val || scale_val[1] != val2)
continue;
@@ -326,15 +321,15 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2)
return -EINVAL;
}
-static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
+static int ad9467_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long m)
{
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
switch (m) {
case IIO_CHAN_INFO_SCALE:
- return ad9467_get_scale(conv, val, val2);
+ return ad9467_get_scale(st, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
*val = clk_get_rate(st->clk);
@@ -344,17 +339,17 @@ static int ad9467_read_raw(struct adi_axi_adc_conv *conv,
}
}
-static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
+static int ad9467_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
+ const struct ad9467_chip_info *info = st->info;
long r_clk;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- return ad9467_set_scale(conv, val, val2);
+ return ad9467_set_scale(st, val, val2);
case IIO_CHAN_INFO_SAMP_FREQ:
r_clk = clk_round_rate(st->clk, val);
if (r_clk < 0 || r_clk > info->max_rate) {
@@ -369,13 +364,13 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv,
}
}
-static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
+static int ad9467_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals, int *type, int *length,
long mask)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct ad9467_state *st = iio_priv(indio_dev);
+ const struct ad9467_chip_info *info = st->info;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -389,6 +384,33 @@ static int ad9467_read_avail(struct adi_axi_adc_conv *conv,
}
}
+static int ad9467_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad9467_state *st = iio_priv(indio_dev);
+ unsigned int c;
+ int ret;
+
+ for (c = 0; c < st->info->num_channels; c++) {
+ if (test_bit(c, scan_mask))
+ ret = iio_backend_chan_enable(st->back, c);
+ else
+ ret = iio_backend_chan_disable(st->back, c);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad9467_info = {
+ .read_raw = ad9467_read_raw,
+ .write_raw = ad9467_write_raw,
+ .update_scan_mode = ad9467_update_scan_mode,
+ .debugfs_reg_access = ad9467_reg_access,
+ .read_avail = ad9467_read_avail,
+};
+
static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
{
int ret;
@@ -401,10 +423,9 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode)
AN877_ADC_TRANSFER_SYNC);
}
-static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
+static int ad9467_scale_fill(struct ad9467_state *st)
{
- const struct adi_axi_adc_chip_info *info = conv->chip_info;
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ const struct ad9467_chip_info *info = st->info;
unsigned int i, val1, val2;
st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales,
@@ -413,7 +434,7 @@ static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
return -ENOMEM;
for (i = 0; i < info->num_scales; i++) {
- __ad9467_get_scale(conv, i, &val1, &val2);
+ __ad9467_get_scale(st, i, &val1, &val2);
st->scales[i][0] = val1;
st->scales[i][1] = val2;
}
@@ -421,11 +442,27 @@ static int ad9467_scale_fill(struct adi_axi_adc_conv *conv)
return 0;
}
-static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv)
+static int ad9467_setup(struct ad9467_state *st)
{
- struct ad9467_state *st = adi_axi_adc_conv_priv(conv);
+ struct iio_backend_data_fmt data = {
+ .sign_extend = true,
+ .enable = true,
+ };
+ unsigned int c, mode;
+ int ret;
+
+ mode = st->info->default_output_mode | AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ ret = ad9467_outputmode_set(st->spi, mode);
+ if (ret)
+ return ret;
- return ad9467_outputmode_set(st->spi, st->output_mode);
+ for (c = 0; c < st->info->num_channels; c++) {
+ ret = iio_backend_data_format_set(st->back, c, &data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int ad9467_reset(struct device *dev)
@@ -443,25 +480,65 @@ static int ad9467_reset(struct device *dev)
return 0;
}
+static int ad9467_iio_backend_get(struct ad9467_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct device_node *__back;
+
+ st->back = devm_iio_backend_get(dev, NULL);
+ if (!IS_ERR(st->back))
+ return 0;
+ /* If not found, don't error out as we might have a legacy DT property */
+ if (PTR_ERR(st->back) != -ENOENT)
+ return PTR_ERR(st->back);
+
+ /*
+ * If we don't get the backend using the normal APIs, use the legacy
+ * 'adi,adc-dev' property. So we get all nodes with that property and
+ * look for the one pointing at us. Then we directly look up that fwnode
+ * in the backend list of registered devices. This is done so we don't
+ * make io-backends mandatory, which would break the DT ABI.
+ */
+ for_each_node_with_property(__back, "adi,adc-dev") {
+ struct device_node *__me;
+
+ __me = of_parse_phandle(__back, "adi,adc-dev", 0);
+ if (!__me)
+ continue;
+
+ if (!device_match_of_node(dev, __me)) {
+ of_node_put(__me);
+ continue;
+ }
+
+ of_node_put(__me);
+ st->back = __devm_iio_backend_get_from_fwnode_lookup(dev,
+ of_fwnode_handle(__back));
+ of_node_put(__back);
+ return PTR_ERR_OR_ZERO(st->back);
+ }
+
+ return -ENODEV;
+}
+
static int ad9467_probe(struct spi_device *spi)
{
- const struct ad9467_chip_info *info;
- struct adi_axi_adc_conv *conv;
+ struct iio_dev *indio_dev;
struct ad9467_state *st;
unsigned int id;
int ret;
- info = spi_get_device_match_data(spi);
- if (!info)
- return -ENODEV;
-
- conv = devm_adi_axi_adc_conv_register(&spi->dev, sizeof(*st));
- if (IS_ERR(conv))
- return PTR_ERR(conv);
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
- st = adi_axi_adc_conv_priv(conv);
+ st = iio_priv(indio_dev);
st->spi = spi;
+ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -ENODEV;
+
st->clk = devm_clk_get_enabled(&spi->dev, "adc-clk");
if (IS_ERR(st->clk))
return PTR_ERR(st->clk);
@@ -475,29 +552,39 @@ static int ad9467_probe(struct spi_device *spi)
if (ret)
return ret;
- conv->chip_info = &info->axi_adc_info;
-
- ret = ad9467_scale_fill(conv);
+ ret = ad9467_scale_fill(st);
if (ret)
return ret;
id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID);
- if (id != conv->chip_info->id) {
+ if (id != st->info->id) {
dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n",
- id, conv->chip_info->id);
+ id, st->info->id);
return -ENODEV;
}
- conv->reg_access = ad9467_reg_access;
- conv->write_raw = ad9467_write_raw;
- conv->read_raw = ad9467_read_raw;
- conv->read_avail = ad9467_read_avail;
- conv->preenable_setup = ad9467_preenable_setup;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->info = &ad9467_info;
- st->output_mode = info->default_output_mode |
- AN877_ADC_OUTPUT_MODE_TWOS_COMPLEMENT;
+ ret = ad9467_iio_backend_get(st);
+ if (ret)
+ return ret;
- return 0;
+ ret = devm_iio_backend_request_buffer(&spi->dev, st->back, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_backend_enable(&spi->dev, st->back);
+ if (ret)
+ return ret;
+
+ ret = ad9467_setup(st);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id ad9467_of_match[] = {
@@ -529,4 +616,4 @@ module_spi_driver(ad9467_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD9467 ADC driver");
MODULE_LICENSE("GPL v2");
-MODULE_IMPORT_NS(IIO_ADI_AXI);
+MODULE_IMPORT_NS(IIO_BACKEND);
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index 55442eddf57c..a602429cdde4 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -568,6 +568,7 @@ EXPORT_SYMBOL_NS_GPL(ad_sd_validate_trigger, IIO_AD_SIGMA_DELTA);
static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_dev)
{
struct ad_sigma_delta *sigma_delta = iio_device_get_drvdata(indio_dev);
+ unsigned long irq_flags = irq_get_trigger_type(sigma_delta->spi->irq);
int ret;
if (dev != &sigma_delta->spi->dev) {
@@ -588,9 +589,13 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
/* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);
+ /* Allow overwriting the flags from firmware */
+ if (!irq_flags)
+ irq_flags = sigma_delta->info->irq_flags;
+
ret = devm_request_irq(dev, sigma_delta->spi->irq,
ad_sd_data_rdy_trig_poll,
- sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,
+ irq_flags | IRQF_NO_AUTOEN,
indio_dev->name,
sigma_delta);
if (ret)
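
The ad_sigma_delta change makes an interrupt trigger type described by firmware (device tree or ACPI) take precedence over the driver's hard-coded default: irq_get_trigger_type() returns IRQ_TYPE_NONE (0) when firmware did not specify one, and only in that case is sigma_delta->info->irq_flags used. A small sketch of the same precedence rule, with hypothetical names for the handler and default:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int example_request_irq(struct device *dev, unsigned int irq,
			       irq_handler_t handler,
			       unsigned long driver_default, void *priv)
{
	unsigned long irq_flags = irq_get_trigger_type(irq);

	/* IRQ_TYPE_NONE (0): firmware described no trigger, fall back to the default */
	if (!irq_flags)
		irq_flags = driver_default;

	return devm_request_irq(dev, irq, handler, irq_flags, dev_name(dev), priv);
}
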
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index c247ff1541d2..4156639b3c8b 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
@@ -17,13 +18,12 @@
#include <linux/regmap.h>
#include <linux/slab.h>
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/buffer-dmaengine.h>
-
#include <linux/fpga/adi-axi-common.h>
-#include <linux/iio/adc/adi-axi-adc.h>
+
+#include <linux/iio/backend.h>
+#include <linux/iio/buffer-dmaengine.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
/*
* Register definitions:
@@ -44,6 +44,7 @@
#define ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR BIT(10)
#define ADI_AXI_REG_CHAN_CTRL_IQCOR_EN BIT(9)
#define ADI_AXI_REG_CHAN_CTRL_DCFILT_EN BIT(8)
+#define ADI_AXI_REG_CHAN_CTRL_FMT_MASK GENMASK(6, 4)
#define ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT BIT(6)
#define ADI_AXI_REG_CHAN_CTRL_FMT_TYPE BIT(5)
#define ADI_AXI_REG_CHAN_CTRL_FMT_EN BIT(4)
@@ -55,286 +56,100 @@
ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
ADI_AXI_REG_CHAN_CTRL_ENABLE)
-struct adi_axi_adc_core_info {
- unsigned int version;
-};
-
struct adi_axi_adc_state {
- struct mutex lock;
-
- struct adi_axi_adc_client *client;
struct regmap *regmap;
-};
-
-struct adi_axi_adc_client {
- struct list_head entry;
- struct adi_axi_adc_conv conv;
- struct adi_axi_adc_state *state;
struct device *dev;
- const struct adi_axi_adc_core_info *info;
};
-static LIST_HEAD(registered_clients);
-static DEFINE_MUTEX(registered_clients_lock);
-
-static struct adi_axi_adc_client *conv_to_client(struct adi_axi_adc_conv *conv)
-{
- return container_of(conv, struct adi_axi_adc_client, conv);
-}
-
-void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv)
-{
- struct adi_axi_adc_client *cl = conv_to_client(conv);
-
- return (char *)cl + ALIGN(sizeof(struct adi_axi_adc_client),
- IIO_DMA_MINALIGN);
-}
-EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI);
-
-static int adi_axi_adc_config_dma_buffer(struct device *dev,
- struct iio_dev *indio_dev)
-{
- const char *dma_name;
-
- if (!device_property_present(dev, "dmas"))
- return 0;
-
- if (device_property_read_string(dev, "dma-names", &dma_name))
- dma_name = "rx";
-
- return devm_iio_dmaengine_buffer_setup(indio_dev->dev.parent,
- indio_dev, dma_name);
-}
-
-static int adi_axi_adc_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2, long mask)
-{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- if (!conv->read_raw)
- return -EOPNOTSUPP;
-
- return conv->read_raw(conv, chan, val, val2, mask);
-}
-
-static int adi_axi_adc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
-{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- if (!conv->write_raw)
- return -EOPNOTSUPP;
-
- return conv->write_raw(conv, chan, val, val2, mask);
-}
-
-static int adi_axi_adc_read_avail(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- const int **vals, int *type, int *length,
- long mask)
-{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
-
- if (!conv->read_avail)
- return -EOPNOTSUPP;
-
- return conv->read_avail(conv, chan, vals, type, length, mask);
-}
-
-static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev,
- const unsigned long *scan_mask)
+static int axi_adc_enable(struct iio_backend *back)
{
- struct adi_axi_adc_state *st = iio_priv(indio_dev);
- struct adi_axi_adc_conv *conv = &st->client->conv;
- unsigned int i;
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
int ret;
- for (i = 0; i < conv->chip_info->num_channels; i++) {
- if (test_bit(i, scan_mask))
- ret = regmap_set_bits(st->regmap,
- ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_ENABLE);
- else
- ret = regmap_clear_bits(st->regmap,
- ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_ENABLE);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct adi_axi_adc_conv *adi_axi_adc_conv_register(struct device *dev,
- size_t sizeof_priv)
-{
- struct adi_axi_adc_client *cl;
- size_t alloc_size;
-
- alloc_size = ALIGN(sizeof(struct adi_axi_adc_client), IIO_DMA_MINALIGN);
- if (sizeof_priv)
- alloc_size += ALIGN(sizeof_priv, IIO_DMA_MINALIGN);
-
- cl = kzalloc(alloc_size, GFP_KERNEL);
- if (!cl)
- return ERR_PTR(-ENOMEM);
-
- mutex_lock(&registered_clients_lock);
-
- cl->dev = get_device(dev);
-
- list_add_tail(&cl->entry, &registered_clients);
-
- mutex_unlock(&registered_clients_lock);
+ ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_MMCM_RSTN);
+ if (ret)
+ return ret;
- return &cl->conv;
+ fsleep(10000);
+ return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
+ ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}
-static void adi_axi_adc_conv_unregister(struct adi_axi_adc_conv *conv)
+static void axi_adc_disable(struct iio_backend *back)
{
- struct adi_axi_adc_client *cl = conv_to_client(conv);
-
- mutex_lock(&registered_clients_lock);
-
- list_del(&cl->entry);
- put_device(cl->dev);
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- mutex_unlock(&registered_clients_lock);
-
- kfree(cl);
+ regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}
-static void devm_adi_axi_adc_conv_release(void *conv)
+static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data)
{
- adi_axi_adc_conv_unregister(conv);
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 val;
+
+ if (!data->enable)
+ return regmap_clear_bits(st->regmap,
+ ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_FMT_EN);
+
+ val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
+ if (data->sign_extend)
+ val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
+ if (data->type == IIO_BACKEND_OFFSET_BINARY)
+ val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);
+
+ return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}
-struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev,
- size_t sizeof_priv)
+static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
- struct adi_axi_adc_conv *conv;
- int ret;
-
- conv = adi_axi_adc_conv_register(dev, sizeof_priv);
- if (IS_ERR(conv))
- return conv;
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- ret = devm_add_action_or_reset(dev, devm_adi_axi_adc_conv_release,
- conv);
- if (ret)
- return ERR_PTR(ret);
-
- return conv;
+ return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
-EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI);
-
-static const struct iio_info adi_axi_adc_info = {
- .read_raw = &adi_axi_adc_read_raw,
- .write_raw = &adi_axi_adc_write_raw,
- .update_scan_mode = &adi_axi_adc_update_scan_mode,
- .read_avail = &adi_axi_adc_read_avail,
-};
-
-static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = {
- .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
-};
-static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
+static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
- const struct adi_axi_adc_core_info *info;
- struct adi_axi_adc_client *cl;
- struct device_node *cln;
-
- info = of_device_get_match_data(dev);
- if (!info)
- return ERR_PTR(-ENODEV);
-
- cln = of_parse_phandle(dev->of_node, "adi,adc-dev", 0);
- if (!cln) {
- dev_err(dev, "No 'adi,adc-dev' node defined\n");
- return ERR_PTR(-ENODEV);
- }
-
- mutex_lock(&registered_clients_lock);
-
- list_for_each_entry(cl, &registered_clients, entry) {
- if (!cl->dev)
- continue;
-
- if (cl->dev->of_node != cln)
- continue;
-
- if (!try_module_get(cl->dev->driver->owner)) {
- mutex_unlock(&registered_clients_lock);
- of_node_put(cln);
- return ERR_PTR(-ENODEV);
- }
-
- get_device(cl->dev);
- cl->info = info;
- mutex_unlock(&registered_clients_lock);
- of_node_put(cln);
- return cl;
- }
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
- mutex_unlock(&registered_clients_lock);
- of_node_put(cln);
-
- return ERR_PTR(-EPROBE_DEFER);
+ return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
+ ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
-static int adi_axi_adc_setup_channels(struct device *dev,
- struct adi_axi_adc_state *st)
+static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
+ struct iio_dev *indio_dev)
{
- struct adi_axi_adc_conv *conv = &st->client->conv;
- int i, ret;
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ struct iio_buffer *buffer;
+ const char *dma_name;
+ int ret;
- if (conv->preenable_setup) {
- ret = conv->preenable_setup(conv);
- if (ret)
- return ret;
- }
+ if (device_property_read_string(st->dev, "dma-names", &dma_name))
+ dma_name = "rx";
- for (i = 0; i < conv->chip_info->num_channels; i++) {
- ret = regmap_write(st->regmap, ADI_AXI_REG_CHAN_CTRL(i),
- ADI_AXI_REG_CHAN_CTRL_DEFAULTS);
- if (ret)
- return ret;
+ buffer = iio_dmaengine_buffer_alloc(st->dev, dma_name);
+ if (IS_ERR(buffer)) {
+ dev_err(st->dev, "Could not get DMA buffer, %ld\n",
+ PTR_ERR(buffer));
+ return ERR_CAST(buffer);
}
- return 0;
-}
-
-static int axi_adc_reset(struct adi_axi_adc_state *st)
-{
- int ret;
-
- ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
- if (ret)
- return ret;
-
- mdelay(10);
- ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN,
- ADI_AXI_REG_RSTN_MMCM_RSTN);
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+ ret = iio_device_attach_buffer(indio_dev, buffer);
if (ret)
- return ret;
+ return ERR_PTR(ret);
- mdelay(10);
- return regmap_write(st->regmap, ADI_AXI_REG_RSTN,
- ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
+ return buffer;
}
-static void adi_axi_adc_cleanup(void *data)
+static void axi_adc_free_buffer(struct iio_backend *back,
+ struct iio_buffer *buffer)
{
- struct adi_axi_adc_client *cl = data;
-
- put_device(cl->dev);
- module_put(cl->dev->driver->owner);
+ iio_dmaengine_buffer_free(buffer);
}
static const struct regmap_config axi_adc_regmap_config = {
@@ -344,45 +159,47 @@ static const struct regmap_config axi_adc_regmap_config = {
.max_register = 0x0800,
};
+static const struct iio_backend_ops adi_axi_adc_generic = {
+ .enable = axi_adc_enable,
+ .disable = axi_adc_disable,
+ .data_format_set = axi_adc_data_format_set,
+ .chan_enable = axi_adc_chan_enable,
+ .chan_disable = axi_adc_chan_disable,
+ .request_buffer = axi_adc_request_buffer,
+ .free_buffer = axi_adc_free_buffer,
+};
+
static int adi_axi_adc_probe(struct platform_device *pdev)
{
- struct adi_axi_adc_conv *conv;
- struct iio_dev *indio_dev;
- struct adi_axi_adc_client *cl;
+ const unsigned int *expected_ver;
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
int ret;
- cl = adi_axi_adc_attach_client(&pdev->dev);
- if (IS_ERR(cl))
- return PTR_ERR(cl);
-
- ret = devm_add_action_or_reset(&pdev->dev, adi_axi_adc_cleanup, cl);
- if (ret)
- return ret;
-
- indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
- if (indio_dev == NULL)
+ st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
return -ENOMEM;
- st = iio_priv(indio_dev);
- st->client = cl;
- cl->state = st;
- mutex_init(&st->lock);
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
+ st->dev = &pdev->dev;
st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
&axi_adc_regmap_config);
if (IS_ERR(st->regmap))
return PTR_ERR(st->regmap);
- conv = &st->client->conv;
+ expected_ver = device_get_match_data(&pdev->dev);
+ if (!expected_ver)
+ return -ENODEV;
- ret = axi_adc_reset(st);
+ /*
+ * Force disable the core. Up to the frontend to enable us. And we can
+ * still read/write registers...
+ */
+ ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
if (ret)
return ret;
@@ -390,33 +207,19 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (cl->info->version > ver) {
+ if (*expected_ver > ver) {
dev_err(&pdev->dev,
"IP core version is too old. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
- ADI_AXI_PCORE_VER_MAJOR(cl->info->version),
- ADI_AXI_PCORE_VER_MINOR(cl->info->version),
- ADI_AXI_PCORE_VER_PATCH(cl->info->version),
+ ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
+ ADI_AXI_PCORE_VER_MINOR(*expected_ver),
+ ADI_AXI_PCORE_VER_PATCH(*expected_ver),
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
ADI_AXI_PCORE_VER_PATCH(ver));
return -ENODEV;
}
- indio_dev->info = &adi_axi_adc_info;
- indio_dev->name = "adi-axi-adc";
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->num_channels = conv->chip_info->num_channels;
- indio_dev->channels = conv->chip_info->channels;
-
- ret = adi_axi_adc_config_dma_buffer(&pdev->dev, indio_dev);
- if (ret)
- return ret;
-
- ret = adi_axi_adc_setup_channels(&pdev->dev, st);
- if (ret)
- return ret;
-
- ret = devm_iio_device_register(&pdev->dev, indio_dev);
+ ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
if (ret)
return ret;
@@ -428,6 +231,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
return 0;
}
+static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');
+
/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
@@ -447,3 +252,5 @@ module_platform_driver(adi_axi_adc_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
+MODULE_IMPORT_NS(IIO_BACKEND);
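
Taken together, the ad9467 and adi-axi-adc changes split the old converter/IP-core coupling into the generic IIO backend framework: the AXI ADC core no longer creates an iio_dev on behalf of the converter but instead exposes enable/disable, per-channel enable and data-format hooks through struct iio_backend_ops and registers them with devm_iio_backend_register(), while the converter driver owns the iio_dev and reaches the core through devm_iio_backend_get() and the iio_backend_* calls. A stripped-down sketch of such a backend provider, with a hypothetical register layout:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/iio/backend.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct example_backend_state {
	struct regmap *regmap;
};

static int example_chan_enable(struct iio_backend *back, unsigned int chan)
{
	struct example_backend_state *st = iio_backend_get_priv(back);

	/* hypothetical layout: one enable bit per channel register */
	return regmap_set_bits(st->regmap, 0x400 + chan * 0x40, BIT(0));
}

static const struct iio_backend_ops example_backend_ops = {
	.chan_enable = example_chan_enable,
};

static int example_backend_probe(struct platform_device *pdev)
{
	struct example_backend_state *st;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	/* regmap initialisation omitted for brevity */
	return devm_iio_backend_register(&pdev->dev, &example_backend_ops, st);
}
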
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index 7c2a98b8c3a9..8b5bc96cb9fb 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -357,62 +357,55 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int *val,
long m)
{
- int ret = 0;
- s32 data;
- u8 rxbuf[2];
- struct max1363_state *st = iio_priv(indio_dev);
- struct i2c_client *client = st->client;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
- /*
- * If monitor mode is enabled, the method for reading a single
- * channel will have to be rather different and has not yet
- * been implemented.
- *
- * Also, cannot read directly if buffered capture enabled.
- */
- if (st->monitor_on) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- goto error_ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = st->recv(client, rxbuf, 2);
- if (data < 0) {
- ret = data;
- goto error_ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ s32 data;
+ u8 rxbuf[2];
+ struct max1363_state *st = iio_priv(indio_dev);
+ struct i2c_client *client = st->client;
+
+ guard(mutex)(&st->lock);
+
+ /*
+ * If monitor mode is enabled, the method for reading a single
+ * channel will have to be rather different and has not yet
+ * been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
+ */
+ if (st->monitor_on)
+ return -EBUSY;
+
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ int ret;
+
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ return ret;
}
- data = (rxbuf[1] | rxbuf[0] << 8) &
- ((1 << st->chip_info->bits) - 1);
- } else {
- /* Get reading */
- data = st->recv(client, rxbuf, 1);
- if (data < 0) {
- ret = data;
- goto error_ret;
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 2);
+ if (data < 0)
+ return data;
+
+ data = (rxbuf[1] | rxbuf[0] << 8) &
+ ((1 << st->chip_info->bits) - 1);
+ } else {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 1);
+ if (data < 0)
+ return data;
+
+ data = rxbuf[0];
}
- data = rxbuf[0];
- }
- *val = data;
-
-error_ret:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ *val = data;
+ return 0;
+ }
+ unreachable();
}
static int max1363_read_raw(struct iio_dev *indio_dev,
@@ -710,9 +703,8 @@ static ssize_t max1363_monitor_store_freq(struct device *dev,
if (!found)
return -EINVAL;
- mutex_lock(&st->lock);
- st->monitor_speed = i;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock)
+ st->monitor_speed = i;
return 0;
}
@@ -815,12 +807,11 @@ static int max1363_read_event_config(struct iio_dev *indio_dev,
int val;
int number = chan->channel;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
if (dir == IIO_EV_DIR_FALLING)
val = (1 << number) & st->mask_low;
else
val = (1 << number) & st->mask_high;
- mutex_unlock(&st->lock);
return val;
}
@@ -962,46 +953,42 @@ static int max1363_write_event_config(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, enum iio_event_type type,
enum iio_event_direction dir, int state)
{
- int ret = 0;
struct max1363_state *st = iio_priv(indio_dev);
- u16 unifiedmask;
- int number = chan->channel;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
-
- unifiedmask = st->mask_low | st->mask_high;
- if (dir == IIO_EV_DIR_FALLING) {
-
- if (state == 0)
- st->mask_low &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- goto error_ret;
- st->mask_low |= (1 << number);
- }
- } else {
- if (state == 0)
- st->mask_high &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- goto error_ret;
- st->mask_high |= (1 << number);
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ int number = chan->channel;
+ u16 unifiedmask;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ unifiedmask = st->mask_low | st->mask_high;
+ if (dir == IIO_EV_DIR_FALLING) {
+
+ if (state == 0)
+ st->mask_low &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_low |= (1 << number);
+ }
+ } else {
+ if (state == 0)
+ st->mask_high &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_high |= (1 << number);
+ }
}
}
-
max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
-error_ret:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index f3b81798b3c9..da1421bd7b62 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -371,6 +371,11 @@ static const struct mcp320x_chip_info mcp320x_chip_infos[] = {
},
};
+static void mcp320x_regulator_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
static int mcp320x_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
@@ -388,7 +393,6 @@ static int mcp320x_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &mcp320x_info;
- spi_set_drvdata(spi, indio_dev);
device_index = spi_get_device_id(spi)->driver_data;
chip_info = &mcp320x_chip_infos[device_index];
@@ -445,27 +449,13 @@ static int mcp320x_probe(struct spi_device *spi)
if (ret < 0)
return ret;
- mutex_init(&adc->lock);
-
- ret = iio_device_register(indio_dev);
+ ret = devm_add_action_or_reset(&spi->dev, mcp320x_regulator_disable, adc->reg);
if (ret < 0)
- goto reg_disable;
-
- return 0;
-
-reg_disable:
- regulator_disable(adc->reg);
-
- return ret;
-}
+ return ret;
-static void mcp320x_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct mcp320x *adc = iio_priv(indio_dev);
+ mutex_init(&adc->lock);
- iio_device_unregister(indio_dev);
- regulator_disable(adc->reg);
+ return devm_iio_device_register(&spi->dev, indio_dev);
}
static const struct of_device_id mcp320x_dt_ids[] = {
@@ -520,7 +510,6 @@ static struct spi_driver mcp320x_driver = {
.of_match_table = mcp320x_dt_ids,
},
.probe = mcp320x_probe,
- .remove = mcp320x_remove,
.id_table = mcp320x_id,
};
module_spi_driver(mcp320x_driver);
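
The mcp320x change replaces the explicit .remove() unwinding with a devm action: devm_add_action_or_reset() registers a callback that runs automatically on probe failure or driver unbind, so the regulator enable is undone without keeping a manual error/remove path. A minimal sketch of the pattern, with a hypothetical supply name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

static void example_regulator_disable(void *reg)
{
	regulator_disable(reg);
}

static int example_probe(struct spi_device *spi)
{
	struct regulator *vref;
	int ret;

	vref = devm_regulator_get(&spi->dev, "vref");	/* "vref" is an assumed supply */
	if (IS_ERR(vref))
		return PTR_ERR(vref);

	ret = regulator_enable(vref);
	if (ret)
		return ret;

	/* disable runs automatically on probe error or device unbind */
	return devm_add_action_or_reset(&spi->dev, example_regulator_disable, vref);
}
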
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
new file mode 100644
index 000000000000..e0c2742da523
--- /dev/null
+++ b/drivers/iio/adc/pac1934.c
@@ -0,0 +1,1636 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * IIO driver for PAC1934 Multi-Channel DC Power/Energy Monitor
+ *
+ * Copyright (C) 2017-2024 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Bogdan Bolocan <bogdan.bolocan@microchip.com>
+ * Author: Victor Tudose
+ * Author: Marius Cristea <marius.cristea@microchip.com>
+ *
+ * Datasheet for PAC1931, PAC1932, PAC1933 and PAC1934 can be found here:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ProductDocuments/DataSheets/PAC1931-Family-Data-Sheet-DS20005850E.pdf
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <asm/unaligned.h>
+
+/*
+ * maximum accumulation time should be (17 * 60 * 1000), around 17 minutes @ 1024 sps,
+ * before the PAC1934 accumulation registers start to saturate
+ */
+#define PAC1934_MAX_RFSH_LIMIT_MS 60000
+/* 50msec is the timeout for validity of the cached registers */
+#define PAC1934_MIN_POLLING_TIME_MS 50
+/*
+ * 1000usec is the minimum wait time for normal conversions when sample
+ * rate doesn't change
+ */
+#define PAC1934_MIN_UPDATE_WAIT_TIME_US 1000
+
+/* 32000mV */
+#define PAC1934_VOLTAGE_MILLIVOLTS_MAX 32000
+/* voltage bits resolution when set for unsigned values */
+#define PAC1934_VOLTAGE_U_RES 16
+/* voltage bits resolution when set for signed values */
+#define PAC1934_VOLTAGE_S_RES 15
+
+/*
+ * max signed value that can be stored on 32 bits and 8 digits fractional value
+ * (2^31 - 1) * 10^8 + 99999999
+ */
+#define PAC_193X_MAX_POWER_ACC 214748364799999999LL
+/*
+ * min signed value that can be stored on 32 bits and 8 digits fractional value
+ * -(2^31) * 10^8 - 99999999
+ */
+#define PAC_193X_MIN_POWER_ACC -214748364899999999LL
+
+#define PAC1934_MAX_NUM_CHANNELS 4
+
+#define PAC1934_MEAS_REG_LEN 76
+#define PAC1934_CTRL_REG_LEN 12
+
+#define PAC1934_DEFAULT_CHIP_SAMP_SPEED_HZ 1024
+
+/* I2C address map */
+#define PAC1934_REFRESH_REG_ADDR 0x00
+#define PAC1934_CTRL_REG_ADDR 0x01
+#define PAC1934_ACC_COUNT_REG_ADDR 0x02
+#define PAC1934_VPOWER_ACC_1_ADDR 0x03
+#define PAC1934_VPOWER_ACC_2_ADDR 0x04
+#define PAC1934_VPOWER_ACC_3_ADDR 0x05
+#define PAC1934_VPOWER_ACC_4_ADDR 0x06
+#define PAC1934_VBUS_1_ADDR 0x07
+#define PAC1934_VBUS_2_ADDR 0x08
+#define PAC1934_VBUS_3_ADDR 0x09
+#define PAC1934_VBUS_4_ADDR 0x0A
+#define PAC1934_VSENSE_1_ADDR 0x0B
+#define PAC1934_VSENSE_2_ADDR 0x0C
+#define PAC1934_VSENSE_3_ADDR 0x0D
+#define PAC1934_VSENSE_4_ADDR 0x0E
+#define PAC1934_VBUS_AVG_1_ADDR 0x0F
+#define PAC1934_VBUS_AVG_2_ADDR 0x10
+#define PAC1934_VBUS_AVG_3_ADDR 0x11
+#define PAC1934_VBUS_AVG_4_ADDR 0x12
+#define PAC1934_VSENSE_AVG_1_ADDR 0x13
+#define PAC1934_VSENSE_AVG_2_ADDR 0x14
+#define PAC1934_VSENSE_AVG_3_ADDR 0x15
+#define PAC1934_VSENSE_AVG_4_ADDR 0x16
+#define PAC1934_VPOWER_1_ADDR 0x17
+#define PAC1934_VPOWER_2_ADDR 0x18
+#define PAC1934_VPOWER_3_ADDR 0x19
+#define PAC1934_VPOWER_4_ADDR 0x1A
+#define PAC1934_REFRESH_V_REG_ADDR 0x1F
+#define PAC1934_CTRL_STAT_REGS_ADDR 0x1C
+#define PAC1934_PID_REG_ADDR 0xFD
+#define PAC1934_MID_REG_ADDR 0xFE
+#define PAC1934_RID_REG_ADDR 0xFF
+
+/* PRODUCT ID REGISTER + MANUFACTURER ID REGISTER + REVISION ID REGISTER */
+#define PAC1934_ID_REG_LEN 3
+#define PAC1934_PID_IDX 0
+#define PAC1934_MID_IDX 1
+#define PAC1934_RID_IDX 2
+
+#define PAC1934_ACPI_GET_NAMES_AND_MOHMS_VALS 1
+#define PAC1934_ACPI_GET_UOHMS_VALS 2
+#define PAC1934_ACPI_GET_BIPOLAR_SETTINGS 4
+#define PAC1934_ACPI_GET_SAMP 5
+
+#define PAC1934_SAMPLE_RATE_SHIFT 6
+
+#define PAC1934_VBUS_SENSE_REG_LEN 2
+#define PAC1934_ACC_REG_LEN 3
+#define PAC1934_VPOWER_REG_LEN 4
+#define PAC1934_VPOWER_ACC_REG_LEN 6
+#define PAC1934_MAX_REGISTER_LENGTH 6
+
+#define PAC1934_CUSTOM_ATTR_FOR_CHANNEL 1
+
+/*
+ * relative offsets when using multi-byte reads/writes; even though these
+ * bytes are read one after the other, they are not at adjacent memory
+ * locations within the I2C memory map, as the chip can skip some addresses
+ */
+#define PAC1934_CHANNEL_DIS_REG_OFF 0
+#define PAC1934_NEG_PWR_REG_OFF 1
+
+/*
+ * when reading/writing multiple bytes from offset PAC1934_CHANNEL_DIS_REG_OFF,
+ * the chip jumps over the 0x1E (REFRESH_G) and 0x1F (REFRESH_V) offsets
+ */
+#define PAC1934_SLOW_REG_OFF 2
+#define PAC1934_CTRL_ACT_REG_OFF 3
+#define PAC1934_CHANNEL_DIS_ACT_REG_OFF 4
+#define PAC1934_NEG_PWR_ACT_REG_OFF 5
+#define PAC1934_CTRL_LAT_REG_OFF 6
+#define PAC1934_CHANNEL_DIS_LAT_REG_OFF 7
+#define PAC1934_NEG_PWR_LAT_REG_OFF 8
+#define PAC1934_PID_REG_OFF 9
+#define PAC1934_MID_REG_OFF 10
+#define PAC1934_REV_REG_OFF 11
+#define PAC1934_CTRL_STATUS_INFO_LEN 12
+
+#define PAC1934_MID 0x5D
+#define PAC1931_PID 0x58
+#define PAC1932_PID 0x59
+#define PAC1933_PID 0x5A
+#define PAC1934_PID 0x5B
+
+/* Scale constant = (10^3 * 3.2 * 10^9 / 2^28) for milli Watt-second */
+#define PAC1934_SCALE_CONSTANT 11921
+
+#define PAC1934_MAX_VPOWER_RSHIFTED_BY_28B 11921
+#define PAC1934_MAX_VSENSE_RSHIFTED_BY_16B 1525
+
+#define PAC1934_DEV_ATTR(name) (&iio_dev_attr_##name.dev_attr.attr)
+
+#define PAC1934_CRTL_SAMPLE_RATE_MASK GENMASK(7, 6)
+#define PAC1934_CHAN_SLEEP_MASK BIT(5)
+#define PAC1934_CHAN_SLEEP_SET BIT(5)
+#define PAC1934_CHAN_SINGLE_MASK BIT(4)
+#define PAC1934_CHAN_SINGLE_SHOT_SET BIT(4)
+#define PAC1934_CHAN_ALERT_MASK BIT(3)
+#define PAC1934_CHAN_ALERT_EN BIT(3)
+#define PAC1934_CHAN_ALERT_CC_MASK BIT(2)
+#define PAC1934_CHAN_ALERT_CC_EN BIT(2)
+#define PAC1934_CHAN_OVF_ALERT_MASK BIT(1)
+#define PAC1934_CHAN_OVF_ALERT_EN BIT(1)
+#define PAC1934_CHAN_OVF_MASK BIT(0)
+
+#define PAC1934_CHAN_DIS_CH1_OFF_MASK BIT(7)
+#define PAC1934_CHAN_DIS_CH2_OFF_MASK BIT(6)
+#define PAC1934_CHAN_DIS_CH3_OFF_MASK BIT(5)
+#define PAC1934_CHAN_DIS_CH4_OFF_MASK BIT(4)
+#define PAC1934_SMBUS_TIMEOUT_MASK BIT(3)
+#define PAC1934_SMBUS_BYTECOUNT_MASK BIT(2)
+#define PAC1934_SMBUS_NO_SKIP_MASK BIT(1)
+
+#define PAC1934_NEG_PWR_CH1_BIDI_MASK BIT(7)
+#define PAC1934_NEG_PWR_CH2_BIDI_MASK BIT(6)
+#define PAC1934_NEG_PWR_CH3_BIDI_MASK BIT(5)
+#define PAC1934_NEG_PWR_CH4_BIDI_MASK BIT(4)
+#define PAC1934_NEG_PWR_CH1_BIDV_MASK BIT(3)
+#define PAC1934_NEG_PWR_CH2_BIDV_MASK BIT(2)
+#define PAC1934_NEG_PWR_CH3_BIDV_MASK BIT(1)
+#define PAC1934_NEG_PWR_CH4_BIDV_MASK BIT(0)
+
+/*
+ * Universally Unique Identifier (UUID),
+ * 033771E0-1705-47B4-9535-D1BBE14D9A09,
+ * is reserved to Microchip for the PAC1934.
+ */
+#define PAC1934_DSM_UUID "033771E0-1705-47B4-9535-D1BBE14D9A09"
+
+enum pac1934_ids {
+ PAC1931,
+ PAC1932,
+ PAC1933,
+ PAC1934
+};
+
+enum pac1934_samps {
+ PAC1934_SAMP_1024SPS,
+ PAC1934_SAMP_256SPS,
+ PAC1934_SAMP_64SPS,
+ PAC1934_SAMP_8SPS
+};
+
+/*
+ * these indexes exactly describe the element order within a single
+ * PAC1934 phys channel IIO channel descriptor; see the static const struct
+ * iio_chan_spec pac1934_single_channel[] declaration
+ */
+enum pac1934_ch_idx {
+ PAC1934_CH_ENERGY,
+ PAC1934_CH_POWER,
+ PAC1934_CH_VOLTAGE,
+ PAC1934_CH_CURRENT,
+ PAC1934_CH_VOLTAGE_AVERAGE,
+ PAC1934_CH_CURRENT_AVERAGE
+};
+
+/**
+ * struct pac1934_features - features of a pac1934 instance
+ * @phys_channels: number of physical channels supported by the chip
+ * @name: chip's name
+ */
+struct pac1934_features {
+ u8 phys_channels;
+ const char *name;
+};
+
+struct samp_rate_mapping {
+ u16 samp_rate;
+ u8 shift2value;
+};
+
+static const unsigned int samp_rate_map_tbl[] = {
+ [PAC1934_SAMP_1024SPS] = 1024,
+ [PAC1934_SAMP_256SPS] = 256,
+ [PAC1934_SAMP_64SPS] = 64,
+ [PAC1934_SAMP_8SPS] = 8,
+};
+
+static const struct pac1934_features pac1934_chip_config[] = {
+ [PAC1931] = {
+ .phys_channels = 1,
+ .name = "pac1931",
+ },
+ [PAC1932] = {
+ .phys_channels = 2,
+ .name = "pac1932",
+ },
+ [PAC1933] = {
+ .phys_channels = 3,
+ .name = "pac1933",
+ },
+ [PAC1934] = {
+ .phys_channels = 4,
+ .name = "pac1934",
+ },
+};
+
+/**
+ * struct reg_data - data from the registers
+ * @meas_regs: snapshot of raw measurements registers
+ * @ctrl_regs: snapshot of control registers
+ * @energy_sec_acc: snapshot of energy values
+ * @vpower_acc: accumulated vpower values
+ * @vpower: snapshot of vpower registers
+ * @vbus: snapshot of vbus registers
+ * @vbus_avg: averages of vbus registers
+ * @vsense: snapshot of vsense registers
+ * @vsense_avg: averages of vsense registers
+ * @num_enabled_channels: count of how many chip channels are currently enabled
+ */
+struct reg_data {
+ u8 meas_regs[PAC1934_MEAS_REG_LEN];
+ u8 ctrl_regs[PAC1934_CTRL_REG_LEN];
+ s64 energy_sec_acc[PAC1934_MAX_NUM_CHANNELS];
+ s64 vpower_acc[PAC1934_MAX_NUM_CHANNELS];
+ s32 vpower[PAC1934_MAX_NUM_CHANNELS];
+ s32 vbus[PAC1934_MAX_NUM_CHANNELS];
+ s32 vbus_avg[PAC1934_MAX_NUM_CHANNELS];
+ s32 vsense[PAC1934_MAX_NUM_CHANNELS];
+ s32 vsense_avg[PAC1934_MAX_NUM_CHANNELS];
+ u8 num_enabled_channels;
+};
+
+/**
+ * struct pac1934_chip_info - information about the chip
+ * @client: the i2c-client attached to the device
+ * @lock: synchronize access to driver's state members
+ * @work_chip_rfsh: delayed work used to issue refresh commands
+ * @phys_channels: phys channels count
+ * @active_channels: array of values, true means that channel is active
+ * @enable_energy: array of values, true means that channel energy is measured
+ * @bi_dir: array of bools, true means that channel is bidirectional
+ * @chip_variant: chip variant
+ * @chip_revision: chip revision
+ * @shunts: shunt resistor values, one per channel
+ * @chip_reg_data: cached snapshot of the chip register data
+ * @sample_rate_value: sampling frequency
+ * @labels: table with channel labels
+ * @iio_info: iio_info structure describing the device
+ * @tstamp: chip's uptime
+ */
+struct pac1934_chip_info {
+ struct i2c_client *client;
+ struct mutex lock; /* synchronize access to driver's state members */
+ struct delayed_work work_chip_rfsh;
+ u8 phys_channels;
+ bool active_channels[PAC1934_MAX_NUM_CHANNELS];
+ bool enable_energy[PAC1934_MAX_NUM_CHANNELS];
+ bool bi_dir[PAC1934_MAX_NUM_CHANNELS];
+ u8 chip_variant;
+ u8 chip_revision;
+ u32 shunts[PAC1934_MAX_NUM_CHANNELS];
+ struct reg_data chip_reg_data;
+ s32 sample_rate_value;
+ char *labels[PAC1934_MAX_NUM_CHANNELS];
+ struct iio_info iio_info;
+ unsigned long tstamp;
+};
+
+#define TO_PAC1934_CHIP_INFO(d) container_of(d, struct pac1934_chip_info, work_chip_rfsh)
+
+#define PAC1934_VPOWER_ACC_CHANNEL(_index, _si, _address) { \
+ .type = IIO_ENERGY, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_ENABLE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 48, \
+ .storagebits = 64, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VBUS_CHANNEL(_index, _si, _address) { \
+ .type = IIO_VOLTAGE, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VBUS_AVG_CHANNEL(_index, _si, _address) { \
+ .type = IIO_VOLTAGE, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VSENSE_CHANNEL(_index, _si, _address) { \
+ .type = IIO_CURRENT, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VSENSE_AVG_CHANNEL(_index, _si, _address) { \
+ .type = IIO_CURRENT, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+#define PAC1934_VPOWER_CHANNEL(_index, _si, _address) { \
+ .type = IIO_POWER, \
+ .address = (_address), \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 28, \
+ .storagebits = 32, \
+ .shift = 4, \
+ .endianness = IIO_CPU, \
+ } \
+}
+
+static const struct iio_chan_spec pac1934_single_channel[] = {
+ PAC1934_VPOWER_ACC_CHANNEL(0, 0, PAC1934_VPOWER_ACC_1_ADDR),
+ PAC1934_VPOWER_CHANNEL(0, 0, PAC1934_VPOWER_1_ADDR),
+ PAC1934_VBUS_CHANNEL(0, 0, PAC1934_VBUS_1_ADDR),
+ PAC1934_VSENSE_CHANNEL(0, 0, PAC1934_VSENSE_1_ADDR),
+ PAC1934_VBUS_AVG_CHANNEL(0, 0, PAC1934_VBUS_AVG_1_ADDR),
+ PAC1934_VSENSE_AVG_CHANNEL(0, 0, PAC1934_VSENSE_AVG_1_ADDR),
+};
+
+/* Low-level I2C read function used to transfer up to 76 bytes at once */
+static int pac1934_i2c_read(struct i2c_client *client, u8 reg_addr,
+ void *databuf, u8 len)
+{
+ int ret;
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = client->addr,
+ .len = 1,
+ .buf = (u8 *)&reg_addr,
+ },
+ {
+ .addr = client->addr,
+ .len = len,
+ .buf = databuf,
+ .flags = I2C_M_RD
+ }
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pac1934_get_samp_rate_idx(struct pac1934_chip_info *info,
+ u32 new_samp_rate)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < ARRAY_SIZE(samp_rate_map_tbl); cnt++)
+ if (new_samp_rate == samp_rate_map_tbl[cnt])
+ return cnt;
+
+ /* not a valid sample rate value */
+ return -EINVAL;
+}
+
+static ssize_t pac1934_shunt_value_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+
+ return sysfs_emit(buf, "%u\n", info->shunts[this_attr->address]);
+}
+
+static ssize_t pac1934_shunt_value_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned int sh_val;
+
+ if (kstrtouint(buf, 10, &sh_val)) {
+ dev_err(dev, "Shunt value is not valid\n");
+ return -EINVAL;
+ }
+
+ scoped_guard(mutex, &info->lock)
+ info->shunts[this_attr->address] = sh_val;
+
+ return count;
+}
+
+static int pac1934_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ const int **vals, int *type, int *length, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *type = IIO_VAL_INT;
+ *vals = samp_rate_map_tbl;
+ *length = ARRAY_SIZE(samp_rate_map_tbl);
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+
+static int pac1934_send_refresh(struct pac1934_chip_info *info,
+ u8 refresh_cmd, u32 wait_time)
+{
+ /* this function only sends REFRESH or REFRESH_V */
+ struct i2c_client *client = info->client;
+ int ret;
+ u8 bidir_reg;
+ bool revision_bug = false;
+
+ if (info->chip_revision == 2 || info->chip_revision == 3) {
+ /*
+ * chip rev 2 and 3 bug workaround
+ * see: PAC1934 Family Data Sheet Errata DS80000836A.pdf
+ */
+ revision_bug = true;
+
+ bidir_reg =
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDI_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDI_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDI_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDI_MASK, info->bi_dir[3]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDV_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDV_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDV_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDV_MASK, info->bi_dir[3]);
+
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR +
+ PAC1934_NEG_PWR_REG_OFF,
+ bidir_reg);
+ if (ret)
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte(client, refresh_cmd);
+ if (ret) {
+ dev_err(&client->dev, "%s - cannot send 0x%02X\n",
+ __func__, refresh_cmd);
+ return ret;
+ }
+
+ if (revision_bug) {
+ /*
+ * chip rev 2 and 3 bug workaround - write the same
+ * register again so that the updated value takes effect
+ */
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR +
+ PAC1934_NEG_PWR_REG_OFF, bidir_reg);
+ if (ret)
+ return ret;
+ }
+
+ /* register data retrieval timestamp */
+ info->tstamp = jiffies;
+
+ /* wait till the data is available */
+ usleep_range(wait_time, wait_time + 100);
+
+ return ret;
+}
+
+static int pac1934_reg_snapshot(struct pac1934_chip_info *info,
+ bool do_refresh, u8 refresh_cmd, u32 wait_time)
+{
+ int ret;
+ struct i2c_client *client = info->client;
+ u8 samp_shift, ctrl_regs_tmp;
+ u8 *offset_reg_data_p;
+ u16 tmp_value;
+ u32 samp_rate, cnt, tmp;
+ s64 curr_energy, inc;
+ u64 tmp_energy;
+ struct reg_data *reg_data;
+
+ guard(mutex)(&info->lock);
+
+ if (do_refresh) {
+ ret = pac1934_send_refresh(info, refresh_cmd, wait_time);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s - cannot send refresh\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ ret = i2c_smbus_read_i2c_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ PAC1934_CTRL_REG_LEN,
+ (u8 *)info->chip_reg_data.ctrl_regs);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s - cannot read ctrl/status registers\n",
+ __func__);
+ return ret;
+ }
+
+ reg_data = &info->chip_reg_data;
+
+ /* read the data registers */
+ ret = pac1934_i2c_read(client, PAC1934_ACC_COUNT_REG_ADDR,
+ (u8 *)reg_data->meas_regs, PAC1934_MEAS_REG_LEN);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s - cannot read ACC_COUNT register: %d:%d\n",
+ __func__, ret, PAC1934_MEAS_REG_LEN);
+ return ret;
+ }
+
+ /* see how much shift is required by the sample rate */
+ samp_rate = samp_rate_map_tbl[((reg_data->ctrl_regs[PAC1934_CTRL_LAT_REG_OFF]) >> 6)];
+ samp_shift = get_count_order(samp_rate);
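+ /*
+ * Worked example: at the default 1024 sps, get_count_order(1024) = 10,
+ * so shifting VPOWER_ACC right by 10 below scales the accumulator to a
+ * one-second value.
+ */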
+
+ ctrl_regs_tmp = reg_data->ctrl_regs[PAC1934_CHANNEL_DIS_LAT_REG_OFF];
+ offset_reg_data_p = &reg_data->meas_regs[PAC1934_ACC_REG_LEN];
+
+ /* start with VPOWER_ACC */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ /* check if the channel is active, skip all fields if disabled */
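+ /*
+ * Shifting CHANNEL_DIS left by the channel index moves this
+ * channel's disable flag (CH1 in bit 7 down to CH4 in bit 4)
+ * into bit 7 for the test below.
+ */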
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ /* skip if the energy accumulation is disabled */
+ if (info->enable_energy[cnt]) {
+ curr_energy = info->chip_reg_data.energy_sec_acc[cnt];
+
+ tmp_energy = get_unaligned_be48(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vpower_acc[cnt] = sign_extend64(tmp_energy, 47);
+ else
+ reg_data->vpower_acc[cnt] = tmp_energy;
+
+ /*
+ * compute the scaled to 1 second accumulated energy value;
+ * energy accumulator scaled to 1sec = VPOWER_ACC/2^samp_shift
+ * the chip's sampling rate is 2^samp_shift samples/sec
+ */
+ inc = (reg_data->vpower_acc[cnt] >> samp_shift);
+
+ /* add the power_acc field */
+ curr_energy += inc;
+
+ curr_energy = clamp(curr_energy, PAC_193X_MIN_POWER_ACC, PAC_193X_MAX_POWER_ACC);
+
+ reg_data->energy_sec_acc[cnt] = curr_energy;
+ }
+
+ offset_reg_data_p += PAC1934_VPOWER_ACC_REG_LEN;
+ }
+
+ /* continue with VBUS */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vbus[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vbus[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VSENSE */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vsense[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vsense[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VBUS_AVG */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vbus_avg[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vbus_avg[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VSENSE_AVG */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp_value = get_unaligned_be16(offset_reg_data_p);
+
+ if (info->bi_dir[cnt])
+ reg_data->vsense_avg[cnt] = sign_extend32((u32)(tmp_value), 15);
+ else
+ reg_data->vsense_avg[cnt] = tmp_value;
+
+ offset_reg_data_p += PAC1934_VBUS_SENSE_REG_LEN;
+ }
+
+ /* VPOWER */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if ((ctrl_regs_tmp << cnt) & 0x80)
+ continue;
+
+ tmp = get_unaligned_be32(offset_reg_data_p) >> 4;
+
+ if (info->bi_dir[cnt])
+ reg_data->vpower[cnt] = sign_extend32(tmp, 27);
+ else
+ reg_data->vpower[cnt] = tmp;
+
+ offset_reg_data_p += PAC1934_VPOWER_REG_LEN;
+ }
+
+ return 0;
+}
+
+static int pac1934_retrieve_data(struct pac1934_chip_info *info,
+ u32 wait_time)
+{
+ int ret = 0;
+
+ /*
+ * check if the minimal elapsed time has passed and if so,
+ * re-read the chip, otherwise the cached info is just fine
+ */
+ if (time_after(jiffies, info->tstamp + msecs_to_jiffies(PAC1934_MIN_POLLING_TIME_MS))) {
+ ret = pac1934_reg_snapshot(info, true, PAC1934_REFRESH_REG_ADDR,
+ wait_time);
+
+ /*
+ * Re-schedule the periodic register read work so that it runs
+ * before the chip's accumulator registers can saturate
+ */
+ mod_delayed_work(system_wq, &info->work_chip_rfsh,
+ msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
+ }
+
+ return ret;
+}
+
+static int pac1934_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ s64 curr_energy;
+ int ret, channel = chan->channel - 1;
+
+ /*
+ * The AVG channels are registered with their channel number offset
+ * by PAC1934_MAX_NUM_CHANNELS (see pac1934_prep_iio_channels()); map
+ * it back to the physical channel index before indexing the
+ * per-channel arrays.
+ */
+ if (channel >= PAC1934_MAX_NUM_CHANNELS)
+ channel -= PAC1934_MAX_NUM_CHANNELS;
+
+ ret = pac1934_retrieve_data(info, PAC1934_MIN_UPDATE_WAIT_TIME_US);
+ if (ret < 0)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->chip_reg_data.vbus[channel];
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ *val = info->chip_reg_data.vsense[channel];
+ return IIO_VAL_INT;
+ case IIO_POWER:
+ *val = info->chip_reg_data.vpower[channel];
+ return IIO_VAL_INT;
+ case IIO_ENERGY:
+ curr_energy = info->chip_reg_data.energy_sec_acc[channel];
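+ /* IIO_VAL_INT_64: low 32 bits go in val, high 32 bits in val2 */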
+ *val = (u32)curr_energy;
+ *val2 = (u32)(curr_energy >> 32);
+ return IIO_VAL_INT_64;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_AVERAGE_RAW:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = info->chip_reg_data.vbus_avg[channel];
+ return IIO_VAL_INT;
+ case IIO_CURRENT:
+ *val = info->chip_reg_data.vsense_avg[channel];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->address) {
+ /* Voltages - scale for millivolts */
+ case PAC1934_VBUS_1_ADDR:
+ case PAC1934_VBUS_2_ADDR:
+ case PAC1934_VBUS_3_ADDR:
+ case PAC1934_VBUS_4_ADDR:
+ case PAC1934_VBUS_AVG_1_ADDR:
+ case PAC1934_VBUS_AVG_2_ADDR:
+ case PAC1934_VBUS_AVG_3_ADDR:
+ case PAC1934_VBUS_AVG_4_ADDR:
+ *val = PAC1934_VOLTAGE_MILLIVOLTS_MAX;
+ if (chan->scan_type.sign == 'u')
+ *val2 = PAC1934_VOLTAGE_U_RES;
+ else
+ *val2 = PAC1934_VOLTAGE_S_RES;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ /*
+ * Currents - scale for mA - depends on the
+ * channel's shunt value
+ * (100mV * 1000000) / (2^16 * shunt(uohm))
+ */
+ case PAC1934_VSENSE_1_ADDR:
+ case PAC1934_VSENSE_2_ADDR:
+ case PAC1934_VSENSE_3_ADDR:
+ case PAC1934_VSENSE_4_ADDR:
+ case PAC1934_VSENSE_AVG_1_ADDR:
+ case PAC1934_VSENSE_AVG_2_ADDR:
+ case PAC1934_VSENSE_AVG_3_ADDR:
+ case PAC1934_VSENSE_AVG_4_ADDR:
+ *val = PAC1934_MAX_VSENSE_RSHIFTED_BY_16B;
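+ /*
+ * Bidirectional channels use a signed 16-bit code over the same
+ * full-scale range, so each LSB is worth twice as much; halving
+ * the shunt value in the denominator doubles the scale accordingly.
+ */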
+ if (chan->scan_type.sign == 'u')
+ *val2 = info->shunts[channel];
+ else
+ *val2 = info->shunts[channel] >> 1;
+ return IIO_VAL_FRACTIONAL;
+ /*
+ * Power - uW - it will use the combined scale
+ * for current and voltage
+ * current(mA) * voltage(mV) = power (uW)
+ */
+ case PAC1934_VPOWER_1_ADDR:
+ case PAC1934_VPOWER_2_ADDR:
+ case PAC1934_VPOWER_3_ADDR:
+ case PAC1934_VPOWER_4_ADDR:
+ *val = PAC1934_MAX_VPOWER_RSHIFTED_BY_28B;
+ if (chan->scan_type.sign == 'u')
+ *val2 = info->shunts[channel];
+ else
+ *val2 = info->shunts[channel] >> 1;
+ return IIO_VAL_FRACTIONAL;
+ case PAC1934_VPOWER_ACC_1_ADDR:
+ case PAC1934_VPOWER_ACC_2_ADDR:
+ case PAC1934_VPOWER_ACC_3_ADDR:
+ case PAC1934_VPOWER_ACC_4_ADDR:
+ /*
+ * compute the scale for energy, expressed as a 32 bit
+ * value (milliwatt-second, i.e. millijoule)
+ */
+ *val = PAC1934_SCALE_CONSTANT;
+
+ if (chan->scan_type.sign == 'u')
+ *val2 = info->shunts[channel];
+ else
+ *val2 = info->shunts[channel] >> 1;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = info->sample_rate_value;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_ENABLE:
+ *val = info->enable_energy[channel];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1934_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+ struct i2c_client *client = info->client;
+ int ret = -EINVAL;
+ s32 old_samp_rate;
+ u8 ctrl_reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = pac1934_get_samp_rate_idx(info, val);
+ if (ret < 0)
+ return ret;
+
+ /* write the new sampling value and trigger a snapshot(incl refresh) */
+ scoped_guard(mutex, &info->lock) {
+ ctrl_reg = FIELD_PREP(PAC1934_CRTL_SAMPLE_RATE_MASK, ret);
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_REG_ADDR, ctrl_reg);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s - can't update sample rate\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ old_samp_rate = info->sample_rate_value;
+ info->sample_rate_value = val;
+
+ /*
+ * Now force a snapshot with refresh: rewind the timestamp so that
+ * pac1934_retrieve_data() triggers a register snapshot and updates
+ * the refresh timer.
+ */
+ info->tstamp -= msecs_to_jiffies(PAC1934_MIN_POLLING_TIME_MS);
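+ /* wait roughly one conversion period at the previous (still active) rate, in usecs */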
+ ret = pac1934_retrieve_data(info, (1024 / old_samp_rate) * 1000);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "%s - cannot snapshot ctrl and measurement regs\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+ case IIO_CHAN_INFO_ENABLE:
+ scoped_guard(mutex, &info->lock) {
+ info->enable_energy[chan->channel - 1] = val ? true : false;
+ if (!val)
+ info->chip_reg_data.energy_sec_acc[chan->channel - 1] = 0;
+ }
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int pac1934_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, char *label)
+{
+ struct pac1934_chip_info *info = iio_priv(indio_dev);
+
+ switch (chan->address) {
+ case PAC1934_VBUS_1_ADDR:
+ case PAC1934_VBUS_2_ADDR:
+ case PAC1934_VBUS_3_ADDR:
+ case PAC1934_VBUS_4_ADDR:
+ return sysfs_emit(label, "%s_VBUS_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VBUS_AVG_1_ADDR:
+ case PAC1934_VBUS_AVG_2_ADDR:
+ case PAC1934_VBUS_AVG_3_ADDR:
+ case PAC1934_VBUS_AVG_4_ADDR:
+ return sysfs_emit(label, "%s_VBUS_AVG_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VSENSE_1_ADDR:
+ case PAC1934_VSENSE_2_ADDR:
+ case PAC1934_VSENSE_3_ADDR:
+ case PAC1934_VSENSE_4_ADDR:
+ return sysfs_emit(label, "%s_IBUS_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VSENSE_AVG_1_ADDR:
+ case PAC1934_VSENSE_AVG_2_ADDR:
+ case PAC1934_VSENSE_AVG_3_ADDR:
+ case PAC1934_VSENSE_AVG_4_ADDR:
+ return sysfs_emit(label, "%s_IBUS_AVG_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VPOWER_1_ADDR:
+ case PAC1934_VPOWER_2_ADDR:
+ case PAC1934_VPOWER_3_ADDR:
+ case PAC1934_VPOWER_4_ADDR:
+ return sysfs_emit(label, "%s_POWER_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ case PAC1934_VPOWER_ACC_1_ADDR:
+ case PAC1934_VPOWER_ACC_2_ADDR:
+ case PAC1934_VPOWER_ACC_3_ADDR:
+ case PAC1934_VPOWER_ACC_4_ADDR:
+ return sysfs_emit(label, "%s_ENERGY_%d\n",
+ info->labels[chan->scan_index],
+ chan->scan_index + 1);
+ }
+
+ return 0;
+}
+
+static void pac1934_work_periodic_rfsh(struct work_struct *work)
+{
+ struct pac1934_chip_info *info = TO_PAC1934_CHIP_INFO((struct delayed_work *)work);
+ struct device *dev = &info->client->dev;
+
+ dev_dbg(dev, "%s - Periodic refresh\n", __func__);
+
+ /* do a REFRESH, then read */
+ pac1934_reg_snapshot(info, true, PAC1934_REFRESH_REG_ADDR,
+ PAC1934_MIN_UPDATE_WAIT_TIME_US);
+
+ schedule_delayed_work(&info->work_chip_rfsh,
+ msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
+}
+
+static int pac1934_read_revision(struct pac1934_chip_info *info, u8 *buf)
+{
+ int ret;
+ struct i2c_client *client = info->client;
+
+ ret = i2c_smbus_read_i2c_block_data(client, PAC1934_PID_REG_ADDR,
+ PAC1934_ID_REG_LEN,
+ buf);
+ if (ret < 0) {
+ dev_err(&client->dev, "cannot read revision\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pac1934_chip_identify(struct pac1934_chip_info *info)
+{
+ u8 rev_info[PAC1934_ID_REG_LEN];
+ struct device *dev = &info->client->dev;
+ int ret = 0;
+
+ ret = pac1934_read_revision(info, (u8 *)rev_info);
+ if (ret)
+ return ret;
+
+ info->chip_variant = rev_info[PAC1934_PID_IDX];
+ info->chip_revision = rev_info[PAC1934_RID_IDX];
+
+ dev_dbg(dev, "Chip variant: 0x%02X\n", info->chip_variant);
+ dev_dbg(dev, "Chip revision: 0x%02X\n", info->chip_revision);
+
+ switch (info->chip_variant) {
+ case PAC1934_PID:
+ return PAC1934;
+ case PAC1933_PID:
+ return PAC1933;
+ case PAC1932_PID:
+ return PAC1932;
+ case PAC1931_PID:
+ return PAC1931;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * documentation related to the ACPI device definition
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ */
+static bool pac1934_acpi_parse_channel_config(struct i2c_client *client,
+ struct pac1934_chip_info *info)
+{
+ acpi_handle handle;
+ union acpi_object *rez;
+ struct device *dev = &client->dev;
+ unsigned short bi_dir_mask;
+ int idx, i;
+ guid_t guid;
+
+ handle = ACPI_HANDLE(dev);
+
+ guid_parse(PAC1934_DSM_UUID, &guid);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 0, PAC1934_ACPI_GET_NAMES_AND_MOHMS_VALS, NULL);
+ if (!rez)
+ return false;
+
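+ /*
+ * The returned package alternates rail-name strings and shunt values
+ * in milliohms, hence the pairwise iteration and the conversion to
+ * micro-ohms below.
+ */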
+ for (i = 0; i < rez->package.count; i += 2) {
+ idx = i / 2;
+ info->labels[idx] =
+ devm_kmemdup(dev, rez->package.elements[i].string.pointer,
+ (size_t)rez->package.elements[i].string.length + 1,
+ GFP_KERNEL);
+ info->labels[idx][rez->package.elements[i].string.length] = '\0';
+ info->shunts[idx] = rez->package.elements[i + 1].integer.value * 1000;
+ info->active_channels[idx] = (info->shunts[idx] != 0);
+ }
+
+ ACPI_FREE(rez);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 1, PAC1934_ACPI_GET_UOHMS_VALS, NULL);
+ if (!rez) {
+ /*
+ * initialize with default values: assume all channels are
+ * unidirectional (the mask is zero) and assign the default
+ * sampling rate
+ */
+ info->sample_rate_value = PAC1934_DEFAULT_CHIP_SAMP_SPEED_HZ;
+ return true;
+ }
+
+ for (i = 0; i < rez->package.count; i++) {
+ idx = i;
+ info->shunts[idx] = rez->package.elements[i].integer.value;
+ info->active_channels[idx] = (info->shunts[idx] != 0);
+ }
+
+ ACPI_FREE(rez);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 1, PAC1934_ACPI_GET_BIPOLAR_SETTINGS, NULL);
+ if (!rez)
+ return false;
+
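+ /*
+ * The bitmap packs two flags per channel, one in each nibble, with
+ * CH1 in the most significant bit of its nibble; a channel is
+ * treated as bidirectional if either of its flags is set.
+ */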
+ bi_dir_mask = rez->package.elements[0].integer.value;
+ info->bi_dir[0] = ((bi_dir_mask & (1 << 3)) | (bi_dir_mask & (1 << 7))) != 0;
+ info->bi_dir[1] = ((bi_dir_mask & (1 << 2)) | (bi_dir_mask & (1 << 6))) != 0;
+ info->bi_dir[2] = ((bi_dir_mask & (1 << 1)) | (bi_dir_mask & (1 << 5))) != 0;
+ info->bi_dir[3] = ((bi_dir_mask & (1 << 0)) | (bi_dir_mask & (1 << 4))) != 0;
+
+ ACPI_FREE(rez);
+
+ rez = acpi_evaluate_dsm(handle, &guid, 1, PAC1934_ACPI_GET_SAMP, NULL);
+ if (!rez)
+ return false;
+
+ info->sample_rate_value = rez->package.elements[0].integer.value;
+
+ ACPI_FREE(rez);
+
+ return true;
+}
+
+static bool pac1934_of_parse_channel_config(struct i2c_client *client,
+ struct pac1934_chip_info *info)
+{
+ struct fwnode_handle *node, *fwnode;
+ struct device *dev = &client->dev;
+ unsigned int current_channel;
+ int idx, ret;
+
+ info->sample_rate_value = 1024;
+ current_channel = 1;
+
+ fwnode = dev_fwnode(dev);
+ fwnode_for_each_available_child_node(fwnode, node) {
+ ret = fwnode_property_read_u32(node, "reg", &idx);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "reading invalid channel index\n");
+ goto err_fwnode;
+ }
+ /* the 'reg' value is 1-based (1 to 4) per the datasheet; convert to a 0-based index */
+ idx--;
+
+ if (current_channel >= (info->phys_channels + 1) ||
+ idx >= info->phys_channels || idx < 0) {
+ dev_err_probe(dev, -EINVAL,
+ "%s: invalid channel_index %d value\n",
+ fwnode_get_name(node), idx);
+ goto err_fwnode;
+ }
+
+ /* enable channel */
+ info->active_channels[idx] = true;
+
+ ret = fwnode_property_read_u32(node, "shunt-resistor-micro-ohms",
+ &info->shunts[idx]);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "%s: invalid shunt-resistor value: %d\n",
+ fwnode_get_name(node), info->shunts[idx]);
+ goto err_fwnode;
+ }
+
+ if (fwnode_property_present(node, "label")) {
+ ret = fwnode_property_read_string(node, "label",
+ (const char **)&info->labels[idx]);
+ if (ret) {
+ dev_err_probe(dev, ret,
+ "%s: invalid rail-name value\n",
+ fwnode_get_name(node));
+ goto err_fwnode;
+ }
+ }
+
+ info->bi_dir[idx] = fwnode_property_read_bool(node, "bipolar");
+
+ current_channel++;
+ }
+
+ return true;
+
+err_fwnode:
+ fwnode_handle_put(node);
+
+ return false;
+}
+
+static void pac1934_cancel_delayed_work(void *dwork)
+{
+ cancel_delayed_work_sync(dwork);
+}
+
+static int pac1934_chip_configure(struct pac1934_chip_info *info)
+{
+ int cnt, ret;
+ struct i2c_client *client = info->client;
+ u8 regs[PAC1934_CTRL_STATUS_INFO_LEN], idx, ctrl_reg;
+ u32 wait_time;
+
+ info->chip_reg_data.num_enabled_channels = 0;
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if (info->active_channels[cnt])
+ info->chip_reg_data.num_enabled_channels++;
+ }
+
+ /*
+ * read whatever information was gathered before the driver was
+ * loaded, establish which channels are enabled/disabled, and then
+ * set the information retrieval mode (using SKIP or not).
+ * Read the chip ID values
+ */
+ ret = i2c_smbus_read_i2c_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ ARRAY_SIZE(regs),
+ (u8 *)regs);
+ if (ret < 0) {
+ dev_err_probe(&client->dev, ret,
+ "%s - cannot read regs from 0x%02X\n",
+ __func__, PAC1934_CTRL_STAT_REGS_ADDR);
+ return ret;
+ }
+
+ /* write the CHANNEL_DIS and the NEG_PWR registers */
+ regs[PAC1934_CHANNEL_DIS_REG_OFF] =
+ FIELD_PREP(PAC1934_CHAN_DIS_CH1_OFF_MASK, info->active_channels[0] ? 0 : 1) |
+ FIELD_PREP(PAC1934_CHAN_DIS_CH2_OFF_MASK, info->active_channels[1] ? 0 : 1) |
+ FIELD_PREP(PAC1934_CHAN_DIS_CH3_OFF_MASK, info->active_channels[2] ? 0 : 1) |
+ FIELD_PREP(PAC1934_CHAN_DIS_CH4_OFF_MASK, info->active_channels[3] ? 0 : 1) |
+ FIELD_PREP(PAC1934_SMBUS_TIMEOUT_MASK, 0) |
+ FIELD_PREP(PAC1934_SMBUS_BYTECOUNT_MASK, 0) |
+ FIELD_PREP(PAC1934_SMBUS_NO_SKIP_MASK, 0);
+
+ regs[PAC1934_NEG_PWR_REG_OFF] =
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDI_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDI_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDI_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDI_MASK, info->bi_dir[3]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH1_BIDV_MASK, info->bi_dir[0]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH2_BIDV_MASK, info->bi_dir[1]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH3_BIDV_MASK, info->bi_dir[2]) |
+ FIELD_PREP(PAC1934_NEG_PWR_CH4_BIDV_MASK, info->bi_dir[3]);
+
+ /* no SLOW triggered REFRESH, clear POR */
+ regs[PAC1934_SLOW_REG_OFF] = 0;
+
+ ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ ARRAY_SIZE(regs), (u8 *)regs);
+ if (ret)
+ return ret;
+
+ /* Default sampling rate */
+ ctrl_reg = FIELD_PREP(PAC1934_CRTL_SAMPLE_RATE_MASK, PAC1934_SAMP_1024SPS);
+
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_REG_ADDR, ctrl_reg);
+ if (ret)
+ return ret;
+
+ /*
+ * send a REFRESH to the chip, so the new settings take place
+ * as well as resetting the accumulators
+ */
+ ret = i2c_smbus_write_byte(client, PAC1934_REFRESH_REG_ADDR);
+ if (ret) {
+ dev_err(&client->dev,
+ "%s - cannot send 0x%02X\n",
+ __func__, PAC1934_REFRESH_REG_ADDR);
+ return ret;
+ }
+
+ /*
+ * get the current (in-chip) sampling speed and compute the
+ * required timeout based on its value;
+ * the timeout is 1/sampling_speed
+ */
+ idx = regs[PAC1934_CTRL_ACT_REG_OFF] >> PAC1934_SAMPLE_RATE_SHIFT;
+ wait_time = (1024 / samp_rate_map_tbl[idx]) * 1000;
+
+ /*
+ * wait the maximum amount of time to be on the safe side;
+ * the longest wait corresponds to the lowest rate (8 sps)
+ */
+ usleep_range(wait_time, wait_time + 100);
+
+ INIT_DELAYED_WORK(&info->work_chip_rfsh, pac1934_work_periodic_rfsh);
+ /* Setup the latest moment for reading the regs before saturation */
+ schedule_delayed_work(&info->work_chip_rfsh,
+ msecs_to_jiffies(PAC1934_MAX_RFSH_LIMIT_MS));
+
+ return devm_add_action_or_reset(&client->dev, pac1934_cancel_delayed_work,
+ &info->work_chip_rfsh);
+}
+
+static int pac1934_prep_iio_channels(struct pac1934_chip_info *info, struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ch_sp;
+ int channel_size, attribute_count, cnt;
+ void *dyn_ch_struct, *tmp_data;
+ struct device *dev = &info->client->dev;
+
+ /* find out dynamically how many IIO channels we need */
+ attribute_count = 0;
+ channel_size = 0;
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if (!info->active_channels[cnt])
+ continue;
+
+ /* add the size of the properties of one chip physical channel */
+ channel_size += sizeof(pac1934_single_channel);
+ /* count how many enabled channels we have */
+ attribute_count += ARRAY_SIZE(pac1934_single_channel);
+ dev_dbg(dev, ":%s: Channel %d active\n", __func__, cnt + 1);
+ }
+
+ dyn_ch_struct = devm_kzalloc(dev, channel_size, GFP_KERNEL);
+ if (!dyn_ch_struct)
+ return -ENOMEM;
+
+ tmp_data = dyn_ch_struct;
+
+ /* populate the dynamic channels and make all the adjustments */
+ for (cnt = 0; cnt < info->phys_channels; cnt++) {
+ if (!info->active_channels[cnt])
+ continue;
+
+ memcpy(tmp_data, pac1934_single_channel, sizeof(pac1934_single_channel));
+ ch_sp = (struct iio_chan_spec *)tmp_data;
+ ch_sp[PAC1934_CH_ENERGY].channel = cnt + 1;
+ ch_sp[PAC1934_CH_ENERGY].scan_index = cnt;
+ ch_sp[PAC1934_CH_ENERGY].address = cnt + PAC1934_VPOWER_ACC_1_ADDR;
+ ch_sp[PAC1934_CH_POWER].channel = cnt + 1;
+ ch_sp[PAC1934_CH_POWER].scan_index = cnt;
+ ch_sp[PAC1934_CH_POWER].address = cnt + PAC1934_VPOWER_1_ADDR;
+ ch_sp[PAC1934_CH_VOLTAGE].channel = cnt + 1;
+ ch_sp[PAC1934_CH_VOLTAGE].scan_index = cnt;
+ ch_sp[PAC1934_CH_VOLTAGE].address = cnt + PAC1934_VBUS_1_ADDR;
+ ch_sp[PAC1934_CH_CURRENT].channel = cnt + 1;
+ ch_sp[PAC1934_CH_CURRENT].scan_index = cnt;
+ ch_sp[PAC1934_CH_CURRENT].address = cnt + PAC1934_VSENSE_1_ADDR;
+
+ /*
+ * In order to be able to use distinct labels for PAC1934_CH_VOLTAGE
+ * and PAC1934_CH_VOLTAGE_AVERAGE, respectively PAC1934_CH_CURRENT
+ * and PAC1934_CH_CURRENT_AVERAGE, we need different channel numbers.
+ * We add +5 (the maximum PAC channel count plus one).
+ */
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].channel = cnt + 5;
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].scan_index = cnt;
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].address = cnt + PAC1934_VBUS_AVG_1_ADDR;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].channel = cnt + 5;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].scan_index = cnt;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].address = cnt + PAC1934_VSENSE_AVG_1_ADDR;
+
+ /*
+ * now modify the parameters in all channels if the
+ * whole chip rail(channel) is bi-directional
+ */
+ if (info->bi_dir[cnt]) {
+ ch_sp[PAC1934_CH_ENERGY].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_ENERGY].scan_type.realbits = 47;
+ ch_sp[PAC1934_CH_POWER].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_POWER].scan_type.realbits = 27;
+ ch_sp[PAC1934_CH_VOLTAGE].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_VOLTAGE].scan_type.realbits = 15;
+ ch_sp[PAC1934_CH_CURRENT].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_CURRENT].scan_type.realbits = 15;
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_VOLTAGE_AVERAGE].scan_type.realbits = 15;
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].scan_type.sign = 's';
+ ch_sp[PAC1934_CH_CURRENT_AVERAGE].scan_type.realbits = 15;
+ }
+ tmp_data += sizeof(pac1934_single_channel);
+ }
+
+ /*
+ * send the updated dynamic channel structure information towards IIO
+ * prepare the required field for IIO class registration
+ */
+ indio_dev->num_channels = attribute_count;
+ indio_dev->channels = (const struct iio_chan_spec *)dyn_ch_struct;
+
+ return 0;
+}
+
+static IIO_DEVICE_ATTR(in_shunt_resistor1, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 0);
+static IIO_DEVICE_ATTR(in_shunt_resistor2, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 1);
+static IIO_DEVICE_ATTR(in_shunt_resistor3, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 2);
+static IIO_DEVICE_ATTR(in_shunt_resistor4, 0644,
+ pac1934_shunt_value_show, pac1934_shunt_value_store, 3);
+
+static int pac1934_prep_custom_attributes(struct pac1934_chip_info *info,
+ struct iio_dev *indio_dev)
+{
+ int i, active_channels_count = 0;
+ struct attribute **pac1934_custom_attr;
+ struct attribute_group *pac1934_group;
+ struct device *dev = &info->client->dev;
+
+ for (i = 0 ; i < info->phys_channels; i++)
+ if (info->active_channels[i])
+ active_channels_count++;
+
+ pac1934_group = devm_kzalloc(dev, sizeof(*pac1934_group), GFP_KERNEL);
+ if (!pac1934_group)
+ return -ENOMEM;
+
+ pac1934_custom_attr = devm_kcalloc(dev,
+ PAC1934_CUSTOM_ATTR_FOR_CHANNEL *
+ active_channels_count + 1,
+ sizeof(*pac1934_custom_attr),
+ GFP_KERNEL);
+ if (!pac1934_custom_attr)
+ return -ENOMEM;
+
+ i = 0;
+ if (info->active_channels[0])
+ pac1934_custom_attr[i++] = PAC1934_DEV_ATTR(in_shunt_resistor1);
+
+ if (info->active_channels[1])
+ pac1934_custom_attr[i++] = PAC1934_DEV_ATTR(in_shunt_resistor2);
+
+ if (info->active_channels[2])
+ pac1934_custom_attr[i++] = PAC1934_DEV_ATTR(in_shunt_resistor3);
+
+ if (info->active_channels[3])
+ pac1934_custom_attr[i] = PAC1934_DEV_ATTR(in_shunt_resistor4);
+
+ pac1934_group->attrs = pac1934_custom_attr;
+ info->iio_info.attrs = pac1934_group;
+
+ return 0;
+}
+
+static void pac1934_mutex_destroy(void *data)
+{
+ struct mutex *lock = data;
+
+ mutex_destroy(lock);
+}
+
+static const struct iio_info pac1934_info = {
+ .read_raw = pac1934_read_raw,
+ .write_raw = pac1934_write_raw,
+ .read_avail = pac1934_read_avail,
+ .read_label = pac1934_read_label,
+};
+
+static int pac1934_probe(struct i2c_client *client)
+{
+ struct pac1934_chip_info *info;
+ const struct pac1934_features *chip;
+ struct iio_dev *indio_dev;
+ int cnt, ret;
+ bool match = false;
+ struct device *dev = &client->dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ info = iio_priv(indio_dev);
+
+ info->client = client;
+
+ /* always start with energy accumulation enabled */
+ for (cnt = 0; cnt < PAC1934_MAX_NUM_CHANNELS; cnt++)
+ info->enable_energy[cnt] = true;
+
+ ret = pac1934_chip_identify(info);
+ if (ret < 0) {
+ /*
+ * If failed to identify the hardware based on internal
+ * registers, try using fallback compatible in device tree
+ * to deal with some newer part number.
+ */
+ chip = i2c_get_match_data(client);
+ if (!chip)
+ return -EINVAL;
+
+ info->phys_channels = chip->phys_channels;
+ indio_dev->name = chip->name;
+ } else {
+ info->phys_channels = pac1934_chip_config[ret].phys_channels;
+ indio_dev->name = pac1934_chip_config[ret].name;
+ }
+
+ if (acpi_match_device(dev->driver->acpi_match_table, dev))
+ match = pac1934_acpi_parse_channel_config(client, info);
+ else
+ /*
+ * This also makes it possible to register the device via ACPI
+ * PRP0001, using device tree properties.
+ */
+ match = pac1934_of_parse_channel_config(client, info);
+
+ if (!match)
+ return dev_err_probe(dev, -EINVAL,
+ "parameter parsing returned an error\n");
+
+ mutex_init(&info->lock);
+ ret = devm_add_action_or_reset(dev, pac1934_mutex_destroy,
+ &info->lock);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * now do any chip specific initialization (e.g. read/write some
+ * registers), enable/disable certain channels, and change the
+ * sampling rate to the requested value
+ */
+ ret = pac1934_chip_configure(info);
+ if (ret < 0)
+ return ret;
+
+ /* prepare the channel information */
+ ret = pac1934_prep_iio_channels(info, indio_dev);
+ if (ret < 0)
+ return ret;
+
+ info->iio_info = pac1934_info;
+ indio_dev->info = &info->iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = pac1934_prep_custom_attributes(info, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Can't configure custom attributes for PAC1934 device\n");
+
+ /*
+ * read whatever has been accumulated in the chip so far
+ * and reset the accumulators
+ */
+ ret = pac1934_reg_snapshot(info, true, PAC1934_REFRESH_REG_ADDR,
+ PAC1934_MIN_UPDATE_WAIT_TIME_US);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret,
+ "Can't register IIO device\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id pac1934_id[] = {
+ { .name = "pac1931", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1931] },
+ { .name = "pac1932", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1932] },
+ { .name = "pac1933", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1933] },
+ { .name = "pac1934", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pac1934_id);
+
+static const struct of_device_id pac1934_of_match[] = {
+ {
+ .compatible = "microchip,pac1931",
+ .data = &pac1934_chip_config[PAC1931]
+ },
+ {
+ .compatible = "microchip,pac1932",
+ .data = &pac1934_chip_config[PAC1932]
+ },
+ {
+ .compatible = "microchip,pac1933",
+ .data = &pac1934_chip_config[PAC1933]
+ },
+ {
+ .compatible = "microchip,pac1934",
+ .data = &pac1934_chip_config[PAC1934]
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, pac1934_of_match);
+
+/*
+ * using MCHP1930 to be compatible with BIOS ACPI. See example:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/OTH/ApplicationNotes/ApplicationNotes/PAC1934-Integration-Notes-for-Microsoft-Windows-10-and-Windows-11-Driver-Support-DS00002534.pdf
+ */
+static const struct acpi_device_id pac1934_acpi_match[] = {
+ { "MCHP1930", .driver_data = (kernel_ulong_t)&pac1934_chip_config[PAC1934] },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, pac1934_acpi_match);
+
+static struct i2c_driver pac1934_driver = {
+ .driver = {
+ .name = "pac1934",
+ .of_match_table = pac1934_of_match,
+ .acpi_match_table = pac1934_acpi_match
+ },
+ .probe = pac1934_probe,
+ .id_table = pac1934_id,
+};
+
+module_i2c_driver(pac1934_driver);
+
+MODULE_AUTHOR("Bogdan Bolocan <bogdan.bolocan@microchip.com>");
+MODULE_AUTHOR("Victor Tudose");
+MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>");
+MODULE_DESCRIPTION("IIO driver for PAC1934 Multi-Channel DC Power/Energy Monitor");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
index 01c5586df56d..c9d2c66434e4 100644
--- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
+++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
@@ -372,7 +372,6 @@ static const struct xoadc_channel pm8921_xoadc_channels[] = {
* @name: name of this channel
* @hwchan: pointer to hardware channel information (muxing & scaling settings)
* @calibration: whether to use absolute or ratiometric calibration
- * @scale_fn_type: scaling function type
* @decimation: 0,1,2,3
* @amux_ip_rsv: ratiometric scale value if using ratiometric
* calibration: 0, 1, 2, 4, 5.
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index dd94667a623b..bbe954a738c7 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -52,7 +52,7 @@
#define SARADC2_START BIT(4)
#define SARADC2_SINGLE_MODE BIT(5)
-#define SARADC2_CONV_CHANNELS GENMASK(15, 0)
+#define SARADC2_CONV_CHANNELS GENMASK(3, 0)
struct rockchip_saradc;
@@ -102,12 +102,12 @@ static void rockchip_saradc_start_v2(struct rockchip_saradc *info, int chn)
writel_relaxed(0xc, info->regs + SARADC_T_DAS_SOC);
writel_relaxed(0x20, info->regs + SARADC_T_PD_SOC);
val = FIELD_PREP(SARADC2_EN_END_INT, 1);
- val |= val << 16;
+ val |= SARADC2_EN_END_INT << 16;
writel_relaxed(val, info->regs + SARADC2_END_INT_EN);
val = FIELD_PREP(SARADC2_START, 1) |
FIELD_PREP(SARADC2_SINGLE_MODE, 1) |
FIELD_PREP(SARADC2_CONV_CHANNELS, chn);
- val |= val << 16;
+ val |= (SARADC2_START | SARADC2_SINGLE_MODE | SARADC2_CONV_CHANNELS) << 16;
writel(val, info->regs + SARADC2_CONV_CON);
}
@@ -450,16 +450,11 @@ static int rockchip_saradc_probe(struct platform_device *pdev)
* The reset should be an optional property, as it should work
* with old devicetrees as well
*/
- info->reset = devm_reset_control_get_exclusive(&pdev->dev,
- "saradc-apb");
+ info->reset = devm_reset_control_get_optional_exclusive(&pdev->dev,
+ "saradc-apb");
if (IS_ERR(info->reset)) {
ret = PTR_ERR(info->reset);
- if (ret != -ENOENT)
- return dev_err_probe(&pdev->dev, ret,
- "failed to get saradc-apb\n");
-
- dev_dbg(&pdev->dev, "no reset control found\n");
- info->reset = NULL;
+ return dev_err_probe(&pdev->dev, ret, "failed to get saradc-apb\n");
}
init_completion(&info->completion);
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index ad4cea6839b2..a5464737e527 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -39,6 +39,10 @@
#define RTQ6056_DEFAULT_CONFIG 0x4127
#define RTQ6056_CONT_ALLON 7
+#define RTQ6059_DEFAULT_CONFIG 0x3C47
+#define RTQ6059_VBUS_LSB_OFFSET 3
+#define RTQ6059_AVG_BASE 8
+
enum {
RTQ6056_CH_VSHUNT = 0,
RTQ6056_CH_VBUS,
@@ -47,19 +51,46 @@ enum {
RTQ6056_MAX_CHANNEL
};
+/*
+ * This enum describes the bitfields of the 16-bit CONFIG register (0x00),
+ * listed in field order from LSB to MSB:
+ * RTQ6053/6 is OPMODE->VSHUNTCT->VBUSCT->AVG->RESET
+ * RTQ6059 is OPMODE->SADC->BADC->PGA->RESET
+ */
enum {
F_OPMODE = 0,
F_VSHUNTCT,
+ F_RTQ6059_SADC = F_VSHUNTCT,
F_VBUSCT,
+ F_RTQ6059_BADC = F_VBUSCT,
F_AVG,
+ F_RTQ6059_PGA = F_AVG,
F_RESET,
F_MAX_FIELDS
};
+struct rtq6056_priv;
+
+struct richtek_dev_data {
+ bool fixed_samp_freq;
+ u8 vbus_offset;
+ int default_conv_time_us;
+ unsigned int default_config;
+ unsigned int calib_coefficient;
+ const int *avg_sample_list;
+ int avg_sample_list_length;
+ const struct reg_field *reg_fields;
+ const struct iio_chan_spec *channels;
+ int num_channels;
+ int (*read_scale)(struct iio_chan_spec const *ch, int *val, int *val2);
+ int (*set_average)(struct rtq6056_priv *priv, int val);
+};
+
struct rtq6056_priv {
struct device *dev;
struct regmap *regmap;
struct regmap_field *rm_fields[F_MAX_FIELDS];
+ const struct richtek_dev_data *devdata;
u32 shunt_resistor_uohm;
int vshuntct_us;
int vbusct_us;
@@ -74,6 +105,14 @@ static const struct reg_field rtq6056_reg_fields[F_MAX_FIELDS] = {
[F_RESET] = REG_FIELD(RTQ6056_REG_CONFIG, 15, 15),
};
+static const struct reg_field rtq6059_reg_fields[F_MAX_FIELDS] = {
+ [F_OPMODE] = REG_FIELD(RTQ6056_REG_CONFIG, 0, 2),
+ [F_RTQ6059_SADC] = REG_FIELD(RTQ6056_REG_CONFIG, 3, 6),
+ [F_RTQ6059_BADC] = REG_FIELD(RTQ6056_REG_CONFIG, 7, 10),
+ [F_RTQ6059_PGA] = REG_FIELD(RTQ6056_REG_CONFIG, 11, 12),
+ [F_RESET] = REG_FIELD(RTQ6056_REG_CONFIG, 15, 15),
+};
+
static const struct iio_chan_spec rtq6056_channels[RTQ6056_MAX_CHANNEL + 1] = {
{
.type = IIO_VOLTAGE,
@@ -151,10 +190,93 @@ static const struct iio_chan_spec rtq6056_channels[RTQ6056_MAX_CHANNEL + 1] = {
IIO_CHAN_SOFT_TIMESTAMP(RTQ6056_MAX_CHANNEL),
};
+/*
+ * Differences between RTQ6056 and RTQ6059:
+ * - Fixed sampling conversion time
+ * - Average sample numbers
+ * - Channel scale
+ * - Calibration coefficient
+ */
+static const struct iio_chan_spec rtq6059_channels[RTQ6056_MAX_CHANNEL + 1] = {
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 0,
+ .address = RTQ6056_REG_SHUNTVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .channel = 1,
+ .address = RTQ6056_REG_BUSVOLT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_POWER,
+ .indexed = 1,
+ .channel = 2,
+ .address = RTQ6056_REG_POWER,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .channel = 3,
+ .address = RTQ6056_REG_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ .scan_index = 3,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(RTQ6056_MAX_CHANNEL),
+};
+
static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
struct iio_chan_spec const *ch,
int *val)
{
+ const struct richtek_dev_data *devdata = priv->devdata;
struct device *dev = priv->dev;
unsigned int addr = ch->address;
unsigned int regval;
@@ -168,12 +290,21 @@ static int rtq6056_adc_read_channel(struct rtq6056_priv *priv,
return ret;
/* Power and VBUS is unsigned 16-bit, others are signed 16-bit */
- if (addr == RTQ6056_REG_BUSVOLT || addr == RTQ6056_REG_POWER)
+ switch (addr) {
+ case RTQ6056_REG_BUSVOLT:
+ regval >>= devdata->vbus_offset;
*val = regval;
- else
+ return IIO_VAL_INT;
+ case RTQ6056_REG_POWER:
+ *val = regval;
+ return IIO_VAL_INT;
+ case RTQ6056_REG_SHUNTVOLT:
+ case RTQ6056_REG_CURRENT:
*val = sign_extend32(regval, 16);
-
- return IIO_VAL_INT;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
static int rtq6056_adc_read_scale(struct iio_chan_spec const *ch, int *val,
@@ -199,6 +330,28 @@ static int rtq6056_adc_read_scale(struct iio_chan_spec const *ch, int *val,
}
}
+static int rtq6059_adc_read_scale(struct iio_chan_spec const *ch, int *val,
+ int *val2)
+{
+ switch (ch->address) {
+ case RTQ6056_REG_SHUNTVOLT:
+ /* VSHUNT lsb 10uV */
+ *val = 10000;
+ *val2 = 1000000;
+ return IIO_VAL_FRACTIONAL;
+ case RTQ6056_REG_BUSVOLT:
+ /* VBUS lsb 4mV */
+ *val = 4;
+ return IIO_VAL_INT;
+ case RTQ6056_REG_POWER:
+ /* Power lsb 20mW */
+ *val = 20;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
/*
* Sample frequency for channel VSHUNT and VBUS. The indices correspond
* with the bit value expected by the chip. And it can be found at
@@ -248,6 +401,10 @@ static const int rtq6056_avg_sample_list[] = {
1, 4, 16, 64, 128, 256, 512, 1024,
};
+static const int rtq6059_avg_sample_list[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+};
+
static int rtq6056_adc_set_average(struct rtq6056_priv *priv, int val)
{
unsigned int selector;
@@ -268,6 +425,30 @@ static int rtq6056_adc_set_average(struct rtq6056_priv *priv, int val)
return 0;
}
+static int rtq6059_adc_set_average(struct rtq6056_priv *priv, int val)
+{
+ unsigned int selector;
+ int ret;
+
+ if (val > 128 || val < 1)
+ return -EINVAL;
+
+ /* The supported average sample is 2^x (x from 0 to 7) */
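+ /* e.g. val = 128 -> fls(128) = 8 -> selector = 7; other values round down to a power of two */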
+ selector = fls(val) - 1;
+
+ ret = regmap_field_write(priv->rm_fields[F_RTQ6059_BADC],
+ RTQ6059_AVG_BASE + selector);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rm_fields[F_RTQ6059_SADC],
+ RTQ6059_AVG_BASE + selector);
+ if (ret)
+ return ret;
+
+ priv->avg_sample = BIT(selector);
+
+ return 0;
+}
+
static int rtq6056_adc_get_sample_freq(struct rtq6056_priv *priv,
struct iio_chan_spec const *ch, int *val)
{
@@ -292,12 +473,13 @@ static int rtq6056_adc_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
switch (mask) {
case IIO_CHAN_INFO_RAW:
return rtq6056_adc_read_channel(priv, chan, val);
case IIO_CHAN_INFO_SCALE:
- return rtq6056_adc_read_scale(chan, val, val2);
+ return devdata->read_scale(chan, val, val2);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*val = priv->avg_sample;
return IIO_VAL_INT;
@@ -313,6 +495,9 @@ static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
*vals = rtq6056_samp_freq_list;
@@ -320,9 +505,9 @@ static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
*length = ARRAY_SIZE(rtq6056_samp_freq_list);
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- *vals = rtq6056_avg_sample_list;
+ *vals = devdata->avg_sample_list;
+ *length = devdata->avg_sample_list_length;
*type = IIO_VAL_INT;
- *length = ARRAY_SIZE(rtq6056_avg_sample_list);
return IIO_AVAIL_LIST;
default:
return -EINVAL;
@@ -334,6 +519,7 @@ static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
int ret;
ret = iio_device_claim_direct_mode(indio_dev);
@@ -342,10 +528,15 @@ static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
+ if (devdata->fixed_samp_freq) {
+ ret = -EINVAL;
+ break;
+ }
+
ret = rtq6056_adc_set_samp_freq(priv, chan, val);
break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = rtq6056_adc_set_average(priv, val);
+ ret = devdata->set_average(priv, val);
break;
default:
ret = -EINVAL;
@@ -374,6 +565,7 @@ static int rtq6056_adc_read_label(struct iio_dev *indio_dev,
static int rtq6056_set_shunt_resistor(struct rtq6056_priv *priv,
int resistor_uohm)
{
+ const struct richtek_dev_data *devdata = priv->devdata;
unsigned int calib_val;
int ret;
@@ -382,8 +574,8 @@ static int rtq6056_set_shunt_resistor(struct rtq6056_priv *priv,
return -EINVAL;
}
- /* calibration = 5120000 / (Rshunt (uOhm) * current lsb (1mA)) */
- calib_val = 5120000 / resistor_uohm;
+ /* calibration = coefficient / (Rshunt (uOhm) * current lsb (1mA)) */
+ calib_val = devdata->calib_coefficient / resistor_uohm;
ret = regmap_write(priv->regmap, RTQ6056_REG_CALIBRATION, calib_val);
if (ret)
return ret;
@@ -450,6 +642,7 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct rtq6056_priv *priv = iio_priv(indio_dev);
+ const struct richtek_dev_data *devdata = priv->devdata;
struct device *dev = priv->dev;
struct {
u16 vals[RTQ6056_MAX_CHANNEL];
@@ -469,6 +662,9 @@ static irqreturn_t rtq6056_buffer_trigger_handler(int irq, void *p)
if (ret)
goto out;
+ if (addr == RTQ6056_REG_BUSVOLT)
+ raw >>= devdata->vbus_offset;
+
data.vals[i++] = raw;
}
@@ -528,20 +724,26 @@ static int rtq6056_probe(struct i2c_client *i2c)
struct rtq6056_priv *priv;
struct device *dev = &i2c->dev;
struct regmap *regmap;
+ const struct richtek_dev_data *devdata;
unsigned int vendor_id, shunt_resistor_uohm;
int ret;
if (!i2c_check_functionality(i2c->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
+ devdata = device_get_match_data(dev);
+ if (!devdata)
+ return dev_err_probe(dev, -EINVAL, "Invalid dev data\n");
+
indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
if (!indio_dev)
return -ENOMEM;
priv = iio_priv(indio_dev);
priv->dev = dev;
- priv->vshuntct_us = priv->vbusct_us = 1037;
+ priv->vshuntct_us = priv->vbusct_us = devdata->default_conv_time_us;
priv->avg_sample = 1;
+ priv->devdata = devdata;
i2c_set_clientdata(i2c, priv);
regmap = devm_regmap_init_i2c(i2c, &rtq6056_regmap_config);
@@ -561,15 +763,11 @@ static int rtq6056_probe(struct i2c_client *i2c)
"Invalid vendor id 0x%04x\n", vendor_id);
ret = devm_regmap_field_bulk_alloc(dev, regmap, priv->rm_fields,
- rtq6056_reg_fields, F_MAX_FIELDS);
+ devdata->reg_fields, F_MAX_FIELDS);
if (ret)
return dev_err_probe(dev, ret, "Failed to init regmap field\n");
- /*
- * By default, configure average sample as 1, bus and shunt conversion
- * time as 1037 microsecond, and operating mode to all on.
- */
- ret = regmap_write(regmap, RTQ6056_REG_CONFIG, RTQ6056_DEFAULT_CONFIG);
+ ret = regmap_write(regmap, RTQ6056_REG_CONFIG, devdata->default_config);
if (ret)
return dev_err_probe(dev, ret,
"Failed to enable continuous sensing\n");
@@ -598,8 +796,8 @@ static int rtq6056_probe(struct i2c_client *i2c)
indio_dev->name = "rtq6056";
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = rtq6056_channels;
- indio_dev->num_channels = ARRAY_SIZE(rtq6056_channels);
+ indio_dev->channels = devdata->channels;
+ indio_dev->num_channels = devdata->num_channels;
indio_dev->info = &rtq6056_info;
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
@@ -640,8 +838,45 @@ static int rtq6056_runtime_resume(struct device *dev)
static DEFINE_RUNTIME_DEV_PM_OPS(rtq6056_pm_ops, rtq6056_runtime_suspend,
rtq6056_runtime_resume, NULL);
+static const struct richtek_dev_data rtq6056_devdata = {
+ .default_conv_time_us = 1037,
+ .calib_coefficient = 5120000,
+ /*
+ * By default, configure average sample as 1, bus and shunt conversion
+ * time as 1037 microsecond, and operating mode to all on.
+ */
+ .default_config = RTQ6056_DEFAULT_CONFIG,
+ .avg_sample_list = rtq6056_avg_sample_list,
+ .avg_sample_list_length = ARRAY_SIZE(rtq6056_avg_sample_list),
+ .reg_fields = rtq6056_reg_fields,
+ .channels = rtq6056_channels,
+ .num_channels = ARRAY_SIZE(rtq6056_channels),
+ .read_scale = rtq6056_adc_read_scale,
+ .set_average = rtq6056_adc_set_average,
+};
+
+static const struct richtek_dev_data rtq6059_devdata = {
+ .fixed_samp_freq = true,
+ .vbus_offset = RTQ6059_VBUS_LSB_OFFSET,
+ .default_conv_time_us = 532,
+ .calib_coefficient = 40960000,
+ /*
+ * By default, configure average sample as 1, bus and shunt conversion
+ * time as 532 microsecond, and operating mode to all on.
+ */
+ .default_config = RTQ6059_DEFAULT_CONFIG,
+ .avg_sample_list = rtq6059_avg_sample_list,
+ .avg_sample_list_length = ARRAY_SIZE(rtq6059_avg_sample_list),
+ .reg_fields = rtq6059_reg_fields,
+ .channels = rtq6059_channels,
+ .num_channels = ARRAY_SIZE(rtq6059_channels),
+ .read_scale = rtq6059_adc_read_scale,
+ .set_average = rtq6059_adc_set_average,
+};
+
static const struct of_device_id rtq6056_device_match[] = {
- { .compatible = "richtek,rtq6056" },
+ { .compatible = "richtek,rtq6056", .data = &rtq6056_devdata },
+ { .compatible = "richtek,rtq6059", .data = &rtq6059_devdata },
{}
};
MODULE_DEVICE_TABLE(of, rtq6056_device_match);
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index c82a161630e1..69fcbbc7e418 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -293,13 +293,11 @@ static const struct of_device_id adc108s102_of_match[] = {
};
MODULE_DEVICE_TABLE(of, adc108s102_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id adc108s102_acpi_ids[] = {
{ "INT3495", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, adc108s102_acpi_ids);
-#endif
static const struct spi_device_id adc108s102_id[] = {
{ "adc108s102", 0 },
@@ -311,7 +309,7 @@ static struct spi_driver adc108s102_driver = {
.driver = {
.name = "adc108s102",
.of_match_table = adc108s102_of_match,
- .acpi_match_table = ACPI_PTR(adc108s102_acpi_ids),
+ .acpi_match_table = adc108s102_acpi_ids,
},
.probe = adc108s102_probe,
.id_table = adc108s102_id,
diff --git a/drivers/iio/adc/ti-ads1015.c b/drivers/iio/adc/ti-ads1015.c
index 6799ea49dbc7..6ae967e4d8fa 100644
--- a/drivers/iio/adc/ti-ads1015.c
+++ b/drivers/iio/adc/ti-ads1015.c
@@ -925,7 +925,7 @@ static int ads1015_client_get_channels_config(struct i2c_client *client)
if (!fwnode_property_read_u32(node, "ti,gain", &pval)) {
pga = pval;
- if (pga > 6) {
+ if (pga > 5) {
dev_err(dev, "invalid gain on %pfw\n", node);
fwnode_handle_put(node);
return -EINVAL;
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
new file mode 100644
index 000000000000..1d1eaba3d6d1
--- /dev/null
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI ADS1298 chip family driver
+ * Copyright (C) 2023 - 2024 Topic Embedded Products
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/log2.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/kfifo_buf.h>
+
+#include <asm/unaligned.h>
+
+/* Commands */
+#define ADS1298_CMD_WAKEUP 0x02
+#define ADS1298_CMD_STANDBY 0x04
+#define ADS1298_CMD_RESET 0x06
+#define ADS1298_CMD_START 0x08
+#define ADS1298_CMD_STOP 0x0a
+#define ADS1298_CMD_RDATAC 0x10
+#define ADS1298_CMD_SDATAC 0x11
+#define ADS1298_CMD_RDATA 0x12
+#define ADS1298_CMD_RREG 0x20
+#define ADS1298_CMD_WREG 0x40
+
+/* Registers */
+#define ADS1298_REG_ID 0x00
+#define ADS1298_MASK_ID_FAMILY GENMASK(7, 3)
+#define ADS1298_MASK_ID_CHANNELS GENMASK(2, 0)
+#define ADS1298_ID_FAMILY_ADS129X 0x90
+#define ADS1298_ID_FAMILY_ADS129XR 0xd0
+
+#define ADS1298_REG_CONFIG1 0x01
+#define ADS1298_MASK_CONFIG1_HR BIT(7)
+#define ADS1298_MASK_CONFIG1_DR GENMASK(2, 0)
+#define ADS1298_SHIFT_DR_HR 6
+#define ADS1298_SHIFT_DR_LP 7
+#define ADS1298_LOWEST_DR 0x06
+
+#define ADS1298_REG_CONFIG2 0x02
+#define ADS1298_MASK_CONFIG2_RESERVED BIT(6)
+#define ADS1298_MASK_CONFIG2_WCT_CHOP BIT(5)
+#define ADS1298_MASK_CONFIG2_INT_TEST BIT(4)
+#define ADS1298_MASK_CONFIG2_TEST_AMP BIT(2)
+#define ADS1298_MASK_CONFIG2_TEST_FREQ_DC GENMASK(1, 0)
+#define ADS1298_MASK_CONFIG2_TEST_FREQ_SLOW 0
+#define ADS1298_MASK_CONFIG2_TEST_FREQ_FAST BIT(0)
+
+#define ADS1298_REG_CONFIG3 0x03
+#define ADS1298_MASK_CONFIG3_PWR_REFBUF BIT(7)
+#define ADS1298_MASK_CONFIG3_RESERVED BIT(6)
+#define ADS1298_MASK_CONFIG3_VREF_4V BIT(5)
+
+#define ADS1298_REG_LOFF 0x04
+#define ADS1298_REG_CHnSET(n) (0x05 + n)
+#define ADS1298_MASK_CH_PD BIT(7)
+#define ADS1298_MASK_CH_PGA GENMASK(6, 4)
+#define ADS1298_MASK_CH_MUX GENMASK(2, 0)
+
+#define ADS1298_REG_LOFF_STATP 0x12
+#define ADS1298_REG_LOFF_STATN 0x13
+#define ADS1298_REG_CONFIG4 0x17
+#define ADS1298_MASK_CONFIG4_SINGLE_SHOT BIT(3)
+
+#define ADS1298_REG_WCT1 0x18
+#define ADS1298_REG_WCT2 0x19
+
+#define ADS1298_MAX_CHANNELS 8
+#define ADS1298_BITS_PER_SAMPLE 24
+#define ADS1298_CLK_RATE_HZ 2048000
+#define ADS1298_CLOCKS_TO_USECS(x) \
+ (DIV_ROUND_UP((x) * MICROHZ_PER_HZ, ADS1298_CLK_RATE_HZ))
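+/* Converts a count of 2.048 MHz device clock cycles to microseconds, rounding up */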
+/*
+ * Read/write register commands require 4 clocks to decode; for SPI speeds
+ * above 2x the clock rate this would require extra time between the command
+ * byte and the data. It is much simpler to just limit the SPI transfer
+ * speed while doing register access.
+ */
+#define ADS1298_SPI_BUS_SPEED_SLOW ADS1298_CLK_RATE_HZ
+/* For reading and writing registers, we need a 3-byte buffer */
+#define ADS1298_SPI_CMD_BUFFER_SIZE 3
+/* Outputs status word and 'n' 24-bit samples, plus the command byte */
+#define ADS1298_SPI_RDATA_BUFFER_SIZE(n) (((n) + 1) * 3 + 1)
+#define ADS1298_SPI_RDATA_BUFFER_SIZE_MAX \
+ ADS1298_SPI_RDATA_BUFFER_SIZE(ADS1298_MAX_CHANNELS)
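+/* e.g. (8 + 1) * 3 + 1 = 28 bytes for the 8-channel parts */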
+
+struct ads1298_private {
+ const struct ads1298_chip_info *chip_info;
+ struct spi_device *spi;
+ struct regulator *reg_avdd;
+ struct regulator *reg_vref;
+ struct clk *clk;
+ struct regmap *regmap;
+ struct completion completion;
+ struct iio_trigger *trig;
+ struct spi_transfer rdata_xfer;
+ struct spi_message rdata_msg;
+ spinlock_t irq_busy_lock; /* Handshake between SPI and DRDY irqs */
+ /*
+ * rdata_xfer_busy increments when a DRDY occurs and decrements when SPI
+ * completion is reported. Hence its meaning is:
+ * 0 = Waiting for DRDY interrupt
+ * 1 = SPI transfer in progress
+ * 2 = DRDY during SPI transfer, start another transfer on completion
+ * >2 = Multiple DRDY during transfer, lost rdata_xfer_busy - 2 samples
+ */
+ unsigned int rdata_xfer_busy;
+
+ /* Temporary storage for demuxing data after SPI transfer */
+ u32 bounce_buffer[ADS1298_MAX_CHANNELS];
+
+ /* For synchronous SPI exchanges (read/write registers) */
+ u8 cmd_buffer[ADS1298_SPI_CMD_BUFFER_SIZE] __aligned(IIO_DMA_MINALIGN);
+
+ /* Buffer used for incoming SPI data */
+ u8 rx_buffer[ADS1298_SPI_RDATA_BUFFER_SIZE_MAX];
+ /* Contains the RDATA command and zeroes to clock out */
+ u8 tx_buffer[ADS1298_SPI_RDATA_BUFFER_SIZE_MAX];
+};
+
+/* Three bytes per sample in RX buffer, starting at offset 4 */
+#define ADS1298_OFFSET_IN_RX_BUFFER(index) (3 * (index) + 4)
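+/*
+ * The 4-byte offset covers the byte clocked in while the RDATA command is
+ * sent plus the 3-byte status word that precedes the sample data.
+ */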
+
+#define ADS1298_CHAN(index) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = index, \
+ .address = ADS1298_OFFSET_IN_RX_BUFFER(index), \
+ .info_mask_separate = \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = ADS1298_BITS_PER_SAMPLE, \
+ .storagebits = 32, \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+static const struct iio_chan_spec ads1298_channels[] = {
+ ADS1298_CHAN(0),
+ ADS1298_CHAN(1),
+ ADS1298_CHAN(2),
+ ADS1298_CHAN(3),
+ ADS1298_CHAN(4),
+ ADS1298_CHAN(5),
+ ADS1298_CHAN(6),
+ ADS1298_CHAN(7),
+};
+
+static int ads1298_write_cmd(struct ads1298_private *priv, u8 command)
+{
+ struct spi_transfer xfer = {
+ .tx_buf = priv->cmd_buffer,
+ .rx_buf = priv->cmd_buffer,
+ .len = 1,
+ .speed_hz = ADS1298_SPI_BUS_SPEED_SLOW,
+ .delay = {
+ .value = 2,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ };
+
+ priv->cmd_buffer[0] = command;
+
+ return spi_sync_transfer(priv->spi, &xfer, 1);
+}
+
+static int ads1298_read_one(struct ads1298_private *priv, int chan_index)
+{
+ int ret;
+
+ /* Enable the channel */
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CHnSET(chan_index),
+ ADS1298_MASK_CH_PD, 0);
+ if (ret)
+ return ret;
+
+ /* Enable single-shot mode, so we don't need to send a STOP */
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CONFIG4,
+ ADS1298_MASK_CONFIG4_SINGLE_SHOT,
+ ADS1298_MASK_CONFIG4_SINGLE_SHOT);
+ if (ret)
+ return ret;
+
+ reinit_completion(&priv->completion);
+
+ ret = ads1298_write_cmd(priv, ADS1298_CMD_START);
+ if (ret < 0) {
+ dev_err(&priv->spi->dev, "CMD_START error: %d\n", ret);
+ return ret;
+ }
+
+	/* At the lowest rate (250 SPS) a conversion takes 4 ms; 50 ms is ample */
+ ret = wait_for_completion_timeout(&priv->completion, msecs_to_jiffies(50));
+ if (!ret)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int ads1298_get_samp_freq(struct ads1298_private *priv, int *val)
+{
+ unsigned long rate;
+ unsigned int cfg;
+ int ret;
+
+ ret = regmap_read(priv->regmap, ADS1298_REG_CONFIG1, &cfg);
+ if (ret)
+ return ret;
+
+ if (priv->clk)
+ rate = clk_get_rate(priv->clk);
+ else
+ rate = ADS1298_CLK_RATE_HZ;
+ if (!rate)
+ return -EINVAL;
+
+ /* Data rate shift depends on HR/LP mode */
+ if (cfg & ADS1298_MASK_CONFIG1_HR)
+ rate >>= ADS1298_SHIFT_DR_HR;
+ else
+ rate >>= ADS1298_SHIFT_DR_LP;
+
+ *val = rate >> (cfg & ADS1298_MASK_CONFIG1_DR);
+
+ return IIO_VAL_INT;
+}
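+
+/*
+ * With the default 2.048 MHz clock, HR mode gives 2048000 >> 6 = 32 kSPS at
+ * DR = 0 down to 500 SPS at DR = 6; LP mode shifts by 7 instead, halving
+ * each rate (250 SPS minimum).
+ */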
+
+static int ads1298_set_samp_freq(struct ads1298_private *priv, int val)
+{
+ unsigned long rate;
+ unsigned int factor;
+ unsigned int cfg;
+
+ if (priv->clk)
+ rate = clk_get_rate(priv->clk);
+ else
+ rate = ADS1298_CLK_RATE_HZ;
+ if (!rate)
+ return -EINVAL;
+ if (val <= 0)
+ return -EINVAL;
+
+ factor = (rate >> ADS1298_SHIFT_DR_HR) / val;
+ if (factor >= BIT(ADS1298_SHIFT_DR_LP))
+ cfg = ADS1298_LOWEST_DR;
+ else if (factor)
+ cfg = ADS1298_MASK_CONFIG1_HR | ilog2(factor); /* Use HR mode */
+ else
+ cfg = ADS1298_MASK_CONFIG1_HR; /* Fastest possible */
+
+ return regmap_update_bits(priv->regmap, ADS1298_REG_CONFIG1,
+ ADS1298_MASK_CONFIG1_HR | ADS1298_MASK_CONFIG1_DR,
+ cfg);
+}
+
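+/* Gain corresponding to each value of the 3-bit PGA field in CHnSET */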
+static const u8 ads1298_pga_settings[] = { 6, 1, 2, 3, 4, 8, 12 };
+
+static int ads1298_get_scale(struct ads1298_private *priv,
+ int channel, int *val, int *val2)
+{
+ int ret;
+ unsigned int regval;
+ u8 gain;
+
+ if (priv->reg_vref) {
+ ret = regulator_get_voltage(priv->reg_vref);
+ if (ret < 0)
+ return ret;
+
+ *val = ret / MILLI; /* Convert to millivolts */
+ } else {
+ ret = regmap_read(priv->regmap, ADS1298_REG_CONFIG3, &regval);
+ if (ret)
+ return ret;
+
+		/* Reference in millivolts */
+ *val = regval & ADS1298_MASK_CONFIG3_VREF_4V ? 4000 : 2400;
+ }
+
+ ret = regmap_read(priv->regmap, ADS1298_REG_CHnSET(channel), &regval);
+ if (ret)
+ return ret;
+
+ gain = ads1298_pga_settings[FIELD_GET(ADS1298_MASK_CH_PGA, regval)];
+ *val /= gain; /* Full scale is VREF / gain */
+
+ *val2 = ADS1298_BITS_PER_SAMPLE - 1; /* Signed, hence the -1 */
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
+static int ads1298_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ads1298_read_one(priv, chan->scan_index);
+
+ iio_device_release_direct_mode(indio_dev);
+
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(get_unaligned_be24(priv->rx_buffer + chan->address),
+ ADS1298_BITS_PER_SAMPLE - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return ads1298_get_scale(priv, chan->channel, val, val2);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ads1298_get_samp_freq(priv, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = regmap_read(priv->regmap, ADS1298_REG_CONFIG1, val);
+ if (ret)
+ return ret;
+
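+		/* Decimation is 16 at DR = 0 and doubles with each DR step */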
+ *val = 16 << (*val & ADS1298_MASK_CONFIG1_DR);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1298_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ads1298_set_samp_freq(priv, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads1298_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct ads1298_private *priv = context;
+ struct spi_transfer reg_write_xfer = {
+ .tx_buf = priv->cmd_buffer,
+ .rx_buf = priv->cmd_buffer,
+ .len = 3,
+ .speed_hz = ADS1298_SPI_BUS_SPEED_SLOW,
+ .delay = {
+ .value = 2,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ };
+
+ priv->cmd_buffer[0] = ADS1298_CMD_WREG | reg;
+ priv->cmd_buffer[1] = 0; /* Number of registers to be written - 1 */
+ priv->cmd_buffer[2] = val;
+
+ return spi_sync_transfer(priv->spi, &reg_write_xfer, 1);
+}
+
+static int ads1298_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct ads1298_private *priv = context;
+ struct spi_transfer reg_read_xfer = {
+ .tx_buf = priv->cmd_buffer,
+ .rx_buf = priv->cmd_buffer,
+ .len = 3,
+ .speed_hz = ADS1298_SPI_BUS_SPEED_SLOW,
+ .delay = {
+ .value = 2,
+ .unit = SPI_DELAY_UNIT_USECS,
+ },
+ };
+ int ret;
+
+ priv->cmd_buffer[0] = ADS1298_CMD_RREG | reg;
+ priv->cmd_buffer[1] = 0; /* Number of registers to be read - 1 */
+ priv->cmd_buffer[2] = 0;
+
+ ret = spi_sync_transfer(priv->spi, &reg_read_xfer, 1);
+ if (ret)
+ return ret;
+
+ *val = priv->cmd_buffer[2];
+
+ return 0;
+}
+
+static int ads1298_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(priv->regmap, reg, readval);
+
+ return regmap_write(priv->regmap, reg, writeval);
+}
+
+static void ads1298_rdata_unmark_busy(struct ads1298_private *priv)
+{
+ /* Notify we're no longer waiting for the SPI transfer to complete */
+ guard(spinlock_irqsave)(&priv->irq_busy_lock);
+ priv->rdata_xfer_busy = 0;
+}
+
+static int ads1298_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ unsigned int val;
+ int ret;
+ int i;
+
+ /* Make the interrupt routines start with a clean slate */
+ ads1298_rdata_unmark_busy(priv);
+
+ /* Configure power-down bits to match scan mask */
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ val = test_bit(i, scan_mask) ? 0 : ADS1298_MASK_CH_PD;
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CHnSET(i),
+ ADS1298_MASK_CH_PD, val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ads1298_info = {
+ .read_raw = &ads1298_read_raw,
+ .write_raw = &ads1298_write_raw,
+ .update_scan_mode = &ads1298_update_scan_mode,
+ .debugfs_reg_access = &ads1298_reg_access,
+};
+
+static void ads1298_rdata_release_busy_or_restart(struct ads1298_private *priv)
+{
+ guard(spinlock_irqsave)(&priv->irq_busy_lock);
+
+ if (priv->rdata_xfer_busy > 1) {
+ /*
+ * DRDY interrupt occurred before SPI completion. Start a new
+ * SPI transaction now to retrieve the data that wasn't latched
+ * into the ADS1298 chip's transfer buffer yet.
+ */
+ spi_async(priv->spi, &priv->rdata_msg);
+ /*
+ * If more than one DRDY took place, there was an overrun. Since
+ * the sample is already lost, reset the counter to 1 so that
+ * we will wait for a DRDY interrupt after this SPI transaction.
+ */
+ priv->rdata_xfer_busy = 1;
+ } else {
+ /* No pending data, wait for DRDY */
+ priv->rdata_xfer_busy = 0;
+ }
+}
+
+/* Called from SPI completion interrupt handler */
+static void ads1298_rdata_complete(void *context)
+{
+ struct iio_dev *indio_dev = context;
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ int scan_index;
+ u32 *bounce = priv->bounce_buffer;
+
+ if (!iio_buffer_enabled(indio_dev)) {
+ /*
+		 * For a single-transfer (direct mode) read we are kept in direct
+		 * mode until completion, avoiding a race with buffered I/O.
+ */
+ ads1298_rdata_unmark_busy(priv);
+ complete(&priv->completion);
+ return;
+ }
+
+ /* Demux the channel data into our bounce buffer */
+ for_each_set_bit(scan_index, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ const struct iio_chan_spec *scan_chan =
+ &indio_dev->channels[scan_index];
+ const u8 *data = priv->rx_buffer + scan_chan->address;
+
+ *bounce++ = get_unaligned_be24(data);
+ }
+
+ /* rx_buffer can be overwritten from this point on */
+ ads1298_rdata_release_busy_or_restart(priv);
+
+ iio_push_to_buffers(indio_dev, priv->bounce_buffer);
+}
+
+static irqreturn_t ads1298_interrupt(int irq, void *dev_id)
+{
+ struct iio_dev *indio_dev = dev_id;
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ unsigned int wasbusy;
+
+ guard(spinlock_irqsave)(&priv->irq_busy_lock);
+
+ wasbusy = priv->rdata_xfer_busy++;
+ /* When no SPI transfer in transit, start one now */
+ if (!wasbusy)
+ spi_async(priv->spi, &priv->rdata_msg);
+
+ return IRQ_HANDLED;
+};
+
+static int ads1298_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ int ret;
+
+ /* Disable single-shot mode */
+ ret = regmap_update_bits(priv->regmap, ADS1298_REG_CONFIG4,
+ ADS1298_MASK_CONFIG4_SINGLE_SHOT, 0);
+ if (ret)
+ return ret;
+
+ return ads1298_write_cmd(priv, ADS1298_CMD_START);
+}
+
+static int ads1298_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+
+ return ads1298_write_cmd(priv, ADS1298_CMD_STOP);
+}
+
+static const struct iio_buffer_setup_ops ads1298_setup_ops = {
+ .postenable = &ads1298_buffer_postenable,
+ .predisable = &ads1298_buffer_predisable,
+};
+
+static void ads1298_reg_disable(void *reg)
+{
+ regulator_disable(reg);
+}
+
+static const struct regmap_range ads1298_regmap_volatile_range[] = {
+ regmap_reg_range(ADS1298_REG_LOFF_STATP, ADS1298_REG_LOFF_STATN),
+};
+
+static const struct regmap_access_table ads1298_regmap_volatile = {
+ .yes_ranges = ads1298_regmap_volatile_range,
+ .n_yes_ranges = ARRAY_SIZE(ads1298_regmap_volatile_range),
+};
+
+static const struct regmap_config ads1298_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .reg_read = ads1298_reg_read,
+ .reg_write = ads1298_reg_write,
+ .max_register = ADS1298_REG_WCT2,
+ .volatile_table = &ads1298_regmap_volatile,
+ .cache_type = REGCACHE_MAPLE,
+};
+
+static int ads1298_init(struct iio_dev *indio_dev)
+{
+ struct ads1298_private *priv = iio_priv(indio_dev);
+ struct device *dev = &priv->spi->dev;
+ const char *suffix;
+ unsigned int val;
+ int ret;
+
+ /* Device initializes into RDATAC mode, which we don't want */
+ ret = ads1298_write_cmd(priv, ADS1298_CMD_SDATAC);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(priv->regmap, ADS1298_REG_ID, &val);
+ if (ret)
+ return ret;
+
+ /* Fill in name and channel count based on what the chip told us */
+ indio_dev->num_channels = 4 + 2 * (val & ADS1298_MASK_ID_CHANNELS);
+ switch (val & ADS1298_MASK_ID_FAMILY) {
+ case ADS1298_ID_FAMILY_ADS129X:
+ suffix = "";
+ break;
+ case ADS1298_ID_FAMILY_ADS129XR:
+ suffix = "r";
+ break;
+ default:
+ return dev_err_probe(dev, -ENODEV, "Unknown ID: 0x%x\n", val);
+ }
+ indio_dev->name = devm_kasprintf(dev, GFP_KERNEL, "ads129%u%s",
+ indio_dev->num_channels, suffix);
+
+ /* Enable internal test signal, double amplitude, double frequency */
+ ret = regmap_write(priv->regmap, ADS1298_REG_CONFIG2,
+ ADS1298_MASK_CONFIG2_RESERVED |
+ ADS1298_MASK_CONFIG2_INT_TEST |
+ ADS1298_MASK_CONFIG2_TEST_AMP |
+ ADS1298_MASK_CONFIG2_TEST_FREQ_FAST);
+ if (ret)
+ return ret;
+
+ val = ADS1298_MASK_CONFIG3_RESERVED; /* Must write 1 always */
+ if (!priv->reg_vref) {
+ /* Enable internal reference */
+ val |= ADS1298_MASK_CONFIG3_PWR_REFBUF;
+ /* Use 4V VREF when power supply is at least 4.4V */
+ if (regulator_get_voltage(priv->reg_avdd) >= 4400000)
+ val |= ADS1298_MASK_CONFIG3_VREF_4V;
+ }
+ return regmap_write(priv->regmap, ADS1298_REG_CONFIG3, val);
+}
+
+static int ads1298_probe(struct spi_device *spi)
+{
+ struct ads1298_private *priv;
+ struct iio_dev *indio_dev;
+ struct device *dev = &spi->dev;
+ struct gpio_desc *reset_gpio;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+
+ /* Reset to be asserted before enabling clock and power */
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(reset_gpio),
+ "Cannot get reset GPIO\n");
+
+ /* VREF can be supplied externally, otherwise use internal reference */
+ priv->reg_vref = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(priv->reg_vref)) {
+ if (PTR_ERR(priv->reg_vref) != -ENODEV)
+ return dev_err_probe(dev, PTR_ERR(priv->reg_vref),
+ "Failed to get vref regulator\n");
+
+ priv->reg_vref = NULL;
+ } else {
+ ret = regulator_enable(priv->reg_vref);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, ads1298_reg_disable, priv->reg_vref);
+ if (ret)
+ return ret;
+ }
+
+ priv->clk = devm_clk_get_optional_enabled(dev, "clk");
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clk\n");
+
+ priv->reg_avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(priv->reg_avdd))
+ return dev_err_probe(dev, PTR_ERR(priv->reg_avdd),
+ "Failed to get avdd regulator\n");
+
+ ret = regulator_enable(priv->reg_avdd);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable avdd regulator\n");
+
+ ret = devm_add_action_or_reset(dev, ads1298_reg_disable, priv->reg_avdd);
+ if (ret)
+ return ret;
+
+ priv->spi = spi;
+ init_completion(&priv->completion);
+ spin_lock_init(&priv->irq_busy_lock);
+ priv->regmap = devm_regmap_init(dev, NULL, priv, &ads1298_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return PTR_ERR(priv->regmap);
+
+ indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
+ indio_dev->channels = ads1298_channels;
+ indio_dev->info = &ads1298_info;
+
+ if (reset_gpio) {
+ /*
+ * Deassert reset now that clock and power are active.
+ * Minimum reset pulsewidth is 2 clock cycles.
+ */
+ fsleep(ADS1298_CLOCKS_TO_USECS(2));
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ } else {
+ ret = ads1298_write_cmd(priv, ADS1298_CMD_RESET);
+ if (ret)
+ return dev_err_probe(dev, ret, "RESET failed\n");
+ }
+ /* Wait 18 clock cycles for reset command to complete */
+ fsleep(ADS1298_CLOCKS_TO_USECS(18));
+
+ ret = ads1298_init(indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Init failed\n");
+
+ priv->tx_buffer[0] = ADS1298_CMD_RDATA;
+ priv->rdata_xfer.tx_buf = priv->tx_buffer;
+ priv->rdata_xfer.rx_buf = priv->rx_buffer;
+ priv->rdata_xfer.len = ADS1298_SPI_RDATA_BUFFER_SIZE(indio_dev->num_channels);
+ /* Must keep CS low for 4 clocks */
+ priv->rdata_xfer.delay.value = 2;
+ priv->rdata_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
+ spi_message_init_with_transfers(&priv->rdata_msg, &priv->rdata_xfer, 1);
+ priv->rdata_msg.complete = &ads1298_rdata_complete;
+ priv->rdata_msg.context = indio_dev;
+
+ ret = devm_request_irq(dev, spi->irq, &ads1298_interrupt,
+ IRQF_TRIGGER_FALLING, indio_dev->name,
+ indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, &ads1298_setup_ops);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct spi_device_id ads1298_id[] = {
+ { "ads1298" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ads1298_id);
+
+static const struct of_device_id ads1298_of_table[] = {
+ { .compatible = "ti,ads1298" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ads1298_of_table);
+
+static struct spi_driver ads1298_driver = {
+ .driver = {
+ .name = "ads1298",
+ .of_match_table = ads1298_of_table,
+ },
+ .probe = ads1298_probe,
+ .id_table = ads1298_id,
+};
+module_spi_driver(ads1298_driver);
+
+MODULE_AUTHOR("Mike Looijmans <mike.looijmans@topic.nl>");
+MODULE_DESCRIPTION("TI ADS1298 ADC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index ef06a897421a..9440a268a78c 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -11,7 +11,7 @@
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index ed4d72922696..2ee4c0d70281 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -2,9 +2,10 @@
/*
* HMC425A and similar Gain Amplifiers
*
- * Copyright 2020 Analog Devices Inc.
+ * Copyright 2020, 2024 Analog Devices Inc.
*/
+#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -12,6 +13,7 @@
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -20,10 +22,24 @@
#include <linux/regulator/consumer.h>
#include <linux/sysfs.h>
+/*
+ * The LTC6373 amplifier supports configuring the gain via GPIOs, with the
+ * following settings (OUTPUT_V / INPUT_V): 0 (shutdown), 0.25, 0.5, 1, 2, 4, 8, 16
+ *
+ * Except for the shutdown value, each setting can be converted to dB using
+ * 20 * log10(x). Every resulting value is a multiple of the gain-of-2 step,
+ * which corresponds to 6.020 dB.
+ */
+#define LTC6373_CONVERSION_CONSTANT 6020
+#define LTC6373_MIN_GAIN_CODE 0x6
+#define LTC6373_CONVERSION_MASK GENMASK(2, 0)
+#define LTC6373_SHUTDOWN GENMASK(2, 0)
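+/*
+ * Resulting 3-bit codes, per the conversion helpers below:
+ * 0 = x16, 1 = x8, 2 = x4, 3 = x2, 4 = x1, 5 = x0.5, 6 = x0.25, 7 = shutdown
+ */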
+
enum hmc425a_type {
ID_HMC425A,
ID_HMC540S,
- ID_ADRF5740
+ ID_ADRF5740,
+ ID_LTC6373,
};
struct hmc425a_chip_info {
@@ -34,16 +50,110 @@ struct hmc425a_chip_info {
int gain_min;
int gain_max;
int default_gain;
+ int powerdown_val;
+ bool has_powerdown;
+
+ int (*gain_dB_to_code)(int gain, int *code);
+ int (*code_to_gain_dB)(int code, int *val, int *val2);
};
struct hmc425a_state {
struct mutex lock; /* protect sensor state */
- struct hmc425a_chip_info *chip_info;
+ const struct hmc425a_chip_info *chip_info;
struct gpio_descs *gpios;
- enum hmc425a_type type;
u32 gain;
+ bool powerdown;
};
+static int gain_dB_to_code(struct hmc425a_state *st, int val, int val2, int *code)
+{
+ const struct hmc425a_chip_info *inf = st->chip_info;
+ int gain;
+
+ if (val < 0)
+ gain = (val * 1000) - (val2 / 1000);
+ else
+ gain = (val * 1000) + (val2 / 1000);
+
+ if (gain > inf->gain_max || gain < inf->gain_min)
+ return -EINVAL;
+ if (st->powerdown)
+ return -EPERM;
+
+ return st->chip_info->gain_dB_to_code(gain, code);
+}
+
+static int hmc425a_gain_dB_to_code(int gain, int *code)
+{
+ *code = ~((abs(gain) / 500) & 0x3F);
+ return 0;
+}
+
+static int hmc540s_gain_dB_to_code(int gain, int *code)
+{
+ *code = ~((abs(gain) / 1000) & 0xF);
+ return 0;
+}
+
+static int adrf5740_gain_dB_to_code(int gain, int *code)
+{
+ int temp = (abs(gain) / 2000) & 0xF;
+
+ /* Bit [0-3]: 2dB 4dB 8dB 8dB */
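+	/* e.g. the default -22 dB: 11 = 0b1011, BIT(3) set -> 0b1111 (2+4+8+8 dB) */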
+ *code = temp & BIT(3) ? temp | BIT(2) : temp;
+ return 0;
+}
+
+static int ltc6373_gain_dB_to_code(int gain, int *code)
+{
+ *code = ~(DIV_ROUND_CLOSEST(gain, LTC6373_CONVERSION_CONSTANT) + 3)
+ & LTC6373_CONVERSION_MASK;
+ return 0;
+}
+
+static int code_to_gain_dB(struct hmc425a_state *st, int *val, int *val2)
+{
+ if (st->powerdown)
+ return -EPERM;
+ return st->chip_info->code_to_gain_dB(st->gain, val, val2);
+}
+
+static int hmc425a_code_to_gain_dB(int code, int *val, int *val2)
+{
+ *val = (~code * -500) / 1000;
+ *val2 = ((~code * -500) % 1000) * 1000;
+ return 0;
+}
+
+static int hmc540s_code_to_gain_dB(int code, int *val, int *val2)
+{
+ *val = (~code * -1000) / 1000;
+ *val2 = ((~code * -1000) % 1000) * 1000;
+ return 0;
+}
+
+static int adrf5740_code_to_gain_dB(int code, int *val, int *val2)
+{
+ /*
+ * Bit [0-3]: 2dB 4dB 8dB 8dB
+	 * When BIT(3) is set, clear BIT(2) so that BIT(3)'s place value (8, i.e.
+	 * 16 dB) accounts for both 8 dB stages
+ */
+ code = code & BIT(3) ? code & ~BIT(2) : code;
+ *val = (code * -2000) / 1000;
+ *val2 = ((code * -2000) % 1000) * 1000;
+ return 0;
+}
+
+static int ltc6373_code_to_gain_dB(int code, int *val, int *val2)
+{
+ int gain = ((~code & LTC6373_CONVERSION_MASK) - 3) *
+ LTC6373_CONVERSION_CONSTANT;
+
+ *val = gain / 1000;
+ *val2 = (gain % 1000) * 1000;
+ return 0;
+}
+
static int hmc425a_write(struct iio_dev *indio_dev, u32 value)
{
struct hmc425a_state *st = iio_priv(indio_dev);
@@ -61,30 +171,14 @@ static int hmc425a_read_raw(struct iio_dev *indio_dev,
int *val2, long m)
{
struct hmc425a_state *st = iio_priv(indio_dev);
- int code, gain = 0;
int ret;
mutex_lock(&st->lock);
switch (m) {
case IIO_CHAN_INFO_HARDWAREGAIN:
- code = st->gain;
-
- switch (st->type) {
- case ID_HMC425A:
- gain = ~code * -500;
+ ret = code_to_gain_dB(st, val, val2);
+ if (ret)
break;
- case ID_HMC540S:
- gain = ~code * -1000;
- break;
- case ID_ADRF5740:
- code = code & BIT(3) ? code & ~BIT(2) : code;
- gain = code * -2000;
- break;
- }
-
- *val = gain / 1000;
- *val2 = (gain % 1000) * 1000;
-
ret = IIO_VAL_INT_PLUS_MICRO_DB;
break;
default:
@@ -100,34 +194,14 @@ static int hmc425a_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct hmc425a_state *st = iio_priv(indio_dev);
- struct hmc425a_chip_info *inf = st->chip_info;
- int code = 0, gain;
- int ret;
-
- if (val < 0)
- gain = (val * 1000) - (val2 / 1000);
- else
- gain = (val * 1000) + (val2 / 1000);
-
- if (gain > inf->gain_max || gain < inf->gain_min)
- return -EINVAL;
-
- switch (st->type) {
- case ID_HMC425A:
- code = ~((abs(gain) / 500) & 0x3F);
- break;
- case ID_HMC540S:
- code = ~((abs(gain) / 1000) & 0xF);
- break;
- case ID_ADRF5740:
- code = (abs(gain) / 2000) & 0xF;
- code = code & BIT(3) ? code | BIT(2) : code;
- break;
- }
+ int code = 0, ret;
mutex_lock(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_HARDWAREGAIN:
+ ret = gain_dB_to_code(st, val, val2, &code);
+ if (ret)
+ break;
st->gain = code;
ret = hmc425a_write(indio_dev, st->gain);
@@ -158,6 +232,48 @@ static const struct iio_info hmc425a_info = {
.write_raw_get_fmt = &hmc425a_write_raw_get_fmt,
};
+static ssize_t ltc6373_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+
+ return sysfs_emit(buf, "%d\n", st->powerdown);
+}
+
+static ssize_t ltc6373_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf,
+ size_t len)
+{
+ struct hmc425a_state *st = iio_priv(indio_dev);
+ bool powerdown;
+ int code, ret;
+
+ ret = kstrtobool(buf, &powerdown);
+ if (ret)
+ return ret;
+
+ mutex_lock(&st->lock);
+ st->powerdown = powerdown;
+ code = (powerdown) ? LTC6373_SHUTDOWN : st->gain;
+ hmc425a_write(indio_dev, code);
+ mutex_unlock(&st->lock);
+ return len;
+}
+
+static const struct iio_chan_spec_ext_info ltc6373_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = ltc6373_read_powerdown,
+ .write = ltc6373_write_powerdown,
+ .shared = IIO_SEPARATE,
+ },
+ {}
+};
+
#define HMC425A_CHAN(_channel) \
{ \
.type = IIO_VOLTAGE, \
@@ -167,20 +283,25 @@ static const struct iio_info hmc425a_info = {
.info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
}
+#define LTC6373_CHAN(_channel) \
+{ \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+ .ext_info = ltc6373_ext_info, \
+}
+
static const struct iio_chan_spec hmc425a_channels[] = {
HMC425A_CHAN(0),
};
-/* Match table for of_platform binding */
-static const struct of_device_id hmc425a_of_match[] = {
- { .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A },
- { .compatible = "adi,hmc540s", .data = (void *)ID_HMC540S },
- { .compatible = "adi,adrf5740", .data = (void *)ID_ADRF5740 },
- {},
+static const struct iio_chan_spec ltc6373_channels[] = {
+ LTC6373_CHAN(0),
};
-MODULE_DEVICE_TABLE(of, hmc425a_of_match);
-static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
+static const struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
[ID_HMC425A] = {
.name = "hmc425a",
.channels = hmc425a_channels,
@@ -189,6 +310,8 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_min = -31500,
.gain_max = 0,
.default_gain = -0x40, /* set default gain -31.5db*/
+ .gain_dB_to_code = hmc425a_gain_dB_to_code,
+ .code_to_gain_dB = hmc425a_code_to_gain_dB,
},
[ID_HMC540S] = {
.name = "hmc540s",
@@ -198,6 +321,8 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_min = -15000,
.gain_max = 0,
.default_gain = -0x10, /* set default gain -15.0db*/
+ .gain_dB_to_code = hmc540s_gain_dB_to_code,
+ .code_to_gain_dB = hmc540s_code_to_gain_dB,
},
[ID_ADRF5740] = {
.name = "adrf5740",
@@ -207,6 +332,21 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = {
.gain_min = -22000,
.gain_max = 0,
.default_gain = 0xF, /* set default gain -22.0db*/
+ .gain_dB_to_code = adrf5740_gain_dB_to_code,
+ .code_to_gain_dB = adrf5740_code_to_gain_dB,
+ },
+ [ID_LTC6373] = {
+ .name = "ltc6373",
+ .channels = ltc6373_channels,
+ .num_channels = ARRAY_SIZE(ltc6373_channels),
+ .num_gpios = 3,
+		.gain_min = -12041, /* gain setting x0.25 */
+ .gain_max = 24082, /* gain setting x16 */
+ .default_gain = LTC6373_MIN_GAIN_CODE,
+ .powerdown_val = LTC6373_SHUTDOWN,
+ .has_powerdown = true,
+ .gain_dB_to_code = ltc6373_gain_dB_to_code,
+ .code_to_gain_dB = ltc6373_code_to_gain_dB,
},
};
@@ -221,9 +361,8 @@ static int hmc425a_probe(struct platform_device *pdev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->type = (uintptr_t)device_get_match_data(&pdev->dev);
- st->chip_info = &hmc425a_chip_info_tbl[st->type];
+ st->chip_info = device_get_match_data(&pdev->dev);
indio_dev->num_channels = st->chip_info->num_channels;
indio_dev->channels = st->chip_info->channels;
indio_dev->name = st->chip_info->name;
@@ -249,12 +388,31 @@ static int hmc425a_probe(struct platform_device *pdev)
indio_dev->info = &hmc425a_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- /* Set default gain */
- hmc425a_write(indio_dev, st->gain);
+ if (st->chip_info->has_powerdown) {
+ st->powerdown = true;
+ hmc425a_write(indio_dev, st->chip_info->powerdown_val);
+ } else {
+ /* Set default gain */
+ hmc425a_write(indio_dev, st->gain);
+ }
return devm_iio_device_register(&pdev->dev, indio_dev);
}
+/* Match table for of_platform binding */
+static const struct of_device_id hmc425a_of_match[] = {
+ { .compatible = "adi,hmc425a",
+ .data = &hmc425a_chip_info_tbl[ID_HMC425A]},
+ { .compatible = "adi,hmc540s",
+ .data = &hmc425a_chip_info_tbl[ID_HMC540S]},
+ { .compatible = "adi,adrf5740",
+ .data = &hmc425a_chip_info_tbl[ID_ADRF5740]},
+ { .compatible = "adi,ltc6373",
+ .data = &hmc425a_chip_info_tbl[ID_LTC6373]},
+ {}
+};
+MODULE_DEVICE_TABLE(of, hmc425a_of_match);
+
static struct platform_driver hmc425a_driver = {
.driver = {
.name = KBUILD_MODNAME,
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 5f85ba38e6f6..a18c1da292af 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -159,7 +159,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
{
struct dmaengine_buffer *dmaengine_buffer;
@@ -210,6 +210,7 @@ err_free:
kfree(dmaengine_buffer);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
/**
* iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -217,7 +218,7 @@ err_free:
*
* Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
*/
-static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(buffer);
@@ -227,6 +228,7 @@ static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
iio_buffer_put(buffer);
}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
@@ -279,8 +281,7 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
{
struct iio_buffer *buffer;
- buffer = devm_iio_dmaengine_buffer_alloc(indio_dev->dev.parent,
- channel);
+ buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
@@ -288,7 +289,7 @@ int devm_iio_dmaengine_buffer_setup(struct device *dev,
return iio_device_attach_buffer(indio_dev, buffer);
}
-EXPORT_SYMBOL_GPL(devm_iio_dmaengine_buffer_setup);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
diff --git a/drivers/iio/chemical/pms7003.c b/drivers/iio/chemical/pms7003.c
index b5cf15a515d2..43025866d5b7 100644
--- a/drivers/iio/chemical/pms7003.c
+++ b/drivers/iio/chemical/pms7003.c
@@ -211,8 +211,8 @@ static bool pms7003_frame_is_okay(struct pms7003_frame *frame)
return checksum == pms7003_calc_checksum(frame);
}
-static ssize_t pms7003_receive_buf(struct serdev_device *serdev, const u8 *buf,
- size_t size)
+static size_t pms7003_receive_buf(struct serdev_device *serdev, const u8 *buf,
+ size_t size)
{
struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
struct pms7003_state *state = iio_priv(indio_dev);
diff --git a/drivers/iio/chemical/scd30_serial.c b/drivers/iio/chemical/scd30_serial.c
index a47654591e55..2adb76dbb020 100644
--- a/drivers/iio/chemical/scd30_serial.c
+++ b/drivers/iio/chemical/scd30_serial.c
@@ -174,8 +174,8 @@ static int scd30_serdev_command(struct scd30_state *state, enum scd30_cmd cmd, u
return 0;
}
-static ssize_t scd30_serdev_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t scd30_serdev_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct iio_dev *indio_dev = serdev_device_get_drvdata(serdev);
struct scd30_serdev_priv *priv;
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
index 3afa89f8acc3..a6dfbe28c914 100644
--- a/drivers/iio/chemical/sps30_serial.c
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -210,8 +210,8 @@ static int sps30_serial_command(struct sps30_state *state, unsigned char cmd,
return rsp_size;
}
-static ssize_t sps30_serial_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t sps30_serial_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct iio_dev *indio_dev = dev_get_drvdata(&serdev->dev);
struct sps30_serial_priv *priv;
diff --git a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
index 03823ee57f59..3b0f9598a7c7 100644
--- a/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
+++ b/drivers/iio/common/inv_sensors/inv_sensors_timestamp.c
@@ -126,7 +126,7 @@ void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
struct inv_sensors_timestamp_interval *it;
int64_t delta, interval;
const uint32_t fifo_mult = fifo_period / ts->chip.clock_period;
- uint32_t period = ts->period;
+ uint32_t period;
bool valid = false;
if (fifo_nb == 0)
diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c
index 8a0480d33845..782e8f6b7782 100644
--- a/drivers/iio/dac/mcp4821.c
+++ b/drivers/iio/dac/mcp4821.c
@@ -17,7 +17,7 @@
*/
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/dummy/iio_dummy_evgen.c b/drivers/iio/dummy/iio_dummy_evgen.c
index 5a0072727ba4..16d3f144dda0 100644
--- a/drivers/iio/dummy/iio_dummy_evgen.c
+++ b/drivers/iio/dummy/iio_dummy_evgen.c
@@ -31,8 +31,6 @@
* @regs: irq regs we are faking
* @lock: protect the evgen state
* @inuse: mask of which irqs are connected
- * @irq_sim: interrupt simulator
- * @base: base of irq range
* @irq_sim_domain: irq simulator domain
*/
struct iio_dummy_eventgen {
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index c24f609c2ade..09efacaf8f78 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -283,65 +283,63 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
- int ret = -EINVAL;
- mutex_lock(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->output) {
- /* Set integer part to cached value */
- *val = st->dac_val;
- ret = IIO_VAL_INT;
- } else if (chan->differential) {
- if (chan->channel == 1)
- *val = st->differential_adc_val[0];
- else
- *val = st->differential_adc_val[1];
- ret = IIO_VAL_INT;
- } else {
- *val = st->single_ended_adc_val;
- ret = IIO_VAL_INT;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ /* Set integer part to cached value */
+ *val = st->dac_val;
+ return IIO_VAL_INT;
+ } else if (chan->differential) {
+ if (chan->channel == 1)
+ *val = st->differential_adc_val[0];
+ else
+ *val = st->differential_adc_val[1];
+ return IIO_VAL_INT;
+ } else {
+ *val = st->single_ended_adc_val;
+ return IIO_VAL_INT;
+ }
+
+ case IIO_ACCEL:
+ *val = st->accel_val;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
}
- break;
- case IIO_ACCEL:
- *val = st->accel_val;
- ret = IIO_VAL_INT;
- break;
- default:
- break;
}
- break;
+ unreachable();
case IIO_CHAN_INFO_PROCESSED:
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->steps;
- ret = IIO_VAL_INT;
- break;
- case IIO_ACTIVITY:
- switch (chan->channel2) {
- case IIO_MOD_RUNNING:
- *val = st->activity_running;
- ret = IIO_VAL_INT;
- break;
- case IIO_MOD_WALKING:
- *val = st->activity_walking;
- ret = IIO_VAL_INT;
- break;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->steps;
+ return IIO_VAL_INT;
+ case IIO_ACTIVITY:
+ switch (chan->channel2) {
+ case IIO_MOD_RUNNING:
+ *val = st->activity_running;
+ return IIO_VAL_INT;
+ case IIO_MOD_WALKING:
+ *val = st->activity_walking;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
default:
- break;
+ return -EINVAL;
}
- break;
- default:
- break;
}
- break;
+ unreachable();
case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
@@ -350,60 +348,57 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
/* only single ended adc -> 0.001333 */
*val = 0;
*val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case 1:
/* all differential adc -> 0.000001344 */
*val = 0;
*val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
}
- break;
default:
- break;
+ return -EINVAL;
}
- break;
- case IIO_CHAN_INFO_CALIBBIAS:
+ case IIO_CHAN_INFO_CALIBBIAS: {
+ guard(mutex)(&st->lock);
/* only the acceleration axis - read from cache */
*val = st->accel_calibbias;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_CALIBSCALE:
+ return IIO_VAL_INT;
+ }
+ case IIO_CHAN_INFO_CALIBSCALE: {
+ guard(mutex)(&st->lock);
*val = st->accel_calibscale->val;
*val2 = st->accel_calibscale->val2;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
*val = 3;
*val2 = 33;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
- case IIO_CHAN_INFO_ENABLE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_ENABLE: {
+ guard(mutex)(&st->lock);
switch (chan->type) {
case IIO_STEPS:
*val = st->steps_enabled;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- break;
+ return -EINVAL;
}
- break;
- case IIO_CHAN_INFO_CALIBHEIGHT:
+ }
+ case IIO_CHAN_INFO_CALIBHEIGHT: {
+ guard(mutex)(&st->lock);
switch (chan->type) {
case IIO_STEPS:
*val = st->height;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
default:
- break;
+ return -EINVAL;
}
- break;
-
+ }
default:
- break;
+ return -EINVAL;
}
- mutex_unlock(&st->lock);
- return ret;
}
/**
@@ -426,7 +421,6 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
long mask)
{
int i;
- int ret = 0;
struct iio_dummy_state *st = iio_priv(indio_dev);
switch (mask) {
@@ -436,10 +430,10 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
if (chan->output == 0)
return -EINVAL;
- /* Locking not required as writing single value */
- mutex_lock(&st->lock);
- st->dac_val = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ /* Locking not required as writing single value */
+ st->dac_val = val;
+ }
return 0;
default:
return -EINVAL;
@@ -447,9 +441,9 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_STEPS:
- mutex_lock(&st->lock);
- st->steps = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ st->steps = val;
+ }
return 0;
case IIO_ACTIVITY:
if (val < 0)
@@ -470,30 +464,29 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case IIO_CHAN_INFO_CALIBSCALE:
- mutex_lock(&st->lock);
+ case IIO_CHAN_INFO_CALIBSCALE: {
+ guard(mutex)(&st->lock);
/* Compare against table - hard matching here */
for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
if (val == dummy_scales[i].val &&
val2 == dummy_scales[i].val2)
break;
if (i == ARRAY_SIZE(dummy_scales))
- ret = -EINVAL;
- else
- st->accel_calibscale = &dummy_scales[i];
- mutex_unlock(&st->lock);
- return ret;
+ return -EINVAL;
+ st->accel_calibscale = &dummy_scales[i];
+ return 0;
+ }
case IIO_CHAN_INFO_CALIBBIAS:
- mutex_lock(&st->lock);
- st->accel_calibbias = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ st->accel_calibbias = val;
+ }
return 0;
case IIO_CHAN_INFO_ENABLE:
switch (chan->type) {
case IIO_STEPS:
- mutex_lock(&st->lock);
- st->steps_enabled = val;
- mutex_unlock(&st->lock);
+ scoped_guard(mutex, &st->lock) {
+ st->steps_enabled = val;
+ }
return 0;
default:
return -EINVAL;
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
index 9e85dfa58508..c455be7d4a1c 100644
--- a/drivers/iio/frequency/Kconfig
+++ b/drivers/iio/frequency/Kconfig
@@ -60,6 +60,16 @@ config ADF4377
To compile this driver as a module, choose M here: the
module will be called adf4377.
+config ADMFM2000
+ tristate "Analog Devices ADMFM2000 Dual Microwave Down Converter"
+ depends on GPIOLIB
+ help
+ Say yes here to build support for Analog Devices ADMFM2000 Dual
+ Microwave Down Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called admfm2000.
+
config ADMV1013
tristate "Analog Devices ADMV1013 Microwave Upconverter"
depends on SPI && COMMON_CLK
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
index b616c29b4a08..70d0e0b70e80 100644
--- a/drivers/iio/frequency/Makefile
+++ b/drivers/iio/frequency/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_AD9523) += ad9523.o
obj-$(CONFIG_ADF4350) += adf4350.o
obj-$(CONFIG_ADF4371) += adf4371.o
obj-$(CONFIG_ADF4377) += adf4377.o
+obj-$(CONFIG_ADMFM2000) += admfm2000.o
obj-$(CONFIG_ADMV1013) += admv1013.o
obj-$(CONFIG_ADMV1014) += admv1014.o
obj-$(CONFIG_ADMV4420) += admv4420.o
diff --git a/drivers/iio/frequency/admfm2000.c b/drivers/iio/frequency/admfm2000.c
new file mode 100644
index 000000000000..c34d79e55a7c
--- /dev/null
+++ b/drivers/iio/frequency/admfm2000.c
@@ -0,0 +1,282 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADMFM2000 Dual Microwave Down Converter
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define ADMFM2000_MIXER_MODE 0
+#define ADMFM2000_DIRECT_IF_MODE 1
+#define ADMFM2000_DSA_GPIOS 5
+#define ADMFM2000_MODE_GPIOS 2
+#define ADMFM2000_MAX_GAIN 0
+#define ADMFM2000_MIN_GAIN -31000
+#define ADMFM2000_DEFAULT_GAIN -0x20
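+/* Stored as the inverted 5-bit DSA code; reads back as -31 dB (minimum gain) */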
+
+struct admfm2000_state {
+ struct mutex lock; /* protect sensor state */
+ struct gpio_desc *sw1_ch[2];
+ struct gpio_desc *sw2_ch[2];
+ struct gpio_desc *dsa1_gpios[5];
+ struct gpio_desc *dsa2_gpios[5];
+ u32 gain[2];
+};
+
+static int admfm2000_mode(struct iio_dev *indio_dev, u32 chan, u32 mode)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int i;
+
+ switch (mode) {
+ case ADMFM2000_MIXER_MODE:
+ for (i = 0; i < ADMFM2000_MODE_GPIOS; i++) {
+ gpiod_set_value_cansleep(st->sw1_ch[i], (chan == 0) ? 1 : 0);
+ gpiod_set_value_cansleep(st->sw2_ch[i], (chan == 0) ? 0 : 1);
+ }
+ return 0;
+ case ADMFM2000_DIRECT_IF_MODE:
+ for (i = 0; i < ADMFM2000_MODE_GPIOS; i++) {
+ gpiod_set_value_cansleep(st->sw1_ch[i], (chan == 0) ? 0 : 1);
+ gpiod_set_value_cansleep(st->sw2_ch[i], (chan == 0) ? 1 : 0);
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_attenuation(struct iio_dev *indio_dev, u32 chan, u32 value)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int i;
+
+ switch (chan) {
+ case 0:
+ for (i = 0; i < ADMFM2000_DSA_GPIOS; i++)
+ gpiod_set_value_cansleep(st->dsa1_gpios[i], value & (1 << i));
+ return 0;
+ case 1:
+ for (i = 0; i < ADMFM2000_DSA_GPIOS; i++)
+ gpiod_set_value_cansleep(st->dsa2_gpios[i], value & (1 << i));
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int gain;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ mutex_lock(&st->lock);
+ gain = ~(st->gain[chan->channel]) * -1000;
+ *val = gain / 1000;
+ *val2 = (gain % 1000) * 1000;
+ mutex_unlock(&st->lock);
+
+ return IIO_VAL_INT_PLUS_MICRO_DB;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct admfm2000_state *st = iio_priv(indio_dev);
+ int gain, ret;
+
+ if (val < 0)
+ gain = (val * 1000) - (val2 / 1000);
+ else
+ gain = (val * 1000) + (val2 / 1000);
+
+ if (gain > ADMFM2000_MAX_GAIN || gain < ADMFM2000_MIN_GAIN)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ mutex_lock(&st->lock);
+ st->gain[chan->channel] = ~((abs(gain) / 1000) & 0x1F);
+
+ ret = admfm2000_attenuation(indio_dev, chan->channel,
+ st->gain[chan->channel]);
+ mutex_unlock(&st->lock);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int admfm2000_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ return IIO_VAL_INT_PLUS_MICRO_DB;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info admfm2000_info = {
+ .read_raw = &admfm2000_read_raw,
+ .write_raw = &admfm2000_write_raw,
+ .write_raw_get_fmt = &admfm2000_write_raw_get_fmt,
+};
+
+#define ADMFM2000_CHAN(_channel) { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_HARDWAREGAIN), \
+}
+
+static const struct iio_chan_spec admfm2000_channels[] = {
+ ADMFM2000_CHAN(0),
+ ADMFM2000_CHAN(1),
+};
+
+static int admfm2000_channel_config(struct admfm2000_state *st,
+ struct iio_dev *indio_dev)
+{
+ struct platform_device *pdev = to_platform_device(indio_dev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct fwnode_handle *child;
+ struct gpio_desc **dsa;
+ struct gpio_desc **sw;
+ int ret, i;
+ bool mode;
+ u32 reg;
+
+ device_for_each_child_node(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, ret,
+ "Failed to get reg property\n");
+ }
+
+ if (reg >= indio_dev->num_channels) {
+ fwnode_handle_put(child);
+			return dev_err_probe(dev, -EINVAL, "reg must be less than %d\n",
+ indio_dev->num_channels);
+ }
+
+ if (fwnode_property_present(child, "adi,mixer-mode"))
+ mode = ADMFM2000_MIXER_MODE;
+ else
+ mode = ADMFM2000_DIRECT_IF_MODE;
+
+ switch (reg) {
+ case 0:
+ sw = st->sw1_ch;
+ dsa = st->dsa1_gpios;
+ break;
+ case 1:
+ sw = st->sw2_ch;
+ dsa = st->dsa2_gpios;
+ break;
+ default:
+ fwnode_handle_put(child);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ADMFM2000_MODE_GPIOS; i++) {
+ sw[i] = devm_fwnode_gpiod_get_index(dev, child, "switch",
+ i, GPIOD_OUT_LOW, NULL);
+ if (IS_ERR(sw[i])) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, PTR_ERR(sw[i]),
+ "Failed to get gpios\n");
+ }
+ }
+
+ for (i = 0; i < ADMFM2000_DSA_GPIOS; i++) {
+ dsa[i] = devm_fwnode_gpiod_get_index(dev, child,
+ "attenuation", i,
+ GPIOD_OUT_LOW, NULL);
+ if (IS_ERR(dsa[i])) {
+ fwnode_handle_put(child);
+ return dev_err_probe(dev, PTR_ERR(dsa[i]),
+ "Failed to get gpios\n");
+ }
+ }
+
+ ret = admfm2000_mode(indio_dev, reg, mode);
+ if (ret) {
+ fwnode_handle_put(child);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int admfm2000_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct admfm2000_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ indio_dev->name = "admfm2000";
+ indio_dev->num_channels = ARRAY_SIZE(admfm2000_channels);
+ indio_dev->channels = admfm2000_channels;
+ indio_dev->info = &admfm2000_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->gain[0] = ADMFM2000_DEFAULT_GAIN;
+ st->gain[1] = ADMFM2000_DEFAULT_GAIN;
+
+ mutex_init(&st->lock);
+
+ ret = admfm2000_channel_config(st, indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id admfm2000_of_match[] = {
+ { .compatible = "adi,admfm2000" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, admfm2000_of_match);
+
+static struct platform_driver admfm2000_driver = {
+ .driver = {
+ .name = "admfm2000",
+ .of_match_table = admfm2000_of_match,
+ },
+ .probe = admfm2000_probe,
+};
+module_platform_driver(admfm2000_driver);
+
+MODULE_AUTHOR("Kim Seer Paller <kimseer.paller@analog.com>");
+MODULE_DESCRIPTION("ADMFM2000 Dual Microwave Down Converter");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 2f9675596138..9c8e20c25e96 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -3,7 +3,7 @@
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include "bmg160.h"
@@ -66,7 +66,7 @@ MODULE_DEVICE_TABLE(of, bmg160_of_match);
static struct i2c_driver bmg160_i2c_driver = {
.driver = {
.name = "bmg160_i2c",
- .acpi_match_table = ACPI_PTR(bmg160_acpi_match),
+ .acpi_match_table = bmg160_acpi_match,
.of_match_table = bmg160_of_match,
.pm = &bmg160_pm_ops,
},
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index df3bc5c3d378..1dbe48dae74e 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -346,6 +346,13 @@ err:
return IRQ_HANDLED;
}
+static void afe4403_regulator_disable(void *data)
+{
+ struct regulator *regulator = data;
+
+ regulator_disable(regulator);
+}
+
#define AFE4403_TIMING_PAIRS \
{ AFE440X_LED2STC, 0x000050 }, \
{ AFE440X_LED2ENDC, 0x0003e7 }, \
@@ -495,19 +502,24 @@ static int afe4403_probe(struct spi_device *spi)
dev_err(afe->dev, "Unable to enable regulator\n");
return ret;
}
+ ret = devm_add_action_or_reset(afe->dev, afe4403_regulator_disable, afe->regulator);
+ if (ret) {
+ dev_err(afe->dev, "Unable to add regulator disable action\n");
+ return ret;
+ }
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
dev_err(afe->dev, "Unable to reset device\n");
- goto err_disable_reg;
+ return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4403_reg_sequences,
ARRAY_SIZE(afe4403_reg_sequences));
if (ret) {
dev_err(afe->dev, "Unable to set register defaults\n");
- goto err_disable_reg;
+ return ret;
}
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -523,16 +535,15 @@ static int afe4403_probe(struct spi_device *spi)
iio_device_id(indio_dev));
if (!afe->trig) {
dev_err(afe->dev, "Unable to allocate IIO trigger\n");
- ret = -ENOMEM;
- goto err_disable_reg;
+ return -ENOMEM;
}
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = iio_trigger_register(afe->trig);
+ ret = devm_iio_trigger_register(afe->dev, afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
- goto err_disable_reg;
+ return ret;
}
ret = devm_request_threaded_irq(afe->dev, afe->irq,
@@ -542,52 +553,25 @@ static int afe4403_probe(struct spi_device *spi)
afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to request IRQ\n");
- goto err_trig;
+ return ret;
}
}
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- afe4403_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ afe4403_trigger_handler, NULL);
if (ret) {
dev_err(afe->dev, "Unable to setup buffer\n");
- goto err_trig;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(afe->dev, indio_dev);
if (ret) {
dev_err(afe->dev, "Unable to register IIO device\n");
- goto err_buff;
+ return ret;
}
return 0;
-
-err_buff:
- iio_triggered_buffer_cleanup(indio_dev);
-err_trig:
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-err_disable_reg:
- regulator_disable(afe->regulator);
-
- return ret;
-}
-
-static void afe4403_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct afe4403_data *afe = iio_priv(indio_dev);
- int ret;
-
- iio_device_unregister(indio_dev);
-
- iio_triggered_buffer_cleanup(indio_dev);
-
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-
- ret = regulator_disable(afe->regulator);
- if (ret)
- dev_warn(afe->dev, "Unable to disable regulator\n");
}
static const struct spi_device_id afe4403_ids[] = {
@@ -603,7 +587,6 @@ static struct spi_driver afe4403_spi_driver = {
.pm = pm_sleep_ptr(&afe4403_pm_ops),
},
.probe = afe4403_probe,
- .remove = afe4403_remove,
.id_table = afe4403_ids,
};
module_spi_driver(afe4403_spi_driver);
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index ede1e8201311..7768b07ef7a6 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -349,6 +349,13 @@ err:
return IRQ_HANDLED;
}
+static void afe4404_regulator_disable(void *data)
+{
+ struct regulator *regulator = data;
+
+ regulator_disable(regulator);
+}
+
/* Default timings from data-sheet */
#define AFE4404_TIMING_PAIRS \
{ AFE440X_PRPCOUNT, 39999 }, \
@@ -502,19 +509,24 @@ static int afe4404_probe(struct i2c_client *client)
dev_err(afe->dev, "Unable to enable regulator\n");
return ret;
}
+ ret = devm_add_action_or_reset(afe->dev, afe4404_regulator_disable, afe->regulator);
+ if (ret) {
+		dev_err(afe->dev, "Unable to add regulator disable action\n");
+ return ret;
+ }
ret = regmap_write(afe->regmap, AFE440X_CONTROL0,
AFE440X_CONTROL0_SW_RESET);
if (ret) {
dev_err(afe->dev, "Unable to reset device\n");
- goto disable_reg;
+ return ret;
}
ret = regmap_multi_reg_write(afe->regmap, afe4404_reg_sequences,
ARRAY_SIZE(afe4404_reg_sequences));
if (ret) {
dev_err(afe->dev, "Unable to set register defaults\n");
- goto disable_reg;
+ return ret;
}
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -530,16 +542,15 @@ static int afe4404_probe(struct i2c_client *client)
iio_device_id(indio_dev));
if (!afe->trig) {
dev_err(afe->dev, "Unable to allocate IIO trigger\n");
- ret = -ENOMEM;
- goto disable_reg;
+ return -ENOMEM;
}
iio_trigger_set_drvdata(afe->trig, indio_dev);
- ret = iio_trigger_register(afe->trig);
+ ret = devm_iio_trigger_register(afe->dev, afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to register IIO trigger\n");
- goto disable_reg;
+ return ret;
}
ret = devm_request_threaded_irq(afe->dev, afe->irq,
@@ -549,52 +560,25 @@ static int afe4404_probe(struct i2c_client *client)
afe->trig);
if (ret) {
dev_err(afe->dev, "Unable to request IRQ\n");
- goto disable_reg;
+ return ret;
}
}
- ret = iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
- afe4404_trigger_handler, NULL);
+ ret = devm_iio_triggered_buffer_setup(afe->dev, indio_dev,
+ &iio_pollfunc_store_time,
+ afe4404_trigger_handler, NULL);
if (ret) {
dev_err(afe->dev, "Unable to setup buffer\n");
- goto unregister_trigger;
+ return ret;
}
- ret = iio_device_register(indio_dev);
+ ret = devm_iio_device_register(afe->dev, indio_dev);
if (ret) {
dev_err(afe->dev, "Unable to register IIO device\n");
- goto unregister_triggered_buffer;
+ return ret;
}
return 0;
-
-unregister_triggered_buffer:
- iio_triggered_buffer_cleanup(indio_dev);
-unregister_trigger:
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-disable_reg:
- regulator_disable(afe->regulator);
-
- return ret;
-}
-
-static void afe4404_remove(struct i2c_client *client)
-{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
- struct afe4404_data *afe = iio_priv(indio_dev);
- int ret;
-
- iio_device_unregister(indio_dev);
-
- iio_triggered_buffer_cleanup(indio_dev);
-
- if (afe->irq > 0)
- iio_trigger_unregister(afe->trig);
-
- ret = regulator_disable(afe->regulator);
- if (ret)
- dev_err(afe->dev, "Unable to disable regulator\n");
}
static const struct i2c_device_id afe4404_ids[] = {
@@ -610,7 +594,6 @@ static struct i2c_driver afe4404_i2c_driver = {
.pm = pm_sleep_ptr(&afe4404_pm_ops),
},
.probe = afe4404_probe,
- .remove = afe4404_remove,
.id_table = afe4404_ids,
};
module_i2c_driver(afe4404_i2c_driver);
diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c
index ed70415512f6..1e5d0d4797b1 100644
--- a/drivers/iio/humidity/hdc3020.c
+++ b/drivers/iio/humidity/hdc3020.c
@@ -5,41 +5,66 @@
*
* Copyright (C) 2023
*
+ * Copyright (C) 2024 Liebherr-Electronics and Drives GmbH
+ *
* Datasheet: https://www.ti.com/lit/ds/symlink/hdc3020.pdf
*/
+#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/crc8.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/units.h>
#include <asm/unaligned.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
-#define HDC3020_HEATER_CMD_MSB 0x30 /* shared by all heater commands */
-#define HDC3020_HEATER_ENABLE 0x6D
-#define HDC3020_HEATER_DISABLE 0x66
-#define HDC3020_HEATER_CONFIG 0x6E
+#define HDC3020_S_AUTO_10HZ_MOD0 0x2737
+#define HDC3020_S_STATUS 0x3041
+#define HDC3020_HEATER_DISABLE 0x3066
+#define HDC3020_HEATER_ENABLE 0x306D
+#define HDC3020_HEATER_CONFIG 0x306E
+#define HDC3020_EXIT_AUTO 0x3093
+#define HDC3020_S_T_RH_THRESH_LOW 0x6100
+#define HDC3020_S_T_RH_THRESH_LOW_CLR 0x610B
+#define HDC3020_S_T_RH_THRESH_HIGH_CLR 0x6116
+#define HDC3020_S_T_RH_THRESH_HIGH 0x611D
+#define HDC3020_R_T_RH_AUTO 0xE000
+#define HDC3020_R_T_LOW_AUTO 0xE002
+#define HDC3020_R_T_HIGH_AUTO 0xE003
+#define HDC3020_R_RH_LOW_AUTO 0xE004
+#define HDC3020_R_RH_HIGH_AUTO 0xE005
+#define HDC3020_R_T_RH_THRESH_LOW 0xE102
+#define HDC3020_R_T_RH_THRESH_LOW_CLR 0xE109
+#define HDC3020_R_T_RH_THRESH_HIGH_CLR 0xE114
+#define HDC3020_R_T_RH_THRESH_HIGH 0xE11F
+#define HDC3020_R_STATUS 0xF32D
+
+#define HDC3020_THRESH_TEMP_MASK GENMASK(8, 0)
+#define HDC3020_THRESH_TEMP_TRUNC_SHIFT 7
+#define HDC3020_THRESH_HUM_MASK GENMASK(15, 9)
+#define HDC3020_THRESH_HUM_TRUNC_SHIFT 9
+
+#define HDC3020_STATUS_T_LOW_ALERT BIT(6)
+#define HDC3020_STATUS_T_HIGH_ALERT BIT(7)
+#define HDC3020_STATUS_RH_LOW_ALERT BIT(8)
+#define HDC3020_STATUS_RH_HIGH_ALERT BIT(9)
#define HDC3020_READ_RETRY_TIMES 10
#define HDC3020_BUSY_DELAY_MS 10
#define HDC3020_CRC8_POLYNOMIAL 0x31
-static const u8 HDC3020_S_AUTO_10HZ_MOD0[2] = { 0x27, 0x37 };
-
-static const u8 HDC3020_EXIT_AUTO[2] = { 0x30, 0x93 };
-
-static const u8 HDC3020_R_T_RH_AUTO[2] = { 0xE0, 0x00 };
-static const u8 HDC3020_R_T_LOW_AUTO[2] = { 0xE0, 0x02 };
-static const u8 HDC3020_R_T_HIGH_AUTO[2] = { 0xE0, 0x03 };
-static const u8 HDC3020_R_RH_LOW_AUTO[2] = { 0xE0, 0x04 };
-static const u8 HDC3020_R_RH_HIGH_AUTO[2] = { 0xE0, 0x05 };
+#define HDC3020_MIN_TEMP -40
+#define HDC3020_MAX_TEMP 125
struct hdc3020_data {
struct i2c_client *client;
@@ -54,18 +79,37 @@ struct hdc3020_data {
static const int hdc3020_heater_vals[] = {0, 1, 0x3FFF};
+static const struct iio_event_spec hdc3020_t_rh_event[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
+ BIT(IIO_EV_INFO_HYSTERESIS),
+ },
+};
+
static const struct iio_chan_spec hdc3020_channels[] = {
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
BIT(IIO_CHAN_INFO_TROUGH) | BIT(IIO_CHAN_INFO_OFFSET),
+ .event_spec = hdc3020_t_rh_event,
+ .num_event_specs = ARRAY_SIZE(hdc3020_t_rh_event),
},
{
.type = IIO_HUMIDITYRELATIVE,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) |
BIT(IIO_CHAN_INFO_TROUGH),
+ .event_spec = hdc3020_t_rh_event,
+ .num_event_specs = ARRAY_SIZE(hdc3020_t_rh_event),
},
{
/*
@@ -82,7 +126,7 @@ static const struct iio_chan_spec hdc3020_channels[] = {
DECLARE_CRC8_TABLE(hdc3020_crc8_table);
-static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
+static int hdc3020_write_bytes(struct hdc3020_data *data, u8 *buf, u8 len)
{
struct i2c_client *client = data->client;
struct i2c_msg msg;
@@ -90,7 +134,7 @@ static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
msg.addr = client->addr;
msg.flags = 0;
- msg.buf = (char *)buf;
+ msg.buf = buf;
msg.len = len;
/*
@@ -109,26 +153,28 @@ static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len)
return -ETIMEDOUT;
}
-static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf,
- void *val, int len)
+static
+int hdc3020_read_bytes(struct hdc3020_data *data, u16 reg, u8 *buf, int len)
{
+ u8 reg_buf[2];
int ret, cnt;
struct i2c_client *client = data->client;
struct i2c_msg msg[2] = {
[0] = {
.addr = client->addr,
.flags = 0,
- .buf = (char *)buf,
+ .buf = reg_buf,
.len = 2,
},
[1] = {
.addr = client->addr,
.flags = I2C_M_RD,
- .buf = val,
+ .buf = buf,
.len = len,
},
};
+ put_unaligned_be16(reg, reg_buf);
/*
* During the measurement process, HDC3020 will not return data.
* So wait for a while and try again
@@ -145,48 +191,12 @@ static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf,
return -ETIMEDOUT;
}
-static int hdc3020_read_measurement(struct hdc3020_data *data,
- enum iio_chan_type type, int *val)
-{
- u8 crc, buf[6];
- int ret;
-
- ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6);
- if (ret < 0)
- return ret;
-
- /* CRC check of the temperature measurement */
- crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
- if (crc != buf[2])
- return -EINVAL;
-
- /* CRC check of the relative humidity measurement */
- crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE);
- if (crc != buf[5])
- return -EINVAL;
-
- if (type == IIO_TEMP)
- *val = get_unaligned_be16(buf);
- else if (type == IIO_HUMIDITYRELATIVE)
- *val = get_unaligned_be16(&buf[3]);
- else
- return -EINVAL;
-
- return 0;
-}
-
-/*
- * After exiting the automatic measurement mode or resetting, the peak
- * value will be reset to the default value
- * This method is used to get the highest temp measured during automatic
- * measurement
- */
-static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val)
+static int hdc3020_read_be16(struct hdc3020_data *data, u16 reg)
{
u8 crc, buf[3];
int ret;
- ret = hdc3020_read_bytes(data, HDC3020_R_T_HIGH_AUTO, buf, 3);
+ ret = hdc3020_read_bytes(data, reg, buf, 3);
if (ret < 0)
return ret;
@@ -194,73 +204,43 @@ static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val)
if (crc != buf[2])
return -EINVAL;
- *val = get_unaligned_be16(buf);
-
- return 0;
+ return get_unaligned_be16(buf);
}
-/*
- * This method is used to get the lowest temp measured during automatic
- * measurement
- */
-static int hdc3020_read_low_peak_t(struct hdc3020_data *data, int *val)
+static int hdc3020_exec_cmd(struct hdc3020_data *data, u16 reg)
{
- u8 crc, buf[3];
- int ret;
+ u8 reg_buf[2];
- ret = hdc3020_read_bytes(data, HDC3020_R_T_LOW_AUTO, buf, 3);
- if (ret < 0)
- return ret;
-
- crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
- if (crc != buf[2])
- return -EINVAL;
-
- *val = get_unaligned_be16(buf);
-
- return 0;
+ put_unaligned_be16(reg, reg_buf);
+ return hdc3020_write_bytes(data, reg_buf, 2);
}
-/*
- * This method is used to get the highest humidity measured during automatic
- * measurement
- */
-static int hdc3020_read_high_peak_rh(struct hdc3020_data *data, int *val)
+static int hdc3020_read_measurement(struct hdc3020_data *data,
+ enum iio_chan_type type, int *val)
{
- u8 crc, buf[3];
+ u8 crc, buf[6];
int ret;
- ret = hdc3020_read_bytes(data, HDC3020_R_RH_HIGH_AUTO, buf, 3);
+ ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6);
if (ret < 0)
return ret;
+ /* CRC check of the temperature measurement */
crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
if (crc != buf[2])
return -EINVAL;
- *val = get_unaligned_be16(buf);
-
- return 0;
-}
-
-/*
- * This method is used to get the lowest humidity measured during automatic
- * measurement
- */
-static int hdc3020_read_low_peak_rh(struct hdc3020_data *data, int *val)
-{
- u8 crc, buf[3];
- int ret;
-
- ret = hdc3020_read_bytes(data, HDC3020_R_RH_LOW_AUTO, buf, 3);
- if (ret < 0)
- return ret;
-
- crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE);
- if (crc != buf[2])
+ /* CRC check of the relative humidity measurement */
+ crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE);
+ if (crc != buf[5])
return -EINVAL;
- *val = get_unaligned_be16(buf);
+ if (type == IIO_TEMP)
+ *val = get_unaligned_be16(buf);
+ else if (type == IIO_HUMIDITYRELATIVE)
+ *val = get_unaligned_be16(&buf[3]);
+ else
+ return -EINVAL;
return 0;
}
@@ -286,28 +266,28 @@ static int hdc3020_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_PEAK: {
guard(mutex)(&data->lock);
- if (chan->type == IIO_TEMP) {
- ret = hdc3020_read_high_peak_t(data, val);
- if (ret < 0)
- return ret;
- } else {
- ret = hdc3020_read_high_peak_rh(data, val);
- if (ret < 0)
- return ret;
- }
+ if (chan->type == IIO_TEMP)
+ ret = hdc3020_read_be16(data, HDC3020_R_T_HIGH_AUTO);
+ else
+ ret = hdc3020_read_be16(data, HDC3020_R_RH_HIGH_AUTO);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_TROUGH: {
guard(mutex)(&data->lock);
- if (chan->type == IIO_TEMP) {
- ret = hdc3020_read_low_peak_t(data, val);
- if (ret < 0)
- return ret;
- } else {
- ret = hdc3020_read_low_peak_rh(data, val);
- if (ret < 0)
- return ret;
- }
+ if (chan->type == IIO_TEMP)
+ ret = hdc3020_read_be16(data, HDC3020_R_T_LOW_AUTO);
+ else
+ ret = hdc3020_read_be16(data, HDC3020_R_RH_LOW_AUTO);
+
+ if (ret < 0)
+ return ret;
+
+ *val = ret;
return IIO_VAL_INT;
}
case IIO_CHAN_INFO_SCALE:
@@ -352,23 +332,17 @@ static int hdc3020_update_heater(struct hdc3020_data *data, int val)
if (val < hdc3020_heater_vals[0] || val > hdc3020_heater_vals[2])
return -EINVAL;
- buf[0] = HDC3020_HEATER_CMD_MSB;
+ if (!val)
+ return hdc3020_exec_cmd(data, HDC3020_HEATER_DISABLE);
- if (!val) {
- buf[1] = HDC3020_HEATER_DISABLE;
- return hdc3020_write_bytes(data, buf, 2);
- }
-
- buf[1] = HDC3020_HEATER_CONFIG;
+ put_unaligned_be16(HDC3020_HEATER_CONFIG, buf);
put_unaligned_be16(val & GENMASK(13, 0), &buf[2]);
buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
ret = hdc3020_write_bytes(data, buf, 5);
if (ret < 0)
return ret;
- buf[1] = HDC3020_HEATER_ENABLE;
-
- return hdc3020_write_bytes(data, buf, 2);
+ return hdc3020_exec_cmd(data, HDC3020_HEATER_ENABLE);
}
static int hdc3020_write_raw(struct iio_dev *indio_dev,
@@ -389,15 +363,197 @@ static int hdc3020_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int hdc3020_write_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct hdc3020_data *data = iio_priv(indio_dev);
+ u8 buf[5];
+ u64 tmp;
+ u16 reg;
+ int ret;
+
+ /* Supported temperature range is from -40 to 125 degrees Celsius */
+ if (val < HDC3020_MIN_TEMP || val > HDC3020_MAX_TEMP)
+ return -EINVAL;
+
+ /* Select threshold register */
+ if (info == IIO_EV_INFO_VALUE) {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_S_T_RH_THRESH_HIGH;
+ else
+ reg = HDC3020_S_T_RH_THRESH_LOW;
+ } else {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_S_T_RH_THRESH_HIGH_CLR;
+ else
+ reg = HDC3020_S_T_RH_THRESH_LOW_CLR;
+ }
+
+ guard(mutex)(&data->lock);
+ ret = hdc3020_read_be16(data, reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * Calculate temperature threshold, shift it down to get the
+ * truncated threshold representation in the 9 LSBs while keeping
+ * the current humidity threshold in the 7 MSBs.
+ */
+ tmp = ((u64)(((val + 45) * MICRO) + val2)) * 65535ULL;
+ tmp = div_u64(tmp, MICRO * 175);
+ val = tmp >> HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ val = FIELD_PREP(HDC3020_THRESH_TEMP_MASK, val);
+ val |= (FIELD_GET(HDC3020_THRESH_HUM_MASK, ret) <<
+ HDC3020_THRESH_HUM_TRUNC_SHIFT);
+ break;
+ case IIO_HUMIDITYRELATIVE:
+ /*
+ * Calculate humidity threshold, shift it down and up to get the
+ * truncated threshold representation in the 7 MSBs while keeping
+ * the current temperature threshold in the 9 LSBs.
+ */
+ tmp = ((u64)((val * MICRO) + val2)) * 65535ULL;
+ tmp = div_u64(tmp, MICRO * 100);
+ val = tmp >> HDC3020_THRESH_HUM_TRUNC_SHIFT;
+ val = FIELD_PREP(HDC3020_THRESH_HUM_MASK, val);
+ val |= FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE);
+ return hdc3020_write_bytes(data, buf, 5);
+}
+
+static int hdc3020_read_thresh(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct hdc3020_data *data = iio_priv(indio_dev);
+ u16 reg;
+ int ret;
+
+ /* Select threshold register */
+ if (info == IIO_EV_INFO_VALUE) {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_R_T_RH_THRESH_HIGH;
+ else
+ reg = HDC3020_R_T_RH_THRESH_LOW;
+ } else {
+ if (dir == IIO_EV_DIR_RISING)
+ reg = HDC3020_R_T_RH_THRESH_HIGH_CLR;
+ else
+ reg = HDC3020_R_T_RH_THRESH_LOW_CLR;
+ }
+
+ guard(mutex)(&data->lock);
+ ret = hdc3020_read_be16(data, reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_TEMP:
+ /*
+ * Get the temperature threshold from 9 LSBs, shift them to get
+ * the truncated temperature threshold representation and
+ * calculate the threshold according to the formula in the
+ * datasheet.
+ */
+ *val = FIELD_GET(HDC3020_THRESH_TEMP_MASK, ret);
+ *val = *val << HDC3020_THRESH_TEMP_TRUNC_SHIFT;
+ *val = -2949075 + (175 * (*val));
+ *val2 = 65535;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_HUMIDITYRELATIVE:
+ /*
+ * Get the humidity threshold from 7 MSBs, shift them to get the
+ * truncated humidity threshold representation and calculate the
+ * threshold according to the formula in the datasheet.
+ */
+ *val = FIELD_GET(HDC3020_THRESH_HUM_MASK, ret);
+ *val = (*val << HDC3020_THRESH_HUM_TRUNC_SHIFT) * 100;
+ *val2 = 65535;
+ return IIO_VAL_FRACTIONAL;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static irqreturn_t hdc3020_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct hdc3020_data *data;
+ s64 time;
+ int ret;
+
+ data = iio_priv(indio_dev);
+ ret = hdc3020_read_be16(data, HDC3020_R_STATUS);
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (!(ret & (HDC3020_STATUS_T_HIGH_ALERT | HDC3020_STATUS_T_LOW_ALERT |
+ HDC3020_STATUS_RH_HIGH_ALERT | HDC3020_STATUS_RH_LOW_ALERT)))
+ return IRQ_NONE;
+
+ time = iio_get_time_ns(indio_dev);
+ if (ret & HDC3020_STATUS_T_HIGH_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ time);
+
+ if (ret & HDC3020_STATUS_T_LOW_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_TEMP, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ time);
+
+ if (ret & HDC3020_STATUS_RH_HIGH_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_HUMIDITYRELATIVE, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ time);
+
+ if (ret & HDC3020_STATUS_RH_LOW_ALERT)
+ iio_push_event(indio_dev,
+ IIO_MOD_EVENT_CODE(IIO_HUMIDITYRELATIVE, 0,
+ IIO_NO_MOD,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ time);
+
+ return IRQ_HANDLED;
+}
+
static const struct iio_info hdc3020_info = {
.read_raw = hdc3020_read_raw,
.write_raw = hdc3020_write_raw,
.read_avail = hdc3020_read_available,
+ .read_event_value = hdc3020_read_thresh,
+ .write_event_value = hdc3020_write_thresh,
};
static void hdc3020_stop(void *data)
{
- hdc3020_write_bytes((struct hdc3020_data *)data, HDC3020_EXIT_AUTO, 2);
+ hdc3020_exec_cmd((struct hdc3020_data *)data, HDC3020_EXIT_AUTO);
}
static int hdc3020_probe(struct i2c_client *client)
@@ -424,8 +580,25 @@ static int hdc3020_probe(struct i2c_client *client)
indio_dev->info = &hdc3020_info;
indio_dev->channels = hdc3020_channels;
indio_dev->num_channels = ARRAY_SIZE(hdc3020_channels);
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, hdc3020_interrupt_handler,
+ IRQF_ONESHOT, "hdc3020",
+ indio_dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to request IRQ\n");
+
+ /*
+ * The alert output is activated by default upon power up,
+ * hardware reset, and soft reset. Clear the status register.
+ */
+ ret = hdc3020_exec_cmd(data, HDC3020_S_STATUS);
+ if (ret)
+ return ret;
+ }
- ret = hdc3020_write_bytes(data, HDC3020_S_AUTO_10HZ_MOD0, 2);
+ ret = hdc3020_exec_cmd(data, HDC3020_S_AUTO_10HZ_MOD0);
if (ret)
return dev_err_probe(&client->dev, ret,
"Unable to set up measurement\n");
diff --git a/drivers/iio/humidity/hts221_i2c.c b/drivers/iio/humidity/hts221_i2c.c
index 30f2068ea156..5cb263e0ef5a 100644
--- a/drivers/iio/humidity/hts221_i2c.c
+++ b/drivers/iio/humidity/hts221_i2c.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -63,7 +63,7 @@ static struct i2c_driver hts221_driver = {
.name = "hts221_i2c",
.pm = pm_sleep_ptr(&hts221_pm_ops),
.of_match_table = hts221_i2c_of_match,
- .acpi_match_table = ACPI_PTR(hts221_acpi_match),
+ .acpi_match_table = hts221_acpi_match,
},
.probe = hts221_i2c_probe,
.id_table = hts221_i2c_id_table,
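
The hts221 hunk above (and the fxos8700/kmx61 ones further down) drops the ACPI_PTR() wrapper: with CONFIG_ACPI=n that macro evaluates to NULL, leaving the match table defined but unreferenced, so drivers either had to guard the table or accept a warning. Referencing the table directly, with linux/mod_devicetable.h instead of linux/acpi.h, keeps it always used at the cost of a few bytes of data. A hedged sketch with an invented ACPI _HID:

  #include <linux/i2c.h>
  #include <linux/mod_devicetable.h>
  #include <linux/module.h>

  /* "ABCD0001" is a made-up ACPI _HID, purely for illustration. */
  static const struct acpi_device_id example_acpi_match[] = {
          { "ABCD0001" },
          { }
  };
  MODULE_DEVICE_TABLE(acpi, example_acpi_match);

  static int example_probe(struct i2c_client *client)
  {
          return 0;
  }

  static struct i2c_driver example_driver = {
          .driver = {
                  .name = "example",
                  .acpi_match_table = example_acpi_match, /* no ACPI_PTR() */
          },
          .probe = example_probe,
  };
  module_i2c_driver(example_driver);
  MODULE_LICENSE("GPL");
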
diff --git a/drivers/iio/imu/adis16475.c b/drivers/iio/imu/adis16475.c
index 64be656f0b80..01f55cc902fa 100644
--- a/drivers/iio/imu/adis16475.c
+++ b/drivers/iio/imu/adis16475.c
@@ -1363,22 +1363,16 @@ static int adis16475_config_sync_mode(struct adis16475 *st)
static int adis16475_config_irq_pin(struct adis16475 *st)
{
int ret;
- struct irq_data *desc;
u32 irq_type;
u16 val = 0;
u8 polarity;
struct spi_device *spi = st->adis.spi;
- desc = irq_get_irq_data(spi->irq);
- if (!desc) {
- dev_err(&spi->dev, "Could not find IRQ %d\n", spi->irq);
- return -EINVAL;
- }
/*
* It is possible to configure the data ready polarity. Furthermore, we
* need to update the adis struct if we want data ready as active low.
*/
- irq_type = irqd_get_trigger_type(desc);
+ irq_type = irq_get_trigger_type(spi->irq);
if (irq_type == IRQ_TYPE_EDGE_RISING) {
polarity = 1;
st->adis.irq_flag = IRQF_TRIGGER_RISING;
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index fe520194a837..b40a55bba30c 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -1246,18 +1246,11 @@ static int adis16480_config_irq_pin(struct adis16480 *st)
{
struct device *dev = &st->adis.spi->dev;
struct fwnode_handle *fwnode = dev_fwnode(dev);
- struct irq_data *desc;
enum adis16480_int_pin pin;
unsigned int irq_type;
uint16_t val;
int i, irq = 0;
- desc = irq_get_irq_data(st->adis.spi->irq);
- if (!desc) {
- dev_err(dev, "Could not find IRQ %d\n", irq);
- return -EINVAL;
- }
-
/* Disable data ready since the default after reset is on */
val = ADIS16480_DRDY_EN(0);
@@ -1285,7 +1278,7 @@ static int adis16480_config_irq_pin(struct adis16480 *st)
* configured as positive or negative, corresponding to
* IRQ_TYPE_EDGE_RISING or IRQ_TYPE_EDGE_FALLING respectively.
*/
- irq_type = irqd_get_trigger_type(desc);
+ irq_type = irq_get_trigger_type(st->adis.spi->irq);
if (irq_type == IRQ_TYPE_EDGE_RISING) { /* Default */
val |= ADIS16480_DRDY_POL(1);
} else if (irq_type == IRQ_TYPE_EDGE_FALLING) {
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
index 81652c08e644..a081305254db 100644
--- a/drivers/iio/imu/bmi160/bmi160_i2c.c
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -43,6 +43,15 @@ static const struct i2c_device_id bmi160_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
static const struct acpi_device_id bmi160_acpi_match[] = {
+ /*
+ * FIRMWARE BUG WORKAROUND
+ * Some manufacturers like GPD, Lenovo or Aya used the incorrect
+ * ID "10EC5280" for bmi160 in their DSDT. A fixed firmware is not
+ * available as of Feb 2024 after trying to work with OEMs, and
+ * this is not expected to change anymore since at least some of
+ * the affected devices are from 2021/2022.
+ */
+ {"10EC5280", 0},
{"BMI0160", 0},
{ },
};
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 183af482828f..5d42ab9b176a 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1668,52 +1668,41 @@ static int bmi323_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type),
- val, val2);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return bmi323_set_odr(data,
+ bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ unreachable();
case IIO_CHAN_INFO_SCALE:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type),
- val, val2);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return bmi323_set_scale(data,
+ bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ unreachable();
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type),
- val);
-
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return bmi323_set_average(data,
+ bmi323_iio_to_sensor(chan->type),
+ val);
+ unreachable();
case IIO_CHAN_INFO_ENABLE:
return bmi323_enable_steps(data, val);
- case IIO_CHAN_INFO_PROCESSED:
- scoped_guard(mutex, &data->mutex) {
- if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
- data->feature_events))
- return -EINVAL;
+ case IIO_CHAN_INFO_PROCESSED: {
+ guard(mutex)(&data->mutex);
- /* Clear step counter value */
- ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
- BMI323_STEP_SC1_RST_CNT_MSK,
- FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK,
- 1));
- }
- return ret;
+ if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK,
+ data->feature_events))
+ return -EINVAL;
+
+ /* Clear step counter value */
+ return bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG,
+ BMI323_STEP_SC1_RST_CNT_MSK,
+ FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK,
+ 1));
+ }
default:
return -EINVAL;
}
@@ -1724,7 +1713,6 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
@@ -1733,14 +1721,10 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ACCEL:
case IIO_ANGL_VEL:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = bmi323_read_axis(data, chan, val);
-
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY,
+ indio_dev)
+ return bmi323_read_axis(data, chan, val);
+ unreachable();
case IIO_TEMP:
return bmi323_get_temp_data(data, val);
default:
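
The bmi323 rework above replaces open-coded iio_device_claim_direct_mode()/iio_device_release_direct_mode() pairs with iio_device_claim_direct_scoped(), which releases direct mode automatically when the scope is left; the unreachable() only documents that every path inside the scope returns. A minimal hedged sketch of the idiom (the state struct and the set_odr helper are invented for the example):

  #include <linux/iio/iio.h>

  /* Hypothetical driver state and setter, only for illustration. */
  struct example_state {
          int odr;
  };

  static int example_set_odr(struct example_state *st, int val, int val2)
  {
          st->odr = val;
          return 0;
  }

  static int example_write_raw(struct iio_dev *indio_dev,
                               struct iio_chan_spec const *chan,
                               int val, int val2, long mask)
  {
          struct example_state *st = iio_priv(indio_dev);

          switch (mask) {
          case IIO_CHAN_INFO_SAMP_FREQ:
                  /*
                   * Returns -EBUSY if buffered capture currently owns the
                   * device; direct mode is dropped when the scope exits.
                   */
                  iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
                          return example_set_odr(st, val, val2);
                  unreachable();
          default:
                  return -EINVAL;
          }
  }
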
diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c
index 20a8001b9956..52140bf05765 100644
--- a/drivers/iio/imu/bmi323/bmi323_i2c.c
+++ b/drivers/iio/imu/bmi323/bmi323_i2c.c
@@ -93,6 +93,26 @@ static int bmi323_i2c_probe(struct i2c_client *i2c)
return bmi323_core_probe(dev);
}
+static const struct acpi_device_id bmi323_acpi_match[] = {
+ /*
+ * The "BOSC0200" identifier used here is not unique to bmi323 devices.
+ * The same "BOSC0200" identifier is found in the ACPI tables of devices
+ * using the bmc150 chip. This creates a conflict with duplicate ACPI
+ * identifiers which multiple drivers want to use. If a non-bmi323
+ * device starts to load with this "BOSC0200" ACPI match here, then the
+ * chip ID check portion should fail because the chip IDs received (via
+ * i2c) are unique between bmc150 and bmi323 and the driver should
+ * relinquish the device. If and when a different driver (such as
+ * bmc150) starts to load with the "BOSC0200" ACPI match, a short reset
+ * should ensure that the device is not in a bad state during that
+ * driver initialization. This device reset does occur in both the
+ * bmi323 and bmc150 init sequences.
+ */
+ { "BOSC0200" },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, bmi323_acpi_match);
+
static const struct i2c_device_id bmi323_i2c_ids[] = {
{ "bmi323" },
{ }
@@ -109,6 +129,7 @@ static struct i2c_driver bmi323_i2c_driver = {
.driver = {
.name = "bmi323",
.of_match_table = bmi323_of_i2c_match,
+ .acpi_match_table = bmi323_acpi_match,
},
.probe = bmi323_i2c_probe,
.id_table = bmi323_i2c_ids,
diff --git a/drivers/iio/imu/bno055/bno055_ser_core.c b/drivers/iio/imu/bno055/bno055_ser_core.c
index 5677bdf4f846..694ff14a3aa2 100644
--- a/drivers/iio/imu/bno055/bno055_ser_core.c
+++ b/drivers/iio/imu/bno055/bno055_ser_core.c
@@ -378,8 +378,8 @@ static void bno055_ser_handle_rx(struct bno055_ser_priv *priv, int status)
* Also, we assume to RX one pkt per time (i.e. the HW doesn't send anything
* unless we require to AND we don't queue more than one request per time).
*/
-static ssize_t bno055_ser_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t bno055_ser_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
int status;
struct bno055_ser_priv *priv = serdev_device_get_drvdata(serdev);
diff --git a/drivers/iio/imu/fxos8700_i2c.c b/drivers/iio/imu/fxos8700_i2c.c
index 2ace306d0f9a..e99677ad96a2 100644
--- a/drivers/iio/imu/fxos8700_i2c.c
+++ b/drivers/iio/imu/fxos8700_i2c.c
@@ -10,7 +10,6 @@
* 1 | 0 | 0x1C
* 1 | 1 | 0x1F
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -57,7 +56,7 @@ MODULE_DEVICE_TABLE(of, fxos8700_of_match);
static struct i2c_driver fxos8700_i2c_driver = {
.driver = {
.name = "fxos8700_i2c",
- .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .acpi_match_table = fxos8700_acpi_match,
.of_match_table = fxos8700_of_match,
},
.probe = fxos8700_i2c_probe,
diff --git a/drivers/iio/imu/fxos8700_spi.c b/drivers/iio/imu/fxos8700_spi.c
index 27e694cce173..6b0dc7a776b9 100644
--- a/drivers/iio/imu/fxos8700_spi.c
+++ b/drivers/iio/imu/fxos8700_spi.c
@@ -2,7 +2,6 @@
/*
* FXOS8700 - NXP IMU, SPI bits
*/
-#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
@@ -46,7 +45,7 @@ static struct spi_driver fxos8700_spi_driver = {
.probe = fxos8700_spi_probe,
.id_table = fxos8700_spi_id,
.driver = {
- .acpi_match_table = ACPI_PTR(fxos8700_acpi_match),
+ .acpi_match_table = fxos8700_acpi_match,
.of_match_table = fxos8700_of_match,
.name = "fxos8700_spi",
},
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index 958167b31241..7d3e061f3046 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -1514,7 +1514,7 @@ MODULE_DEVICE_TABLE(i2c, kmx61_id);
static struct i2c_driver kmx61_driver = {
.driver = {
.name = KMX61_DRV_NAME,
- .acpi_match_table = ACPI_PTR(kmx61_acpi_match),
+ .acpi_match_table = kmx61_acpi_match,
.pm = pm_ptr(&kmx61_pm_ops),
},
.probe = kmx61_probe,
diff --git a/drivers/iio/imu/st_lsm6dsx/Kconfig b/drivers/iio/imu/st_lsm6dsx/Kconfig
index 5865a295a4df..89d687ec3099 100644
--- a/drivers/iio/imu/st_lsm6dsx/Kconfig
+++ b/drivers/iio/imu/st_lsm6dsx/Kconfig
@@ -11,11 +11,32 @@ config IIO_ST_LSM6DSX
select IIO_ST_LSM6DSX_I3C if (I3C)
help
Say yes here to build support for STMicroelectronics LSM6DSx imu
- sensor. Supported devices: lsm6ds3, lsm6ds3h, lsm6dsl, lsm6dsm,
- ism330dlc, lsm6dso, lsm6dsox, asm330lhh, asm330lhhx, lsm6dsr,
- lsm6ds3tr-c, ism330dhcx, lsm6dsrx, lsm6ds0, lsm6dsop, lsm6dstx,
- lsm6dsv, lsm6dsv16x, lsm6dso16is, ism330is, asm330lhb, lsm6dst
- and the accelerometer/gyroscope of lsm9ds1.
+ sensor.
+ Supported devices:
+ - asm330lhb
+ - asm330lhh
+ - asm330lhhx
+ - asm330lhhxg1
+ - ism330dhcx
+ - ism330dlc
+ - ism330is
+ - lsm6ds0
+ - lsm6ds3
+ - lsm6ds3h
+ - lsm6ds3tr-c
+ - lsm6dsl
+ - lsm6dsm
+ - lsm6dso
+ - lsm6dso16is
+ - lsm6dsop
+ - lsm6dsox
+ - lsm6dsr
+ - lsm6dsrx
+ - lsm6dst
+ - lsm6dstx
+ - lsm6dsv
+ - lsm6dsv16x
+ - lsm9ds1
To compile this driver as a module, choose M here: the module
will be called st_lsm6dsx.
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
index c19237717e81..a3b93566533b 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.h
@@ -38,6 +38,7 @@
#define ST_LSM6DSO16IS_DEV_NAME "lsm6dso16is"
#define ST_ISM330IS_DEV_NAME "ism330is"
#define ST_ASM330LHB_DEV_NAME "asm330lhb"
+#define ST_ASM330LHHXG1_DEV_NAME "asm330lhhxg1"
enum st_lsm6dsx_hw_id {
ST_LSM6DS3_ID = 1,
@@ -63,6 +64,7 @@ enum st_lsm6dsx_hw_id {
ST_LSM6DSO16IS_ID,
ST_ISM330IS_ID,
ST_ASM330LHB_ID,
+ ST_ASM330LHHXG1_ID,
ST_LSM6DSX_MAX_ID,
};
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
index 066fe561c5e8..0a7cd8c1aa33 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_buffer.c
@@ -2,7 +2,7 @@
/*
* STMicroelectronics st_lsm6dsx FIFO buffer library driver
*
- * LSM6DS3/LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
+ * Pattern FIFO:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Samples are queued without any tag according to a
* specific pattern based on 'FIFO data sets' (6 bytes each):
@@ -14,12 +14,34 @@
* (e.g. Gx, Gy, Gz, Ax, Ay, Az), then data are repeated depending on the
* value of the decimation factor and ODR set for each FIFO data set.
*
- * LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/LSM6DSRX/ISM330DHCX/
- * LSM6DST/LSM6DSOP/LSM6DSTX/LSM6DSV/ASM330LHB:
+ * Supported devices:
+ * - ISM330DLC
+ * - LSM6DS3
+ * - LSM6DS3H
+ * - LSM6DS3TR-C
+ * - LSM6DSL
+ * - LSM6DSM
+ *
+ * Tagged FIFO:
* The FIFO buffer can be configured to store data from gyroscope and
* accelerometer. Each sample is queued with a tag (1B) indicating data
* source (gyroscope, accelerometer, hw timer).
*
+ * Supported devices:
+ * - ASM330LHB
+ * - ASM330LHH
+ * - ASM330LHHX
+ * - ASM330LHHXG1
+ * - ISM330DHCX
+ * - LSM6DSO
+ * - LSM6DSOP
+ * - LSM6DSOX
+ * - LSM6DSR
+ * - LSM6DSRX
+ * - LSM6DST
+ * - LSM6DSTX
+ * - LSM6DSV
+ *
* FIFO supported modes:
* - BYPASS: FIFO disabled
* - CONTINUOUS: FIFO enabled. When the buffer is full, the FIFO index
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
index b6e6b1df8a61..0716986f9812 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_core.c
@@ -14,34 +14,51 @@
* by a different driver.
*
* Supported sensors:
- * - LSM6DS3:
+ *
+ * - LSM6DS3
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 8KB
*
- * - LSM6DS3H/LSM6DSL/LSM6DSM/ISM330DLC/LSM6DS3TR-C:
+ * - ISM330DLC
+ * - LSM6DS3H
+ * - LSM6DS3TR-C
+ * - LSM6DSL
+ * - LSM6DSM
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 4KB
*
- * - LSM6DSO/LSM6DSOX/ASM330LHH/ASM330LHHX/LSM6DSR/ISM330DHCX/LSM6DST/LSM6DSOP/
- * LSM6DSTX/LSM6DSO16IS/ISM330IS:
+ * - ASM330LHH
+ * - ASM330LHHX
+ * - ASM330LHHXG1
+ * - ISM330DHCX
+ * - ISM330IS
+ * - LSM6DSO
+ * - LSM6DSO16IS
+ * - LSM6DSOP
+ * - LSM6DSOX
+ * - LSM6DSR
+ * - LSM6DST
+ * - LSM6DSTX
* - Accelerometer/Gyroscope supported ODR [Hz]: 12.5, 26, 52, 104, 208, 416,
* 833
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-245/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM6DSV/LSM6DSV16X:
+ * - LSM6DSV
+ * - LSM6DSV16X
* - Accelerometer/Gyroscope supported ODR [Hz]: 7.5, 15, 30, 60, 120, 240,
* 480, 960
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported full-scale [dps]: +-125/+-250/+-500/+-1000/+-2000
* - FIFO size: 3KB
*
- * - LSM9DS1/LSM6DS0:
+ * - LSM6DS0
+ * - LSM9DS1
* - Accelerometer supported ODR [Hz]: 10, 50, 119, 238, 476, 952
* - Accelerometer supported full-scale [g]: +-2/+-4/+-8/+-16
* - Gyroscope supported ODR [Hz]: 15, 60, 119, 238, 476, 952
@@ -821,6 +838,10 @@ static const struct st_lsm6dsx_settings st_lsm6dsx_sensor_settings[] = {
.name = ST_ASM330LHHX_DEV_NAME,
.wai = 0x6b,
}, {
+ .hw_id = ST_ASM330LHHXG1_ID,
+ .name = ST_ASM330LHHXG1_DEV_NAME,
+ .wai = 0x6b,
+ }, {
.hw_id = ST_LSM6DSTX_ID,
.name = ST_LSM6DSTX_DEV_NAME,
.wai = 0x6d,
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
index 911444ec57c0..cddf41cc0ca9 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.c
@@ -134,6 +134,10 @@ static const struct of_device_id st_lsm6dsx_i2c_of_match[] = {
.compatible = "st,asm330lhb",
.data = (void *)ST_ASM330LHB_ID,
},
+ {
+ .compatible = "st,asm330lhhxg1",
+ .data = (void *)ST_ASM330LHHXG1_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_i2c_of_match);
@@ -168,6 +172,7 @@ static const struct i2c_device_id st_lsm6dsx_i2c_id_table[] = {
{ ST_LSM6DSO16IS_DEV_NAME, ST_LSM6DSO16IS_ID },
{ ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID },
{ ST_ASM330LHB_DEV_NAME, ST_ASM330LHB_ID },
+ { ST_ASM330LHHXG1_DEV_NAME, ST_ASM330LHHXG1_ID },
{},
};
MODULE_DEVICE_TABLE(i2c, st_lsm6dsx_i2c_id_table);
diff --git a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
index f56c170c41a9..c122c8831365 100644
--- a/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
+++ b/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.c
@@ -129,6 +129,10 @@ static const struct of_device_id st_lsm6dsx_spi_of_match[] = {
.compatible = "st,asm330lhb",
.data = (void *)ST_ASM330LHB_ID,
},
+ {
+ .compatible = "st,asm330lhhxg1",
+ .data = (void *)ST_ASM330LHHXG1_ID,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_lsm6dsx_spi_of_match);
@@ -157,6 +161,7 @@ static const struct spi_device_id st_lsm6dsx_spi_id_table[] = {
{ ST_LSM6DSO16IS_DEV_NAME, ST_LSM6DSO16IS_ID },
{ ST_ISM330IS_DEV_NAME, ST_ISM330IS_ID },
{ ST_ASM330LHB_DEV_NAME, ST_ASM330LHB_ID },
+ { ST_ASM330LHHXG1_DEV_NAME, ST_ASM330LHHXG1_ID },
{},
};
MODULE_DEVICE_TABLE(spi, st_lsm6dsx_spi_id_table);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
index 76678cdefb07..e67d31b48441 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0.h
@@ -4,9 +4,12 @@
#ifndef ST_LSM9DS0_H
#define ST_LSM9DS0_H
-struct iio_dev;
+struct device;
+struct regmap;
struct regulator;
+struct iio_dev;
+
struct st_lsm9ds0 {
struct device *dev;
const char *name;
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
index e887b45cdbcd..10c1b2ba7a3d 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_core.c
@@ -7,10 +7,10 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/device.h>
+#include <linux/array_size.h>
+#include <linux/dev_printk.h>
#include <linux/err.h>
#include <linux/module.h>
-#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/common/st_sensors.h>
@@ -25,10 +25,9 @@ static int st_lsm9ds0_probe_accel(struct st_lsm9ds0 *lsm9ds0, struct regmap *reg
struct st_sensor_data *data;
settings = st_accel_get_settings(lsm9ds0->name);
- if (!settings) {
- dev_err(dev, "device name %s not recognized.\n", lsm9ds0->name);
- return -ENODEV;
- }
+ if (!settings)
+ return dev_err_probe(dev, -ENODEV, "device name %s not recognized.\n",
+ lsm9ds0->name);
lsm9ds0->accel = devm_iio_device_alloc(dev, sizeof(*data));
if (!lsm9ds0->accel)
@@ -51,10 +50,9 @@ static int st_lsm9ds0_probe_magn(struct st_lsm9ds0 *lsm9ds0, struct regmap *regm
struct st_sensor_data *data;
settings = st_magn_get_settings(lsm9ds0->name);
- if (!settings) {
- dev_err(dev, "device name %s not recognized.\n", lsm9ds0->name);
- return -ENODEV;
- }
+ if (!settings)
+ return dev_err_probe(dev, -ENODEV, "device name %s not recognized.\n",
+ lsm9ds0->name);
lsm9ds0->magn = devm_iio_device_alloc(dev, sizeof(*data));
if (!lsm9ds0->magn)
@@ -80,8 +78,7 @@ int st_lsm9ds0_probe(struct st_lsm9ds0 *lsm9ds0, struct regmap *regmap)
ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names),
regulator_names);
if (ret)
- return dev_err_probe(dev, ret,
- "unable to enable Vdd supply\n");
+ return dev_err_probe(dev, ret, "unable to enable Vdd supply\n");
/* Setup accelerometer device */
ret = st_lsm9ds0_probe_accel(lsm9ds0, regmap);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
index 61d855083aa0..d03cec3b24fe 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_i2c.c
@@ -7,8 +7,10 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
#include <linux/i2c.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
@@ -39,7 +41,7 @@ MODULE_DEVICE_TABLE(i2c, st_lsm9ds0_id_table);
static const struct acpi_device_id st_lsm9ds0_acpi_match[] = {
{"ACCL0001", (kernel_ulong_t)LSM303D_IMU_DEV_NAME},
- { },
+ {}
};
MODULE_DEVICE_TABLE(acpi, st_lsm9ds0_acpi_match);
diff --git a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
index 8cc041d56cf7..69e9135795a3 100644
--- a/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
+++ b/drivers/iio/imu/st_lsm9ds0/st_lsm9ds0_spi.c
@@ -7,7 +7,9 @@
* Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
*/
-#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gfp_types.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
new file mode 100644
index 000000000000..2fea2bbbe47f
--- /dev/null
+++ b/drivers/iio/industrialio-backend.c
@@ -0,0 +1,418 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Framework to handle complex IIO aggregate devices.
+ *
+ * The typical architecture is to have one device as the frontend device which
+ * can be "linked" against one or multiple backend devices. All the IIO and
+ * userspace interfaces are expected to be registered/managed by the frontend
+ * device, which will call back into the backends when needed (to get/set some
+ * configuration that it does not directly control).
+ *
+ *                                            -------------------------------------------------------
+ * ------------------                         | ------------         ------------      -------  FPGA|
+ * |     ADC        |------------------------ | | ADC CORE |---------| DMA CORE |------| RAM |      |
+ * | (Frontend/IIO) | Serial Data (eg: LVDS)  | | (backend)|---------|          |------|     |      |
+ * |                |------------------------ | ------------         ------------      -------      |
+ * ------------------                         -------------------------------------------------------
+ *
+ * The framework interface is pretty simple:
+ * - Backends should register themselves with devm_iio_backend_register()
+ * - Frontend devices should get backends with devm_iio_backend_get()
+ *
+ * Also note that the primary targets for this framework are converters like
+ * ADCs/DACs, so iio_backend_ops will have some operations typical of converter
+ * devices. On top of that, this is "generic" for all of IIO, which means any
+ * kind of device can make use of the framework. That said, if the iio_backend_ops
+ * struct begins to grow out of control, we can always refactor things so that
+ * the industrialio-backend.c is only left with the really generic stuff. Then,
+ * we can build on top of it depending on the needs.
+ *
+ * Copyright (C) 2023-2024 Analog Devices Inc.
+ */
+#define dev_fmt(fmt) "iio-backend: " fmt
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <linux/iio/backend.h>
+
+struct iio_backend {
+ struct list_head entry;
+ const struct iio_backend_ops *ops;
+ struct device *dev;
+ struct module *owner;
+ void *priv;
+};
+
+/*
+ * Helper struct for requesting buffers. This ensures that we have all data
+ * that we need to free the buffer in a device managed action.
+ */
+struct iio_backend_buffer_pair {
+ struct iio_backend *back;
+ struct iio_buffer *buffer;
+};
+
+static LIST_HEAD(iio_back_list);
+static DEFINE_MUTEX(iio_back_lock);
+
+/*
+ * Helper macros to call backend ops. They make sure the operation is supported.
+ */
+#define iio_backend_check_op(back, op) ({ \
+ struct iio_backend *____back = back; \
+ int ____ret = 0; \
+ \
+ if (!____back->ops->op) \
+ ____ret = -EOPNOTSUPP; \
+ \
+ ____ret; \
+})
+
+#define iio_backend_op_call(back, op, args...) ({ \
+ struct iio_backend *__back = back; \
+ int __ret; \
+ \
+ __ret = iio_backend_check_op(__back, op); \
+ if (!__ret) \
+ __ret = __back->ops->op(__back, ##args); \
+ \
+ __ret; \
+})
+
+#define iio_backend_ptr_op_call(back, op, args...) ({ \
+ struct iio_backend *__back = back; \
+ void *ptr_err; \
+ int __ret; \
+ \
+ __ret = iio_backend_check_op(__back, op); \
+ if (__ret) \
+ ptr_err = ERR_PTR(__ret); \
+ else \
+ ptr_err = __back->ops->op(__back, ##args); \
+ \
+ ptr_err; \
+})
+
+#define iio_backend_void_op_call(back, op, args...) { \
+ struct iio_backend *__back = back; \
+ int __ret; \
+ \
+ __ret = iio_backend_check_op(__back, op); \
+ if (!__ret) \
+ __back->ops->op(__back, ##args); \
+}
+
+/**
+ * iio_backend_chan_enable - Enable a backend channel
+ * @back: Backend device
+ * @chan: Channel number
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
+{
+ return iio_backend_op_call(back, chan_enable, chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);
+
+/**
+ * iio_backend_chan_disable - Disable a backend channel
+ * @back: Backend device
+ * @chan: Channel number
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
+{
+ return iio_backend_op_call(back, chan_disable, chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);
+
+static void __iio_backend_disable(void *back)
+{
+ iio_backend_void_op_call(back, disable);
+}
+
+/**
+ * devm_iio_backend_enable - Device managed backend enable
+ * @dev: Consumer device for the backend
+ * @back: Backend device
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
+{
+ int ret;
+
+ ret = iio_backend_op_call(back, enable);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, __iio_backend_disable, back);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);
+
+/**
+ * iio_backend_data_format_set - Configure the channel data format
+ * @back: Backend device
+ * @chan: Channel number
+ * @data: Data format
+ *
+ * Properly configure a channel with respect to the expected data format. A
+ * @struct iio_backend_data_fmt must be passed with the settings.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
+ const struct iio_backend_data_fmt *data)
+{
+ if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
+ return -EINVAL;
+
+ return iio_backend_op_call(back, data_format_set, chan, data);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);
+
+static void iio_backend_free_buffer(void *arg)
+{
+ struct iio_backend_buffer_pair *pair = arg;
+
+ iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
+}
+
+/**
+ * devm_iio_backend_request_buffer - Device managed buffer request
+ * @dev: Consumer device for the backend
+ * @back: Backend device
+ * @indio_dev: IIO device
+ *
+ * Request an IIO buffer from the backend. The type of the buffer (typically
+ * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
+ * normally, the backend dictates what kind of buffering we can get.
+ *
+ * The backend .free_buffer() hook is automatically called on @dev detach.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_backend_request_buffer(struct device *dev,
+ struct iio_backend *back,
+ struct iio_dev *indio_dev)
+{
+ struct iio_backend_buffer_pair *pair;
+ struct iio_buffer *buffer;
+
+ pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
+ if (!pair)
+ return -ENOMEM;
+
+ buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ /* a weak reference should be all we need */
+ pair->back = back;
+ pair->buffer = buffer;
+
+ return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);
+
+static void iio_backend_release(void *arg)
+{
+ struct iio_backend *back = arg;
+
+ module_put(back->owner);
+}
+
+static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
+{
+ struct device_link *link;
+ int ret;
+
+ /*
+ * Make sure the provider cannot be unloaded before the consumer module.
+ * Note that device_links would still guarantee that nothing is
+ * accessible (and breaks) but this makes it explicit that the consumer
+ * module must also be unloaded.
+ */
+ if (!try_module_get(back->owner))
+ return dev_err_probe(dev, -ENODEV,
+ "Cannot get module reference\n");
+
+ ret = devm_add_action_or_reset(dev, iio_backend_release, back);
+ if (ret)
+ return ret;
+
+ link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
+ if (!link)
+ return dev_err_probe(dev, -EINVAL,
+ "Could not link to supplier(%s)\n",
+ dev_name(back->dev));
+
+ dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));
+
+ return 0;
+}
+
+/**
+ * devm_iio_backend_get - Device managed backend device get
+ * @dev: Consumer device for the backend
+ * @name: Backend name
+ *
+ * Gets the backend associated with @dev.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
+{
+ struct fwnode_handle *fwnode;
+ struct iio_backend *back;
+ unsigned int index;
+ int ret;
+
+ if (name) {
+ ret = device_property_match_string(dev, "io-backend-names",
+ name);
+ if (ret < 0)
+ return ERR_PTR(ret);
+ index = ret;
+ } else {
+ index = 0;
+ }
+
+ fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
+ if (IS_ERR(fwnode)) {
+ dev_err_probe(dev, PTR_ERR(fwnode),
+ "Cannot get Firmware reference\n");
+ return ERR_CAST(fwnode);
+ }
+
+ guard(mutex)(&iio_back_lock);
+ list_for_each_entry(back, &iio_back_list, entry) {
+ if (!device_match_fwnode(back->dev, fwnode))
+ continue;
+
+ fwnode_handle_put(fwnode);
+ ret = __devm_iio_backend_get(dev, back);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return back;
+ }
+
+ fwnode_handle_put(fwnode);
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
+
+/**
+ * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
+ * @dev: Consumer device for the backend
+ * @fwnode: Firmware node of the backend device
+ *
+ * Search the backend list for a device matching @fwnode.
+ * This API should not be used and it's only present for preventing the first
+ * user of this framework to break it's DT ABI.
+ *
+ * RETURNS:
+ * A backend pointer, negative error pointer otherwise.
+ */
+struct iio_backend *
+__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
+ struct fwnode_handle *fwnode)
+{
+ struct iio_backend *back;
+ int ret;
+
+ guard(mutex)(&iio_back_lock);
+ list_for_each_entry(back, &iio_back_list, entry) {
+ if (!device_match_fwnode(back->dev, fwnode))
+ continue;
+
+ ret = __devm_iio_backend_get(dev, back);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return back;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);
+
+/**
+ * iio_backend_get_priv - Get driver private data
+ * @back: Backend device
+ */
+void *iio_backend_get_priv(const struct iio_backend *back)
+{
+ return back->priv;
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);
+
+static void iio_backend_unregister(void *arg)
+{
+ struct iio_backend *back = arg;
+
+ guard(mutex)(&iio_back_lock);
+ list_del(&back->entry);
+}
+
+/**
+ * devm_iio_backend_register - Device managed backend device register
+ * @dev: Backend device being registered
+ * @ops: Backend ops
+ * @priv: Device private data
+ *
+ * @ops is mandatory. Not providing it results in -EINVAL.
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int devm_iio_backend_register(struct device *dev,
+ const struct iio_backend_ops *ops, void *priv)
+{
+ struct iio_backend *back;
+
+ if (!ops)
+ return dev_err_probe(dev, -EINVAL, "No backend ops given\n");
+
+ /*
+ * Through device_links, we guarantee that a frontend device cannot be
+ * bound/exist if the backend driver is not around. Hence, we can bind
+ * the backend object lifetime with the device being passed since
+ * removing it will tear the frontend/consumer down.
+ */
+ back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
+ if (!back)
+ return -ENOMEM;
+
+ back->ops = ops;
+ back->owner = dev->driver->owner;
+ back->dev = dev;
+ back->priv = priv;
+ scoped_guard(mutex, &iio_back_lock)
+ list_add(&back->entry, &iio_back_list);
+
+ return devm_add_action_or_reset(dev, iio_backend_unregister, back);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
+MODULE_LICENSE("GPL");
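
The header comment of the new file describes the frontend/backend split; from the consumer side the flow is easiest to see as code. The following sketch is illustrative only (the function name and the channel number are invented): a hypothetical frontend resolves its backend through the "io-backends" firmware reference, enables it with device-managed lifetime, and asks it for a buffer, using only the helpers exported above.

  #include <linux/device.h>
  #include <linux/err.h>
  #include <linux/iio/backend.h>
  #include <linux/iio/iio.h>

  static int example_frontend_setup(struct device *dev, struct iio_dev *indio_dev)
  {
          struct iio_backend *back;
          int ret;

          /* Resolves the "io-backends" reference (index 0 when no name is given). */
          back = devm_iio_backend_get(dev, NULL);
          if (IS_ERR(back))
                  return PTR_ERR(back);

          /* The backend is disabled again automatically when this device detaches. */
          ret = devm_iio_backend_enable(dev, back);
          if (ret)
                  return ret;

          ret = iio_backend_chan_enable(back, 0);
          if (ret)
                  return ret;

          /* Typically yields an INDIO_BUFFER_HARDWARE buffer owned by the backend. */
          return devm_iio_backend_request_buffer(dev, back, indio_dev);
  }

On the provider side, the matching driver simply calls devm_iio_backend_register(dev, &ops, priv) from its own probe, which is what places it on the iio_back_list searched by devm_iio_backend_get().
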
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 173dc00762a1..4302093b92c7 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -42,7 +42,7 @@ static DEFINE_IDA(iio_ida);
static dev_t iio_devt;
#define IIO_DEV_MAX 256
-struct bus_type iio_bus_type = {
+const struct bus_type iio_bus_type = {
.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);
@@ -213,9 +213,7 @@ bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
- return iio_dev_opaque->currentmode &
- (INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE |
- INDIO_BUFFER_TRIGGERED);
+ return iio_dev_opaque->currentmode & INDIO_ALL_BUFFER_MODES;
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index 7653261d2dc2..b51eb6cb766f 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -34,24 +34,11 @@
static int iio_gts_get_gain(const u64 max, const u64 scale)
{
u64 full = max;
- int tmp = 1;
if (scale > full || !scale)
return -EINVAL;
- if (U64_MAX - full < scale) {
- /* Risk of overflow */
- if (full - scale < scale)
- return 1;
-
- full -= scale;
- tmp++;
- }
-
- while (full > scale * (u64)tmp)
- tmp++;
-
- return tmp;
+ return div64_u64(full, scale);
}
/**
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 143003232d1c..fd5a9879a582 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -87,13 +87,14 @@ config APDS9960
module will be called apds9960
config AS73211
- tristate "AMS AS73211 XYZ color sensor"
+ tristate "AMS AS73211 XYZ color sensor and AMS AS7331 UV sensor"
depends on I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
If you say yes here you get support for the AMS AS73211
- JENCOLOR(R) Digital XYZ Sensor.
+ JENCOLOR(R) Digital XYZ and the AMS AS7331 UVA, UVB and UVC
+ ultraviolet sensors.
For triggered measurements, you will need an additional trigger driver
like IIO_HRTIMER_TRIGGER or IIO_SYSFS_TRIGGER.
diff --git a/drivers/iio/light/al3010.c b/drivers/iio/light/al3010.c
index 8f0119f392b7..53569587ccb7 100644
--- a/drivers/iio/light/al3010.c
+++ b/drivers/iio/light/al3010.c
@@ -17,7 +17,7 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/al3320a.c b/drivers/iio/light/al3320a.c
index d5957d85c278..105f379b9b41 100644
--- a/drivers/iio/light/al3320a.c
+++ b/drivers/iio/light/al3320a.c
@@ -15,7 +15,6 @@
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/mod_devicetable.h>
#include <linux/iio/iio.h>
diff --git a/drivers/iio/light/as73211.c b/drivers/iio/light/as73211.c
index ec97a3a46839..be0068081ebb 100644
--- a/drivers/iio/light/as73211.c
+++ b/drivers/iio/light/as73211.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Support for AMS AS73211 JENCOLOR(R) Digital XYZ Sensor
+ * Support for AMS AS73211 JENCOLOR(R) Digital XYZ Sensor and AMS AS7331
+ * UVA, UVB and UVC (DUV) Ultraviolet Sensor
*
* Author: Christian Eggers <ceggers@arri.de>
*
@@ -9,7 +10,9 @@
* Color light sensor with 16-bit channels for x, y, z and temperature);
* 7-bit I2C slave address 0x74 .. 0x77.
*
- * Datasheet: https://ams.com/documents/20143/36005/AS73211_DS000556_3-01.pdf
+ * Datasheets:
+ * AS73211: https://ams.com/documents/20143/36005/AS73211_DS000556_3-01.pdf
+ * AS7331: https://ams.com/documents/20143/9106314/AS7331_DS001047_4-00.pdf
*/
#include <linux/bitfield.h>
@@ -84,6 +87,20 @@ static const int as73211_hardwaregain_avail[] = {
1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048,
};
+struct as73211_data;
+
+/**
+ * struct as73211_spec_dev_data - device-specific data
+ * @intensity_scale: Function to retrieve intensity scale values.
+ * @channels: Device channels.
+ * @num_channels: Number of channels of the device.
+ */
+struct as73211_spec_dev_data {
+ int (*intensity_scale)(struct as73211_data *data, int chan, int *val, int *val2);
+ struct iio_chan_spec const *channels;
+ int num_channels;
+};
+
/**
* struct as73211_data - Instance data for one AS73211
* @client: I2C client.
@@ -94,6 +111,7 @@ static const int as73211_hardwaregain_avail[] = {
* @mutex: Keeps cached registers in sync with the device.
* @completion: Completion to wait for interrupt.
* @int_time_avail: Available integration times (depend on sampling frequency).
+ * @spec_dev: device-specific configuration.
*/
struct as73211_data {
struct i2c_client *client;
@@ -104,6 +122,7 @@ struct as73211_data {
struct mutex mutex;
struct completion completion;
int int_time_avail[AS73211_SAMPLE_TIME_NUM * 2];
+ const struct as73211_spec_dev_data *spec_dev;
};
#define AS73211_COLOR_CHANNEL(_color, _si, _addr) { \
@@ -138,6 +157,10 @@ struct as73211_data {
#define AS73211_SCALE_Y 298384270 /* nW/m^2 */
#define AS73211_SCALE_Z 160241927 /* nW/m^2 */
+#define AS7331_SCALE_UVA 340000 /* nW/cm^2 */
+#define AS7331_SCALE_UVB 378000 /* nW/cm^2 */
+#define AS7331_SCALE_UVC 166000 /* nW/cm^2 */
+
/* Channel order MUST match devices result register order */
#define AS73211_SCAN_INDEX_TEMP 0
#define AS73211_SCAN_INDEX_X 1
@@ -176,6 +199,28 @@ static const struct iio_chan_spec as73211_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(AS73211_SCAN_INDEX_TS),
};
+static const struct iio_chan_spec as7331_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate =
+ BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .address = AS73211_OUT_TEMP,
+ .scan_index = AS73211_SCAN_INDEX_TEMP,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_LE,
+ }
+ },
+ AS73211_COLOR_CHANNEL(LIGHT_UVA, AS73211_SCAN_INDEX_X, AS73211_OUT_MRES1),
+ AS73211_COLOR_CHANNEL(LIGHT_UVB, AS73211_SCAN_INDEX_Y, AS73211_OUT_MRES2),
+ AS73211_COLOR_CHANNEL(LIGHT_DUV, AS73211_SCAN_INDEX_Z, AS73211_OUT_MRES3),
+ IIO_CHAN_SOFT_TIMESTAMP(AS73211_SCAN_INDEX_TS),
+};
+
static unsigned int as73211_integration_time_1024cyc(struct as73211_data *data)
{
/*
@@ -316,6 +361,48 @@ static int as73211_req_data(struct as73211_data *data)
return 0;
}
+static int as73211_intensity_scale(struct as73211_data *data, int chan,
+ int *val, int *val2)
+{
+ switch (chan) {
+ case IIO_MOD_X:
+ *val = AS73211_SCALE_X;
+ break;
+ case IIO_MOD_Y:
+ *val = AS73211_SCALE_Y;
+ break;
+ case IIO_MOD_Z:
+ *val = AS73211_SCALE_Z;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val2 = as73211_integration_time_1024cyc(data) * as73211_gain(data);
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int as7331_intensity_scale(struct as73211_data *data, int chan,
+ int *val, int *val2)
+{
+ switch (chan) {
+ case IIO_MOD_LIGHT_UVA:
+ *val = AS7331_SCALE_UVA;
+ break;
+ case IIO_MOD_LIGHT_UVB:
+ *val = AS7331_SCALE_UVB;
+ break;
+ case IIO_MOD_LIGHT_DUV:
+ *val = AS7331_SCALE_UVC;
+ break;
+ default:
+ return -EINVAL;
+ }
+ *val2 = as73211_integration_time_1024cyc(data) * as73211_gain(data);
+
+ return IIO_VAL_FRACTIONAL;
+}
+
static int as73211_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
@@ -355,30 +442,13 @@ static int as73211_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec cons
*val2 = AS73211_SCALE_TEMP_MICRO;
return IIO_VAL_INT_PLUS_MICRO;
- case IIO_INTENSITY: {
- unsigned int scale;
-
- switch (chan->channel2) {
- case IIO_MOD_X:
- scale = AS73211_SCALE_X;
- break;
- case IIO_MOD_Y:
- scale = AS73211_SCALE_Y;
- break;
- case IIO_MOD_Z:
- scale = AS73211_SCALE_Z;
- break;
- default:
- return -EINVAL;
- }
- scale /= as73211_gain(data);
- scale /= as73211_integration_time_1024cyc(data);
- *val = scale;
- return IIO_VAL_INT;
+ case IIO_INTENSITY:
+ return data->spec_dev->intensity_scale(data, chan->channel2,
+ val, val2);
default:
return -EINVAL;
- }}
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
/* f_samp is configured in CREG3 in powers of 2 (x 1.024 MHz) */
@@ -676,13 +746,17 @@ static int as73211_probe(struct i2c_client *client)
i2c_set_clientdata(client, indio_dev);
data->client = client;
+ data->spec_dev = i2c_get_match_data(client);
+ if (!data->spec_dev)
+ return -EINVAL;
+
mutex_init(&data->mutex);
init_completion(&data->completion);
indio_dev->info = &as73211_info;
indio_dev->name = AS73211_DRV_NAME;
- indio_dev->channels = as73211_channels;
- indio_dev->num_channels = ARRAY_SIZE(as73211_channels);
+ indio_dev->channels = data->spec_dev->channels;
+ indio_dev->num_channels = data->spec_dev->num_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = i2c_smbus_read_byte_data(data->client, AS73211_REG_OSR);
@@ -772,14 +846,28 @@ static int as73211_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(as73211_pm_ops, as73211_suspend,
as73211_resume);
+static const struct as73211_spec_dev_data as73211_spec = {
+ .intensity_scale = as73211_intensity_scale,
+ .channels = as73211_channels,
+ .num_channels = ARRAY_SIZE(as73211_channels),
+};
+
+static const struct as73211_spec_dev_data as7331_spec = {
+ .intensity_scale = as7331_intensity_scale,
+ .channels = as7331_channels,
+ .num_channels = ARRAY_SIZE(as7331_channels),
+};
+
static const struct of_device_id as73211_of_match[] = {
- { .compatible = "ams,as73211" },
+ { .compatible = "ams,as73211", &as73211_spec },
+ { .compatible = "ams,as7331", &as7331_spec },
{ }
};
MODULE_DEVICE_TABLE(of, as73211_of_match);
static const struct i2c_device_id as73211_id[] = {
- { "as73211", 0 },
+ { "as73211", (kernel_ulong_t)&as73211_spec },
+ { "as7331", (kernel_ulong_t)&as7331_spec },
{ }
};
MODULE_DEVICE_TABLE(i2c, as73211_id);
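The AS7331 scale path above returns IIO_VAL_FRACTIONAL with the per-channel constant in val and integration-time * gain in val2, so the value exposed to userspace is val / val2. A minimal sketch of that arithmetic follows; it is not part of the patch, and the gain of 16 and integration-time factor of 1 are illustrative assumptions:

/* Sketch: how IIO_VAL_FRACTIONAL combines AS7331_SCALE_UVA with gain and
 * integration time. Only the 340000 constant is taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	long long val = 340000;   /* AS7331_SCALE_UVA */
	long long val2 = 1 * 16;  /* assumed integration-time factor * hardware gain */

	/* IIO_VAL_FRACTIONAL: the reported scale is val / val2 */
	printf("UVA scale = %.3f\n", (double)val / (double)val2);
	return 0;
}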
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index b6c4bef2a7bb..260281194f61 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -14,8 +14,11 @@
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
- CHANNEL_SCAN_INDEX_INTENSITY = 0,
- CHANNEL_SCAN_INDEX_ILLUM = 1,
+ CHANNEL_SCAN_INDEX_INTENSITY,
+ CHANNEL_SCAN_INDEX_ILLUM,
+ CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_X,
+ CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
CHANNEL_SCAN_INDEX_MAX
};
@@ -25,6 +28,7 @@ struct als_state {
struct hid_sensor_hub_callbacks callbacks;
struct hid_sensor_common common_attributes;
struct hid_sensor_hub_attribute_info als[CHANNEL_SCAN_INDEX_MAX];
+ struct iio_chan_spec channels[CHANNEL_SCAN_INDEX_MAX + 1];
struct {
u32 illum[CHANNEL_SCAN_INDEX_MAX];
u64 timestamp __aligned(8);
@@ -33,7 +37,18 @@ struct als_state {
int scale_post_decml;
int scale_precision;
int value_offset;
+ int num_channels;
s64 timestamp;
+ unsigned long als_scan_mask[2];
+};
+
+/* The order of usage ids must match scan index starting from CHANNEL_SCAN_INDEX_INTENSITY */
+static const u32 als_usage_ids[] = {
+ HID_USAGE_SENSOR_LIGHT_ILLUM,
+ HID_USAGE_SENSOR_LIGHT_ILLUM,
+ HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE,
+ HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X,
+ HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y,
};
static const u32 als_sensitivity_addresses[] = {
@@ -65,6 +80,40 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
},
+ {
+ .type = IIO_COLORTEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP,
+ },
+ {
+ .type = IIO_CHROMATICITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_X,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X,
+ },
+ {
+ .type = IIO_CHROMATICITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_Y,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS) |
+ BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
+ .scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
+ },
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
@@ -103,6 +152,21 @@ static int als_read_raw(struct iio_dev *indio_dev,
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
+ case CHANNEL_SCAN_INDEX_COLOR_TEMP:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE;
+ break;
+ case CHANNEL_SCAN_INDEX_CHROMATICITY_X:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X;
+ break;
+ case CHANNEL_SCAN_INDEX_CHROMATICITY_Y:
+ report_id = als_state->als[chan->scan_index].report_id;
+ min = als_state->als[chan->scan_index].logical_minimum;
+ address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y;
+ break;
default:
report_id = -1;
break;
@@ -223,6 +287,18 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
+ case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data;
+ ret = 0;
+ break;
+ case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data;
+ ret = 0;
+ break;
+ case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y:
+ als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data;
+ ret = 0;
+ break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
@@ -238,27 +314,38 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
/* Parse report which is specific to a usage id */
static int als_parse_report(struct platform_device *pdev,
struct hid_sensor_hub_device *hsdev,
- struct iio_chan_spec *channels,
unsigned usage_id,
struct als_state *st)
{
- int ret;
+ struct iio_chan_spec *channels;
+ int ret, index = 0;
int i;
- for (i = 0; i <= CHANNEL_SCAN_INDEX_ILLUM; ++i) {
+ channels = st->channels;
+
+ for (i = 0; i < CHANNEL_SCAN_INDEX_MAX; ++i) {
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT,
usage_id,
- HID_USAGE_SENSOR_LIGHT_ILLUM,
+ als_usage_ids[i],
&st->als[i]);
if (ret < 0)
- return ret;
- als_adjust_channel_bit_mask(channels, i, st->als[i].size);
+ continue;
+
+ channels[index] = als_channels[i];
+ st->als_scan_mask[0] |= BIT(i);
+ als_adjust_channel_bit_mask(channels, index, st->als[i].size);
+ ++index;
dev_dbg(&pdev->dev, "als %x:%x\n", st->als[i].index,
st->als[i].report_id);
}
+ st->num_channels = index;
+ /* Return success if at least one usage id is present */
+ if (index)
+ ret = 0;
+
st->scale_precision = hid_sensor_format_scale(usage_id,
&st->als[CHANNEL_SCAN_INDEX_INTENSITY],
&st->scale_pre_decml, &st->scale_post_decml);
@@ -294,15 +381,7 @@ static int hid_als_probe(struct platform_device *pdev)
return ret;
}
- indio_dev->channels = devm_kmemdup(&pdev->dev, als_channels,
- sizeof(als_channels), GFP_KERNEL);
- if (!indio_dev->channels) {
- dev_err(&pdev->dev, "failed to duplicate channels\n");
- return -ENOMEM;
- }
-
ret = als_parse_report(pdev, hsdev,
- (struct iio_chan_spec *)indio_dev->channels,
hsdev->usage,
als_state);
if (ret) {
@@ -310,8 +389,15 @@ static int hid_als_probe(struct platform_device *pdev)
return ret;
}
- indio_dev->num_channels =
- ARRAY_SIZE(als_channels);
+ /* Add timestamp channel */
+ als_state->channels[als_state->num_channels] = als_channels[CHANNEL_SCAN_INDEX_TIMESTAMP];
+
+ /* +1 for adding timestamp channel */
+ indio_dev->num_channels = als_state->num_channels + 1;
+
+ indio_dev->channels = als_state->channels;
+ indio_dev->available_scan_masks = als_state->als_scan_mask;
+
indio_dev->info = &als_info;
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
diff --git a/drivers/iio/light/jsa1212.c b/drivers/iio/light/jsa1212.c
index 37e2807041a1..869196746045 100644
--- a/drivers/iio/light/jsa1212.c
+++ b/drivers/iio/light/jsa1212.c
@@ -12,10 +12,10 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/mutex.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -438,7 +438,7 @@ static struct i2c_driver jsa1212_driver = {
.driver = {
.name = JSA1212_DRIVER_NAME,
.pm = pm_sleep_ptr(&jsa1212_pm_ops),
- .acpi_match_table = ACPI_PTR(jsa1212_acpi_match),
+ .acpi_match_table = jsa1212_acpi_match,
},
.probe = jsa1212_probe,
.remove = jsa1212_remove,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 061c122fdc5e..8c516ede9116 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/err.h>
#include <linux/delay.h>
@@ -1639,7 +1640,7 @@ static struct i2c_driver ltr501_driver = {
.name = LTR501_DRV_NAME,
.of_match_table = ltr501_of_match,
.pm = pm_sleep_ptr(&ltr501_pm_ops),
- .acpi_match_table = ACPI_PTR(ltr_acpi_match),
+ .acpi_match_table = ltr_acpi_match,
},
.probe = ltr501_probe,
.remove = ltr501_remove,
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
index db96c5b73100..26b464b1b650 100644
--- a/drivers/iio/light/max44000.c
+++ b/drivers/iio/light/max44000.c
@@ -10,6 +10,7 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
@@ -19,7 +20,6 @@
#include <linux/iio/buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
-#include <linux/acpi.h>
#define MAX44000_DRV_NAME "max44000"
@@ -603,18 +603,16 @@ static const struct i2c_device_id max44000_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max44000_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id max44000_acpi_match[] = {
{"MAX44000", 0},
{ }
};
MODULE_DEVICE_TABLE(acpi, max44000_acpi_match);
-#endif
static struct i2c_driver max44000_driver = {
.driver = {
.name = MAX44000_DRV_NAME,
- .acpi_match_table = ACPI_PTR(max44000_acpi_match),
+ .acpi_match_table = max44000_acpi_match,
},
.probe = max44000_probe,
.id_table = max44000_id,
diff --git a/drivers/iio/light/rpr0521.c b/drivers/iio/light/rpr0521.c
index bbb8581622f2..40d5732b5e32 100644
--- a/drivers/iio/light/rpr0521.c
+++ b/drivers/iio/light/rpr0521.c
@@ -10,11 +10,11 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/delay.h>
-#include <linux/acpi.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer.h>
@@ -1119,7 +1119,7 @@ static struct i2c_driver rpr0521_driver = {
.driver = {
.name = RPR0521_DRV_NAME,
.pm = pm_ptr(&rpr0521_pm_ops),
- .acpi_match_table = ACPI_PTR(rpr0521_acpi_match),
+ .acpi_match_table = rpr0521_acpi_match,
},
.probe = rpr0521_probe,
.remove = rpr0521_remove,
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 72b08d870d33..7b71ad71d78d 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -7,11 +7,11 @@
* IIO driver for STK3310/STK3311. 7-bit I2C address: 0x48.
*/
-#include <linux/acpi.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
@@ -712,7 +712,7 @@ static struct i2c_driver stk3310_driver = {
.name = "stk3310",
.of_match_table = stk3310_of_match,
.pm = pm_sleep_ptr(&stk3310_pm_ops),
- .acpi_match_table = ACPI_PTR(stk3310_acpi_id),
+ .acpi_match_table = stk3310_acpi_id,
},
.probe = stk3310_probe,
.remove = stk3310_remove,
diff --git a/drivers/iio/light/us5182d.c b/drivers/iio/light/us5182d.c
index 61b3b2aea626..9189a1d4d7e1 100644
--- a/drivers/iio/light/us5182d.c
+++ b/drivers/iio/light/us5182d.c
@@ -9,7 +9,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/acpi.h>
+#include <linux/mod_devicetable.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/iio/events.h>
@@ -972,7 +972,7 @@ static struct i2c_driver us5182d_driver = {
.name = US5182D_DRV_NAME,
.pm = pm_ptr(&us5182d_pm_ops),
.of_match_table = us5182d_of_match,
- .acpi_match_table = ACPI_PTR(us5182d_acpi_match),
+ .acpi_match_table = us5182d_acpi_match,
},
.probe = us5182d_probe,
.remove = us5182d_remove,
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index fdf763a04b0b..4e3641ff2ed4 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -90,6 +90,7 @@
#define VCNL4040_PS_CONF1_PS_SHUTDOWN BIT(0)
#define VCNL4040_PS_CONF2_PS_IT GENMASK(3, 1) /* Proximity integration time */
#define VCNL4040_CONF1_PS_PERS GENMASK(5, 4) /* Proximity interrupt persistence setting */
+#define VCNL4040_PS_CONF2_PS_HD BIT(11) /* Proximity high definition */
#define VCNL4040_PS_CONF2_PS_INT GENMASK(9, 8) /* Proximity interrupt mode */
#define VCNL4040_PS_CONF3_MPS GENMASK(6, 5) /* Proximity multi pulse number */
#define VCNL4040_PS_MS_LED_I GENMASK(10, 8) /* Proximity current */
@@ -114,6 +115,13 @@
#define VCNL4010_INT_DRDY \
(BIT(VCNL4010_INT_PROXIMITY) | BIT(VCNL4010_INT_ALS))
+#define VCNL4040_CONF3_PS_MPS_16BITS 3 /* 8 multi pulses */
+#define VCNL4040_CONF3_PS_LED_I_16BITS 3 /* 120 mA */
+
+#define VCNL4040_CONF3_PS_SAMPLE_16BITS \
+ (FIELD_PREP(VCNL4040_PS_CONF3_MPS, VCNL4040_CONF3_PS_MPS_16BITS) | \
+ FIELD_PREP(VCNL4040_PS_MS_LED_I, VCNL4040_CONF3_PS_LED_I_16BITS))
+
static const int vcnl4010_prox_sampling_frequency[][2] = {
{1, 950000},
{3, 906250},
@@ -195,6 +203,7 @@ struct vcnl4000_data {
enum vcnl4000_device_ids id;
int rev;
int al_scale;
+ int ps_scale;
u8 ps_int; /* proximity interrupt mode */
u8 als_int; /* ambient light interrupt mode*/
const struct vcnl4000_chip_spec *chip_spec;
@@ -345,6 +354,7 @@ static int vcnl4200_set_power_state(struct vcnl4000_data *data, bool on)
static int vcnl4200_init(struct vcnl4000_data *data)
{
int ret, id;
+ u16 regval;
ret = i2c_smbus_read_word_data(data->client, VCNL4200_DEV_ID);
if (ret < 0)
@@ -386,9 +396,32 @@ static int vcnl4200_init(struct vcnl4000_data *data)
break;
}
data->al_scale = data->chip_spec->ulux_step;
+ data->ps_scale = 16;
mutex_init(&data->vcnl4200_al.lock);
mutex_init(&data->vcnl4200_ps.lock);
+ /* Use 16-bit proximity sensor readings */
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF1);
+ if (ret < 0)
+ return ret;
+
+ regval = ret | VCNL4040_PS_CONF2_PS_HD;
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF1,
+ regval);
+ if (ret < 0)
+ return ret;
+
+ /* Align proximity sensor sample rate to 16-bit data width */
+ ret = i2c_smbus_read_word_data(data->client, VCNL4200_PS_CONF3);
+ if (ret < 0)
+ return ret;
+
+ regval = ret | VCNL4040_CONF3_PS_SAMPLE_16BITS;
+ ret = i2c_smbus_write_word_data(data->client, VCNL4200_PS_CONF3,
+ regval);
+ if (ret < 0)
+ return ret;
+
ret = data->chip_spec->set_power_state(data, true);
if (ret < 0)
return ret;
@@ -901,8 +934,9 @@ static int vcnl4000_read_raw(struct iio_dev *indio_dev,
break;
case IIO_PROXIMITY:
ret = data->chip_spec->measure_proximity(data, val);
+ *val2 = data->ps_scale;
if (!ret)
- ret = IIO_VAL_INT;
+ ret = IIO_VAL_FRACTIONAL;
break;
default:
ret = -EINVAL;
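With the PS_HD bit set above, the proximity result is 16 bits wide and read_raw reports it as raw / ps_scale with ps_scale = 16 (IIO_VAL_FRACTIONAL), which keeps the reported range comparable to the earlier 12-bit mode. A minimal sketch, not part of the patch; the raw sample value is an assumption:

/* Sketch: fractional proximity reporting with ps_scale = 16. */
#include <stdio.h>

int main(void)
{
	int raw = 4096;      /* assumed 16-bit proximity count */
	int ps_scale = 16;   /* data->ps_scale set in vcnl4200_init() */

	printf("proximity = %d / %d = %.2f\n",
	       raw, ps_scale, (double)raw / ps_scale);
	return 0;
}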
diff --git a/drivers/iio/light/vl6180.c b/drivers/iio/light/vl6180.c
index d4948dfc31ff..dcadf6428a87 100644
--- a/drivers/iio/light/vl6180.c
+++ b/drivers/iio/light/vl6180.c
@@ -20,7 +20,6 @@
#include <linux/i2c.h>
#include <linux/mutex.h>
#include <linux/err.h>
-#include <linux/of.h>
#include <linux/delay.h>
#include <linux/util_macros.h>
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 38532d840f2a..cd2917d71904 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -6,6 +6,18 @@
menu "Magnetometer sensors"
+config AF8133J
+ tristate "Voltafield AF8133J 3-Axis Magnetometer"
+ depends on I2C
+ depends on OF
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Voltafield AF8133J I2C-based
+ 3-axis magnetometer chip.
+
+ To compile this driver as a module, choose M here: the module
+ will be called af8133j.
+
config AK8974
tristate "Asahi Kasei AK8974 3-Axis Magnetometer"
depends on I2C
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index b1c784ea71c8..ec5c46fbf999 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -4,6 +4,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_AF8133J) += af8133j.o
obj-$(CONFIG_AK8974) += ak8974.o
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
new file mode 100644
index 000000000000..742bbdf25f08
--- /dev/null
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -0,0 +1,528 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * af8133j.c - Voltafield AF8133J magnetometer driver
+ *
+ * Copyright 2021 Icenowy Zheng <icenowy@aosc.io>
+ * Copyright 2024 Ondřej Jirman <megi@xff.cz>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define AF8133J_REG_OUT 0x03
+#define AF8133J_REG_PCODE 0x00
+#define AF8133J_REG_PCODE_VAL 0x5e
+#define AF8133J_REG_STATUS 0x02
+#define AF8133J_REG_STATUS_ACQ BIT(0)
+#define AF8133J_REG_STATE 0x0a
+#define AF8133J_REG_STATE_STBY 0x00
+#define AF8133J_REG_STATE_WORK 0x01
+#define AF8133J_REG_RANGE 0x0b
+#define AF8133J_REG_RANGE_22G 0x12
+#define AF8133J_REG_RANGE_12G 0x34
+#define AF8133J_REG_SWR 0x11
+#define AF8133J_REG_SWR_PERFORM 0x81
+
+static const char * const af8133j_supply_names[] = {
+ "avdd",
+ "dvdd",
+};
+
+struct af8133j_data {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ /*
+ * Protect device internal state between starting a measurement
+ * and reading the result.
+ */
+ struct mutex mutex;
+ struct iio_mount_matrix orientation;
+
+ struct gpio_desc *reset_gpiod;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(af8133j_supply_names)];
+
+ u8 range;
+};
+
+enum af8133j_axis {
+ AXIS_X = 0,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+static const struct iio_mount_matrix *
+af8133j_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
+}
+
+static const struct iio_chan_spec_ext_info af8133j_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, af8133j_get_mount_matrix),
+ { }
+};
+
+#define AF8133J_CHANNEL(_si, _axis) { \
+ .type = IIO_MAGN, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_ ## _axis, \
+ .address = AXIS_ ## _axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = af8133j_ext_info, \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+static const struct iio_chan_spec af8133j_channels[] = {
+ AF8133J_CHANNEL(0, X),
+ AF8133J_CHANNEL(1, Y),
+ AF8133J_CHANNEL(2, Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static int af8133j_product_check(struct af8133j_data *data)
+{
+ struct device *dev = &data->client->dev;
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, AF8133J_REG_PCODE, &val);
+ if (ret) {
+ dev_err(dev, "Error reading product code (%d)\n", ret);
+ return ret;
+ }
+
+ if (val != AF8133J_REG_PCODE_VAL) {
+ dev_warn(dev, "Invalid product code (0x%02x)\n", val);
+ return 0; /* Allow unknown ID so fallback compatibles work */
+ }
+
+ return 0;
+}
+
+static int af8133j_reset(struct af8133j_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ if (data->reset_gpiod) {
+ /* If we have GPIO reset line, use it */
+ gpiod_set_value_cansleep(data->reset_gpiod, 1);
+ udelay(10);
+ gpiod_set_value_cansleep(data->reset_gpiod, 0);
+ } else {
+ /* Otherwise use software reset */
+ ret = regmap_write(data->regmap, AF8133J_REG_SWR,
+ AF8133J_REG_SWR_PERFORM);
+ if (ret) {
+ dev_err(dev, "Failed to reset the chip\n");
+ return ret;
+ }
+ }
+
+ /* Wait for reset to finish */
+ usleep_range(1000, 1100);
+
+ /* Restore range setting */
+ if (data->range == AF8133J_REG_RANGE_22G) {
+ ret = regmap_write(data->regmap, AF8133J_REG_RANGE, data->range);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void af8133j_power_down(struct af8133j_data *data)
+{
+ gpiod_set_value_cansleep(data->reset_gpiod, 1);
+ regulator_bulk_disable(ARRAY_SIZE(data->supplies), data->supplies);
+}
+
+static int af8133j_power_up(struct af8133j_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(data->supplies), data->supplies);
+ if (ret) {
+ dev_err(dev, "Could not enable regulators\n");
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(data->reset_gpiod, 0);
+
+ /* Wait for power on reset */
+ usleep_range(15000, 16000);
+
+ ret = af8133j_reset(data);
+ if (ret) {
+ af8133j_power_down(data);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int af8133j_take_measurement(struct af8133j_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_write(data->regmap,
+ AF8133J_REG_STATE, AF8133J_REG_STATE_WORK);
+ if (ret)
+ return ret;
+
+ /* The datasheet says "Mesaure Time <1.5ms" */
+ ret = regmap_read_poll_timeout(data->regmap, AF8133J_REG_STATUS, val,
+ val & AF8133J_REG_STATUS_ACQ,
+ 500, 1500);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(data->regmap,
+ AF8133J_REG_STATE, AF8133J_REG_STATE_STBY);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int af8133j_read_measurement(struct af8133j_data *data, __le16 buf[3])
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret) {
+ /*
+ * Ignore EACCES because that happens when RPM is disabled
+ * during system sleep, while userspace leaves e.g. an hrtimer
+ * trigger attached and the IIO core keeps trying to do measurements.
+ */
+ if (ret != -EACCES)
+ dev_err(dev, "Failed to power on (%d)\n", ret);
+ return ret;
+ }
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = af8133j_take_measurement(data);
+ if (ret)
+ goto out_rpm_put;
+
+ ret = regmap_bulk_read(data->regmap, AF8133J_REG_OUT,
+ buf, sizeof(__le16) * 3);
+ }
+
+out_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static const int af8133j_scales[][2] = {
+ [0] = { 0, 366210 }, /* 12 gauss */
+ [1] = { 0, 671386 }, /* 22 gauss */
+};
+
+static int af8133j_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct af8133j_data *data = iio_priv(indio_dev);
+ __le16 buf[3];
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = af8133j_read_measurement(data, buf);
+ if (ret)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(buf[chan->address]),
+ chan->scan_type.realbits - 1);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+
+ if (data->range == AF8133J_REG_RANGE_12G)
+ *val2 = af8133j_scales[0][1];
+ else
+ *val2 = af8133j_scales[1][1];
+
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int af8133j_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (const int *)af8133j_scales;
+ *length = ARRAY_SIZE(af8133j_scales) * 2;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int af8133j_set_scale(struct af8133j_data *data,
+ unsigned int val, unsigned int val2)
+{
+ struct device *dev = &data->client->dev;
+ u8 range;
+ int ret = 0;
+
+ if (af8133j_scales[0][0] == val && af8133j_scales[0][1] == val2)
+ range = AF8133J_REG_RANGE_12G;
+ else if (af8133j_scales[1][0] == val && af8133j_scales[1][1] == val2)
+ range = AF8133J_REG_RANGE_22G;
+ else
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+
+ /*
+ * When suspended, just store the new range to data->range to be
+ * applied later during power up.
+ */
+ if (!pm_runtime_status_suspended(dev))
+ scoped_guard(mutex, &data->mutex)
+ ret = regmap_write(data->regmap,
+ AF8133J_REG_RANGE, range);
+
+ pm_runtime_enable(dev);
+
+ data->range = range;
+ return ret;
+}
+
+static int af8133j_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return af8133j_set_scale(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int af8133j_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static const struct iio_info af8133j_info = {
+ .read_raw = af8133j_read_raw,
+ .read_avail = af8133j_read_avail,
+ .write_raw = af8133j_write_raw,
+ .write_raw_get_fmt = af8133j_write_raw_get_fmt,
+};
+
+static irqreturn_t af8133j_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct af8133j_data *data = iio_priv(indio_dev);
+ s64 timestamp = iio_get_time_ns(indio_dev);
+ struct {
+ __le16 values[3];
+ s64 timestamp __aligned(8);
+ } sample;
+ int ret;
+
+ memset(&sample, 0, sizeof(sample));
+
+ ret = af8133j_read_measurement(data, sample.values);
+ if (ret)
+ goto out_done;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &sample, timestamp);
+
+out_done:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config af8133j_regmap_config = {
+ .name = "af8133j_regmap",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AF8133J_REG_SWR,
+ .cache_type = REGCACHE_NONE,
+};
+
+static void af8133j_power_down_action(void *ptr)
+{
+ struct af8133j_data *data = ptr;
+
+ if (!pm_runtime_status_suspended(&data->client->dev))
+ af8133j_power_down(data);
+}
+
+static int af8133j_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct af8133j_data *data;
+ struct iio_dev *indio_dev;
+ struct regmap *regmap;
+ int ret, i;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ regmap = devm_regmap_init_i2c(client, &af8133j_regmap_config);
+ if (IS_ERR(regmap))
+ return dev_err_probe(dev, PTR_ERR(regmap),
+ "regmap initialization failed\n");
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+ data->regmap = regmap;
+ data->range = AF8133J_REG_RANGE_12G;
+ mutex_init(&data->mutex);
+
+ data->reset_gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(data->reset_gpiod))
+ return dev_err_probe(dev, PTR_ERR(data->reset_gpiod),
+ "Failed to get reset gpio\n");
+
+ for (i = 0; i < ARRAY_SIZE(af8133j_supply_names); i++)
+ data->supplies[i].supply = af8133j_supply_names[i];
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(data->supplies),
+ data->supplies);
+ if (ret)
+ return ret;
+
+ ret = iio_read_mount_matrix(dev, &data->orientation);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to read mount matrix\n");
+
+ ret = af8133j_power_up(data);
+ if (ret)
+ return ret;
+
+ pm_runtime_set_active(dev);
+
+ ret = devm_add_action_or_reset(dev, af8133j_power_down_action, data);
+ if (ret)
+ return ret;
+
+ ret = af8133j_product_check(data);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_noresume(dev);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 500);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_put_autosuspend(dev);
+
+ indio_dev->info = &af8133j_info;
+ indio_dev->name = "af8133j";
+ indio_dev->channels = af8133j_channels;
+ indio_dev->num_channels = ARRAY_SIZE(af8133j_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ &af8133j_trigger_handler, NULL);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "Failed to setup iio triggered buffer\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register iio device\n");
+
+ return 0;
+}
+
+static int af8133j_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ af8133j_power_down(data);
+
+ return 0;
+}
+
+static int af8133j_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct af8133j_data *data = iio_priv(indio_dev);
+
+ return af8133j_power_up(data);
+}
+
+static const struct dev_pm_ops af8133j_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ RUNTIME_PM_OPS(af8133j_runtime_suspend, af8133j_runtime_resume, NULL)
+};
+
+static const struct of_device_id af8133j_of_match[] = {
+ { .compatible = "voltafield,af8133j", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, af8133j_of_match);
+
+static const struct i2c_device_id af8133j_id[] = {
+ { "af8133j", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, af8133j_id);
+
+static struct i2c_driver af8133j_driver = {
+ .driver = {
+ .name = "af8133j",
+ .of_match_table = af8133j_of_match,
+ .pm = pm_ptr(&af8133j_pm_ops),
+ },
+ .probe = af8133j_probe,
+ .id_table = af8133j_id,
+};
+
+module_i2c_driver(af8133j_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <icenowy@aosc.io>");
+MODULE_AUTHOR("Ondřej Jirman <megi@xff.cz>");
+MODULE_DESCRIPTION("Voltafield AF8133J magnetic sensor driver");
+MODULE_LICENSE("GPL");
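The two af8133j_scales entries in the new driver are consistent with a signed 16-bit sample over the 12 G and 22 G ranges, i.e. range / 2^15 gauss per LSB expressed in nano units. A minimal sketch of that derivation, not part of the patch; the full-range interpretation is an assumption:

/* Sketch: reproduce {0, 366210} and {0, 671386} (IIO_VAL_INT_PLUS_NANO). */
#include <stdio.h>

int main(void)
{
	const long long ranges[] = { 12, 22 };  /* gauss, per AF8133J_REG_RANGE_* */
	int i;

	for (i = 0; i < 2; i++) {
		long long nano = ranges[i] * 1000000000LL / 32768;
		printf("%lld G range -> 0.%09lld gauss/LSB\n", ranges[i], nano);
	}
	return 0;
}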
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
index 281d1fa31c8e..48d9c698f520 100644
--- a/drivers/iio/magnetometer/bmc150_magn_i2c.c
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -11,7 +11,6 @@
#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include "bmc150_magn.h"
@@ -68,7 +67,7 @@ static struct i2c_driver bmc150_magn_driver = {
.driver = {
.name = "bmc150_magn_i2c",
.of_match_table = bmc150_magn_of_match,
- .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .acpi_match_table = bmc150_magn_acpi_match,
.pm = &bmc150_magn_pm_ops,
},
.probe = bmc150_magn_i2c_probe,
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
index 882987721071..abc75a05c46a 100644
--- a/drivers/iio/magnetometer/bmc150_magn_spi.c
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -10,7 +10,6 @@
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/acpi.h>
#include <linux/regmap.h>
#include "bmc150_magn.h"
@@ -55,7 +54,7 @@ static struct spi_driver bmc150_magn_spi_driver = {
.remove = bmc150_magn_spi_remove,
.id_table = bmc150_magn_spi_id,
.driver = {
- .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .acpi_match_table = bmc150_magn_acpi_match,
.name = "bmc150_magn_spi",
},
};
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index b495b8a63928..6b9f4b056191 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -10,11 +10,11 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/delay.h>
#include <linux/regmap.h>
-#include <linux/acpi.h>
#include <linux/pm.h>
#include <linux/iio/iio.h>
@@ -573,7 +573,7 @@ static struct i2c_driver mmc35240_driver = {
.name = MMC35240_DRV_NAME,
.of_match_table = mmc35240_of_match,
.pm = pm_sleep_ptr(&mmc35240_pm_ops),
- .acpi_match_table = ACPI_PTR(mmc35240_acpi_match),
+ .acpi_match_table = mmc35240_acpi_match,
},
.probe = mmc35240_probe,
.id_table = mmc35240_id,
diff --git a/drivers/iio/potentiometer/max5487.c b/drivers/iio/potentiometer/max5487.c
index 42723c996c9f..4838d2e72f53 100644
--- a/drivers/iio/potentiometer/max5487.c
+++ b/drivers/iio/potentiometer/max5487.c
@@ -5,8 +5,8 @@
* Copyright (C) 2016 Cristina-Gabriela Moraru <cristina.moraru09@gmail.com>
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/spi/spi.h>
-#include <linux/acpi.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/iio.h>
@@ -144,7 +144,7 @@ MODULE_DEVICE_TABLE(acpi, max5487_acpi_match);
static struct spi_driver max5487_driver = {
.driver = {
.name = "max5487",
- .acpi_match_table = ACPI_PTR(max5487_acpi_match),
+ .acpi_match_table = max5487_acpi_match,
},
.id_table = max5487_id,
.probe = max5487_spi_probe,
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 79adfd059c3a..3ad38506028e 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -114,6 +114,8 @@ config HSC030PA
depends on (I2C || SPI_MASTER)
select HSC030PA_I2C if I2C
select HSC030PA_SPI if SPI_MASTER
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say Y here to build support for the Honeywell TruStability
HSC and SSC pressure and temperature sensor series.
@@ -181,7 +183,9 @@ config MPL3115
config MPRLS0025PA
tristate "Honeywell MPRLS0025PA (MicroPressure sensors series)"
- depends on I2C
+ depends on (I2C || SPI_MASTER)
+ select MPRLS0025PA_I2C if I2C
+ select MPRLS0025PA_SPI if SPI_MASTER
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
@@ -192,6 +196,16 @@ config MPRLS0025PA
To compile this driver as a module, choose M here: the module will be
called mprls0025pa.
+config MPRLS0025PA_I2C
+ tristate
+ depends on MPRLS0025PA
+ depends on I2C
+
+config MPRLS0025PA_SPI
+ tristate
+ depends on MPRLS0025PA
+ depends on SPI_MASTER
+
config MS5611
tristate "Measurement Specialties MS5611 pressure sensor driver"
select IIO_BUFFER
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index b0f8b94662f2..a93709e35760 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
obj-$(CONFIG_MPL3115) += mpl3115.o
obj-$(CONFIG_MPRLS0025PA) += mprls0025pa.o
+obj-$(CONFIG_MPRLS0025PA_I2C) += mprls0025pa_i2c.o
+obj-$(CONFIG_MPRLS0025PA_SPI) += mprls0025pa_spi.o
obj-$(CONFIG_MS5611) += ms5611_core.o
obj-$(CONFIG_MS5611_I2C) += ms5611_i2c.o
obj-$(CONFIG_MS5611_SPI) += ms5611_spi.o
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
index a072de6cb59c..261af1562827 100644
--- a/drivers/iio/pressure/hp206c.c
+++ b/drivers/iio/pressure/hp206c.c
@@ -11,12 +11,12 @@
*/
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/delay.h>
#include <linux/util_macros.h>
-#include <linux/acpi.h>
#include <asm/unaligned.h>
@@ -400,20 +400,18 @@ static const struct i2c_device_id hp206c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, hp206c_id);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id hp206c_acpi_match[] = {
{"HOP206C", 0},
{ },
};
MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
-#endif
static struct i2c_driver hp206c_driver = {
.probe = hp206c_probe,
.id_table = hp206c_id,
.driver = {
.name = "hp206c",
- .acpi_match_table = ACPI_PTR(hp206c_acpi_match),
+ .acpi_match_table = hp206c_acpi_match,
},
};
diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c
index d6a51f0c335f..1682b90d4557 100644
--- a/drivers/iio/pressure/hsc030pa.c
+++ b/drivers/iio/pressure/hsc030pa.c
@@ -22,8 +22,11 @@
#include <linux/types.h>
#include <linux/units.h>
+#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <asm/unaligned.h>
@@ -297,6 +300,29 @@ static int hsc_get_measurement(struct hsc_data *data)
return 0;
}
+static irqreturn_t hsc_trigger_handler(int irq, void *private)
+{
+ struct iio_poll_func *pf = private;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct hsc_data *data = iio_priv(indio_dev);
+ int ret;
+
+ ret = hsc_get_measurement(data);
+ if (ret)
+ goto error;
+
+ memcpy(&data->scan.chan[0], &data->buffer[0], 2);
+ memcpy(&data->scan.chan[1], &data->buffer[2], 2);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
+ iio_get_time_ns(indio_dev));
+
+error:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
/*
* IIO ABI expects
* value = (conv + offset) * scale
@@ -382,13 +408,29 @@ static const struct iio_chan_spec hsc_channels[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 11,
+ .storagebits = 16,
+ .shift = 5,
+ .endianness = IIO_BE,
+ },
},
+ IIO_CHAN_SOFT_TIMESTAMP(2),
};
static const struct iio_info hsc_info = {
@@ -406,7 +448,7 @@ int hsc_common_probe(struct device *dev, hsc_recv_fn recv)
struct hsc_data *hsc;
struct iio_dev *indio_dev;
const char *triplet;
- u64 tmp;
+ s64 tmp;
int ret;
indio_dev = devm_iio_device_alloc(dev, sizeof(*hsc));
@@ -485,6 +527,11 @@ int hsc_common_probe(struct device *dev, hsc_recv_fn recv)
indio_dev->channels = hsc->chip->channels;
indio_dev->num_channels = hsc->chip->num_channels;
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
+ hsc_trigger_handler, NULL);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS(hsc_common_probe, IIO_HONEYWELL_HSC030PA);
diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h
index d20420dba4f6..9b40f46f575f 100644
--- a/drivers/iio/pressure/hsc030pa.h
+++ b/drivers/iio/pressure/hsc030pa.h
@@ -10,7 +10,10 @@
#include <linux/types.h>
+#include <linux/iio/iio.h>
+
#define HSC_REG_MEASUREMENT_RD_SIZE 4
+#define HSC_RESP_TIME_MS 2
struct device;
@@ -53,6 +56,10 @@ struct hsc_data {
s32 p_scale_dec;
s64 p_offset;
s32 p_offset_dec;
+ struct {
+ __be16 chan[2];
+ s64 timestamp __aligned(8);
+ } scan;
u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
};
diff --git a/drivers/iio/pressure/hsc030pa_i2c.c b/drivers/iio/pressure/hsc030pa_i2c.c
index e2b524b36417..b3fd230e71da 100644
--- a/drivers/iio/pressure/hsc030pa_i2c.c
+++ b/drivers/iio/pressure/hsc030pa_i2c.c
@@ -4,14 +4,17 @@
*
* Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
*
- * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf [hsc]
- * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf [i2c related]
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/common/documents/sps-siot-sleep-mode-technical-note-008286-1-en-ciid-155793.pdf
*/
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
@@ -23,6 +26,8 @@ static int hsc_i2c_recv(struct hsc_data *data)
struct i2c_msg msg;
int ret;
+ msleep_interruptible(HSC_RESP_TIME_MS);
+
msg.addr = client->addr;
msg.flags = client->flags | I2C_M_RD;
msg.len = HSC_REG_MEASUREMENT_RD_SIZE;
diff --git a/drivers/iio/pressure/hsc030pa_spi.c b/drivers/iio/pressure/hsc030pa_spi.c
index a719bade8326..818fa6303454 100644
--- a/drivers/iio/pressure/hsc030pa_spi.c
+++ b/drivers/iio/pressure/hsc030pa_spi.c
@@ -4,13 +4,17 @@
*
* Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro>
*
- * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/common/documents/sps-siot-spi-comms-digital-ouptu-pressure-sensors-tn-008202-3-en-ciid-45843.pdf
+ * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/common/documents/sps-siot-sleep-mode-technical-note-008286-1-en-ciid-155793.pdf
*/
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/stddef.h>
+#include <linux/types.h>
#include <linux/iio/iio.h>
@@ -25,6 +29,7 @@ static int hsc_spi_recv(struct hsc_data *data)
.len = HSC_REG_MEASUREMENT_RD_SIZE,
};
+ msleep_interruptible(HSC_RESP_TIME_MS);
return spi_sync_transfer(spi, &xfer, 1);
}
diff --git a/drivers/iio/pressure/mprls0025pa.c b/drivers/iio/pressure/mprls0025pa.c
index 30fb2de36821..33a15d4c642c 100644
--- a/drivers/iio/pressure/mprls0025pa.c
+++ b/drivers/iio/pressure/mprls0025pa.c
@@ -5,17 +5,13 @@
* Copyright (c) Andreas Klinger <ak@it-klinger.de>
*
* Data sheet:
- * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/
- * products/sensors/pressure-sensors/board-mount-pressure-sensors/
- * micropressure-mpr-series/documents/
- * sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
*
- * 7-bit I2C default slave address: 0x18
*/
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/i2c.h>
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -25,7 +21,6 @@
#include <linux/gpio/consumer.h>
#include <linux/iio/buffer.h>
-#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -33,11 +28,15 @@
#include <asm/unaligned.h>
-/* bits in i2c status byte */
-#define MPR_I2C_POWER BIT(6) /* device is powered */
-#define MPR_I2C_BUSY BIT(5) /* device is busy */
-#define MPR_I2C_MEMORY BIT(2) /* integrity test passed */
-#define MPR_I2C_MATH BIT(0) /* internal math saturation */
+#include "mprls0025pa.h"
+
+/* bits in status byte */
+#define MPR_ST_POWER BIT(6) /* device is powered */
+#define MPR_ST_BUSY BIT(5) /* device is busy */
+#define MPR_ST_MEMORY BIT(2) /* integrity test passed */
+#define MPR_ST_MATH BIT(0) /* internal math saturation */
+
+#define MPR_ST_ERR_FLAG (MPR_ST_BUSY | MPR_ST_MEMORY | MPR_ST_MATH)
/*
* support _RAW sysfs interface:
@@ -70,60 +69,87 @@
* transfer function B: 2.5% to 22.5% of 2^24
* transfer function C: 20% to 80% of 2^24
*/
-enum mpr_func_id {
- MPR_FUNCTION_A,
- MPR_FUNCTION_B,
- MPR_FUNCTION_C,
-};
-
struct mpr_func_spec {
u32 output_min;
u32 output_max;
};
static const struct mpr_func_spec mpr_func_spec[] = {
- [MPR_FUNCTION_A] = {.output_min = 1677722, .output_max = 15099494},
- [MPR_FUNCTION_B] = {.output_min = 419430, .output_max = 3774874},
- [MPR_FUNCTION_C] = {.output_min = 3355443, .output_max = 13421773},
+ [MPR_FUNCTION_A] = { .output_min = 1677722, .output_max = 15099494 },
+ [MPR_FUNCTION_B] = { .output_min = 419430, .output_max = 3774874 },
+ [MPR_FUNCTION_C] = { .output_min = 3355443, .output_max = 13421773 },
};
-struct mpr_chan {
- s32 pres; /* pressure value */
- s64 ts; /* timestamp */
+enum mpr_variants {
+ MPR0001BA = 0x00, MPR01_6BA = 0x01, MPR02_5BA = 0x02, MPR0060MG = 0x03,
+ MPR0100MG = 0x04, MPR0160MG = 0x05, MPR0250MG = 0x06, MPR0400MG = 0x07,
+ MPR0600MG = 0x08, MPR0001BG = 0x09, MPR01_6BG = 0x0a, MPR02_5BG = 0x0b,
+ MPR0100KA = 0x0c, MPR0160KA = 0x0d, MPR0250KA = 0x0e, MPR0006KG = 0x0f,
+ MPR0010KG = 0x10, MPR0016KG = 0x11, MPR0025KG = 0x12, MPR0040KG = 0x13,
+ MPR0060KG = 0x14, MPR0100KG = 0x15, MPR0160KG = 0x16, MPR0250KG = 0x17,
+ MPR0015PA = 0x18, MPR0025PA = 0x19, MPR0030PA = 0x1a, MPR0001PG = 0x1b,
+ MPR0005PG = 0x1c, MPR0015PG = 0x1d, MPR0030PG = 0x1e, MPR0300YG = 0x1f,
+ MPR_VARIANTS_MAX
+};
+
+static const char * const mpr_triplet_variants[MPR_VARIANTS_MAX] = {
+ [MPR0001BA] = "0001BA", [MPR01_6BA] = "01.6BA", [MPR02_5BA] = "02.5BA",
+ [MPR0060MG] = "0060MG", [MPR0100MG] = "0100MG", [MPR0160MG] = "0160MG",
+ [MPR0250MG] = "0250MG", [MPR0400MG] = "0400MG", [MPR0600MG] = "0600MG",
+ [MPR0001BG] = "0001BG", [MPR01_6BG] = "01.6BG", [MPR02_5BG] = "02.5BG",
+ [MPR0100KA] = "0100KA", [MPR0160KA] = "0160KA", [MPR0250KA] = "0250KA",
+ [MPR0006KG] = "0006KG", [MPR0010KG] = "0010KG", [MPR0016KG] = "0016KG",
+ [MPR0025KG] = "0025KG", [MPR0040KG] = "0040KG", [MPR0060KG] = "0060KG",
+ [MPR0100KG] = "0100KG", [MPR0160KG] = "0160KG", [MPR0250KG] = "0250KG",
+ [MPR0015PA] = "0015PA", [MPR0025PA] = "0025PA", [MPR0030PA] = "0030PA",
+ [MPR0001PG] = "0001PG", [MPR0005PG] = "0005PG", [MPR0015PG] = "0015PG",
+ [MPR0030PG] = "0030PG", [MPR0300YG] = "0300YG"
+};
+
+/**
+ * struct mpr_range_config - list of pressure ranges based on nomenclature
+ * @pmin: lowest pressure that can be measured
+ * @pmax: highest pressure that can be measured
+ */
+struct mpr_range_config {
+ const s32 pmin;
+ const s32 pmax;
};
-struct mpr_data {
- struct i2c_client *client;
- struct mutex lock; /*
- * access to device during read
- */
- u32 pmin; /* minimal pressure in pascal */
- u32 pmax; /* maximal pressure in pascal */
- enum mpr_func_id function; /* transfer function */
- u32 outmin; /*
- * minimal numerical range raw
- * value from sensor
- */
- u32 outmax; /*
- * maximal numerical range raw
- * value from sensor
- */
- int scale; /* int part of scale */
- int scale2; /* nano part of scale */
- int offset; /* int part of offset */
- int offset2; /* nano part of offset */
- struct gpio_desc *gpiod_reset; /* reset */
- int irq; /*
- * end of conversion irq;
- * used to distinguish between
- * irq mode and reading in a
- * loop until data is ready
- */
- struct completion completion; /* handshake from irq to read */
- struct mpr_chan chan; /*
- * channel values for buffered
- * mode
- */
+/* All min max limits have been converted to pascals */
+static const struct mpr_range_config mpr_range_config[MPR_VARIANTS_MAX] = {
+ [MPR0001BA] = { .pmin = 0, .pmax = 100000 },
+ [MPR01_6BA] = { .pmin = 0, .pmax = 160000 },
+ [MPR02_5BA] = { .pmin = 0, .pmax = 250000 },
+ [MPR0060MG] = { .pmin = 0, .pmax = 6000 },
+ [MPR0100MG] = { .pmin = 0, .pmax = 10000 },
+ [MPR0160MG] = { .pmin = 0, .pmax = 16000 },
+ [MPR0250MG] = { .pmin = 0, .pmax = 25000 },
+ [MPR0400MG] = { .pmin = 0, .pmax = 40000 },
+ [MPR0600MG] = { .pmin = 0, .pmax = 60000 },
+ [MPR0001BG] = { .pmin = 0, .pmax = 100000 },
+ [MPR01_6BG] = { .pmin = 0, .pmax = 160000 },
+ [MPR02_5BG] = { .pmin = 0, .pmax = 250000 },
+ [MPR0100KA] = { .pmin = 0, .pmax = 100000 },
+ [MPR0160KA] = { .pmin = 0, .pmax = 160000 },
+ [MPR0250KA] = { .pmin = 0, .pmax = 250000 },
+ [MPR0006KG] = { .pmin = 0, .pmax = 6000 },
+ [MPR0010KG] = { .pmin = 0, .pmax = 10000 },
+ [MPR0016KG] = { .pmin = 0, .pmax = 16000 },
+ [MPR0025KG] = { .pmin = 0, .pmax = 25000 },
+ [MPR0040KG] = { .pmin = 0, .pmax = 40000 },
+ [MPR0060KG] = { .pmin = 0, .pmax = 60000 },
+ [MPR0100KG] = { .pmin = 0, .pmax = 100000 },
+ [MPR0160KG] = { .pmin = 0, .pmax = 160000 },
+ [MPR0250KG] = { .pmin = 0, .pmax = 250000 },
+ [MPR0015PA] = { .pmin = 0, .pmax = 103421 },
+ [MPR0025PA] = { .pmin = 0, .pmax = 172369 },
+ [MPR0030PA] = { .pmin = 0, .pmax = 206843 },
+ [MPR0001PG] = { .pmin = 0, .pmax = 6895 },
+ [MPR0005PG] = { .pmin = 0, .pmax = 34474 },
+ [MPR0015PG] = { .pmin = 0, .pmax = 103421 },
+ [MPR0030PG] = { .pmin = 0, .pmax = 206843 },
+ [MPR0300YG] = { .pmin = 0, .pmax = 39997 }
};
static const struct iio_chan_spec mpr_channels[] = {
@@ -153,11 +179,11 @@ static void mpr_reset(struct mpr_data *data)
}
/**
- * mpr_read_pressure() - Read pressure value from sensor via I2C
+ * mpr_read_pressure() - Read pressure value from sensor
* @data: Pointer to private data struct.
* @press: Output value read from sensor.
*
- * Reading from the sensor by sending and receiving I2C telegrams.
+ * Reading from the sensor by sending and receiving telegrams.
*
* If there is an end of conversion (EOC) interrupt registered the function
* waits for a maximum of one second for the interrupt.
@@ -170,25 +196,17 @@ static void mpr_reset(struct mpr_data *data)
*/
static int mpr_read_pressure(struct mpr_data *data, s32 *press)
{
- struct device *dev = &data->client->dev;
+ struct device *dev = data->dev;
int ret, i;
- u8 wdata[] = {0xAA, 0x00, 0x00};
- s32 status;
int nloops = 10;
- u8 buf[4];
reinit_completion(&data->completion);
- ret = i2c_master_send(data->client, wdata, sizeof(wdata));
+ ret = data->ops->write(data, MPR_CMD_SYNC, MPR_PKT_SYNC_LEN);
if (ret < 0) {
dev_err(dev, "error while writing ret: %d\n", ret);
return ret;
}
- if (ret != sizeof(wdata)) {
- dev_err(dev, "received size doesn't fit - ret: %d / %u\n", ret,
- (u32)sizeof(wdata));
- return -EIO;
- }
if (data->irq > 0) {
ret = wait_for_completion_timeout(&data->completion, HZ);
@@ -206,14 +224,14 @@ static int mpr_read_pressure(struct mpr_data *data, s32 *press)
* quite long
*/
usleep_range(5000, 10000);
- status = i2c_smbus_read_byte(data->client);
- if (status < 0) {
+ ret = data->ops->read(data, MPR_CMD_NOP, 1);
+ if (ret < 0) {
dev_err(dev,
"error while reading, status: %d\n",
- status);
- return status;
+ ret);
+ return ret;
}
- if (!(status & MPR_I2C_BUSY))
+ if (!(data->buffer[0] & MPR_ST_ERR_FLAG))
break;
}
if (i == nloops) {
@@ -222,29 +240,19 @@ static int mpr_read_pressure(struct mpr_data *data, s32 *press)
}
}
- ret = i2c_master_recv(data->client, buf, sizeof(buf));
- if (ret < 0) {
- dev_err(dev, "error in i2c_master_recv ret: %d\n", ret);
+ ret = data->ops->read(data, MPR_CMD_NOP, MPR_PKT_NOP_LEN);
+ if (ret < 0)
return ret;
- }
- if (ret != sizeof(buf)) {
- dev_err(dev, "received size doesn't fit - ret: %d / %u\n", ret,
- (u32)sizeof(buf));
- return -EIO;
- }
- if (buf[0] & MPR_I2C_BUSY) {
- /*
- * it should never be the case that status still indicates
- * business
- */
- dev_err(dev, "data still not ready: %08x\n", buf[0]);
+ if (data->buffer[0] & MPR_ST_ERR_FLAG) {
+ dev_err(data->dev,
+ "unexpected status byte %02x\n", data->buffer[0]);
return -ETIMEDOUT;
}
- *press = get_unaligned_be24(&buf[1]);
+ *press = get_unaligned_be24(&data->buffer[1]);
- dev_dbg(dev, "received: %*ph cnt: %d\n", ret, buf, *press);
+ dev_dbg(dev, "received: %*ph cnt: %d\n", ret, data->buffer, *press);
return 0;
}
@@ -271,7 +279,7 @@ static irqreturn_t mpr_trigger_handler(int irq, void *p)
goto err;
iio_push_to_buffers_with_timestamp(indio_dev, &data->chan,
- iio_get_time_ns(indio_dev));
+ iio_get_time_ns(indio_dev));
err:
mutex_unlock(&data->lock);
@@ -316,25 +324,23 @@ static const struct iio_info mpr_info = {
.read_raw = &mpr_read_raw,
};
-static int mpr_probe(struct i2c_client *client)
+int mpr_common_probe(struct device *dev, const struct mpr_ops *ops, int irq)
{
int ret;
struct mpr_data *data;
struct iio_dev *indio_dev;
- struct device *dev = &client->dev;
+ const char *triplet;
s64 scale, offset;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
- return dev_err_probe(dev, -EOPNOTSUPP,
- "I2C functionality not supported\n");
+ u32 func;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
- return dev_err_probe(dev, -ENOMEM, "couldn't get iio_dev\n");
+ return -ENOMEM;
data = iio_priv(indio_dev);
- data->client = client;
- data->irq = client->irq;
+ data->dev = dev;
+ data->ops = ops;
+ data->irq = irq;
mutex_init(&data->lock);
init_completion(&data->completion);
@@ -348,103 +354,102 @@ static int mpr_probe(struct i2c_client *client)
ret = devm_regulator_get_enable(dev, "vdd");
if (ret)
return dev_err_probe(dev, ret,
- "can't get and enable vdd supply\n");
+ "can't get and enable vdd supply\n");
- if (dev_fwnode(dev)) {
+ ret = data->ops->init(data->dev);
+ if (ret)
+ return ret;
+
+ ret = device_property_read_u32(dev,
+ "honeywell,transfer-function", &func);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "honeywell,transfer-function could not be read\n");
+ data->function = func - 1;
+ if (data->function > MPR_FUNCTION_C)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,transfer-function %d invalid\n",
+ data->function);
+
+ ret = device_property_read_string(dev, "honeywell,pressure-triplet",
+ &triplet);
+ if (ret) {
ret = device_property_read_u32(dev, "honeywell,pmin-pascal",
- &data->pmin);
+ &data->pmin);
if (ret)
return dev_err_probe(dev, ret,
- "honeywell,pmin-pascal could not be read\n");
+ "honeywell,pmin-pascal could not be read\n");
+
ret = device_property_read_u32(dev, "honeywell,pmax-pascal",
- &data->pmax);
+ &data->pmax);
if (ret)
return dev_err_probe(dev, ret,
- "honeywell,pmax-pascal could not be read\n");
- ret = device_property_read_u32(dev,
- "honeywell,transfer-function", &data->function);
- if (ret)
- return dev_err_probe(dev, ret,
- "honeywell,transfer-function could not be read\n");
- if (data->function > MPR_FUNCTION_C)
- return dev_err_probe(dev, -EINVAL,
- "honeywell,transfer-function %d invalid\n",
- data->function);
+ "honeywell,pmax-pascal could not be read\n");
} else {
- /* when loaded as i2c device we need to use default values */
- dev_notice(dev, "firmware node not found; using defaults\n");
- data->pmin = 0;
- data->pmax = 172369; /* 25 psi */
- data->function = MPR_FUNCTION_A;
+ ret = device_property_match_property_string(dev,
+ "honeywell,pressure-triplet",
+ mpr_triplet_variants,
+ MPR_VARIANTS_MAX);
+ if (ret < 0)
+ return dev_err_probe(dev, -EINVAL,
+ "honeywell,pressure-triplet is invalid\n");
+
+ data->pmin = mpr_range_config[ret].pmin;
+ data->pmax = mpr_range_config[ret].pmax;
}
+ if (data->pmin >= data->pmax)
+ return dev_err_probe(dev, -EINVAL,
+ "pressure limits are invalid\n");
+
data->outmin = mpr_func_spec[data->function].output_min;
data->outmax = mpr_func_spec[data->function].output_max;
/* use 64 bit calculation for preserving a reasonable precision */
scale = div_s64(((s64)(data->pmax - data->pmin)) * NANO,
- data->outmax - data->outmin);
+ data->outmax - data->outmin);
data->scale = div_s64_rem(scale, NANO, &data->scale2);
/*
* multiply with NANO before dividing by scale and later divide by NANO
* again.
*/
offset = ((-1LL) * (s64)data->outmin) * NANO -
- div_s64(div_s64((s64)data->pmin * NANO, scale), NANO);
+ div_s64(div_s64((s64)data->pmin * NANO, scale), NANO);
data->offset = div_s64_rem(offset, NANO, &data->offset2);
if (data->irq > 0) {
ret = devm_request_irq(dev, data->irq, mpr_eoc_handler,
- IRQF_TRIGGER_RISING, client->name, data);
+ IRQF_TRIGGER_RISING,
+ dev_name(dev),
+ data);
if (ret)
return dev_err_probe(dev, ret,
- "request irq %d failed\n", data->irq);
+ "request irq %d failed\n", data->irq);
}
data->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
- GPIOD_OUT_HIGH);
+ GPIOD_OUT_HIGH);
if (IS_ERR(data->gpiod_reset))
return dev_err_probe(dev, PTR_ERR(data->gpiod_reset),
- "request reset-gpio failed\n");
+ "request reset-gpio failed\n");
mpr_reset(data);
ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- mpr_trigger_handler, NULL);
+ mpr_trigger_handler, NULL);
if (ret)
return dev_err_probe(dev, ret,
- "iio triggered buffer setup failed\n");
+ "iio triggered buffer setup failed\n");
ret = devm_iio_device_register(dev, indio_dev);
if (ret)
return dev_err_probe(dev, ret,
- "unable to register iio device\n");
+ "unable to register iio device\n");
return 0;
}
-
-static const struct of_device_id mpr_matches[] = {
- { .compatible = "honeywell,mprls0025pa" },
- { }
-};
-MODULE_DEVICE_TABLE(of, mpr_matches);
-
-static const struct i2c_device_id mpr_id[] = {
- { "mprls0025pa" },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, mpr_id);
-
-static struct i2c_driver mpr_driver = {
- .probe = mpr_probe,
- .id_table = mpr_id,
- .driver = {
- .name = "mprls0025pa",
- .of_match_table = mpr_matches,
- },
-};
-module_i2c_driver(mpr_driver);
+EXPORT_SYMBOL_NS(mpr_common_probe, IIO_HONEYWELL_MPRLS0025PA);
MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
-MODULE_DESCRIPTION("Honeywell MPRLS0025PA I2C driver");
+MODULE_DESCRIPTION("Honeywell MPR pressure sensor core driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/iio/pressure/mprls0025pa.h b/drivers/iio/pressure/mprls0025pa.h
new file mode 100644
index 000000000000..9d5c30afa9d6
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MPRLS0025PA - Honeywell MicroPressure pressure sensor series driver
+ *
+ * Copyright (c) Andreas Klinger <ak@it-klinger.de>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ */
+
+#ifndef _MPRLS0025PA_H
+#define _MPRLS0025PA_H
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define MPR_MEASUREMENT_RD_SIZE 4
+#define MPR_CMD_NOP 0xf0
+#define MPR_CMD_SYNC 0xaa
+#define MPR_PKT_NOP_LEN MPR_MEASUREMENT_RD_SIZE
+#define MPR_PKT_SYNC_LEN 3
+
+struct device;
+
+struct iio_chan_spec;
+struct iio_dev;
+
+struct mpr_data;
+struct mpr_ops;
+
+/**
+ * struct mpr_chan
+ * @pres: pressure value
+ * @ts: timestamp
+ */
+struct mpr_chan {
+ s32 pres;
+ s64 ts;
+};
+
+enum mpr_func_id {
+ MPR_FUNCTION_A,
+ MPR_FUNCTION_B,
+ MPR_FUNCTION_C,
+};
+
+/**
+ * struct mpr_data
+ * @dev: current device structure
+ * @ops: functions that implement the sensor reads/writes, bus init
+ * @lock: access to device during read
+ * @pmin: minimal pressure in pascal
+ * @pmax: maximal pressure in pascal
+ * @function: transfer function
+ * @outmin: minimum raw pressure in counts (based on transfer function)
+ * @outmax: maximum raw pressure in counts (based on transfer function)
+ * @scale: pressure scale
+ * @scale2: pressure scale, decimal number
+ * @offset: pressure offset
+ * @offset2: pressure offset, decimal number
+ * @gpiod_reset: reset
+ * @irq: end of conversion irq. used to distinguish between irq mode and
+ * reading in a loop until data is ready
+ * @completion: handshake from irq to read
+ * @chan: channel values for buffered mode
+ * @buffer: raw conversion data
+ */
+struct mpr_data {
+ struct device *dev;
+ const struct mpr_ops *ops;
+ struct mutex lock;
+ u32 pmin;
+ u32 pmax;
+ enum mpr_func_id function;
+ u32 outmin;
+ u32 outmax;
+ int scale;
+ int scale2;
+ int offset;
+ int offset2;
+ struct gpio_desc *gpiod_reset;
+ int irq;
+ struct completion completion;
+ struct mpr_chan chan;
+ u8 buffer[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
+struct mpr_ops {
+ int (*init)(struct device *dev);
+ int (*read)(struct mpr_data *data, const u8 cmd, const u8 cnt);
+ int (*write)(struct mpr_data *data, const u8 cmd, const u8 cnt);
+};
+
+int mpr_common_probe(struct device *dev, const struct mpr_ops *ops, int irq);
+
+#endif
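
For context, the ops table above is what keeps the mprls0025pa core bus-agnostic: the core only ever calls ops->init() once, ops->write() to start a conversion and ops->read() to fetch the result packet. Below is a rough, hypothetical sketch of such a measurement cycle; the helper name, the 10 ms timeout and the packet layout (status byte followed by a 24-bit big-endian sample) are assumptions taken from the linked datasheet, not from this patch.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <asm/unaligned.h>

#include "mprls0025pa.h"

/* Hypothetical measurement cycle built on the ops declared above. */
static int mpr_example_read_pressure(struct mpr_data *data, s32 *press)
{
        int ret;

        reinit_completion(&data->completion);

        /* Start a conversion. */
        ret = data->ops->write(data, MPR_CMD_SYNC, MPR_PKT_SYNC_LEN);
        if (ret < 0)
                return ret;

        /* Wait for the end-of-conversion irq; polling mode is omitted here. */
        if (data->irq > 0 &&
            !wait_for_completion_timeout(&data->completion,
                                         msecs_to_jiffies(10))) /* assumed timeout */
                return -ETIMEDOUT;

        /* Fetch status byte plus 24-bit big-endian sample (assumed layout). */
        ret = data->ops->read(data, MPR_CMD_NOP, MPR_PKT_NOP_LEN);
        if (ret < 0)
                return ret;

        *press = get_unaligned_be24(&data->buffer[1]);
        return 0;
}
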
diff --git a/drivers/iio/pressure/mprls0025pa_i2c.c b/drivers/iio/pressure/mprls0025pa_i2c.c
new file mode 100644
index 000000000000..7a5c5aa2b456
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa_i2c.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MPRLS0025PA - Honeywell MicroPressure pressure sensor series driver
+ *
+ * Copyright (c) Andreas Klinger <ak@it-klinger.de>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include "mprls0025pa.h"
+
+static int mpr_i2c_init(struct device *unused)
+{
+ return 0;
+}
+
+static int mpr_i2c_read(struct mpr_data *data, const u8 unused, const u8 cnt)
+{
+ int ret;
+ struct i2c_client *client = to_i2c_client(data->dev);
+
+ if (cnt > MPR_MEASUREMENT_RD_SIZE)
+ return -EOVERFLOW;
+
+ memset(data->buffer, 0, MPR_MEASUREMENT_RD_SIZE);
+ ret = i2c_master_recv(client, data->buffer, cnt);
+ if (ret < 0)
+ return ret;
+ else if (ret != cnt)
+ return -EIO;
+
+ return 0;
+}
+
+static int mpr_i2c_write(struct mpr_data *data, const u8 cmd, const u8 unused)
+{
+ int ret;
+ struct i2c_client *client = to_i2c_client(data->dev);
+ u8 wdata[MPR_PKT_SYNC_LEN];
+
+ memset(wdata, 0, sizeof(wdata));
+ wdata[0] = cmd;
+
+ ret = i2c_master_send(client, wdata, MPR_PKT_SYNC_LEN);
+ if (ret < 0)
+ return ret;
+ else if (ret != MPR_PKT_SYNC_LEN)
+ return -EIO;
+
+ return 0;
+}
+
+static const struct mpr_ops mpr_i2c_ops = {
+ .init = mpr_i2c_init,
+ .read = mpr_i2c_read,
+ .write = mpr_i2c_write,
+};
+
+static int mpr_i2c_probe(struct i2c_client *client)
+{
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE))
+ return -EOPNOTSUPP;
+
+ return mpr_common_probe(&client->dev, &mpr_i2c_ops, client->irq);
+}
+
+static const struct of_device_id mpr_i2c_match[] = {
+ { .compatible = "honeywell,mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpr_i2c_match);
+
+static const struct i2c_device_id mpr_i2c_id[] = {
+ { "mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, mpr_i2c_id);
+
+static struct i2c_driver mpr_i2c_driver = {
+ .probe = mpr_i2c_probe,
+ .id_table = mpr_i2c_id,
+ .driver = {
+ .name = "mprls0025pa",
+ .of_match_table = mpr_i2c_match,
+ },
+};
+module_i2c_driver(mpr_i2c_driver);
+
+MODULE_AUTHOR("Andreas Klinger <ak@it-klinger.de>");
+MODULE_DESCRIPTION("Honeywell MPR pressure sensor i2c driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_MPRLS0025PA);
diff --git a/drivers/iio/pressure/mprls0025pa_spi.c b/drivers/iio/pressure/mprls0025pa_spi.c
new file mode 100644
index 000000000000..3aed14cd95c5
--- /dev/null
+++ b/drivers/iio/pressure/mprls0025pa_spi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MPRLS0025PA - Honeywell MicroPressure MPR series SPI sensor driver
+ *
+ * Copyright (c) 2024 Petre Rodan <petre.rodan@subdimension.ro>
+ *
+ * Data sheet:
+ * https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/micropressure-mpr-series/documents/sps-siot-mpr-series-datasheet-32332628-ciid-172626.pdf
+ */
+
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "mprls0025pa.h"
+
+struct mpr_spi_buf {
+ u8 tx[MPR_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN);
+};
+
+static int mpr_spi_init(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct mpr_spi_buf *buf;
+
+ buf = devm_kzalloc(dev, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, buf);
+
+ return 0;
+}
+
+static int mpr_spi_xfer(struct mpr_data *data, const u8 cmd, const u8 pkt_len)
+{
+ struct spi_device *spi = to_spi_device(data->dev);
+ struct mpr_spi_buf *buf = spi_get_drvdata(spi);
+ struct spi_transfer xfer;
+
+ if (pkt_len > MPR_MEASUREMENT_RD_SIZE)
+ return -EOVERFLOW;
+
+ buf->tx[0] = cmd;
+ xfer.tx_buf = buf->tx;
+ xfer.rx_buf = data->buffer;
+ xfer.len = pkt_len;
+
+ return spi_sync_transfer(spi, &xfer, 1);
+}
+
+static const struct mpr_ops mpr_spi_ops = {
+ .init = mpr_spi_init,
+ .read = mpr_spi_xfer,
+ .write = mpr_spi_xfer,
+};
+
+static int mpr_spi_probe(struct spi_device *spi)
+{
+ return mpr_common_probe(&spi->dev, &mpr_spi_ops, spi->irq);
+}
+
+static const struct of_device_id mpr_spi_match[] = {
+ { .compatible = "honeywell,mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpr_spi_match);
+
+static const struct spi_device_id mpr_spi_id[] = {
+ { "mprls0025pa" },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mpr_spi_id);
+
+static struct spi_driver mpr_spi_driver = {
+ .driver = {
+ .name = "mprls0025pa",
+ .of_match_table = mpr_spi_match,
+ },
+ .probe = mpr_spi_probe,
+ .id_table = mpr_spi_id,
+};
+module_spi_driver(mpr_spi_driver);
+
+MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>");
+MODULE_DESCRIPTION("Honeywell MPR pressure sensor spi driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(IIO_HONEYWELL_MPRLS0025PA);
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 5101552e3f38..389523d6ae32 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -7,7 +7,6 @@
* Denis Ciocca <denis.ciocca@st.com>
*/
-#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
@@ -55,13 +54,11 @@ static const struct of_device_id st_press_of_match[] = {
};
MODULE_DEVICE_TABLE(of, st_press_of_match);
-#ifdef CONFIG_ACPI
static const struct acpi_device_id st_press_acpi_match[] = {
{"SNO9210", LPS22HB},
{ },
};
MODULE_DEVICE_TABLE(acpi, st_press_acpi_match);
-#endif
static const struct i2c_device_id st_press_id_table[] = {
{ LPS001WP_PRESS_DEV_NAME, LPS001WP },
@@ -114,7 +111,7 @@ static struct i2c_driver st_press_driver = {
.driver = {
.name = "st-press-i2c",
.of_match_table = st_press_of_match,
- .acpi_match_table = ACPI_PTR(st_press_acpi_match),
+ .acpi_match_table = st_press_acpi_match,
},
.probe = st_press_i2c_probe,
.id_table = st_press_id_table,
diff --git a/drivers/iio/proximity/isl29501.c b/drivers/iio/proximity/isl29501.c
index bcebacaf3dab..4982686fb4c3 100644
--- a/drivers/iio/proximity/isl29501.c
+++ b/drivers/iio/proximity/isl29501.c
@@ -995,17 +995,16 @@ static const struct i2c_device_id isl29501_id[] = {
MODULE_DEVICE_TABLE(i2c, isl29501_id);
-#if defined(CONFIG_OF)
static const struct of_device_id isl29501_i2c_matches[] = {
{ .compatible = "renesas,isl29501" },
{ }
};
MODULE_DEVICE_TABLE(of, isl29501_i2c_matches);
-#endif
static struct i2c_driver isl29501_driver = {
.driver = {
.name = "isl29501",
+ .of_match_table = isl29501_i2c_matches,
},
.id_table = isl29501_id,
.probe = isl29501_probe,
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index 0d230a0dff56..427c9343d6d1 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -337,28 +337,19 @@ static int sx9310_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
- int ret;
if (chan->type != IIO_PROXIMITY)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx_common_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx_common_read_proximity(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx9310_read_gain(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx9310_read_gain(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9310_read_samp_freq(data, val, val2);
default:
@@ -546,12 +537,10 @@ static int sx9310_write_thresh(struct sx_common_data *data,
return -EINVAL;
regval = FIELD_PREP(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, reg,
- SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, reg,
+ SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval);
}
static int sx9310_write_hysteresis(struct sx_common_data *data,
@@ -576,17 +565,14 @@ static int sx9310_write_hysteresis(struct sx_common_data *data,
return -EINVAL;
hyst = FIELD_PREP(SX9310_REG_PROX_CTRL10_HYST_MASK, hyst);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
- SX9310_REG_PROX_CTRL10_HYST_MASK, hyst);
- mutex_unlock(&data->mutex);
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
+ SX9310_REG_PROX_CTRL10_HYST_MASK, hyst);
}
static int sx9310_write_far_debounce(struct sx_common_data *data, int val)
{
- int ret;
unsigned int regval;
if (val > 0)
@@ -596,18 +582,14 @@ static int sx9310_write_far_debounce(struct sx_common_data *data, int val)
regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
- SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
+ SX9310_REG_PROX_CTRL10_FAR_DEBOUNCE_MASK,
+ regval);
}
static int sx9310_write_close_debounce(struct sx_common_data *data, int val)
{
- int ret;
unsigned int regval;
if (val > 0)
@@ -617,13 +599,10 @@ static int sx9310_write_close_debounce(struct sx_common_data *data, int val)
regval = FIELD_PREP(SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
- SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL10,
+ SX9310_REG_PROX_CTRL10_CLOSE_DEBOUNCE_MASK,
+ regval);
}
static int sx9310_write_event_val(struct iio_dev *indio_dev,
@@ -658,7 +637,7 @@ static int sx9310_write_event_val(struct iio_dev *indio_dev,
static int sx9310_set_samp_freq(struct sx_common_data *data, int val, int val2)
{
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(sx9310_samp_freq_table); i++)
if (val == sx9310_samp_freq_table[i].val &&
@@ -668,23 +647,17 @@ static int sx9310_set_samp_freq(struct sx_common_data *data, int val, int val2)
if (i == ARRAY_SIZE(sx9310_samp_freq_table))
return -EINVAL;
- mutex_lock(&data->mutex);
-
- ret = regmap_update_bits(
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(
data->regmap, SX9310_REG_PROX_CTRL0,
SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK,
FIELD_PREP(SX9310_REG_PROX_CTRL0_SCANPERIOD_MASK, i));
-
- mutex_unlock(&data->mutex);
-
- return ret;
}
static int sx9310_write_gain(struct sx_common_data *data,
const struct iio_chan_spec *chan, int val)
{
unsigned int gain, mask;
- int ret;
gain = ilog2(val);
@@ -703,12 +676,9 @@ static int sx9310_write_gain(struct sx_common_data *data,
return -EINVAL;
}
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL3, mask,
- gain);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9310_REG_PROX_CTRL3, mask,
+ gain);
}
static int sx9310_write_raw(struct iio_dev *indio_dev,
@@ -969,22 +939,18 @@ static int sx9310_suspend(struct device *dev)
disable_irq_nosync(data->client->irq);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, SX9310_REG_PROX_CTRL0,
&data->suspend_ctrl);
if (ret)
- goto out;
+ return ret;
ctrl0 = data->suspend_ctrl & ~SX9310_REG_PROX_CTRL0_SENSOREN_MASK;
ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0, ctrl0);
if (ret)
- goto out;
-
- ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
+ return ret;
-out:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, SX9310_REG_PAUSE, 0);
}
static int sx9310_resume(struct device *dev)
@@ -992,18 +958,16 @@ static int sx9310_resume(struct device *dev)
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
int ret;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
- if (ret)
- goto out;
-
- ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
- data->suspend_ctrl);
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_write(data->regmap, SX9310_REG_PAUSE, 1);
+ if (ret)
+ return ret;
-out:
- mutex_unlock(&data->mutex);
- if (ret)
- return ret;
+ ret = regmap_write(data->regmap, SX9310_REG_PROX_CTRL0,
+ data->suspend_ctrl);
+ if (ret)
+ return ret;
+ }
enable_irq(data->client->irq);
return 0;
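
The locking changes in this file, and in the sx9324/sx9360 files that follow, all use the scope-based cleanup helpers from <linux/cleanup.h> instead of open-coded mutex_lock()/mutex_unlock() pairs. A minimal stand-alone sketch of the two forms used here, with a hypothetical driver struct and register so it reads on its own:

#include <linux/cleanup.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/regmap.h>

#define EXAMPLE_REG_CTRL        0x00    /* hypothetical register */

struct example_data {                   /* hypothetical driver state */
        struct mutex lock;
        struct regmap *regmap;
        unsigned int saved;
        int irq;
};

/*
 * guard(): the mutex is released automatically when the hidden guard
 * variable goes out of scope, so every return path is safe without an
 * explicit unlock.
 */
static int example_write_ctrl(struct example_data *data, unsigned int val)
{
        guard(mutex)(&data->lock);
        return regmap_write(data->regmap, EXAMPLE_REG_CTRL, val);
}

/*
 * scoped_guard(): the lock is held only for the braced block, so the
 * enable_irq() below runs after the mutex has been dropped.
 */
static int example_resume(struct example_data *data)
{
        scoped_guard(mutex, &data->lock) {
                int ret = regmap_write(data->regmap, EXAMPLE_REG_CTRL,
                                       data->saved);
                if (ret)
                        return ret;
        }

        enable_irq(data->irq);
        return 0;
}
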
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index ac2ed2da21cc..aa0d14a49d5e 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -429,25 +429,16 @@ static int sx9324_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx_common_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx_common_read_proximity(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx9324_read_gain(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx9324_read_gain(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9324_read_samp_freq(data, val, val2);
default:
@@ -484,7 +475,7 @@ static int sx9324_read_avail(struct iio_dev *indio_dev,
static int sx9324_set_samp_freq(struct sx_common_data *data,
int val, int val2)
{
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(sx9324_samp_freq_table); i++)
if (val == sx9324_samp_freq_table[i].val &&
@@ -494,15 +485,11 @@ static int sx9324_set_samp_freq(struct sx_common_data *data,
if (i == ARRAY_SIZE(sx9324_samp_freq_table))
return -EINVAL;
- mutex_lock(&data->mutex);
-
- ret = regmap_update_bits(data->regmap,
- SX9324_REG_GNRL_CTRL0,
- SX9324_REG_GNRL_CTRL0_SCANPERIOD_MASK, i);
-
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap,
+ SX9324_REG_GNRL_CTRL0,
+ SX9324_REG_GNRL_CTRL0_SCANPERIOD_MASK, i);
}
static int sx9324_read_thresh(struct sx_common_data *data,
@@ -623,7 +610,6 @@ static int sx9324_write_thresh(struct sx_common_data *data,
const struct iio_chan_spec *chan, int _val)
{
unsigned int reg, val = _val;
- int ret;
reg = SX9324_REG_PROX_CTRL6 + chan->channel / 2;
@@ -633,11 +619,9 @@ static int sx9324_write_thresh(struct sx_common_data *data,
if (val > 0xff)
return -EINVAL;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, reg, val);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, reg, val);
}
static int sx9324_write_hysteresis(struct sx_common_data *data,
@@ -662,18 +646,15 @@ static int sx9324_write_hysteresis(struct sx_common_data *data,
return -EINVAL;
hyst = FIELD_PREP(SX9324_REG_PROX_CTRL5_HYST_MASK, hyst);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
- SX9324_REG_PROX_CTRL5_HYST_MASK, hyst);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
+ SX9324_REG_PROX_CTRL5_HYST_MASK, hyst);
}
static int sx9324_write_far_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -682,19 +663,16 @@ static int sx9324_write_far_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9324_REG_PROX_CTRL5_FAR_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
- SX9324_REG_PROX_CTRL5_FAR_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
+ SX9324_REG_PROX_CTRL5_FAR_DEBOUNCE_MASK,
+ regval);
}
static int sx9324_write_close_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -703,13 +681,11 @@ static int sx9324_write_close_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
- SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, SX9324_REG_PROX_CTRL5,
+ SX9324_REG_PROX_CTRL5_CLOSE_DEBOUNCE_MASK,
+ regval);
}
static int sx9324_write_event_val(struct iio_dev *indio_dev,
@@ -746,7 +722,6 @@ static int sx9324_write_gain(struct sx_common_data *data,
const struct iio_chan_spec *chan, int val)
{
unsigned int gain, reg;
- int ret;
reg = SX9324_REG_PROX_CTRL0 + chan->channel / 2;
@@ -756,13 +731,11 @@ static int sx9324_write_gain(struct sx_common_data *data,
gain = FIELD_PREP(SX9324_REG_PROX_CTRL0_GAIN_MASK, gain);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, reg,
- SX9324_REG_PROX_CTRL0_GAIN_MASK,
- gain);
- mutex_unlock(&data->mutex);
+ guard(mutex)(&data->mutex);
- return ret;
+ return regmap_update_bits(data->regmap, reg,
+ SX9324_REG_PROX_CTRL0_GAIN_MASK,
+ gain);
}
static int sx9324_write_raw(struct iio_dev *indio_dev,
@@ -873,6 +846,29 @@ static int sx9324_init_compensation(struct iio_dev *indio_dev)
20000, 2000000);
}
+static u8 sx9324_parse_phase_prop(struct device *dev,
+ struct sx_common_reg_default *reg_def,
+ const char *prop)
+{
+ unsigned int pin_defs[SX9324_NUM_PINS];
+ int count, ret, pin;
+ u32 raw = 0;
+
+ count = device_property_count_u32(dev, prop);
+ if (count != ARRAY_SIZE(pin_defs))
+ return reg_def->def;
+ ret = device_property_read_u32_array(dev, prop, pin_defs,
+ ARRAY_SIZE(pin_defs));
+ if (ret)
+ return reg_def->def;
+
+ for (pin = 0; pin < SX9324_NUM_PINS; pin++)
+ raw |= (pin_defs[pin] << (2 * pin)) &
+ SX9324_REG_AFE_PH0_PIN_MASK(pin);
+
+ return raw;
+}
+
static const struct sx_common_reg_default *
sx9324_get_default_reg(struct device *dev, int idx,
struct sx_common_reg_default *reg_def)
@@ -881,37 +877,29 @@ sx9324_get_default_reg(struct device *dev, int idx,
"highest" };
static const char * const sx9324_csidle[] = { "hi-z", "hi-z", "gnd",
"vdd" };
-#define SX9324_PIN_DEF "semtech,ph0-pin"
-#define SX9324_RESOLUTION_DEF "semtech,ph01-resolution"
-#define SX9324_PROXRAW_DEF "semtech,ph01-proxraw-strength"
- unsigned int pin_defs[SX9324_NUM_PINS];
- char prop[] = SX9324_PROXRAW_DEF;
u32 start = 0, raw = 0, pos = 0;
- int ret, count, ph, pin;
+ const char *prop;
+ int ret;
memcpy(reg_def, &sx9324_default_regs[idx], sizeof(*reg_def));
sx_common_get_raw_register_config(dev, reg_def);
switch (reg_def->reg) {
case SX9324_REG_AFE_PH0:
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph0-pin");
+ break;
case SX9324_REG_AFE_PH1:
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph1-pin");
+ break;
case SX9324_REG_AFE_PH2:
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph2-pin");
+ break;
case SX9324_REG_AFE_PH3:
- ph = reg_def->reg - SX9324_REG_AFE_PH0;
- snprintf(prop, ARRAY_SIZE(prop), "semtech,ph%d-pin", ph);
-
- count = device_property_count_u32(dev, prop);
- if (count != ARRAY_SIZE(pin_defs))
- break;
- ret = device_property_read_u32_array(dev, prop, pin_defs,
- ARRAY_SIZE(pin_defs));
- if (ret)
- break;
-
- for (pin = 0; pin < SX9324_NUM_PINS; pin++)
- raw |= (pin_defs[pin] << (2 * pin)) &
- SX9324_REG_AFE_PH0_PIN_MASK(pin);
- reg_def->def = raw;
+ reg_def->def = sx9324_parse_phase_prop(dev, reg_def,
+ "semtech,ph3-pin");
break;
case SX9324_REG_AFE_CTRL0:
ret = device_property_match_property_string(dev, "semtech,cs-idle-sleep",
@@ -933,11 +921,9 @@ sx9324_get_default_reg(struct device *dev, int idx,
case SX9324_REG_AFE_CTRL4:
case SX9324_REG_AFE_CTRL7:
if (reg_def->reg == SX9324_REG_AFE_CTRL4)
- strncpy(prop, "semtech,ph01-resolution",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph01-resolution";
else
- strncpy(prop, "semtech,ph23-resolution",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph23-resolution";
ret = device_property_read_u32(dev, prop, &raw);
if (ret)
@@ -1008,11 +994,9 @@ sx9324_get_default_reg(struct device *dev, int idx,
case SX9324_REG_PROX_CTRL0:
case SX9324_REG_PROX_CTRL1:
if (reg_def->reg == SX9324_REG_PROX_CTRL0)
- strncpy(prop, "semtech,ph01-proxraw-strength",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph01-proxraw-strength";
else
- strncpy(prop, "semtech,ph23-proxraw-strength",
- ARRAY_SIZE(prop));
+ prop = "semtech,ph23-proxraw-strength";
ret = device_property_read_u32(dev, prop, &raw);
if (ret)
break;
@@ -1081,34 +1065,30 @@ static int sx9324_suspend(struct device *dev)
disable_irq_nosync(data->client->irq);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, SX9324_REG_GNRL_CTRL1, &regval);
+ if (ret < 0)
+ return ret;
data->suspend_ctrl =
FIELD_GET(SX9324_REG_GNRL_CTRL1_PHEN_MASK, regval);
- if (ret < 0)
- goto out;
/* Disable all phases, send the device to sleep. */
- ret = regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1, 0);
-
-out:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1, 0);
}
static int sx9324_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
- int ret;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1,
- data->suspend_ctrl | SX9324_REG_GNRL_CTRL1_PAUSECTRL);
- mutex_unlock(&data->mutex);
- if (ret)
- return ret;
+ scoped_guard(mutex, &data->mutex) {
+ int ret = regmap_write(data->regmap, SX9324_REG_GNRL_CTRL1,
+ data->suspend_ctrl |
+ SX9324_REG_GNRL_CTRL1_PAUSECTRL);
+ if (ret)
+ return ret;
+ }
enable_irq(data->client->irq);
return 0;
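
The read_raw() rework in these Semtech drivers swaps the iio_device_claim_direct_mode()/release pair for iio_device_claim_direct_scoped(), which drops the claim automatically when the scoped statement is left. A stand-alone sketch of the pattern, with a hypothetical private struct and channel-read helper standing in for the real ones:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/iio/iio.h>

struct example_data {           /* hypothetical private data */
        int last_raw;
};

/* Placeholder for a real bus read. */
static int example_read_channel(struct example_data *data,
                                struct iio_chan_spec const *chan, int *val)
{
        *val = data->last_raw;
        return IIO_VAL_INT;
}

static int example_read_raw(struct iio_dev *indio_dev,
                            struct iio_chan_spec const *chan,
                            int *val, int *val2, long mask)
{
        struct example_data *data = iio_priv(indio_dev);

        switch (mask) {
        case IIO_CHAN_INFO_RAW:
                /*
                 * -EBUSY is returned if direct mode cannot be claimed; the
                 * claim is dropped when the scoped statement is left.
                 */
                iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
                        return example_read_channel(data, chan, val);
                unreachable();
        default:
                return -EINVAL;
        }
}
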
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index 2c4e14a4fe9f..75a1c29f14eb 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -322,25 +322,16 @@ static int sx9360_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
- int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx_common_read_proximity(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx_common_read_proximity(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_HARDWAREGAIN:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- ret = sx9360_read_gain(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
- return ret;
+ iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
+ return sx9360_read_gain(data, chan, val);
+ unreachable();
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9360_read_samp_freq(data, val, val2);
default:
@@ -387,19 +378,15 @@ static int sx9360_read_avail(struct iio_dev *indio_dev,
static int sx9360_set_samp_freq(struct sx_common_data *data,
int val, int val2)
{
- int ret, reg;
+ int reg;
__be16 buf;
reg = val * 8192 / SX9360_FOSC_HZ + val2 * 8192 / (SX9360_FOSC_MHZ);
buf = cpu_to_be16(reg);
- mutex_lock(&data->mutex);
-
- ret = regmap_bulk_write(data->regmap, SX9360_REG_GNRL_CTRL1, &buf,
- sizeof(buf));
+ guard(mutex)(&data->mutex);
- mutex_unlock(&data->mutex);
-
- return ret;
+ return regmap_bulk_write(data->regmap, SX9360_REG_GNRL_CTRL1, &buf,
+ sizeof(buf));
}
static int sx9360_read_thresh(struct sx_common_data *data, int *val)
@@ -510,7 +497,6 @@ static int sx9360_read_event_val(struct iio_dev *indio_dev,
static int sx9360_write_thresh(struct sx_common_data *data, int _val)
{
unsigned int val = _val;
- int ret;
if (val >= 1)
val = int_sqrt(2 * val);
@@ -518,11 +504,8 @@ static int sx9360_write_thresh(struct sx_common_data *data, int _val)
if (val > 0xff)
return -EINVAL;
- mutex_lock(&data->mutex);
- ret = regmap_write(data->regmap, SX9360_REG_PROX_CTRL5, val);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_write(data->regmap, SX9360_REG_PROX_CTRL5, val);
}
static int sx9360_write_hysteresis(struct sx_common_data *data, int _val)
@@ -546,18 +529,14 @@ static int sx9360_write_hysteresis(struct sx_common_data *data, int _val)
return -EINVAL;
hyst = FIELD_PREP(SX9360_REG_PROX_CTRL4_HYST_MASK, hyst);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
- SX9360_REG_PROX_CTRL4_HYST_MASK, hyst);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
+ SX9360_REG_PROX_CTRL4_HYST_MASK, hyst);
}
static int sx9360_write_far_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -566,19 +545,15 @@ static int sx9360_write_far_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9360_REG_PROX_CTRL4_FAR_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
- SX9360_REG_PROX_CTRL4_FAR_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
+ SX9360_REG_PROX_CTRL4_FAR_DEBOUNCE_MASK,
+ regval);
}
static int sx9360_write_close_debounce(struct sx_common_data *data, int _val)
{
unsigned int regval, val = _val;
- int ret;
if (val > 0)
val = ilog2(val);
@@ -587,13 +562,10 @@ static int sx9360_write_close_debounce(struct sx_common_data *data, int _val)
regval = FIELD_PREP(SX9360_REG_PROX_CTRL4_CLOSE_DEBOUNCE_MASK, val);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
- SX9360_REG_PROX_CTRL4_CLOSE_DEBOUNCE_MASK,
- regval);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, SX9360_REG_PROX_CTRL4,
+ SX9360_REG_PROX_CTRL4_CLOSE_DEBOUNCE_MASK,
+ regval);
}
static int sx9360_write_event_val(struct iio_dev *indio_dev,
@@ -630,19 +602,15 @@ static int sx9360_write_gain(struct sx_common_data *data,
const struct iio_chan_spec *chan, int val)
{
unsigned int gain, reg;
- int ret;
gain = ilog2(val);
reg = SX9360_REG_PROX_CTRL0_PHR + chan->channel;
gain = FIELD_PREP(SX9360_REG_PROX_CTRL0_GAIN_MASK, gain);
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, reg,
- SX9360_REG_PROX_CTRL0_GAIN_MASK,
- gain);
- mutex_unlock(&data->mutex);
-
- return ret;
+ guard(mutex)(&data->mutex);
+ return regmap_update_bits(data->regmap, reg,
+ SX9360_REG_PROX_CTRL0_GAIN_MASK,
+ gain);
}
static int sx9360_write_raw(struct iio_dev *indio_dev,
@@ -827,36 +795,31 @@ static int sx9360_suspend(struct device *dev)
disable_irq_nosync(data->client->irq);
- mutex_lock(&data->mutex);
+ guard(mutex)(&data->mutex);
ret = regmap_read(data->regmap, SX9360_REG_GNRL_CTRL0, &regval);
+ if (ret < 0)
+ return ret;
data->suspend_ctrl =
FIELD_GET(SX9360_REG_GNRL_CTRL0_PHEN_MASK, regval);
- if (ret < 0)
- goto out;
/* Disable all phases, send the device to sleep. */
- ret = regmap_write(data->regmap, SX9360_REG_GNRL_CTRL0, 0);
-
-out:
- mutex_unlock(&data->mutex);
- return ret;
+ return regmap_write(data->regmap, SX9360_REG_GNRL_CTRL0, 0);
}
static int sx9360_resume(struct device *dev)
{
struct sx_common_data *data = iio_priv(dev_get_drvdata(dev));
- int ret;
-
- mutex_lock(&data->mutex);
- ret = regmap_update_bits(data->regmap, SX9360_REG_GNRL_CTRL0,
- SX9360_REG_GNRL_CTRL0_PHEN_MASK,
- data->suspend_ctrl);
- mutex_unlock(&data->mutex);
- if (ret)
- return ret;
+ scoped_guard(mutex, &data->mutex) {
+ int ret = regmap_update_bits(data->regmap,
+ SX9360_REG_GNRL_CTRL0,
+ SX9360_REG_GNRL_CTRL0_PHEN_MASK,
+ data->suspend_ctrl);
+ if (ret)
+ return ret;
+ }
enable_irq(data->client->irq);
return 0;
}
diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c
index fcb96c44d954..39447c786af3 100644
--- a/drivers/iio/temperature/ltc2983.c
+++ b/drivers/iio/temperature/ltc2983.c
@@ -207,6 +207,7 @@ enum {
container_of(_sensor, struct ltc2983_temp, sensor)
struct ltc2983_chip_info {
+ const char *name;
unsigned int max_channels_nr;
bool has_temp;
bool has_eeprom;
@@ -1346,7 +1347,7 @@ static irqreturn_t ltc2983_irq_handler(int irq, void *data)
__chan; \
})
-static int ltc2983_parse_dt(struct ltc2983_data *st)
+static int ltc2983_parse_fw(struct ltc2983_data *st)
{
struct device *dev = &st->spi->dev;
struct fwnode_handle *child;
@@ -1605,7 +1606,6 @@ static int ltc2983_probe(struct spi_device *spi)
struct ltc2983_data *st;
struct iio_dev *indio_dev;
struct gpio_desc *gpio;
- const char *name = spi_get_device_id(spi)->name;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
@@ -1614,9 +1614,7 @@ static int ltc2983_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->info = device_get_match_data(&spi->dev);
- if (!st->info)
- st->info = (void *)spi_get_device_id(spi)->driver_data;
+ st->info = spi_get_device_match_data(spi);
if (!st->info)
return -ENODEV;
@@ -1632,7 +1630,7 @@ static int ltc2983_probe(struct spi_device *spi)
st->eeprom_key = cpu_to_be32(LTC2983_EEPROM_KEY);
spi_set_drvdata(spi, st);
- ret = ltc2983_parse_dt(st);
+ ret = ltc2983_parse_fw(st);
if (ret)
return ret;
@@ -1657,7 +1655,7 @@ static int ltc2983_probe(struct spi_device *spi)
return ret;
ret = devm_request_irq(&spi->dev, spi->irq, ltc2983_irq_handler,
- IRQF_TRIGGER_RISING, name, st);
+ IRQF_TRIGGER_RISING, st->info->name, st);
if (ret) {
dev_err(&spi->dev, "failed to request an irq, %d", ret);
return ret;
@@ -1672,7 +1670,7 @@ static int ltc2983_probe(struct spi_device *spi)
return ret;
}
- indio_dev->name = name;
+ indio_dev->name = st->info->name;
indio_dev->num_channels = st->iio_channels;
indio_dev->channels = st->iio_chan;
indio_dev->modes = INDIO_DIRECT_MODE;
@@ -1703,15 +1701,25 @@ static DEFINE_SIMPLE_DEV_PM_OPS(ltc2983_pm_ops, ltc2983_suspend,
ltc2983_resume);
static const struct ltc2983_chip_info ltc2983_chip_info_data = {
+ .name = "ltc2983",
.max_channels_nr = 20,
};
static const struct ltc2983_chip_info ltc2984_chip_info_data = {
+ .name = "ltc2984",
.max_channels_nr = 20,
.has_eeprom = true,
};
static const struct ltc2983_chip_info ltc2986_chip_info_data = {
+ .name = "ltc2986",
+ .max_channels_nr = 10,
+ .has_temp = true,
+ .has_eeprom = true,
+};
+
+static const struct ltc2983_chip_info ltm2985_chip_info_data = {
+ .name = "ltm2985",
.max_channels_nr = 10,
.has_temp = true,
.has_eeprom = true,
@@ -1721,7 +1729,7 @@ static const struct spi_device_id ltc2983_id_table[] = {
{ "ltc2983", (kernel_ulong_t)&ltc2983_chip_info_data },
{ "ltc2984", (kernel_ulong_t)&ltc2984_chip_info_data },
{ "ltc2986", (kernel_ulong_t)&ltc2986_chip_info_data },
- { "ltm2985", (kernel_ulong_t)&ltc2986_chip_info_data },
+ { "ltm2985", (kernel_ulong_t)&ltm2985_chip_info_data },
{},
};
MODULE_DEVICE_TABLE(spi, ltc2983_id_table);
@@ -1730,7 +1738,7 @@ static const struct of_device_id ltc2983_of_match[] = {
{ .compatible = "adi,ltc2983", .data = &ltc2983_chip_info_data },
{ .compatible = "adi,ltc2984", .data = &ltc2984_chip_info_data },
{ .compatible = "adi,ltc2986", .data = &ltc2986_chip_info_data },
- { .compatible = "adi,ltm2985", .data = &ltc2986_chip_info_data },
+ { .compatible = "adi,ltm2985", .data = &ltm2985_chip_info_data },
{},
};
MODULE_DEVICE_TABLE(of, ltc2983_of_match);
diff --git a/drivers/iio/temperature/tmp117.c b/drivers/iio/temperature/tmp117.c
index 059953015ae7..8972083d903a 100644
--- a/drivers/iio/temperature/tmp117.c
+++ b/drivers/iio/temperature/tmp117.c
@@ -9,6 +9,7 @@
* Note: This driver assumes that the sensor has been calibrated beforehand.
*/
+#include <linux/delay.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/property.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
@@ -148,10 +150,17 @@ static int tmp117_probe(struct i2c_client *client)
struct tmp117_data *data;
struct iio_dev *indio_dev;
int dev_id;
+ int ret;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -EOPNOTSUPP;
+ ret = devm_regulator_get_enable(&client->dev, "vcc");
+ if (ret)
+ return ret;
+
+ fsleep(1500);
+
dev_id = i2c_smbus_read_word_swapped(client, TMP117_REG_DEVICE_ID);
if (dev_id < 0)
return dev_id;
diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig
index 0b6e4e278a2f..33cca49c8058 100644
--- a/drivers/iio/test/Kconfig
+++ b/drivers/iio/test/Kconfig
@@ -4,6 +4,20 @@
#
# Keep in alphabetical order
+config IIO_GTS_KUNIT_TEST
+ tristate "Test IIO gain-time-scale helpers" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ select IIO_GTS_HELPER
+ select TEST_KUNIT_DEVICE_HELPERS
+ default KUNIT_ALL_TESTS
+ help
+ Build unit tests for the IIO light sensor gain-time-scale helpers.
+
+ For more information on KUnit and unit tests in general, please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
config IIO_RESCALE_KUNIT_TEST
tristate "Test IIO rescale conversion functions" if !KUNIT_ALL_TESTS
depends on KUNIT && IIO_RESCALE
diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile
index d76eaf36da82..e9a4cf1ff57f 100644
--- a/drivers/iio/test/Makefile
+++ b/drivers/iio/test/Makefile
@@ -6,4 +6,5 @@
# Keep in alphabetical order
obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o
obj-$(CONFIG_IIO_FORMAT_KUNIT_TEST) += iio-test-format.o
+obj-$(CONFIG_IIO_GTS_KUNIT_TEST) += iio-test-gts.o
CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN)
diff --git a/drivers/iio/test/iio-test-gts.c b/drivers/iio/test/iio-test-gts.c
new file mode 100644
index 000000000000..cf7ab773ea0b
--- /dev/null
+++ b/drivers/iio/test/iio-test-gts.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Unit tests for IIO light sensor gain-time-scale helpers
+ *
+ * Copyright (c) 2023 Matti Vaittinen <mazziesaccount@gmail.com>
+ */
+
+#include <kunit/device.h>
+#include <kunit/test.h>
+#include <linux/device.h>
+#include <linux/iio/iio-gts-helper.h>
+#include <linux/iio/types.h>
+
+/*
+ * Please, read the "rant" from the top of the lib/test_linear_ranges.c if
+ * you see a line of helper code which is not being tested.
+ *
+ * Then, please look at the line which is not being tested. Is this line
+ * somehow unusually complex? If the answer is "no", then chances are that the
+ * "development inertia" caused by adding a test exceeds the benefits.
+ *
+ * If yes, then adding a test is probably a good idea but please stop for a
+ * moment and consider the effort of changing all the tests when code gets
+ * refactored. Eventually it needs to be.
+ */
+
+#define TEST_TSEL_50 1
+#define TEST_TSEL_X_MIN TEST_TSEL_50
+#define TEST_TSEL_100 0
+#define TEST_TSEL_200 2
+#define TEST_TSEL_400 4
+#define TEST_TSEL_X_MAX TEST_TSEL_400
+
+#define TEST_GSEL_1 0x00
+#define TEST_GSEL_X_MIN TEST_GSEL_1
+#define TEST_GSEL_4 0x08
+#define TEST_GSEL_16 0x0a
+#define TEST_GSEL_32 0x0b
+#define TEST_GSEL_64 0x0c
+#define TEST_GSEL_256 0x18
+#define TEST_GSEL_512 0x19
+#define TEST_GSEL_1024 0x1a
+#define TEST_GSEL_2048 0x1b
+#define TEST_GSEL_4096 0x1c
+#define TEST_GSEL_X_MAX TEST_GSEL_4096
+
+#define TEST_SCALE_1X 64
+#define TEST_SCALE_MIN_X TEST_SCALE_1X
+#define TEST_SCALE_2X 32
+#define TEST_SCALE_4X 16
+#define TEST_SCALE_8X 8
+#define TEST_SCALE_16X 4
+#define TEST_SCALE_32X 2
+#define TEST_SCALE_64X 1
+
+#define TEST_SCALE_NANO_128X 500000000
+#define TEST_SCALE_NANO_256X 250000000
+#define TEST_SCALE_NANO_512X 125000000
+#define TEST_SCALE_NANO_1024X 62500000
+#define TEST_SCALE_NANO_2048X 31250000
+#define TEST_SCALE_NANO_4096X 15625000
+#define TEST_SCALE_NANO_4096X2 7812500
+#define TEST_SCALE_NANO_4096X4 3906250
+#define TEST_SCALE_NANO_4096X8 1953125
+
+#define TEST_SCALE_NANO_MAX_X TEST_SCALE_NANO_4096X8
+
+/*
+ * Can't have this allocated from stack because the kunit clean-up will
+ * happen only after the test function has already gone
+ */
+static struct iio_gts gts;
+
+static const struct iio_gain_sel_pair gts_test_gains[] = {
+ GAIN_SCALE_GAIN(1, TEST_GSEL_1),
+ GAIN_SCALE_GAIN(4, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(16, TEST_GSEL_16),
+ GAIN_SCALE_GAIN(32, TEST_GSEL_32),
+ GAIN_SCALE_GAIN(64, TEST_GSEL_64),
+ GAIN_SCALE_GAIN(256, TEST_GSEL_256),
+ GAIN_SCALE_GAIN(512, TEST_GSEL_512),
+ GAIN_SCALE_GAIN(1024, TEST_GSEL_1024),
+ GAIN_SCALE_GAIN(2048, TEST_GSEL_2048),
+ GAIN_SCALE_GAIN(4096, TEST_GSEL_4096),
+#define HWGAIN_MAX 4096
+};
+
+static const struct iio_itime_sel_mul gts_test_itimes[] = {
+ GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 8),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
+ GAIN_SCALE_ITIME_US(100 * 1000, TEST_TSEL_100, 2),
+ GAIN_SCALE_ITIME_US(50 * 1000, TEST_TSEL_50, 1),
+#define TIMEGAIN_MAX 8
+};
+#define TOTAL_GAIN_MAX (HWGAIN_MAX * TIMEGAIN_MAX)
+#define IIO_GTS_TEST_DEV "iio-gts-test-dev"
+
+static struct device *__test_init_iio_gain_scale(struct kunit *test,
+ struct iio_gts *gts, const struct iio_gain_sel_pair *g_table,
+ int num_g, const struct iio_itime_sel_mul *i_table, int num_i)
+{
+ struct device *dev;
+ int ret;
+
+ dev = kunit_device_register(test, IIO_GTS_TEST_DEV);
+
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
+ if (IS_ERR_OR_NULL(dev))
+ return NULL;
+
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, g_table, num_g,
+ i_table, num_i, gts);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ if (ret)
+ return NULL;
+
+ return dev;
+}
+
+#define test_init_iio_gain_scale(test, gts) \
+ __test_init_iio_gain_scale(test, gts, gts_test_gains, \
+ ARRAY_SIZE(gts_test_gains), gts_test_itimes, \
+ ARRAY_SIZE(gts_test_itimes))
+
+static void test_init_iio_gts_invalid(struct kunit *test)
+{
+ struct device *dev;
+ int ret;
+ const struct iio_itime_sel_mul itimes_neg[] = {
+ GAIN_SCALE_ITIME_US(-10, TEST_TSEL_400, 8),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
+ };
+ const struct iio_gain_sel_pair gains_neg[] = {
+ GAIN_SCALE_GAIN(1, TEST_GSEL_1),
+ GAIN_SCALE_GAIN(2, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(-2, TEST_GSEL_16),
+ };
+ /* 55555 * 38656 = 2147534080 => overflows 32bit int */
+ const struct iio_itime_sel_mul itimes_overflow[] = {
+ GAIN_SCALE_ITIME_US(400 * 1000, TEST_TSEL_400, 55555),
+ GAIN_SCALE_ITIME_US(200 * 1000, TEST_TSEL_200, 4),
+ };
+ const struct iio_gain_sel_pair gains_overflow[] = {
+ GAIN_SCALE_GAIN(1, TEST_GSEL_1),
+ GAIN_SCALE_GAIN(2, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(38656, TEST_GSEL_16),
+ };
+
+ dev = kunit_device_register(test, IIO_GTS_TEST_DEV);
+ KUNIT_EXPECT_NOT_ERR_OR_NULL(test, dev);
+ if (!dev)
+ return;
+
+ /* Ok gains, negative time */
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, gts_test_gains,
+ ARRAY_SIZE(gts_test_gains), itimes_neg,
+ ARRAY_SIZE(itimes_neg), &gts);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+
+ /* Ok times, negative gain */
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, gains_neg,
+ ARRAY_SIZE(gains_neg), gts_test_itimes,
+ ARRAY_SIZE(gts_test_itimes), &gts);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+
+ /* gain * time overflow int */
+ ret = devm_iio_init_iio_gts(dev, TEST_SCALE_1X, 0, gains_overflow,
+ ARRAY_SIZE(gains_overflow), itimes_overflow,
+ ARRAY_SIZE(itimes_overflow), &gts);
+ KUNIT_EXPECT_EQ(test, -EOVERFLOW, ret);
+}
+
+static void test_iio_gts_find_gain_for_scale_using_time(struct kunit *test)
+{
+ struct device *dev;
+ int ret, gain_sel;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_100,
+ TEST_SCALE_8X, 0, &gain_sel);
+ /*
+ * Meas time 100 => gain by time 2x
+ * TEST_SCALE_8X matches total gain 8x
+ * => required HWGAIN 4x
+ */
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_4, gain_sel);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_200, 0,
+ TEST_SCALE_NANO_256X, &gain_sel);
+ /*
+ * Meas time 200 => gain by time 4x
+ * TEST_SCALE_256X matches total gain 256x
+ * => required HWGAIN 256/4 => 64x
+ */
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_64, gain_sel);
+
+ /* Min time, Min gain */
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_X_MIN,
+ TEST_SCALE_MIN_X, 0, &gain_sel);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_1, gain_sel);
+
+ /* Max time, Max gain */
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_X_MAX,
+ 0, TEST_SCALE_NANO_MAX_X, &gain_sel);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_GSEL_4096, gain_sel);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_100, 0,
+ TEST_SCALE_NANO_256X, &gain_sel);
+ /*
+ * Meas time 100 => gain by time 2x
+ * TEST_SCALE_256X matches total gain 256x
+ * => required HWGAIN 256/2 => 128x (not in gain-table - unsupported)
+ */
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_200, 0,
+ TEST_SCALE_NANO_MAX_X, &gain_sel);
+ /* We can't reach the max gain with integration time smaller than MAX */
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ ret = iio_gts_find_gain_sel_for_scale_using_time(&gts, TEST_TSEL_50, 0,
+ TEST_SCALE_NANO_MAX_X, &gain_sel);
+ /* We can't reach the max gain with integration time smaller than MAX */
+ KUNIT_EXPECT_NE(test, 0, ret);
+}
+
+static void test_iio_gts_find_new_gain_sel_by_old_gain_time(struct kunit *test)
+{
+ struct device *dev;
+ int ret, old_gain, new_gain, old_time_sel, new_time_sel;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ old_gain = 32;
+ old_time_sel = TEST_TSEL_200;
+ new_time_sel = TEST_TSEL_400;
+
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /*
+ * Doubling the integration time doubles the total gain - so old
+ * (hw)gain must be divided by two to compensate. => 32 / 2 => 16
+ */
+ KUNIT_EXPECT_EQ(test, 16, new_gain);
+
+ old_gain = 4;
+ old_time_sel = TEST_TSEL_50;
+ new_time_sel = TEST_TSEL_200;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /*
+ * gain by time 1x => 4x - (hw)gain 4x => 1x
+ */
+ KUNIT_EXPECT_EQ(test, 1, new_gain);
+
+ old_gain = 512;
+ old_time_sel = TEST_TSEL_400;
+ new_time_sel = TEST_TSEL_50;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ /*
+ * gain by time 8x => 1x - (hw)gain 512x => 4096x
+ */
+ KUNIT_EXPECT_EQ(test, 4096, new_gain);
+
+ /* Unsupported gain 2x */
+ old_gain = 4;
+ old_time_sel = TEST_TSEL_200;
+ new_time_sel = TEST_TSEL_400;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ /* Too small gain */
+ old_gain = 4;
+ old_time_sel = TEST_TSEL_50;
+ new_time_sel = TEST_TSEL_400;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+ /* Too big gain */
+ old_gain = 1024;
+ old_time_sel = TEST_TSEL_400;
+ new_time_sel = TEST_TSEL_50;
+ ret = iio_gts_find_new_gain_sel_by_old_gain_time(&gts, old_gain,
+ old_time_sel, new_time_sel, &new_gain);
+ KUNIT_EXPECT_NE(test, 0, ret);
+
+}
+
+static void test_iio_find_closest_gain_low(struct kunit *test)
+{
+ struct device *dev;
+ bool in_range;
+ int ret;
+
+ const struct iio_gain_sel_pair gts_test_gains_gain_low[] = {
+ GAIN_SCALE_GAIN(4, TEST_GSEL_4),
+ GAIN_SCALE_GAIN(16, TEST_GSEL_16),
+ GAIN_SCALE_GAIN(32, TEST_GSEL_32),
+ };
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ ret = iio_find_closest_gain_low(&gts, 2, &in_range);
+ KUNIT_EXPECT_EQ(test, 1, ret);
+ KUNIT_EXPECT_EQ(test, true, in_range);
+
+ ret = iio_find_closest_gain_low(&gts, 1, &in_range);
+ KUNIT_EXPECT_EQ(test, 1, ret);
+ KUNIT_EXPECT_EQ(test, true, in_range);
+
+ ret = iio_find_closest_gain_low(&gts, 4095, &in_range);
+ KUNIT_EXPECT_EQ(test, 2048, ret);
+ KUNIT_EXPECT_EQ(test, true, in_range);
+
+ ret = iio_find_closest_gain_low(&gts, 4097, &in_range);
+ KUNIT_EXPECT_EQ(test, 4096, ret);
+ KUNIT_EXPECT_EQ(test, false, in_range);
+
+ kunit_device_unregister(test, dev);
+
+ dev = __test_init_iio_gain_scale(test, &gts, gts_test_gains_gain_low,
+ ARRAY_SIZE(gts_test_gains_gain_low),
+ gts_test_itimes, ARRAY_SIZE(gts_test_itimes));
+ if (!dev)
+ return;
+
+ ret = iio_find_closest_gain_low(&gts, 3, &in_range);
+ KUNIT_EXPECT_EQ(test, -EINVAL, ret);
+ KUNIT_EXPECT_EQ(test, false, in_range);
+}
+
+static void test_iio_gts_total_gain_to_scale(struct kunit *test)
+{
+ struct device *dev;
+ int ret, scale_int, scale_nano;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ ret = iio_gts_total_gain_to_scale(&gts, 1, &scale_int, &scale_nano);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_SCALE_1X, scale_int);
+ KUNIT_EXPECT_EQ(test, 0, scale_nano);
+
+ ret = iio_gts_total_gain_to_scale(&gts, 1, &scale_int, &scale_nano);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, TEST_SCALE_1X, scale_int);
+ KUNIT_EXPECT_EQ(test, 0, scale_nano);
+
+ ret = iio_gts_total_gain_to_scale(&gts, 4096 * 8, &scale_int,
+ &scale_nano);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ KUNIT_EXPECT_EQ(test, 0, scale_int);
+ KUNIT_EXPECT_EQ(test, TEST_SCALE_NANO_4096X8, scale_nano);
+}
+
+static void test_iio_gts_chk_times(struct kunit *test, const int *vals)
+{
+ static const int expected[] = {0, 50000, 0, 100000, 0, 200000, 0, 400000};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(expected); i++)
+ KUNIT_EXPECT_EQ(test, expected[i], vals[i]);
+}
+
+static void test_iio_gts_chk_scales_all(struct kunit *test, struct iio_gts *gts,
+ const int *vals, int len)
+{
+ static const int gains[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512,
+ 1024, 2048, 4096, 4096 * 2, 4096 * 4,
+ 4096 * 8};
+ int expected[ARRAY_SIZE(gains) * 2];
+ int i, ret;
+ int exp_len = ARRAY_SIZE(gains) * 2;
+
+ KUNIT_EXPECT_EQ(test, exp_len, len);
+ if (len != exp_len)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gains); i++) {
+ ret = iio_gts_total_gain_to_scale(gts, gains[i],
+ &expected[2 * i],
+ &expected[2 * i + 1]);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ if (ret)
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(expected); i++)
+ KUNIT_EXPECT_EQ(test, expected[i], vals[i]);
+}
+
+static void test_iio_gts_chk_scales_t200(struct kunit *test, struct iio_gts *gts,
+ const int *vals, int len)
+{
+ /* The gain caused by time 200 is 4x */
+ static const int gains[] = {
+ 1 * 4,
+ 4 * 4,
+ 16 * 4,
+ 32 * 4,
+ 64 * 4,
+ 256 * 4,
+ 512 * 4,
+ 1024 * 4,
+ 2048 * 4,
+ 4096 * 4
+ };
+ int expected[ARRAY_SIZE(gains) * 2];
+ int i, ret;
+
+ KUNIT_EXPECT_EQ(test, 2 * ARRAY_SIZE(gains), len);
+ if (len < 2 * ARRAY_SIZE(gains))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(gains); i++) {
+ ret = iio_gts_total_gain_to_scale(gts, gains[i],
+ &expected[2 * i],
+ &expected[2 * i + 1]);
+ KUNIT_EXPECT_EQ(test, 0, ret);
+ if (ret)
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(expected); i++)
+ KUNIT_EXPECT_EQ(test, expected[i], vals[i]);
+}
+
+static void test_iio_gts_avail_test(struct kunit *test)
+{
+ struct device *dev;
+ int ret;
+ int type, len;
+ const int *vals;
+
+ dev = test_init_iio_gain_scale(test, &gts);
+ if (!dev)
+ return;
+
+ /* test table building for times and iio_gts_avail_times() */
+ ret = iio_gts_avail_times(&gts, &vals, &type, &len);
+ KUNIT_EXPECT_EQ(test, IIO_AVAIL_LIST, ret);
+ if (ret)
+ return;
+
+ KUNIT_EXPECT_EQ(test, IIO_VAL_INT_PLUS_MICRO, type);
+ KUNIT_EXPECT_EQ(test, 8, len);
+ if (len < 8)
+ return;
+
+ test_iio_gts_chk_times(test, vals);
+
+ /* Test table building for all scales and iio_gts_all_avail_scales() */
+ ret = iio_gts_all_avail_scales(&gts, &vals, &type, &len);
+ KUNIT_EXPECT_EQ(test, IIO_AVAIL_LIST, ret);
+ if (ret)
+ return;
+
+ KUNIT_EXPECT_EQ(test, IIO_VAL_INT_PLUS_NANO, type);
+
+ test_iio_gts_chk_scales_all(test, &gts, vals, len);
+
+ /*
+ * Test table building for scales/time and
+ * iio_gts_avail_scales_for_time()
+ */
+ ret = iio_gts_avail_scales_for_time(&gts, 200000, &vals, &type, &len);
+ KUNIT_EXPECT_EQ(test, IIO_AVAIL_LIST, ret);
+ if (ret)
+ return;
+
+ KUNIT_EXPECT_EQ(test, IIO_VAL_INT_PLUS_NANO, type);
+ test_iio_gts_chk_scales_t200(test, &gts, vals, len);
+}
+
+static struct kunit_case iio_gts_test_cases[] = {
+ KUNIT_CASE(test_init_iio_gts_invalid),
+ KUNIT_CASE(test_iio_gts_find_gain_for_scale_using_time),
+ KUNIT_CASE(test_iio_gts_find_new_gain_sel_by_old_gain_time),
+ KUNIT_CASE(test_iio_find_closest_gain_low),
+ KUNIT_CASE(test_iio_gts_total_gain_to_scale),
+ KUNIT_CASE(test_iio_gts_avail_test),
+ {}
+};
+
+static struct kunit_suite iio_gts_test_suite = {
+ .name = "iio-gain-time-scale",
+ .test_cases = iio_gts_test_cases,
+};
+
+kunit_test_suite(iio_gts_test_suite);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Matti Vaittinen <mazziesaccount@gmail.com>");
+MODULE_DESCRIPTION("Test IIO light sensor gain-time-scale helpers");
+MODULE_IMPORT_NS(IIO_GTS_HELPER);
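
All of the expectations in the test above follow from a single relation: reported scale = TEST_SCALE_1X / total gain, where total gain is the hardware gain times the integration-time multiplier, and sub-integer results are expressed as nanos. A small stand-alone illustration of that arithmetic; the helper name and constant are made up to mirror the test defines:

#include <linux/math64.h>
#include <linux/units.h>

#define EXAMPLE_SCALE_1X        64      /* mirrors TEST_SCALE_1X above */

/* Hypothetical helper: scale for a total gain as integer + nano parts. */
static void example_scale_for_gain(unsigned int total_gain, int *ipart, int *nano)
{
        *ipart = EXAMPLE_SCALE_1X / total_gain;
        *nano = div_u64((u64)(EXAMPLE_SCALE_1X % total_gain) * NANO, total_gain);
}

/*
 * total gain 8        -> 8, 0          (matches TEST_SCALE_8X)
 * total gain 128      -> 0, 500000000  (matches TEST_SCALE_NANO_128X)
 * total gain 4096 * 8 -> 0, 1953125    (matches TEST_SCALE_NANO_4096X8)
 */
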
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index ff58058aeadc..bf0df6ee4f78 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -34,6 +34,7 @@ MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");
+#define CM_DESTROY_ID_WAIT_TIMEOUT 10000 /* msecs */
static const char * const ibcm_rej_reason_strs[] = {
[IB_CM_REJ_NO_QP] = "no QP",
[IB_CM_REJ_NO_EEC] = "no EEC",
@@ -1025,10 +1026,20 @@ static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
}
}
+static noinline void cm_destroy_id_wait_timeout(struct ib_cm_id *cm_id)
+{
+ struct cm_id_private *cm_id_priv;
+
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ pr_err("%s: cm_id=%p timed out. state=%d refcnt=%d\n", __func__,
+ cm_id, cm_id->state, refcount_read(&cm_id_priv->refcount));
+}
+
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
+ int ret;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irq(&cm_id_priv->lock);
@@ -1135,7 +1146,14 @@ retest:
xa_erase(&cm.local_id_table, cm_local_id(cm_id->local_id));
cm_deref_id(cm_id_priv);
- wait_for_completion(&cm_id_priv->comp);
+ do {
+ ret = wait_for_completion_timeout(&cm_id_priv->comp,
+ msecs_to_jiffies(
+ CM_DESTROY_ID_WAIT_TIMEOUT));
+ if (!ret) /* timeout happened */
+ cm_destroy_id_wait_timeout(cm_id);
+ } while (!ret);
+
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 67bcea7a153c..07cb6c5ffda0 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1730,7 +1730,7 @@ static int assign_client_id(struct ib_client *client)
{
int ret;
- down_write(&clients_rwsem);
+ lockdep_assert_held(&clients_rwsem);
/*
* The add/remove callbacks must be called in FIFO/LIFO order. To
* achieve this we assign client_ids so they are sorted in
@@ -1739,14 +1739,11 @@ static int assign_client_id(struct ib_client *client)
client->client_id = highest_client_id;
ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
if (ret)
- goto out;
+ return ret;
highest_client_id++;
xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);
-
-out:
- up_write(&clients_rwsem);
- return ret;
+ return 0;
}
static void remove_client_id(struct ib_client *client)
@@ -1776,25 +1773,35 @@ int ib_register_client(struct ib_client *client)
{
struct ib_device *device;
unsigned long index;
+ bool need_unreg = false;
int ret;
refcount_set(&client->uses, 1);
init_completion(&client->uses_zero);
+
+ /*
+ * The devices_rwsem is held in write mode to ensure that a racing
+ * ib_register_device() sees a consistent view of clients and devices.
+ */
+ down_write(&devices_rwsem);
+ down_write(&clients_rwsem);
ret = assign_client_id(client);
if (ret)
- return ret;
+ goto out;
- down_read(&devices_rwsem);
+ need_unreg = true;
xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
ret = add_client_context(device, client);
- if (ret) {
- up_read(&devices_rwsem);
- ib_unregister_client(client);
- return ret;
- }
+ if (ret)
+ goto out;
}
- up_read(&devices_rwsem);
- return 0;
+ ret = 0;
+out:
+ up_write(&clients_rwsem);
+ up_write(&devices_rwsem);
+ if (need_unreg && ret)
+ ib_unregister_client(client);
+ return ret;
}
EXPORT_SYMBOL(ib_register_client);
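
The reworked ib_register_client() takes both semaphores in a fixed order and postpones the failure unwind until after they are dropped, tracked by need_unreg. A stripped-down sketch of that error-handling shape, assuming placeholder locks and helpers (none of these names are from the IB core):

#include <linux/rwsem.h>
#include <linux/types.h>

static DECLARE_RWSEM(outer_rwsem);
static DECLARE_RWSEM(inner_rwsem);

struct thing;				/* opaque for this sketch */
int assign_id(struct thing *t);		/* hypothetical helpers */
int attach_everywhere(struct thing *t);
void unregister_thing(struct thing *t);

int register_thing(struct thing *t)
{
	bool need_unreg = false;
	int ret;

	down_write(&outer_rwsem);	/* fixed order: outer, then inner */
	down_write(&inner_rwsem);

	ret = assign_id(t);
	if (ret)
		goto out;

	need_unreg = true;		/* partial state exists from here on */
	ret = attach_everywhere(t);
out:
	up_write(&inner_rwsem);
	up_write(&outer_rwsem);

	/* Unwind only after both locks are released. */
	if (need_unreg && ret)
		unregister_thing(t);
	return ret;
}

This also shows why assign_client_id() could drop its own locking and gain the lockdep_assert_held() check: the caller now owns clients_rwsem for the whole sequence.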
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 6de05ade2ba9..3d3ee3eca983 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2737,7 +2737,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
case IB_FLOW_SPEC_ETH:
- ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_eth_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2748,7 +2748,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV4:
- ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_ipv4_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2759,7 +2759,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_IPV6:
- ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_ipv6_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2775,7 +2775,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
break;
case IB_FLOW_SPEC_TCP:
case IB_FLOW_SPEC_UDP:
- ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_tcp_udp_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2786,7 +2786,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_VXLAN_TUNNEL:
- ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_tunnel_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2801,7 +2801,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
return -EINVAL;
break;
case IB_FLOW_SPEC_ESP:
- ib_filter_sz = offsetof(struct ib_flow_esp_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_esp_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2812,7 +2812,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->esp.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_GRE:
- ib_filter_sz = offsetof(struct ib_flow_gre_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_gre_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
@@ -2823,7 +2823,7 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
memcpy(&ib_spec->gre.mask, kern_spec_mask, actual_filter_sz);
break;
case IB_FLOW_SPEC_MPLS:
- ib_filter_sz = offsetof(struct ib_flow_mpls_filter, real_sz);
+ ib_filter_sz = sizeof(struct ib_flow_mpls_filter);
actual_filter_sz = spec_filter_size(kern_spec_mask,
kern_filter_sz,
ib_filter_sz);
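
These hunks swap offsetof(struct ..., real_sz) for sizeof(struct ...). The two spell the same value when real_sz is (or was) a zero-length marker placed after the real filter fields, which appears to be the situation here, so sizeof becomes the natural replacement once the marker goes away. A tiny sketch with a made-up filter layout showing the equivalence:

#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/build_bug.h>

/* Hypothetical filter: payload fields followed by a size-0 marker. */
struct demo_filter {
	u8	dst_mac[6];
	u8	src_mac[6];
	__be16	ether_type;
	__be16	vlan_tag;
	u8	real_sz[];	/* marker only, contributes no storage */
};

/* Marker offset == size of everything before it == sizeof the struct. */
static_assert(offsetof(struct demo_filter, real_sz) ==
	      sizeof(struct demo_filter));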
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index d9799706c58e..f80da6a67e24 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -36,13 +36,15 @@
#include "uverbs.h"
struct bundle_alloc_head {
- struct bundle_alloc_head *next;
+ struct_group_tagged(bundle_alloc_head_hdr, hdr,
+ struct bundle_alloc_head *next;
+ );
u8 data[];
};
struct bundle_priv {
/* Must be first */
- struct bundle_alloc_head alloc_head;
+ struct bundle_alloc_head_hdr alloc_head;
struct bundle_alloc_head *allocated_mem;
size_t internal_avail;
size_t internal_used;
@@ -64,7 +66,7 @@ struct bundle_priv {
* Must be last. bundle ends in a flex array which overlaps
* internal_buffer.
*/
- struct uverbs_attr_bundle bundle;
+ struct uverbs_attr_bundle_hdr bundle;
u64 internal_buffer[32];
};
@@ -77,9 +79,10 @@ void uapi_compute_bundle_size(struct uverbs_api_ioctl_method *method_elm,
unsigned int num_attrs)
{
struct bundle_priv *pbundle;
+ struct uverbs_attr_bundle *bundle;
size_t bundle_size =
offsetof(struct bundle_priv, internal_buffer) +
- sizeof(*pbundle->bundle.attrs) * method_elm->key_bitmap_len +
+ sizeof(*bundle->attrs) * method_elm->key_bitmap_len +
sizeof(*pbundle->uattrs) * num_attrs;
method_elm->use_stack = bundle_size <= sizeof(*pbundle);
@@ -107,7 +110,7 @@ __malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
gfp_t flags)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
size_t new_used;
void *res;
@@ -149,7 +152,7 @@ static int uverbs_set_output(const struct uverbs_attr_bundle *bundle,
const struct uverbs_attr *attr)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
u16 flags;
flags = pbundle->uattrs[attr->ptr_attr.uattr_idx].flags |
@@ -166,6 +169,8 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
struct ib_uverbs_attr *uattr,
u32 attr_bkey)
{
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
size_t array_len;
u32 *idr_vals;
@@ -184,7 +189,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
return -EINVAL;
attr->uobjects =
- uverbs_alloc(&pbundle->bundle,
+ uverbs_alloc(bundle,
array_size(array_len, sizeof(*attr->uobjects)));
if (IS_ERR(attr->uobjects))
return PTR_ERR(attr->uobjects);
@@ -209,7 +214,7 @@ static int uverbs_process_idrs_array(struct bundle_priv *pbundle,
for (i = 0; i != array_len; i++) {
attr->uobjects[i] = uverbs_get_uobject_from_file(
spec->u2.objs_arr.obj_type, spec->u2.objs_arr.access,
- idr_vals[i], &pbundle->bundle);
+ idr_vals[i], bundle);
if (IS_ERR(attr->uobjects[i])) {
ret = PTR_ERR(attr->uobjects[i]);
break;
@@ -240,7 +245,9 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
struct ib_uverbs_attr *uattr, u32 attr_bkey)
{
const struct uverbs_attr_spec *spec = &attr_uapi->spec;
- struct uverbs_attr *e = &pbundle->bundle.attrs[attr_bkey];
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
+ struct uverbs_attr *e = &bundle->attrs[attr_bkey];
const struct uverbs_attr_spec *val_spec = spec;
struct uverbs_obj_attr *o_attr;
@@ -288,7 +295,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
if (val_spec->alloc_and_copy && !uverbs_attr_ptr_is_inline(e)) {
void *p;
- p = uverbs_alloc(&pbundle->bundle, uattr->len);
+ p = uverbs_alloc(bundle, uattr->len);
if (IS_ERR(p))
return PTR_ERR(p);
@@ -321,7 +328,7 @@ static int uverbs_process_attr(struct bundle_priv *pbundle,
*/
o_attr->uobject = uverbs_get_uobject_from_file(
spec->u.obj.obj_type, spec->u.obj.access,
- uattr->data_s64, &pbundle->bundle);
+ uattr->data_s64, bundle);
if (IS_ERR(o_attr->uobject))
return PTR_ERR(o_attr->uobject);
__set_bit(attr_bkey, pbundle->uobj_finalize);
@@ -422,6 +429,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
unsigned int num_attrs)
{
int (*handler)(struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
size_t uattrs_size = array_size(sizeof(*pbundle->uattrs), num_attrs);
unsigned int destroy_bkey = pbundle->method_elm->destroy_bkey;
unsigned int i;
@@ -434,7 +443,7 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
if (!handler)
return -EIO;
- pbundle->uattrs = uverbs_alloc(&pbundle->bundle, uattrs_size);
+ pbundle->uattrs = uverbs_alloc(bundle, uattrs_size);
if (IS_ERR(pbundle->uattrs))
return PTR_ERR(pbundle->uattrs);
if (copy_from_user(pbundle->uattrs, pbundle->user_attrs, uattrs_size))
@@ -453,25 +462,23 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
return -EINVAL;
if (pbundle->method_elm->has_udata)
- uverbs_fill_udata(&pbundle->bundle,
- &pbundle->bundle.driver_udata,
+ uverbs_fill_udata(bundle, &pbundle->bundle.driver_udata,
UVERBS_ATTR_UHW_IN, UVERBS_ATTR_UHW_OUT);
else
pbundle->bundle.driver_udata = (struct ib_udata){};
if (destroy_bkey != UVERBS_API_ATTR_BKEY_LEN) {
- struct uverbs_obj_attr *destroy_attr =
- &pbundle->bundle.attrs[destroy_bkey].obj_attr;
+ struct uverbs_obj_attr *destroy_attr = &bundle->attrs[destroy_bkey].obj_attr;
- ret = uobj_destroy(destroy_attr->uobject, &pbundle->bundle);
+ ret = uobj_destroy(destroy_attr->uobject, bundle);
if (ret)
return ret;
__clear_bit(destroy_bkey, pbundle->uobj_finalize);
- ret = handler(&pbundle->bundle);
+ ret = handler(bundle);
uobj_put_destroy(destroy_attr->uobject);
} else {
- ret = handler(&pbundle->bundle);
+ ret = handler(bundle);
}
/*
@@ -481,10 +488,10 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
*/
if (!ret && pbundle->method_elm->has_udata) {
const struct uverbs_attr *attr =
- uverbs_attr_get(&pbundle->bundle, UVERBS_ATTR_UHW_OUT);
+ uverbs_attr_get(bundle, UVERBS_ATTR_UHW_OUT);
if (!IS_ERR(attr))
- ret = uverbs_set_output(&pbundle->bundle, attr);
+ ret = uverbs_set_output(bundle, attr);
}
/*
@@ -501,6 +508,8 @@ static int ib_uverbs_run_method(struct bundle_priv *pbundle,
static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
{
unsigned int key_bitmap_len = pbundle->method_elm->key_bitmap_len;
+ struct uverbs_attr_bundle *bundle =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
struct bundle_alloc_head *memblock;
unsigned int i;
@@ -508,20 +517,19 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
i = -1;
while ((i = find_next_bit(pbundle->uobj_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
- struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ struct uverbs_attr *attr = &bundle->attrs[i];
uverbs_finalize_object(
attr->obj_attr.uobject,
attr->obj_attr.attr_elm->spec.u.obj.access,
test_bit(i, pbundle->uobj_hw_obj_valid),
- commit,
- &pbundle->bundle);
+ commit, bundle);
}
i = -1;
while ((i = find_next_bit(pbundle->spec_finalize, key_bitmap_len,
i + 1)) < key_bitmap_len) {
- struct uverbs_attr *attr = &pbundle->bundle.attrs[i];
+ struct uverbs_attr *attr = &bundle->attrs[i];
const struct uverbs_api_attr *attr_uapi;
void __rcu **slot;
@@ -535,7 +543,7 @@ static void bundle_destroy(struct bundle_priv *pbundle, bool commit)
if (attr_uapi->spec.type == UVERBS_ATTR_TYPE_IDRS_ARRAY) {
uverbs_free_idrs_array(attr_uapi, &attr->objs_arr_attr,
- commit, &pbundle->bundle);
+ commit, bundle);
}
}
@@ -578,7 +586,8 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
method_elm->bundle_size -
offsetof(struct bundle_priv, internal_buffer);
pbundle->alloc_head.next = NULL;
- pbundle->allocated_mem = &pbundle->alloc_head;
+ pbundle->allocated_mem = container_of(&pbundle->alloc_head,
+ struct bundle_alloc_head, hdr);
} else {
pbundle = &onstack;
pbundle->internal_avail = sizeof(pbundle->internal_buffer);
@@ -596,8 +605,9 @@ static int ib_uverbs_cmd_verbs(struct ib_uverbs_file *ufile,
pbundle->user_attrs = user_attrs;
pbundle->internal_used = ALIGN(pbundle->method_elm->key_bitmap_len *
- sizeof(*pbundle->bundle.attrs),
- sizeof(*pbundle->internal_buffer));
+ sizeof(*container_of(&pbundle->bundle,
+ struct uverbs_attr_bundle, hdr)->attrs),
+ sizeof(*pbundle->internal_buffer));
memset(pbundle->bundle.attr_present, 0,
sizeof(pbundle->bundle.attr_present));
memset(pbundle->uobj_finalize, 0, sizeof(pbundle->uobj_finalize));
@@ -700,11 +710,13 @@ void uverbs_fill_udata(struct uverbs_attr_bundle *bundle,
unsigned int attr_out)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
+ struct uverbs_attr_bundle *bundle_aux =
+ container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr);
const struct uverbs_attr *in =
- uverbs_attr_get(&pbundle->bundle, attr_in);
+ uverbs_attr_get(bundle_aux, attr_in);
const struct uverbs_attr *out =
- uverbs_attr_get(&pbundle->bundle, attr_out);
+ uverbs_attr_get(bundle_aux, attr_out);
if (!IS_ERR(in)) {
udata->inlen = in->ptr_attr.len;
@@ -829,7 +841,7 @@ void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *bundle,
u16 idx)
{
struct bundle_priv *pbundle =
- container_of(bundle, struct bundle_priv, bundle);
+ container_of(&bundle->hdr, struct bundle_priv, bundle);
__set_bit(uapi_bkey_attr(uapi_key_attr(idx)),
pbundle->uobj_hw_obj_valid);
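
The bundle rework leans on struct_group_tagged(): the fixed leading members of the flex-array structs get a mirror *_hdr type, only the header is embedded in struct bundle_priv, and container_of() converts between the header view and the full view. A reduced sketch of the idiom with made-up names:

#include <linux/stddef.h>	/* struct_group_tagged() */
#include <linux/container_of.h>

struct outer {
	struct_group_tagged(outer_hdr, hdr,
		int id;
		void *priv;
	);
	unsigned long payload[];	/* flex array must stay last */
};

struct wrapper {
	struct outer_hdr head;		/* embed only the fixed header */
	unsigned long storage[32];	/* lives where payload[] would start */
};

static struct outer *wrapper_to_outer(struct wrapper *w)
{
	/* Recover the flex-array view from the embedded header. */
	return container_of(&w->head, struct outer, hdr);
}

This is the same round trip the patch performs with container_of(&pbundle->bundle, struct uverbs_attr_bundle, hdr) and, in the other direction, container_of(&bundle->hdr, struct bundle_priv, bundle).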
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 50cb2259bf87..fb8a0c248866 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -930,8 +930,6 @@ void c4iw_id_table_free(struct c4iw_id_table *alloc);
typedef int (*c4iw_handler_func)(struct c4iw_dev *dev, struct sk_buff *skb);
-int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
- struct l2t_entry *l2t);
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qpid,
struct c4iw_dev_ucontext *uctx);
u32 c4iw_get_resource(struct c4iw_id_table *id_table);
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index e2bdec32ae80..926f9ff1f60f 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -57,6 +57,7 @@ struct efa_dev {
u64 db_bar_addr;
u64 db_bar_len;
+ unsigned int num_irq_vectors;
int admin_msix_vector_idx;
struct efa_irq admin_irq;
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 7b1910a86216..5fa3603c80d8 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -322,7 +322,9 @@ static int efa_create_eqs(struct efa_dev *dev)
int err;
int i;
- neqs = min_t(unsigned int, neqs, num_online_cpus());
+ neqs = min_t(unsigned int, neqs,
+ dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);
+
dev->neqs = neqs;
dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
if (!dev->eqs)
@@ -468,34 +470,30 @@ static void efa_disable_msix(struct efa_dev *dev)
static int efa_enable_msix(struct efa_dev *dev)
{
- int msix_vecs, irq_num;
+ int max_vecs, num_vecs;
/*
* Reserve the max msix vectors we might need, one vector is reserved
* for admin.
*/
- msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
- num_online_cpus() + 1);
+ max_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
+ num_online_cpus() + 1);
dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
- msix_vecs);
+ max_vecs);
dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
- irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
- msix_vecs, PCI_IRQ_MSIX);
+ num_vecs = pci_alloc_irq_vectors(dev->pdev, 1,
+ max_vecs, PCI_IRQ_MSIX);
- if (irq_num < 0) {
- dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
- irq_num);
+ if (num_vecs < 0) {
+ dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n",
+ num_vecs);
return -ENOSPC;
}
- if (irq_num != msix_vecs) {
- efa_disable_msix(dev);
- dev_err(&dev->pdev->dev,
- "Allocated %d MSI-X (out of %d requested)\n",
- irq_num, msix_vecs);
- return -ENOSPC;
- }
+ dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs);
+
+ dev->num_irq_vectors = num_vecs;
return 0;
}
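
Instead of demanding an exact vector count, the driver now asks pci_alloc_irq_vectors() for anything between 1 and the computed maximum and records the number actually granted in dev->num_irq_vectors, which the EQ sizing above then respects. A condensed sketch of that allocation pattern (the wrapper and its arguments are illustrative):

#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/minmax.h>

static int enable_msix_range(struct pci_dev *pdev, unsigned int *granted)
{
	int max_vecs, num_vecs;

	/* Upper bound: one vector per online CPU plus one for admin. */
	max_vecs = min_t(int, pci_msix_vec_count(pdev),
			 num_online_cpus() + 1);

	/* Accept any count from 1 to max_vecs instead of failing hard. */
	num_vecs = pci_alloc_irq_vectors(pdev, 1, max_vecs, PCI_IRQ_MSIX);
	if (num_vecs < 0)
		return num_vecs;

	*granted = num_vecs;	/* later sizing derives from this */
	return 0;
}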
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 18b05ffb415a..c465966a1d9c 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -315,7 +315,7 @@ int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
* This routine returns the receive context associated
* with a a qp's qpn.
*
- * Returns the context.
+ * Return: the context.
*/
static struct hfi1_ctxtdata *qp_to_rcd(struct rvt_dev_info *rdi,
struct rvt_qp *qp)
@@ -710,7 +710,7 @@ void hfi1_tid_rdma_flush_wait(struct rvt_qp *qp)
* The exp_lock must be held.
*
* Return:
- * On success: a value postive value between 0 and RXE_NUM_TID_FLOWS - 1
+ * On success: a positive value between 0 and RXE_NUM_TID_FLOWS - 1
* On failure: -EAGAIN
*/
static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
@@ -1007,7 +1007,7 @@ static u32 tid_flush_pages(struct tid_rdma_pageset *list,
* pages are tested two at a time, i, i + 1 for contiguous
* pages and i - 1 and i contiguous pages.
*
- * If any condition is false, any accumlated pages are flushed and
+ * If any condition is false, any accumulated pages are flushed and
* v0,v1 are emitted as separate PAGE_SIZE pagesets
*
* Otherwise, the current 8k is totaled for a future flush.
@@ -1434,7 +1434,7 @@ static void kern_program_rcvarray(struct tid_rdma_flow *flow)
* (5) computes a tidarray with formatted TID entries which can be sent
* to the sender
* (6) Reserves and programs HW flows.
- * (7) It also manages queing the QP when TID/flow resources are not
+ * (7) It also manages queueing the QP when TID/flow resources are not
* available.
*
* @req points to struct tid_rdma_request of which the segments are a part. The
@@ -1604,7 +1604,7 @@ void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
}
/**
- * hfi1_kern_exp_rcv_free_flows - free priviously allocated flow information
+ * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
* @req: the tid rdma request to be cleaned
*/
static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
@@ -2055,7 +2055,7 @@ static int tid_rdma_rcv_error(struct hfi1_packet *packet,
* req->clear_tail is advanced). However, when an earlier
* request is received, this request will not be complete any
* more (qp->s_tail_ack_queue is moved back, see below).
- * Consequently, we need to update the TID flow info everytime
+ * Consequently, we need to update the TID flow info every time
* a duplicate request is received.
*/
bth0 = be32_to_cpu(ohdr->bth[0]);
@@ -2219,7 +2219,7 @@ void hfi1_rc_rcv_tid_rdma_read_req(struct hfi1_packet *packet)
/*
* 1. Verify TID RDMA READ REQ as per IB_OPCODE_RC_RDMA_READ
* (see hfi1_rc_rcv())
- * 2. Put TID RDMA READ REQ into the response queueu (s_ack_queue)
+ * 2. Put TID RDMA READ REQ into the response queue (s_ack_queue)
* - Setup struct tid_rdma_req with request info
* - Initialize struct tid_rdma_flow info;
* - Copy TID entries;
@@ -2439,7 +2439,7 @@ find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
void hfi1_rc_rcv_tid_rdma_read_resp(struct hfi1_packet *packet)
{
- /* HANDLER FOR TID RDMA READ RESPONSE packet (Requestor side */
+ /* HANDLER FOR TID RDMA READ RESPONSE packet (Requester side) */
/*
* 1. Find matching SWQE
@@ -3649,7 +3649,7 @@ void hfi1_rc_rcv_tid_rdma_write_req(struct hfi1_packet *packet)
* 1. Verify TID RDMA WRITE REQ as per IB_OPCODE_RC_RDMA_WRITE_FIRST
* (see hfi1_rc_rcv())
* - Don't allow 0-length requests.
- * 2. Put TID RDMA WRITE REQ into the response queueu (s_ack_queue)
+ * 2. Put TID RDMA WRITE REQ into the response queue (s_ack_queue)
* - Setup struct tid_rdma_req with request info
* - Prepare struct tid_rdma_flow array?
* 3. Set the qp->s_ack_state as state diagram in design doc.
@@ -4026,7 +4026,7 @@ unlock_r_lock:
void hfi1_rc_rcv_tid_rdma_write_resp(struct hfi1_packet *packet)
{
- /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requestor side */
+ /* HANDLER FOR TID RDMA WRITE RESPONSE packet (Requester side) */
/*
* 1. Find matching SWQE
@@ -5440,8 +5440,9 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
* the two state machines can step on each other with respect to the
* RVT_S_BUSY flag.
* Therefore, a modified test is used.
- * @return true if the second leg is scheduled;
- * false if the second leg is not scheduled.
+ *
+ * Return: %true if the second leg is scheduled;
+ * %false if the second leg is not scheduled.
*/
bool hfi1_schedule_tid_send(struct rvt_qp *qp)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.h b/drivers/infiniband/hw/hns/hns_roce_cmd.h
index 052a3d60905a..11dbbabebdc9 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cmd.h
+++ b/drivers/infiniband/hw/hns/hns_roce_cmd.h
@@ -108,6 +108,9 @@ enum {
HNS_ROCE_CMD_QUERY_CEQC = 0x92,
HNS_ROCE_CMD_DESTROY_CEQC = 0x93,
+ /* SCC CTX commands */
+ HNS_ROCE_CMD_QUERY_SCCC = 0xa2,
+
/* SCC CTX BT commands */
HNS_ROCE_CMD_READ_SCCC_BT0 = 0xa4,
HNS_ROCE_CMD_WRITE_SCCC_BT0 = 0xa5,
diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c
index 1b6d16af8c12..7250d0643b5c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_cq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_cq.c
@@ -133,14 +133,12 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = {};
- dma_addr_t dma_handle;
int ret;
- ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),
- &dma_handle);
- if (!ret) {
+ ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts));
+ if (ret) {
ibdev_err(ibdev, "failed to find CQ mtr, ret = %d.\n", ret);
- return -EINVAL;
+ return ret;
}
/* Get CQC memory HEM(Hardware Entry Memory) table */
@@ -157,7 +155,8 @@ static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
goto err_put;
}
- ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts, dma_handle);
+ ret = hns_roce_create_cqc(hr_dev, hr_cq, mtts,
+ hns_roce_get_mtr_ba(&hr_cq->mtr));
if (ret)
goto err_xa;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index b1fce5ddf631..c3cbd0a494bf 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -179,6 +179,7 @@ enum {
#define HNS_ROCE_CMD_SUCCESS 1
+#define HNS_ROCE_MAX_HOP_NUM 3
/* The minimum page size is 4K for hardware */
#define HNS_HW_PAGE_SHIFT 12
#define HNS_HW_PAGE_SIZE (1 << HNS_HW_PAGE_SHIFT)
@@ -269,6 +270,11 @@ struct hns_roce_hem_list {
dma_addr_t root_ba; /* pointer to the root ba table */
};
+enum mtr_type {
+ MTR_DEFAULT = 0,
+ MTR_PBL,
+};
+
struct hns_roce_buf_attr {
struct {
size_t size; /* region size */
@@ -277,7 +283,10 @@ struct hns_roce_buf_attr {
unsigned int region_count; /* valid region count */
unsigned int page_shift; /* buffer page shift */
unsigned int user_access; /* umem access flag */
+ u64 iova;
+ enum mtr_type type;
bool mtt_only; /* only alloc buffer-required MTT memory */
+ bool adaptive; /* adaptive for page_shift and hopnum */
};
struct hns_roce_hem_cfg {
@@ -585,6 +594,13 @@ struct hns_roce_work {
u32 queue_num;
};
+enum hns_roce_cong_type {
+ CONG_TYPE_DCQCN,
+ CONG_TYPE_LDCP,
+ CONG_TYPE_HC3,
+ CONG_TYPE_DIP,
+};
+
struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_wq rq;
@@ -628,6 +644,7 @@ struct hns_roce_qp {
struct list_head sq_node; /* all send qps are on a list */
struct hns_user_mmap_entry *dwqe_mmap_entry;
u32 config;
+ enum hns_roce_cong_type cong_type;
};
struct hns_roce_ib_iboe {
@@ -699,13 +716,6 @@ struct hns_roce_eq_table {
struct hns_roce_eq *eq;
};
-enum cong_type {
- CONG_TYPE_DCQCN,
- CONG_TYPE_LDCP,
- CONG_TYPE_HC3,
- CONG_TYPE_DIP,
-};
-
struct hns_roce_caps {
u64 fw_ver;
u8 num_ports;
@@ -835,7 +845,8 @@ struct hns_roce_caps {
u16 default_aeq_period;
u16 default_aeq_arm_st;
u16 default_ceq_arm_st;
- enum cong_type cong_type;
+ u8 cong_cap;
+ enum hns_roce_cong_type default_cong_type;
};
enum hns_roce_device_state {
@@ -936,6 +947,7 @@ struct hns_roce_hw {
int (*query_qpc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_mpt)(struct hns_roce_dev *hr_dev, u32 key, void *buffer);
int (*query_srqc)(struct hns_roce_dev *hr_dev, u32 srqn, void *buffer);
+ int (*query_sccc)(struct hns_roce_dev *hr_dev, u32 qpn, void *buffer);
int (*query_hw_counter)(struct hns_roce_dev *hr_dev,
u64 *stats, u32 port, int *hw_counters);
const struct ib_device_ops *hns_roce_dev_ops;
@@ -1152,8 +1164,13 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev);
/* hns roce hw need current block and next block addr from mtt */
#define MTT_MIN_COUNT 2
+static inline dma_addr_t hns_roce_get_mtr_ba(struct hns_roce_mtr *mtr)
+{
+ return mtr->hem_cfg.root_ba;
+}
+
int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr);
+ u32 offset, u64 *mtt_buf, int mtt_max);
int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
struct hns_roce_buf_attr *buf_attr,
unsigned int page_shift, struct ib_udata *udata,
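
With this prototype change hns_roce_mtr_find() returns 0 or a negative errno and only fills the MTT list; the root base address is fetched separately through the new hns_roce_get_mtr_ba() inline. A sketch of the updated calling convention as the later hunks use it (the wrapper function and write_hw_ctx() are illustrative and assume the driver's own headers):

int write_hw_ctx(u64 *pages, dma_addr_t root_ba);	/* hypothetical */

static int config_from_mtr(struct hns_roce_dev *hr_dev,
			   struct hns_roce_mtr *mtr)
{
	u64 pages[MTT_MIN_COUNT] = {};
	int ret;

	/* 0 on success, -errno on failure; no more "count < 1" checks. */
	ret = hns_roce_mtr_find(hr_dev, mtr, 0, pages, ARRAY_SIZE(pages));
	if (ret)
		return ret;

	/* The root BA is fetched separately, not via an output parameter. */
	return write_hw_ctx(pages, hns_roce_get_mtr_ba(mtr));
}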
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c
index c4ac06a33869..a4b3f19161dc 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.c
@@ -249,61 +249,34 @@ int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
}
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
- int npages,
unsigned long hem_alloc_size,
gfp_t gfp_mask)
{
- struct hns_roce_hem_chunk *chunk = NULL;
struct hns_roce_hem *hem;
- struct scatterlist *mem;
int order;
void *buf;
WARN_ON(gfp_mask & __GFP_HIGHMEM);
+ order = get_order(hem_alloc_size);
+ if (PAGE_SIZE << order != hem_alloc_size) {
+ dev_err(hr_dev->dev, "invalid hem_alloc_size: %lu!\n",
+ hem_alloc_size);
+ return NULL;
+ }
+
hem = kmalloc(sizeof(*hem),
gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!hem)
return NULL;
- INIT_LIST_HEAD(&hem->chunk_list);
-
- order = get_order(hem_alloc_size);
-
- while (npages > 0) {
- if (!chunk) {
- chunk = kmalloc(sizeof(*chunk),
- gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
- if (!chunk)
- goto fail;
-
- sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
- chunk->npages = 0;
- chunk->nsg = 0;
- memset(chunk->buf, 0, sizeof(chunk->buf));
- list_add_tail(&chunk->list, &hem->chunk_list);
- }
+ buf = dma_alloc_coherent(hr_dev->dev, hem_alloc_size,
+ &hem->dma, gfp_mask);
+ if (!buf)
+ goto fail;
- while (1 << order > npages)
- --order;
-
- /*
- * Alloc memory one time. If failed, don't alloc small block
- * memory, directly return fail.
- */
- mem = &chunk->mem[chunk->npages];
- buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
- &sg_dma_address(mem), gfp_mask);
- if (!buf)
- goto fail;
-
- chunk->buf[chunk->npages] = buf;
- sg_dma_len(mem) = PAGE_SIZE << order;
-
- ++chunk->npages;
- ++chunk->nsg;
- npages -= 1 << order;
- }
+ hem->buf = buf;
+ hem->size = hem_alloc_size;
return hem;
@@ -314,20 +287,10 @@ fail:
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
- struct hns_roce_hem_chunk *chunk, *tmp;
- int i;
-
if (!hem)
return;
- list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
- for (i = 0; i < chunk->npages; ++i)
- dma_free_coherent(hr_dev->dev,
- sg_dma_len(&chunk->mem[i]),
- chunk->buf[i],
- sg_dma_address(&chunk->mem[i]));
- kfree(chunk);
- }
+ dma_free_coherent(hr_dev->dev, hem->size, hem->buf, hem->dma);
kfree(hem);
}
@@ -415,7 +378,6 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
{
u32 bt_size = mhop->bt_chunk_size;
struct device *dev = hr_dev->dev;
- struct hns_roce_hem_iter iter;
gfp_t flag;
u64 bt_ba;
u32 size;
@@ -456,16 +418,15 @@ static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
*/
size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
flag = GFP_KERNEL | __GFP_NOWARN;
- table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
- size, flag);
+ table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size, flag);
if (!table->hem[index->buf]) {
ret = -ENOMEM;
goto err_alloc_hem;
}
index->inited |= HEM_INDEX_BUF;
- hns_roce_hem_first(table->hem[index->buf], &iter);
- bt_ba = hns_roce_hem_addr(&iter);
+ bt_ba = table->hem[index->buf]->dma;
+
if (table->type < HEM_TYPE_MTT) {
if (mhop->hop_num == 2)
*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
@@ -586,7 +547,6 @@ int hns_roce_table_get(struct hns_roce_dev *hr_dev,
}
table->hem[i] = hns_roce_alloc_hem(hr_dev,
- table->table_chunk_size >> PAGE_SHIFT,
table->table_chunk_size,
GFP_KERNEL | __GFP_NOWARN);
if (!table->hem[i]) {
@@ -725,7 +685,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table,
unsigned long obj, dma_addr_t *dma_handle)
{
- struct hns_roce_hem_chunk *chunk;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
@@ -734,7 +693,6 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
int offset, dma_offset;
void *addr = NULL;
u32 hem_idx = 0;
- int length;
int i, j;
mutex_lock(&table->mutex);
@@ -767,23 +725,8 @@ void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
if (!hem)
goto out;
- list_for_each_entry(chunk, &hem->chunk_list, list) {
- for (i = 0; i < chunk->npages; ++i) {
- length = sg_dma_len(&chunk->mem[i]);
- if (dma_handle && dma_offset >= 0) {
- if (length > (u32)dma_offset)
- *dma_handle = sg_dma_address(
- &chunk->mem[i]) + dma_offset;
- dma_offset -= length;
- }
-
- if (length > (u32)offset) {
- addr = chunk->buf[i] + offset;
- goto out;
- }
- offset -= length;
- }
- }
+ *dma_handle = hem->dma + dma_offset;
+ addr = hem->buf + offset;
out:
mutex_unlock(&table->mutex);
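
After this rewrite a HEM is one coherent DMA allocation instead of a chunk list, which is why the allocator first checks that the requested size is an exact power-of-two number of pages and why lookups collapse to plain pointer arithmetic on hem->buf and hem->dma. A minimal sketch of that allocate/validate/free shape (hypothetical names, same kernel APIs):

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

struct flat_hem {
	void *buf;
	dma_addr_t dma;
	unsigned long size;
};

static struct flat_hem *flat_hem_alloc(struct device *dev,
				       unsigned long size, gfp_t gfp)
{
	struct flat_hem *hem;
	int order = get_order(size);

	/* Only an exact power-of-two page count maps to one allocation. */
	if (PAGE_SIZE << order != size)
		return NULL;

	hem = kmalloc(sizeof(*hem), gfp);
	if (!hem)
		return NULL;

	hem->buf = dma_alloc_coherent(dev, size, &hem->dma, gfp);
	if (!hem->buf) {
		kfree(hem);
		return NULL;
	}
	hem->size = size;
	return hem;
}

static void flat_hem_free(struct device *dev, struct flat_hem *hem)
{
	if (!hem)
		return;
	dma_free_coherent(dev, hem->size, hem->buf, hem->dma);
	kfree(hem);
}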
diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.h b/drivers/infiniband/hw/hns/hns_roce_hem.h
index 7d23d3c51da4..6fb51db9682b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hem.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hem.h
@@ -56,10 +56,6 @@ enum {
HEM_TYPE_TRRL,
};
-#define HNS_ROCE_HEM_CHUNK_LEN \
- ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
- (sizeof(struct scatterlist) + sizeof(void *)))
-
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
@@ -72,25 +68,13 @@ enum {
(type >= HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
-struct hns_roce_hem_chunk {
- struct list_head list;
- int npages;
- int nsg;
- struct scatterlist mem[HNS_ROCE_HEM_CHUNK_LEN];
- void *buf[HNS_ROCE_HEM_CHUNK_LEN];
-};
-
struct hns_roce_hem {
- struct list_head chunk_list;
+ void *buf;
+ dma_addr_t dma;
+ unsigned long size;
refcount_t refcount;
};
-struct hns_roce_hem_iter {
- struct hns_roce_hem *hem;
- struct hns_roce_hem_chunk *chunk;
- int page_idx;
-};
-
struct hns_roce_hem_mhop {
u32 hop_num;
u32 buf_chunk_size;
@@ -133,38 +117,4 @@ void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_list *hem_list,
int offset, int *mtt_cnt);
-static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
- struct hns_roce_hem_iter *iter)
-{
- iter->hem = hem;
- iter->chunk = list_empty(&hem->chunk_list) ? NULL :
- list_entry(hem->chunk_list.next,
- struct hns_roce_hem_chunk, list);
- iter->page_idx = 0;
-}
-
-static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
-{
- return !iter->chunk;
-}
-
-static inline void hns_roce_hem_next(struct hns_roce_hem_iter *iter)
-{
- if (++iter->page_idx >= iter->chunk->nsg) {
- if (iter->chunk->list.next == &iter->hem->chunk_list) {
- iter->chunk = NULL;
- return;
- }
-
- iter->chunk = list_entry(iter->chunk->list.next,
- struct hns_roce_hem_chunk, list);
- iter->page_idx = 0;
- }
-}
-
-static inline dma_addr_t hns_roce_hem_addr(struct hns_roce_hem_iter *iter)
-{
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
-}
-
#endif /* _HNS_ROCE_HEM_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index 8206daea6767..ba7ae792d279 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -2209,11 +2209,12 @@ static int hns_roce_query_caps(struct hns_roce_dev *hr_dev)
caps->max_wqes = 1 << le16_to_cpu(resp_c->sq_depth);
caps->num_srqs = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_SRQS);
- caps->cong_type = hr_reg_read(resp_d, PF_CAPS_D_CONG_TYPE);
+ caps->cong_cap = hr_reg_read(resp_d, PF_CAPS_D_CONG_CAP);
caps->max_srq_wrs = 1 << le16_to_cpu(resp_d->srq_depth);
caps->ceqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_CEQ_DEPTH);
caps->num_comp_vectors = hr_reg_read(resp_d, PF_CAPS_D_NUM_CEQS);
caps->aeqe_depth = 1 << hr_reg_read(resp_d, PF_CAPS_D_AEQ_DEPTH);
+ caps->default_cong_type = hr_reg_read(resp_d, PF_CAPS_D_DEFAULT_ALG);
caps->reserved_pds = hr_reg_read(resp_d, PF_CAPS_D_RSV_PDS);
caps->num_uars = 1 << hr_reg_read(resp_d, PF_CAPS_D_NUM_UARS);
caps->reserved_qps = hr_reg_read(resp_d, PF_CAPS_D_RSV_QPS);
@@ -3195,21 +3196,22 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
- int i, count;
+ int ret;
+ int i;
- count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
- min_t(int, ARRAY_SIZE(pages), mr->npages),
- &pbl_ba);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find PBL mtr, count = %d.\n",
- count);
- return -ENOBUFS;
+ ret = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages,
+ min_t(int, ARRAY_SIZE(pages), mr->npages));
+ if (ret) {
+ ibdev_err(ibdev, "failed to find PBL mtr, ret = %d.\n", ret);
+ return ret;
}
/* Aligned to the hardware address access unit */
- for (i = 0; i < count; i++)
+ for (i = 0; i < ARRAY_SIZE(pages); i++)
pages[i] >>= 6;
+ pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
+
mpt_entry->pbl_size = cpu_to_le32(mr->npages);
mpt_entry->pbl_ba_l = cpu_to_le32(pbl_ba >> 3);
hr_reg_write(mpt_entry, MPT_PBL_BA_H, upper_32_bits(pbl_ba >> 3));
@@ -3308,18 +3310,12 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
void *mb_buf, struct hns_roce_mr *mr)
{
- struct ib_device *ibdev = &hr_dev->ib_dev;
+ dma_addr_t pbl_ba = hns_roce_get_mtr_ba(&mr->pbl_mtr);
struct hns_roce_v2_mpt_entry *mpt_entry;
- dma_addr_t pbl_ba = 0;
mpt_entry = mb_buf;
memset(mpt_entry, 0, sizeof(*mpt_entry));
- if (hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, NULL, 0, &pbl_ba) < 0) {
- ibdev_err(ibdev, "failed to find frmr mtr.\n");
- return -ENOBUFS;
- }
-
hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_FREE);
hr_reg_write(mpt_entry, MPT_PD, mr->pd);
@@ -4063,7 +4059,6 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj,
u32 step_idx)
{
- struct hns_roce_hem_iter iter;
struct hns_roce_hem_mhop mhop;
struct hns_roce_hem *hem;
unsigned long mhop_obj = obj;
@@ -4100,12 +4095,8 @@ static int hns_roce_v2_set_hem(struct hns_roce_dev *hr_dev,
if (check_whether_last_step(hop_num, step_idx)) {
hem = table->hem[hem_idx];
- for (hns_roce_hem_first(hem, &iter);
- !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
- bt_ba = hns_roce_hem_addr(&iter);
- ret = set_hem_to_hw(hr_dev, obj, bt_ba, table->type,
- step_idx);
- }
+
+ ret = set_hem_to_hw(hr_dev, obj, hem->dma, table->type, step_idx);
} else {
if (step_idx == 0)
bt_ba = table->bt_l0_dma_addr[i];
@@ -4346,17 +4337,20 @@ static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
{
u64 mtts[MTT_MIN_COUNT] = { 0 };
u64 wqe_sge_ba;
- int count;
+ int ret;
/* Search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
- MTT_MIN_COUNT, &wqe_sge_ba);
- if (hr_qp->rq.wqe_cnt && count < 1) {
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
+ MTT_MIN_COUNT);
+ if (hr_qp->rq.wqe_cnt && ret) {
ibdev_err(&hr_dev->ib_dev,
- "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
- return -EINVAL;
+ "failed to find QP(0x%lx) RQ WQE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
}
+ wqe_sge_ba = hns_roce_get_mtr_ba(&hr_qp->mtr);
+
context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
qpc_mask->wqe_sge_ba = 0;
@@ -4418,23 +4412,23 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0;
- int count;
+ int ret;
/* search qp buf's mtts */
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find QP(0x%lx) SQ buf.\n",
- hr_qp->qpn);
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->sq.offset,
+ &sq_cur_blk, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SQ WQE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
}
if (hr_qp->sge.sge_cnt > 0) {
- count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
- hr_qp->sge.offset,
- &sge_cur_blk, 1, NULL);
- if (count < 1) {
- ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
- hr_qp->qpn);
- return -EINVAL;
+ ret = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
+ hr_qp->sge.offset, &sge_cur_blk, 1);
+ if (ret) {
+ ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf, ret = %d.\n",
+ hr_qp->qpn, ret);
+ return ret;
}
}
@@ -4744,13 +4738,10 @@ enum {
static int check_cong_type(struct ib_qp *ibqp,
struct hns_roce_congestion_algorithm *cong_alg)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
-
- if (ibqp->qp_type == IB_QPT_UD)
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
/* different congestion types match different configurations */
- switch (hr_dev->caps.cong_type) {
+ switch (hr_qp->cong_type) {
case CONG_TYPE_DCQCN:
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
@@ -4776,10 +4767,7 @@ static int check_cong_type(struct ib_qp *ibqp,
cong_alg->wnd_mode_sel = WND_LIMIT;
break;
default:
- ibdev_warn(&hr_dev->ib_dev,
- "invalid type(%u) for congestion selection.\n",
- hr_dev->caps.cong_type);
- hr_dev->caps.cong_type = CONG_TYPE_DCQCN;
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
cong_alg->alg_sel = CONG_DCQCN;
cong_alg->alg_sub_sel = UNSUPPORT_CONG_LEVEL;
cong_alg->dip_vld = DIP_INVALID;
@@ -4798,6 +4786,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
struct hns_roce_congestion_algorithm cong_field;
struct ib_device *ibdev = ibqp->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
+ struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
u32 dip_idx = 0;
int ret;
@@ -4810,7 +4799,7 @@ static int fill_cong_field(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
return ret;
hr_reg_write(context, QPC_CONG_ALGO_TMPL_ID, hr_dev->cong_algo_tmpl_id +
- hr_dev->caps.cong_type * HNS_ROCE_CONG_SIZE);
+ hr_qp->cong_type * HNS_ROCE_CONG_SIZE);
hr_reg_clear(qpc_mask, QPC_CONG_ALGO_TMPL_ID);
hr_reg_write(&context->ext, QPCEX_CONG_ALG_SEL, cong_field.alg_sel);
hr_reg_clear(&qpc_mask->ext, QPCEX_CONG_ALG_SEL);
@@ -5328,6 +5317,30 @@ out:
return ret;
}
+static int hns_roce_v2_query_sccc(struct hns_roce_dev *hr_dev, u32 qpn,
+ void *buffer)
+{
+ struct hns_roce_v2_scc_context *context;
+ struct hns_roce_cmd_mailbox *mailbox;
+ int ret;
+
+ mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, HNS_ROCE_CMD_QUERY_SCCC,
+ qpn);
+ if (ret)
+ goto out;
+
+ context = mailbox->buf;
+ memcpy(buffer, context, sizeof(*context));
+
+out:
+ hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+ return ret;
+}
+
static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_qp_context *context)
{
@@ -5581,18 +5594,20 @@ static int hns_roce_v2_write_srqc_index_queue(struct hns_roce_srq *srq,
struct ib_device *ibdev = srq->ibsrq.device;
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
u64 mtts_idx[MTT_MIN_COUNT] = {};
- dma_addr_t dma_handle_idx = 0;
+ dma_addr_t dma_handle_idx;
int ret;
/* Get physical address of idx que buf */
ret = hns_roce_mtr_find(hr_dev, &idx_que->mtr, 0, mtts_idx,
- ARRAY_SIZE(mtts_idx), &dma_handle_idx);
- if (ret < 1) {
+ ARRAY_SIZE(mtts_idx));
+ if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ idx, ret = %d.\n",
ret);
- return -ENOBUFS;
+ return ret;
}
+ dma_handle_idx = hns_roce_get_mtr_ba(&idx_que->mtr);
+
hr_reg_write(ctx, SRQC_IDX_HOP_NUM,
to_hr_hem_hopnum(hr_dev->caps.idx_hop_num, srq->wqe_cnt));
@@ -5624,20 +5639,22 @@ static int hns_roce_v2_write_srqc(struct hns_roce_srq *srq, void *mb_buf)
struct hns_roce_dev *hr_dev = to_hr_dev(ibdev);
struct hns_roce_srq_context *ctx = mb_buf;
u64 mtts_wqe[MTT_MIN_COUNT] = {};
- dma_addr_t dma_handle_wqe = 0;
+ dma_addr_t dma_handle_wqe;
int ret;
memset(ctx, 0, sizeof(*ctx));
/* Get the physical address of srq buf */
ret = hns_roce_mtr_find(hr_dev, &srq->buf_mtr, 0, mtts_wqe,
- ARRAY_SIZE(mtts_wqe), &dma_handle_wqe);
- if (ret < 1) {
+ ARRAY_SIZE(mtts_wqe));
+ if (ret) {
ibdev_err(ibdev, "failed to find mtr for SRQ WQE, ret = %d.\n",
ret);
- return -ENOBUFS;
+ return ret;
}
+ dma_handle_wqe = hns_roce_get_mtr_ba(&srq->buf_mtr);
+
hr_reg_write(ctx, SRQC_SRQ_ST, 1);
hr_reg_write_bool(ctx, SRQC_SRQ_TYPE,
srq->ibsrq.srq_type == IB_SRQT_XRC);
@@ -6353,7 +6370,7 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
u64 eqe_ba[MTT_MIN_COUNT] = { 0 };
struct hns_roce_eq_context *eqc;
u64 bt_ba = 0;
- int count;
+ int ret;
eqc = mb_buf;
memset(eqc, 0, sizeof(struct hns_roce_eq_context));
@@ -6361,13 +6378,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq,
init_eq_config(hr_dev, eq);
/* if not multi-hop, eqe buffer only use one trunk */
- count = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba, MTT_MIN_COUNT,
- &bt_ba);
- if (count < 1) {
- dev_err(hr_dev->dev, "failed to find EQE mtr\n");
- return -ENOBUFS;
+ ret = hns_roce_mtr_find(hr_dev, &eq->mtr, 0, eqe_ba,
+ ARRAY_SIZE(eqe_ba));
+ if (ret) {
+ dev_err(hr_dev->dev, "failed to find EQE mtr, ret = %d\n", ret);
+ return ret;
}
+ bt_ba = hns_roce_get_mtr_ba(&eq->mtr);
+
hr_reg_write(eqc, EQC_EQ_ST, HNS_ROCE_V2_EQ_STATE_VALID);
hr_reg_write(eqc, EQC_EQE_HOP_NUM, eq->hop_num);
hr_reg_write(eqc, EQC_OVER_IGNORE, eq->over_ignore);
@@ -6714,6 +6733,7 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.query_qpc = hns_roce_v2_query_qpc,
.query_mpt = hns_roce_v2_query_mpt,
.query_srqc = hns_roce_v2_query_srqc,
+ .query_sccc = hns_roce_v2_query_sccc,
.query_hw_counter = hns_roce_hw_v2_query_counter,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index cd97cbee682a..df04bc8ede57 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -646,6 +646,12 @@ struct hns_roce_v2_qp_context {
#define QPCEX_SQ_RQ_NOT_FORBID_EN QPCEX_FIELD_LOC(23, 23)
#define QPCEX_STASH QPCEX_FIELD_LOC(82, 82)
+#define SCC_CONTEXT_SIZE 16
+
+struct hns_roce_v2_scc_context {
+ __le32 data[SCC_CONTEXT_SIZE];
+};
+
#define V2_QP_RWE_S 1 /* rdma write enable */
#define V2_QP_RRE_S 2 /* rdma read enable */
#define V2_QP_ATE_S 3 /* rdma atomic enable */
@@ -1214,12 +1220,13 @@ struct hns_roce_query_pf_caps_d {
#define PF_CAPS_D_RQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(21, 20)
#define PF_CAPS_D_EX_SGE_HOP_NUM PF_CAPS_D_FIELD_LOC(23, 22)
#define PF_CAPS_D_SQWQE_HOP_NUM PF_CAPS_D_FIELD_LOC(25, 24)
-#define PF_CAPS_D_CONG_TYPE PF_CAPS_D_FIELD_LOC(29, 26)
+#define PF_CAPS_D_CONG_CAP PF_CAPS_D_FIELD_LOC(29, 26)
#define PF_CAPS_D_CEQ_DEPTH PF_CAPS_D_FIELD_LOC(85, 64)
#define PF_CAPS_D_NUM_CEQS PF_CAPS_D_FIELD_LOC(95, 86)
#define PF_CAPS_D_AEQ_DEPTH PF_CAPS_D_FIELD_LOC(117, 96)
#define PF_CAPS_D_AEQ_ARM_ST PF_CAPS_D_FIELD_LOC(119, 118)
#define PF_CAPS_D_CEQ_ARM_ST PF_CAPS_D_FIELD_LOC(121, 120)
+#define PF_CAPS_D_DEFAULT_ALG PF_CAPS_D_FIELD_LOC(127, 122)
#define PF_CAPS_D_RSV_PDS PF_CAPS_D_FIELD_LOC(147, 128)
#define PF_CAPS_D_NUM_UARS PF_CAPS_D_FIELD_LOC(155, 148)
#define PF_CAPS_D_RSV_QPS PF_CAPS_D_FIELD_LOC(179, 160)
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index b55fe6911f9f..1dc60c2b2b7a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -394,6 +394,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
resp.config |= HNS_ROCE_RSP_CQE_INLINE_FLAGS;
}
+ if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
+ resp.congest_type = hr_dev->caps.cong_cap;
+
ret = hns_roce_uar_alloc(hr_dev, &context->uar);
if (ret)
goto error_out;
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 91cd580480fe..9e05b57a2d67 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -32,6 +32,7 @@
*/
#include <linux/vmalloc.h>
+#include <linux/count_zeros.h>
#include <rdma/ib_umem.h>
#include <linux/math.h>
#include "hns_roce_device.h"
@@ -103,14 +104,21 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
buf_attr.user_access = mr->access;
/* fast MR's buffer is alloced before mapping, not at creation */
buf_attr.mtt_only = is_fast;
+ buf_attr.iova = mr->iova;
+ /* page size and hop num are fixed for fast MR */
+ buf_attr.adaptive = !is_fast;
+ buf_attr.type = MTR_PBL;
err = hns_roce_mtr_create(hr_dev, &mr->pbl_mtr, &buf_attr,
hr_dev->caps.pbl_ba_pg_sz + PAGE_SHIFT,
udata, start);
- if (err)
+ if (err) {
ibdev_err(ibdev, "failed to alloc pbl mtr, ret = %d.\n", err);
- else
- mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ return err;
+ }
+
+ mr->npages = mr->pbl_mtr.hem_cfg.buf_pg_count;
+ mr->pbl_hop_num = buf_attr.region[0].hopnum;
return err;
}
@@ -695,7 +703,7 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtr->umem = NULL;
mtr->kmem = hns_roce_buf_alloc(hr_dev, total_size,
buf_attr->page_shift,
- mtr->hem_cfg.is_direct ?
+ !mtr_has_mtt(buf_attr) ?
HNS_ROCE_BUF_DIRECT : 0);
if (IS_ERR(mtr->kmem)) {
ibdev_err(ibdev, "failed to alloc kmem, ret = %ld.\n",
@@ -707,14 +715,41 @@ static int mtr_alloc_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return 0;
}
-static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int page_count, unsigned int page_shift)
+static int cal_mtr_pg_cnt(struct hns_roce_mtr *mtr)
+{
+ struct hns_roce_buf_region *region;
+ int page_cnt = 0;
+ int i;
+
+ for (i = 0; i < mtr->hem_cfg.region_count; i++) {
+ region = &mtr->hem_cfg.region[i];
+ page_cnt += region->count;
+ }
+
+ return page_cnt;
+}
+
+static bool need_split_huge_page(struct hns_roce_mtr *mtr)
+{
+ /* When HEM buffer uses 0-level addressing, the page size is
+ * equal to the whole buffer size. If the current MTR has multiple
+ * regions, we split the buffer into small pages (4K, as required by hns
+ * ROCEE). These pages will be used in multiple regions.
+ */
+ return mtr->hem_cfg.is_direct && mtr->hem_cfg.region_count > 1;
+}
+
+static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
+ int page_count = cal_mtr_pg_cnt(mtr);
+ unsigned int page_shift;
dma_addr_t *pages;
int npage;
int ret;
+ page_shift = need_split_huge_page(mtr) ? HNS_HW_PAGE_SHIFT :
+ mtr->hem_cfg.buf_pg_shift;
/* alloc a tmp array to store buffer's dma address */
pages = kvcalloc(page_count, sizeof(dma_addr_t), GFP_KERNEL);
if (!pages)
@@ -734,7 +769,7 @@ static int mtr_map_bufs(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
goto err_alloc_list;
}
- if (mtr->hem_cfg.is_direct && npage > 1) {
+ if (need_split_huge_page(mtr) && npage > 1) {
ret = mtr_check_direct_pages(pages, npage, page_shift);
if (ret) {
ibdev_err(ibdev, "failed to check %s page: %d / %d.\n",
@@ -809,47 +844,53 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
return ret;
}
-int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+static int hns_roce_get_direct_addr_mtt(struct hns_roce_hem_cfg *cfg,
+ u32 start_index, u64 *mtt_buf,
+ int mtt_cnt)
{
- struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
- int mtt_count, left;
- u32 start_index;
+ int mtt_count;
int total = 0;
- __le64 *mtts;
u32 npage;
u64 addr;
- if (!mtt_buf || mtt_max < 1)
- goto done;
-
- /* no mtt memory in direct mode, so just return the buffer address */
- if (cfg->is_direct) {
- start_index = offset >> HNS_HW_PAGE_SHIFT;
- for (mtt_count = 0; mtt_count < cfg->region_count &&
- total < mtt_max; mtt_count++) {
- npage = cfg->region[mtt_count].offset;
- if (npage < start_index)
- continue;
+ if (mtt_cnt > cfg->region_count)
+ return -EINVAL;
- addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
- mtt_buf[total] = addr;
+ for (mtt_count = 0; mtt_count < cfg->region_count && total < mtt_cnt;
+ mtt_count++) {
+ npage = cfg->region[mtt_count].offset;
+ if (npage < start_index)
+ continue;
- total++;
- }
+ addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
+ mtt_buf[total] = addr;
- goto done;
+ total++;
}
- start_index = offset >> cfg->buf_pg_shift;
- left = mtt_max;
+ if (!total)
+ return -ENOENT;
+
+ return 0;
+}
+
+static int hns_roce_get_mhop_mtt(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr, u32 start_index,
+ u64 *mtt_buf, int mtt_cnt)
+{
+ int left = mtt_cnt;
+ int total = 0;
+ int mtt_count;
+ __le64 *mtts;
+ u32 npage;
+
while (left > 0) {
mtt_count = 0;
mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
start_index + total,
&mtt_count);
if (!mtts || !mtt_count)
- goto done;
+ break;
npage = min(mtt_count, left);
left -= npage;
@@ -857,69 +898,165 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
mtt_buf[total++] = le64_to_cpu(mtts[mtt_count]);
}
-done:
- if (base_addr)
- *base_addr = cfg->root_ba;
+ if (!total)
+ return -ENOENT;
+
+ return 0;
+}
+
+int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
+ u32 offset, u64 *mtt_buf, int mtt_max)
+{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+ u32 start_index;
+ int ret;
+
+ if (!mtt_buf || mtt_max < 1)
+ return -EINVAL;
+
+ /* no mtt memory in direct mode, so just return the buffer address */
+ if (cfg->is_direct) {
+ start_index = offset >> HNS_HW_PAGE_SHIFT;
+ ret = hns_roce_get_direct_addr_mtt(cfg, start_index,
+ mtt_buf, mtt_max);
+ } else {
+ start_index = offset >> cfg->buf_pg_shift;
+ ret = hns_roce_get_mhop_mtt(hr_dev, mtr, start_index,
+ mtt_buf, mtt_max);
+ }
+ return ret;
+}
+
+static int get_best_page_shift(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr)
+{
+ unsigned int page_sz;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL || !mtr->umem)
+ return 0;
+
+ page_sz = ib_umem_find_best_pgsz(mtr->umem,
+ hr_dev->caps.page_size_cap,
+ buf_attr->iova);
+ if (!page_sz)
+ return -EINVAL;
+
+ buf_attr->page_shift = order_base_2(page_sz);
+ return 0;
+}
+
+static int get_best_hop_num(struct hns_roce_dev *hr_dev,
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *buf_attr,
+ unsigned int ba_pg_shift)
+{
+#define INVALID_HOPNUM -1
+#define MIN_BA_CNT 1
+ size_t buf_pg_sz = 1 << buf_attr->page_shift;
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+ size_t ba_pg_sz = 1 << ba_pg_shift;
+ int hop_num = INVALID_HOPNUM;
+ size_t unit = MIN_BA_CNT;
+ size_t ba_cnt;
+ int j;
+
+ if (!buf_attr->adaptive || buf_attr->type != MTR_PBL)
+ return 0;
+
+ /* Calculate the number of buf pages; each buf page needs a BA */
+ if (mtr->umem)
+ ba_cnt = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ ba_cnt = DIV_ROUND_UP(buf_attr->region[0].size, buf_pg_sz);
+
+ for (j = 0; j <= HNS_ROCE_MAX_HOP_NUM; j++) {
+ if (ba_cnt <= unit) {
+ hop_num = j;
+ break;
+ }
+ /* Number of BAs that can be represented per hop */
+ unit *= ba_pg_sz / BA_BYTE_LEN;
+ }
+
+ if (hop_num < 0) {
+ ibdev_err(ibdev,
+ "failed to calculate a valid hopnum.\n");
+ return -EINVAL;
+ }
- return total;
+ buf_attr->region[0].hopnum = hop_num;
+
+ return 0;
+}
+
+static bool is_buf_attr_valid(struct hns_roce_dev *hr_dev,
+ struct hns_roce_buf_attr *attr)
+{
+ struct ib_device *ibdev = &hr_dev->ib_dev;
+
+ if (attr->region_count > ARRAY_SIZE(attr->region) ||
+ attr->region_count < 1 || attr->page_shift < HNS_HW_PAGE_SHIFT) {
+ ibdev_err(ibdev,
+ "invalid buf attr, region count %d, page shift %u.\n",
+ attr->region_count, attr->page_shift);
+ return false;
+ }
+
+ return true;
}
static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
- struct hns_roce_buf_attr *attr,
- struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift, u64 unalinged_size)
+ struct hns_roce_mtr *mtr,
+ struct hns_roce_buf_attr *attr)
{
+ struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
struct hns_roce_buf_region *r;
- u64 first_region_padding;
- int page_cnt, region_cnt;
- unsigned int page_shift;
+ size_t buf_pg_sz;
size_t buf_size;
+ int page_cnt, i;
+ u64 pgoff = 0;
+
+ if (!is_buf_attr_valid(hr_dev, attr))
+ return -EINVAL;
/* If mtt is disabled, all pages must be within a continuous range */
cfg->is_direct = !mtr_has_mtt(attr);
+ cfg->region_count = attr->region_count;
buf_size = mtr_bufs_size(attr);
- if (cfg->is_direct) {
- /* When HEM buffer uses 0-level addressing, the page size is
- * equal to the whole buffer size, and we split the buffer into
- * small pages which is used to check whether the adjacent
- * units are in the continuous space and its size is fixed to
- * 4K based on hns ROCEE's requirement.
- */
- page_shift = HNS_HW_PAGE_SHIFT;
-
- /* The ROCEE requires the page size to be 4K * 2 ^ N. */
+ if (need_split_huge_page(mtr)) {
+ buf_pg_sz = HNS_HW_PAGE_SIZE;
cfg->buf_pg_count = 1;
+ /* The ROCEE requires the page size to be 4K * 2 ^ N. */
cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT +
order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));
- first_region_padding = 0;
} else {
- page_shift = attr->page_shift;
- cfg->buf_pg_count = DIV_ROUND_UP(buf_size + unalinged_size,
- 1 << page_shift);
- cfg->buf_pg_shift = page_shift;
- first_region_padding = unalinged_size;
+ buf_pg_sz = 1 << attr->page_shift;
+ cfg->buf_pg_count = mtr->umem ?
+ ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz) :
+ DIV_ROUND_UP(buf_size, buf_pg_sz);
+ cfg->buf_pg_shift = attr->page_shift;
+ pgoff = mtr->umem ? mtr->umem->address & ~PAGE_MASK : 0;
}
/* Convert buffer size to page index and page count for each region and
* the buffer's offset needs to be appended to the first region.
*/
- for (page_cnt = 0, region_cnt = 0; region_cnt < attr->region_count &&
- region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
- r = &cfg->region[region_cnt];
+ for (page_cnt = 0, i = 0; i < attr->region_count; i++) {
+ r = &cfg->region[i];
r->offset = page_cnt;
- buf_size = hr_hw_page_align(attr->region[region_cnt].size +
- first_region_padding);
- r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
- first_region_padding = 0;
+ buf_size = hr_hw_page_align(attr->region[i].size + pgoff);
+ if (attr->type == MTR_PBL && mtr->umem)
+ r->count = ib_umem_num_dma_blocks(mtr->umem, buf_pg_sz);
+ else
+ r->count = DIV_ROUND_UP(buf_size, buf_pg_sz);
+
+ pgoff = 0;
page_cnt += r->count;
- r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
- r->count);
+ r->hopnum = to_hr_hem_hopnum(attr->region[i].hopnum, r->count);
}
- cfg->region_count = region_cnt;
- *buf_page_shift = page_shift;
-
- return page_cnt;
+ return 0;
}
static u64 cal_pages_per_l1ba(unsigned int ba_per_bt, unsigned int hopnum)
@@ -1007,50 +1144,58 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
unsigned long user_addr)
{
struct ib_device *ibdev = &hr_dev->ib_dev;
- unsigned int buf_page_shift = 0;
- int buf_page_cnt;
int ret;
- buf_page_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, &mtr->hem_cfg,
- &buf_page_shift,
- udata ? user_addr & ~PAGE_MASK : 0);
- if (buf_page_cnt < 1 || buf_page_shift < HNS_HW_PAGE_SHIFT) {
- ibdev_err(ibdev, "failed to init mtr cfg, count %d shift %u.\n",
- buf_page_cnt, buf_page_shift);
- return -EINVAL;
- }
-
- ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
- if (ret) {
- ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
- return ret;
- }
-
/* The caller has its own buffer list and invokes the hns_roce_mtr_map()
* to finish the MTT configuration.
*/
if (buf_attr->mtt_only) {
mtr->umem = NULL;
mtr->kmem = NULL;
- return 0;
+ } else {
+ ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
+ if (ret) {
+ ibdev_err(ibdev,
+ "failed to alloc mtr bufs, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = get_best_page_shift(hr_dev, mtr, buf_attr);
+ if (ret)
+ goto err_init_buf;
+
+ ret = get_best_hop_num(hr_dev, mtr, buf_attr, ba_page_shift);
+ if (ret)
+ goto err_init_buf;
}
- ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, udata, user_addr);
+ ret = mtr_init_buf_cfg(hr_dev, mtr, buf_attr);
+ if (ret)
+ goto err_init_buf;
+
+ ret = mtr_alloc_mtt(hr_dev, mtr, ba_page_shift);
if (ret) {
- ibdev_err(ibdev, "failed to alloc mtr bufs, ret = %d.\n", ret);
- goto err_alloc_mtt;
+ ibdev_err(ibdev, "failed to alloc mtr mtt, ret = %d.\n", ret);
+ goto err_init_buf;
}
+ if (buf_attr->mtt_only)
+ return 0;
+
/* Write buffer's dma address to MTT */
- ret = mtr_map_bufs(hr_dev, mtr, buf_page_cnt, buf_page_shift);
- if (ret)
+ ret = mtr_map_bufs(hr_dev, mtr);
+ if (ret) {
ibdev_err(ibdev, "failed to map mtr bufs, ret = %d.\n", ret);
- else
- return 0;
+ goto err_alloc_mtt;
+ }
+
+ return 0;
- mtr_free_bufs(hr_dev, mtr);
err_alloc_mtt:
mtr_free_mtt(hr_dev, mtr);
+err_init_buf:
+ mtr_free_bufs(hr_dev, mtr);
+
return ret;
}
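
Aside, for readers of the mtr_init_buf_cfg() change above: a minimal userspace sketch of the direct-mode page-shift math, with HNS_HW_PAGE_SHIFT and order_base_2() reimplemented locally as stand-ins rather than the kernel definitions. When the MTR needs no MTT, the whole buffer is treated as one page whose size is rounded up to 4K * 2^N.

	#include <stdio.h>

	#define HNS_HW_PAGE_SHIFT 12			/* assumed 4 KiB hardware page */
	#define HNS_HW_PAGE_SIZE  (1UL << HNS_HW_PAGE_SHIFT)
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	/* local stand-in for the kernel's order_base_2(): smallest x with 2^x >= n */
	static unsigned int order_base_2(unsigned long n)
	{
		unsigned int order = 0;

		while ((1UL << order) < n)
			order++;
		return order;
	}

	int main(void)
	{
		unsigned long buf_size = 3 * HNS_HW_PAGE_SIZE;	/* e.g. a 12 KiB buffer */
		unsigned int buf_pg_shift = HNS_HW_PAGE_SHIFT +
			order_base_2(DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE));

		/* prints 14: one 16 KiB "page" covers the whole 12 KiB buffer */
		printf("buf_pg_shift = %u\n", buf_pg_shift);
		return 0;
	}
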
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 31b147210688..f35a66325d9a 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -1004,6 +1004,60 @@ static void free_kernel_wrid(struct hns_roce_qp *hr_qp)
kfree(hr_qp->sq.wrid);
}
+static void default_congest_type(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp)
+{
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD ||
+ hr_qp->ibqp.qp_type == IB_QPT_GSI)
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ else
+ hr_qp->cong_type = hr_dev->caps.default_cong_type;
+}
+
+static int set_congest_type(struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device);
+
+ switch (ucmd->cong_type_flags) {
+ case HNS_ROCE_CREATE_QP_FLAGS_DCQCN:
+ hr_qp->cong_type = CONG_TYPE_DCQCN;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_LDCP:
+ hr_qp->cong_type = CONG_TYPE_LDCP;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_HC3:
+ hr_qp->cong_type = CONG_TYPE_HC3;
+ break;
+ case HNS_ROCE_CREATE_QP_FLAGS_DIP:
+ hr_qp->cong_type = CONG_TYPE_DIP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!test_bit(hr_qp->cong_type, (unsigned long *)&hr_dev->caps.cong_cap))
+ return -EOPNOTSUPP;
+
+ if (hr_qp->ibqp.qp_type == IB_QPT_UD &&
+ hr_qp->cong_type != CONG_TYPE_DCQCN)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int set_congest_param(struct hns_roce_dev *hr_dev,
+ struct hns_roce_qp *hr_qp,
+ struct hns_roce_ib_create_qp *ucmd)
+{
+ if (ucmd->comp_mask & HNS_ROCE_CREATE_QP_MASK_CONGEST_TYPE)
+ return set_congest_type(hr_qp, ucmd);
+
+ default_congest_type(hr_dev, hr_qp);
+
+ return 0;
+}
+
static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata,
@@ -1043,6 +1097,10 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibdev_err(ibdev,
"failed to set user SQ size, ret = %d.\n",
ret);
+
+ ret = set_congest_param(hr_dev, hr_qp, ucmd);
+ if (ret)
+ return ret;
} else {
if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09)
hr_qp->config = HNS_ROCE_EXSGE_FLAGS;
@@ -1051,6 +1109,8 @@ static int set_qp_param(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
ibdev_err(ibdev,
"failed to set kernel SQ size, ret = %d.\n",
ret);
+
+ default_congest_type(hr_dev, hr_qp);
}
return ret;
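
A standalone sketch of the congestion-type defaulting added above (default_congest_type() plus the UD restriction enforced in set_congest_type()); the enum values here are stand-ins, not the driver's uapi. UD and GSI QPs are pinned to DCQCN, everything else inherits the device default.

	#include <stdio.h>

	enum cong_type { CONG_DCQCN, CONG_LDCP, CONG_HC3, CONG_DIP };
	enum qp_type   { QPT_RC, QPT_UD, QPT_GSI };

	/* UD/GSI only support DCQCN; other QP types take the device-wide default */
	static enum cong_type pick_cong_type(enum qp_type qpt, enum cong_type dev_default)
	{
		if (qpt == QPT_UD || qpt == QPT_GSI)
			return CONG_DCQCN;
		return dev_default;
	}

	int main(void)
	{
		printf("RC  -> %d\n", pick_cong_type(QPT_RC, CONG_LDCP));	/* 1, LDCP  */
		printf("GSI -> %d\n", pick_cong_type(QPT_GSI, CONG_LDCP));	/* 0, DCQCN */
		return 0;
	}
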
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index f7f3c4cc7426..356d98816949 100644
--- a/drivers/infiniband/hw/hns/hns_roce_restrack.c
+++ b/drivers/infiniband/hw/hns/hns_roce_restrack.c
@@ -97,16 +97,33 @@ int hns_roce_fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ib_qp)
{
struct hns_roce_dev *hr_dev = to_hr_dev(ib_qp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);
- struct hns_roce_v2_qp_context context;
+ struct hns_roce_full_qp_ctx {
+ struct hns_roce_v2_qp_context qpc;
+ struct hns_roce_v2_scc_context sccc;
+ } context = {};
int ret;
if (!hr_dev->hw->query_qpc)
return -EINVAL;
- ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context);
+ ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);
if (ret)
- return -EINVAL;
+ return ret;
+
+ /* If SCC is disabled or the query fails, the queried SCCC will
+ * be all 0.
+ */
+ if (!(hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL) ||
+ !hr_dev->hw->query_sccc)
+ goto out;
+
+ ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);
+ if (ret)
+ ibdev_warn_ratelimited(&hr_dev->ib_dev,
+ "failed to query SCCC, ret = %d.\n",
+ ret);
+out:
ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, sizeof(context), &context);
return ret;
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 0b046c061742..12704efb7b19 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -719,7 +719,6 @@ static int irdma_setup_kmode_qp(struct irdma_device *iwdev,
info->rq_pa + (ukinfo->rq_depth * IRDMA_QP_WQE_MIN_SIZE);
ukinfo->sq_size = ukinfo->sq_depth >> ukinfo->sq_shift;
ukinfo->rq_size = ukinfo->rq_depth >> ukinfo->rq_shift;
- ukinfo->qp_id = iwqp->ibqp.qp_num;
iwqp->max_send_wr = (ukinfo->sq_depth - IRDMA_SQ_RSVD) >> ukinfo->sq_shift;
iwqp->max_recv_wr = (ukinfo->rq_depth - IRDMA_RQ_RSVD) >> ukinfo->rq_shift;
@@ -944,7 +943,7 @@ static int irdma_create_qp(struct ib_qp *ibqp,
iwqp->host_ctx.size = IRDMA_QP_CTX_SIZE;
init_info.pd = &iwpd->sc_pd;
- init_info.qp_uk_init_info.qp_id = iwqp->ibqp.qp_num;
+ init_info.qp_uk_init_info.qp_id = qp_num;
if (!rdma_protocol_roce(&iwdev->ibdev, 1))
init_info.qp_uk_init_info.first_sq_wq = 1;
iwqp->ctx_info.qp_compl_ctx = (uintptr_t)qp;
diff --git a/drivers/infiniband/hw/mana/cq.c b/drivers/infiniband/hw/mana/cq.c
index 83ebd070535a..4a71e678d09c 100644
--- a/drivers/infiniband/hw/mana/cq.c
+++ b/drivers/infiniband/hw/mana/cq.c
@@ -16,7 +16,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
if (udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -48,7 +48,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return err;
}
- err = mana_ib_gd_create_dma_region(mdev, cq->umem, &cq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, cq->umem, &cq->gdma_region);
if (err) {
ibdev_dbg(ibdev,
"Failed to create dma region for create cq, %d\n",
@@ -57,7 +57,7 @@ int mana_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, cq->gdma_region);
/*
@@ -81,7 +81,7 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
int err;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
err = mana_ib_gd_destroy_dma_region(mdev, cq->gdma_region);
if (err) {
@@ -100,10 +100,29 @@ int mana_ib_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
return 0;
}
-void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
+static void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq)
{
struct mana_ib_cq *cq = ctx;
if (cq->ibcq.comp_handler)
cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq)
+{
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct gdma_queue *gdma_cq;
+
+ /* Create CQ table entry */
+ WARN_ON(gc->cq_table[cq->id]);
+ gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
+ if (!gdma_cq)
+ return -ENOMEM;
+
+ gdma_cq->cq.context = cq;
+ gdma_cq->type = GDMA_CQ;
+ gdma_cq->cq.callback = mana_ib_cq_handler;
+ gdma_cq->id = cq->id;
+ gc->cq_table[cq->id] = gdma_cq;
+ return 0;
+}
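
mana_ib_install_cq_cb() above folds the two open-coded "create CQ table entry" blocks (removed from qp.c further down) into one helper. A rough standalone sketch of that shape, with made-up names and a plain array standing in for the GDMA context's cq_table:

	#include <stdio.h>
	#include <stdlib.h>

	#define CQ_TABLE_SIZE 8

	struct cq_rec {
		unsigned int id;
		void (*comp)(void *ctx);
		void *ctx;
	};

	static struct cq_rec *cq_table[CQ_TABLE_SIZE];

	/* allocate a record, wire up the completion callback, park it by CQ id */
	static int install_cq_cb(unsigned int id, void (*comp)(void *ctx), void *ctx)
	{
		struct cq_rec *rec;

		if (id >= CQ_TABLE_SIZE || cq_table[id])
			return -1;

		rec = calloc(1, sizeof(*rec));
		if (!rec)
			return -1;

		rec->id = id;
		rec->comp = comp;
		rec->ctx = ctx;
		cq_table[id] = rec;
		return 0;
	}

	static void on_comp(void *ctx)
	{
		printf("completion on %s\n", (const char *)ctx);
	}

	int main(void)
	{
		if (!install_cq_cb(3, on_comp, "send-cq"))
			cq_table[3]->comp(cq_table[3]->ctx);
		return 0;
	}
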
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index faca092456fa..71e33feee61b 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -8,13 +8,10 @@
void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
u32 port)
{
- struct gdma_dev *gd = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
struct net_device *ndev;
- struct mana_context *mc;
- mc = gd->driver_data;
- ndev = mc->ports[port];
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@@ -31,14 +28,11 @@ void mana_ib_uncfg_vport(struct mana_ib_dev *dev, struct mana_ib_pd *pd,
int mana_ib_cfg_vport(struct mana_ib_dev *dev, u32 port, struct mana_ib_pd *pd,
u32 doorbell_id)
{
- struct gdma_dev *mdev = &dev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
int err;
- mc = mdev->driver_data;
- ndev = mc->ports[port];
+ ndev = mana_ib_get_netdev(&dev->ib_dev, port);
mpc = netdev_priv(ndev);
mutex_lock(&pd->vport_mutex);
@@ -79,17 +73,17 @@ int mana_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_create_pd_req req = {};
enum gdma_pd_flags flags = 0;
struct mana_ib_dev *dev;
- struct gdma_dev *mdev;
+ struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_PD, sizeof(req),
sizeof(resp));
req.flags = flags;
- err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+ err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@@ -119,17 +113,17 @@ int mana_ib_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
struct gdma_destory_pd_resp resp = {};
struct gdma_destroy_pd_req req = {};
struct mana_ib_dev *dev;
- struct gdma_dev *mdev;
+ struct gdma_context *gc;
int err;
dev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- mdev = dev->gdma_dev;
+ gc = mdev_to_gc(dev);
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_PD, sizeof(req),
sizeof(resp));
req.pd_handle = pd->pd_handle;
- err = mana_gd_send_request(mdev->gdma_context, sizeof(req), &req,
+ err = mana_gd_send_request(gc, sizeof(req), &req,
sizeof(resp), &resp);
if (err || resp.hdr.status) {
@@ -206,13 +200,11 @@ int mana_ib_alloc_ucontext(struct ib_ucontext *ibcontext,
struct ib_device *ibdev = ibcontext->device;
struct mana_ib_dev *mdev;
struct gdma_context *gc;
- struct gdma_dev *dev;
int doorbell_page;
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- dev = mdev->gdma_dev;
- gc = dev->gdma_context;
+ gc = mdev_to_gc(mdev);
/* Allocate a doorbell page index */
ret = mana_gd_allocate_doorbell_page(gc, &doorbell_page);
@@ -238,7 +230,7 @@ void mana_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
ret = mana_gd_destroy_doorbell_page(gc, mana_ucontext->doorbell);
if (ret)
@@ -309,8 +301,8 @@ mana_ib_gd_add_dma_region(struct mana_ib_dev *dev, struct gdma_context *gc,
return 0;
}
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region)
+static int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, unsigned long page_sz)
{
struct gdma_dma_region_add_pages_req *add_req = NULL;
size_t num_pages_processed = 0, num_pages_to_handle;
@@ -322,23 +314,14 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
size_t max_pgs_create_cmd;
struct gdma_context *gc;
size_t num_pages_total;
- struct gdma_dev *mdev;
- unsigned long page_sz;
unsigned int tail = 0;
u64 *page_addr_list;
void *request_buf;
int err;
- mdev = dev->gdma_dev;
- gc = mdev->gdma_context;
+ gc = mdev_to_gc(dev);
hwc = gc->hwc.driver_data;
- /* Hardware requires dma region to align to chosen page size */
- page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, 0);
- if (!page_sz) {
- ibdev_dbg(&dev->ib_dev, "failed to find page size.\n");
- return -ENOMEM;
- }
num_pages_total = ib_umem_num_dma_blocks(umem, page_sz);
max_pgs_create_cmd =
@@ -358,7 +341,7 @@ int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
sizeof(struct gdma_create_dma_region_resp));
create_req->length = umem->length;
- create_req->offset_in_page = umem->address & (page_sz - 1);
+ create_req->offset_in_page = ib_umem_dma_offset(umem, page_sz);
create_req->gdma_page_type = order_base_2(page_sz) - PAGE_SHIFT;
create_req->page_count = num_pages_total;
@@ -424,12 +407,39 @@ out:
return err;
}
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt)
+{
+ unsigned long page_sz;
+
+ page_sz = ib_umem_find_best_pgsz(umem, PAGE_SZ_BM, virt);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region)
+{
+ unsigned long page_sz;
+
+ /* Hardware requires dma region to align to chosen page size */
+ page_sz = ib_umem_find_best_pgoff(umem, PAGE_SZ_BM, 0);
+ if (!page_sz) {
+ ibdev_dbg(&dev->ib_dev, "Failed to find page size.\n");
+ return -EINVAL;
+ }
+
+ return mana_ib_gd_create_dma_region(dev, umem, gdma_region, page_sz);
+}
+
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev, u64 gdma_region)
{
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
- gc = mdev->gdma_context;
ibdev_dbg(&dev->ib_dev, "destroy dma region 0x%llx\n", gdma_region);
return mana_gd_destroy_dma_region(gc, gdma_region);
@@ -447,7 +457,7 @@ int mana_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
int ret;
mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
- gc = mdev->gdma_dev->gdma_context;
+ gc = mdev_to_gc(mdev);
if (vma->vm_pgoff != 0) {
ibdev_dbg(ibdev, "Unexpected vm_pgoff %lu\n", vma->vm_pgoff);
@@ -531,7 +541,7 @@ int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *dev)
req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
req.hdr.dev_id = dev->gdma_dev->dev_id;
- err = mana_gd_send_request(dev->gdma_dev->gdma_context, sizeof(req),
+ err = mana_gd_send_request(mdev_to_gc(dev), sizeof(req),
&req, sizeof(resp), &resp);
if (err) {
diff --git a/drivers/infiniband/hw/mana/mana_ib.h b/drivers/infiniband/hw/mana/mana_ib.h
index 6bdc0f5498d5..f83390eebb7d 100644
--- a/drivers/infiniband/hw/mana/mana_ib.h
+++ b/drivers/infiniband/hw/mana/mana_ib.h
@@ -142,8 +142,29 @@ struct mana_ib_query_adapter_caps_resp {
u32 max_inline_data_size;
}; /* HW Data */
-int mana_ib_gd_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
- mana_handle_t *gdma_region);
+static inline struct gdma_context *mdev_to_gc(struct mana_ib_dev *mdev)
+{
+ return mdev->gdma_dev->gdma_context;
+}
+
+static inline struct net_device *mana_ib_get_netdev(struct ib_device *ibdev, u32 port)
+{
+ struct mana_ib_dev *mdev = container_of(ibdev, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
+ struct mana_context *mc = gc->mana.driver_data;
+
+ if (port < 1 || port > mc->num_ports)
+ return NULL;
+ return mc->ports[port - 1];
+}
+
+int mana_ib_install_cq_cb(struct mana_ib_dev *mdev, struct mana_ib_cq *cq);
+
+int mana_ib_create_zero_offset_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region);
+
+int mana_ib_create_dma_region(struct mana_ib_dev *dev, struct ib_umem *umem,
+ mana_handle_t *gdma_region, u64 virt);
int mana_ib_gd_destroy_dma_region(struct mana_ib_dev *dev,
mana_handle_t gdma_region);
@@ -210,6 +231,4 @@ int mana_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
void mana_ib_disassociate_ucontext(struct ib_ucontext *ibcontext);
int mana_ib_gd_query_adapter_caps(struct mana_ib_dev *mdev);
-
-void mana_ib_cq_handler(void *ctx, struct gdma_queue *gdma_cq);
#endif
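
The new mdev_to_gc() and mana_ib_get_netdev() inlines exist purely to collapse the mdev->gdma_dev->gdma_context (and driver_data/ports[]) pointer chases repeated at nearly every call site in the hunks above. A userspace sketch of the same pattern, with stub types standing in for the real GDMA structures:

	#include <stdio.h>

	struct gdma_context { int max_num_queues; };
	struct gdma_dev     { struct gdma_context *gdma_context; };
	struct ib_dev_stub  { struct gdma_dev *gdma_dev; };

	/* one helper instead of open-coding dev->gdma_dev->gdma_context everywhere */
	static inline struct gdma_context *dev_to_gc(struct ib_dev_stub *dev)
	{
		return dev->gdma_dev->gdma_context;
	}

	int main(void)
	{
		struct gdma_context gc  = { .max_num_queues = 64 };
		struct gdma_dev     gd  = { .gdma_context = &gc };
		struct ib_dev_stub  dev = { .gdma_dev = &gd };

		printf("queues = %d\n", dev_to_gc(&dev)->max_num_queues);
		return 0;
	}
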
diff --git a/drivers/infiniband/hw/mana/mr.c b/drivers/infiniband/hw/mana/mr.c
index 351207c60eb6..b70b13484f09 100644
--- a/drivers/infiniband/hw/mana/mr.c
+++ b/drivers/infiniband/hw/mana/mr.c
@@ -30,12 +30,9 @@ static int mana_ib_gd_create_mr(struct mana_ib_dev *dev, struct mana_ib_mr *mr,
{
struct gdma_create_mr_response resp = {};
struct gdma_create_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
int err;
- gc = mdev->gdma_context;
-
mana_gd_init_req_hdr(&req.hdr, GDMA_CREATE_MR, sizeof(req),
sizeof(resp));
req.pd_handle = mr_params->pd_handle;
@@ -77,12 +74,9 @@ static int mana_ib_gd_destroy_mr(struct mana_ib_dev *dev, u64 mr_handle)
{
struct gdma_destroy_mr_response resp = {};
struct gdma_destroy_mr_request req = {};
- struct gdma_dev *mdev = dev->gdma_dev;
- struct gdma_context *gc;
+ struct gdma_context *gc = mdev_to_gc(dev);
int err;
- gc = mdev->gdma_context;
-
mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_MR, sizeof(req),
sizeof(resp));
@@ -133,7 +127,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
goto err_free;
}
- err = mana_ib_gd_create_dma_region(dev, mr->umem, &dma_region_handle);
+ err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
if (err) {
ibdev_dbg(ibdev, "Failed create dma region for user-mr, %d\n",
err);
@@ -141,7 +135,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
}
ibdev_dbg(ibdev,
- "mana_ib_gd_create_dma_region ret %d gdma_region %llx\n", err,
+ "create_dma_region ret %d gdma_region %llx\n", err,
dma_region_handle);
mr_params.pd_handle = pd->pd_handle;
@@ -164,8 +158,7 @@ struct ib_mr *mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length,
return &mr->ibmr;
err_dma_region:
- mana_gd_destroy_dma_region(dev->gdma_dev->gdma_context,
- dma_region_handle);
+ mana_gd_destroy_dma_region(mdev_to_gc(dev), dma_region_handle);
err_umem:
ib_umem_release(mr->umem);
diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c
index 21ac9fcadf3f..6e7627745c95 100644
--- a/drivers/infiniband/hw/mana/qp.c
+++ b/drivers/infiniband/hw/mana/qp.c
@@ -17,12 +17,10 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
struct mana_cfg_rx_steer_resp resp = {};
mana_handle_t *req_indir_tab;
struct gdma_context *gc;
- struct gdma_dev *mdev;
u32 req_buf_size;
int i, err;
- gc = dev->gdma_dev->gdma_context;
- mdev = &gc->mana;
+ gc = mdev_to_gc(dev);
req_buf_size =
sizeof(*req) + sizeof(mana_handle_t) * MANA_INDIRECT_TABLE_SIZE;
@@ -39,7 +37,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev,
req->rx_enable = 1;
req->update_default_rxobj = 1;
req->default_rxobj = default_rxobj;
- req->hdr.dev_id = mdev->dev_id;
+ req->hdr.dev_id = gc->mana.dev_id;
/* If there are more than 1 entries in indirection table, enable RSS */
if (log_ind_tbl_size)
@@ -99,20 +97,17 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
struct mana_ib_qp *qp = container_of(ibqp, struct mana_ib_qp, ibqp);
struct mana_ib_dev *mdev =
container_of(pd->device, struct mana_ib_dev, ib_dev);
+ struct gdma_context *gc = mdev_to_gc(mdev);
struct ib_rwq_ind_table *ind_tbl = attr->rwq_ind_tbl;
struct mana_ib_create_qp_rss_resp resp = {};
struct mana_ib_create_qp_rss ucmd = {};
struct gdma_queue **gdma_cq_allocated;
mana_handle_t *mana_ind_table;
struct mana_port_context *mpc;
- struct gdma_queue *gdma_cq;
unsigned int ind_tbl_size;
- struct mana_context *mc;
struct net_device *ndev;
- struct gdma_context *gc;
struct mana_ib_cq *cq;
struct mana_ib_wq *wq;
- struct gdma_dev *gd;
struct mana_eq *eq;
struct ib_cq *ibcq;
struct ib_wq *ibwq;
@@ -120,10 +115,6 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
u32 port;
int ret;
- gc = mdev->gdma_dev->gdma_context;
- gd = &gc->mana;
- mc = gd->driver_data;
-
if (!udata || udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -166,12 +157,12 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
/* IB ports start with 1, MANA start with 0 */
port = ucmd.port;
- if (port < 1 || port > mc->num_ports) {
+ ndev = mana_ib_get_netdev(pd->device, port);
+ if (!ndev) {
ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
port);
return -EINVAL;
}
- ndev = mc->ports[port - 1];
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "rx_hash_function %d port %d\n",
@@ -209,7 +200,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
cq_spec.gdma_region = cq->gdma_region;
cq_spec.queue_size = cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq = &mc->eqs[cq->comp_vector % gc->max_num_queues];
+ eq = &mpc->ac->eqs[cq->comp_vector % gc->max_num_queues];
cq_spec.attached_eq = eq->eq->id;
ret = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_RQ,
@@ -237,19 +228,11 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd,
mana_ind_table[i] = wq->rx_object;
/* Create CQ table entry */
- WARN_ON(gc->cq_table[cq->id]);
- gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
- if (!gdma_cq) {
- ret = -ENOMEM;
+ ret = mana_ib_install_cq_cb(mdev, cq);
+ if (ret)
goto fail;
- }
- gdma_cq_allocated[i] = gdma_cq;
- gdma_cq->cq.context = cq;
- gdma_cq->type = GDMA_CQ;
- gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = cq->id;
- gc->cq_table[cq->id] = gdma_cq;
+ gdma_cq_allocated[i] = gc->cq_table[cq->id];
}
resp.num_entries = i;
@@ -306,14 +289,13 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
struct mana_ib_ucontext *mana_ucontext =
rdma_udata_to_drv_context(udata, struct mana_ib_ucontext,
ibucontext);
- struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
+ struct gdma_context *gc = mdev_to_gc(mdev);
struct mana_ib_create_qp_resp resp = {};
struct mana_ib_create_qp ucmd = {};
struct gdma_queue *gdma_cq = NULL;
struct mana_obj_spec wq_spec = {};
struct mana_obj_spec cq_spec = {};
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct ib_umem *umem;
struct mana_eq *eq;
@@ -321,8 +303,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
u32 port;
int err;
- mc = gd->driver_data;
-
if (!mana_ucontext || udata->inlen < sizeof(ucmd))
return -EINVAL;
@@ -333,11 +313,6 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return err;
}
- /* IB ports start with 1, MANA Ethernet ports start with 0 */
- port = ucmd.port;
- if (port < 1 || port > mc->num_ports)
- return -EINVAL;
-
if (attr->cap.max_send_wr > mdev->adapter_caps.max_qp_wr) {
ibdev_dbg(&mdev->ib_dev,
"Requested max_send_wr %d exceeding limit\n",
@@ -352,11 +327,17 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
return -EINVAL;
}
- ndev = mc->ports[port - 1];
+ port = ucmd.port;
+ ndev = mana_ib_get_netdev(ibpd->device, port);
+ if (!ndev) {
+ ibdev_dbg(&mdev->ib_dev, "Invalid port %u in creating qp\n",
+ port);
+ return -EINVAL;
+ }
mpc = netdev_priv(ndev);
ibdev_dbg(&mdev->ib_dev, "port %u ndev %p mpc %p\n", port, ndev, mpc);
- err = mana_ib_cfg_vport(mdev, port - 1, pd, mana_ucontext->doorbell);
+ err = mana_ib_cfg_vport(mdev, port, pd, mana_ucontext->doorbell);
if (err)
return -ENODEV;
@@ -376,8 +357,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
qp->sq_umem = umem;
- err = mana_ib_gd_create_dma_region(mdev, qp->sq_umem,
- &qp->sq_gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, qp->sq_umem,
+ &qp->sq_gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create qp-raw, %d\n",
@@ -386,7 +367,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, qp->sq_gdma_region);
/* Create a WQ on the same port handle used by the Ethernet */
@@ -396,8 +377,8 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
cq_spec.gdma_region = send_cq->gdma_region;
cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;
cq_spec.modr_ctx_id = 0;
- eq_vec = send_cq->comp_vector % gd->gdma_context->max_num_queues;
- eq = &mc->eqs[eq_vec];
+ eq_vec = send_cq->comp_vector % gc->max_num_queues;
+ eq = &mpc->ac->eqs[eq_vec];
cq_spec.attached_eq = eq->eq->id;
err = mana_create_wq_obj(mpc, mpc->port_handle, GDMA_SQ, &wq_spec,
@@ -417,18 +398,9 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
send_cq->id = cq_spec.queue_index;
/* Create CQ table entry */
- WARN_ON(gd->gdma_context->cq_table[send_cq->id]);
- gdma_cq = kzalloc(sizeof(*gdma_cq), GFP_KERNEL);
- if (!gdma_cq) {
- err = -ENOMEM;
+ err = mana_ib_install_cq_cb(mdev, send_cq);
+ if (err)
goto err_destroy_wq_obj;
- }
-
- gdma_cq->cq.context = send_cq;
- gdma_cq->type = GDMA_CQ;
- gdma_cq->cq.callback = mana_ib_cq_handler;
- gdma_cq->id = send_cq->id;
- gd->gdma_context->cq_table[send_cq->id] = gdma_cq;
ibdev_dbg(&mdev->ib_dev,
"ret %d qp->tx_object 0x%llx sq id %llu cq id %llu\n", err,
@@ -450,7 +422,7 @@ static int mana_ib_create_qp_raw(struct ib_qp *ibqp, struct ib_pd *ibpd,
err_release_gdma_cq:
kfree(gdma_cq);
- gd->gdma_context->cq_table[send_cq->id] = NULL;
+ gc->cq_table[send_cq->id] = NULL;
err_destroy_wq_obj:
mana_destroy_wq_obj(mpc, GDMA_SQ, qp->tx_object);
@@ -462,7 +434,7 @@ err_release_umem:
ib_umem_release(umem);
err_free_vport:
- mana_ib_uncfg_vport(mdev, pd, port - 1);
+ mana_ib_uncfg_vport(mdev, pd, port);
return err;
}
@@ -500,16 +472,13 @@ static int mana_ib_destroy_qp_rss(struct mana_ib_qp *qp,
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_wq *wq;
struct ib_wq *ibwq;
int i;
- mc = gd->driver_data;
- ndev = mc->ports[qp->port - 1];
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
@@ -527,15 +496,12 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
{
struct mana_ib_dev *mdev =
container_of(qp->ibqp.device, struct mana_ib_dev, ib_dev);
- struct gdma_dev *gd = &mdev->gdma_dev->gdma_context->mana;
struct ib_pd *ibpd = qp->ibqp.pd;
struct mana_port_context *mpc;
- struct mana_context *mc;
struct net_device *ndev;
struct mana_ib_pd *pd;
- mc = gd->driver_data;
- ndev = mc->ports[qp->port - 1];
+ ndev = mana_ib_get_netdev(qp->ibqp.device, qp->port);
mpc = netdev_priv(ndev);
pd = container_of(ibpd, struct mana_ib_pd, ibpd);
@@ -546,7 +512,7 @@ static int mana_ib_destroy_qp_raw(struct mana_ib_qp *qp, struct ib_udata *udata)
ib_umem_release(qp->sq_umem);
}
- mana_ib_uncfg_vport(mdev, pd, qp->port - 1);
+ mana_ib_uncfg_vport(mdev, pd, qp->port);
return 0;
}
diff --git a/drivers/infiniband/hw/mana/wq.c b/drivers/infiniband/hw/mana/wq.c
index 372d361510e0..7c9c69962573 100644
--- a/drivers/infiniband/hw/mana/wq.c
+++ b/drivers/infiniband/hw/mana/wq.c
@@ -46,7 +46,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
wq->wq_buf_size = ucmd.wq_buf_size;
wq->rx_object = INVALID_MANA_HANDLE;
- err = mana_ib_gd_create_dma_region(mdev, wq->umem, &wq->gdma_region);
+ err = mana_ib_create_zero_offset_dma_region(mdev, wq->umem, &wq->gdma_region);
if (err) {
ibdev_dbg(&mdev->ib_dev,
"Failed to create dma region for create wq, %d\n",
@@ -55,7 +55,7 @@ struct ib_wq *mana_ib_create_wq(struct ib_pd *pd,
}
ibdev_dbg(&mdev->ib_dev,
- "mana_ib_gd_create_dma_region ret %d gdma_region 0x%llx\n",
+ "create_dma_region ret %d gdma_region 0x%llx\n",
err, wq->gdma_region);
/* WQ ID is returned at wq_create time, doesn't know the value yet */
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bbe79b86c717..a8de35c07c9e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1377,7 +1377,6 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
u64 access_flags);
-void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mkey_cache_init(struct mlx5_ib_dev *dev);
void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev);
diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
index 54c723a6edda..ae466e72fc43 100644
--- a/drivers/infiniband/sw/rxe/rxe.c
+++ b/drivers/infiniband/sw/rxe/rxe.c
@@ -160,8 +160,6 @@ void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
port->attr.active_mtu = mtu;
port->mtu_cap = ib_mtu_enum_to_int(mtu);
-
- rxe_info_dev(rxe, "Set mtu to %d", port->mtu_cap);
}
/* called by ifc layer to create new rxe device.
@@ -181,7 +179,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
int err = 0;
if (is_vlan_dev(ndev)) {
- rxe_err("rxe creation allowed on top of a real device only");
+ rxe_err("rxe creation allowed on top of a real device only\n");
err = -EPERM;
goto err;
}
@@ -189,7 +187,7 @@ static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
rxe = rxe_get_dev_from_net(ndev);
if (rxe) {
ib_device_put(&rxe->ib_dev);
- rxe_err_dev(rxe, "already configured on %s", ndev->name);
+ rxe_err_dev(rxe, "already configured on %s\n", ndev->name);
err = -EEXIST;
goto err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe.h b/drivers/infiniband/sw/rxe/rxe.h
index d33dd6cf83d3..d8fb2c7af30a 100644
--- a/drivers/infiniband/sw/rxe/rxe.h
+++ b/drivers/infiniband/sw/rxe/rxe.h
@@ -38,7 +38,7 @@
#define RXE_ROCE_V2_SPORT (0xc000)
-#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt "\n", __func__, ##__VA_ARGS__)
+#define rxe_dbg(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_dev(rxe, fmt, ...) ibdev_dbg(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
#define rxe_dbg_uc(uc, fmt, ...) ibdev_dbg((uc)->ibuc.device, \
@@ -58,7 +58,7 @@
#define rxe_dbg_mw(mw, fmt, ...) ibdev_dbg((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt "\n", __func__, \
+#define rxe_err(fmt, ...) pr_err_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_err_dev(rxe, fmt, ...) ibdev_err_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
@@ -79,7 +79,7 @@
#define rxe_err_mw(mw, fmt, ...) ibdev_err_ratelimited((mw)->ibmw.device, \
"mw#%d %s: " fmt, (mw)->elem.index, __func__, ##__VA_ARGS__)
-#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt "\n", __func__, \
+#define rxe_info(fmt, ...) pr_info_ratelimited("%s: " fmt, __func__, \
##__VA_ARGS__)
#define rxe_info_dev(rxe, fmt, ...) ibdev_info_ratelimited(&(rxe)->ib_dev, \
"%s: " fmt, __func__, ##__VA_ARGS__)
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d0bdc2d8adc8..b78b8c0856ab 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -433,7 +433,7 @@ static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
} else {
if (wqe->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wqe->status);
}
}
@@ -582,7 +582,7 @@ static int flush_send_wqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
err = rxe_cq_post(qp->scq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);
+ rxe_dbg_cq(qp->scq, "post cq failed, err = %d\n", err);
return err;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index d5486cbb3f10..fec87c9030ab 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -27,7 +27,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
if (cq) {
count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
- rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)",
+ rxe_dbg_cq(cq, "cqe(%d) < current # elements in queue (%d)\n",
cqe, count);
goto err1;
}
@@ -96,7 +96,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
- rxe_err_cq(cq, "queue full");
+ rxe_err_cq(cq, "queue full\n");
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
ev.device = cq->ibcq.device;
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index 4d2a8ef52c85..746110898a0e 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -59,7 +59,7 @@ int rxe_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
/* rxe_mr.c */
u8 rxe_get_next_key(u32 last_key);
void rxe_mr_init_dma(int access, struct rxe_mr *mr);
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr);
int rxe_mr_init_fast(int max_pages, struct rxe_mr *mr);
int rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index f54042e9aeb2..da3dee520876 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -34,7 +34,7 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
case IB_MR_TYPE_MEM_REG:
if (iova < mr->ibmr.iova ||
iova + length > mr->ibmr.iova + mr->ibmr.length) {
- rxe_dbg_mr(mr, "iova/length out of range");
+ rxe_dbg_mr(mr, "iova/length out of range\n");
return -EINVAL;
}
return 0;
@@ -126,7 +126,7 @@ static int rxe_mr_fill_pages_from_sgt(struct rxe_mr *mr, struct sg_table *sgt)
return xas_error(&xas);
}
-int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length, u64 iova,
+int rxe_mr_init_user(struct rxe_dev *rxe, u64 start, u64 length,
int access, struct rxe_mr *mr)
{
struct ib_umem *umem;
@@ -319,7 +319,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr,
err = mr_check_range(mr, iova, length);
if (unlikely(err)) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return err;
}
@@ -477,7 +477,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
u64 *va;
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state");
+ rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -490,7 +490,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
err = mr_check_range(mr, iova, sizeof(value));
if (err) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -501,7 +501,7 @@ int rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode,
}
if (unlikely(page_offset & 0x7)) {
- rxe_dbg_mr(mr, "iova not aligned");
+ rxe_dbg_mr(mr, "iova not aligned\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
@@ -534,7 +534,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
if (unlikely(mr->state != RXE_MR_STATE_VALID)) {
- rxe_dbg_mr(mr, "mr not in valid state");
+ rxe_dbg_mr(mr, "mr not in valid state\n");
return RESPST_ERR_RKEY_VIOLATION;
}
@@ -548,7 +548,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA oA19-28 */
err = mr_check_range(mr, iova, sizeof(value));
if (unlikely(err)) {
- rxe_dbg_mr(mr, "iova out of range");
+ rxe_dbg_mr(mr, "iova out of range\n");
return RESPST_ERR_RKEY_VIOLATION;
}
page_offset = rxe_mr_iova_to_page_offset(mr, iova);
@@ -560,7 +560,7 @@ int rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value)
/* See IBA A19.4.2 */
if (unlikely(page_offset & 0x7)) {
- rxe_dbg_mr(mr, "misaligned address");
+ rxe_dbg_mr(mr, "misaligned address\n");
return RESPST_ERR_MISALIGNED_ATOMIC;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index d9312b5c9d20..379e65bfcd49 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -198,7 +198,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
if (access & ~RXE_ACCESS_SUPPORTED_MW) {
- rxe_err_mw(mw, "access %#x not supported", access);
+ rxe_err_mw(mw, "access %#x not supported\n", access);
ret = -EOPNOTSUPP;
goto err_drop_mr;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 28e379c108bc..e3589c02013e 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -201,7 +201,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->sq.queue) {
- rxe_err_qp(qp, "Unable to allocate send queue");
+ rxe_err_qp(qp, "Unable to allocate send queue\n");
err = -ENOMEM;
goto err_out;
}
@@ -211,7 +211,7 @@ static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->sq.queue->buf, qp->sq.queue->buf_size,
&qp->sq.queue->ip);
if (err) {
- rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
@@ -292,7 +292,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
QUEUE_TYPE_FROM_CLIENT);
if (!qp->rq.queue) {
- rxe_err_qp(qp, "Unable to allocate recv queue");
+ rxe_err_qp(qp, "Unable to allocate recv queue\n");
err = -ENOMEM;
goto err_out;
}
@@ -302,7 +302,7 @@ static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
qp->rq.queue->buf, qp->rq.queue->buf_size,
&qp->rq.queue->ip);
if (err) {
- rxe_err_qp(qp, "do_mmap_info failed, err = %d", err);
+ rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
goto err_free;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index da470a925efc..963382f625d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -362,18 +362,18 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
if ((pkt->mask & RXE_START_MASK) &&
(pkt->mask & RXE_END_MASK)) {
if (unlikely(payload > mtu)) {
- rxe_dbg_qp(qp, "only packet too long");
+ rxe_dbg_qp(qp, "only packet too long\n");
return RESPST_ERR_LENGTH;
}
} else if ((pkt->mask & RXE_START_MASK) ||
(pkt->mask & RXE_MIDDLE_MASK)) {
if (unlikely(payload != mtu)) {
- rxe_dbg_qp(qp, "first or middle packet not mtu");
+ rxe_dbg_qp(qp, "first or middle packet not mtu\n");
return RESPST_ERR_LENGTH;
}
} else if (pkt->mask & RXE_END_MASK) {
if (unlikely((payload == 0) || (payload > mtu))) {
- rxe_dbg_qp(qp, "last packet zero or too long");
+ rxe_dbg_qp(qp, "last packet zero or too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -382,7 +382,7 @@ static enum resp_states rxe_resp_check_length(struct rxe_qp *qp,
/* See IBA C9-94 */
if (pkt->mask & RXE_RETH_MASK) {
if (reth_len(pkt) > (1U << 31)) {
- rxe_dbg_qp(qp, "dma length too long");
+ rxe_dbg_qp(qp, "dma length too long\n");
return RESPST_ERR_LENGTH;
}
}
@@ -1133,7 +1133,7 @@ static enum resp_states do_complete(struct rxe_qp *qp,
}
} else {
if (wc->status != IB_WC_WR_FLUSH_ERR)
- rxe_err_qp(qp, "non-flush error status = %d",
+ rxe_err_qp(qp, "non-flush error status = %d\n",
wc->status);
}
@@ -1442,7 +1442,7 @@ static int flush_recv_wqe(struct rxe_qp *qp, struct rxe_recv_wqe *wqe)
err = rxe_cq_post(qp->rcq, &cqe, 0);
if (err)
- rxe_dbg_cq(qp->rcq, "post cq failed err = %d", err);
+ rxe_dbg_cq(qp->rcq, "post cq failed err = %d\n", err);
return err;
}
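
The long run of rxe message edits above follows from the macro change in rxe.h: rxe_dbg(), rxe_err() and rxe_info() no longer append a newline, so each format string now carries its own \n, matching the ibdev_*-based variants in the same header, which do not add one. A tiny userspace sketch of the resulting convention, with printf standing in for pr_debug (##__VA_ARGS__ is the same GNU C extension the kernel relies on):

	#include <stdio.h>

	#define rxe_dbg(fmt, ...) \
		printf("%s: " fmt, __func__, ##__VA_ARGS__)

	static void demo(void)
	{
		rxe_dbg("queue full\n");		/* caller supplies the newline */
		rxe_dbg("returned err = %d\n", -22);
	}

	int main(void)
	{
		demo();
		return 0;
	}
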
diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c
index 1501120d4f52..80332638d9e3 100644
--- a/drivers/infiniband/sw/rxe/rxe_task.c
+++ b/drivers/infiniband/sw/rxe/rxe_task.c
@@ -156,7 +156,7 @@ static void do_task(struct rxe_task *task)
default:
WARN_ON(1);
- rxe_dbg_qp(task->qp, "unexpected task state = %d",
+ rxe_dbg_qp(task->qp, "unexpected task state = %d\n",
task->state);
task->state = TASK_STATE_IDLE;
}
@@ -167,7 +167,7 @@ exit:
if (WARN_ON(task->num_done != task->num_sched))
rxe_dbg_qp(
task->qp,
- "%ld tasks scheduled, %ld tasks done",
+ "%ld tasks scheduled, %ld tasks done\n",
task->num_sched, task->num_done);
}
spin_unlock_irqrestore(&task->lock, flags);
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 48f86839d36a..614581989b38 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -23,7 +23,7 @@ static int rxe_query_device(struct ib_device *ibdev,
int err;
if (udata->inlen || udata->outlen) {
- rxe_dbg_dev(rxe, "malformed udata");
+ rxe_dbg_dev(rxe, "malformed udata\n");
err = -EINVAL;
goto err_out;
}
@@ -33,7 +33,7 @@ static int rxe_query_device(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -45,7 +45,7 @@ static int rxe_query_port(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@@ -67,7 +67,7 @@ static int rxe_query_port(struct ib_device *ibdev,
return ret;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -79,7 +79,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
if (index != 0) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad pkey index = %d", index);
+ rxe_dbg_dev(rxe, "bad pkey index = %d\n", index);
goto err_out;
}
@@ -87,7 +87,7 @@ static int rxe_query_pkey(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -100,7 +100,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
if (mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
IB_DEVICE_MODIFY_NODE_DESC)) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@@ -115,7 +115,7 @@ static int rxe_modify_device(struct ib_device *ibdev,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -128,14 +128,14 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
//TODO is shutdown useful
if (mask & ~(IB_PORT_RESET_QKEY_CNTR)) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported mask = 0x%x", mask);
+ rxe_dbg_dev(rxe, "unsupported mask = 0x%x\n", mask);
goto err_out;
}
@@ -149,7 +149,7 @@ static int rxe_modify_port(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -161,14 +161,14 @@ static enum rdma_link_layer rxe_get_link_layer(struct ib_device *ibdev,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
return IB_LINK_LAYER_ETHERNET;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -181,7 +181,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
if (port_num != 1) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "bad port_num = %d", port_num);
+ rxe_dbg_dev(rxe, "bad port_num = %d\n", port_num);
goto err_out;
}
@@ -197,7 +197,7 @@ static int rxe_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -210,7 +210,7 @@ static int rxe_alloc_ucontext(struct ib_ucontext *ibuc, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->uc_pool, uc);
if (err)
- rxe_err_dev(rxe, "unable to create uc");
+ rxe_err_dev(rxe, "unable to create uc\n");
return err;
}
@@ -222,7 +222,7 @@ static void rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
err = rxe_cleanup(uc);
if (err)
- rxe_err_uc(uc, "cleanup failed, err = %d", err);
+ rxe_err_uc(uc, "cleanup failed, err = %d\n", err);
}
/* pd */
@@ -234,14 +234,14 @@ static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_add_to_pool(&rxe->pd_pool, pd);
if (err) {
- rxe_dbg_dev(rxe, "unable to alloc pd");
+ rxe_dbg_dev(rxe, "unable to alloc pd\n");
goto err_out;
}
return 0;
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -252,7 +252,7 @@ static int rxe_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
err = rxe_cleanup(pd);
if (err)
- rxe_err_pd(pd, "cleanup failed, err = %d", err);
+ rxe_err_pd(pd, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -279,7 +279,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_add_to_pool_ah(&rxe->ah_pool, ah,
init_attr->flags & RDMA_CREATE_AH_SLEEPABLE);
if (err) {
- rxe_dbg_dev(rxe, "unable to create ah");
+ rxe_dbg_dev(rxe, "unable to create ah\n");
goto err_out;
}
@@ -288,7 +288,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
err = rxe_ah_chk_attr(ah, init_attr->ah_attr);
if (err) {
- rxe_dbg_ah(ah, "bad attr");
+ rxe_dbg_ah(ah, "bad attr\n");
goto err_cleanup;
}
@@ -298,7 +298,7 @@ static int rxe_create_ah(struct ib_ah *ibah,
sizeof(uresp->ah_num));
if (err) {
err = -EFAULT;
- rxe_dbg_ah(ah, "unable to copy to user");
+ rxe_dbg_ah(ah, "unable to copy to user\n");
goto err_cleanup;
}
} else if (ah->is_user) {
@@ -314,9 +314,9 @@ static int rxe_create_ah(struct ib_ah *ibah,
err_cleanup:
cleanup_err = rxe_cleanup(ah);
if (cleanup_err)
- rxe_err_ah(ah, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_ah(ah, "returned err = %d", err);
+ rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@@ -327,7 +327,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
err = rxe_ah_chk_attr(ah, attr);
if (err) {
- rxe_dbg_ah(ah, "bad attr");
+ rxe_dbg_ah(ah, "bad attr\n");
goto err_out;
}
@@ -336,7 +336,7 @@ static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
return 0;
err_out:
- rxe_err_ah(ah, "returned err = %d", err);
+ rxe_err_ah(ah, "returned err = %d\n", err);
return err;
}
@@ -358,7 +358,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
err = rxe_cleanup_ah(ah, flags & RDMA_DESTROY_AH_SLEEPABLE);
if (err)
- rxe_err_ah(ah, "cleanup failed, err = %d", err);
+ rxe_err_ah(ah, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -376,7 +376,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_err_dev(rxe, "malformed udata");
+ rxe_err_dev(rxe, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@@ -384,20 +384,20 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (init->srq_type != IB_SRQT_BASIC) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "srq type = %d, not supported",
+ rxe_dbg_dev(rxe, "srq type = %d, not supported\n",
init->srq_type);
goto err_out;
}
err = rxe_srq_chk_init(rxe, init);
if (err) {
- rxe_dbg_dev(rxe, "invalid init attributes");
+ rxe_dbg_dev(rxe, "invalid init attributes\n");
goto err_out;
}
err = rxe_add_to_pool(&rxe->srq_pool, srq);
if (err) {
- rxe_dbg_dev(rxe, "unable to create srq, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create srq, err = %d\n", err);
goto err_out;
}
@@ -406,7 +406,7 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err = rxe_srq_from_init(rxe, srq, init, udata, uresp);
if (err) {
- rxe_dbg_srq(srq, "create srq failed, err = %d", err);
+ rxe_dbg_srq(srq, "create srq failed, err = %d\n", err);
goto err_cleanup;
}
@@ -415,9 +415,9 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(srq);
if (cleanup_err)
- rxe_err_srq(srq, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -433,34 +433,34 @@ static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
if (udata) {
if (udata->inlen < sizeof(cmd)) {
err = -EINVAL;
- rxe_dbg_srq(srq, "malformed udata");
+ rxe_dbg_srq(srq, "malformed udata\n");
goto err_out;
}
err = ib_copy_from_udata(&cmd, udata, sizeof(cmd));
if (err) {
err = -EFAULT;
- rxe_dbg_srq(srq, "unable to read udata");
+ rxe_dbg_srq(srq, "unable to read udata\n");
goto err_out;
}
}
err = rxe_srq_chk_attr(rxe, srq, attr, mask);
if (err) {
- rxe_dbg_srq(srq, "bad init attributes");
+ rxe_dbg_srq(srq, "bad init attributes\n");
goto err_out;
}
err = rxe_srq_from_attr(rxe, srq, attr, mask, &cmd, udata);
if (err) {
- rxe_dbg_srq(srq, "bad attr");
+ rxe_dbg_srq(srq, "bad attr\n");
goto err_out;
}
return 0;
err_out:
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@@ -471,7 +471,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
if (srq->error) {
err = -EINVAL;
- rxe_dbg_srq(srq, "srq in error state");
+ rxe_dbg_srq(srq, "srq in error state\n");
goto err_out;
}
@@ -481,7 +481,7 @@ static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
return 0;
err_out:
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
return err;
}
@@ -505,7 +505,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
if (err) {
*bad_wr = wr;
- rxe_err_srq(srq, "returned err = %d", err);
+ rxe_err_srq(srq, "returned err = %d\n", err);
}
return err;
@@ -518,7 +518,7 @@ static int rxe_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
err = rxe_cleanup(srq);
if (err)
- rxe_err_srq(srq, "cleanup failed, err = %d", err);
+ rxe_err_srq(srq, "cleanup failed, err = %d\n", err);
return 0;
}
@@ -536,13 +536,13 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (udata) {
if (udata->inlen) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
@@ -554,25 +554,25 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
if (init->create_flags) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "unsupported create_flags, err = %d", err);
+ rxe_dbg_dev(rxe, "unsupported create_flags, err = %d\n", err);
goto err_out;
}
err = rxe_qp_chk_init(rxe, init);
if (err) {
- rxe_dbg_dev(rxe, "bad init attr, err = %d", err);
+ rxe_dbg_dev(rxe, "bad init attr, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->qp_pool, qp);
if (err) {
- rxe_dbg_dev(rxe, "unable to create qp, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create qp, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibqp->pd, udata);
if (err) {
- rxe_dbg_qp(qp, "create qp failed, err = %d", err);
+ rxe_dbg_qp(qp, "create qp failed, err = %d\n", err);
goto err_cleanup;
}
@@ -582,9 +582,9 @@ static int rxe_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init,
err_cleanup:
cleanup_err = rxe_cleanup(qp);
if (cleanup_err)
- rxe_err_qp(qp, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -597,20 +597,20 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (mask & ~IB_QP_ATTR_STANDARD_BITS) {
err = -EOPNOTSUPP;
- rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d",
+ rxe_dbg_qp(qp, "unsupported mask = 0x%x, err = %d\n",
mask, err);
goto err_out;
}
err = rxe_qp_chk_attr(rxe, qp, attr, mask);
if (err) {
- rxe_dbg_qp(qp, "bad mask/attr, err = %d", err);
+ rxe_dbg_qp(qp, "bad mask/attr, err = %d\n", err);
goto err_out;
}
err = rxe_qp_from_attr(qp, attr, mask, udata);
if (err) {
- rxe_dbg_qp(qp, "modify qp failed, err = %d", err);
+ rxe_dbg_qp(qp, "modify qp failed, err = %d\n", err);
goto err_out;
}
@@ -622,7 +622,7 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
return 0;
err_out:
- rxe_err_qp(qp, "returned err = %d", err);
+ rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@@ -644,18 +644,18 @@ static int rxe_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
err = rxe_qp_chk_destroy(qp);
if (err) {
- rxe_dbg_qp(qp, "unable to destroy qp, err = %d", err);
+ rxe_dbg_qp(qp, "unable to destroy qp, err = %d\n", err);
goto err_out;
}
err = rxe_cleanup(qp);
if (err)
- rxe_err_qp(qp, "cleanup failed, err = %d", err);
+ rxe_err_qp(qp, "cleanup failed, err = %d\n", err);
return 0;
err_out:
- rxe_err_qp(qp, "returned err = %d", err);
+ rxe_err_qp(qp, "returned err = %d\n", err);
return err;
}
@@ -675,12 +675,12 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
do {
mask = wr_opcode_mask(ibwr->opcode, qp);
if (!mask) {
- rxe_err_qp(qp, "bad wr opcode for qp type");
+ rxe_err_qp(qp, "bad wr opcode for qp type\n");
break;
}
if (num_sge > sq->max_sge) {
- rxe_err_qp(qp, "num_sge > max_sge");
+ rxe_err_qp(qp, "num_sge > max_sge\n");
break;
}
@@ -689,27 +689,27 @@ static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
length += ibwr->sg_list[i].length;
if (length > (1UL << 31)) {
- rxe_err_qp(qp, "message length too long");
+ rxe_err_qp(qp, "message length too long\n");
break;
}
if (mask & WR_ATOMIC_MASK) {
if (length != 8) {
- rxe_err_qp(qp, "atomic length != 8");
+ rxe_err_qp(qp, "atomic length != 8\n");
break;
}
if (atomic_wr(ibwr)->remote_addr & 0x7) {
- rxe_err_qp(qp, "misaligned atomic address");
+ rxe_err_qp(qp, "misaligned atomic address\n");
break;
}
}
if (ibwr->send_flags & IB_SEND_INLINE) {
if (!(mask & WR_INLINE_MASK)) {
- rxe_err_qp(qp, "opcode doesn't support inline data");
+ rxe_err_qp(qp, "opcode doesn't support inline data\n");
break;
}
if (length > sq->max_inline) {
- rxe_err_qp(qp, "inline length too big");
+ rxe_err_qp(qp, "inline length too big\n");
break;
}
}
@@ -747,7 +747,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_SEND:
break;
default:
- rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP",
+ rxe_err_qp(qp, "bad wr opcode %d for UD/GSI QP\n",
wr->opcode);
return -EINVAL;
}
@@ -795,7 +795,7 @@ static int init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
case IB_WR_ATOMIC_WRITE:
break;
default:
- rxe_err_qp(qp, "unsupported wr opcode %d",
+ rxe_err_qp(qp, "unsupported wr opcode %d\n",
wr->opcode);
return -EINVAL;
}
@@ -870,7 +870,7 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr)
full = queue_full(sq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
- rxe_err_qp(qp, "send queue full");
+ rxe_err_qp(qp, "send queue full\n");
return -ENOMEM;
}
@@ -922,14 +922,14 @@ static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- rxe_err_qp(qp, "qp has been destroyed");
+ rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
- rxe_err_qp(qp, "qp not ready to send");
+ rxe_err_qp(qp, "qp not ready to send\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
@@ -959,13 +959,13 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
full = queue_full(rq->queue, QUEUE_TYPE_FROM_ULP);
if (unlikely(full)) {
err = -ENOMEM;
- rxe_dbg("queue full");
+ rxe_dbg("queue full\n");
goto err_out;
}
if (unlikely(num_sge > rq->max_sge)) {
err = -EINVAL;
- rxe_dbg("bad num_sge > max_sge");
+ rxe_dbg("bad num_sge > max_sge\n");
goto err_out;
}
@@ -976,7 +976,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
/* IBA max message size is 2^31 */
if (length >= (1UL<<31)) {
err = -EINVAL;
- rxe_dbg("message length too long");
+ rxe_dbg("message length too long\n");
goto err_out;
}
@@ -996,7 +996,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
return 0;
err_out:
- rxe_dbg("returned err = %d", err);
+ rxe_dbg("returned err = %d\n", err);
return err;
}
@@ -1012,7 +1012,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
/* caller has already called destroy_qp */
if (WARN_ON_ONCE(!qp->valid)) {
spin_unlock_irqrestore(&qp->state_lock, flags);
- rxe_err_qp(qp, "qp has been destroyed");
+ rxe_err_qp(qp, "qp has been destroyed\n");
return -EINVAL;
}
@@ -1020,14 +1020,14 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
spin_unlock_irqrestore(&qp->state_lock, flags);
*bad_wr = wr;
- rxe_dbg_qp(qp, "qp not ready to post recv");
+ rxe_dbg_qp(qp, "qp not ready to post recv\n");
return -EINVAL;
}
spin_unlock_irqrestore(&qp->state_lock, flags);
if (unlikely(qp->srq)) {
*bad_wr = wr;
- rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead");
+ rxe_dbg_qp(qp, "qp has srq, use post_srq_recv instead\n");
return -EINVAL;
}
@@ -1065,7 +1065,7 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_dev(rxe, "malformed udata, err = %d", err);
+ rxe_dbg_dev(rxe, "malformed udata, err = %d\n", err);
goto err_out;
}
uresp = udata->outbuf;
@@ -1073,26 +1073,26 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
if (attr->flags) {
err = -EOPNOTSUPP;
- rxe_dbg_dev(rxe, "bad attr->flags, err = %d", err);
+ rxe_dbg_dev(rxe, "bad attr->flags, err = %d\n", err);
goto err_out;
}
err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
if (err) {
- rxe_dbg_dev(rxe, "bad init attributes, err = %d", err);
+ rxe_dbg_dev(rxe, "bad init attributes, err = %d\n", err);
goto err_out;
}
err = rxe_add_to_pool(&rxe->cq_pool, cq);
if (err) {
- rxe_dbg_dev(rxe, "unable to create cq, err = %d", err);
+ rxe_dbg_dev(rxe, "unable to create cq, err = %d\n", err);
goto err_out;
}
err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector, udata,
uresp);
if (err) {
- rxe_dbg_cq(cq, "create cq failed, err = %d", err);
+ rxe_dbg_cq(cq, "create cq failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1101,9 +1101,9 @@ static int rxe_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
err_cleanup:
cleanup_err = rxe_cleanup(cq);
if (cleanup_err)
- rxe_err_cq(cq, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", cleanup_err);
err_out:
- rxe_err_dev(rxe, "returned err = %d", err);
+ rxe_err_dev(rxe, "returned err = %d\n", err);
return err;
}
@@ -1117,7 +1117,7 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
if (udata) {
if (udata->outlen < sizeof(*uresp)) {
err = -EINVAL;
- rxe_dbg_cq(cq, "malformed udata");
+ rxe_dbg_cq(cq, "malformed udata\n");
goto err_out;
}
uresp = udata->outbuf;
@@ -1125,20 +1125,20 @@ static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
if (err) {
- rxe_dbg_cq(cq, "bad attr, err = %d", err);
+ rxe_dbg_cq(cq, "bad attr, err = %d\n", err);
goto err_out;
}
err = rxe_cq_resize_queue(cq, cqe, uresp, udata);
if (err) {
- rxe_dbg_cq(cq, "resize cq failed, err = %d", err);
+ rxe_dbg_cq(cq, "resize cq failed, err = %d\n", err);
goto err_out;
}
return 0;
err_out:
- rxe_err_cq(cq, "returned err = %d", err);
+ rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@@ -1202,18 +1202,18 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
*/
if (atomic_read(&cq->num_wq)) {
err = -EINVAL;
- rxe_dbg_cq(cq, "still in use");
+ rxe_dbg_cq(cq, "still in use\n");
goto err_out;
}
err = rxe_cleanup(cq);
if (err)
- rxe_err_cq(cq, "cleanup failed, err = %d", err);
+ rxe_err_cq(cq, "cleanup failed, err = %d\n", err);
return 0;
err_out:
- rxe_err_cq(cq, "returned err = %d", err);
+ rxe_err_cq(cq, "returned err = %d\n", err);
return err;
}
@@ -1231,7 +1231,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
- rxe_dbg_dev(rxe, "unable to create mr");
+ rxe_dbg_dev(rxe, "unable to create mr\n");
goto err_free;
}
@@ -1245,7 +1245,7 @@ static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1259,7 +1259,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
int err, cleanup_err;
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_pd(pd, "access = %#x not supported (%#x)", access,
+ rxe_err_pd(pd, "access = %#x not supported (%#x)\n", access,
RXE_ACCESS_SUPPORTED_MR);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1270,7 +1270,7 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err = rxe_add_to_pool(&rxe->mr_pool, mr);
if (err) {
- rxe_dbg_pd(pd, "unable to create mr");
+ rxe_dbg_pd(pd, "unable to create mr\n");
goto err_free;
}
@@ -1278,9 +1278,9 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
mr->ibmr.pd = ibpd;
mr->ibmr.device = ibpd->device;
- err = rxe_mr_init_user(rxe, start, length, iova, access, mr);
+ err = rxe_mr_init_user(rxe, start, length, access, mr);
if (err) {
- rxe_dbg_mr(mr, "reg_user_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "reg_user_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1290,10 +1290,10 @@ static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd, u64 start,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
err_free:
kfree(mr);
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1310,7 +1310,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
* rereg_pd and rereg_access
*/
if (flags & ~RXE_MR_REREG_SUPPORTED) {
- rxe_err_mr(mr, "flags = %#x not supported", flags);
+ rxe_err_mr(mr, "flags = %#x not supported\n", flags);
return ERR_PTR(-EOPNOTSUPP);
}
@@ -1322,7 +1322,7 @@ static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
if (flags & IB_MR_REREG_ACCESS) {
if (access & ~RXE_ACCESS_SUPPORTED_MR) {
- rxe_err_mr(mr, "access = %#x not supported", access);
+ rxe_err_mr(mr, "access = %#x not supported\n", access);
return ERR_PTR(-EOPNOTSUPP);
}
mr->access = access;
@@ -1341,7 +1341,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
if (mr_type != IB_MR_TYPE_MEM_REG) {
err = -EINVAL;
- rxe_dbg_pd(pd, "mr type %d not supported, err = %d",
+ rxe_dbg_pd(pd, "mr type %d not supported, err = %d\n",
mr_type, err);
goto err_out;
}
@@ -1360,7 +1360,7 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err = rxe_mr_init_fast(max_num_sg, mr);
if (err) {
- rxe_dbg_mr(mr, "alloc_mr failed, err = %d", err);
+ rxe_dbg_mr(mr, "alloc_mr failed, err = %d\n", err);
goto err_cleanup;
}
@@ -1370,11 +1370,11 @@ static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
err_cleanup:
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", err);
err_free:
kfree(mr);
err_out:
- rxe_err_pd(pd, "returned err = %d", err);
+ rxe_err_pd(pd, "returned err = %d\n", err);
return ERR_PTR(err);
}
@@ -1386,19 +1386,19 @@ static int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
/* See IBA 10.6.7.2.6 */
if (atomic_read(&mr->num_mw) > 0) {
err = -EINVAL;
- rxe_dbg_mr(mr, "mr has mw's bound");
+ rxe_dbg_mr(mr, "mr has mw's bound\n");
goto err_out;
}
cleanup_err = rxe_cleanup(mr);
if (cleanup_err)
- rxe_err_mr(mr, "cleanup failed, err = %d", cleanup_err);
+ rxe_err_mr(mr, "cleanup failed, err = %d\n", cleanup_err);
kfree_rcu_mightsleep(mr);
return 0;
err_out:
- rxe_err_mr(mr, "returned err = %d", err);
+ rxe_err_mr(mr, "returned err = %d\n", err);
return err;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 319d4288eddd..8a4ab9ff0a68 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -287,8 +287,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
ah = ipoib_create_ah(dev, priv->pd, &av);
if (IS_ERR(ah)) {
- ipoib_warn(priv, "ib_address_create failed %ld\n",
- -PTR_ERR(ah));
+ ipoib_warn(priv, "ib_address_create failed %pe\n", ah);
/* use original error */
return PTR_ERR(ah);
}
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index d3c436ead694..4aa80c9388f0 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -133,7 +133,7 @@ static ssize_t mpath_policy_store(struct device *dev,
/* distinguish "mi" and "min-latency" with length */
len = strnlen(buf, NAME_MAX);
- if (buf[len - 1] == '\n')
+ if (len && buf[len - 1] == '\n')
len--;
if (!strncasecmp(buf, "round-robin", 11) ||
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 040234c01be4..9632afbd727b 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3209,7 +3209,6 @@ static int srpt_add_one(struct ib_device *device)
INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
srpt_event_handler);
- ib_register_event_handler(&sdev->event_handler);
for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
sport = &sdev->port[i - 1];
@@ -3232,6 +3231,7 @@ static int srpt_add_one(struct ib_device *device)
}
}
+ ib_register_event_handler(&sdev->event_handler);
spin_lock(&srpt_dev_lock);
list_add_tail(&sdev->list, &srpt_dev_list);
spin_unlock(&srpt_dev_lock);
@@ -3242,7 +3242,6 @@ static int srpt_add_one(struct ib_device *device)
err_port:
srpt_unregister_mad_agent(sdev, i);
- ib_unregister_event_handler(&sdev->event_handler);
err_cm:
if (sdev->cm_id)
ib_destroy_cm_id(sdev->cm_id);
diff --git a/drivers/input/keyboard/amikbd.c b/drivers/input/keyboard/amikbd.c
index e305c44cd0aa..ecfae0b0b6aa 100644
--- a/drivers/input/keyboard/amikbd.c
+++ b/drivers/input/keyboard/amikbd.c
@@ -26,7 +26,7 @@ MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Amiga keyboard driver");
MODULE_LICENSE("GPL");
-#ifdef CONFIG_HW_CONSOLE
+#ifdef CONFIG_VT
static unsigned char amikbd_keycode[0x78] __initdata = {
[0] = KEY_GRAVE,
[1] = KEY_1,
@@ -148,9 +148,9 @@ static void __init amikbd_init_console_keymaps(void)
memcpy(key_maps[i], temp_map, sizeof(temp_map));
}
}
-#else /* !CONFIG_HW_CONSOLE */
+#else /* !CONFIG_VT */
static inline void amikbd_init_console_keymaps(void) {}
-#endif /* !CONFIG_HW_CONSOLE */
+#endif /* !CONFIG_VT */
static const char *amikbd_messages[8] = {
[0] = KERN_ALERT "amikbd: Ctrl-Amiga-Amiga reset warning!!\n",
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 50bac2d79d9b..5d1010cafed8 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -343,7 +343,7 @@ EXPORT_SYMBOL_GPL(icc_std_aggregate);
* an array of icc nodes specified in the icc_onecell_data struct when
* registering the provider.
*/
-struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+struct icc_node *of_icc_xlate_onecell(const struct of_phandle_args *spec,
void *data)
{
struct icc_onecell_data *icc_data = data;
@@ -368,7 +368,7 @@ EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
* Returns a valid pointer to struct icc_node_data on success or ERR_PTR()
* on failure.
*/
-struct icc_node_data *of_icc_get_from_provider(struct of_phandle_args *spec)
+struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spec)
{
struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
struct icc_node_data *data = NULL;
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 697f96c49f6f..1446a839184e 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -8,6 +8,15 @@ config INTERCONNECT_QCOM
config INTERCONNECT_QCOM_BCM_VOTER
tristate
+config INTERCONNECT_QCOM_MSM8909
+ tristate "Qualcomm MSM8909 interconnect driver"
+ depends on INTERCONNECT_QCOM
+ depends on QCOM_SMD_RPM
+ select INTERCONNECT_QCOM_SMD_RPM
+ help
+ This is a driver for the Qualcomm Network-on-Chip on msm8909-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8916
tristate "Qualcomm MSM8916 interconnect driver"
depends on INTERCONNECT_QCOM
@@ -209,6 +218,15 @@ config INTERCONNECT_QCOM_SM6350
This is a driver for the Qualcomm Network-on-Chip on sm6350-based
platforms.
+config INTERCONNECT_QCOM_SM7150
+ tristate "Qualcomm SM7150 interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on sm7150-based
+ platforms.
+
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index 704846165022..2ea3113d0a4d 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
+qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
qnoc-msm8939-objs := msm8939.o
qnoc-msm8974-objs := msm8974.o
@@ -26,6 +27,7 @@ qnoc-sdx65-objs := sdx65.o
qnoc-sdx75-objs := sdx75.o
qnoc-sm6115-objs := sm6115.o
qnoc-sm6350-objs := sm6350.o
+qnoc-sm7150-objs := sm7150.o
qnoc-sm8150-objs := sm8150.o
qnoc-sm8250-objs := sm8250.o
qnoc-sm8350-objs := sm8350.o
@@ -36,6 +38,7 @@ qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
+obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8939) += qnoc-msm8939.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8974) += qnoc-msm8974.o
@@ -58,6 +61,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o
obj-$(CONFIG_INTERCONNECT_QCOM_SDX75) += qnoc-sdx75.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM6115) += qnoc-sm6115.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o
+obj-$(CONFIG_INTERCONNECT_QCOM_SM7150) += qnoc-sm7150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o
obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o
diff --git a/drivers/interconnect/qcom/icc-common.c b/drivers/interconnect/qcom/icc-common.c
index f27f4fdc4531..9b9ee113f172 100644
--- a/drivers/interconnect/qcom/icc-common.c
+++ b/drivers/interconnect/qcom/icc-common.c
@@ -9,7 +9,8 @@
#include "icc-common.h"
-struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+struct icc_node_data *qcom_icc_xlate_extended(const struct of_phandle_args *spec,
+ void *data)
{
struct icc_node_data *ndata;
struct icc_node *node;
diff --git a/drivers/interconnect/qcom/icc-common.h b/drivers/interconnect/qcom/icc-common.h
index 33bb2c38dff3..21c39b163948 100644
--- a/drivers/interconnect/qcom/icc-common.h
+++ b/drivers/interconnect/qcom/icc-common.h
@@ -8,6 +8,7 @@
#include <linux/interconnect-provider.h>
-struct icc_node_data *qcom_icc_xlate_extended(struct of_phandle_args *spec, void *data);
+struct icc_node_data *qcom_icc_xlate_extended(const struct of_phandle_args *spec,
+ void *data);
#endif
diff --git a/drivers/interconnect/qcom/msm8909.c b/drivers/interconnect/qcom/msm8909.c
new file mode 100644
index 000000000000..0d0cd7282f5b
--- /dev/null
+++ b/drivers/interconnect/qcom/msm8909.c
@@ -0,0 +1,1329 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Based on data from msm8909-bus.dtsi in Qualcomm's msm-3.18 release:
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/interconnect/qcom,msm8909.h>
+
+#include "icc-rpm.h"
+
+enum {
+ QNOC_MASTER_AMPSS_M0 = 1,
+ QNOC_MASTER_GRAPHICS_3D,
+ QNOC_SNOC_BIMC_0_MAS,
+ QNOC_SNOC_BIMC_1_MAS,
+ QNOC_MASTER_TCU_0,
+ QNOC_MASTER_TCU_1,
+ QNOC_MASTER_AUDIO,
+ QNOC_MASTER_SPDM,
+ QNOC_MASTER_DEHR,
+ QNOC_MASTER_QPIC,
+ QNOC_MASTER_BLSP_1,
+ QNOC_MASTER_USB_HS,
+ QNOC_MASTER_CRYPTO_CORE0,
+ QNOC_MASTER_SDCC_1,
+ QNOC_MASTER_SDCC_2,
+ QNOC_SNOC_PNOC_MAS,
+ QNOC_MASTER_QDSS_BAM,
+ QNOC_BIMC_SNOC_MAS,
+ QNOC_MASTER_MDP_PORT0,
+ QNOC_PNOC_SNOC_MAS,
+ QNOC_MASTER_VIDEO_P0,
+ QNOC_MASTER_VFE,
+ QNOC_MASTER_QDSS_ETR,
+ QNOC_PNOC_M_0,
+ QNOC_PNOC_M_1,
+ QNOC_PNOC_INT_0,
+ QNOC_PNOC_INT_1,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_4,
+ QNOC_PNOC_SLV_5,
+ QNOC_PNOC_SLV_7,
+ QNOC_SNOC_MM_INT_0,
+ QNOC_SNOC_MM_INT_1,
+ QNOC_SNOC_MM_INT_2,
+ QNOC_SNOC_MM_INT_BIMC,
+ QNOC_SNOC_QDSS_INT,
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_BIMC,
+ QNOC_SLAVE_EBI_CH0,
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_BLSP_1,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_QPIC,
+ QNOC_SLAVE_SPDM,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_AUDIO,
+ QNOC_SLAVE_DEHR_CFG,
+ QNOC_SLAVE_SNOC_CFG,
+ QNOC_SLAVE_QDSS_CFG,
+ QNOC_SLAVE_USB_PHYS_CFG,
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG,
+ QNOC_SLAVE_TLMM,
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_IMEM_CFG,
+ QNOC_SLAVE_BIMC_CFG,
+ QNOC_SLAVE_PMIC_ARB,
+ QNOC_SLAVE_TCU,
+ QNOC_PNOC_SNOC_SLV,
+ QNOC_SLAVE_APPSS,
+ QNOC_SNOC_BIMC_0_SLV,
+ QNOC_SNOC_BIMC_1_SLV,
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SNOC_PNOC_SLV,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_OCMEM_64,
+};
+
+static const u16 mas_apps_proc_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_apps_proc = {
+ .name = "mas_apps_proc",
+ .id = QNOC_MASTER_AMPSS_M0,
+ .buswidth = 8,
+ .mas_rpm_id = 0,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_apps_proc_links),
+ .links = mas_apps_proc_links,
+};
+
+static const u16 mas_oxili_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_oxili = {
+ .name = "mas_oxili",
+ .id = QNOC_MASTER_GRAPHICS_3D,
+ .buswidth = 8,
+ .mas_rpm_id = 6,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 2,
+ .num_links = ARRAY_SIZE(mas_oxili_links),
+ .links = mas_oxili_links,
+};
+
+static const u16 mas_snoc_bimc_0_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_0 = {
+ .name = "mas_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 3,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 3,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_0_links),
+ .links = mas_snoc_bimc_0_links,
+};
+
+static const u16 mas_snoc_bimc_1_links[] = {
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_snoc_bimc_1 = {
+ .name = "mas_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 76,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 4,
+ .num_links = ARRAY_SIZE(mas_snoc_bimc_1_links),
+ .links = mas_snoc_bimc_1_links,
+};
+
+static const u16 mas_tcu_0_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_tcu_0 = {
+ .name = "mas_tcu_0",
+ .id = QNOC_MASTER_TCU_0,
+ .buswidth = 8,
+ .mas_rpm_id = 102,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_tcu_0_links),
+ .links = mas_tcu_0_links,
+};
+
+static const u16 mas_tcu_1_links[] = {
+ QNOC_BIMC_SNOC_SLV,
+ QNOC_SLAVE_EBI_CH0
+};
+
+static struct qcom_icc_node mas_tcu_1 = {
+ .name = "mas_tcu_1",
+ .id = QNOC_MASTER_TCU_1,
+ .buswidth = 8,
+ .mas_rpm_id = 103,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 2,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(mas_tcu_1_links),
+ .links = mas_tcu_1_links,
+};
+
+static const u16 mas_audio_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_audio = {
+ .name = "mas_audio",
+ .id = QNOC_MASTER_AUDIO,
+ .buswidth = 4,
+ .mas_rpm_id = 78,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_audio_links),
+ .links = mas_audio_links,
+};
+
+static const u16 mas_spdm_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_spdm = {
+ .name = "mas_spdm",
+ .id = QNOC_MASTER_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = 50,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_spdm_links),
+ .links = mas_spdm_links,
+};
+
+static const u16 mas_dehr_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_dehr = {
+ .name = "mas_dehr",
+ .id = QNOC_MASTER_DEHR,
+ .buswidth = 4,
+ .mas_rpm_id = 48,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_dehr_links),
+ .links = mas_dehr_links,
+};
+
+static const u16 mas_qpic_links[] = {
+ QNOC_PNOC_M_0
+};
+
+static struct qcom_icc_node mas_qpic = {
+ .name = "mas_qpic",
+ .id = QNOC_MASTER_QPIC,
+ .buswidth = 4,
+ .mas_rpm_id = 58,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_qpic_links),
+ .links = mas_qpic_links,
+};
+
+static const u16 mas_blsp_1_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_blsp_1 = {
+ .name = "mas_blsp_1",
+ .id = QNOC_MASTER_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = 41,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_blsp_1_links),
+ .links = mas_blsp_1_links,
+};
+
+static const u16 mas_usb_hs_links[] = {
+ QNOC_PNOC_M_1
+};
+
+static struct qcom_icc_node mas_usb_hs = {
+ .name = "mas_usb_hs",
+ .id = QNOC_MASTER_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = 42,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_usb_hs_links),
+ .links = mas_usb_hs_links,
+};
+
+static const u16 mas_crypto_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_crypto = {
+ .name = "mas_crypto",
+ .id = QNOC_MASTER_CRYPTO_CORE0,
+ .buswidth = 8,
+ .mas_rpm_id = 23,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 0,
+ .num_links = ARRAY_SIZE(mas_crypto_links),
+ .links = mas_crypto_links,
+};
+
+static const u16 mas_sdcc_1_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_1 = {
+ .name = "mas_sdcc_1",
+ .id = QNOC_MASTER_SDCC_1,
+ .buswidth = 8,
+ .mas_rpm_id = 33,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_sdcc_1_links),
+ .links = mas_sdcc_1_links,
+};
+
+static const u16 mas_sdcc_2_links[] = {
+ QNOC_PNOC_INT_1
+};
+
+static struct qcom_icc_node mas_sdcc_2 = {
+ .name = "mas_sdcc_2",
+ .id = QNOC_MASTER_SDCC_2,
+ .buswidth = 8,
+ .mas_rpm_id = 35,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_sdcc_2_links),
+ .links = mas_sdcc_2_links,
+};
+
+static const u16 mas_snoc_pcnoc_links[] = {
+ QNOC_PNOC_INT_0
+};
+
+static struct qcom_icc_node mas_snoc_pcnoc = {
+ .name = "mas_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 77,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_snoc_pcnoc_links),
+ .links = mas_snoc_pcnoc_links,
+};
+
+static const u16 mas_qdss_bam_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_bam = {
+ .name = "mas_qdss_bam",
+ .id = QNOC_MASTER_QDSS_BAM,
+ .buswidth = 4,
+ .mas_rpm_id = 19,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 11,
+ .num_links = ARRAY_SIZE(mas_qdss_bam_links),
+ .links = mas_qdss_bam_links,
+};
+
+static const u16 mas_bimc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1
+};
+
+static struct qcom_icc_node mas_bimc_snoc = {
+ .name = "mas_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 21,
+ .slv_rpm_id = -1,
+ .num_links = ARRAY_SIZE(mas_bimc_snoc_links),
+ .links = mas_bimc_snoc_links,
+};
+
+static const u16 mas_mdp_links[] = {
+ QNOC_SNOC_MM_INT_1,
+ QNOC_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_mdp = {
+ .name = "mas_mdp",
+ .id = QNOC_MASTER_MDP_PORT0,
+ .buswidth = 16,
+ .mas_rpm_id = 8,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 7,
+ .num_links = ARRAY_SIZE(mas_mdp_links),
+ .links = mas_mdp_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mas_pcnoc_snoc_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_1,
+ QNOC_SNOC_INT_BIMC
+};
+
+static struct qcom_icc_node mas_pcnoc_snoc = {
+ .name = "mas_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_MAS,
+ .buswidth = 8,
+ .mas_rpm_id = 29,
+ .slv_rpm_id = -1,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(mas_pcnoc_snoc_links),
+ .links = mas_pcnoc_snoc_links,
+};
+
+static const u16 mas_venus_links[] = {
+ QNOC_SNOC_MM_INT_0,
+ QNOC_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_venus = {
+ .name = "mas_venus",
+ .id = QNOC_MASTER_VIDEO_P0,
+ .buswidth = 16,
+ .mas_rpm_id = 9,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 8,
+ .num_links = ARRAY_SIZE(mas_venus_links),
+ .links = mas_venus_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mas_vfe_links[] = {
+ QNOC_SNOC_MM_INT_1,
+ QNOC_SNOC_MM_INT_2
+};
+
+static struct qcom_icc_node mas_vfe = {
+ .name = "mas_vfe",
+ .id = QNOC_MASTER_VFE,
+ .buswidth = 16,
+ .mas_rpm_id = 11,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 9,
+ .num_links = ARRAY_SIZE(mas_vfe_links),
+ .links = mas_vfe_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mas_qdss_etr_links[] = {
+ QNOC_SNOC_QDSS_INT
+};
+
+static struct qcom_icc_node mas_qdss_etr = {
+ .name = "mas_qdss_etr",
+ .id = QNOC_MASTER_QDSS_ETR,
+ .buswidth = 8,
+ .mas_rpm_id = 31,
+ .slv_rpm_id = -1,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 1,
+ .qos.prio_level = 1,
+ .qos.qos_port = 10,
+ .num_links = ARRAY_SIZE(mas_qdss_etr_links),
+ .links = mas_qdss_etr_links,
+};
+
+static const u16 pcnoc_m_0_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_0 = {
+ .name = "pcnoc_m_0",
+ .id = QNOC_PNOC_M_0,
+ .buswidth = 8,
+ .mas_rpm_id = 87,
+ .slv_rpm_id = 116,
+ .qos.qos_mode = NOC_QOS_MODE_BYPASS,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 5,
+ .num_links = ARRAY_SIZE(pcnoc_m_0_links),
+ .links = pcnoc_m_0_links,
+};
+
+static const u16 pcnoc_m_1_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_m_1 = {
+ .name = "pcnoc_m_1",
+ .id = QNOC_PNOC_M_1,
+ .buswidth = 8,
+ .mas_rpm_id = 88,
+ .slv_rpm_id = 117,
+ .qos.qos_mode = NOC_QOS_MODE_FIXED,
+ .qos.areq_prio = 0,
+ .qos.prio_level = 0,
+ .qos.qos_port = 6,
+ .num_links = ARRAY_SIZE(pcnoc_m_1_links),
+ .links = pcnoc_m_1_links,
+};
+
+static const u16 pcnoc_int_0_links[] = {
+ QNOC_PNOC_SLV_3,
+ QNOC_PNOC_SLV_2,
+ QNOC_PNOC_SLV_1,
+ QNOC_PNOC_SLV_0,
+ QNOC_PNOC_SLV_7,
+ QNOC_PNOC_SLV_5,
+ QNOC_PNOC_SLV_4,
+ QNOC_SLAVE_TCU
+};
+
+static struct qcom_icc_node pcnoc_int_0 = {
+ .name = "pcnoc_int_0",
+ .id = QNOC_PNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 85,
+ .slv_rpm_id = 114,
+ .num_links = ARRAY_SIZE(pcnoc_int_0_links),
+ .links = pcnoc_int_0_links,
+};
+
+static const u16 pcnoc_int_1_links[] = {
+ QNOC_PNOC_SNOC_SLV
+};
+
+static struct qcom_icc_node pcnoc_int_1 = {
+ .name = "pcnoc_int_1",
+ .id = QNOC_PNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 86,
+ .slv_rpm_id = 115,
+ .num_links = ARRAY_SIZE(pcnoc_int_1_links),
+ .links = pcnoc_int_1_links,
+};
+
+static const u16 pcnoc_s_0_links[] = {
+ QNOC_SLAVE_SDCC_1,
+ QNOC_SLAVE_TCSR,
+ QNOC_SLAVE_BLSP_1
+};
+
+static struct qcom_icc_node pcnoc_s_0 = {
+ .name = "pcnoc_s_0",
+ .id = QNOC_PNOC_SLV_0,
+ .buswidth = 4,
+ .mas_rpm_id = 89,
+ .slv_rpm_id = 118,
+ .num_links = ARRAY_SIZE(pcnoc_s_0_links),
+ .links = pcnoc_s_0_links,
+};
+
+static const u16 pcnoc_s_1_links[] = {
+ QNOC_SLAVE_MESSAGE_RAM,
+ QNOC_SLAVE_CRYPTO_0_CFG,
+ QNOC_SLAVE_USB_HS,
+ QNOC_SLAVE_PDM,
+ QNOC_SLAVE_PRNG,
+ QNOC_SLAVE_QPIC
+};
+
+static struct qcom_icc_node pcnoc_s_1 = {
+ .name = "pcnoc_s_1",
+ .id = QNOC_PNOC_SLV_1,
+ .buswidth = 4,
+ .mas_rpm_id = 90,
+ .slv_rpm_id = 119,
+ .num_links = ARRAY_SIZE(pcnoc_s_1_links),
+ .links = pcnoc_s_1_links,
+};
+
+static const u16 pcnoc_s_2_links[] = {
+ QNOC_SLAVE_SPDM,
+ QNOC_SLAVE_SDCC_2,
+ QNOC_SLAVE_AUDIO,
+ QNOC_SLAVE_DEHR_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_2 = {
+ .name = "pcnoc_s_2",
+ .id = QNOC_PNOC_SLV_2,
+ .buswidth = 4,
+ .mas_rpm_id = 91,
+ .slv_rpm_id = 120,
+ .num_links = ARRAY_SIZE(pcnoc_s_2_links),
+ .links = pcnoc_s_2_links,
+};
+
+static const u16 pcnoc_s_3_links[] = {
+ QNOC_SLAVE_QDSS_CFG,
+ QNOC_SLAVE_USB_PHYS_CFG,
+ QNOC_SLAVE_SNOC_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_3 = {
+ .name = "pcnoc_s_3",
+ .id = QNOC_PNOC_SLV_3,
+ .buswidth = 4,
+ .mas_rpm_id = 92,
+ .slv_rpm_id = 121,
+ .num_links = ARRAY_SIZE(pcnoc_s_3_links),
+ .links = pcnoc_s_3_links,
+};
+
+static const u16 pcnoc_s_4_links[] = {
+ QNOC_SLAVE_CAMERA_CFG,
+ QNOC_SLAVE_DISPLAY_CFG,
+ QNOC_SLAVE_VENUS_CFG
+};
+
+static struct qcom_icc_node pcnoc_s_4 = {
+ .name = "pcnoc_s_4",
+ .id = QNOC_PNOC_SLV_4,
+ .buswidth = 4,
+ .mas_rpm_id = 93,
+ .slv_rpm_id = 122,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(pcnoc_s_4_links),
+ .links = pcnoc_s_4_links,
+};
+
+static const u16 pcnoc_s_5_links[] = {
+ QNOC_SLAVE_TLMM
+};
+
+static struct qcom_icc_node pcnoc_s_5 = {
+ .name = "pcnoc_s_5",
+ .id = QNOC_PNOC_SLV_5,
+ .buswidth = 4,
+ .mas_rpm_id = 129,
+ .slv_rpm_id = 189,
+ .num_links = ARRAY_SIZE(pcnoc_s_5_links),
+ .links = pcnoc_s_5_links,
+};
+
+static const u16 pcnoc_s_7_links[] = {
+ QNOC_SLAVE_GRAPHICS_3D_CFG,
+ QNOC_SLAVE_IMEM_CFG,
+ QNOC_SLAVE_BIMC_CFG,
+ QNOC_SLAVE_PMIC_ARB
+};
+
+static struct qcom_icc_node pcnoc_s_7 = {
+ .name = "pcnoc_s_7",
+ .id = QNOC_PNOC_SLV_7,
+ .buswidth = 4,
+ .mas_rpm_id = 95,
+ .slv_rpm_id = 124,
+ .num_links = ARRAY_SIZE(pcnoc_s_7_links),
+ .links = pcnoc_s_7_links,
+};
+
+static const u16 mm_int_0_links[] = {
+ QNOC_SNOC_MM_INT_BIMC
+};
+
+static struct qcom_icc_node mm_int_0 = {
+ .name = "mm_int_0",
+ .id = QNOC_SNOC_MM_INT_0,
+ .buswidth = 16,
+ .mas_rpm_id = 79,
+ .slv_rpm_id = 108,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_0_links),
+ .links = mm_int_0_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mm_int_1_links[] = {
+ QNOC_SNOC_MM_INT_BIMC
+};
+
+static struct qcom_icc_node mm_int_1 = {
+ .name = "mm_int_1",
+ .id = QNOC_SNOC_MM_INT_1,
+ .buswidth = 16,
+ .mas_rpm_id = 80,
+ .slv_rpm_id = 109,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_1_links),
+ .links = mm_int_1_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mm_int_2_links[] = {
+ QNOC_SNOC_INT_0
+};
+
+static struct qcom_icc_node mm_int_2 = {
+ .name = "mm_int_2",
+ .id = QNOC_SNOC_MM_INT_2,
+ .buswidth = 16,
+ .mas_rpm_id = 81,
+ .slv_rpm_id = 110,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_2_links),
+ .links = mm_int_2_links,
+ .ab_coeff = 167,
+};
+
+static const u16 mm_int_bimc_links[] = {
+ QNOC_SNOC_BIMC_1_SLV
+};
+
+static struct qcom_icc_node mm_int_bimc = {
+ .name = "mm_int_bimc",
+ .id = QNOC_SNOC_MM_INT_BIMC,
+ .buswidth = 16,
+ .mas_rpm_id = 82,
+ .slv_rpm_id = 111,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(mm_int_bimc_links),
+ .links = mm_int_bimc_links,
+ .ab_coeff = 167,
+};
+
+static const u16 qdss_int_links[] = {
+ QNOC_SNOC_INT_0,
+ QNOC_SNOC_INT_BIMC
+};
+
+static struct qcom_icc_node qdss_int = {
+ .name = "qdss_int",
+ .id = QNOC_SNOC_QDSS_INT,
+ .buswidth = 8,
+ .mas_rpm_id = 98,
+ .slv_rpm_id = 128,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(qdss_int_links),
+ .links = qdss_int_links,
+};
+
+static const u16 snoc_int_0_links[] = {
+ QNOC_SLAVE_SYSTEM_IMEM,
+ QNOC_SLAVE_QDSS_STM,
+ QNOC_SNOC_PNOC_SLV
+};
+
+static struct qcom_icc_node snoc_int_0 = {
+ .name = "snoc_int_0",
+ .id = QNOC_SNOC_INT_0,
+ .buswidth = 8,
+ .mas_rpm_id = 99,
+ .slv_rpm_id = 130,
+ .num_links = ARRAY_SIZE(snoc_int_0_links),
+ .links = snoc_int_0_links,
+};
+
+static const u16 snoc_int_1_links[] = {
+ QNOC_SLAVE_CATS_128,
+ QNOC_SLAVE_APPSS,
+ QNOC_SLAVE_OCMEM_64
+};
+
+static struct qcom_icc_node snoc_int_1 = {
+ .name = "snoc_int_1",
+ .id = QNOC_SNOC_INT_1,
+ .buswidth = 8,
+ .mas_rpm_id = 100,
+ .slv_rpm_id = 131,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(snoc_int_1_links),
+ .links = snoc_int_1_links,
+};
+
+static const u16 snoc_int_bimc_links[] = {
+ QNOC_SNOC_BIMC_0_SLV
+};
+
+static struct qcom_icc_node snoc_int_bimc = {
+ .name = "snoc_int_bimc",
+ .id = QNOC_SNOC_INT_BIMC,
+ .buswidth = 8,
+ .mas_rpm_id = 101,
+ .slv_rpm_id = 132,
+ .num_links = ARRAY_SIZE(snoc_int_bimc_links),
+ .links = snoc_int_bimc_links,
+};
+
+static struct qcom_icc_node slv_ebi = {
+ .name = "slv_ebi",
+ .id = QNOC_SLAVE_EBI_CH0,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 0,
+};
+
+static const u16 slv_bimc_snoc_links[] = {
+ QNOC_BIMC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_bimc_snoc = {
+ .name = "slv_bimc_snoc",
+ .id = QNOC_BIMC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 2,
+ .num_links = ARRAY_SIZE(slv_bimc_snoc_links),
+ .links = slv_bimc_snoc_links,
+};
+
+static struct qcom_icc_node slv_tcsr = {
+ .name = "slv_tcsr",
+ .id = QNOC_SLAVE_TCSR,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 50,
+};
+
+static struct qcom_icc_node slv_sdcc_1 = {
+ .name = "slv_sdcc_1",
+ .id = QNOC_SLAVE_SDCC_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 31,
+};
+
+static struct qcom_icc_node slv_blsp_1 = {
+ .name = "slv_blsp_1",
+ .id = QNOC_SLAVE_BLSP_1,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 39,
+};
+
+static struct qcom_icc_node slv_crypto_0_cfg = {
+ .name = "slv_crypto_0_cfg",
+ .id = QNOC_SLAVE_CRYPTO_0_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 52,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_message_ram = {
+ .name = "slv_message_ram",
+ .id = QNOC_SLAVE_MESSAGE_RAM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 55,
+};
+
+static struct qcom_icc_node slv_pdm = {
+ .name = "slv_pdm",
+ .id = QNOC_SLAVE_PDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 41,
+};
+
+static struct qcom_icc_node slv_prng = {
+ .name = "slv_prng",
+ .id = QNOC_SLAVE_PRNG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 44,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_usb_hs = {
+ .name = "slv_usb_hs",
+ .id = QNOC_SLAVE_USB_HS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 40,
+};
+
+static struct qcom_icc_node slv_qpic = {
+ .name = "slv_qpic",
+ .id = QNOC_SLAVE_QPIC,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 80,
+};
+
+static struct qcom_icc_node slv_spdm = {
+ .name = "slv_spdm",
+ .id = QNOC_SLAVE_SPDM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 60,
+};
+
+static struct qcom_icc_node slv_sdcc_2 = {
+ .name = "slv_sdcc_2",
+ .id = QNOC_SLAVE_SDCC_2,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 33,
+};
+
+static struct qcom_icc_node slv_audio = {
+ .name = "slv_audio",
+ .id = QNOC_SLAVE_AUDIO,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 105,
+};
+
+static struct qcom_icc_node slv_dehr_cfg = {
+ .name = "slv_dehr_cfg",
+ .id = QNOC_SLAVE_DEHR_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 61,
+};
+
+static struct qcom_icc_node slv_snoc_cfg = {
+ .name = "slv_snoc_cfg",
+ .id = QNOC_SLAVE_SNOC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 70,
+};
+
+static struct qcom_icc_node slv_qdss_cfg = {
+ .name = "slv_qdss_cfg",
+ .id = QNOC_SLAVE_QDSS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 63,
+};
+
+static struct qcom_icc_node slv_usb_phy = {
+ .name = "slv_usb_phy",
+ .id = QNOC_SLAVE_USB_PHYS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 95,
+};
+
+static struct qcom_icc_node slv_camera_ss_cfg = {
+ .name = "slv_camera_ss_cfg",
+ .id = QNOC_SLAVE_CAMERA_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 3,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_disp_ss_cfg = {
+ .name = "slv_disp_ss_cfg",
+ .id = QNOC_SLAVE_DISPLAY_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 4,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_venus_cfg = {
+ .name = "slv_venus_cfg",
+ .id = QNOC_SLAVE_VENUS_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 10,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_tlmm = {
+ .name = "slv_tlmm",
+ .id = QNOC_SLAVE_TLMM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 51,
+};
+
+static struct qcom_icc_node slv_gpu_cfg = {
+ .name = "slv_gpu_cfg",
+ .id = QNOC_SLAVE_GRAPHICS_3D_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 11,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_imem_cfg = {
+ .name = "slv_imem_cfg",
+ .id = QNOC_SLAVE_IMEM_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 54,
+};
+
+static struct qcom_icc_node slv_bimc_cfg = {
+ .name = "slv_bimc_cfg",
+ .id = QNOC_SLAVE_BIMC_CFG,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 56,
+};
+
+static struct qcom_icc_node slv_pmic_arb = {
+ .name = "slv_pmic_arb",
+ .id = QNOC_SLAVE_PMIC_ARB,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 59,
+};
+
+static struct qcom_icc_node slv_tcu = {
+ .name = "slv_tcu",
+ .id = QNOC_SLAVE_TCU,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 133,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_pcnoc_snoc_links[] = {
+ QNOC_PNOC_SNOC_MAS
+};
+
+static struct qcom_icc_node slv_pcnoc_snoc = {
+ .name = "slv_pcnoc_snoc",
+ .id = QNOC_PNOC_SNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 45,
+ .num_links = ARRAY_SIZE(slv_pcnoc_snoc_links),
+ .links = slv_pcnoc_snoc_links,
+};
+
+static struct qcom_icc_node slv_kpss_ahb = {
+ .name = "slv_kpss_ahb",
+ .id = QNOC_SLAVE_APPSS,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 20,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static const u16 slv_snoc_bimc_0_links[] = {
+ QNOC_SNOC_BIMC_0_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_0 = {
+ .name = "slv_snoc_bimc_0",
+ .id = QNOC_SNOC_BIMC_0_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 24,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_0_links),
+ .links = slv_snoc_bimc_0_links,
+};
+
+static const u16 slv_snoc_bimc_1_links[] = {
+ QNOC_SNOC_BIMC_1_MAS
+};
+
+static struct qcom_icc_node slv_snoc_bimc_1 = {
+ .name = "slv_snoc_bimc_1",
+ .id = QNOC_SNOC_BIMC_1_SLV,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 104,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+ .num_links = ARRAY_SIZE(slv_snoc_bimc_1_links),
+ .links = slv_snoc_bimc_1_links,
+};
+
+static struct qcom_icc_node slv_imem = {
+ .name = "slv_imem",
+ .id = QNOC_SLAVE_SYSTEM_IMEM,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 26,
+};
+
+static const u16 slv_snoc_pcnoc_links[] = {
+ QNOC_SNOC_PNOC_MAS
+};
+
+static struct qcom_icc_node slv_snoc_pcnoc = {
+ .name = "slv_snoc_pcnoc",
+ .id = QNOC_SNOC_PNOC_SLV,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 28,
+ .num_links = ARRAY_SIZE(slv_snoc_pcnoc_links),
+ .links = slv_snoc_pcnoc_links,
+};
+
+static struct qcom_icc_node slv_qdss_stm = {
+ .name = "slv_qdss_stm",
+ .id = QNOC_SLAVE_QDSS_STM,
+ .buswidth = 4,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 30,
+};
+
+static struct qcom_icc_node slv_cats_0 = {
+ .name = "slv_cats_0",
+ .id = QNOC_SLAVE_CATS_128,
+ .buswidth = 16,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 106,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node slv_cats_1 = {
+ .name = "slv_cats_1",
+ .id = QNOC_SLAVE_OCMEM_64,
+ .buswidth = 8,
+ .mas_rpm_id = -1,
+ .slv_rpm_id = 107,
+ .qos.ap_owned = true,
+ .qos.qos_mode = NOC_QOS_MODE_INVALID,
+};
+
+static struct qcom_icc_node * const msm8909_bimc_nodes[] = {
+ [MAS_APPS_PROC] = &mas_apps_proc,
+ [MAS_OXILI] = &mas_oxili,
+ [MAS_SNOC_BIMC_0] = &mas_snoc_bimc_0,
+ [MAS_SNOC_BIMC_1] = &mas_snoc_bimc_1,
+ [MAS_TCU_0] = &mas_tcu_0,
+ [MAS_TCU_1] = &mas_tcu_1,
+ [SLV_EBI] = &slv_ebi,
+ [SLV_BIMC_SNOC] = &slv_bimc_snoc,
+};
+
+static const struct regmap_config msm8909_bimc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x62000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8909_bimc = {
+ .type = QCOM_ICC_BIMC,
+ .nodes = msm8909_bimc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8909_bimc_nodes),
+ .bus_clk_desc = &bimc_clk,
+ .regmap_cfg = &msm8909_bimc_regmap_config,
+ .qos_offset = 0x8000,
+ .ab_coeff = 154,
+};
+
+static struct qcom_icc_node * const msm8909_pcnoc_nodes[] = {
+ [MAS_AUDIO] = &mas_audio,
+ [MAS_SPDM] = &mas_spdm,
+ [MAS_DEHR] = &mas_dehr,
+ [MAS_QPIC] = &mas_qpic,
+ [MAS_BLSP_1] = &mas_blsp_1,
+ [MAS_USB_HS] = &mas_usb_hs,
+ [MAS_CRYPTO] = &mas_crypto,
+ [MAS_SDCC_1] = &mas_sdcc_1,
+ [MAS_SDCC_2] = &mas_sdcc_2,
+ [MAS_SNOC_PCNOC] = &mas_snoc_pcnoc,
+ [PCNOC_M_0] = &pcnoc_m_0,
+ [PCNOC_M_1] = &pcnoc_m_1,
+ [PCNOC_INT_0] = &pcnoc_int_0,
+ [PCNOC_INT_1] = &pcnoc_int_1,
+ [PCNOC_S_0] = &pcnoc_s_0,
+ [PCNOC_S_1] = &pcnoc_s_1,
+ [PCNOC_S_2] = &pcnoc_s_2,
+ [PCNOC_S_3] = &pcnoc_s_3,
+ [PCNOC_S_4] = &pcnoc_s_4,
+ [PCNOC_S_5] = &pcnoc_s_5,
+ [PCNOC_S_7] = &pcnoc_s_7,
+ [SLV_TCSR] = &slv_tcsr,
+ [SLV_SDCC_1] = &slv_sdcc_1,
+ [SLV_BLSP_1] = &slv_blsp_1,
+ [SLV_CRYPTO_0_CFG] = &slv_crypto_0_cfg,
+ [SLV_MESSAGE_RAM] = &slv_message_ram,
+ [SLV_PDM] = &slv_pdm,
+ [SLV_PRNG] = &slv_prng,
+ [SLV_USB_HS] = &slv_usb_hs,
+ [SLV_QPIC] = &slv_qpic,
+ [SLV_SPDM] = &slv_spdm,
+ [SLV_SDCC_2] = &slv_sdcc_2,
+ [SLV_AUDIO] = &slv_audio,
+ [SLV_DEHR_CFG] = &slv_dehr_cfg,
+ [SLV_SNOC_CFG] = &slv_snoc_cfg,
+ [SLV_QDSS_CFG] = &slv_qdss_cfg,
+ [SLV_USB_PHY] = &slv_usb_phy,
+ [SLV_CAMERA_SS_CFG] = &slv_camera_ss_cfg,
+ [SLV_DISP_SS_CFG] = &slv_disp_ss_cfg,
+ [SLV_VENUS_CFG] = &slv_venus_cfg,
+ [SLV_TLMM] = &slv_tlmm,
+ [SLV_GPU_CFG] = &slv_gpu_cfg,
+ [SLV_IMEM_CFG] = &slv_imem_cfg,
+ [SLV_BIMC_CFG] = &slv_bimc_cfg,
+ [SLV_PMIC_ARB] = &slv_pmic_arb,
+ [SLV_TCU] = &slv_tcu,
+ [SLV_PCNOC_SNOC] = &slv_pcnoc_snoc,
+};
+
+static const struct regmap_config msm8909_pcnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x11000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8909_pcnoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8909_pcnoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8909_pcnoc_nodes),
+ .bus_clk_desc = &bus_0_clk,
+ .regmap_cfg = &msm8909_pcnoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static struct qcom_icc_node * const msm8909_snoc_nodes[] = {
+ [MAS_QDSS_BAM] = &mas_qdss_bam,
+ [MAS_BIMC_SNOC] = &mas_bimc_snoc,
+ [MAS_MDP] = &mas_mdp,
+ [MAS_PCNOC_SNOC] = &mas_pcnoc_snoc,
+ [MAS_VENUS] = &mas_venus,
+ [MAS_VFE] = &mas_vfe,
+ [MAS_QDSS_ETR] = &mas_qdss_etr,
+ [MM_INT_0] = &mm_int_0,
+ [MM_INT_1] = &mm_int_1,
+ [MM_INT_2] = &mm_int_2,
+ [MM_INT_BIMC] = &mm_int_bimc,
+ [QDSS_INT] = &qdss_int,
+ [SNOC_INT_0] = &snoc_int_0,
+ [SNOC_INT_1] = &snoc_int_1,
+ [SNOC_INT_BIMC] = &snoc_int_bimc,
+ [SLV_KPSS_AHB] = &slv_kpss_ahb,
+ [SLV_SNOC_BIMC_0] = &slv_snoc_bimc_0,
+ [SLV_SNOC_BIMC_1] = &slv_snoc_bimc_1,
+ [SLV_IMEM] = &slv_imem,
+ [SLV_SNOC_PCNOC] = &slv_snoc_pcnoc,
+ [SLV_QDSS_STM] = &slv_qdss_stm,
+ [SLV_CATS_0] = &slv_cats_0,
+ [SLV_CATS_1] = &slv_cats_1,
+};
+
+static const struct regmap_config msm8909_snoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x13000,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc msm8909_snoc = {
+ .type = QCOM_ICC_NOC,
+ .nodes = msm8909_snoc_nodes,
+ .num_nodes = ARRAY_SIZE(msm8909_snoc_nodes),
+ .bus_clk_desc = &bus_1_clk,
+ .regmap_cfg = &msm8909_snoc_regmap_config,
+ .qos_offset = 0x7000,
+};
+
+static const struct of_device_id msm8909_noc_of_match[] = {
+ { .compatible = "qcom,msm8909-bimc", .data = &msm8909_bimc },
+ { .compatible = "qcom,msm8909-pcnoc", .data = &msm8909_pcnoc },
+ { .compatible = "qcom,msm8909-snoc", .data = &msm8909_snoc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, msm8909_noc_of_match);
+
+static struct platform_driver msm8909_noc_driver = {
+ .probe = qnoc_probe,
+ .remove_new = qnoc_remove,
+ .driver = {
+ .name = "qnoc-msm8909",
+ .of_match_table = msm8909_noc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+module_platform_driver(msm8909_noc_driver);
+
+MODULE_DESCRIPTION("Qualcomm MSM8909 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sa8775p.c b/drivers/interconnect/qcom/sa8775p.c
index dd6281db08ad..a729775c2aa4 100644
--- a/drivers/interconnect/qcom/sa8775p.c
+++ b/drivers/interconnect/qcom/sa8775p.c
@@ -2092,11 +2092,11 @@ static struct qcom_icc_bcm bcm_sn10 = {
.nodes = { &xs_qdss_stm },
};
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
&bcm_sn3,
};
-static struct qcom_icc_node *aggre1_noc_nodes[] = {
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
[MASTER_QUP_3] = &qxm_qup3,
[MASTER_EMAC] = &xm_emac_0,
[MASTER_EMAC_1] = &xm_emac_1,
@@ -2115,12 +2115,12 @@ static const struct qcom_icc_desc sa8775p_aggre1_noc = {
.num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
};
-static struct qcom_icc_bcm *aggre2_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
&bcm_ce0,
&bcm_sn4,
};
-static struct qcom_icc_node *aggre2_noc_nodes[] = {
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
[MASTER_QDSS_BAM] = &qhm_qdss_bam,
[MASTER_QUP_0] = &qhm_qup0,
[MASTER_QUP_1] = &qhm_qup1,
@@ -2142,13 +2142,13 @@ static const struct qcom_icc_desc sa8775p_aggre2_noc = {
.num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
};
-static struct qcom_icc_bcm *clk_virt_bcms[] = {
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
&bcm_qup0,
&bcm_qup1,
&bcm_qup2,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
@@ -2166,7 +2166,7 @@ static const struct qcom_icc_desc sa8775p_clk_virt = {
.num_bcms = ARRAY_SIZE(clk_virt_bcms),
};
-static struct qcom_icc_bcm *config_noc_bcms[] = {
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
&bcm_cn0,
&bcm_cn1,
&bcm_cn2,
@@ -2175,7 +2175,7 @@ static struct qcom_icc_bcm *config_noc_bcms[] = {
&bcm_sn10,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc,
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie,
[SLAVE_AHB2PHY_0] = &qhs_ahb2phy0,
@@ -2271,10 +2271,10 @@ static const struct qcom_icc_desc sa8775p_config_noc = {
.num_bcms = ARRAY_SIZE(config_noc_bcms),
};
-static struct qcom_icc_bcm *dc_noc_bcms[] = {
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
};
-static struct qcom_icc_node *dc_noc_nodes[] = {
+static struct qcom_icc_node * const dc_noc_nodes[] = {
[MASTER_CNOC_DC_NOC] = &qnm_cnoc_dc_noc,
[SLAVE_LLCC_CFG] = &qhs_llcc,
[SLAVE_GEM_NOC_CFG] = &qns_gemnoc,
@@ -2287,12 +2287,12 @@ static const struct qcom_icc_desc sa8775p_dc_noc = {
.num_bcms = ARRAY_SIZE(dc_noc_bcms),
};
-static struct qcom_icc_bcm *gem_noc_bcms[] = {
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh2,
};
-static struct qcom_icc_node *gem_noc_nodes[] = {
+static struct qcom_icc_node * const gem_noc_nodes[] = {
[MASTER_GPU_TCU] = &alm_gpu_tcu,
[MASTER_PCIE_TCU] = &alm_pcie_tcu,
[MASTER_SYS_TCU] = &alm_sys_tcu,
@@ -2323,12 +2323,12 @@ static const struct qcom_icc_desc sa8775p_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *gpdsp_anoc_bcms[] = {
+static struct qcom_icc_bcm * const gpdsp_anoc_bcms[] = {
&bcm_gna0,
&bcm_gnb0,
};
-static struct qcom_icc_node *gpdsp_anoc_nodes[] = {
+static struct qcom_icc_node * const gpdsp_anoc_nodes[] = {
[MASTER_DSP0] = &qxm_dsp0,
[MASTER_DSP1] = &qxm_dsp1,
[SLAVE_GP_DSP_SAIL_NOC] = &qns_gp_dsp_sail_noc,
@@ -2341,11 +2341,11 @@ static const struct qcom_icc_desc sa8775p_gpdsp_anoc = {
.num_bcms = ARRAY_SIZE(gpdsp_anoc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *lpass_ag_noc_nodes[] = {
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
[MASTER_CNOC_LPASS_AG_NOC] = &qhm_config_noc,
[MASTER_LPASS_PROC] = &qxm_lpass_dsp,
[SLAVE_LPASS_CORE_CFG] = &qhs_lpass_core,
@@ -2364,12 +2364,12 @@ static const struct qcom_icc_desc sa8775p_lpass_ag_noc = {
.num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms),
};
-static struct qcom_icc_bcm *mc_virt_bcms[] = {
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
};
-static struct qcom_icc_node *mc_virt_nodes[] = {
+static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
};
@@ -2381,12 +2381,12 @@ static const struct qcom_icc_desc sa8775p_mc_virt = {
.num_bcms = ARRAY_SIZE(mc_virt_bcms),
};
-static struct qcom_icc_bcm *mmss_noc_bcms[] = {
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
};
-static struct qcom_icc_node *mmss_noc_nodes[] = {
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
[MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
[MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
@@ -2413,12 +2413,12 @@ static const struct qcom_icc_desc sa8775p_mmss_noc = {
.num_bcms = ARRAY_SIZE(mmss_noc_bcms),
};
-static struct qcom_icc_bcm *nspa_noc_bcms[] = {
+static struct qcom_icc_bcm * const nspa_noc_bcms[] = {
&bcm_nsa0,
&bcm_nsa1,
};
-static struct qcom_icc_node *nspa_noc_nodes[] = {
+static struct qcom_icc_node * const nspa_noc_nodes[] = {
[MASTER_CDSP_NOC_CFG] = &qhm_nsp_noc_config,
[MASTER_CDSP_PROC] = &qxm_nsp,
[SLAVE_HCP_A] = &qns_hcp,
@@ -2433,12 +2433,12 @@ static const struct qcom_icc_desc sa8775p_nspa_noc = {
.num_bcms = ARRAY_SIZE(nspa_noc_bcms),
};
-static struct qcom_icc_bcm *nspb_noc_bcms[] = {
+static struct qcom_icc_bcm * const nspb_noc_bcms[] = {
&bcm_nsb0,
&bcm_nsb1,
};
-static struct qcom_icc_node *nspb_noc_nodes[] = {
+static struct qcom_icc_node * const nspb_noc_nodes[] = {
[MASTER_CDSPB_NOC_CFG] = &qhm_nspb_noc_config,
[MASTER_CDSP_PROC_B] = &qxm_nspb,
[SLAVE_CDSPB_MEM_NOC] = &qns_nspb_gemnoc,
@@ -2453,11 +2453,11 @@ static const struct qcom_icc_desc sa8775p_nspb_noc = {
.num_bcms = ARRAY_SIZE(nspb_noc_bcms),
};
-static struct qcom_icc_bcm *pcie_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_anoc_bcms[] = {
&bcm_pci0,
};
-static struct qcom_icc_node *pcie_anoc_nodes[] = {
+static struct qcom_icc_node * const pcie_anoc_nodes[] = {
[MASTER_PCIE_0] = &xm_pcie3_0,
[MASTER_PCIE_1] = &xm_pcie3_1,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
@@ -2470,7 +2470,7 @@ static const struct qcom_icc_desc sa8775p_pcie_anoc = {
.num_bcms = ARRAY_SIZE(pcie_anoc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn1,
&bcm_sn3,
@@ -2478,7 +2478,7 @@ static struct qcom_icc_bcm *system_noc_bcms[] = {
&bcm_sn9,
};
-static struct qcom_icc_node *system_noc_nodes[] = {
+static struct qcom_icc_node * const system_noc_nodes[] = {
[MASTER_GIC_AHB] = &qhm_gic,
[MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
[MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
diff --git a/drivers/interconnect/qcom/sm6115.c b/drivers/interconnect/qcom/sm6115.c
index 88b67634aa2f..7e15ddf0a80a 100644
--- a/drivers/interconnect/qcom/sm6115.c
+++ b/drivers/interconnect/qcom/sm6115.c
@@ -1193,7 +1193,7 @@ static struct qcom_icc_node slv_anoc_snoc = {
.links = slv_anoc_snoc_links,
};
-static struct qcom_icc_node *bimc_nodes[] = {
+static struct qcom_icc_node * const bimc_nodes[] = {
[MASTER_AMPSS_M0] = &apps_proc,
[MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt,
[MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt,
@@ -1223,7 +1223,7 @@ static const struct qcom_icc_desc sm6115_bimc = {
.ab_coeff = 153,
};
-static struct qcom_icc_node *config_noc_nodes[] = {
+static struct qcom_icc_node * const config_noc_nodes[] = {
[SNOC_CNOC_MAS] = &mas_snoc_cnoc,
[MASTER_QDSS_DAP] = &xm_dap,
[SLAVE_AHB2PHY_USB] = &qhs_ahb2phy_usb,
@@ -1294,7 +1294,7 @@ static const struct qcom_icc_desc sm6115_config_noc = {
.keep_alive = true,
};
-static struct qcom_icc_node *sys_noc_nodes[] = {
+static struct qcom_icc_node * const sys_noc_nodes[] = {
[MASTER_CRYPTO_CORE0] = &crypto_c0,
[MASTER_SNOC_CFG] = &qhm_snoc_cfg,
[MASTER_TIC] = &qhm_tic,
@@ -1339,7 +1339,7 @@ static const struct qcom_icc_desc sm6115_sys_noc = {
.keep_alive = true,
};
-static struct qcom_icc_node *clk_virt_nodes[] = {
+static struct qcom_icc_node * const clk_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[SLAVE_QUP_CORE_0] = &qup0_core_slave,
};
@@ -1353,7 +1353,7 @@ static const struct qcom_icc_desc sm6115_clk_virt = {
.keep_alive = true,
};
-static struct qcom_icc_node *mmnrt_virt_nodes[] = {
+static struct qcom_icc_node * const mmnrt_virt_nodes[] = {
[MASTER_CAMNOC_SF] = &qnm_camera_nrt,
[MASTER_VIDEO_P0] = &qxm_venus0,
[MASTER_VIDEO_PROC] = &qxm_venus_cpu,
@@ -1370,7 +1370,7 @@ static const struct qcom_icc_desc sm6115_mmnrt_virt = {
.ab_coeff = 142,
};
-static struct qcom_icc_node *mmrt_virt_nodes[] = {
+static struct qcom_icc_node * const mmrt_virt_nodes[] = {
[MASTER_CAMNOC_HF] = &qnm_camera_rt,
[MASTER_MDP_PORT0] = &qxm_mdp0,
[SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt,
diff --git a/drivers/interconnect/qcom/sm7150.c b/drivers/interconnect/qcom/sm7150.c
new file mode 100644
index 000000000000..dc0d1343f510
--- /dev/null
+++ b/drivers/interconnect/qcom/sm7150.c
@@ -0,0 +1,1754 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <dt-bindings/interconnect/qcom,sm7150-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+#include "sm7150.h"
+
+static struct qcom_icc_node qhm_a1noc_cfg = {
+ .name = "qhm-a1noc-cfg",
+ .id = SM7150_MASTER_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_A1NOC },
+};
+
+static struct qcom_icc_node qhm_qup_center = {
+ .name = "qhm_qup_center",
+ .id = SM7150_MASTER_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_tsif = {
+ .name = "qhm_tsif",
+ .id = SM7150_MASTER_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_emmc = {
+ .name = "xm_emmc",
+ .id = SM7150_MASTER_EMMC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .id = SM7150_MASTER_SDCC_2,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .id = SM7150_MASTER_SDCC_4,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .id = SM7150_MASTER_UFS_MEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_a2noc_cfg = {
+ .name = "qhm_a2noc_cfg",
+ .id = SM7150_MASTER_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_A2NOC },
+};
+
+static struct qcom_icc_node qhm_qdss_bam = {
+ .name = "qhm_qdss_bam",
+ .id = SM7150_MASTER_QDSS_BAM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qhm_qup_north = {
+ .name = "qhm_qup_north",
+ .id = SM7150_MASTER_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qnm_cnoc = {
+ .name = "qnm_cnoc",
+ .id = SM7150_MASTER_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .id = SM7150_MASTER_CRYPTO_CORE_0,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_ipa = {
+ .name = "qxm_ipa",
+ .id = SM7150_MASTER_IPA,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_pcie3_0 = {
+ .name = "xm_pcie3_0",
+ .id = SM7150_MASTER_PCIE,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_ANOC_PCIE_GEM_NOC },
+};
+
+static struct qcom_icc_node xm_qdss_etr = {
+ .name = "xm_qdss_etr",
+ .id = SM7150_MASTER_QDSS_ETR,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .id = SM7150_MASTER_USB3,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_SLV },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf0_uncomp = {
+ .name = "qxm_camnoc_hf0_uncomp",
+ .id = SM7150_MASTER_CAMNOC_HF0_UNCOMP,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_rt_uncomp = {
+ .name = "qxm_camnoc_rt_uncomp",
+ .id = SM7150_MASTER_CAMNOC_RT_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf_uncomp = {
+ .name = "qxm_camnoc_sf_uncomp",
+ .id = SM7150_MASTER_CAMNOC_SF_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qxm_camnoc_nrt_uncomp = {
+ .name = "qxm_camnoc_nrt_uncomp",
+ .id = SM7150_MASTER_CAMNOC_NRT_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CAMNOC_UNCOMP },
+};
+
+static struct qcom_icc_node qnm_npu = {
+ .name = "qnm_npu",
+ .id = SM7150_MASTER_NPU,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CDSP_GEM_NOC },
+};
+
+static struct qcom_icc_node qhm_spdm = {
+ .name = "qhm_spdm",
+ .id = SM7150_MASTER_SPDM,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node qnm_snoc = {
+ .name = "qnm_snoc",
+ .id = SM7150_SNOC_CNOC_MAS,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 47,
+ .links = { SM7150_SLAVE_TLMM_SOUTH,
+ SM7150_SLAVE_CAMERA_CFG,
+ SM7150_SLAVE_SDCC_4,
+ SM7150_SLAVE_SDCC_2,
+ SM7150_SLAVE_CNOC_MNOC_CFG,
+ SM7150_SLAVE_UFS_MEM_CFG,
+ SM7150_SLAVE_QUP_0,
+ SM7150_SLAVE_GLM,
+ SM7150_SLAVE_PDM,
+ SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM7150_SLAVE_A2NOC_CFG,
+ SM7150_SLAVE_QDSS_CFG,
+ SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM7150_SLAVE_DISPLAY_CFG,
+ SM7150_SLAVE_PCIE_CFG,
+ SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM7150_SLAVE_TCSR,
+ SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ SM7150_SLAVE_CNOC_DDRSS,
+ SM7150_SLAVE_AHB2PHY_NORTH,
+ SM7150_SLAVE_SNOC_CFG,
+ SM7150_SLAVE_GRAPHICS_3D_CFG,
+ SM7150_SLAVE_VENUS_CFG,
+ SM7150_SLAVE_TSIF,
+ SM7150_SLAVE_CDSP_CFG,
+ SM7150_SLAVE_CLK_CTL,
+ SM7150_SLAVE_AOP,
+ SM7150_SLAVE_QUP_1,
+ SM7150_SLAVE_AHB2PHY_SOUTH,
+ SM7150_SLAVE_SERVICE_CNOC,
+ SM7150_SLAVE_AHB2PHY_WEST,
+ SM7150_SLAVE_USB3,
+ SM7150_SLAVE_VENUS_THROTTLE_CFG,
+ SM7150_SLAVE_IPA_CFG,
+ SM7150_SLAVE_RBCPR_CX_CFG,
+ SM7150_SLAVE_TLMM_WEST,
+ SM7150_SLAVE_A1NOC_CFG,
+ SM7150_SLAVE_AOSS,
+ SM7150_SLAVE_PRNG,
+ SM7150_SLAVE_VSENSE_CTRL_CFG,
+ SM7150_SLAVE_EMMC_CFG,
+ SM7150_SLAVE_SPDM_WRAPPER,
+ SM7150_SLAVE_CRYPTO_0_CFG,
+ SM7150_SLAVE_PIMEM_CFG,
+ SM7150_SLAVE_TLMM_NORTH,
+ SM7150_SLAVE_RBCPR_MX_CFG,
+ SM7150_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node xm_qdss_dap = {
+ .name = "xm_qdss_dap",
+ .id = SM7150_MASTER_QDSS_DAP,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 48,
+ .links = { SM7150_SLAVE_TLMM_SOUTH,
+ SM7150_SLAVE_CAMERA_CFG,
+ SM7150_SLAVE_SDCC_4,
+ SM7150_SLAVE_SDCC_2,
+ SM7150_SLAVE_CNOC_MNOC_CFG,
+ SM7150_SLAVE_UFS_MEM_CFG,
+ SM7150_SLAVE_QUP_0,
+ SM7150_SLAVE_GLM,
+ SM7150_SLAVE_PDM,
+ SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ SM7150_SLAVE_A2NOC_CFG,
+ SM7150_SLAVE_QDSS_CFG,
+ SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ SM7150_SLAVE_DISPLAY_CFG,
+ SM7150_SLAVE_PCIE_CFG,
+ SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
+ SM7150_SLAVE_TCSR,
+ SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ SM7150_SLAVE_CNOC_DDRSS,
+ SM7150_SLAVE_CNOC_A2NOC,
+ SM7150_SLAVE_AHB2PHY_NORTH,
+ SM7150_SLAVE_SNOC_CFG,
+ SM7150_SLAVE_GRAPHICS_3D_CFG,
+ SM7150_SLAVE_VENUS_CFG,
+ SM7150_SLAVE_TSIF,
+ SM7150_SLAVE_CDSP_CFG,
+ SM7150_SLAVE_CLK_CTL,
+ SM7150_SLAVE_AOP,
+ SM7150_SLAVE_QUP_1,
+ SM7150_SLAVE_AHB2PHY_SOUTH,
+ SM7150_SLAVE_SERVICE_CNOC,
+ SM7150_SLAVE_AHB2PHY_WEST,
+ SM7150_SLAVE_USB3,
+ SM7150_SLAVE_VENUS_THROTTLE_CFG,
+ SM7150_SLAVE_IPA_CFG,
+ SM7150_SLAVE_RBCPR_CX_CFG,
+ SM7150_SLAVE_TLMM_WEST,
+ SM7150_SLAVE_A1NOC_CFG,
+ SM7150_SLAVE_AOSS,
+ SM7150_SLAVE_PRNG,
+ SM7150_SLAVE_VSENSE_CTRL_CFG,
+ SM7150_SLAVE_EMMC_CFG,
+ SM7150_SLAVE_SPDM_WRAPPER,
+ SM7150_SLAVE_CRYPTO_0_CFG,
+ SM7150_SLAVE_PIMEM_CFG,
+ SM7150_SLAVE_TLMM_NORTH,
+ SM7150_SLAVE_RBCPR_MX_CFG,
+ SM7150_SLAVE_IMEM_CFG
+ },
+};
+
+static struct qcom_icc_node qhm_cnoc_dc_noc = {
+ .name = "qhm_cnoc_dc_noc",
+ .id = SM7150_MASTER_CNOC_DC_NOC,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC_CFG,
+ SM7150_SLAVE_GEM_NOC_CFG
+ },
+};
+
+static struct qcom_icc_node acm_apps = {
+ .name = "acm_apps",
+ .id = SM7150_MASTER_AMPSS_M0,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node acm_sys_tcu = {
+ .name = "acm_sys_tcu",
+ .id = SM7150_MASTER_SYS_TCU,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qhm_gemnoc_cfg = {
+ .name = "qhm_gemnoc_cfg",
+ .id = SM7150_MASTER_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_SERVICE_GEM_NOC,
+ SM7150_SLAVE_MSS_PROC_MS_MPU_CFG
+ },
+};
+
+static struct qcom_icc_node qnm_cmpnoc = {
+ .name = "qnm_cmpnoc",
+ .id = SM7150_MASTER_COMPUTE_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .id = SM7150_MASTER_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .id = SM7150_MASTER_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_pcie = {
+ .name = "qnm_pcie",
+ .id = SM7150_MASTER_GEM_NOC_PCIE_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node qnm_snoc_gc = {
+ .name = "qnm_snoc_gc",
+ .id = SM7150_MASTER_SNOC_GC_MEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .id = SM7150_MASTER_SNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_LLCC },
+};
+
+static struct qcom_icc_node qxm_gpu = {
+ .name = "qxm_gpu",
+ .id = SM7150_MASTER_GRAPHICS_3D,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_LLCC,
+ SM7150_SLAVE_GEM_NOC_SNOC
+ },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .id = SM7150_MASTER_LLCC,
+ .channels = 2,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_EBI_CH0 },
+};
+
+static struct qcom_icc_node qhm_mnoc_cfg = {
+ .name = "qhm_mnoc_cfg",
+ .id = SM7150_MASTER_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_MNOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_hf = {
+ .name = "qxm_camnoc_hf",
+ .id = SM7150_MASTER_CAMNOC_HF0,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_nrt = {
+ .name = "qxm_camnoc_nrt",
+ .id = SM7150_MASTER_CAMNOC_NRT,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_rt = {
+ .name = "qxm_camnoc_rt",
+ .id = SM7150_MASTER_CAMNOC_RT,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_camnoc_sf = {
+ .name = "qxm_camnoc_sf",
+ .id = SM7150_MASTER_CAMNOC_SF,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp0 = {
+ .name = "qxm_mdp0",
+ .id = SM7150_MASTER_MDP_PORT0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_mdp1 = {
+ .name = "qxm_mdp1",
+ .id = SM7150_MASTER_MDP_PORT1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_rot = {
+ .name = "qxm_rot",
+ .id = SM7150_MASTER_ROTATOR,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus0 = {
+ .name = "qxm_venus0",
+ .id = SM7150_MASTER_VIDEO_P0,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus1 = {
+ .name = "qxm_venus1",
+ .id = SM7150_MASTER_VIDEO_P1,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxm_venus_arm9 = {
+ .name = "qxm_venus_arm9",
+ .id = SM7150_MASTER_VIDEO_PROC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qhm_snoc_cfg = {
+ .name = "qhm_snoc_cfg",
+ .id = SM7150_MASTER_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_SLAVE_SERVICE_SNOC },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .id = SM7150_A1NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 6,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_SF,
+ SM7150_SLAVE_PIMEM,
+ SM7150_SLAVE_OCIMEM,
+ SM7150_SLAVE_APPSS,
+ SM7150_SNOC_CNOC_SLV,
+ SM7150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .id = SM7150_A2NOC_SNOC_MAS,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 7,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_SF,
+ SM7150_SLAVE_PIMEM,
+ SM7150_SLAVE_OCIMEM,
+ SM7150_SLAVE_APPSS,
+ SM7150_SNOC_CNOC_SLV,
+ SM7150_SLAVE_TCU,
+ SM7150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qnm_gemnoc = {
+ .name = "qnm_gemnoc",
+ .id = SM7150_MASTER_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 6,
+ .links = { SM7150_SLAVE_PIMEM,
+ SM7150_SLAVE_OCIMEM,
+ SM7150_SLAVE_APPSS,
+ SM7150_SNOC_CNOC_SLV,
+ SM7150_SLAVE_TCU,
+ SM7150_SLAVE_QDSS_STM
+ },
+};
+
+static struct qcom_icc_node qxm_pimem = {
+ .name = "qxm_pimem",
+ .id = SM7150_MASTER_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_GC,
+ SM7150_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .id = SM7150_MASTER_GIC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .links = { SM7150_SLAVE_SNOC_GEM_NOC_GC,
+ SM7150_SLAVE_OCIMEM
+ },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .id = SM7150_A1NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_A1NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node srvc_aggre1_noc = {
+ .name = "srvc_aggre1_noc",
+ .id = SM7150_SLAVE_SERVICE_A1NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .id = SM7150_A2NOC_SNOC_SLV,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_A2NOC_SNOC_MAS },
+};
+
+static struct qcom_icc_node qns_pcie_gemnoc = {
+ .name = "qns_pcie_gemnoc",
+ .id = SM7150_SLAVE_ANOC_PCIE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_GEM_NOC_PCIE_SNOC },
+};
+
+static struct qcom_icc_node srvc_aggre2_noc = {
+ .name = "srvc_aggre2_noc",
+ .id = SM7150_SLAVE_SERVICE_A2NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_camnoc_uncomp = {
+ .name = "qns_camnoc_uncomp",
+ .id = SM7150_SLAVE_CAMNOC_UNCOMP,
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node qns_cdsp_gemnoc = {
+ .name = "qns_cdsp_gemnoc",
+ .id = SM7150_SLAVE_CDSP_GEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_MASTER_COMPUTE_NOC },
+};
+
+static struct qcom_icc_node qhs_a1_noc_cfg = {
+ .name = "qhs_a1_noc_cfg",
+ .id = SM7150_SLAVE_A1NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_A1NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_a2_noc_cfg = {
+ .name = "qhs_a2_noc_cfg",
+ .id = SM7150_SLAVE_A2NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_A2NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_ahb2phy_north = {
+ .name = "qhs_ahb2phy_north",
+ .id = SM7150_SLAVE_AHB2PHY_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy_south = {
+ .name = "qhs_ahb2phy_south",
+ .id = SM7150_SLAVE_AHB2PHY_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy_west = {
+ .name = "qhs_ahb2phy_west",
+ .id = SM7150_SLAVE_AHB2PHY_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aop = {
+ .name = "qhs_aop",
+ .id = SM7150_SLAVE_AOP,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .id = SM7150_SLAVE_AOSS,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .id = SM7150_SLAVE_CAMERA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_nrt_thrott_cfg = {
+ .name = "qhs_camera_nrt_thrott_cfg",
+ .id = SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_rt_throttle_cfg = {
+ .name = "qhs_camera_rt_throttle_cfg",
+ .id = SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .id = SM7150_SLAVE_CLK_CTL,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_compute_dsp_cfg = {
+ .name = "qhs_compute_dsp_cfg",
+ .id = SM7150_SLAVE_CDSP_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_cx = {
+ .name = "qhs_cpr_cx",
+ .id = SM7150_SLAVE_RBCPR_CX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_cpr_mx = {
+ .name = "qhs_cpr_mx",
+ .id = SM7150_SLAVE_RBCPR_MX_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .id = SM7150_SLAVE_CRYPTO_0_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ddrss_cfg = {
+ .name = "qhs_ddrss_cfg",
+ .id = SM7150_SLAVE_CNOC_DDRSS,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_CNOC_DC_NOC },
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .id = SM7150_SLAVE_DISPLAY_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_throttle_cfg = {
+ .name = "qhs_display_throttle_cfg",
+ .id = SM7150_SLAVE_DISPLAY_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_emmc_cfg = {
+ .name = "qhs_emmc_cfg",
+ .id = SM7150_SLAVE_EMMC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_glm = {
+ .name = "qhs_glm",
+ .id = SM7150_SLAVE_GLM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .id = SM7150_SLAVE_GRAPHICS_3D_CFG,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .id = SM7150_SLAVE_IMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipa = {
+ .name = "qhs_ipa",
+ .id = SM7150_SLAVE_IPA_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mnoc_cfg = {
+ .name = "qhs_mnoc_cfg",
+ .id = SM7150_SLAVE_CNOC_MNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_CNOC_MNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_pcie_cfg = {
+ .name = "qhs_pcie_cfg",
+ .id = SM7150_SLAVE_PCIE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .id = SM7150_SLAVE_PDM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pimem_cfg = {
+ .name = "qhs_pimem_cfg",
+ .id = SM7150_SLAVE_PIMEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .id = SM7150_SLAVE_PRNG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .id = SM7150_SLAVE_QDSS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_center = {
+ .name = "qhs_qupv3_center",
+ .id = SM7150_SLAVE_QUP_0,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qupv3_north = {
+ .name = "qhs_qupv3_north",
+ .id = SM7150_SLAVE_QUP_1,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .id = SM7150_SLAVE_SDCC_2,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .id = SM7150_SLAVE_SDCC_4,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_snoc_cfg = {
+ .name = "qhs_snoc_cfg",
+ .id = SM7150_SLAVE_SNOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_SNOC_CFG },
+};
+
+static struct qcom_icc_node qhs_spdm = {
+ .name = "qhs_spdm",
+ .id = SM7150_SLAVE_SPDM_WRAPPER,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .id = SM7150_SLAVE_TCSR,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_north = {
+ .name = "qhs_tlmm_north",
+ .id = SM7150_SLAVE_TLMM_NORTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_south = {
+ .name = "qhs_tlmm_south",
+ .id = SM7150_SLAVE_TLMM_SOUTH,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm_west = {
+ .name = "qhs_tlmm_west",
+ .id = SM7150_SLAVE_TLMM_WEST,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tsif = {
+ .name = "qhs_tsif",
+ .id = SM7150_SLAVE_TSIF,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .id = SM7150_SLAVE_UFS_MEM_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0 = {
+ .name = "qhs_usb3_0",
+ .id = SM7150_SLAVE_USB3,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .id = SM7150_SLAVE_VENUS_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cvp_throttle_cfg = {
+ .name = "qhs_venus_cvp_throttle_cfg",
+ .id = SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_throttle_cfg = {
+ .name = "qhs_venus_throttle_cfg",
+ .id = SM7150_SLAVE_VENUS_THROTTLE_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_vsense_ctrl_cfg = {
+ .name = "qhs_vsense_ctrl_cfg",
+ .id = SM7150_SLAVE_VSENSE_CTRL_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_cnoc_a2noc = {
+ .name = "qns_cnoc_a2noc",
+ .id = SM7150_SLAVE_CNOC_A2NOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_CNOC_A2NOC },
+};
+
+static struct qcom_icc_node srvc_cnoc = {
+ .name = "srvc_cnoc",
+ .id = SM7150_SLAVE_SERVICE_CNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gemnoc = {
+ .name = "qhs_gemnoc",
+ .id = SM7150_SLAVE_GEM_NOC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .links = { SM7150_MASTER_GEM_NOC_CFG },
+};
+
+static struct qcom_icc_node qhs_llcc = {
+ .name = "qhs_llcc",
+ .id = SM7150_SLAVE_LLCC_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_mdsp_ms_mpu_cfg = {
+ .name = "qhs_mdsp_ms_mpu_cfg",
+ .id = SM7150_SLAVE_MSS_PROC_MS_MPU_CFG,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_gem_noc_snoc = {
+ .name = "qns_gem_noc_snoc",
+ .id = SM7150_SLAVE_GEM_NOC_SNOC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_GEM_NOC_SNOC },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .id = SM7150_SLAVE_LLCC,
+ .channels = 2,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_MASTER_LLCC },
+};
+
+static struct qcom_icc_node srvc_gemnoc = {
+ .name = "srvc_gemnoc",
+ .id = SM7150_SLAVE_SERVICE_GEM_NOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .id = SM7150_SLAVE_EBI_CH0,
+ .channels = 2,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns2_mem_noc = {
+ .name = "qns2_mem_noc",
+ .id = SM7150_SLAVE_MNOC_SF_MEM_NOC,
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_MASTER_MNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .id = SM7150_SLAVE_MNOC_HF_MEM_NOC,
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .links = { SM7150_MASTER_MNOC_HF_MEM_NOC },
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .id = SM7150_SLAVE_SERVICE_MNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_apss = {
+ .name = "qhs_apss",
+ .id = SM7150_SLAVE_APPSS,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qns_cnoc = {
+ .name = "qns_cnoc",
+ .id = SM7150_SNOC_CNOC_SLV,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_SNOC_CNOC_MAS },
+};
+
+static struct qcom_icc_node qns_gemnoc_gc = {
+ .name = "qns_gemnoc_gc",
+ .id = SM7150_SLAVE_SNOC_GEM_NOC_GC,
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .links = { SM7150_MASTER_SNOC_GC_MEM_NOC },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .id = SM7150_SLAVE_SNOC_GEM_NOC_SF,
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .links = { SM7150_MASTER_SNOC_SF_MEM_NOC },
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .id = SM7150_SLAVE_OCIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_pimem = {
+ .name = "qxs_pimem",
+ .id = SM7150_SLAVE_PIMEM,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node srvc_snoc = {
+ .name = "srvc_snoc",
+ .id = SM7150_SLAVE_SERVICE_SNOC,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .id = SM7150_SLAVE_QDSS_STM,
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .id = SM7150_SLAVE_TCU,
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .keepalive = true,
+ .num_nodes = 8,
+ .nodes = { &qxm_camnoc_hf0_uncomp,
+ &qxm_camnoc_rt_uncomp,
+ &qxm_camnoc_sf_uncomp,
+ &qxm_camnoc_nrt_uncomp,
+ &qxm_camnoc_hf,
+ &qxm_camnoc_rt,
+ &qxm_mdp0,
+ &qxm_mdp1
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh2 = {
+ .name = "SH2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gem_noc_snoc },
+};
+
+static struct qcom_icc_bcm bcm_sh3 = {
+ .name = "SH3",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_sys_tcu },
+};
+
+static struct qcom_icc_bcm bcm_mm2 = {
+ .name = "MM2",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_camnoc_nrt,
+ &qns2_mem_noc
+ },
+};
+
+static struct qcom_icc_bcm bcm_mm3 = {
+ .name = "MM3",
+ .keepalive = false,
+ .num_nodes = 5,
+ .nodes = { &qxm_camnoc_sf,
+ &qxm_rot,
+ &qxm_venus0,
+ &qxm_venus1,
+ &qxm_venus_arm9
+ },
+};
+
+static struct qcom_icc_bcm bcm_sh5 = {
+ .name = "SH5",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &acm_apps },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sh8 = {
+ .name = "SH8",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_cdsp_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sh10 = {
+ .name = "SH10",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_npu },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .num_nodes = 54,
+ .nodes = { &qhm_tsif,
+ &xm_emmc,
+ &xm_sdc2,
+ &xm_sdc4,
+ &qhm_spdm,
+ &qnm_snoc,
+ &qhs_a1_noc_cfg,
+ &qhs_a2_noc_cfg,
+ &qhs_ahb2phy_north,
+ &qhs_ahb2phy_south,
+ &qhs_ahb2phy_west,
+ &qhs_aop,
+ &qhs_aoss,
+ &qhs_camera_cfg,
+ &qhs_camera_nrt_thrott_cfg,
+ &qhs_camera_rt_throttle_cfg,
+ &qhs_clk_ctl,
+ &qhs_compute_dsp_cfg,
+ &qhs_cpr_cx,
+ &qhs_cpr_mx,
+ &qhs_crypto0_cfg,
+ &qhs_ddrss_cfg,
+ &qhs_display_cfg,
+ &qhs_display_throttle_cfg,
+ &qhs_emmc_cfg,
+ &qhs_glm,
+ &qhs_gpuss_cfg,
+ &qhs_imem_cfg,
+ &qhs_ipa,
+ &qhs_mnoc_cfg,
+ &qhs_pcie_cfg,
+ &qhs_pdm,
+ &qhs_pimem_cfg,
+ &qhs_prng,
+ &qhs_qdss_cfg,
+ &qhs_qupv3_center,
+ &qhs_qupv3_north,
+ &qhs_sdc2,
+ &qhs_sdc4,
+ &qhs_snoc_cfg,
+ &qhs_spdm,
+ &qhs_tcsr,
+ &qhs_tlmm_north,
+ &qhs_tlmm_south,
+ &qhs_tlmm_west,
+ &qhs_tsif,
+ &qhs_ufs_mem_cfg,
+ &qhs_usb3_0,
+ &qhs_venus_cfg,
+ &qhs_venus_cvp_throttle_cfg,
+ &qhs_venus_throttle_cfg,
+ &qhs_vsense_ctrl_cfg,
+ &qns_cnoc_a2noc,
+ &srvc_cnoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qhm_qup_center,
+ &qhm_qup_north
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_gc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qxs_pimem },
+};
+
+static struct qcom_icc_bcm bcm_sn9 = {
+ .name = "SN9",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre1_noc,
+ &qns_a1noc_snoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn11 = {
+ .name = "SN11",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qnm_aggre2_noc,
+ &qns_a2noc_snoc
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn12 = {
+ .name = "SN12",
+ .keepalive = false,
+ .num_nodes = 2,
+ .nodes = { &qxm_pimem,
+ &xm_gic
+ },
+};
+
+static struct qcom_icc_bcm bcm_sn14 = {
+ .name = "SN14",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qns_pcie_gemnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn15 = {
+ .name = "SN15",
+ .keepalive = false,
+ .num_nodes = 1,
+ .nodes = { &qnm_gemnoc },
+};
+
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_cn0,
+ &bcm_qup0,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg,
+ [MASTER_QUP_0] = &qhm_qup_center,
+ [MASTER_TSIF] = &qhm_tsif,
+ [MASTER_EMMC] = &xm_emmc,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [A1NOC_SNOC_SLV] = &qns_a1noc_snoc,
+ [SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+};
+
+static const struct qcom_icc_desc sm7150_aggre1_noc = {
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const aggre2_noc_bcms[] = {
+ &bcm_ce0,
+ &bcm_qup0,
+ &bcm_sn11,
+ &bcm_sn14,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg,
+ [MASTER_QDSS_BAM] = &qhm_qdss_bam,
+ [MASTER_QUP_1] = &qhm_qup_north,
+ [MASTER_CNOC_A2NOC] = &qnm_cnoc,
+ [MASTER_CRYPTO_CORE_0] = &qxm_crypto,
+ [MASTER_IPA] = &qxm_ipa,
+ [MASTER_PCIE] = &xm_pcie3_0,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr,
+ [MASTER_USB3] = &xm_usb3_0,
+ [A2NOC_SNOC_SLV] = &qns_a2noc_snoc,
+ [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_gemnoc,
+ [SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+};
+
+static const struct qcom_icc_desc sm7150_aggre2_noc = {
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .bcms = aggre2_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre2_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const camnoc_virt_bcms[] = {
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const camnoc_virt_nodes[] = {
+ [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp,
+ [MASTER_CAMNOC_RT_UNCOMP] = &qxm_camnoc_rt_uncomp,
+ [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp,
+ [MASTER_CAMNOC_NRT_UNCOMP] = &qxm_camnoc_nrt_uncomp,
+ [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp,
+};
+
+static const struct qcom_icc_desc sm7150_camnoc_virt = {
+ .nodes = camnoc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(camnoc_virt_nodes),
+ .bcms = camnoc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(camnoc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const compute_noc_bcms[] = {
+ &bcm_sh10,
+ &bcm_sh8,
+};
+
+static struct qcom_icc_node * const compute_noc_nodes[] = {
+ [MASTER_NPU] = &qnm_npu,
+ [SLAVE_CDSP_GEM_NOC] = &qns_cdsp_gemnoc,
+};
+
+static const struct qcom_icc_desc sm7150_compute_noc = {
+ .nodes = compute_noc_nodes,
+ .num_nodes = ARRAY_SIZE(compute_noc_nodes),
+ .bcms = compute_noc_bcms,
+ .num_bcms = ARRAY_SIZE(compute_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const config_noc_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const config_noc_nodes[] = {
+ [MASTER_SPDM] = &qhm_spdm,
+ [SNOC_CNOC_MAS] = &qnm_snoc,
+ [MASTER_QDSS_DAP] = &xm_qdss_dap,
+ [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg,
+ [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy_north,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy_south,
+ [SLAVE_AHB2PHY_WEST] = &qhs_ahb2phy_west,
+ [SLAVE_AOP] = &qhs_aop,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_thrott_cfg,
+ [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg,
+ [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx,
+ [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg,
+ [SLAVE_EMMC_CFG] = &qhs_emmc_cfg,
+ [SLAVE_GLM] = &qhs_glm,
+ [SLAVE_GRAPHICS_3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_IPA_CFG] = &qhs_ipa,
+ [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg,
+ [SLAVE_PCIE_CFG] = &qhs_pcie_cfg,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QUP_0] = &qhs_qupv3_center,
+ [SLAVE_QUP_1] = &qhs_qupv3_north,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SNOC_CFG] = &qhs_snoc_cfg,
+ [SLAVE_SPDM_WRAPPER] = &qhs_spdm,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM_NORTH] = &qhs_tlmm_north,
+ [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south,
+ [SLAVE_TLMM_WEST] = &qhs_tlmm_west,
+ [SLAVE_TSIF] = &qhs_tsif,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB3] = &qhs_usb3_0,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_VENUS_CVP_THROTTLE_CFG] = &qhs_venus_cvp_throttle_cfg,
+ [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg,
+ [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg,
+ [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc,
+ [SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+};
+
+static const struct qcom_icc_desc sm7150_config_noc = {
+ .nodes = config_noc_nodes,
+ .num_nodes = ARRAY_SIZE(config_noc_nodes),
+ .bcms = config_noc_bcms,
+ .num_bcms = ARRAY_SIZE(config_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const dc_noc_bcms[] = {
+};
+
+static struct qcom_icc_node * const dc_noc_nodes[] = {
+ [MASTER_CNOC_DC_NOC] = &qhm_cnoc_dc_noc,
+ [SLAVE_GEM_NOC_CFG] = &qhs_gemnoc,
+ [SLAVE_LLCC_CFG] = &qhs_llcc,
+};
+
+static const struct qcom_icc_desc sm7150_dc_noc = {
+ .nodes = dc_noc_nodes,
+ .num_nodes = ARRAY_SIZE(dc_noc_nodes),
+ .bcms = dc_noc_bcms,
+ .num_bcms = ARRAY_SIZE(dc_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const gem_noc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh2,
+ &bcm_sh3,
+ &bcm_sh5,
+};
+
+static struct qcom_icc_node * const gem_noc_nodes[] = {
+ [MASTER_AMPSS_M0] = &acm_apps,
+ [MASTER_SYS_TCU] = &acm_sys_tcu,
+ [MASTER_GEM_NOC_CFG] = &qhm_gemnoc_cfg,
+ [MASTER_COMPUTE_NOC] = &qnm_cmpnoc,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_pcie,
+ [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_GRAPHICS_3D] = &qxm_gpu,
+ [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg,
+ [SLAVE_GEM_NOC_SNOC] = &qns_gem_noc_snoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_SERVICE_GEM_NOC] = &srvc_gemnoc,
+};
+
+static const struct qcom_icc_desc sm7150_gem_noc = {
+ .nodes = gem_noc_nodes,
+ .num_nodes = ARRAY_SIZE(gem_noc_nodes),
+ .bcms = gem_noc_bcms,
+ .num_bcms = ARRAY_SIZE(gem_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI_CH0] = &ebi,
+};
+
+static const struct qcom_icc_desc sm7150_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+ &bcm_mm2,
+ &bcm_mm3,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg,
+ [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf,
+ [MASTER_CAMNOC_NRT] = &qxm_camnoc_nrt,
+ [MASTER_CAMNOC_RT] = &qxm_camnoc_rt,
+ [MASTER_CAMNOC_SF] = &qxm_camnoc_sf,
+ [MASTER_MDP_PORT0] = &qxm_mdp0,
+ [MASTER_MDP_PORT1] = &qxm_mdp1,
+ [MASTER_ROTATOR] = &qxm_rot,
+ [MASTER_VIDEO_P0] = &qxm_venus0,
+ [MASTER_VIDEO_P1] = &qxm_venus1,
+ [MASTER_VIDEO_PROC] = &qxm_venus_arm9,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct qcom_icc_desc sm7150_mmss_noc = {
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn11,
+ &bcm_sn12,
+ &bcm_sn15,
+ &bcm_sn2,
+ &bcm_sn4,
+ &bcm_sn9,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_SNOC_CFG] = &qhm_snoc_cfg,
+ [A1NOC_SNOC_MAS] = &qnm_aggre1_noc,
+ [A2NOC_SNOC_MAS] = &qnm_aggre2_noc,
+ [MASTER_GEM_NOC_SNOC] = &qnm_gemnoc,
+ [MASTER_PIMEM] = &qxm_pimem,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_APPSS] = &qhs_apss,
+ [SNOC_CNOC_SLV] = &qns_cnoc,
+ [SLAVE_SNOC_GEM_NOC_GC] = &qns_gemnoc_gc,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+ [SLAVE_OCIMEM] = &qxs_imem,
+ [SLAVE_PIMEM] = &qxs_pimem,
+ [SLAVE_SERVICE_SNOC] = &srvc_snoc,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct qcom_icc_desc sm7150_system_noc = {
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,sm7150-aggre1-noc", .data = &sm7150_aggre1_noc },
+ { .compatible = "qcom,sm7150-aggre2-noc", .data = &sm7150_aggre2_noc },
+ { .compatible = "qcom,sm7150-camnoc-virt", .data = &sm7150_camnoc_virt },
+ { .compatible = "qcom,sm7150-compute-noc", .data = &sm7150_compute_noc },
+ { .compatible = "qcom,sm7150-config-noc", .data = &sm7150_config_noc },
+ { .compatible = "qcom,sm7150-dc-noc", .data = &sm7150_dc_noc },
+ { .compatible = "qcom,sm7150-gem-noc", .data = &sm7150_gem_noc },
+ { .compatible = "qcom,sm7150-mc-virt", .data = &sm7150_mc_virt },
+ { .compatible = "qcom,sm7150-mmss-noc", .data = &sm7150_mmss_noc },
+ { .compatible = "qcom,sm7150-system-noc", .data = &sm7150_system_noc },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove_new = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-sm7150",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("Qualcomm SM7150 NoC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/interconnect/qcom/sm7150.h b/drivers/interconnect/qcom/sm7150.h
new file mode 100644
index 000000000000..e00a9b0c1279
--- /dev/null
+++ b/drivers/interconnect/qcom/sm7150.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Qualcomm SM7150 interconnect IDs
+ *
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024, Danila Tikhonov <danila@jiaxyga.com>
+ */
+
+#ifndef __DRIVERS_INTERCONNECT_QCOM_SM7150_H
+#define __DRIVERS_INTERCONNECT_QCOM_SM7150_H
+
+#define SM7150_A1NOC_SNOC_MAS 0
+#define SM7150_A1NOC_SNOC_SLV 1
+#define SM7150_A2NOC_SNOC_MAS 2
+#define SM7150_A2NOC_SNOC_SLV 3
+#define SM7150_MASTER_A1NOC_CFG 4
+#define SM7150_MASTER_A2NOC_CFG 5
+#define SM7150_MASTER_AMPSS_M0 6
+#define SM7150_MASTER_CAMNOC_HF0 7
+#define SM7150_MASTER_CAMNOC_HF0_UNCOMP 8
+#define SM7150_MASTER_CAMNOC_NRT 9
+#define SM7150_MASTER_CAMNOC_NRT_UNCOMP 10
+#define SM7150_MASTER_CAMNOC_RT 11
+#define SM7150_MASTER_CAMNOC_RT_UNCOMP 12
+#define SM7150_MASTER_CAMNOC_SF 13
+#define SM7150_MASTER_CAMNOC_SF_UNCOMP 14
+#define SM7150_MASTER_CNOC_A2NOC 15
+#define SM7150_MASTER_CNOC_DC_NOC 16
+#define SM7150_MASTER_CNOC_MNOC_CFG 17
+#define SM7150_MASTER_COMPUTE_NOC 18
+#define SM7150_MASTER_CRYPTO_CORE_0 19
+#define SM7150_MASTER_EMMC 20
+#define SM7150_MASTER_GEM_NOC_CFG 21
+#define SM7150_MASTER_GEM_NOC_PCIE_SNOC 22
+#define SM7150_MASTER_GEM_NOC_SNOC 23
+#define SM7150_MASTER_GIC 24
+#define SM7150_MASTER_GRAPHICS_3D 25
+#define SM7150_MASTER_IPA 26
+#define SM7150_MASTER_LLCC 27
+#define SM7150_MASTER_MDP_PORT0 28
+#define SM7150_MASTER_MDP_PORT1 29
+#define SM7150_MASTER_MNOC_HF_MEM_NOC 30
+#define SM7150_MASTER_MNOC_SF_MEM_NOC 31
+#define SM7150_MASTER_NPU 32
+#define SM7150_MASTER_PCIE 33
+#define SM7150_MASTER_PIMEM 34
+#define SM7150_MASTER_QDSS_BAM 35
+#define SM7150_MASTER_QDSS_DAP 36
+#define SM7150_MASTER_QDSS_ETR 37
+#define SM7150_MASTER_QUP_0 38
+#define SM7150_MASTER_QUP_1 39
+#define SM7150_MASTER_ROTATOR 40
+#define SM7150_MASTER_SDCC_2 41
+#define SM7150_MASTER_SDCC_4 42
+#define SM7150_MASTER_SNOC_CFG 43
+#define SM7150_MASTER_SNOC_GC_MEM_NOC 44
+#define SM7150_MASTER_SNOC_SF_MEM_NOC 45
+#define SM7150_MASTER_SPDM 46
+#define SM7150_MASTER_SYS_TCU 47
+#define SM7150_MASTER_TSIF 48
+#define SM7150_MASTER_UFS_MEM 49
+#define SM7150_MASTER_USB3 50
+#define SM7150_MASTER_VIDEO_P0 51
+#define SM7150_MASTER_VIDEO_P1 52
+#define SM7150_MASTER_VIDEO_PROC 53
+#define SM7150_SLAVE_A1NOC_CFG 54
+#define SM7150_SLAVE_A2NOC_CFG 55
+#define SM7150_SLAVE_AHB2PHY_NORTH 56
+#define SM7150_SLAVE_AHB2PHY_SOUTH 57
+#define SM7150_SLAVE_AHB2PHY_WEST 58
+#define SM7150_SLAVE_ANOC_PCIE_GEM_NOC 59
+#define SM7150_SLAVE_AOP 60
+#define SM7150_SLAVE_AOSS 61
+#define SM7150_SLAVE_APPSS 62
+#define SM7150_SLAVE_CAMERA_CFG 63
+#define SM7150_SLAVE_CAMERA_NRT_THROTTLE_CFG 64
+#define SM7150_SLAVE_CAMERA_RT_THROTTLE_CFG 65
+#define SM7150_SLAVE_CAMNOC_UNCOMP 66
+#define SM7150_SLAVE_CDSP_CFG 67
+#define SM7150_SLAVE_CDSP_GEM_NOC 68
+#define SM7150_SLAVE_CLK_CTL 69
+#define SM7150_SLAVE_CNOC_A2NOC 70
+#define SM7150_SLAVE_CNOC_DDRSS 71
+#define SM7150_SLAVE_CNOC_MNOC_CFG 72
+#define SM7150_SLAVE_CRYPTO_0_CFG 73
+#define SM7150_SLAVE_DISPLAY_CFG 74
+#define SM7150_SLAVE_DISPLAY_THROTTLE_CFG 75
+#define SM7150_SLAVE_EBI_CH0 76
+#define SM7150_SLAVE_EMMC_CFG 77
+#define SM7150_SLAVE_GEM_NOC_CFG 78
+#define SM7150_SLAVE_GEM_NOC_SNOC 79
+#define SM7150_SLAVE_GLM 80
+#define SM7150_SLAVE_GRAPHICS_3D_CFG 81
+#define SM7150_SLAVE_IMEM_CFG 82
+#define SM7150_SLAVE_IPA_CFG 83
+#define SM7150_SLAVE_LLCC 84
+#define SM7150_SLAVE_LLCC_CFG 85
+#define SM7150_SLAVE_MNOC_HF_MEM_NOC 86
+#define SM7150_SLAVE_MNOC_SF_MEM_NOC 87
+#define SM7150_SLAVE_MSS_PROC_MS_MPU_CFG 88
+#define SM7150_SLAVE_OCIMEM 89
+#define SM7150_SLAVE_PCIE_CFG 90
+#define SM7150_SLAVE_PDM 91
+#define SM7150_SLAVE_PIMEM 92
+#define SM7150_SLAVE_PIMEM_CFG 93
+#define SM7150_SLAVE_PRNG 94
+#define SM7150_SLAVE_QDSS_CFG 95
+#define SM7150_SLAVE_QDSS_STM 96
+#define SM7150_SLAVE_QUP_0 97
+#define SM7150_SLAVE_QUP_1 98
+#define SM7150_SLAVE_RBCPR_CX_CFG 99
+#define SM7150_SLAVE_RBCPR_MX_CFG 100
+#define SM7150_SLAVE_SDCC_2 101
+#define SM7150_SLAVE_SDCC_4 102
+#define SM7150_SLAVE_SERVICE_A1NOC 103
+#define SM7150_SLAVE_SERVICE_A2NOC 104
+#define SM7150_SLAVE_SERVICE_CNOC 105
+#define SM7150_SLAVE_SERVICE_GEM_NOC 106
+#define SM7150_SLAVE_SERVICE_MNOC 107
+#define SM7150_SLAVE_SERVICE_SNOC 108
+#define SM7150_SLAVE_SNOC_CFG 109
+#define SM7150_SLAVE_SNOC_GEM_NOC_GC 110
+#define SM7150_SLAVE_SNOC_GEM_NOC_SF 111
+#define SM7150_SLAVE_SPDM_WRAPPER 112
+#define SM7150_SLAVE_TCSR 113
+#define SM7150_SLAVE_TCU 114
+#define SM7150_SLAVE_TLMM_NORTH 115
+#define SM7150_SLAVE_TLMM_SOUTH 116
+#define SM7150_SLAVE_TLMM_WEST 117
+#define SM7150_SLAVE_TSIF 118
+#define SM7150_SLAVE_UFS_MEM_CFG 119
+#define SM7150_SLAVE_USB3 120
+#define SM7150_SLAVE_VENUS_CFG 121
+#define SM7150_SLAVE_VENUS_CVP_THROTTLE_CFG 122
+#define SM7150_SLAVE_VENUS_THROTTLE_CFG 123
+#define SM7150_SLAVE_VSENSE_CTRL_CFG 124
+#define SM7150_SNOC_CNOC_MAS 125
+#define SM7150_SNOC_CNOC_SLV 126
+
+#endif
diff --git a/drivers/interconnect/qcom/sm8250.c b/drivers/interconnect/qcom/sm8250.c
index 02d40eea0d69..1879fa15761f 100644
--- a/drivers/interconnect/qcom/sm8250.c
+++ b/drivers/interconnect/qcom/sm8250.c
@@ -1673,7 +1673,7 @@ static struct qcom_icc_bcm * const qup_virt_bcms[] = {
&bcm_qup0,
};
-static struct qcom_icc_node *qup_virt_nodes[] = {
+static struct qcom_icc_node * const qup_virt_nodes[] = {
[MASTER_QUP_CORE_0] = &qup0_core_master,
[MASTER_QUP_CORE_1] = &qup1_core_master,
[MASTER_QUP_CORE_2] = &qup2_core_master,
diff --git a/drivers/interconnect/qcom/sm8550.c b/drivers/interconnect/qcom/sm8550.c
index fc22cecf650f..4d0e6fa9e003 100644
--- a/drivers/interconnect/qcom/sm8550.c
+++ b/drivers/interconnect/qcom/sm8550.c
@@ -524,231 +524,6 @@ static struct qcom_icc_node xm_gic = {
.links = { SM8550_SLAVE_SNOC_GEM_NOC_GC },
};
-static struct qcom_icc_node qnm_mnoc_hf_disp = {
- .name = "qnm_mnoc_hf_disp",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node qnm_pcie_disp = {
- .name = "qnm_pcie_disp",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node llcc_mc_disp = {
- .name = "llcc_mc_disp",
- .id = SM8550_MASTER_LLCC_DISP,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_DISP },
-};
-
-static struct qcom_icc_node qnm_mdp_disp = {
- .name = "qnm_mdp_disp",
- .id = SM8550_MASTER_MDP_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qnm_mnoc_hf_cam_ife_0 = {
- .name = "qnm_mnoc_hf_cam_ife_0",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_cam_ife_0 = {
- .name = "qnm_mnoc_sf_cam_ife_0",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_pcie_cam_ife_0 = {
- .name = "qnm_pcie_cam_ife_0",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node llcc_mc_cam_ife_0 = {
- .name = "llcc_mc_cam_ife_0",
- .id = SM8550_MASTER_LLCC_CAM_IFE_0,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_camnoc_hf_cam_ife_0 = {
- .name = "qnm_camnoc_hf_cam_ife_0",
- .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_camnoc_icp_cam_ife_0 = {
- .name = "qnm_camnoc_icp_cam_ife_0",
- .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_camnoc_sf_cam_ife_0 = {
- .name = "qnm_camnoc_sf_cam_ife_0",
- .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qnm_mnoc_hf_cam_ife_1 = {
- .name = "qnm_mnoc_hf_cam_ife_1",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_cam_ife_1 = {
- .name = "qnm_mnoc_sf_cam_ife_1",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_pcie_cam_ife_1 = {
- .name = "qnm_pcie_cam_ife_1",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node llcc_mc_cam_ife_1 = {
- .name = "llcc_mc_cam_ife_1",
- .id = SM8550_MASTER_LLCC_CAM_IFE_1,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_camnoc_hf_cam_ife_1 = {
- .name = "qnm_camnoc_hf_cam_ife_1",
- .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_camnoc_icp_cam_ife_1 = {
- .name = "qnm_camnoc_icp_cam_ife_1",
- .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_camnoc_sf_cam_ife_1 = {
- .name = "qnm_camnoc_sf_cam_ife_1",
- .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qnm_mnoc_hf_cam_ife_2 = {
- .name = "qnm_mnoc_hf_cam_ife_2",
- .id = SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_mnoc_sf_cam_ife_2 = {
- .name = "qnm_mnoc_sf_cam_ife_2",
- .id = SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_pcie_cam_ife_2 = {
- .name = "qnm_pcie_cam_ife_2",
- .id = SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_SLAVE_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node llcc_mc_cam_ife_2 = {
- .name = "llcc_mc_cam_ife_2",
- .id = SM8550_MASTER_LLCC_CAM_IFE_2,
- .channels = 4,
- .buswidth = 4,
- .num_links = 1,
- .links = { SM8550_SLAVE_EBI1_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_camnoc_hf_cam_ife_2 = {
- .name = "qnm_camnoc_hf_cam_ife_2",
- .id = SM8550_MASTER_CAMNOC_HF_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_camnoc_icp_cam_ife_2 = {
- .name = "qnm_camnoc_icp_cam_ife_2",
- .id = SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qnm_camnoc_sf_cam_ife_2 = {
- .name = "qnm_camnoc_sf_cam_ife_2",
- .id = SM8550_MASTER_CAMNOC_SF_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 },
-};
-
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = SM8550_SLAVE_A1NOC_SNOC,
@@ -1342,137 +1117,6 @@ static struct qcom_icc_node qns_gemnoc_sf = {
.links = { SM8550_MASTER_SNOC_SF_MEM_NOC },
};
-static struct qcom_icc_node qns_llcc_disp = {
- .name = "qns_llcc_disp",
- .id = SM8550_SLAVE_LLCC_DISP,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_DISP },
-};
-
-static struct qcom_icc_node ebi_disp = {
- .name = "ebi_disp",
- .id = SM8550_SLAVE_EBI1_DISP,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_disp = {
- .name = "qns_mem_noc_hf_disp",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qns_llcc_cam_ife_0 = {
- .name = "qns_llcc_cam_ife_0",
- .id = SM8550_SLAVE_LLCC_CAM_IFE_0,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node ebi_cam_ife_0 = {
- .name = "ebi_cam_ife_0",
- .id = SM8550_SLAVE_EBI1_CAM_IFE_0,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_cam_ife_0 = {
- .name = "qns_mem_noc_hf_cam_ife_0",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_cam_ife_0 = {
- .name = "qns_mem_noc_sf_cam_ife_0",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 },
-};
-
-static struct qcom_icc_node qns_llcc_cam_ife_1 = {
- .name = "qns_llcc_cam_ife_1",
- .id = SM8550_SLAVE_LLCC_CAM_IFE_1,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node ebi_cam_ife_1 = {
- .name = "ebi_cam_ife_1",
- .id = SM8550_SLAVE_EBI1_CAM_IFE_1,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_cam_ife_1 = {
- .name = "qns_mem_noc_hf_cam_ife_1",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_cam_ife_1 = {
- .name = "qns_mem_noc_sf_cam_ife_1",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 },
-};
-
-static struct qcom_icc_node qns_llcc_cam_ife_2 = {
- .name = "qns_llcc_cam_ife_2",
- .id = SM8550_SLAVE_LLCC_CAM_IFE_2,
- .channels = 4,
- .buswidth = 16,
- .num_links = 1,
- .links = { SM8550_MASTER_LLCC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node ebi_cam_ife_2 = {
- .name = "ebi_cam_ife_2",
- .id = SM8550_SLAVE_EBI1_CAM_IFE_2,
- .channels = 4,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_cam_ife_2 = {
- .name = "qns_mem_noc_hf_cam_ife_2",
- .id = SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 },
-};
-
-static struct qcom_icc_node qns_mem_noc_sf_cam_ife_2 = {
- .name = "qns_mem_noc_sf_cam_ife_2",
- .id = SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 },
-};
-
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = 0x8,
@@ -1639,161 +1283,6 @@ static struct qcom_icc_bcm bcm_sn7 = {
.nodes = { &qns_pcie_mem_noc },
};
-static struct qcom_icc_bcm bcm_acv_disp = {
- .name = "ACV",
- .enable_mask = 0x1,
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mc0_disp = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm0_disp = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh0_disp = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh1_disp = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 2,
- .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
-};
-
-static struct qcom_icc_bcm bcm_acv_cam_ife_0 = {
- .name = "ACV",
- .enable_mask = 0x0,
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_mc0_cam_ife_0 = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_mm0_cam_ife_0 = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_mm1_cam_ife_0 = {
- .name = "MM1",
- .enable_mask = 0x1,
- .num_nodes = 4,
- .nodes = { &qnm_camnoc_hf_cam_ife_0, &qnm_camnoc_icp_cam_ife_0,
- &qnm_camnoc_sf_cam_ife_0, &qns_mem_noc_sf_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_sh0_cam_ife_0 = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_sh1_cam_ife_0 = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 3,
- .nodes = { &qnm_mnoc_hf_cam_ife_0, &qnm_mnoc_sf_cam_ife_0,
- &qnm_pcie_cam_ife_0 },
-};
-
-static struct qcom_icc_bcm bcm_acv_cam_ife_1 = {
- .name = "ACV",
- .enable_mask = 0x0,
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_mc0_cam_ife_1 = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_mm0_cam_ife_1 = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_mm1_cam_ife_1 = {
- .name = "MM1",
- .enable_mask = 0x1,
- .num_nodes = 4,
- .nodes = { &qnm_camnoc_hf_cam_ife_1, &qnm_camnoc_icp_cam_ife_1,
- &qnm_camnoc_sf_cam_ife_1, &qns_mem_noc_sf_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_sh0_cam_ife_1 = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_sh1_cam_ife_1 = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 3,
- .nodes = { &qnm_mnoc_hf_cam_ife_1, &qnm_mnoc_sf_cam_ife_1,
- &qnm_pcie_cam_ife_1 },
-};
-
-static struct qcom_icc_bcm bcm_acv_cam_ife_2 = {
- .name = "ACV",
- .enable_mask = 0x0,
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_mc0_cam_ife_2 = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_mm0_cam_ife_2 = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_mm1_cam_ife_2 = {
- .name = "MM1",
- .enable_mask = 0x1,
- .num_nodes = 4,
- .nodes = { &qnm_camnoc_hf_cam_ife_2, &qnm_camnoc_icp_cam_ife_2,
- &qnm_camnoc_sf_cam_ife_2, &qns_mem_noc_sf_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_sh0_cam_ife_2 = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_cam_ife_2 },
-};
-
-static struct qcom_icc_bcm bcm_sh1_cam_ife_2 = {
- .name = "SH1",
- .enable_mask = 0x1,
- .num_nodes = 3,
- .nodes = { &qnm_mnoc_hf_cam_ife_2, &qnm_mnoc_sf_cam_ife_2,
- &qnm_pcie_cam_ife_2 },
-};
-
static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
@@ -1945,14 +1434,6 @@ static const struct qcom_icc_desc sm8550_cnoc_main = {
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
- &bcm_sh0_disp,
- &bcm_sh1_disp,
- &bcm_sh0_cam_ife_0,
- &bcm_sh1_cam_ife_0,
- &bcm_sh0_cam_ife_1,
- &bcm_sh1_cam_ife_1,
- &bcm_sh0_cam_ife_2,
- &bcm_sh1_cam_ife_2,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
@@ -1971,21 +1452,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
- [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
- [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
- [SLAVE_LLCC_DISP] = &qns_llcc_disp,
- [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_hf_cam_ife_0,
- [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qnm_mnoc_sf_cam_ife_0,
- [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0] = &qnm_pcie_cam_ife_0,
- [SLAVE_LLCC_CAM_IFE_0] = &qns_llcc_cam_ife_0,
- [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_hf_cam_ife_1,
- [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qnm_mnoc_sf_cam_ife_1,
- [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1] = &qnm_pcie_cam_ife_1,
- [SLAVE_LLCC_CAM_IFE_1] = &qns_llcc_cam_ife_1,
- [MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_hf_cam_ife_2,
- [MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qnm_mnoc_sf_cam_ife_2,
- [MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2] = &qnm_pcie_cam_ife_2,
- [SLAVE_LLCC_CAM_IFE_2] = &qns_llcc_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_gem_noc = {
@@ -2044,27 +1510,11 @@ static const struct qcom_icc_desc sm8550_lpass_lpicx_noc = {
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
- &bcm_acv_disp,
- &bcm_mc0_disp,
- &bcm_acv_cam_ife_0,
- &bcm_mc0_cam_ife_0,
- &bcm_acv_cam_ife_1,
- &bcm_mc0_cam_ife_1,
- &bcm_acv_cam_ife_2,
- &bcm_mc0_cam_ife_2,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
- [MASTER_LLCC_DISP] = &llcc_mc_disp,
- [SLAVE_EBI1_DISP] = &ebi_disp,
- [MASTER_LLCC_CAM_IFE_0] = &llcc_mc_cam_ife_0,
- [SLAVE_EBI1_CAM_IFE_0] = &ebi_cam_ife_0,
- [MASTER_LLCC_CAM_IFE_1] = &llcc_mc_cam_ife_1,
- [SLAVE_EBI1_CAM_IFE_1] = &ebi_cam_ife_1,
- [MASTER_LLCC_CAM_IFE_2] = &llcc_mc_cam_ife_2,
- [SLAVE_EBI1_CAM_IFE_2] = &ebi_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_mc_virt = {
@@ -2077,13 +1527,6 @@ static const struct qcom_icc_desc sm8550_mc_virt = {
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
- &bcm_mm0_disp,
- &bcm_mm0_cam_ife_0,
- &bcm_mm1_cam_ife_0,
- &bcm_mm0_cam_ife_1,
- &bcm_mm1_cam_ife_1,
- &bcm_mm0_cam_ife_2,
- &bcm_mm1_cam_ife_2,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
@@ -2100,23 +1543,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [MASTER_MDP_DISP] = &qnm_mdp_disp,
- [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
- [MASTER_CAMNOC_HF_CAM_IFE_0] = &qnm_camnoc_hf_cam_ife_0,
- [MASTER_CAMNOC_ICP_CAM_IFE_0] = &qnm_camnoc_icp_cam_ife_0,
- [MASTER_CAMNOC_SF_CAM_IFE_0] = &qnm_camnoc_sf_cam_ife_0,
- [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_hf_cam_ife_0,
- [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0] = &qns_mem_noc_sf_cam_ife_0,
- [MASTER_CAMNOC_HF_CAM_IFE_1] = &qnm_camnoc_hf_cam_ife_1,
- [MASTER_CAMNOC_ICP_CAM_IFE_1] = &qnm_camnoc_icp_cam_ife_1,
- [MASTER_CAMNOC_SF_CAM_IFE_1] = &qnm_camnoc_sf_cam_ife_1,
- [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_hf_cam_ife_1,
- [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1] = &qns_mem_noc_sf_cam_ife_1,
- [MASTER_CAMNOC_HF_CAM_IFE_2] = &qnm_camnoc_hf_cam_ife_2,
- [MASTER_CAMNOC_ICP_CAM_IFE_2] = &qnm_camnoc_icp_cam_ife_2,
- [MASTER_CAMNOC_SF_CAM_IFE_2] = &qnm_camnoc_sf_cam_ife_2,
- [SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_hf_cam_ife_2,
- [SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2] = &qns_mem_noc_sf_cam_ife_2,
};
static const struct qcom_icc_desc sm8550_mmss_noc = {
diff --git a/drivers/interconnect/qcom/sm8550.h b/drivers/interconnect/qcom/sm8550.h
index 8d5862c04bca..c9b2986e1293 100644
--- a/drivers/interconnect/qcom/sm8550.h
+++ b/drivers/interconnect/qcom/sm8550.h
@@ -12,167 +12,127 @@
#define SM8550_MASTER_A1NOC_SNOC 0
#define SM8550_MASTER_A2NOC_SNOC 1
#define SM8550_MASTER_ANOC_PCIE_GEM_NOC 2
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_0 3
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_1 4
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_CAM_IFE_2 5
-#define SM8550_MASTER_ANOC_PCIE_GEM_NOC_DISP 6
-#define SM8550_MASTER_APPSS_PROC 7
-#define SM8550_MASTER_CAMNOC_HF 8
-#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_0 9
-#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_1 10
-#define SM8550_MASTER_CAMNOC_HF_CAM_IFE_2 11
-#define SM8550_MASTER_CAMNOC_ICP 12
-#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_0 13
-#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_1 14
-#define SM8550_MASTER_CAMNOC_ICP_CAM_IFE_2 15
-#define SM8550_MASTER_CAMNOC_SF 16
-#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_0 17
-#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_1 18
-#define SM8550_MASTER_CAMNOC_SF_CAM_IFE_2 19
-#define SM8550_MASTER_CDSP_HCP 20
-#define SM8550_MASTER_CDSP_PROC 21
-#define SM8550_MASTER_CNOC_CFG 22
-#define SM8550_MASTER_CNOC_MNOC_CFG 23
-#define SM8550_MASTER_COMPUTE_NOC 24
-#define SM8550_MASTER_CRYPTO 25
-#define SM8550_MASTER_GEM_NOC_CNOC 26
-#define SM8550_MASTER_GEM_NOC_PCIE_SNOC 27
-#define SM8550_MASTER_GFX3D 28
-#define SM8550_MASTER_GIC 29
-#define SM8550_MASTER_GIC_AHB 30
-#define SM8550_MASTER_GPU_TCU 31
-#define SM8550_MASTER_IPA 32
-#define SM8550_MASTER_LLCC 33
-#define SM8550_MASTER_LLCC_CAM_IFE_0 34
-#define SM8550_MASTER_LLCC_CAM_IFE_1 35
-#define SM8550_MASTER_LLCC_CAM_IFE_2 36
-#define SM8550_MASTER_LLCC_DISP 37
-#define SM8550_MASTER_LPASS_GEM_NOC 38
-#define SM8550_MASTER_LPASS_LPINOC 39
-#define SM8550_MASTER_LPASS_PROC 40
-#define SM8550_MASTER_LPIAON_NOC 41
-#define SM8550_MASTER_MDP 42
-#define SM8550_MASTER_MDP_DISP 43
-#define SM8550_MASTER_MNOC_HF_MEM_NOC 44
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_0 45
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_1 46
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_CAM_IFE_2 47
-#define SM8550_MASTER_MNOC_HF_MEM_NOC_DISP 48
-#define SM8550_MASTER_MNOC_SF_MEM_NOC 49
-#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_0 50
-#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_1 51
-#define SM8550_MASTER_MNOC_SF_MEM_NOC_CAM_IFE_2 52
-#define SM8550_MASTER_MSS_PROC 53
-#define SM8550_MASTER_PCIE_0 54
-#define SM8550_MASTER_PCIE_1 55
-#define SM8550_MASTER_PCIE_ANOC_CFG 56
-#define SM8550_MASTER_QDSS_BAM 57
-#define SM8550_MASTER_QDSS_ETR 58
-#define SM8550_MASTER_QDSS_ETR_1 59
-#define SM8550_MASTER_QSPI_0 60
-#define SM8550_MASTER_QUP_1 61
-#define SM8550_MASTER_QUP_2 62
-#define SM8550_MASTER_QUP_CORE_0 63
-#define SM8550_MASTER_QUP_CORE_1 64
-#define SM8550_MASTER_QUP_CORE_2 65
-#define SM8550_MASTER_SDCC_2 66
-#define SM8550_MASTER_SDCC_4 67
-#define SM8550_MASTER_SNOC_GC_MEM_NOC 68
-#define SM8550_MASTER_SNOC_SF_MEM_NOC 69
-#define SM8550_MASTER_SP 70
-#define SM8550_MASTER_SYS_TCU 71
-#define SM8550_MASTER_UFS_MEM 72
-#define SM8550_MASTER_USB3_0 73
-#define SM8550_MASTER_VIDEO 74
-#define SM8550_MASTER_VIDEO_CV_PROC 75
-#define SM8550_MASTER_VIDEO_PROC 76
-#define SM8550_MASTER_VIDEO_V_PROC 77
-#define SM8550_SLAVE_A1NOC_SNOC 78
-#define SM8550_SLAVE_A2NOC_SNOC 79
-#define SM8550_SLAVE_AHB2PHY_NORTH 80
-#define SM8550_SLAVE_AHB2PHY_SOUTH 81
-#define SM8550_SLAVE_ANOC_PCIE_GEM_NOC 82
-#define SM8550_SLAVE_AOSS 83
-#define SM8550_SLAVE_APPSS 84
-#define SM8550_SLAVE_BOOT_IMEM 85
-#define SM8550_SLAVE_CAMERA_CFG 86
-#define SM8550_SLAVE_CDSP_MEM_NOC 87
-#define SM8550_SLAVE_CLK_CTL 88
-#define SM8550_SLAVE_CNOC_CFG 89
-#define SM8550_SLAVE_CNOC_MNOC_CFG 90
-#define SM8550_SLAVE_CNOC_MSS 91
-#define SM8550_SLAVE_CPR_NSPCX 92
-#define SM8550_SLAVE_CRYPTO_0_CFG 93
-#define SM8550_SLAVE_CX_RDPM 94
-#define SM8550_SLAVE_DDRSS_CFG 95
-#define SM8550_SLAVE_DISPLAY_CFG 96
-#define SM8550_SLAVE_EBI1 97
-#define SM8550_SLAVE_EBI1_CAM_IFE_0 98
-#define SM8550_SLAVE_EBI1_CAM_IFE_1 99
-#define SM8550_SLAVE_EBI1_CAM_IFE_2 100
-#define SM8550_SLAVE_EBI1_DISP 101
-#define SM8550_SLAVE_GEM_NOC_CNOC 102
-#define SM8550_SLAVE_GFX3D_CFG 103
-#define SM8550_SLAVE_I2C 104
-#define SM8550_SLAVE_IMEM 105
-#define SM8550_SLAVE_IMEM_CFG 106
-#define SM8550_SLAVE_IPA_CFG 107
-#define SM8550_SLAVE_IPC_ROUTER_CFG 108
-#define SM8550_SLAVE_LLCC 109
-#define SM8550_SLAVE_LLCC_CAM_IFE_0 110
-#define SM8550_SLAVE_LLCC_CAM_IFE_1 111
-#define SM8550_SLAVE_LLCC_CAM_IFE_2 112
-#define SM8550_SLAVE_LLCC_DISP 113
-#define SM8550_SLAVE_LPASS_GEM_NOC 114
-#define SM8550_SLAVE_LPASS_QTB_CFG 115
-#define SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC 116
-#define SM8550_SLAVE_LPICX_NOC_LPIAON_NOC 117
-#define SM8550_SLAVE_MEM_NOC_PCIE_SNOC 118
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC 119
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_0 120
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_1 121
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_CAM_IFE_2 122
-#define SM8550_SLAVE_MNOC_HF_MEM_NOC_DISP 123
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC 124
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_0 125
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_1 126
-#define SM8550_SLAVE_MNOC_SF_MEM_NOC_CAM_IFE_2 127
-#define SM8550_SLAVE_MX_RDPM 128
-#define SM8550_SLAVE_NSP_QTB_CFG 129
-#define SM8550_SLAVE_PCIE_0 130
-#define SM8550_SLAVE_PCIE_0_CFG 131
-#define SM8550_SLAVE_PCIE_1 132
-#define SM8550_SLAVE_PCIE_1_CFG 133
-#define SM8550_SLAVE_PCIE_ANOC_CFG 134
-#define SM8550_SLAVE_PDM 135
-#define SM8550_SLAVE_PIMEM_CFG 136
-#define SM8550_SLAVE_PRNG 137
-#define SM8550_SLAVE_QDSS_CFG 138
-#define SM8550_SLAVE_QDSS_STM 139
-#define SM8550_SLAVE_QSPI_0 140
-#define SM8550_SLAVE_QUP_1 141
-#define SM8550_SLAVE_QUP_2 142
-#define SM8550_SLAVE_QUP_CORE_0 143
-#define SM8550_SLAVE_QUP_CORE_1 144
-#define SM8550_SLAVE_QUP_CORE_2 145
-#define SM8550_SLAVE_RBCPR_CX_CFG 146
-#define SM8550_SLAVE_RBCPR_MMCX_CFG 147
-#define SM8550_SLAVE_RBCPR_MXA_CFG 148
-#define SM8550_SLAVE_RBCPR_MXC_CFG 149
-#define SM8550_SLAVE_SDCC_2 150
-#define SM8550_SLAVE_SDCC_4 151
-#define SM8550_SLAVE_SERVICE_MNOC 152
-#define SM8550_SLAVE_SERVICE_PCIE_ANOC 153
-#define SM8550_SLAVE_SNOC_GEM_NOC_GC 154
-#define SM8550_SLAVE_SNOC_GEM_NOC_SF 155
-#define SM8550_SLAVE_SPSS_CFG 156
-#define SM8550_SLAVE_TCSR 157
-#define SM8550_SLAVE_TCU 158
-#define SM8550_SLAVE_TLMM 159
-#define SM8550_SLAVE_TME_CFG 160
-#define SM8550_SLAVE_UFS_MEM_CFG 161
-#define SM8550_SLAVE_USB3_0 162
-#define SM8550_SLAVE_VENUS_CFG 163
-#define SM8550_SLAVE_VSENSE_CTRL_CFG 164
+#define SM8550_MASTER_APPSS_PROC 3
+#define SM8550_MASTER_CAMNOC_HF 4
+#define SM8550_MASTER_CAMNOC_ICP 5
+#define SM8550_MASTER_CAMNOC_SF 6
+#define SM8550_MASTER_CDSP_HCP 7
+#define SM8550_MASTER_CDSP_PROC 8
+#define SM8550_MASTER_CNOC_CFG 9
+#define SM8550_MASTER_CNOC_MNOC_CFG 10
+#define SM8550_MASTER_COMPUTE_NOC 11
+#define SM8550_MASTER_CRYPTO 12
+#define SM8550_MASTER_GEM_NOC_CNOC 13
+#define SM8550_MASTER_GEM_NOC_PCIE_SNOC 14
+#define SM8550_MASTER_GFX3D 15
+#define SM8550_MASTER_GIC 16
+#define SM8550_MASTER_GIC_AHB 17
+#define SM8550_MASTER_GPU_TCU 18
+#define SM8550_MASTER_IPA 19
+#define SM8550_MASTER_LLCC 20
+#define SM8550_MASTER_LPASS_GEM_NOC 21
+#define SM8550_MASTER_LPASS_LPINOC 22
+#define SM8550_MASTER_LPASS_PROC 23
+#define SM8550_MASTER_LPIAON_NOC 24
+#define SM8550_MASTER_MDP 25
+#define SM8550_MASTER_MNOC_HF_MEM_NOC 26
+#define SM8550_MASTER_MNOC_SF_MEM_NOC 27
+#define SM8550_MASTER_MSS_PROC 28
+#define SM8550_MASTER_PCIE_0 29
+#define SM8550_MASTER_PCIE_1 30
+#define SM8550_MASTER_PCIE_ANOC_CFG 31
+#define SM8550_MASTER_QDSS_BAM 32
+#define SM8550_MASTER_QDSS_ETR 33
+#define SM8550_MASTER_QDSS_ETR_1 34
+#define SM8550_MASTER_QSPI_0 35
+#define SM8550_MASTER_QUP_1 36
+#define SM8550_MASTER_QUP_2 37
+#define SM8550_MASTER_QUP_CORE_0 38
+#define SM8550_MASTER_QUP_CORE_1 39
+#define SM8550_MASTER_QUP_CORE_2 40
+#define SM8550_MASTER_SDCC_2 41
+#define SM8550_MASTER_SDCC_4 42
+#define SM8550_MASTER_SNOC_GC_MEM_NOC 43
+#define SM8550_MASTER_SNOC_SF_MEM_NOC 44
+#define SM8550_MASTER_SP 45
+#define SM8550_MASTER_SYS_TCU 46
+#define SM8550_MASTER_UFS_MEM 47
+#define SM8550_MASTER_USB3_0 48
+#define SM8550_MASTER_VIDEO 49
+#define SM8550_MASTER_VIDEO_CV_PROC 50
+#define SM8550_MASTER_VIDEO_PROC 51
+#define SM8550_MASTER_VIDEO_V_PROC 52
+#define SM8550_SLAVE_A1NOC_SNOC 53
+#define SM8550_SLAVE_A2NOC_SNOC 54
+#define SM8550_SLAVE_AHB2PHY_NORTH 55
+#define SM8550_SLAVE_AHB2PHY_SOUTH 56
+#define SM8550_SLAVE_ANOC_PCIE_GEM_NOC 57
+#define SM8550_SLAVE_AOSS 58
+#define SM8550_SLAVE_APPSS 59
+#define SM8550_SLAVE_BOOT_IMEM 60
+#define SM8550_SLAVE_CAMERA_CFG 61
+#define SM8550_SLAVE_CDSP_MEM_NOC 62
+#define SM8550_SLAVE_CLK_CTL 63
+#define SM8550_SLAVE_CNOC_CFG 64
+#define SM8550_SLAVE_CNOC_MNOC_CFG 65
+#define SM8550_SLAVE_CNOC_MSS 66
+#define SM8550_SLAVE_CPR_NSPCX 67
+#define SM8550_SLAVE_CRYPTO_0_CFG 68
+#define SM8550_SLAVE_CX_RDPM 69
+#define SM8550_SLAVE_DDRSS_CFG 70
+#define SM8550_SLAVE_DISPLAY_CFG 71
+#define SM8550_SLAVE_EBI1 72
+#define SM8550_SLAVE_GEM_NOC_CNOC 73
+#define SM8550_SLAVE_GFX3D_CFG 74
+#define SM8550_SLAVE_I2C 75
+#define SM8550_SLAVE_IMEM 76
+#define SM8550_SLAVE_IMEM_CFG 77
+#define SM8550_SLAVE_IPA_CFG 78
+#define SM8550_SLAVE_IPC_ROUTER_CFG 79
+#define SM8550_SLAVE_LLCC 80
+#define SM8550_SLAVE_LPASS_GEM_NOC 81
+#define SM8550_SLAVE_LPASS_QTB_CFG 82
+#define SM8550_SLAVE_LPIAON_NOC_LPASS_AG_NOC 83
+#define SM8550_SLAVE_LPICX_NOC_LPIAON_NOC 84
+#define SM8550_SLAVE_MEM_NOC_PCIE_SNOC 85
+#define SM8550_SLAVE_MNOC_HF_MEM_NOC 86
+#define SM8550_SLAVE_MNOC_SF_MEM_NOC 87
+#define SM8550_SLAVE_MX_RDPM 88
+#define SM8550_SLAVE_NSP_QTB_CFG 89
+#define SM8550_SLAVE_PCIE_0 90
+#define SM8550_SLAVE_PCIE_0_CFG 91
+#define SM8550_SLAVE_PCIE_1 92
+#define SM8550_SLAVE_PCIE_1_CFG 93
+#define SM8550_SLAVE_PCIE_ANOC_CFG 94
+#define SM8550_SLAVE_PDM 95
+#define SM8550_SLAVE_PIMEM_CFG 96
+#define SM8550_SLAVE_PRNG 97
+#define SM8550_SLAVE_QDSS_CFG 98
+#define SM8550_SLAVE_QDSS_STM 99
+#define SM8550_SLAVE_QSPI_0 100
+#define SM8550_SLAVE_QUP_1 101
+#define SM8550_SLAVE_QUP_2 102
+#define SM8550_SLAVE_QUP_CORE_0 103
+#define SM8550_SLAVE_QUP_CORE_1 104
+#define SM8550_SLAVE_QUP_CORE_2 105
+#define SM8550_SLAVE_RBCPR_CX_CFG 106
+#define SM8550_SLAVE_RBCPR_MMCX_CFG 107
+#define SM8550_SLAVE_RBCPR_MXA_CFG 108
+#define SM8550_SLAVE_RBCPR_MXC_CFG 109
+#define SM8550_SLAVE_SDCC_2 110
+#define SM8550_SLAVE_SDCC_4 111
+#define SM8550_SLAVE_SERVICE_MNOC 112
+#define SM8550_SLAVE_SERVICE_PCIE_ANOC 113
+#define SM8550_SLAVE_SNOC_GEM_NOC_GC 114
+#define SM8550_SLAVE_SNOC_GEM_NOC_SF 115
+#define SM8550_SLAVE_SPSS_CFG 116
+#define SM8550_SLAVE_TCSR 117
+#define SM8550_SLAVE_TCU 118
+#define SM8550_SLAVE_TLMM 119
+#define SM8550_SLAVE_TME_CFG 120
+#define SM8550_SLAVE_UFS_MEM_CFG 121
+#define SM8550_SLAVE_USB3_0 122
+#define SM8550_SLAVE_VENUS_CFG 123
+#define SM8550_SLAVE_VSENSE_CTRL_CFG 124
#endif
diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c
index cbaf4f9c41be..99824675ee3f 100644
--- a/drivers/interconnect/qcom/x1e80100.c
+++ b/drivers/interconnect/qcom/x1e80100.c
@@ -670,150 +670,6 @@ static struct qcom_icc_node xm_usb4_2 = {
.links = { X1E80100_SLAVE_AGGRE_USB_SOUTH },
};
-static struct qcom_icc_node qnm_mnoc_hf_disp = {
- .name = "qnm_mnoc_hf_disp",
- .id = X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node qnm_pcie_disp = {
- .name = "qnm_pcie_disp",
- .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_LLCC_DISP },
-};
-
-static struct qcom_icc_node llcc_mc_disp = {
- .name = "llcc_mc_disp",
- .id = X1E80100_MASTER_LLCC_DISP,
- .channels = 8,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_EBI1_DISP },
-};
-
-static struct qcom_icc_node qnm_mdp_disp = {
- .name = "qnm_mdp_disp",
- .id = X1E80100_MASTER_MDP_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qnm_pcie_pcie = {
- .name = "qnm_pcie_pcie",
- .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_LLCC_PCIE },
-};
-
-static struct qcom_icc_node llcc_mc_pcie = {
- .name = "llcc_mc_pcie",
- .id = X1E80100_MASTER_LLCC_PCIE,
- .channels = 8,
- .buswidth = 4,
- .num_links = 1,
- .links = { X1E80100_SLAVE_EBI1_PCIE },
-};
-
-static struct qcom_icc_node qnm_pcie_north_gem_noc_pcie = {
- .name = "qnm_pcie_north_gem_noc_pcie",
- .id = X1E80100_MASTER_PCIE_NORTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
-};
-
-static struct qcom_icc_node qnm_pcie_south_gem_noc_pcie = {
- .name = "qnm_pcie_south_gem_noc_pcie",
- .id = X1E80100_MASTER_PCIE_SOUTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_3_pcie = {
- .name = "xm_pcie_3_pcie",
- .id = X1E80100_MASTER_PCIE_3_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_4_pcie = {
- .name = "xm_pcie_4_pcie",
- .id = X1E80100_MASTER_PCIE_4_PCIE,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_5_pcie = {
- .name = "xm_pcie_5_pcie",
- .id = X1E80100_MASTER_PCIE_5_PCIE,
- .channels = 1,
- .buswidth = 8,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_0_pcie = {
- .name = "xm_pcie_0_pcie",
- .id = X1E80100_MASTER_PCIE_0_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_1_pcie = {
- .name = "xm_pcie_1_pcie",
- .id = X1E80100_MASTER_PCIE_1_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_2_pcie = {
- .name = "xm_pcie_2_pcie",
- .id = X1E80100_MASTER_PCIE_2_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_6a_pcie = {
- .name = "xm_pcie_6a_pcie",
- .id = X1E80100_MASTER_PCIE_6A_PCIE,
- .channels = 1,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
-static struct qcom_icc_node xm_pcie_6b_pcie = {
- .name = "xm_pcie_6b_pcie",
- .id = X1E80100_MASTER_PCIE_6B_PCIE,
- .channels = 1,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE },
-};
-
static struct qcom_icc_node qns_a1noc_snoc = {
.name = "qns_a1noc_snoc",
.id = X1E80100_SLAVE_A1NOC_SNOC,
@@ -1514,76 +1370,6 @@ static struct qcom_icc_node qns_aggre_usb_south_snoc = {
.links = { X1E80100_MASTER_AGGRE_USB_SOUTH },
};
-static struct qcom_icc_node qns_llcc_disp = {
- .name = "qns_llcc_disp",
- .id = X1E80100_SLAVE_LLCC_DISP,
- .channels = 8,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_MASTER_LLCC_DISP },
-};
-
-static struct qcom_icc_node ebi_disp = {
- .name = "ebi_disp",
- .id = X1E80100_SLAVE_EBI1_DISP,
- .channels = 8,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_mem_noc_hf_disp = {
- .name = "qns_mem_noc_hf_disp",
- .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP,
- .channels = 2,
- .buswidth = 32,
- .num_links = 1,
- .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP },
-};
-
-static struct qcom_icc_node qns_llcc_pcie = {
- .name = "qns_llcc_pcie",
- .id = X1E80100_SLAVE_LLCC_PCIE,
- .channels = 8,
- .buswidth = 16,
- .num_links = 1,
- .links = { X1E80100_MASTER_LLCC_PCIE },
-};
-
-static struct qcom_icc_node ebi_pcie = {
- .name = "ebi_pcie",
- .id = X1E80100_SLAVE_EBI1_PCIE,
- .channels = 8,
- .buswidth = 4,
- .num_links = 0,
-};
-
-static struct qcom_icc_node qns_pcie_mem_noc_pcie = {
- .name = "qns_pcie_mem_noc_pcie",
- .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE },
-};
-
-static struct qcom_icc_node qns_pcie_north_gem_noc_pcie = {
- .name = "qns_pcie_north_gem_noc_pcie",
- .id = X1E80100_SLAVE_PCIE_NORTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_MASTER_PCIE_NORTH_PCIE },
-};
-
-static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = {
- .name = "qns_pcie_south_gem_noc_pcie",
- .id = X1E80100_SLAVE_PCIE_SOUTH_PCIE,
- .channels = 1,
- .buswidth = 64,
- .num_links = 1,
- .links = { X1E80100_MASTER_PCIE_SOUTH_PCIE },
-};
-
static struct qcom_icc_bcm bcm_acv = {
.name = "ACV",
.enable_mask = BIT(3),
@@ -1756,73 +1542,7 @@ static struct qcom_icc_bcm bcm_sn4 = {
.nodes = { &qnm_usb_anoc },
};
-static struct qcom_icc_bcm bcm_acv_disp = {
- .name = "ACV",
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mc0_disp = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm0_disp = {
- .name = "MM0",
- .num_nodes = 1,
- .nodes = { &qns_mem_noc_hf_disp },
-};
-
-static struct qcom_icc_bcm bcm_mm1_disp = {
- .name = "MM1",
- .num_nodes = 1,
- .nodes = { &qnm_mdp_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh0_disp = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_disp },
-};
-
-static struct qcom_icc_bcm bcm_sh1_disp = {
- .name = "SH1",
- .num_nodes = 2,
- .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp },
-};
-
-static struct qcom_icc_bcm bcm_acv_pcie = {
- .name = "ACV",
- .num_nodes = 1,
- .nodes = { &ebi_pcie },
-};
-
-static struct qcom_icc_bcm bcm_mc0_pcie = {
- .name = "MC0",
- .num_nodes = 1,
- .nodes = { &ebi_pcie },
-};
-
-static struct qcom_icc_bcm bcm_pc0_pcie = {
- .name = "PC0",
- .num_nodes = 1,
- .nodes = { &qns_pcie_mem_noc_pcie },
-};
-
-static struct qcom_icc_bcm bcm_sh0_pcie = {
- .name = "SH0",
- .num_nodes = 1,
- .nodes = { &qns_llcc_pcie },
-};
-
-static struct qcom_icc_bcm bcm_sh1_pcie = {
- .name = "SH1",
- .num_nodes = 1,
- .nodes = { &qnm_pcie_pcie },
-};
-
-static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
};
static struct qcom_icc_node * const aggre1_noc_nodes[] = {
@@ -1983,10 +1703,6 @@ static const struct qcom_icc_desc x1e80100_cnoc_main = {
static struct qcom_icc_bcm * const gem_noc_bcms[] = {
&bcm_sh0,
&bcm_sh1,
- &bcm_sh0_disp,
- &bcm_sh1_disp,
- &bcm_sh0_pcie,
- &bcm_sh1_pcie,
};
static struct qcom_icc_node * const gem_noc_nodes[] = {
@@ -2005,11 +1721,6 @@ static struct qcom_icc_node * const gem_noc_nodes[] = {
[SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc,
[SLAVE_LLCC] = &qns_llcc,
[SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie,
- [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp,
- [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp,
- [SLAVE_LLCC_DISP] = &qns_llcc_disp,
- [MASTER_ANOC_PCIE_GEM_NOC_PCIE] = &qnm_pcie_pcie,
- [SLAVE_LLCC_PCIE] = &qns_llcc_pcie,
};
static const struct qcom_icc_desc x1e80100_gem_noc = {
@@ -2019,7 +1730,7 @@ static const struct qcom_icc_desc x1e80100_gem_noc = {
.num_bcms = ARRAY_SIZE(gem_noc_bcms),
};
-static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = {
+static struct qcom_icc_bcm * const lpass_ag_noc_bcms[] = {
};
static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
@@ -2068,19 +1779,11 @@ static const struct qcom_icc_desc x1e80100_lpass_lpicx_noc = {
static struct qcom_icc_bcm * const mc_virt_bcms[] = {
&bcm_acv,
&bcm_mc0,
- &bcm_acv_disp,
- &bcm_mc0_disp,
- &bcm_acv_pcie,
- &bcm_mc0_pcie,
};
static struct qcom_icc_node * const mc_virt_nodes[] = {
[MASTER_LLCC] = &llcc_mc,
[SLAVE_EBI1] = &ebi,
- [MASTER_LLCC_DISP] = &llcc_mc_disp,
- [SLAVE_EBI1_DISP] = &ebi_disp,
- [MASTER_LLCC_PCIE] = &llcc_mc_pcie,
- [SLAVE_EBI1_PCIE] = &ebi_pcie,
};
static const struct qcom_icc_desc x1e80100_mc_virt = {
@@ -2093,8 +1796,6 @@ static const struct qcom_icc_desc x1e80100_mc_virt = {
static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
&bcm_mm0,
&bcm_mm1,
- &bcm_mm0_disp,
- &bcm_mm1_disp,
};
static struct qcom_icc_node * const mmss_noc_nodes[] = {
@@ -2111,8 +1812,6 @@ static struct qcom_icc_node * const mmss_noc_nodes[] = {
[SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
[SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
- [MASTER_MDP_DISP] = &qnm_mdp_disp,
- [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp,
};
static const struct qcom_icc_desc x1e80100_mmss_noc = {
@@ -2140,16 +1839,12 @@ static const struct qcom_icc_desc x1e80100_nsp_noc = {
static struct qcom_icc_bcm * const pcie_center_anoc_bcms[] = {
&bcm_pc0,
- &bcm_pc0_pcie,
};
static struct qcom_icc_node * const pcie_center_anoc_nodes[] = {
[MASTER_PCIE_NORTH] = &qnm_pcie_north_gem_noc,
[MASTER_PCIE_SOUTH] = &qnm_pcie_south_gem_noc,
[SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc,
- [MASTER_PCIE_NORTH_PCIE] = &qnm_pcie_north_gem_noc_pcie,
- [MASTER_PCIE_SOUTH_PCIE] = &qnm_pcie_south_gem_noc_pcie,
- [SLAVE_ANOC_PCIE_GEM_NOC_PCIE] = &qns_pcie_mem_noc_pcie,
};
static const struct qcom_icc_desc x1e80100_pcie_center_anoc = {
@@ -2167,10 +1862,6 @@ static struct qcom_icc_node * const pcie_north_anoc_nodes[] = {
[MASTER_PCIE_4] = &xm_pcie_4,
[MASTER_PCIE_5] = &xm_pcie_5,
[SLAVE_PCIE_NORTH] = &qns_pcie_north_gem_noc,
- [MASTER_PCIE_3_PCIE] = &xm_pcie_3_pcie,
- [MASTER_PCIE_4_PCIE] = &xm_pcie_4_pcie,
- [MASTER_PCIE_5_PCIE] = &xm_pcie_5_pcie,
- [SLAVE_PCIE_NORTH_PCIE] = &qns_pcie_north_gem_noc_pcie,
};
static const struct qcom_icc_desc x1e80100_pcie_north_anoc = {
@@ -2180,7 +1871,7 @@ static const struct qcom_icc_desc x1e80100_pcie_north_anoc = {
.num_bcms = ARRAY_SIZE(pcie_north_anoc_bcms),
};
-static struct qcom_icc_bcm *pcie_south_anoc_bcms[] = {
+static struct qcom_icc_bcm * const pcie_south_anoc_bcms[] = {
};
static struct qcom_icc_node * const pcie_south_anoc_nodes[] = {
@@ -2190,12 +1881,6 @@ static struct qcom_icc_node * const pcie_south_anoc_nodes[] = {
[MASTER_PCIE_6A] = &xm_pcie_6a,
[MASTER_PCIE_6B] = &xm_pcie_6b,
[SLAVE_PCIE_SOUTH] = &qns_pcie_south_gem_noc,
- [MASTER_PCIE_0_PCIE] = &xm_pcie_0_pcie,
- [MASTER_PCIE_1_PCIE] = &xm_pcie_1_pcie,
- [MASTER_PCIE_2_PCIE] = &xm_pcie_2_pcie,
- [MASTER_PCIE_6A_PCIE] = &xm_pcie_6a_pcie,
- [MASTER_PCIE_6B_PCIE] = &xm_pcie_6b_pcie,
- [SLAVE_PCIE_SOUTH_PCIE] = &qns_pcie_south_gem_noc_pcie,
};
static const struct qcom_icc_desc x1e80100_pcie_south_anoc = {
@@ -2205,7 +1890,7 @@ static const struct qcom_icc_desc x1e80100_pcie_south_anoc = {
.num_bcms = ARRAY_SIZE(pcie_south_anoc_bcms),
};
-static struct qcom_icc_bcm *system_noc_bcms[] = {
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
&bcm_sn0,
&bcm_sn2,
&bcm_sn3,
@@ -2243,7 +1928,7 @@ static const struct qcom_icc_desc x1e80100_usb_center_anoc = {
.num_bcms = ARRAY_SIZE(usb_center_anoc_bcms),
};
-static struct qcom_icc_bcm *usb_north_anoc_bcms[] = {
+static struct qcom_icc_bcm * const usb_north_anoc_bcms[] = {
};
static struct qcom_icc_node * const usb_north_anoc_nodes[] = {
@@ -2259,7 +1944,7 @@ static const struct qcom_icc_desc x1e80100_usb_north_anoc = {
.num_bcms = ARRAY_SIZE(usb_north_anoc_bcms),
};
-static struct qcom_icc_bcm *usb_south_anoc_bcms[] = {
+static struct qcom_icc_bcm * const usb_south_anoc_bcms[] = {
};
static struct qcom_icc_node * const usb_south_anoc_nodes[] = {
diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c
index 1ba14cb45d5a..c9e5361e17c5 100644
--- a/drivers/interconnect/samsung/exynos.c
+++ b/drivers/interconnect/samsung/exynos.c
@@ -82,7 +82,7 @@ static int exynos_generic_icc_set(struct icc_node *src, struct icc_node *dst)
return 0;
}
-static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec,
+static struct icc_node *exynos_generic_icc_xlate(const struct of_phandle_args *spec,
void *data)
{
struct exynos_icc_priv *priv = data;
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b58f5a3311c3..e4cb26f6a943 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -1711,6 +1711,14 @@ static size_t iommu_dma_opt_mapping_size(void)
return iova_rcache_range();
}
+static size_t iommu_dma_max_mapping_size(struct device *dev)
+{
+ if (dev_is_untrusted(dev))
+ return swiotlb_max_mapping_size(dev);
+
+ return SIZE_MAX;
+}
+
static const struct dma_map_ops iommu_dma_ops = {
.flags = DMA_F_PCI_P2PDMA_SUPPORTED,
.alloc = iommu_dma_alloc,
@@ -1733,6 +1741,7 @@ static const struct dma_map_ops iommu_dma_ops = {
.unmap_resource = iommu_dma_unmap_resource,
.get_merge_boundary = iommu_dma_get_merge_boundary,
.opt_mapping_size = iommu_dma_opt_mapping_size,
+ .max_mapping_size = iommu_dma_max_mapping_size,
};
/*
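The new ->max_mapping_size callback above lets dma_max_mapping_size() report the swiotlb bounce-buffer limit for untrusted devices instead of SIZE_MAX. A minimal sketch of how a consumer might honour that limit follows; the foo_chunk_size() helper and the chunking scenario are hypothetical and not part of this patch:

#include <linux/dma-mapping.h>
#include <linux/minmax.h>

/*
 * Illustrative only: cap a streaming buffer chunk at whatever the DMA
 * layer can map in one go; for untrusted devices behind an IOMMU this
 * now reflects swiotlb_max_mapping_size().
 */
static size_t foo_chunk_size(struct device *dev, size_t wanted)
{
	return min_t(size_t, wanted, dma_max_mapping_size(dev));
}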
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 6cf9f48e7d8c..f52fb39c968e 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -87,8 +87,8 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
the default value.
config INTEL_IOMMU_PERF_EVENTS
- def_bool y
bool "Intel IOMMU performance events"
+ default y
depends on INTEL_IOMMU && PERF_EVENTS
help
Selecting this option will enable the performance monitoring
diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c
index b1471ba016a5..866bf48d803b 100644
--- a/drivers/ipack/ipack.c
+++ b/drivers/ipack/ipack.c
@@ -187,7 +187,7 @@ static struct attribute *ipack_attrs[] = {
};
ATTRIBUTE_GROUPS(ipack);
-static struct bus_type ipack_bus_type = {
+static const struct bus_type ipack_bus_type = {
.name = "ipack",
.probe = ipack_bus_probe,
.match = ipack_bus_match,
diff --git a/drivers/irqchip/irq-renesas-rzg2l.c b/drivers/irqchip/irq-renesas-rzg2l.c
index 9494fc26259c..ae67fec2ab46 100644
--- a/drivers/irqchip/irq-renesas-rzg2l.c
+++ b/drivers/irqchip/irq-renesas-rzg2l.c
@@ -85,10 +85,9 @@ static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
return data->domain->host_data;
}
-static void rzg2l_irq_eoi(struct irq_data *d)
+static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hw_irq = hwirq - IRQC_IRQ_START;
u32 bit = BIT(hw_irq);
u32 iitsr, iscr;
@@ -99,20 +98,30 @@ static void rzg2l_irq_eoi(struct irq_data *d)
* ISCR can only be cleared if the type is falling-edge, rising-edge or
* falling/rising-edge.
*/
- if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq)))
+ if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
writel_relaxed(iscr & ~bit, priv->base + ISCR);
+ /*
+ * Enforce that the posted write is flushed to prevent the interrupt
+ * that was just handled from being raised again.
+ */
+ readl_relaxed(priv->base + ISCR);
+ }
}
-static void rzg2l_tint_eoi(struct irq_data *d)
+static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_TINT_START;
- struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
- u32 bit = BIT(hw_irq);
+ u32 bit = BIT(hwirq - IRQC_TINT_START);
u32 reg;
reg = readl_relaxed(priv->base + TSCR);
- if (reg & bit)
+ if (reg & bit) {
writel_relaxed(reg & ~bit, priv->base + TSCR);
+ /*
+ * Enforce that the posted write is flushed to prevent the interrupt
+ * that was just handled from being raised again.
+ */
+ readl_relaxed(priv->base + TSCR);
+ }
}
static void rzg2l_irqc_eoi(struct irq_data *d)
@@ -122,9 +131,9 @@ static void rzg2l_irqc_eoi(struct irq_data *d)
raw_spin_lock(&priv->lock);
if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
- rzg2l_irq_eoi(d);
+ rzg2l_clear_irq_int(priv, hw_irq);
else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
- rzg2l_tint_eoi(d);
+ rzg2l_clear_tint_int(priv, hw_irq);
raw_spin_unlock(&priv->lock);
irq_chip_eoi_parent(d);
}
@@ -142,7 +151,7 @@ static void rzg2l_irqc_irq_disable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -154,7 +163,6 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
unsigned int hw_irq = irqd_to_hwirq(d);
if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
- unsigned long tint = (uintptr_t)irq_data_get_irq_chip_data(d);
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
u32 offset = hw_irq - IRQC_TINT_START;
u32 tssr_offset = TSSR_OFFSET(offset);
@@ -163,7 +171,7 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
raw_spin_lock(&priv->lock);
reg = readl_relaxed(priv->base + TSSR(tssr_index));
- reg |= (TIEN | tint) << TSSEL_SHIFT(tssr_offset);
+ reg |= TIEN << TSSEL_SHIFT(tssr_offset);
writel_relaxed(reg, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
}
@@ -172,8 +180,10 @@ static void rzg2l_irqc_irq_enable(struct irq_data *d)
static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int hw_irq = irqd_to_hwirq(d) - IRQC_IRQ_START;
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
+ unsigned int hwirq = irqd_to_hwirq(d);
+ u32 iitseln = hwirq - IRQC_IRQ_START;
+ bool clear_irq_int = false;
u16 sense, tmp;
switch (type & IRQ_TYPE_SENSE_MASK) {
@@ -183,14 +193,17 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
case IRQ_TYPE_EDGE_FALLING:
sense = IITSR_IITSEL_EDGE_FALLING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_RISING:
sense = IITSR_IITSEL_EDGE_RISING;
+ clear_irq_int = true;
break;
case IRQ_TYPE_EDGE_BOTH:
sense = IITSR_IITSEL_EDGE_BOTH;
+ clear_irq_int = true;
break;
default:
@@ -199,21 +212,40 @@ static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
raw_spin_lock(&priv->lock);
tmp = readl_relaxed(priv->base + IITSR);
- tmp &= ~IITSR_IITSEL_MASK(hw_irq);
- tmp |= IITSR_IITSEL(hw_irq, sense);
+ tmp &= ~IITSR_IITSEL_MASK(iitseln);
+ tmp |= IITSR_IITSEL(iitseln, sense);
+ if (clear_irq_int)
+ rzg2l_clear_irq_int(priv, hwirq);
writel_relaxed(tmp, priv->base + IITSR);
raw_spin_unlock(&priv->lock);
return 0;
}
+static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
+ u32 reg, u32 tssr_offset, u8 tssr_index)
+{
+ u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
+ u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));
+
+ /* Clear the relevant byte in reg */
+ reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
+ /* Set TINT and leave TIEN clear */
+ reg |= tint << TSSEL_SHIFT(tssr_offset);
+ writel_relaxed(reg, priv->base + TSSR(tssr_index));
+
+ return reg | tien;
+}
+
static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
unsigned int hwirq = irqd_to_hwirq(d);
u32 titseln = hwirq - IRQC_TINT_START;
+ u32 tssr_offset = TSSR_OFFSET(titseln);
+ u8 tssr_index = TSSR_INDEX(titseln);
u8 index, sense;
- u32 reg;
+ u32 reg, tssr;
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_RISING:
@@ -235,10 +267,14 @@ static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
}
raw_spin_lock(&priv->lock);
+ tssr = readl_relaxed(priv->base + TSSR(tssr_index));
+ tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
reg = readl_relaxed(priv->base + TITSR(index));
reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
reg |= sense << (titseln * TITSEL_WIDTH);
writel_relaxed(reg, priv->base + TITSR(index));
+ rzg2l_clear_tint_int(priv, hwirq);
+ writel_relaxed(tssr, priv->base + TSSR(tssr_index));
raw_spin_unlock(&priv->lock);
return 0;
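The readl_relaxed() calls added after the ISCR/TSCR writes above follow the usual pattern for flushing a posted MMIO write before the CPU moves on. A minimal sketch of that pattern under assumed names (the 0x10 status-register offset and the clear_status_bit() helper are made up for illustration, not taken from this driver):

/* Illustrative only: clear a status bit and flush the posted write. */
static void clear_status_bit(void __iomem *base, u32 bit)
{
	u32 reg = readl_relaxed(base + 0x10);	/* hypothetical status register */

	if (reg & bit) {
		writel_relaxed(reg & ~bit, base + 0x10);
		/* Read back so the write leaves the write buffer before we return. */
		readl_relaxed(base + 0x10);
	}
}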
diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c
index 61994da7bad0..267045b76505 100644
--- a/drivers/mcb/mcb-core.c
+++ b/drivers/mcb/mcb-core.c
@@ -156,7 +156,7 @@ static const struct attribute_group *mcb_carrier_groups[] = {
};
-static struct bus_type mcb_bus_type = {
+static const struct bus_type mcb_bus_type = {
.name = "mcb",
.match = mcb_match,
.uevent = mcb_uevent,
@@ -165,7 +165,7 @@ static struct bus_type mcb_bus_type = {
.shutdown = mcb_shutdown,
};
-static struct device_type mcb_carrier_device_type = {
+static const struct device_type mcb_carrier_device_type = {
.name = "mcb-carrier",
.groups = mcb_carrier_groups,
};
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 68ce56fc61d0..35b1080752cd 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -519,7 +519,6 @@ config DM_VERITY
If unsure, say N.
config DM_VERITY_VERIFY_ROOTHASH_SIG
- def_bool n
bool "Verity data device root hash signature verification support"
depends on DM_VERITY
select SYSTEM_DATA_VERIFICATION
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index d822ab2f739b..37b9f8f1ae1a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1699,7 +1699,6 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
struct bio_vec bv;
sector_t sector, logical_sector, area, offset;
struct page *page;
- void *buffer;
get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset,
@@ -1708,13 +1707,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
logical_sector = dio->range.logical_sector;
page = mempool_alloc(&ic->recheck_pool, GFP_NOIO);
- buffer = page_to_virt(page);
__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos = 0;
do {
+ sector_t alignment;
char *mem;
+ char *buffer = page_to_virt(page);
int r;
struct dm_io_request io_req;
struct dm_io_region io_loc;
@@ -1727,6 +1727,14 @@ static noinline void integrity_recheck(struct dm_integrity_io *dio, char *checks
io_loc.sector = sector;
io_loc.count = ic->sectors_per_block;
+ /* Align the bio to logical block size */
+ alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);
+ alignment &= -alignment;
+ io_loc.sector = round_down(io_loc.sector, alignment);
+ io_loc.count += sector - io_loc.sector;
+ buffer += (sector - io_loc.sector) << SECTOR_SHIFT;
+ io_loc.count = round_up(io_loc.count, alignment);
+
r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
if (unlikely(r)) {
dio->bi_status = errno_to_blk_status(r);
@@ -1848,12 +1856,12 @@ again:
r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
if (unlikely(r)) {
+ if (likely(checksums != checksums_onstack))
+ kfree(checksums);
if (r > 0) {
- integrity_recheck(dio, checksums);
+ integrity_recheck(dio, checksums_onstack);
goto skip_io;
}
- if (likely(checksums != checksums_onstack))
- kfree(checksums);
goto error;
}
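The alignment computation above leans on the two's-complement identity that x & -x isolates the lowest set bit of x; applied to the OR of several values it yields the largest power of two dividing all of them, i.e. the largest alignment that the start sector, the bio size and the page size all honour. A standalone sketch of the arithmetic, with made-up numbers purely for illustration:

#include <stdio.h>

/* Largest power of two dividing every input: OR them, keep the lowest set bit. */
static unsigned long common_alignment(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long x = a | b | c;

	return x & -x;
}

int main(void)
{
	/* e.g. start sector 24, 16 sectors in the bio, 8 sectors per page */
	printf("%lu\n", common_alignment(24, 16, 8));	/* prints 8 */
	return 0;
}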
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index bf7a574499a3..0ace06d1bee3 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -684,8 +684,10 @@ static void dm_exception_table_exit(struct dm_exception_table *et,
for (i = 0; i < size; i++) {
slot = et->table + i;
- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list)
+ hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
kmem_cache_free(mem, ex);
+ cond_resched();
+ }
}
kvfree(et->table);
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index a083921a8968..224b488794e5 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -755,7 +755,7 @@ const char *const tegra_mc_error_names[8] = {
[6] = "SMMU translation error",
};
-struct icc_node *tegra_mc_icc_xlate(struct of_phandle_args *spec, void *data)
+struct icc_node *tegra_mc_icc_xlate(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
struct icc_node *node;
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 00ed2b6a0d1b..47c0c19e13fd 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1285,7 +1285,7 @@ to_tegra_emc_provider(struct icc_provider *provider)
}
static struct icc_node_data *
-emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node_data *ndata;
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 470b7dbab2c2..9d7393e19f12 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -1170,7 +1170,7 @@ static int tegra124_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
}
static struct icc_node_data *
-tegra124_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+tegra124_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
const struct tegra_mc_client *client;
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index fcd4aea48bda..57d9ae12fcfe 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -236,7 +236,7 @@ static int tegra_emc_icc_set_bw(struct icc_node *src, struct icc_node *dst)
}
static struct icc_node *
-tegra_emc_of_icc_xlate(struct of_phandle_args *spec, void *data)
+tegra_emc_of_icc_xlate(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node *node;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index fd595c851a27..97cf59523b0b 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -950,7 +950,7 @@ to_tegra_emc_provider(struct icc_provider *provider)
}
static struct icc_node_data *
-emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node_data *ndata;
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index aa4b97d5e732..a3022e715dee 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -390,7 +390,7 @@ static int tegra20_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
}
static struct icc_node_data *
-tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+tegra20_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
unsigned int i, idx = spec->args[0];
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 9eae25c57ec6..d7b0a23c2d7d 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1468,7 +1468,7 @@ to_tegra_emc_provider(struct icc_provider *provider)
}
static struct icc_node_data *
-emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+emc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct icc_provider *provider = data;
struct icc_node_data *ndata;
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index 06f8b35e0a14..d3e685c8431f 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -1332,7 +1332,7 @@ static int tegra30_mc_icc_aggreate(struct icc_node *node, u32 tag, u32 avg_bw,
}
static struct icc_node_data *
-tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+tegra30_mc_of_icc_xlate_extended(const struct of_phandle_args *spec, void *data)
{
struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
const struct tegra_mc_client *client;
diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c
index ea5fbcbbe4a5..ef326d6d566e 100644
--- a/drivers/mfd/rave-sp.c
+++ b/drivers/mfd/rave-sp.c
@@ -471,8 +471,8 @@ static void rave_sp_receive_frame(struct rave_sp *sp,
rave_sp_receive_reply(sp, data, length);
}
-static ssize_t rave_sp_receive_buf(struct serdev_device *serdev,
- const u8 *buf, size_t size)
+static size_t rave_sp_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t size)
{
struct device *dev = &serdev->dev;
struct rave_sp *sp = dev_get_drvdata(dev);
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index ee590c4a1537..6eac0f335915 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -251,7 +251,7 @@ static int ssc_probe(struct platform_device *pdev)
return 0;
}
-static int ssc_remove(struct platform_device *pdev)
+static void ssc_remove(struct platform_device *pdev)
{
struct ssc_device *ssc = platform_get_drvdata(pdev);
@@ -260,8 +260,6 @@ static int ssc_remove(struct platform_device *pdev)
mutex_lock(&user_lock);
list_del(&ssc->list);
mutex_unlock(&user_lock);
-
- return 0;
}
static struct platform_driver ssc_driver = {
@@ -271,7 +269,7 @@ static struct platform_driver ssc_driver = {
},
.id_table = atmel_ssc_devtypes,
.probe = ssc_probe,
- .remove = ssc_remove,
+ .remove_new = ssc_remove,
};
module_platform_driver(ssc_driver);
diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c
index 25ce725035e7..bcc005dff1c0 100644
--- a/drivers/misc/cxl/of.c
+++ b/drivers/misc/cxl/of.c
@@ -431,7 +431,7 @@ int cxl_of_read_adapter_properties(struct cxl *adapter, struct device_node *np)
return 0;
}
-static int cxl_of_remove(struct platform_device *pdev)
+static void cxl_of_remove(struct platform_device *pdev)
{
struct cxl *adapter;
int afu;
@@ -441,7 +441,6 @@ static int cxl_of_remove(struct platform_device *pdev)
cxl_guest_remove_afu(adapter->afu[afu]);
cxl_guest_remove_adapter(adapter);
- return 0;
}
static void cxl_of_shutdown(struct platform_device *pdev)
@@ -501,6 +500,6 @@ struct platform_driver cxl_of_driver = {
.owner = THIS_MODULE
},
.probe = cxl_of_probe,
- .remove = cxl_of_remove,
+ .remove_new = cxl_of_remove,
.shutdown = cxl_of_shutdown,
};
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index b630625b3024..e78a76d74ff4 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -14,7 +14,6 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/nvmem-provider.h>
diff --git a/drivers/misc/eeprom/idt_89hpesx.c b/drivers/misc/eeprom/idt_89hpesx.c
index d807d08e2614..327afb866b21 100644
--- a/drivers/misc/eeprom/idt_89hpesx.c
+++ b/drivers/misc/eeprom/idt_89hpesx.c
@@ -129,7 +129,7 @@ struct idt_smb_seq {
struct idt_eeprom_seq {
u8 cmd;
u8 eeaddr;
- u16 memaddr;
+ __le16 memaddr;
u8 data;
} __packed;
@@ -141,8 +141,8 @@ struct idt_eeprom_seq {
*/
struct idt_csr_seq {
u8 cmd;
- u16 csraddr;
- u32 data;
+ __le16 csraddr;
+ __le32 data;
} __packed;
/*
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index dbd26c3b245b..4c67e2c5a82e 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -2186,7 +2186,7 @@ static int fastrpc_cb_probe(struct platform_device *pdev)
return 0;
}
-static int fastrpc_cb_remove(struct platform_device *pdev)
+static void fastrpc_cb_remove(struct platform_device *pdev)
{
struct fastrpc_channel_ctx *cctx = dev_get_drvdata(pdev->dev.parent);
struct fastrpc_session_ctx *sess = dev_get_drvdata(&pdev->dev);
@@ -2201,8 +2201,6 @@ static int fastrpc_cb_remove(struct platform_device *pdev)
}
}
spin_unlock_irqrestore(&cctx->lock, flags);
-
- return 0;
}
static const struct of_device_id fastrpc_match_table[] = {
@@ -2212,7 +2210,7 @@ static const struct of_device_id fastrpc_match_table[] = {
static struct platform_driver fastrpc_cb_driver = {
.probe = fastrpc_cb_probe,
- .remove = fastrpc_cb_remove,
+ .remove_new = fastrpc_cb_remove,
.driver = {
.name = "qcom,fastrpc-cb",
.of_match_table = fastrpc_match_table,
diff --git a/drivers/misc/hi6421v600-irq.c b/drivers/misc/hi6421v600-irq.c
index b075d803a2c2..69ee4f39af2a 100644
--- a/drivers/misc/hi6421v600-irq.c
+++ b/drivers/misc/hi6421v600-irq.c
@@ -11,7 +11,6 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index 2165ec35a343..fb9be37057a8 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -14,7 +14,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
@@ -239,7 +238,7 @@ static int hisi_hikey_usb_probe(struct platform_device *pdev)
return 0;
}
-static int hisi_hikey_usb_remove(struct platform_device *pdev)
+static void hisi_hikey_usb_remove(struct platform_device *pdev)
{
struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev);
@@ -251,8 +250,6 @@ static int hisi_hikey_usb_remove(struct platform_device *pdev)
} else {
hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
}
-
- return 0;
}
static const struct of_device_id id_table_hisi_hikey_usb[] = {
@@ -263,7 +260,7 @@ MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb);
static struct platform_driver hisi_hikey_usb_driver = {
.probe = hisi_hikey_usb_probe,
- .remove = hisi_hikey_usb_remove,
+ .remove_new = hisi_hikey_usb_remove,
.driver = {
.name = DEVICE_DRIVER_NAME,
.of_match_table = id_table_hisi_hikey_usb,
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index f1b74d3f8958..04bd34c8c506 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -770,7 +770,7 @@ static void ilo_remove(struct pci_dev *pdev)
static int ilo_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int devnum, minor, start, error = 0;
+ int devnum, slot, start, error = 0;
struct ilo_hwinfo *ilo_hw;
if (pci_match_id(ilo_blacklist, pdev)) {
@@ -839,11 +839,11 @@ static int ilo_probe(struct pci_dev *pdev,
goto remove_isr;
}
- for (minor = 0 ; minor < max_ccb; minor++) {
+ for (slot = 0; slot < max_ccb; slot++) {
struct device *dev;
dev = device_create(&ilo_class, &pdev->dev,
- MKDEV(ilo_major, minor), NULL,
- "hpilo!d%dccb%d", devnum, minor);
+ MKDEV(ilo_major, start + slot), NULL,
+ "hpilo!d%dccb%d", devnum, slot);
if (IS_ERR(dev))
dev_err(&pdev->dev, "Could not create files\n");
}
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index b92767d6bdd2..5178c02b21eb 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -417,7 +417,7 @@ static void lkdtm_FAM_BOUNDS(void)
pr_err("FAIL: survived access of invalid flexible array member index!\n");
if (!__has_attribute(__counted_by__))
- pr_warn("This is expected since this %s was built a compiler supporting __counted_by\n",
+ pr_warn("This is expected since this %s was built with a compiler that does not support __counted_by\n",
lkdtm_kernel_info);
else if (IS_ENABLED(CONFIG_UBSAN_BOUNDS))
pr_expected_config(CONFIG_UBSAN_TRAP);
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index 6be8f1cc052c..5a8c26c3df13 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -144,9 +144,6 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
struct mei_me_hw *hw;
dev = dev_get_drvdata(&aux_dev->dev);
- if (!dev)
- return;
-
hw = to_me_hw(dev);
mei_stop(dev);
@@ -168,9 +165,6 @@ static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mei_stop(dev);
mei_disable_interrupts(dev);
@@ -186,9 +180,6 @@ static int __maybe_unused mei_gsc_pm_resume(struct device *device)
int err;
struct mei_me_hw *hw;
- if (!dev)
- return -ENODEV;
-
hw = to_me_hw(dev);
aux_dev = to_auxiliary_dev(device);
adev = auxiliary_dev_to_mei_aux_dev(aux_dev);
@@ -211,8 +202,6 @@ static int __maybe_unused mei_gsc_pm_runtime_idle(struct device *device)
{
struct mei_device *dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
@@ -225,9 +214,6 @@ static int __maybe_unused mei_gsc_pm_runtime_suspend(struct device *device)
struct mei_me_hw *hw;
int ret;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev)) {
@@ -252,9 +238,6 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
struct mei_me_hw *hw;
irqreturn_t irq_ret;
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
hw = to_me_hw(dev);
@@ -293,6 +276,10 @@ static const struct auxiliary_device_id mei_gsc_id_table[] = {
.driver_data = MEI_ME_GSCFI_CFG,
},
{
+ .name = "xe.mei-gscfi",
+ .driver_data = MEI_ME_GSCFI_CFG,
+ },
+ {
/* sentinel */
}
};
@@ -312,5 +299,6 @@ module_auxiliary_driver(mei_gsc_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_ALIAS("auxiliary:i915.mei-gsc");
MODULE_ALIAS("auxiliary:i915.mei-gscfi");
+MODULE_ALIAS("auxiliary:xe.mei-gscfi");
MODULE_DESCRIPTION("Intel(R) Graphics System Controller");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig
index 9be312ec798d..631dd9651d7c 100644
--- a/drivers/misc/mei/hdcp/Kconfig
+++ b/drivers/misc/mei/hdcp/Kconfig
@@ -4,7 +4,7 @@
config INTEL_MEI_HDCP
tristate "Intel HDCP2.2 services of ME Interface"
depends on INTEL_MEI_ME
- depends on DRM_I915
+ depends on DRM_I915 || DRM_XE
help
MEI Support for HDCP2.2 Services on Intel platforms.
diff --git a/drivers/misc/mei/hdcp/mei_hdcp.c b/drivers/misc/mei/hdcp/mei_hdcp.c
index 51359cc5ece9..f8759a6c9ed3 100644
--- a/drivers/misc/mei/hdcp/mei_hdcp.c
+++ b/drivers/misc/mei/hdcp/mei_hdcp.c
@@ -17,6 +17,7 @@
*/
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
@@ -781,9 +782,18 @@ static int mei_hdcp_component_match(struct device *dev, int subcomponent,
void *data)
{
struct device *base = data;
+ struct pci_dev *pdev;
- if (!dev->driver || strcmp(dev->driver->name, "i915") ||
- subcomponent != I915_COMPONENT_HDCP)
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
+ pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (subcomponent != I915_COMPONENT_HDCP)
return 0;
base = base->parent;
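The class comparison above works because pci_dev->class stores the full 24-bit class code (base class << 16 | subclass << 8 | prog-if), so shifting the 16-bit PCI_CLASS_DISPLAY_VGA constant left by 8 describes a VGA-compatible display controller with programming interface 0. A small sketch of the same test in isolation; the foo_is_intel_vga() name is made up for illustration:

#include <linux/pci.h>
#include <linux/pci_ids.h>

/* Illustrative only: match Intel VGA-class display controllers. */
static bool foo_is_intel_vga(struct pci_dev *pdev)
{
	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
	       pdev->class == (PCI_CLASS_DISPLAY_VGA << 8);
}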
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 8cf636c54032..b5757993c9b2 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -297,11 +297,7 @@ end:
*/
static void mei_me_shutdown(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
+ struct mei_device *dev = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
@@ -322,11 +318,7 @@ static void mei_me_shutdown(struct pci_dev *pdev)
*/
static void mei_me_remove(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
+ struct mei_device *dev = pci_get_drvdata(pdev);
if (mei_pg_is_enabled(dev))
pm_runtime_get_noresume(&pdev->dev);
@@ -355,9 +347,6 @@ static int mei_me_pci_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
@@ -373,14 +362,10 @@ static int mei_me_pci_suspend(struct device *device)
static int mei_me_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
+ struct mei_device *dev = pci_get_drvdata(pdev);
unsigned int irqflags;
int err;
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
pci_enable_msi(pdev);
irqflags = pci_dev_msi_enabled(pdev) ? IRQF_ONESHOT : IRQF_SHARED;
@@ -421,13 +406,10 @@ static void mei_me_pci_complete(struct device *device)
#ifdef CONFIG_PM
static int mei_me_pm_runtime_idle(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
dev_dbg(device, "rpm: me: runtime_idle\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
@@ -436,15 +418,11 @@ static int mei_me_pm_runtime_idle(struct device *device)
static int mei_me_pm_runtime_suspend(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: me: runtime suspend\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
@@ -464,15 +442,11 @@ static int mei_me_pm_runtime_suspend(struct device *device)
static int mei_me_pm_runtime_resume(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: me: runtime resume\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
ret = mei_me_pg_exit_sync(dev);
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index fa20d9a27813..2a584104ba38 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -166,11 +166,7 @@ end:
*/
static void mei_txe_shutdown(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
+ struct mei_device *dev = pci_get_drvdata(pdev);
dev_dbg(&pdev->dev, "shutdown\n");
mei_stop(dev);
@@ -191,13 +187,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev)
*/
static void mei_txe_remove(struct pci_dev *pdev)
{
- struct mei_device *dev;
-
- dev = pci_get_drvdata(pdev);
- if (!dev) {
- dev_err(&pdev->dev, "mei: dev == NULL\n");
- return;
- }
+ struct mei_device *dev = pci_get_drvdata(pdev);
pm_runtime_get_noresume(&pdev->dev);
@@ -218,9 +208,6 @@ static int mei_txe_pci_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
dev_dbg(&pdev->dev, "suspend\n");
mei_stop(dev);
@@ -236,13 +223,9 @@ static int mei_txe_pci_suspend(struct device *device)
static int mei_txe_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct mei_device *dev;
+ struct mei_device *dev = pci_get_drvdata(pdev);
int err;
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return -ENODEV;
-
pci_enable_msi(pdev);
mei_clear_interrupts(dev);
@@ -273,13 +256,10 @@ static int mei_txe_pci_resume(struct device *device)
#ifdef CONFIG_PM
static int mei_txe_pm_runtime_idle(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
dev_dbg(device, "rpm: txe: runtime_idle\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
if (mei_write_is_idle(dev))
pm_runtime_autosuspend(device);
@@ -287,15 +267,11 @@ static int mei_txe_pm_runtime_idle(struct device *device)
}
static int mei_txe_pm_runtime_suspend(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: txe: runtime suspend\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
if (mei_write_is_idle(dev))
@@ -317,15 +293,11 @@ static int mei_txe_pm_runtime_suspend(struct device *device)
static int mei_txe_pm_runtime_resume(struct device *device)
{
- struct mei_device *dev;
+ struct mei_device *dev = dev_get_drvdata(device);
int ret;
dev_dbg(device, "rpm: txe: runtime resume\n");
- dev = dev_get_drvdata(device);
- if (!dev)
- return -ENODEV;
-
mutex_lock(&dev->device_lock);
mei_enable_interrupts(dev);
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index 8d303c6c0000..6c9f00bcb94b 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -384,7 +384,7 @@ err_cancel:
return ret;
}
-static int mei_vsc_remove(struct platform_device *pdev)
+static void mei_vsc_remove(struct platform_device *pdev)
{
struct mei_device *mei_dev = platform_get_drvdata(pdev);
@@ -395,8 +395,6 @@ static int mei_vsc_remove(struct platform_device *pdev)
mei_disable_interrupts(mei_dev);
mei_deregister(mei_dev);
-
- return 0;
}
static int mei_vsc_suspend(struct device *dev)
@@ -433,7 +431,7 @@ MODULE_DEVICE_TABLE(platform, mei_vsc_id_table);
static struct platform_driver mei_vsc_drv = {
.probe = mei_vsc_probe,
- .remove = mei_vsc_remove,
+ .remove_new = mei_vsc_remove,
.id_table = mei_vsc_id_table,
.driver = {
.name = MEI_VSC_DRV_NAME,
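Several drivers in this series (platform-vsc above, and open-dice, sram, ti-st, vcpu_stall_detector, xilinx_sdfec and xilinx_tmr_inject below) are converted from the int-returning .remove callback to the void-returning .remove_new, since the platform core cannot do anything useful with an error at remove time. A minimal sketch of the resulting pattern, with placeholder names not taken from any of these drivers:

/* Minimal sketch of the .remove_new pattern used throughout these
 * conversions; the "demo" names are placeholders. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static void demo_remove(struct platform_device *pdev)
{
	/* Release resources; there is nothing useful a return value could
	 * signal here, which is why the callback is void. */
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.remove_new = demo_remove,
	.driver = {
		.name = "demo-remove-new",
	},
};
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("remove_new callback sketch");
MODULE_LICENSE("GPL");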
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
index e9219b61cd92..aa2dece4a927 100644
--- a/drivers/misc/mei/pxp/Kconfig
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -4,7 +4,7 @@
config INTEL_MEI_PXP
tristate "Intel PXP services of ME Interface"
depends on INTEL_MEI_ME
- depends on DRM_I915
+ depends on DRM_I915 || DRM_XE
help
MEI Support for PXP Services on Intel platforms.
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
index 787c6a27a4be..b1e4c23b31a3 100644
--- a/drivers/misc/mei/pxp/mei_pxp.c
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mei.h>
#include <linux/mei_cl_bus.h>
@@ -225,12 +226,21 @@ static int mei_pxp_component_match(struct device *dev, int subcomponent,
void *data)
{
struct device *base = data;
+ struct pci_dev *pdev;
if (!dev)
return 0;
- if (!dev->driver || strcmp(dev->driver->name, "i915") ||
- subcomponent != I915_COMPONENT_PXP)
+ if (!dev_is_pci(dev))
+ return 0;
+
+ pdev = to_pci_dev(dev);
+
+ if (pdev->class != (PCI_CLASS_DISPLAY_VGA << 8) ||
+ pdev->vendor != PCI_VENDOR_ID_INTEL)
+ return 0;
+
+ if (subcomponent != I915_COMPONENT_PXP)
return 0;
base = base->parent;
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 55f7db490d3b..ecfb70cd057c 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -25,7 +25,8 @@
#define VSC_TP_ROM_BOOTUP_DELAY_MS 10
#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US (500 * USEC_PER_MSEC)
#define VSC_TP_ROM_XFER_POLL_DELAY_US (20 * USEC_PER_MSEC)
-#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT (2 * HZ)
+#define VSC_TP_WAIT_FW_POLL_TIMEOUT (2 * HZ)
+#define VSC_TP_WAIT_FW_POLL_DELAY_US (20 * USEC_PER_MSEC)
#define VSC_TP_MAX_XFER_COUNT 5
#define VSC_TP_PACKET_SYNC 0x31
@@ -101,13 +102,15 @@ static int vsc_tp_wakeup_request(struct vsc_tp *tp)
gpiod_set_value_cansleep(tp->wakeupfw, 0);
ret = wait_event_timeout(tp->xfer_wait,
- atomic_read(&tp->assert_cnt) &&
- gpiod_get_value_cansleep(tp->wakeuphost),
- VSC_TP_WAIT_FW_ASSERTED_TIMEOUT);
+ atomic_read(&tp->assert_cnt),
+ VSC_TP_WAIT_FW_POLL_TIMEOUT);
if (!ret)
return -ETIMEDOUT;
- return 0;
+ return read_poll_timeout(gpiod_get_value_cansleep, ret, ret,
+ VSC_TP_WAIT_FW_POLL_DELAY_US,
+ VSC_TP_WAIT_FW_POLL_TIMEOUT, false,
+ tp->wakeuphost);
}
static void vsc_tp_wakeup_release(struct vsc_tp *tp)
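The wakeup change above splits the wait in two: wait_event_timeout() now only waits for the interrupt-driven assert count, and the wakeuphost GPIO level is then polled with read_poll_timeout(), which keeps calling the read op until the condition is true or the timeout expires, evaluating to 0 on success and -ETIMEDOUT otherwise. A simplified, self-contained userspace approximation of that helper (the real macro can also sleep before the first read and re-reads once more at timeout):

/* Userspace approximation of read_poll_timeout(gpiod_get_value_cansleep, ret,
 * ret, delay_us, timeout_us, false, gpio): poll until the read returns
 * non-zero or the timeout elapses. gpio_read() is a stand-in. */
#include <errno.h>
#include <stdio.h>
#include <time.h>

static int calls;
static int gpio_read(void)
{
	return ++calls >= 5;	/* pretend the line goes high on the 5th read */
}

static long long now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000LL + ts.tv_nsec / 1000;
}

static int poll_until_high(long delay_us, long timeout_us)
{
	long long deadline = now_us() + timeout_us;
	int ret;

	for (;;) {
		ret = gpio_read();
		if (ret)
			return 0;		/* condition met */
		if (now_us() > deadline)
			return -ETIMEDOUT;
		nanosleep(&(struct timespec){ .tv_nsec = delay_us * 1000L }, NULL);
	}
}

int main(void)
{
	printf("poll_until_high() = %d\n", poll_until_high(20000, 2000000));
	return 0;
}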
@@ -416,8 +419,6 @@ static irqreturn_t vsc_tp_isr(int irq, void *data)
atomic_inc(&tp->assert_cnt);
- wake_up(&tp->xfer_wait);
-
return IRQ_WAKE_THREAD;
}
@@ -425,6 +426,8 @@ static irqreturn_t vsc_tp_thread_isr(int irq, void *data)
{
struct vsc_tp *tp = data;
+ wake_up(&tp->xfer_wait);
+
if (tp->event_notify)
tp->event_notify(tp->event_notify_context);
@@ -442,11 +445,16 @@ static int vsc_tp_match_any(struct acpi_device *adev, void *data)
static int vsc_tp_probe(struct spi_device *spi)
{
- struct platform_device_info pinfo = { 0 };
+ struct vsc_tp *tp;
+ struct platform_device_info pinfo = {
+ .name = "intel_vsc",
+ .data = &tp,
+ .size_data = sizeof(tp),
+ .id = PLATFORM_DEVID_NONE,
+ };
struct device *dev = &spi->dev;
struct platform_device *pdev;
struct acpi_device *adev;
- struct vsc_tp *tp;
int ret;
tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL);
@@ -498,13 +506,8 @@ static int vsc_tp_probe(struct spi_device *spi)
ret = -ENODEV;
goto err_destroy_lock;
}
- pinfo.fwnode = acpi_fwnode_handle(adev);
-
- pinfo.name = "intel_vsc";
- pinfo.data = &tp;
- pinfo.size_data = sizeof(tp);
- pinfo.id = PLATFORM_DEVID_NONE;
+ pinfo.fwnode = acpi_fwnode_handle(adev);
pdev = platform_device_register_full(&pinfo);
if (IS_ERR(pdev)) {
ret = PTR_ERR(pdev);
diff --git a/drivers/misc/open-dice.c b/drivers/misc/open-dice.c
index d279a4f195e2..1e3eb2aa44d9 100644
--- a/drivers/misc/open-dice.c
+++ b/drivers/misc/open-dice.c
@@ -165,12 +165,11 @@ static int __init open_dice_probe(struct platform_device *pdev)
return 0;
}
-static int open_dice_remove(struct platform_device *pdev)
+static void open_dice_remove(struct platform_device *pdev)
{
struct open_dice_drvdata *drvdata = platform_get_drvdata(pdev);
misc_deregister(&drvdata->misc);
- return 0;
}
static const struct of_device_id open_dice_of_match[] = {
@@ -179,7 +178,7 @@ static const struct of_device_id open_dice_of_match[] = {
};
static struct platform_driver open_dice_driver = {
- .remove = open_dice_remove,
+ .remove_new = open_dice_remove,
.driver = {
.name = DRIVER_NAME,
.of_match_table = open_dice_of_match,
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index e248c0a8882f..546eb06a40d0 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -435,7 +435,7 @@ err_free_partitions:
return ret;
}
-static int sram_remove(struct platform_device *pdev)
+static void sram_remove(struct platform_device *pdev)
{
struct sram_dev *sram = platform_get_drvdata(pdev);
@@ -443,8 +443,6 @@ static int sram_remove(struct platform_device *pdev)
if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
dev_err(sram->dev, "removed while SRAM allocated\n");
-
- return 0;
}
static struct platform_driver sram_driver = {
@@ -453,7 +451,7 @@ static struct platform_driver sram_driver = {
.of_match_table = sram_dt_ids,
},
.probe = sram_probe,
- .remove = sram_remove,
+ .remove_new = sram_remove,
};
static int __init sram_init(void)
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 4b1be0bb6ac0..47ebe80bf849 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -774,7 +774,7 @@ err_core_init:
return err;
}
-static int kim_remove(struct platform_device *pdev)
+static void kim_remove(struct platform_device *pdev)
{
/* free the GPIOs requested */
struct ti_st_plat_data *pdata = pdev->dev.platform_data;
@@ -798,7 +798,6 @@ static int kim_remove(struct platform_device *pdev)
kfree(kim_gdata);
kim_gdata = NULL;
- return 0;
}
static int kim_suspend(struct platform_device *pdev, pm_message_t state)
@@ -825,7 +824,7 @@ static int kim_resume(struct platform_device *pdev)
/* entry point for ST KIM module, called in from ST Core */
static struct platform_driver kim_platform_driver = {
.probe = kim_probe,
- .remove = kim_remove,
+ .remove_new = kim_remove,
.suspend = kim_suspend,
.resume = kim_resume,
.driver = {
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index eee9b6581604..d2eb31f39aa7 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -166,7 +166,7 @@ static void tifm_free(struct device *dev)
kfree(fm);
}
-static struct class tifm_adapter_class = {
+static const struct class tifm_adapter_class = {
.name = "tifm_adapter",
.dev_release = tifm_free
};
diff --git a/drivers/misc/vcpu_stall_detector.c b/drivers/misc/vcpu_stall_detector.c
index 6479c962da1a..e2015c87f03f 100644
--- a/drivers/misc/vcpu_stall_detector.c
+++ b/drivers/misc/vcpu_stall_detector.c
@@ -187,7 +187,7 @@ err:
return ret;
}
-static int vcpu_stall_detect_remove(struct platform_device *pdev)
+static void vcpu_stall_detect_remove(struct platform_device *pdev)
{
int cpu;
@@ -195,8 +195,6 @@ static int vcpu_stall_detect_remove(struct platform_device *pdev)
for_each_possible_cpu(cpu)
stop_stall_detector_cpu(cpu);
-
- return 0;
}
static const struct of_device_id vcpu_stall_detect_of_match[] = {
@@ -208,7 +206,7 @@ MODULE_DEVICE_TABLE(of, vcpu_stall_detect_of_match);
static struct platform_driver vcpu_stall_detect_driver = {
.probe = vcpu_stall_detect_probe,
- .remove = vcpu_stall_detect_remove,
+ .remove_new = vcpu_stall_detect_remove,
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = vcpu_stall_detect_of_match,
diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c
index 94a0ee19bf20..ea433695f4c4 100644
--- a/drivers/misc/xilinx_sdfec.c
+++ b/drivers/misc/xilinx_sdfec.c
@@ -1420,7 +1420,7 @@ err_xsdfec_dev:
return err;
}
-static int xsdfec_remove(struct platform_device *pdev)
+static void xsdfec_remove(struct platform_device *pdev)
{
struct xsdfec_dev *xsdfec;
@@ -1428,7 +1428,6 @@ static int xsdfec_remove(struct platform_device *pdev)
misc_deregister(&xsdfec->miscdev);
ida_free(&dev_nrs, xsdfec->dev_id);
xsdfec_disable_all_clks(&xsdfec->clks);
- return 0;
}
static const struct of_device_id xsdfec_of_match[] = {
@@ -1445,7 +1444,7 @@ static struct platform_driver xsdfec_driver = {
.of_match_table = xsdfec_of_match,
},
.probe = xsdfec_probe,
- .remove = xsdfec_remove,
+ .remove_new = xsdfec_remove,
};
module_platform_driver(xsdfec_driver);
diff --git a/drivers/misc/xilinx_tmr_inject.c b/drivers/misc/xilinx_tmr_inject.c
index 9fc5835bfebc..73c6da7d0963 100644
--- a/drivers/misc/xilinx_tmr_inject.c
+++ b/drivers/misc/xilinx_tmr_inject.c
@@ -143,11 +143,10 @@ static int xtmr_inject_probe(struct platform_device *pdev)
return 0;
}
-static int xtmr_inject_remove(struct platform_device *pdev)
+static void xtmr_inject_remove(struct platform_device *pdev)
{
debugfs_remove_recursive(dbgfs_root);
dbgfs_root = NULL;
- return 0;
}
static const struct of_device_id xtmr_inject_of_match[] = {
@@ -164,7 +163,7 @@ static struct platform_driver xtmr_inject_driver = {
.of_match_table = xtmr_inject_of_match,
},
.probe = xtmr_inject_probe,
- .remove = xtmr_inject_remove,
+ .remove_new = xtmr_inject_remove,
};
module_platform_driver(xtmr_inject_driver);
MODULE_AUTHOR("Advanced Micro Devices, Inc");
diff --git a/drivers/most/core.c b/drivers/most/core.c
index e4412c7d25b0..f13d0e14a48b 100644
--- a/drivers/most/core.c
+++ b/drivers/most/core.c
@@ -499,7 +499,7 @@ static int most_match(struct device *dev, struct device_driver *drv)
return 1;
}
-static struct bus_type mostbus = {
+static const struct bus_type mostbus = {
.name = "most",
.match = most_match,
};
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 7499a540121e..e28a3af83c0e 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
@@ -113,4 +113,17 @@ config MTD_UBI_FAULT_INJECTION
testing purposes.
If in doubt, say "N".
+
+config MTD_UBI_NVMEM
+ tristate "UBI virtual NVMEM"
+ default n
+ depends on NVMEM
+ help
+ This option enables an additional driver exposing UBI volumes as NVMEM
+ providers, intended for platforms where UBI is part of the firmware
+ specification and is also used to store e.g. MAC addresses or board-
+ specific Wi-Fi calibration data.
+
+ If in doubt, say "N".
+
endif # MTD_UBI
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index 543673605ca7..4b51aaf00d1a 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
@@ -7,3 +7,4 @@ ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
ubi-$(CONFIG_MTD_UBI_BLOCK) += block.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+obj-$(CONFIG_MTD_UBI_NVMEM) += nvmem.o
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 5c8fdcc088a0..f82e3423acb9 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -65,10 +65,10 @@ struct ubiblock_pdu {
};
/* Numbers of elements set in the @ubiblock_param array */
-static int ubiblock_devs __initdata;
+static int ubiblock_devs;
/* MTD devices specification parameters */
-static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;
+static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES];
struct ubiblock {
struct ubi_volume_desc *desc;
@@ -536,6 +536,70 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
return 0;
}
+static bool
+match_volume_desc(struct ubi_volume_info *vi, const char *name, int ubi_num, int vol_id)
+{
+ int err, len, cur_ubi_num, cur_vol_id;
+
+ if (ubi_num == -1) {
+ /* No ubi num, name must be a vol device path */
+ err = ubi_get_num_by_path(name, &cur_ubi_num, &cur_vol_id);
+ if (err || vi->ubi_num != cur_ubi_num || vi->vol_id != cur_vol_id)
+ return false;
+
+ return true;
+ }
+
+ if (vol_id == -1) {
+ /* Got ubi_num, but no vol_id, name must be volume name */
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ len = strnlen(name, UBI_VOL_NAME_MAX + 1);
+ if (len < 1 || vi->name_len != len)
+ return false;
+
+ if (strcmp(name, vi->name))
+ return false;
+
+ return true;
+ }
+
+ if (vi->ubi_num != ubi_num)
+ return false;
+
+ if (vi->vol_id != vol_id)
+ return false;
+
+ return true;
+}
+
+static void
+ubiblock_create_from_param(struct ubi_volume_info *vi)
+{
+ int i, ret = 0;
+ struct ubiblock_param *p;
+
+ /*
+ * Iterate over ubiblock cmdline parameters. If a parameter matches the
+ * newly added volume, create the ubiblock device for it.
+ */
+ for (i = 0; i < ubiblock_devs; i++) {
+ p = &ubiblock_param[i];
+
+ if (!match_volume_desc(vi, p->name, p->ubi_num, p->vol_id))
+ continue;
+
+ ret = ubiblock_create(vi);
+ if (ret) {
+ pr_err(
+ "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
+ vi->name, p->ubi_num, p->vol_id, ret);
+ }
+ break;
+ }
+}
+
static int ubiblock_notify(struct notifier_block *nb,
unsigned long notification_type, void *ns_ptr)
{
@@ -543,10 +607,7 @@ static int ubiblock_notify(struct notifier_block *nb,
switch (notification_type) {
case UBI_VOLUME_ADDED:
- /*
- * We want to enforce explicit block device creation for
- * volumes, so when a volume is added we do nothing.
- */
+ ubiblock_create_from_param(&nt->vi);
break;
case UBI_VOLUME_REMOVED:
ubiblock_remove(&nt->vi);
@@ -572,56 +633,6 @@ static struct notifier_block ubiblock_notifier = {
.notifier_call = ubiblock_notify,
};
-static struct ubi_volume_desc * __init
-open_volume_desc(const char *name, int ubi_num, int vol_id)
-{
- if (ubi_num == -1)
- /* No ubi num, name must be a vol device path */
- return ubi_open_volume_path(name, UBI_READONLY);
- else if (vol_id == -1)
- /* No vol_id, must be vol_name */
- return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
- else
- return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
-}
-
-static void __init ubiblock_create_from_param(void)
-{
- int i, ret = 0;
- struct ubiblock_param *p;
- struct ubi_volume_desc *desc;
- struct ubi_volume_info vi;
-
- /*
- * If there is an error creating one of the ubiblocks, continue on to
- * create the following ubiblocks. This helps in a circumstance where
- * the kernel command-line specifies multiple block devices and some
- * may be broken, but we still want the working ones to come up.
- */
- for (i = 0; i < ubiblock_devs; i++) {
- p = &ubiblock_param[i];
-
- desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
- if (IS_ERR(desc)) {
- pr_err(
- "UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
- p->ubi_num, p->vol_id, PTR_ERR(desc));
- continue;
- }
-
- ubi_get_volume_info(desc, &vi);
- ubi_close_volume(desc);
-
- ret = ubiblock_create(&vi);
- if (ret) {
- pr_err(
- "UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
- vi.name, p->ubi_num, p->vol_id, ret);
- continue;
- }
- }
-}
-
static void ubiblock_remove_all(void)
{
struct ubiblock *next;
@@ -647,18 +658,7 @@ int __init ubiblock_init(void)
if (ubiblock_major < 0)
return ubiblock_major;
- /*
- * Attach block devices from 'block=' module param.
- * Even if one block device in the param list fails to come up,
- * still allow the module to load and leave any others up.
- */
- ubiblock_create_from_param();
-
- /*
- * Block devices are only created upon user requests, so we ignore
- * existing volumes.
- */
- ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
+ ret = ubi_register_volume_notifier(&ubiblock_notifier, 0);
if (ret)
goto err_unreg;
return 0;
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 7d4ff1193db6..a7e3a6246c0e 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -27,6 +27,7 @@
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/slab.h>
#include <linux/major.h>
#include "ubi.h"
@@ -92,7 +93,7 @@ static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];
/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);
-/* Protects @ubi_devices and @ubi->ref_count */
+/* Protects @ubi_devices, @ubi->ref_count and @ubi->is_dead */
static DEFINE_SPINLOCK(ubi_devices_lock);
/* "Show" method for files in '/<sysfs>/class/ubi/' */
@@ -260,6 +261,9 @@ struct ubi_device *ubi_get_device(int ubi_num)
spin_lock(&ubi_devices_lock);
ubi = ubi_devices[ubi_num];
+ if (ubi && ubi->is_dead)
+ ubi = NULL;
+
if (ubi) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
@@ -297,7 +301,7 @@ struct ubi_device *ubi_get_by_major(int major)
spin_lock(&ubi_devices_lock);
for (i = 0; i < UBI_MAX_DEVICES; i++) {
ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_assert(ubi->ref_count >= 0);
ubi->ref_count += 1;
get_device(&ubi->dev);
@@ -326,7 +330,7 @@ int ubi_major2num(int major)
for (i = 0; i < UBI_MAX_DEVICES; i++) {
struct ubi_device *ubi = ubi_devices[i];
- if (ubi && MAJOR(ubi->cdev.dev) == major) {
+ if (ubi && !ubi->is_dead && MAJOR(ubi->cdev.dev) == major) {
ubi_num = ubi->ubi_num;
break;
}
@@ -513,7 +517,7 @@ static void ubi_free_volumes_from(struct ubi_device *ubi, int from)
int i;
for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
- if (!ubi->volumes[i])
+ if (!ubi->volumes[i] || ubi->volumes[i]->is_dead)
continue;
ubi_eba_replace_table(ubi->volumes[i], NULL);
ubi_fastmap_destroy_checkmap(ubi->volumes[i]);
@@ -1098,7 +1102,6 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
return -EINVAL;
spin_lock(&ubi_devices_lock);
- put_device(&ubi->dev);
ubi->ref_count -= 1;
if (ubi->ref_count) {
if (!anyway) {
@@ -1109,6 +1112,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_err(ubi, "%s reference count %d, destroy anyway",
ubi->ubi_name, ubi->ref_count);
}
+ ubi->is_dead = true;
+ spin_unlock(&ubi_devices_lock);
+
+ ubi_notify_all(ubi, UBI_VOLUME_SHUTDOWN, NULL);
+
+ spin_lock(&ubi_devices_lock);
+ put_device(&ubi->dev);
ubi_devices[ubi_num] = NULL;
spin_unlock(&ubi_devices_lock);
@@ -1219,43 +1229,43 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
return mtd;
}
-static int __init ubi_init(void)
+static void ubi_notify_add(struct mtd_info *mtd)
{
- int err, i, k;
+ struct device_node *np = mtd_get_of_node(mtd);
+ int err;
- /* Ensure that EC and VID headers have correct size */
- BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
- BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+ if (!of_device_is_compatible(np, "linux,ubi"))
+ return;
- if (mtd_devs > UBI_MAX_DEVICES) {
- pr_err("UBI error: too many MTD devices, maximum is %d\n",
- UBI_MAX_DEVICES);
- return -EINVAL;
- }
+ /*
+ * we are already holding &mtd_table_mutex, but still need
+ * to bump refcount
+ */
+ err = __get_mtd_device(mtd);
+ if (err)
+ return;
- /* Create base sysfs directory and sysfs files */
- err = class_register(&ubi_class);
+ /* called while holding mtd_table_mutex */
+ mutex_lock_nested(&ubi_devices_mutex, SINGLE_DEPTH_NESTING);
+ err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO, 0, 0, false, false);
+ mutex_unlock(&ubi_devices_mutex);
if (err < 0)
- return err;
-
- err = misc_register(&ubi_ctrl_cdev);
- if (err) {
- pr_err("UBI error: cannot register device\n");
- goto out;
- }
+ __put_mtd_device(mtd);
+}
- ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
- sizeof(struct ubi_wl_entry),
- 0, 0, NULL);
- if (!ubi_wl_entry_slab) {
- err = -ENOMEM;
- goto out_dev_unreg;
- }
+static void ubi_notify_remove(struct mtd_info *mtd)
+{
+ /* do nothing for now */
+}
- err = ubi_debugfs_init();
- if (err)
- goto out_slab;
+static struct mtd_notifier ubi_mtd_notifier = {
+ .add = ubi_notify_add,
+ .remove = ubi_notify_remove,
+};
+static int __init ubi_init_attach(void)
+{
+ int err, i, k;
/* Attach MTD devices */
for (i = 0; i < mtd_devs; i++) {
@@ -1304,25 +1314,79 @@ static int __init ubi_init(void)
}
}
+ return 0;
+
+out_detach:
+ for (k = 0; k < i; k++)
+ if (ubi_devices[k]) {
+ mutex_lock(&ubi_devices_mutex);
+ ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
+ mutex_unlock(&ubi_devices_mutex);
+ }
+ return err;
+}
+#ifndef CONFIG_MTD_UBI_MODULE
+late_initcall(ubi_init_attach);
+#endif
+
+static int __init ubi_init(void)
+{
+ int err;
+
+ /* Ensure that EC and VID headers have correct size */
+ BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
+ BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
+
+ if (mtd_devs > UBI_MAX_DEVICES) {
+ pr_err("UBI error: too many MTD devices, maximum is %d\n",
+ UBI_MAX_DEVICES);
+ return -EINVAL;
+ }
+
+ /* Create base sysfs directory and sysfs files */
+ err = class_register(&ubi_class);
+ if (err < 0)
+ return err;
+
+ err = misc_register(&ubi_ctrl_cdev);
+ if (err) {
+ pr_err("UBI error: cannot register device\n");
+ goto out;
+ }
+
+ ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
+ sizeof(struct ubi_wl_entry),
+ 0, 0, NULL);
+ if (!ubi_wl_entry_slab) {
+ err = -ENOMEM;
+ goto out_dev_unreg;
+ }
+
+ err = ubi_debugfs_init();
+ if (err)
+ goto out_slab;
+
err = ubiblock_init();
if (err) {
pr_err("UBI error: block: cannot initialize, error %d\n", err);
/* See comment above re-ubi_is_module(). */
if (ubi_is_module())
- goto out_detach;
+ goto out_slab;
+ }
+
+ register_mtd_user(&ubi_mtd_notifier);
+
+ if (ubi_is_module()) {
+ err = ubi_init_attach();
+ if (err)
+ goto out_mtd_notifier;
}
return 0;
-out_detach:
- for (k = 0; k < i; k++)
- if (ubi_devices[k]) {
- mutex_lock(&ubi_devices_mutex);
- ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
- mutex_unlock(&ubi_devices_mutex);
- }
- ubi_debugfs_exit();
+out_mtd_notifier:
+ unregister_mtd_user(&ubi_mtd_notifier);
out_slab:
kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
@@ -1332,13 +1396,15 @@ out:
pr_err("UBI error: cannot initialize UBI, error %d\n", err);
return err;
}
-late_initcall(ubi_init);
+device_initcall(ubi_init);
+
static void __exit ubi_exit(void)
{
int i;
ubiblock_exit();
+ unregister_mtd_user(&ubi_mtd_notifier);
for (i = 0; i < UBI_MAX_DEVICES; i++)
if (ubi_devices[i]) {
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 8d1f0e05892c..e5ac3cd0bbae 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1456,7 +1456,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
+
+ /*
+ * The volumes_lock is needed here to prevent the old eba_tbl, which is
+ * about to be replaced, from being updated while ubi_resize_volume() is
+ * copying it.
+ */
+ spin_lock(&ubi->volumes_lock);
vol->eba_tbl->entries[lnum].pnum = to;
+ spin_unlock(&ubi->volumes_lock);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 2a728c31e6b8..9a4940874be5 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -85,9 +85,10 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
- (sizeof(struct ubi_fm_eba) +
- (ubi->peb_count * sizeof(__be32))) +
- sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
+ ((sizeof(struct ubi_fm_eba) +
+ sizeof(struct ubi_fm_volhdr)) *
+ (UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT)) +
+ (ubi->peb_count * sizeof(__be32));
return roundup(size, ubi->leb_size);
}
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 5db653eacbd4..f1ea8677467f 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -152,7 +152,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
spin_lock(&ubi->volumes_lock);
vol = ubi->volumes[vol_id];
- if (!vol)
+ if (!vol || vol->is_dead)
goto out_unlock;
err = -EBUSY;
@@ -280,6 +280,41 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name,
EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
/**
+ * ubi_get_num_by_path - get UBI device and volume number from device path
+ * @pathname: volume character device node path
+ * @ubi_num: pointer to UBI device number to be set
+ * @vol_id: pointer to UBI volume ID to be set
+ *
+ * Returns 0 on success and sets ubi_num and vol_id, returns error otherwise.
+ */
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id)
+{
+ int error;
+ struct path path;
+ struct kstat stat;
+
+ error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+ if (error)
+ return error;
+
+ error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
+ path_put(&path);
+ if (error)
+ return error;
+
+ if (!S_ISCHR(stat.mode))
+ return -EINVAL;
+
+ *ubi_num = ubi_major2num(MAJOR(stat.rdev));
+ *vol_id = MINOR(stat.rdev) - 1;
+
+ if (*vol_id < 0 || *ubi_num < 0)
+ return -ENODEV;
+
+ return 0;
+}
+
+/**
* ubi_open_volume_path - open UBI volume by its character device node path.
* @pathname: volume character device node path
* @mode: open mode
@@ -290,32 +325,17 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
{
int error, ubi_num, vol_id;
- struct path path;
- struct kstat stat;
dbg_gen("open volume %s, mode %d", pathname, mode);
if (!pathname || !*pathname)
return ERR_PTR(-EINVAL);
- error = kern_path(pathname, LOOKUP_FOLLOW, &path);
- if (error)
- return ERR_PTR(error);
-
- error = vfs_getattr(&path, &stat, STATX_TYPE, AT_STATX_SYNC_AS_STAT);
- path_put(&path);
+ error = ubi_get_num_by_path(pathname, &ubi_num, &vol_id);
if (error)
return ERR_PTR(error);
- if (!S_ISCHR(stat.mode))
- return ERR_PTR(-EINVAL);
-
- ubi_num = ubi_major2num(MAJOR(stat.rdev));
- vol_id = MINOR(stat.rdev) - 1;
-
- if (vol_id >= 0 && ubi_num >= 0)
- return ubi_open_volume(ubi_num, vol_id, mode);
- return ERR_PTR(-ENODEV);
+ return ubi_open_volume(ubi_num, vol_id, mode);
}
EXPORT_SYMBOL_GPL(ubi_open_volume_path);
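The new ubi_get_num_by_path() helper resolves a /dev/ubiX_Y character device purely from its device numbers: the major is mapped to a UBI device with ubi_major2num(), and the minor is vol_id + 1 (minor 0 being the UBI device node itself), hence the `- 1` above. A small userspace illustration of the same mapping:

/* Userspace illustration of the major/minor mapping used by
 * ubi_get_num_by_path(): volume char devices use minor vol_id + 1. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(int argc, char **argv)
{
	struct stat st;

	if (argc < 2) {
		fprintf(stderr, "usage: %s /dev/ubiX_Y\n", argv[0]);
		return 1;
	}
	if (stat(argv[1], &st) || !S_ISCHR(st.st_mode)) {
		fprintf(stderr, "%s: not a character device\n", argv[1]);
		return 1;
	}
	/* The kernel maps the major to a UBI device number with
	 * ubi_major2num(); from userspace we can only print the raw major. */
	printf("major %u, vol_id %d\n", major(st.st_rdev),
	       (int)minor(st.st_rdev) - 1);
	return 0;
}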
diff --git a/drivers/mtd/ubi/nvmem.c b/drivers/mtd/ubi/nvmem.c
new file mode 100644
index 000000000000..8aeb9c428e51
--- /dev/null
+++ b/drivers/mtd/ubi/nvmem.c
@@ -0,0 +1,191 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2023 Daniel Golle <daniel@makrotopia.org>
+ */
+
+/* UBI NVMEM provider */
+#include "ubi.h"
+#include <linux/nvmem-provider.h>
+#include <asm/div64.h>
+
+/* List of all NVMEM devices */
+static LIST_HEAD(nvmem_devices);
+static DEFINE_MUTEX(devices_mutex);
+
+struct ubi_nvmem {
+ struct nvmem_device *nvmem;
+ int ubi_num;
+ int vol_id;
+ int usable_leb_size;
+ struct list_head list;
+};
+
+static int ubi_nvmem_reg_read(void *priv, unsigned int from,
+ void *val, size_t bytes)
+{
+ size_t to_read, bytes_left = bytes;
+ struct ubi_nvmem *unv = priv;
+ struct ubi_volume_desc *desc;
+ uint32_t offs;
+ uint64_t lnum = from;
+ int err = 0;
+
+ desc = ubi_open_volume(unv->ubi_num, unv->vol_id, UBI_READONLY);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ offs = do_div(lnum, unv->usable_leb_size);
+ while (bytes_left) {
+ to_read = unv->usable_leb_size - offs;
+
+ if (to_read > bytes_left)
+ to_read = bytes_left;
+
+ err = ubi_read(desc, lnum, val, offs, to_read);
+ if (err)
+ break;
+
+ lnum += 1;
+ offs = 0;
+ bytes_left -= to_read;
+ val += to_read;
+ }
+ ubi_close_volume(desc);
+
+ if (err)
+ return err;
+
+ return bytes_left == 0 ? 0 : -EIO;
+}
+
+static int ubi_nvmem_add(struct ubi_volume_info *vi)
+{
+ struct device_node *np = dev_of_node(vi->dev);
+ struct nvmem_config config = {};
+ struct ubi_nvmem *unv;
+ int ret;
+
+ if (!np)
+ return 0;
+
+ if (!of_get_child_by_name(np, "nvmem-layout"))
+ return 0;
+
+ if (WARN_ON_ONCE(vi->usable_leb_size <= 0) ||
+ WARN_ON_ONCE(vi->size <= 0))
+ return -EINVAL;
+
+ unv = kzalloc(sizeof(struct ubi_nvmem), GFP_KERNEL);
+ if (!unv)
+ return -ENOMEM;
+
+ config.id = NVMEM_DEVID_NONE;
+ config.dev = vi->dev;
+ config.name = dev_name(vi->dev);
+ config.owner = THIS_MODULE;
+ config.priv = unv;
+ config.reg_read = ubi_nvmem_reg_read;
+ config.size = vi->usable_leb_size * vi->size;
+ config.word_size = 1;
+ config.stride = 1;
+ config.read_only = true;
+ config.root_only = true;
+ config.ignore_wp = true;
+ config.of_node = np;
+
+ unv->ubi_num = vi->ubi_num;
+ unv->vol_id = vi->vol_id;
+ unv->usable_leb_size = vi->usable_leb_size;
+ unv->nvmem = nvmem_register(&config);
+ if (IS_ERR(unv->nvmem)) {
+ ret = dev_err_probe(vi->dev, PTR_ERR(unv->nvmem),
+ "Failed to register NVMEM device\n");
+ kfree(unv);
+ return ret;
+ }
+
+ mutex_lock(&devices_mutex);
+ list_add_tail(&unv->list, &nvmem_devices);
+ mutex_unlock(&devices_mutex);
+
+ return 0;
+}
+
+static void ubi_nvmem_remove(struct ubi_volume_info *vi)
+{
+ struct ubi_nvmem *unv_c, *unv = NULL;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry(unv_c, &nvmem_devices, list)
+ if (unv_c->ubi_num == vi->ubi_num && unv_c->vol_id == vi->vol_id) {
+ unv = unv_c;
+ break;
+ }
+
+ if (!unv) {
+ mutex_unlock(&devices_mutex);
+ return;
+ }
+
+ list_del(&unv->list);
+ mutex_unlock(&devices_mutex);
+ nvmem_unregister(unv->nvmem);
+ kfree(unv);
+}
+
+/**
+ * nvmem_notify - UBI notification handler.
+ * @nb: registered notifier block
+ * @l: notification type
+ * @ns_ptr: pointer to the &struct ubi_notification object
+ */
+static int nvmem_notify(struct notifier_block *nb, unsigned long l,
+ void *ns_ptr)
+{
+ struct ubi_notification *nt = ns_ptr;
+
+ switch (l) {
+ case UBI_VOLUME_RESIZED:
+ ubi_nvmem_remove(&nt->vi);
+ fallthrough;
+ case UBI_VOLUME_ADDED:
+ ubi_nvmem_add(&nt->vi);
+ break;
+ case UBI_VOLUME_SHUTDOWN:
+ ubi_nvmem_remove(&nt->vi);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block nvmem_notifier = {
+ .notifier_call = nvmem_notify,
+};
+
+static int __init ubi_nvmem_init(void)
+{
+ return ubi_register_volume_notifier(&nvmem_notifier, 0);
+}
+
+static void __exit ubi_nvmem_exit(void)
+{
+ struct ubi_nvmem *unv, *tmp;
+
+ mutex_lock(&devices_mutex);
+ list_for_each_entry_safe(unv, tmp, &nvmem_devices, list) {
+ nvmem_unregister(unv->nvmem);
+ list_del(&unv->list);
+ kfree(unv);
+ }
+ mutex_unlock(&devices_mutex);
+
+ ubi_unregister_volume_notifier(&nvmem_notifier);
+}
+
+module_init(ubi_nvmem_init);
+module_exit(ubi_nvmem_exit);
+MODULE_DESCRIPTION("NVMEM layer over UBI volumes");
+MODULE_AUTHOR("Daniel Golle");
+MODULE_LICENSE("GPL");
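ubi_nvmem_reg_read() above treats the volume as one flat byte array: do_div() splits the linear offset into a starting LEB number plus an offset within that LEB, and the loop then issues one ubi_read() per LEB until the request is satisfied. The following standalone program shows the same chunking arithmetic with made-up example values:

/* Illustration of the LEB chunking in ubi_nvmem_reg_read(): a linear
 * (offset, length) request is split into per-LEB reads. Values are examples. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint32_t leb_size = 126976;	/* usable_leb_size, example value */
	uint64_t from = 300000;			/* linear offset into the volume */
	size_t bytes_left = 200000;		/* requested length */
	uint64_t lnum = from / leb_size;	/* do_div() in the kernel code */
	uint32_t offs = from % leb_size;

	while (bytes_left) {
		size_t to_read = leb_size - offs;

		if (to_read > bytes_left)
			to_read = bytes_left;
		printf("ubi_read LEB %llu, offs %u, len %zu\n",
		       (unsigned long long)lnum, (unsigned int)offs, to_read);
		lnum++;
		offs = 0;
		bytes_left -= to_read;
	}
	return 0;
}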
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 0b42bb45dd84..32009a24869e 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -337,6 +337,7 @@ struct ubi_volume {
int writers;
int exclusive;
int metaonly;
+ bool is_dead;
int reserved_pebs;
int vol_type;
@@ -561,6 +562,7 @@ struct ubi_device {
spinlock_t volumes_lock;
int ref_count;
int image_seq;
+ bool is_dead;
int rsvd_pebs;
int avail_pebs;
@@ -955,6 +957,7 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
+int ubi_get_num_by_path(const char *pathname, int *ubi_num, int *vol_id);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 2c867d16f89f..5a3558bbb903 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -59,7 +59,7 @@ static ssize_t vol_attribute_show(struct device *dev,
struct ubi_device *ubi = vol->ubi;
spin_lock(&ubi->volumes_lock);
- if (!ubi->volumes[vol->vol_id]) {
+ if (!ubi->volumes[vol->vol_id] || ubi->volumes[vol->vol_id]->is_dead) {
spin_unlock(&ubi->volumes_lock);
return -ENODEV;
}
@@ -124,6 +124,31 @@ static void vol_release(struct device *dev)
kfree(vol);
}
+static struct fwnode_handle *find_volume_fwnode(struct ubi_volume *vol)
+{
+ struct fwnode_handle *fw_vols, *fw_vol;
+ const char *volname;
+ u32 volid;
+
+ fw_vols = device_get_named_child_node(vol->dev.parent->parent, "volumes");
+ if (!fw_vols)
+ return NULL;
+
+ fwnode_for_each_child_node(fw_vols, fw_vol) {
+ if (!fwnode_property_read_string(fw_vol, "volname", &volname) &&
+ strncmp(volname, vol->name, vol->name_len))
+ continue;
+
+ if (!fwnode_property_read_u32(fw_vol, "volid", &volid) &&
+ vol->vol_id != volid)
+ continue;
+
+ return fw_vol;
+ }
+
+ return NULL;
+}
+
/**
* ubi_create_volume - create volume.
* @ubi: UBI device description object
@@ -189,7 +214,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
/* Ensure that the name is unique */
for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i] &&
+ if (ubi->volumes[i] && !ubi->volumes[i]->is_dead &&
ubi->volumes[i]->name_len == req->name_len &&
!strcmp(ubi->volumes[i]->name, req->name)) {
ubi_err(ubi, "volume \"%s\" exists (ID %d)",
@@ -223,6 +248,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
vol->name_len = req->name_len;
memcpy(vol->name, req->name, vol->name_len);
vol->ubi = ubi;
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
/*
* Finish all pending erases because there may be some LEBs belonging
@@ -352,6 +378,19 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
err = -EBUSY;
goto out_unlock;
}
+
+ /*
+ * Mark volume as dead at this point to prevent that anyone
+ * can take a reference to the volume from now on.
+ * This is necessary as we have to release the spinlock before
+ * calling ubi_volume_notify.
+ */
+ vol->is_dead = true;
+ spin_unlock(&ubi->volumes_lock);
+
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_SHUTDOWN);
+
+ spin_lock(&ubi->volumes_lock);
ubi->volumes[vol_id] = NULL;
spin_unlock(&ubi->volumes_lock);
@@ -408,6 +447,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
struct ubi_device *ubi = vol->ubi;
struct ubi_vtbl_record vtbl_rec;
struct ubi_eba_table *new_eba_tbl = NULL;
+ struct ubi_eba_table *old_eba_tbl = NULL;
int vol_id = vol->vol_id;
if (ubi->ro_mode)
@@ -453,10 +493,13 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
err = -ENOSPC;
goto out_free;
}
+
ubi->avail_pebs -= pebs;
ubi->rsvd_pebs += pebs;
ubi_eba_copy_table(vol, new_eba_tbl, vol->reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -471,7 +514,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
ubi->avail_pebs -= pebs;
ubi_update_reserved(ubi);
ubi_eba_copy_table(vol, new_eba_tbl, reserved_pebs);
- ubi_eba_replace_table(vol, new_eba_tbl);
+ old_eba_tbl = vol->eba_tbl;
+ vol->eba_tbl = new_eba_tbl;
+ vol->reserved_pebs = reserved_pebs;
spin_unlock(&ubi->volumes_lock);
}
@@ -493,7 +538,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
if (err)
goto out_acc;
- vol->reserved_pebs = reserved_pebs;
if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
vol->used_ebs = reserved_pebs;
vol->last_eb_bytes = vol->usable_leb_size;
@@ -501,19 +545,23 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
(long long)vol->used_ebs * vol->usable_leb_size;
}
+ /* destroy old table */
+ ubi_eba_destroy_table(old_eba_tbl);
ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED);
self_check_volumes(ubi);
return err;
out_acc:
- if (pebs > 0) {
- spin_lock(&ubi->volumes_lock);
- ubi->rsvd_pebs -= pebs;
- ubi->avail_pebs += pebs;
- spin_unlock(&ubi->volumes_lock);
- }
- return err;
-
+ spin_lock(&ubi->volumes_lock);
+ vol->reserved_pebs = reserved_pebs - pebs;
+ ubi->rsvd_pebs -= pebs;
+ ubi->avail_pebs += pebs;
+ if (pebs > 0)
+ ubi_eba_copy_table(vol, old_eba_tbl, vol->reserved_pebs);
+ else
+ ubi_eba_copy_table(vol, old_eba_tbl, reserved_pebs);
+ vol->eba_tbl = old_eba_tbl;
+ spin_unlock(&ubi->volumes_lock);
out_free:
ubi_eba_destroy_table(new_eba_tbl);
return err;
@@ -592,6 +640,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
vol->dev.class = &ubi_class;
vol->dev.groups = volume_dev_groups;
dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
+ device_set_node(&vol->dev, find_volume_fwnode(vol));
err = device_register(&vol->dev);
if (err) {
cdev_del(&vol->cdev);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index f700f0e4f2ec..6e5489e233dd 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -791,6 +791,12 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
* The number of supported volumes is limited by the eraseblock size
* and by the UBI_MAX_VOLUMES constant.
*/
+
+ if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
+ ubi_err(ubi, "LEB size too small for a volume record");
+ return -EINVAL;
+ }
+
ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
ubi->vtbl_slots = UBI_MAX_VOLUMES;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index f81b598147b3..7b5028b67cd5 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -370,8 +370,8 @@ static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = {
static const struct kvaser_pciefd_irq_mask kvaser_pciefd_xilinx_irq_mask = {
.kcan_rx0 = BIT(4),
- .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) },
- .all = GENMASK(19, 16) | BIT(4),
+ .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19), BIT(20), BIT(21), BIT(22), BIT(23) },
+ .all = GENMASK(23, 16) | BIT(4),
};
static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 678b51f9cea6..767f66c37f6b 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -950,20 +950,56 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_unlock(&priv->reg_mutex);
}
+/* On page 205, section "8.6.3 Frame filtering" of the active standard, IEEE Std
+ * 802.1Q™-2022, it is stated that frames with 01:80:C2:00:00:00-0F as MAC DA
+ * must only be propagated to C-VLAN and MAC Bridge components. That means
+ * VLAN-aware and VLAN-unaware bridges. On the switch designs with CPU ports,
+ * these frames are supposed to be processed by the CPU (software). So we make
+ * the switch only forward them to the CPU port. And if received from a CPU
+ * port, forward to a single port. The software is responsible of making the
+ * switch conform to the latter by setting a single port as destination port on
+ * the special tag.
+ *
+ * This switch intellectual property cannot conform to this part of the standard
+ * fully. Whilst the REV_UN frame tag covers the remaining :04-0D and :0F MAC
+ * DAs, it also includes :22-FF, MAC DAs whose scope of propagation is not
+ * supposed to be restricted.
+ */
static void
mt753x_trap_frames(struct mt7530_priv *priv)
{
- /* Trap BPDUs to the CPU port(s) */
- mt7530_rmw(priv, MT753X_BPC, MT753X_BPDU_PORT_FW_MASK,
+ /* Trap 802.1X PAE frames and BPDUs to the CPU port(s) and egress them
+ * VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_EG_TAG_MASK |
+ MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
+ MT753X_BPDU_PORT_FW_MASK,
+ MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
MT753X_BPDU_CPU_ONLY);
- /* Trap 802.1X PAE frames to the CPU port(s) */
- mt7530_rmw(priv, MT753X_BPC, MT753X_PAE_PORT_FW_MASK,
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY));
+ /* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC1, MT753X_R02_EG_TAG_MASK |
+ MT753X_R02_PORT_FW_MASK | MT753X_R01_EG_TAG_MASK |
+ MT753X_R01_PORT_FW_MASK,
+ MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
- /* Trap LLDP frames with :0E MAC DA to the CPU port(s) */
- mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_PORT_FW_MASK,
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY));
+ /* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
+ * them VLAN-untagged.
+ */
+ mt7530_rmw(priv, MT753X_RGAC2, MT753X_R0E_EG_TAG_MASK |
+ MT753X_R0E_PORT_FW_MASK | MT753X_R03_EG_TAG_MASK |
+ MT753X_R03_PORT_FW_MASK,
+ MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
+ MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ MT753X_BPDU_CPU_ONLY);
}
static void
@@ -2192,22 +2228,16 @@ mt7530_setup(struct dsa_switch *ds)
}
}
- /* Disable LEDs before reset to prevent the MT7530 sampling a
- * potentially incorrect HT_XTAL_FSEL value.
- */
- mt7530_write(priv, MT7530_LED_EN, 0);
- usleep_range(1000, 1100);
-
/* Reset whole chip through gpio pin or memory-mapped registers for
* different type of hardware
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
@@ -2420,11 +2450,11 @@ mt7531_setup(struct dsa_switch *ds)
*/
if (priv->mcm) {
reset_control_assert(priv->rstc);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
reset_control_deassert(priv->rstc);
} else {
gpiod_set_value_cansleep(priv->reset, 0);
- usleep_range(1000, 1100);
+ usleep_range(5000, 5100);
gpiod_set_value_cansleep(priv->reset, 1);
}
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a71166e0a7fc..d17b318e6ee4 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -65,14 +65,33 @@ enum mt753x_id {
/* Registers for BPDU and PAE frame control*/
#define MT753X_BPC 0x24
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
+#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
+#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for :01 and :02 MAC DA frame control */
+#define MT753X_RGAC1 0x28
+#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
+#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
+#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
+#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
+#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
/* Register for :03 and :0E MAC DA frame control */
#define MT753X_RGAC2 0x2c
+#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
+#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
+#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
+#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
+#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
enum mt753x_bpdu_port_fw {
MT753X_BPDU_FOLLOW_MFC,
@@ -253,6 +272,7 @@ enum mt7530_port_mode {
enum mt7530_vlan_port_eg_tag {
MT7530_VLAN_EG_DISABLED = 0,
MT7530_VLAN_EG_CONSISTENT = 1,
+ MT7530_VLAN_EG_UNTAGGED = 4,
};
enum mt7530_vlan_port_attr {
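The new MT753X_* definitions follow the usual GENMASK()/FIELD_PREP() pattern: the *_MASK macro names a bit range and the wrapper shifts a value into that range, so MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) places the value 4 into bits 24:22. A tiny standalone demonstration, with the two helpers re-implemented locally so it builds in userspace:

/* Standalone demo of the GENMASK()/FIELD_PREP() field encoding used by the
 * new MT753X_* macros; helpers re-implemented here for userspace. */
#include <stdio.h>
#include <stdint.h>

#define GENMASK(h, l)		(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define MT753X_PAE_EG_TAG_MASK	GENMASK(24, 22)
#define MT753X_PAE_EG_TAG(x)	FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
#define MT7530_VLAN_EG_UNTAGGED	4

int main(void)
{
	printf("mask  = 0x%08x\n", MT753X_PAE_EG_TAG_MASK);	/* 0x01c00000 */
	printf("value = 0x%08x\n", MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED));
	return 0;
}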
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0d917a9699c5..b65b8592ad75 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -367,6 +367,7 @@ static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
cp->irq_arr[0].status_blk = (void *)
((unsigned long) bnapi->status_blk.msi +
(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+ cp->irq_arr[0].status_blk_map = bp->status_blk_mapping;
cp->irq_arr[0].status_blk_num = sb_id;
cp->num_irq = 1;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d8b1824c334d..0bc1367fd649 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1002,9 +1002,6 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
struct bnx2x_alloc_pool *pool)
{
- if (!pool->page)
- return;
-
put_page(pool->page);
pool->page = NULL;
@@ -1015,6 +1012,9 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
+ if (!fp->page_pool.page)
+ return;
+
if (fp->mode == TPA_MODE_DISABLED)
return;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 0d8e61c63c7c..678829646cec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -14912,9 +14912,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
else
cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+ cp->irq_arr[0].status_blk_map = bp->cnic_sb_mapping;
cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
cp->irq_arr[1].status_blk = bp->def_status_blk;
+ cp->irq_arr[1].status_blk_map = bp->def_status_blk_mapping;
cp->irq_arr[1].status_blk_num = DEF_SB_ID;
cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 7926aaef8f0c..3d63177e7e52 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1107,10 +1107,11 @@ static int cnic_init_uio(struct cnic_dev *dev)
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
CNIC_PAGE_MASK;
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE * 9);
else
- uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+ uinfo->mem[1].size = PAGE_ALIGN(BNX2_SBLK_MSIX_ALIGN_SIZE);
uinfo->name = "bnx2_cnic";
} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
@@ -1118,20 +1119,26 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
CNIC_PAGE_MASK;
- uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+ uinfo->mem[1].dma_addr = cp->status_blk_map;
+ uinfo->mem[1].size = PAGE_ALIGN(sizeof(*cp->bnx2x_def_status_blk));
uinfo->name = "bnx2x_cnic";
}
- uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[1].dma_device = &dev->pcidev->dev;
+ uinfo->mem[1].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
- uinfo->mem[2].size = udev->l2_ring_size;
- uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[2].dma_addr = udev->l2_ring_map;
+ uinfo->mem[2].size = PAGE_ALIGN(udev->l2_ring_size);
+ uinfo->mem[2].dma_device = &dev->pcidev->dev;
+ uinfo->mem[2].memtype = UIO_MEM_DMA_COHERENT;
uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
- uinfo->mem[3].size = udev->l2_buf_size;
- uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+ uinfo->mem[3].dma_addr = udev->l2_buf_map;
+ uinfo->mem[3].size = PAGE_ALIGN(udev->l2_buf_size);
+ uinfo->mem[3].dma_device = &dev->pcidev->dev;
+ uinfo->mem[3].memtype = UIO_MEM_DMA_COHERENT;
uinfo->version = CNIC_MODULE_VERSION;
uinfo->irq = UIO_IRQ_CUSTOM;
@@ -1313,6 +1320,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
return 0;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+ cp->status_blk_map = cp->ethdev->irq_arr[1].status_blk_map;
cp->l2_rx_ring_size = 15;
@@ -5323,6 +5331,7 @@ static int cnic_start_hw(struct cnic_dev *dev)
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+ cp->status_blk_map = ethdev->irq_arr[0].status_blk_map;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 4baea81bae7a..fedc84ada937 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -260,6 +260,7 @@ struct cnic_local {
#define SM_RX_ID 0
#define SM_TX_ID 1
} status_blk;
+ dma_addr_t status_blk_map;
struct host_sp_status_block *bnx2x_def_status_blk;
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 789e5c7e9311..49a11ec80b36 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -190,6 +190,7 @@ struct cnic_ops {
struct cnic_irq {
unsigned int vector;
void *status_blk;
+ dma_addr_t status_blk_map;
u32 status_blk_num;
u32 status_blk_num2;
u32 irq_flags;
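The cnic changes above switch the exported UIO regions from UIO_MEM_LOGICAL to UIO_MEM_DMA_COHERENT, recording the DMA address and device so the UIO core can hand out the coherent buffers directly; page-aligning the sizes matches UIO mapping whole pages. Userspace access is unchanged: region N of a UIO device is mapped by calling mmap() with an offset of N times the page size. A hedged sketch (the /dev/uio0 node name is an example, not tied to these drivers):

/* Userspace sketch: mapping UIO region 1 (the status block region in the
 * cnic case). In a real consumer the region size would be read from
 * /sys/class/uio/uio0/maps/map1/size. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = open("/dev/uio0", O_RDWR);
	void *p;

	if (fd < 0) {
		perror("open /dev/uio0");
		return 1;
	}
	/* Region N is selected with offset N * PAGE_SIZE. */
	p = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 1 * page);
	if (p == MAP_FAILED) {
		perror("mmap region 1");
		close(fd);
		return 1;
	}
	printf("UIO region 1 mapped at %p\n", p);
	munmap(p, page);
	close(fd);
	return 0;
}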
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index 8510b88d4982..f3cd5a376eca 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -24,7 +24,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_GET_MBX_LEN)
),
@@ -33,7 +33,7 @@ TRACE_EVENT(hclge_pf_mbx_get,
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -56,7 +56,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __string(devname, hdev->vport[0].nic.kinfo.netdev->name)
__array(u32, mbx_data, PF_SEND_MBX_LEN)
),
@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_pf_mbx_send,
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index 5d4895bb57a1..b259e95dd53c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -23,7 +23,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
__field(u8, vfid)
__field(u16, code)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->nic.kinfo.netdev->name)
+ __string(devname, hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_GET_MBX_LEN)
),
@@ -31,7 +31,7 @@ TRACE_EVENT(hclge_vf_mbx_get,
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -55,7 +55,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
__field(u8, code)
__field(u8, subcode)
__string(pciname, pci_name(hdev->pdev))
- __string(devname, &hdev->nic.kinfo.netdev->name)
+ __string(devname, hdev->nic.kinfo.netdev->name)
__array(u32, mbx_data, VF_SEND_MBX_LEN)
),
@@ -64,7 +64,7 @@ TRACE_EVENT(hclge_vf_mbx_send,
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
__assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ __assign_str(devname, hdev->nic.kinfo.netdev->name);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d2fd315556a3..a545a7917e4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -956,7 +956,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
u16 q_idx)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
@@ -978,7 +978,7 @@ int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings,
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
- DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
u16 q_idx;
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 4d8111aeb0ff..db4b2844e1f7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -4695,7 +4695,7 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
@@ -4917,7 +4917,7 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index 7532d11ad7f3..fc91c4d41186 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -1938,8 +1938,8 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg_info);
u32 i;
@@ -1990,8 +1990,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
- ICE_PKG_CNT);
+ DEFINE_RAW_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
u16 size = __struct_size(pkg);
enum ice_ddp_state state;
u32 i;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 467372d541d2..f97128b69f87 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -491,7 +491,7 @@ static void
ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -849,7 +849,7 @@ static void
ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
@@ -1873,7 +1873,7 @@ static void
ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
u16 vsi_num, u8 tc)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
struct device *dev = ice_pf_to_dev(lag->pf);
u16 numq, valq, num_moved, qbuf_size;
u16 buf_size = __struct_size(buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index d174a4eeb899..a1525992d14b 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -237,7 +237,7 @@ static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
u32 node_teid)
{
- DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
u16 buf_size = __struct_size(buf);
u16 num_groups_removed = 0;
int status;
@@ -2219,7 +2219,7 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index f84bab80ca42..d4baae8c3b72 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -1812,7 +1812,7 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
int status;
@@ -2081,7 +2081,7 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
*/
int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
int status;
@@ -4418,7 +4418,7 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4446,7 +4446,7 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
int status;
@@ -4476,7 +4476,7 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
*/
int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
{
- DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
u16 buf_len = __struct_size(buf);
u16 res_type;
int status;
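Editor's note: the ice hunks above switch on-stack flexible-array buffers from DEFINE_FLEX() to DEFINE_RAW_FLEX(); the raw variant only reserves storage for the struct plus a fixed number of trailing elements, without initializing a __counted_by counter member. A rough standalone approximation of the idea, assuming a simplified macro and a hypothetical struct, not the kernel's own definitions:

    #include <stdio.h>

    /* Rough stand-in for DEFINE_RAW_FLEX(): zeroed stack storage for TYPE
     * plus COUNT trailing MEMBER elements, exposed as a TYPE pointer.
     * Illustration only; no __counted_by handling.
     */
    #define RAW_FLEX(TYPE, NAME, MEMBER, COUNT)                         \
        _Alignas(TYPE) unsigned char NAME##_storage[sizeof(TYPE) +      \
            (COUNT) * sizeof(((TYPE *)0)->MEMBER[0])] = { 0 };          \
        TYPE *NAME = (TYPE *)NAME##_storage

    struct qgrp {
        unsigned short num_txqs;
        unsigned int txqs[];            /* flexible array member */
    };

    int main(void)
    {
        RAW_FLEX(struct qgrp, qg_buf, txqs, 1);

        qg_buf->num_txqs = 1;
        qg_buf->txqs[0] = 42;
        printf("buffer size = %zu\n", sizeof(qg_buf_storage));
        return 0;
    }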
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 6c70c8498690..3c0f55b3e48e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -1338,7 +1338,7 @@ static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
/* Release thread waiting for completion */
lmac->cmd_pend = false;
- wake_up_interruptible(&lmac->wq_cmd_cmplt);
+ wake_up(&lmac->wq_cmd_cmplt);
break;
case CGX_EVT_ASYNC:
if (cgx_event_is_linkevent(event))
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
index b92264d0a77e..1e5aa5397504 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c
@@ -214,11 +214,12 @@ int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid)
}
EXPORT_SYMBOL(otx2_mbox_busy_poll_for_rsp);
-void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+static void otx2_mbox_msg_send_data(struct otx2_mbox *mbox, int devid, u64 data)
{
struct otx2_mbox_dev *mdev = &mbox->dev[devid];
struct mbox_hdr *tx_hdr, *rx_hdr;
void *hw_mbase = mdev->hwbase;
+ u64 intr_val;
tx_hdr = hw_mbase + mbox->tx_start;
rx_hdr = hw_mbase + mbox->rx_start;
@@ -254,14 +255,52 @@ void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
spin_unlock(&mdev->mbox_lock);
+ /* Check if interrupt pending */
+ intr_val = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ intr_val |= data;
/* The interrupt should be fired after num_msgs is written
* to the shared memory
*/
- writeq(1, (void __iomem *)mbox->reg_base +
+ writeq(intr_val, (void __iomem *)mbox->reg_base +
(mbox->trigger | (devid << mbox->tr_shift)));
}
+
+void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_DOWN_MSG);
+}
EXPORT_SYMBOL(otx2_mbox_msg_send);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid)
+{
+ otx2_mbox_msg_send_data(mbox, devid, MBOX_UP_MSG);
+}
+EXPORT_SYMBOL(otx2_mbox_msg_send_up);
+
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid)
+{
+ u64 data;
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ /* If data is non-zero, wait for ~1ms and report back to the caller
+ * whether it has dropped to zero after the wait.
+ */
+ if (!data)
+ return true;
+
+ usleep_range(950, 1000);
+
+ data = readq((void __iomem *)mbox->reg_base +
+ (mbox->trigger | (devid << mbox->tr_shift)));
+
+ return data == 0;
+}
+EXPORT_SYMBOL(otx2_mbox_wait_for_zero);
+
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
int size, int size_rsp)
{
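Editor's note: the mailbox hunks above stop writing a bare 1 to the trigger register. The register now carries distinct MBOX_DOWN_MSG and MBOX_UP_MSG bits, and a sender ORs its bit into whatever is already pending so both message streams can share one doorbell. A hedged sketch of that read-modify-write, with a plain variable standing in for the MMIO register:

    #include <stdint.h>
    #include <stdio.h>

    #define MBOX_DOWN_MSG  1ULL
    #define MBOX_UP_MSG    2ULL

    static uint64_t trigger;    /* stands in for readq()/writeq() on the doorbell */

    /* OR the new message type into the doorbell so a pending bit of the
     * other type is not clobbered by this notification.
     */
    static void mbox_msg_send_data(uint64_t data)
    {
        uint64_t intr_val = trigger;    /* readq() in the driver */

        intr_val |= data;
        trigger = intr_val;             /* writeq() in the driver */
    }

    int main(void)
    {
        mbox_msg_send_data(MBOX_DOWN_MSG);
        mbox_msg_send_data(MBOX_UP_MSG);
        printf("doorbell = %llu\n", (unsigned long long)trigger);   /* 3 */
        return 0;
    }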
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 61ab7f66f053..eb2a20b5a0d0 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -16,6 +16,9 @@
#define MBOX_SIZE SZ_64K
+#define MBOX_DOWN_MSG 1
+#define MBOX_UP_MSG 2
+
/* AF/PF: PF initiated, PF/VF VF initiated */
#define MBOX_DOWN_RX_START 0
#define MBOX_DOWN_RX_SIZE (46 * SZ_1K)
@@ -101,6 +104,7 @@ int otx2_mbox_regions_init(struct otx2_mbox *mbox, void __force **hwbase,
struct pci_dev *pdev, void __force *reg_base,
int direction, int ndevs, unsigned long *bmap);
void otx2_mbox_msg_send(struct otx2_mbox *mbox, int devid);
+void otx2_mbox_msg_send_up(struct otx2_mbox *mbox, int devid);
int otx2_mbox_wait_for_rsp(struct otx2_mbox *mbox, int devid);
int otx2_mbox_busy_poll_for_rsp(struct otx2_mbox *mbox, int devid);
struct mbox_msghdr *otx2_mbox_alloc_msg_rsp(struct otx2_mbox *mbox, int devid,
@@ -118,6 +122,8 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox,
return otx2_mbox_alloc_msg_rsp(mbox, devid, size, 0);
}
+bool otx2_mbox_wait_for_zero(struct otx2_mbox *mbox, int devid);
+
/* Mailbox message types */
#define MBOX_MSG_MASK 0xFFFF
#define MBOX_MSG_INVALID 0xFFFE
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
index dfd23580e3b8..d39d86e694cc 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -121,13 +121,17 @@ int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
{
struct mcs_intr_info *req;
- int err, pf;
+ int pf;
pf = rvu_get_pf(event->pcifunc);
+ mutex_lock(&rvu->mbox_lock);
+
req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
- if (!req)
+ if (!req) {
+ mutex_unlock(&rvu->mbox_lock);
return -ENOMEM;
+ }
req->mcs_id = event->mcs_id;
req->intr_mask = event->intr_mask;
@@ -135,10 +139,11 @@ static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
req->hdr.pcifunc = event->pcifunc;
req->lmac_id = event->lmac_id;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
- if (err)
- dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pf);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf);
+
+ mutex_unlock(&rvu->mbox_lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 07d4859de53a..ff78251f92d4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2119,7 +2119,7 @@ bad_message:
}
}
-static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
+static void __rvu_mbox_handler(struct rvu_work *mwork, int type, bool poll)
{
struct rvu *rvu = mwork->rvu;
int offset, err, id, devid;
@@ -2186,6 +2186,9 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
}
mw->mbox_wrk[devid].num_msgs = 0;
+ if (poll)
+ otx2_mbox_wait_for_zero(mbox, devid);
+
/* Send mbox responses to VF/PF */
otx2_mbox_msg_send(mbox, devid);
}
@@ -2193,15 +2196,18 @@ static void __rvu_mbox_handler(struct rvu_work *mwork, int type)
static inline void rvu_afpf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
+ struct rvu *rvu = mwork->rvu;
- __rvu_mbox_handler(mwork, TYPE_AFPF);
+ mutex_lock(&rvu->mbox_lock);
+ __rvu_mbox_handler(mwork, TYPE_AFPF, true);
+ mutex_unlock(&rvu->mbox_lock);
}
static inline void rvu_afvf_mbox_handler(struct work_struct *work)
{
struct rvu_work *mwork = container_of(work, struct rvu_work, work);
- __rvu_mbox_handler(mwork, TYPE_AFVF);
+ __rvu_mbox_handler(mwork, TYPE_AFVF, false);
}
static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type)
@@ -2376,6 +2382,8 @@ static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw,
}
}
+ mutex_init(&rvu->mbox_lock);
+
mbox_regions = kcalloc(num, sizeof(void *), GFP_KERNEL);
if (!mbox_regions) {
err = -ENOMEM;
@@ -2525,10 +2533,9 @@ static void rvu_queue_work(struct mbox_wq_info *mw, int first,
}
}
-static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+static irqreturn_t rvu_mbox_pf_intr_handler(int irq, void *rvu_irq)
{
struct rvu *rvu = (struct rvu *)rvu_irq;
- int vfs = rvu->vfs;
u64 intr;
intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT);
@@ -2542,6 +2549,18 @@ static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu *rvu = (struct rvu *)rvu_irq;
+ int vfs = rvu->vfs;
+ u64 intr;
+
+ /* Sync with mbox memory region */
+ rmb();
+
/* Handle VF interrupts */
if (vfs > 64) {
intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1));
@@ -2886,7 +2905,7 @@ static int rvu_register_interrupts(struct rvu *rvu)
/* Register mailbox interrupt handler */
sprintf(&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], "RVUAF Mbox");
ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_MBOX),
- rvu_mbox_intr_handler, 0,
+ rvu_mbox_pf_intr_handler, 0,
&rvu->irq_name[RVU_AF_INT_VEC_MBOX * NAME_SIZE], rvu);
if (ret) {
dev_err(rvu->dev,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index f390525a6217..35834687e40f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -591,6 +591,8 @@ struct rvu {
spinlock_t mcs_intrq_lock;
/* CPT interrupt lock */
spinlock_t cpt_intr_lock;
+
+ struct mutex mbox_lock; /* Serialize mbox up and down msgs */
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 38acdc7a73bb..72e060cf6b61 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -232,7 +232,7 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
struct cgx_link_user_info *linfo;
struct cgx_link_info_msg *msg;
unsigned long pfmap;
- int err, pfid;
+ int pfid;
linfo = &event->link_uinfo;
pfmap = cgxlmac_to_pfmap(rvu, event->cgx_id, event->lmac_id);
@@ -255,16 +255,22 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu)
continue;
}
+ mutex_lock(&rvu->mbox_lock);
+
/* Send mbox message to PF */
msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid);
- if (!msg)
+ if (!msg) {
+ mutex_unlock(&rvu->mbox_lock);
continue;
+ }
+
msg->link_info = *linfo;
- otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid);
- err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid);
- if (err)
- dev_warn(rvu->dev, "notification to pf %d failed\n",
- pfid);
+
+ otx2_mbox_wait_for_zero(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid);
+
+ mutex_unlock(&rvu->mbox_lock);
} while (pfmap);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 02d0b707aea5..a85ac039d779 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -1592,7 +1592,7 @@ int otx2_detach_resources(struct mbox *mbox)
detach->partial = false;
/* Send detach request to AF */
- otx2_mbox_msg_send(&mbox->mbox, 0);
+ otx2_sync_mbox_msg(mbox);
mutex_unlock(&mbox->lock);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 06910307085e..7e16a341ec58 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -815,7 +815,7 @@ static inline int otx2_sync_mbox_up_msg(struct mbox *mbox, int devid)
if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
return 0;
- otx2_mbox_msg_send(&mbox->mbox_up, devid);
+ otx2_mbox_msg_send_up(&mbox->mbox_up, devid);
err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
if (err)
return err;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index e5fe67e73865..b40bd0e46751 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -292,8 +292,8 @@ static int otx2_pf_flr_init(struct otx2_nic *pf, int num_vfs)
return 0;
}
-static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
- int first, int mdevs, u64 intr, int type)
+static void otx2_queue_vf_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
+ int first, int mdevs, u64 intr)
{
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
@@ -307,40 +307,26 @@ static void otx2_queue_work(struct mbox *mw, struct workqueue_struct *mbox_wq,
mbox = &mw->mbox;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
/* The hdr->num_msgs is set to zero immediately in the interrupt
- * handler to ensure that it holds a correct value next time
- * when the interrupt handler is called.
- * pf->mbox.num_msgs holds the data for use in pfaf_mbox_handler
- * pf>mbox.up_num_msgs holds the data for use in
- * pfaf_mbox_up_handler.
+ * handler to ensure that it holds a correct value next time
+ * when the interrupt handler is called. pf->mw[i].num_msgs
+ * holds the data for use in otx2_pfvf_mbox_handler and
+ * pf->mw[i].up_num_msgs holds the data for use in
+ * otx2_pfvf_mbox_up_handler.
*/
if (hdr->num_msgs) {
mw[i].num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_wrk);
}
mbox = &mw->mbox_up;
mdev = &mbox->dev[i];
- if (type == TYPE_PFAF)
- otx2_sync_mbox_bbuf(mbox, i);
hdr = mdev->mbase + mbox->rx_start;
if (hdr->num_msgs) {
mw[i].up_num_msgs = hdr->num_msgs;
hdr->num_msgs = 0;
- if (type == TYPE_PFAF)
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr),
- sizeof(u64)));
-
queue_work(mbox_wq, &mw[i].mbox_up_wrk);
}
}
@@ -356,8 +342,10 @@ static void otx2_forward_msg_pfvf(struct otx2_mbox_dev *mdev,
/* Msgs are already copied, trigger VF's mbox irq */
smp_wmb();
+ otx2_mbox_wait_for_zero(pfvf_mbox, devid);
+
offset = pfvf_mbox->trigger | (devid << pfvf_mbox->tr_shift);
- writeq(1, (void __iomem *)pfvf_mbox->reg_base + offset);
+ writeq(MBOX_DOWN_MSG, (void __iomem *)pfvf_mbox->reg_base + offset);
/* Restore VF's mbox bounce buffer region address */
src_mdev->mbase = bbuf_base;
@@ -547,7 +535,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
end:
offset = mbox->rx_start + msg->next_msgoff;
if (mdev->msgs_acked == (vf_mbox->up_num_msgs - 1))
- __otx2_mbox_reset(mbox, 0);
+ __otx2_mbox_reset(mbox, vf_idx);
mdev->msgs_acked++;
}
}
@@ -564,8 +552,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
if (vfs > 64) {
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(1));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(1), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr,
- TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 64, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
vfs = 64;
@@ -574,7 +561,7 @@ static irqreturn_t otx2_pfvf_mbox_intr_handler(int irq, void *pf_irq)
intr = otx2_read64(pf, RVU_PF_VFPF_MBOX_INTX(0));
otx2_write64(pf, RVU_PF_VFPF_MBOX_INTX(0), intr);
- otx2_queue_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr, TYPE_PFVF);
+ otx2_queue_vf_work(mbox, pf->mbox_pfvf_wq, 0, vfs, intr);
if (intr)
trace_otx2_msg_interrupt(mbox->mbox.pdev, "VF(s) to PF", intr);
@@ -597,8 +584,9 @@ static int otx2_pfvf_mbox_init(struct otx2_nic *pf, int numvfs)
if (!pf->mbox_pfvf)
return -ENOMEM;
- pf->mbox_pfvf_wq = alloc_ordered_workqueue("otx2_pfvf_mailbox",
- WQ_HIGHPRI | WQ_MEM_RECLAIM);
+ pf->mbox_pfvf_wq = alloc_workqueue("otx2_pfvf_mailbox",
+ WQ_UNBOUND | WQ_HIGHPRI |
+ WQ_MEM_RECLAIM, 0);
if (!pf->mbox_pfvf_wq)
return -ENOMEM;
@@ -821,20 +809,22 @@ static void otx2_pfaf_mbox_handler(struct work_struct *work)
struct mbox *af_mbox;
struct otx2_nic *pf;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
pf = af_mbox->pfvf;
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2_process_pfaf_mbox_msg(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
- if (mdev->msgs_acked == (af_mbox->num_msgs - 1))
+ if (mdev->msgs_acked == (num_msgs - 1))
__otx2_mbox_reset(mbox, 0);
mdev->msgs_acked++;
}
@@ -945,12 +935,14 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
int offset, id, devid = 0;
struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
+ u16 num_msgs;
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ num_msgs = rsp_hdr->num_msgs;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
devid = msg->pcifunc & RVU_PFVF_FUNC_MASK;
@@ -959,10 +951,11 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
otx2_process_mbox_msg_up(pf, msg);
offset = mbox->rx_start + msg->next_msgoff;
}
- if (devid) {
+ /* Forward to VF iff VFs are really present */
+ if (devid && pci_num_vf(pf->pdev)) {
otx2_forward_vf_mbox_msgs(pf, &pf->mbox.mbox_up,
MBOX_DIR_PFVF_UP, devid - 1,
- af_mbox->up_num_msgs);
+ num_msgs);
return;
}
@@ -972,16 +965,49 @@ static void otx2_pfaf_mbox_up_handler(struct work_struct *work)
static irqreturn_t otx2_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
struct otx2_nic *pf = (struct otx2_nic *)pf_irq;
- struct mbox *mbox;
+ struct mbox *mw = &pf->mbox;
+ struct otx2_mbox_dev *mdev;
+ struct otx2_mbox *mbox;
+ struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(pf, RVU_PF_INT, BIT_ULL(0));
- mbox = &pf->mbox;
- trace_otx2_msg_interrupt(mbox->mbox.pdev, "AF to PF", BIT_ULL(0));
+ mbox_data = otx2_read64(pf, RVU_PF_PFAF_MBOX0);
+
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
+ BIT_ULL(0));
+ }
+
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(pf, RVU_PF_PFAF_MBOX0, mbox_data);
+
+ mbox = &mw->mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(pf->mbox_wq, &mw->mbox_wrk);
- otx2_queue_work(mbox, pf->mbox_wq, 0, 1, 1, TYPE_PFAF);
+ trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
+ BIT_ULL(0));
+ }
return IRQ_HANDLED;
}
@@ -3087,6 +3113,7 @@ static void otx2_vf_link_event_task(struct work_struct *work)
struct otx2_vf_config *config;
struct cgx_link_info_msg *req;
struct mbox_msghdr *msghdr;
+ struct delayed_work *dwork;
struct otx2_nic *pf;
int vf_idx;
@@ -3095,10 +3122,24 @@ static void otx2_vf_link_event_task(struct work_struct *work)
vf_idx = config - config->pf->vf_configs;
pf = config->pf;
+ if (config->intf_down)
+ return;
+
+ mutex_lock(&pf->mbox.lock);
+
+ dwork = &config->link_event_work;
+
+ if (!otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx)) {
+ schedule_delayed_work(dwork, msecs_to_jiffies(100));
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
msghdr = otx2_mbox_alloc_msg_rsp(&pf->mbox_pfvf[0].mbox_up, vf_idx,
sizeof(*req), sizeof(struct msg_rsp));
if (!msghdr) {
dev_err(pf->dev, "Failed to create VF%d link event\n", vf_idx);
+ mutex_unlock(&pf->mbox.lock);
return;
}
@@ -3107,7 +3148,11 @@ static void otx2_vf_link_event_task(struct work_struct *work)
req->hdr.sig = OTX2_MBOX_REQ_SIG;
memcpy(&req->link_info, &pf->linfo, sizeof(req->link_info));
+ otx2_mbox_wait_for_zero(&pf->mbox_pfvf[0].mbox_up, vf_idx);
+
otx2_sync_mbox_up_msg(&pf->mbox_pfvf[0], vf_idx);
+
+ mutex_unlock(&pf->mbox.lock);
}
static int otx2_sriov_enable(struct pci_dev *pdev, int numvfs)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 35e06048356f..cf0aa16d7540 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -89,16 +89,20 @@ static void otx2vf_vfaf_mbox_handler(struct work_struct *work)
struct otx2_mbox *mbox;
struct mbox *af_mbox;
int offset, id;
+ u16 num_msgs;
af_mbox = container_of(work, struct mbox, mbox_wrk);
mbox = &af_mbox->mbox;
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (af_mbox->num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
+
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < af_mbox->num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_vfaf_mbox_msg(af_mbox->pfvf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -151,6 +155,7 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
struct mbox *vf_mbox;
struct otx2_nic *vf;
int offset, id;
+ u16 num_msgs;
vf_mbox = container_of(work, struct mbox, mbox_up_wrk);
vf = vf_mbox->pfvf;
@@ -158,12 +163,14 @@ static void otx2vf_vfaf_mbox_up_handler(struct work_struct *work)
mdev = &mbox->dev[0];
rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (vf_mbox->up_num_msgs == 0)
+ num_msgs = rsp_hdr->num_msgs;
+
+ if (num_msgs == 0)
return;
offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
- for (id = 0; id < vf_mbox->up_num_msgs; id++) {
+ for (id = 0; id < num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + offset);
otx2vf_process_mbox_msg_up(vf, msg);
offset = mbox->rx_start + msg->next_msgoff;
@@ -178,40 +185,48 @@ static irqreturn_t otx2vf_vfaf_mbox_intr_handler(int irq, void *vf_irq)
struct otx2_mbox_dev *mdev;
struct otx2_mbox *mbox;
struct mbox_hdr *hdr;
+ u64 mbox_data;
/* Clear the IRQ */
otx2_write64(vf, RVU_VF_INT, BIT_ULL(0));
+ mbox_data = otx2_read64(vf, RVU_VF_VFPF_MBOX0);
+
/* Read latest mbox data */
smp_rmb();
- /* Check for PF => VF response messages */
- mbox = &vf->mbox.mbox;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
+ if (mbox_data & MBOX_DOWN_MSG) {
+ mbox_data &= ~MBOX_DOWN_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF response messages */
+ mbox = &vf->mbox.mbox;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
- trace_otx2_msg_interrupt(mbox->pdev, "PF to VF", BIT_ULL(0));
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);
+ trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF to VF",
+ BIT_ULL(0));
}
- /* Check for PF => VF notification messages */
- mbox = &vf->mbox.mbox_up;
- mdev = &mbox->dev[0];
- otx2_sync_mbox_bbuf(mbox, 0);
- hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- if (hdr->num_msgs) {
- vf->mbox.up_num_msgs = hdr->num_msgs;
- hdr->num_msgs = 0;
- memset(mbox->hwbase + mbox->rx_start, 0,
- ALIGN(sizeof(struct mbox_hdr), sizeof(u64)));
- queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+ if (mbox_data & MBOX_UP_MSG) {
+ mbox_data &= ~MBOX_UP_MSG;
+ otx2_write64(vf, RVU_VF_VFPF_MBOX0, mbox_data);
+
+ /* Check for PF => VF notification messages */
+ mbox = &vf->mbox.mbox_up;
+ mdev = &mbox->dev[0];
+ otx2_sync_mbox_bbuf(mbox, 0);
+
+ hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
+ if (hdr->num_msgs)
+ queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);
+
+ trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF to VF",
+ BIT_ULL(0));
}
return IRQ_HANDLED;
@@ -760,8 +775,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(vf);
otx2_shutdown_tc(vf);
otx2_shutdown_qos(vf);
- otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
+ otx2vf_disable_mbox_intr(vf);
free_percpu(vf->hw.lmt_info);
if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
qmem_free(vf->dev, vf->dync_lmt);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index de123350bd46..caa13b9cedff 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -677,8 +677,7 @@ static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
mcr_new = mcr_cur;
mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
- MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
- MAC_MCR_RX_FIFO_CLR_DIS;
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_RX_FIFO_CLR_DIS;
/* Only update control register when needed! */
if (mcr_new != mcr_cur)
@@ -694,7 +693,7 @@ static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
phylink_config);
u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
- mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK);
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
@@ -803,7 +802,7 @@ static void mtk_mac_link_up(struct phylink_config *config,
if (rx_pause)
mcr |= MAC_MCR_FORCE_RX_FC;
- mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN | MAC_MCR_FORCE_LINK;
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index b2a5d9c3733d..6ce0db3a1a92 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -994,7 +994,7 @@ void mtk_ppe_start(struct mtk_ppe *ppe)
MTK_PPE_KEEPALIVE_DISABLE) |
FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
- MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+ MTK_PPE_SCAN_MODE_CHECK_AGE) |
FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
MTK_PPE_ENTRIES_SHIFT);
if (mtk_is_netsys_v2_or_greater(ppe->eth))
@@ -1090,17 +1090,21 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
mtk_ppe_cache_enable(ppe, false);
- /* disable offload engine */
- ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
- ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
-
/* disable aging */
val = MTK_PPE_TB_CFG_AGE_NON_L4 |
MTK_PPE_TB_CFG_AGE_UNBIND |
MTK_PPE_TB_CFG_AGE_TCP |
MTK_PPE_TB_CFG_AGE_UDP |
- MTK_PPE_TB_CFG_AGE_TCP_FIN;
+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
+ MTK_PPE_TB_CFG_SCAN_MODE;
ppe_clear(ppe, MTK_PPE_TB_CFG, val);
- return mtk_ppe_wait_busy(ppe);
+ if (mtk_ppe_wait_busy(ppe))
+ return -ETIMEDOUT;
+
+ /* disable offload engine */
+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+
+ return 0;
}
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index 321fd8d00730..37efb1ea9fcd 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -45,7 +45,7 @@ struct qcauart {
unsigned char *tx_buffer;
};
-static ssize_t
+static size_t
qca_tty_receive(struct serdev_device *serdev, const u8 *data, size_t count)
{
struct qcauart *qca = serdev_device_get_drvdata(serdev);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 93295916b1d2..5b5d5e4310d1 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -571,7 +571,7 @@ static int txgbe_clock_register(struct txgbe *txgbe)
char clk_name[32];
struct clk *clk;
- snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d",
+ snprintf(clk_name, sizeof(clk_name), "i2c_dw.%d",
pci_dev_id(pdev));
clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 8297ef681bf5..6c6ec9475709 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -2831,8 +2831,8 @@ EXPORT_SYMBOL(genphy_resume);
int genphy_loopback(struct phy_device *phydev, bool enable)
{
if (enable) {
- u16 val, ctl = BMCR_LOOPBACK;
- int ret;
+ u16 ctl = BMCR_LOOPBACK;
+ int ret, val;
ctl |= mii_bmcr_encode_fixed(phydev->speed, phydev->duplex);
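Editor's note: the genphy_loopback hunk above widens val from u16 to int because the MDIO read helpers return a negative errno on failure; storing that in a u16 silently turns the error into a large positive register value. A small generic illustration of the truncation, using a fake read helper rather than the phylib API:

    #include <stdio.h>

    /* Pretend register read that can fail with a negative errno */
    static int fake_phy_read(int fail)
    {
        return fail ? -5 /* -EIO */ : 0x1234;
    }

    int main(void)
    {
        unsigned short truncated = fake_phy_read(1);    /* -5 becomes 0xfffb */
        int preserved = fake_phy_read(1);               /* sign is kept */

        printf("truncated=%#x preserved=%d\n", truncated, preserved);
        return 0;
    }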
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 13d902462d8e..bcdfbf61eb66 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -1464,8 +1464,6 @@ static netdev_features_t veth_fix_features(struct net_device *dev,
if (peer_priv->_xdp_prog)
features &= ~NETIF_F_GSO_SOFTWARE;
}
- if (priv->_xdp_prog)
- features |= NETIF_F_GRO;
return features;
}
@@ -1569,14 +1567,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
}
if (!old_prog) {
- if (!veth_gro_requested(dev)) {
- /* user-space did not require GRO, but adding
- * XDP is supposed to get GRO working
- */
- dev->features |= NETIF_F_GRO;
- netdev_features_change(dev);
- }
-
peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer->max_mtu = max_mtu;
}
@@ -1592,14 +1582,6 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog,
if (dev->flags & IFF_UP)
veth_disable_xdp(dev);
- /* if user-space did not require GRO, since adding XDP
- * enabled it, clear it now
- */
- if (!veth_gro_requested(dev)) {
- dev->features &= ~NETIF_F_GRO;
- netdev_features_change(dev);
- }
-
if (peer) {
peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer->max_mtu = ETH_MAX_MTU;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d7ce4a1011ea..c22d1118a133 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -80,6 +80,11 @@ struct virtnet_stat_desc {
size_t offset;
};
+struct virtnet_sq_free_stats {
+ u64 packets;
+ u64 bytes;
+};
+
struct virtnet_sq_stats {
struct u64_stats_sync syncp;
u64_stats_t packets;
@@ -304,6 +309,12 @@ struct virtnet_info {
/* Work struct for config space updates */
struct work_struct config_work;
+ /* Work struct for setting rx mode */
+ struct work_struct rx_mode_work;
+
+ /* OK to queue work setting RX mode? */
+ bool rx_mode_work_enabled;
+
/* Does the affinity hint is set for virtqueues? */
bool affinity_hint_set;
@@ -366,6 +377,31 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
+static void __free_old_xmit(struct send_queue *sq, bool in_napi,
+ struct virtnet_sq_free_stats *stats)
+{
+ unsigned int len;
+ void *ptr;
+
+ while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
+ ++stats->packets;
+
+ if (!is_xdp_frame(ptr)) {
+ struct sk_buff *skb = ptr;
+
+ pr_debug("Sent skb %p\n", skb);
+
+ stats->bytes += skb->len;
+ napi_consume_skb(skb, in_napi);
+ } else {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ stats->bytes += xdp_get_frame_len(frame);
+ xdp_return_frame(frame);
+ }
+ }
+}
+
/* Converting between virtqueue no. and kernel tx/rx queue no.
* 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
*/
@@ -447,6 +483,20 @@ static void disable_delayed_refill(struct virtnet_info *vi)
spin_unlock_bh(&vi->refill_lock);
}
+static void enable_rx_mode_work(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ vi->rx_mode_work_enabled = true;
+ rtnl_unlock();
+}
+
+static void disable_rx_mode_work(struct virtnet_info *vi)
+{
+ rtnl_lock();
+ vi->rx_mode_work_enabled = false;
+ rtnl_unlock();
+}
+
static void virtqueue_napi_schedule(struct napi_struct *napi,
struct virtqueue *vq)
{
@@ -776,39 +826,21 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
virtnet_rq_free_buf(vi, rq, buf);
}
-static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, bool in_napi)
{
- unsigned int len;
- unsigned int packets = 0;
- unsigned int bytes = 0;
- void *ptr;
-
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(!is_xdp_frame(ptr))) {
- struct sk_buff *skb = ptr;
-
- pr_debug("Sent skb %p\n", skb);
+ struct virtnet_sq_free_stats stats = {0};
- bytes += skb->len;
- napi_consume_skb(skb, in_napi);
- } else {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- }
- packets++;
- }
+ __free_old_xmit(sq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
*/
- if (!packets)
+ if (!stats.packets)
return;
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, bytes);
- u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_update_end(&sq->stats.syncp);
}
@@ -848,7 +880,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit_skbs(sq, false);
+ free_old_xmit(sq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
virtqueue_disable_cb(sq->vq);
@@ -947,15 +979,12 @@ static int virtnet_xdp_xmit(struct net_device *dev,
int n, struct xdp_frame **frames, u32 flags)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_sq_free_stats stats = {0};
struct receive_queue *rq = vi->rq;
struct bpf_prog *xdp_prog;
struct send_queue *sq;
- unsigned int len;
- int packets = 0;
- int bytes = 0;
int nxmit = 0;
int kicks = 0;
- void *ptr;
int ret;
int i;
@@ -974,20 +1003,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(is_xdp_frame(ptr))) {
- struct xdp_frame *frame = ptr_to_xdp(ptr);
-
- bytes += xdp_get_frame_len(frame);
- xdp_return_frame(frame);
- } else {
- struct sk_buff *skb = ptr;
-
- bytes += skb->len;
- napi_consume_skb(skb, false);
- }
- packets++;
- }
+ __free_old_xmit(sq, false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1007,8 +1023,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, bytes);
- u64_stats_add(&sq->stats.packets, packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets);
u64_stats_add(&sq->stats.xdp_tx, n);
u64_stats_add(&sq->stats.xdp_tx_drops, n - nxmit);
u64_stats_add(&sq->stats.kicks, kicks);
@@ -2160,7 +2176,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
do {
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, true);
+ free_old_xmit(sq, true);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
@@ -2308,7 +2324,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, true);
+ free_old_xmit(sq, true);
if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
netif_tx_wake_queue(txq);
@@ -2398,7 +2414,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
if (use_napi)
virtqueue_disable_cb(sq->vq);
- free_old_xmit_skbs(sq, false);
+ free_old_xmit(sq, false);
} while (use_napi && kick &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
@@ -2550,8 +2566,10 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
* into the hypervisor, so the request should be handled immediately.
*/
while (!virtqueue_get_buf(vi->cvq, &tmp) &&
- !virtqueue_is_broken(vi->cvq))
+ !virtqueue_is_broken(vi->cvq)) {
+ cond_resched();
cpu_relax();
+ }
return vi->ctrl->status == VIRTIO_NET_OK;
}
@@ -2706,9 +2724,11 @@ static int virtnet_close(struct net_device *dev)
return 0;
}
-static void virtnet_set_rx_mode(struct net_device *dev)
+static void virtnet_rx_mode_work(struct work_struct *work)
{
- struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_info *vi =
+ container_of(work, struct virtnet_info, rx_mode_work);
+ struct net_device *dev = vi->dev;
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
struct netdev_hw_addr *ha;
@@ -2721,6 +2741,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
+ rtnl_lock();
+
vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
@@ -2738,14 +2760,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
vi->ctrl->allmulti ? "en" : "dis");
+ netif_addr_lock_bh(dev);
+
uc_count = netdev_uc_count(dev);
mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
(2 * sizeof(mac_data->entries)), GFP_ATOMIC);
mac_data = buf;
- if (!buf)
+ if (!buf) {
+ netif_addr_unlock_bh(dev);
+ rtnl_unlock();
return;
+ }
sg_init_table(sg, 2);
@@ -2766,6 +2793,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
+ netif_addr_unlock_bh(dev);
+
sg_set_buf(&sg[1], mac_data,
sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -2773,9 +2802,19 @@ static void virtnet_set_rx_mode(struct net_device *dev)
VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
+ rtnl_unlock();
+
kfree(buf);
}
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ if (vi->rx_mode_work_enabled)
+ schedule_work(&vi->rx_mode_work);
+}
+
static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
@@ -3856,6 +3895,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
+ flush_work(&vi->rx_mode_work);
netif_tx_lock_bh(vi->dev);
netif_device_detach(vi->dev);
@@ -3878,6 +3919,7 @@ static int virtnet_restore_up(struct virtio_device *vdev)
virtio_device_ready(vdev);
enable_delayed_refill(vi);
+ enable_rx_mode_work(vi);
if (netif_running(vi->dev)) {
err = virtnet_open(vi->dev);
@@ -4676,6 +4718,7 @@ static int virtnet_probe(struct virtio_device *vdev)
vdev->priv = vi;
INIT_WORK(&vi->config_work, virtnet_config_changed_work);
+ INIT_WORK(&vi->rx_mode_work, virtnet_rx_mode_work);
spin_lock_init(&vi->refill_lock);
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) {
@@ -4798,6 +4841,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (vi->has_rss || vi->has_rss_hash_report)
virtnet_init_default_rss(vi);
+ enable_rx_mode_work(vi);
+
/* serialize netdev register + virtio_device_ready() with ndo_open() */
rtnl_lock();
@@ -4895,6 +4940,8 @@ static void virtnet_remove(struct virtio_device *vdev)
/* Make sure no work handler is accessing the device. */
flush_work(&vi->config_work);
+ disable_rx_mode_work(vi);
+ flush_work(&vi->rx_mode_work);
unregister_netdev(vi->dev);
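Editor's note: the virtio_net hunks above move rx-mode programming from ndo_set_rx_mode() into a work item. The ndo callback can run in atomic context, while the control-virtqueue command now cond_resched()s and must be allowed to sleep, so the callback only schedules work while a gate controlled by probe/freeze/remove is open. A minimal model of that gate, with plain flags standing in for the driver's workqueue machinery:

    #include <stdbool.h>
    #include <stdio.h>

    /* Names mirror the driver, but this is only an illustration. */
    static bool rx_mode_work_enabled;
    static int work_queued;

    static void virtnet_set_rx_mode(void)
    {
        if (rx_mode_work_enabled)
            work_queued++;              /* schedule_work(&vi->rx_mode_work) */
    }

    int main(void)
    {
        rx_mode_work_enabled = true;    /* enable_rx_mode_work() after probe */
        virtnet_set_rx_mode();          /* queued */

        rx_mode_work_enabled = false;   /* disable_rx_mode_work() on freeze/remove */
        virtnet_set_rx_mode();          /* ignored; a single flush_work() then suffices */

        printf("queued %d work item(s)\n", work_queued);
        return 0;
    }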
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 80ddaff759d4..a6c787454a1a 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -382,12 +382,12 @@ vmxnet3_process_xdp(struct vmxnet3_adapter *adapter,
page = rbi->page;
dma_sync_single_for_cpu(&adapter->pdev->dev,
page_pool_get_dma_addr(page) +
- rq->page_pool->p.offset, rcd->len,
+ rq->page_pool->p.offset, rbi->len,
page_pool_get_dma_dir(rq->page_pool));
- xdp_init_buff(&xdp, rbi->len, &rq->xdp_rxq);
+ xdp_init_buff(&xdp, PAGE_SIZE, &rq->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), rq->page_pool->p.offset,
- rcd->len, false);
+ rbi->len, false);
xdp_buff_clear_frags_flag(&xdp);
xdp_prog = rcu_dereference(rq->adapter->xdp_bpf_prog);
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
index 960371df470a..f69b1f579a0c 100644
--- a/drivers/net/wan/fsl_qmc_hdlc.c
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -780,7 +780,7 @@ static const struct of_device_id qmc_hdlc_id_table[] = {
{ .compatible = "fsl,qmc-hdlc" },
{} /* sentinel */
};
-MODULE_DEVICE_TABLE(of, qmc_hdlc_driver);
+MODULE_DEVICE_TABLE(of, qmc_hdlc_id_table);
static struct platform_driver qmc_hdlc_driver = {
.driver = {
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index deb9636b0ecf..3feb36ee5bfb 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -237,7 +237,6 @@ static const struct net_device_ops netdev_ops = {
.ndo_open = wg_open,
.ndo_stop = wg_stop,
.ndo_start_xmit = wg_xmit,
- .ndo_get_stats64 = dev_get_tstats64
};
static void wg_destruct(struct net_device *dev)
@@ -262,7 +261,6 @@ static void wg_destruct(struct net_device *dev)
rcu_barrier(); /* Wait for all the peers to be actually freed. */
wg_ratelimiter_uninit();
memzero_explicit(&wg->static_identity, sizeof(wg->static_identity));
- free_percpu(dev->tstats);
kvfree(wg->index_hashtable);
kvfree(wg->peer_hashtable);
mutex_unlock(&wg->device_update_lock);
@@ -297,6 +295,7 @@ static void wg_setup(struct net_device *dev)
dev->hw_enc_features |= WG_NETDEV_FEATURES;
dev->mtu = ETH_DATA_LEN - overhead;
dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
SET_NETDEV_DEVTYPE(dev, &device_type);
@@ -331,14 +330,10 @@ static int wg_newlink(struct net *src_net, struct net_device *dev,
if (!wg->index_hashtable)
goto err_free_peer_hashtable;
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!dev->tstats)
- goto err_free_index_hashtable;
-
wg->handshake_receive_wq = alloc_workqueue("wg-kex-%s",
WQ_CPU_INTENSIVE | WQ_FREEZABLE, 0, dev->name);
if (!wg->handshake_receive_wq)
- goto err_free_tstats;
+ goto err_free_index_hashtable;
wg->handshake_send_wq = alloc_workqueue("wg-kex-%s",
WQ_UNBOUND | WQ_FREEZABLE, 0, dev->name);
@@ -397,8 +392,6 @@ err_destroy_handshake_send:
destroy_workqueue(wg->handshake_send_wq);
err_destroy_handshake_receive:
destroy_workqueue(wg->handshake_receive_wq);
-err_free_tstats:
- free_percpu(dev->tstats);
err_free_index_hashtable:
kvfree(wg->index_hashtable);
err_free_peer_hashtable:
diff --git a/drivers/net/wireguard/netlink.c b/drivers/net/wireguard/netlink.c
index e220d761b1f2..f7055180ba4a 100644
--- a/drivers/net/wireguard/netlink.c
+++ b/drivers/net/wireguard/netlink.c
@@ -164,8 +164,8 @@ get_peer(struct wg_peer *peer, struct sk_buff *skb, struct dump_ctx *ctx)
if (!allowedips_node)
goto no_allowedips;
if (!ctx->allowedips_seq)
- ctx->allowedips_seq = peer->device->peer_allowedips.seq;
- else if (ctx->allowedips_seq != peer->device->peer_allowedips.seq)
+ ctx->allowedips_seq = ctx->wg->peer_allowedips.seq;
+ else if (ctx->allowedips_seq != ctx->wg->peer_allowedips.seq)
goto no_allowedips;
allowedips_nest = nla_nest_start(skb, WGPEER_A_ALLOWEDIPS);
@@ -255,17 +255,17 @@ static int wg_get_device_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!peers_nest)
goto out;
ret = 0;
- /* If the last cursor was removed via list_del_init in peer_remove, then
+ lockdep_assert_held(&wg->device_update_lock);
+ /* If the last cursor was removed in peer_remove or peer_remove_all, then
* we just treat this the same as there being no more peers left. The
* reason is that seq_nr should indicate to userspace that this isn't a
* coherent dump anyway, so they'll try again.
*/
if (list_empty(&wg->peer_list) ||
- (ctx->next_peer && list_empty(&ctx->next_peer->peer_list))) {
+ (ctx->next_peer && ctx->next_peer->is_dead)) {
nla_nest_cancel(skb, peers_nest);
goto out;
}
- lockdep_assert_held(&wg->device_update_lock);
peer = list_prepare_entry(ctx->next_peer, &wg->peer_list, peer_list);
list_for_each_entry_continue(peer, &wg->peer_list, peer_list) {
if (get_peer(peer, skb, ctx)) {
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index df275b4fccb6..eb8851113654 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -251,7 +251,7 @@ static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair)
if (unlikely(!READ_ONCE(keypair->receiving.is_valid) ||
wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) ||
- keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) {
+ READ_ONCE(keypair->receiving_counter.counter) >= REJECT_AFTER_MESSAGES)) {
WRITE_ONCE(keypair->receiving.is_valid, false);
return false;
}
@@ -318,7 +318,7 @@ static bool counter_validate(struct noise_replay_counter *counter, u64 their_cou
for (i = 1; i <= top; ++i)
counter->backtrack[(i + index_current) &
((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0;
- counter->counter = their_counter;
+ WRITE_ONCE(counter->counter, their_counter);
}
index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1;
@@ -463,7 +463,7 @@ int wg_packet_rx_poll(struct napi_struct *napi, int budget)
net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n",
peer->device->dev->name,
PACKET_CB(skb)->nonce,
- keypair->receiving_counter.counter);
+ READ_ONCE(keypair->receiving_counter.counter));
goto next;
}
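Editor's note: the WireGuard hunks above wrap the replay counter in READ_ONCE()/WRITE_ONCE(): it is written under the counter lock but read locklessly in the expiry checks, and the annotations keep the compiler from tearing, caching, or re-reading the value. A rough userspace analogy using C11 relaxed atomics; this is not what the kernel macros expand to, only the same intent:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static _Atomic uint64_t counter;

    /* Writer side: analogous to WRITE_ONCE(counter->counter, their_counter) */
    static void update_counter(uint64_t their_counter)
    {
        atomic_store_explicit(&counter, their_counter, memory_order_relaxed);
    }

    /* Reader side: analogous to READ_ONCE() in the REJECT_AFTER_MESSAGES check */
    static int counter_expired(uint64_t limit)
    {
        return atomic_load_explicit(&counter, memory_order_relaxed) >= limit;
    }

    int main(void)
    {
        update_counter(10);
        printf("expired: %d\n", counter_expired(100));
        return 0;
    }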
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 2eb5978bd79e..cfbbe0713317 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -203,8 +203,8 @@ static int pn532_uart_rx_is_frame(struct sk_buff *skb)
return 0;
}
-static ssize_t pn532_receive_buf(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t pn532_receive_buf(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev);
size_t i;
diff --git a/drivers/nfc/s3fwrn5/uart.c b/drivers/nfc/s3fwrn5/uart.c
index 456d3947116c..9c09c10c2a46 100644
--- a/drivers/nfc/s3fwrn5/uart.c
+++ b/drivers/nfc/s3fwrn5/uart.c
@@ -51,8 +51,8 @@ static const struct s3fwrn5_phy_ops uart_phy_ops = {
.write = s3fwrn82_uart_write,
};
-static ssize_t s3fwrn82_uart_read(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t s3fwrn82_uart_read(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev);
size_t i;
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index a480cdeac288..dd6ec0865141 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1532,7 +1532,7 @@ put_dev:
return ret;
}
-static int apple_nvme_remove(struct platform_device *pdev)
+static void apple_nvme_remove(struct platform_device *pdev)
{
struct apple_nvme *anv = platform_get_drvdata(pdev);
@@ -1547,8 +1547,6 @@ static int apple_nvme_remove(struct platform_device *pdev)
apple_rtkit_shutdown(anv->rtk);
apple_nvme_detach_genpd(anv);
-
- return 0;
}
static void apple_nvme_shutdown(struct platform_device *pdev)
@@ -1598,7 +1596,7 @@ static struct platform_driver apple_nvme_driver = {
.pm = pm_sleep_ptr(&apple_nvme_pm_ops),
},
.probe = apple_nvme_probe,
- .remove = apple_nvme_remove,
+ .remove_new = apple_nvme_remove,
.shutdown = apple_nvme_shutdown,
};
module_platform_driver(apple_nvme_driver);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 00864a634470..943d72bdd794 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1807,9 +1807,6 @@ static void nvme_config_discard(struct nvme_ns *ns, struct queue_limits *lim)
{
struct nvme_ctrl *ctrl = ns->ctrl;
- BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
- NVME_DSM_MAX_RANGES);
-
if (ctrl->dmrsl && ctrl->dmrsl <= nvme_sect_to_lba(ns->head, UINT_MAX))
lim->max_hw_discard_sectors =
nvme_lba_to_sect(ns->head, ctrl->dmrsl);
@@ -3237,7 +3234,7 @@ static int nvme_init_identify(struct nvme_ctrl *ctrl)
if (ctrl->shutdown_timeout != shutdown_timeout)
dev_info(ctrl->device,
- "Shutdown timeout set to %u seconds\n",
+ "D3 entry latency set to %u seconds\n",
ctrl->shutdown_timeout);
} else
ctrl->shutdown_timeout = shutdown_timeout;
@@ -4391,7 +4388,8 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
set->ops = ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect and keep alive */
+ set->reserved_tags = 2;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_NO_SCHED;
if (ctrl->ops->flags & NVME_F_BLOCKING)
@@ -4460,7 +4458,8 @@ int nvme_alloc_io_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->quirks & NVME_QUIRK_SHARED_TAGS)
set->reserved_tags = NVME_AQ_DEPTH;
else if (ctrl->ops->flags & NVME_F_FABRICS)
- set->reserved_tags = NVMF_RESERVED_TAGS;
+ /* Reserved for fabric connect */
+ set->reserved_tags = 1;
set->numa_node = ctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE;
if (ctrl->ops->flags & NVME_F_BLOCKING)
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 06cc54851b1b..37c974c38dcb 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -19,13 +19,6 @@
#define NVMF_DEF_FAIL_FAST_TMO -1
/*
- * Reserved one command for internal usage. This command is used for sending
- * the connect command, as well as for the keep alive command on the admin
- * queue once live.
- */
-#define NVMF_RESERVED_TAGS 1
-
-/*
* Define a host as seen by the target. We allocate one at boot, but also
* allow the override it when creating controllers. This is both to provide
* persistence of the Host NQN over multiple boots, and to allow using
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e6267a6aa380..8e0bb9692685 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -3363,6 +3363,9 @@ static const struct pci_device_id nvme_id_table[] = {
NVME_QUIRK_BOGUS_NID, },
{ PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x126f, 0x2262), /* Silicon Motion generic */
+ .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
+ NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */
.driver_data = NVME_QUIRK_NO_NS_DESC_LIST |
NVME_QUIRK_BOGUS_NID, },
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index fc3eed00f9ff..e05571b2a1b0 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -97,8 +97,7 @@ static int nvme_sc_to_pr_err(int nvme_sc)
static int nvme_send_pr_command(struct block_device *bdev,
struct nvme_command *c, void *data, unsigned int data_len)
{
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- nvme_disk_is_ns_head(bdev->bd_disk))
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index 09fcaa519e5b..3c55f7edd181 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -236,8 +236,7 @@ static ssize_t nuse_show(struct device *dev, struct device_attribute *attr,
struct block_device *bdev = disk->part0;
int ret;
- if (IS_ENABLED(CONFIG_NVME_MULTIPATH) &&
- bdev->bd_disk->fops == &nvme_ns_head_ops)
+ if (nvme_disk_is_ns_head(bdev->bd_disk))
ret = ns_head_update_nuse(head);
else
ret = ns_update_nuse(bdev->bd_disk->private_data);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3692b56cb58d..fdbcdcedcee9 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -37,6 +37,14 @@ module_param(so_priority, int, 0644);
MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
/*
+ * Use an unbound workqueue for nvme_tcp_wq so that its CPU affinity can be
+ * configured from sysfs.
+ */
+static bool wq_unbound;
+module_param(wq_unbound, bool, 0644);
+MODULE_PARM_DESC(wq_unbound, "Use unbound workqueue for nvme-tcp IO context (default false)");
+
+/*
* TLS handshake timeout
*/
static int tls_handshake_timeout = 10;
@@ -1546,7 +1554,10 @@ static void nvme_tcp_set_queue_io_cpu(struct nvme_tcp_queue *queue)
else if (nvme_tcp_poll_queue(queue))
n = qid - ctrl->io_queues[HCTX_TYPE_DEFAULT] -
ctrl->io_queues[HCTX_TYPE_READ] - 1;
- queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
+ if (wq_unbound)
+ queue->io_cpu = WORK_CPU_UNBOUND;
+ else
+ queue->io_cpu = cpumask_next_wrap(n - 1, cpu_online_mask, -1, false);
}
static void nvme_tcp_tls_done(void *data, int status, key_serial_t pskid)
@@ -2785,6 +2796,8 @@ static struct nvmf_transport_ops nvme_tcp_transport = {
static int __init nvme_tcp_init_module(void)
{
+ unsigned int wq_flags = WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_SYSFS;
+
BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8);
BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72);
BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24);
@@ -2794,8 +2807,10 @@ static int __init nvme_tcp_init_module(void)
BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128);
BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24);
- nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq",
- WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (wq_unbound)
+ wq_flags |= WQ_UNBOUND;
+
+ nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", wq_flags, 0);
if (!nvme_tcp_wq)
return -ENOMEM;
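Aside, not part of the patch above: when wq_unbound is set, queued work is no longer pinned to a computed CPU, and because the workqueue is created with WQ_SYSFS its cpumask can be tuned at runtime under /sys/devices/virtual/workqueue/nvme_tcp_wq/. A condensed sketch of the same pattern with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_fn(struct work_struct *work)
{
        /* I/O context processing would run here. */
}

static int __init example_init(void)
{
        /* Unbound and sysfs-visible: the cpumask is tunable from user space. */
        example_wq = alloc_workqueue("example_wq",
                                     WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
        if (!example_wq)
                return -ENOMEM;

        INIT_WORK(&example_work, example_fn);
        /* Let the scheduler pick any CPU allowed by the workqueue's mask. */
        queue_work_on(WORK_CPU_UNBOUND, example_wq, &example_work);
        return 0;
}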
diff --git a/drivers/nvme/host/trace.c b/drivers/nvme/host/trace.c
index 1c36fcedea20..0288315f0050 100644
--- a/drivers/nvme/host/trace.c
+++ b/drivers/nvme/host/trace.c
@@ -119,7 +119,10 @@ static const char *nvme_trace_get_lba_status(struct trace_seq *p,
static const char *nvme_trace_admin_format_nvm(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
- u8 lbaf = cdw10[0] & 0xF;
+ /*
+ * lbafu (cdw10 bits 13:12) is already placed in the upper nibble; lbafl is bits 03:00.
+ */
+ u8 lbaf = (cdw10[1] & 0x30) | (cdw10[0] & 0xF);
u8 mset = (cdw10[0] >> 4) & 0x1;
u8 pi = (cdw10[0] >> 5) & 0x7;
u8 pil = cdw10[1] & 0x1;
@@ -164,12 +167,27 @@ static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
u8 zsa = cdw10[12];
u8 all = cdw10[13];
- trace_seq_printf(p, "slba=%llu, zsa=%u, all=%u", slba, zsa, all);
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
trace_seq_putc(p, 0);
return ret;
@@ -177,15 +195,86 @@ static const char *nvme_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
static const char *nvme_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
const char *ret = trace_seq_buffer_ptr(p);
u64 slba = get_unaligned_le64(cdw10);
u32 numd = get_unaligned_le32(cdw10 + 8);
u8 zra = cdw10[12];
u8 zrasf = cdw10[13];
+ const char *zrasf_str;
u8 pr = cdw10[14];
- trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u, pr=%u",
- slba, numd, zra, zrasf, pr);
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_reg(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrega = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 ptpl = (cdw10[3] >> 6) & 0x3;
+
+ trace_seq_printf(p, "rrega=%u, iekey=%u, ptpl=%u",
+ rrega, iekey, ptpl);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_acq(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 racqa = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "racqa=%u, iekey=%u, rtype=%u",
+ racqa, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_rel(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 rrela = cdw10[0] & 0x7;
+ u8 iekey = (cdw10[0] >> 3) & 0x1;
+ u8 rtype = cdw10[1];
+
+ trace_seq_printf(p, "rrela=%u, iekey=%u, rtype=%u",
+ rrela, iekey, rtype);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvme_trace_resv_report(struct trace_seq *p, u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u32 numd = get_unaligned_le32(cdw10);
+ u8 eds = cdw10[4] & 0x1;
+
+ trace_seq_printf(p, "numd=%u, eds=%u", numd, eds);
trace_seq_putc(p, 0);
return ret;
@@ -243,6 +332,14 @@ const char *nvme_trace_parse_nvm_cmd(struct trace_seq *p,
return nvme_trace_zone_mgmt_send(p, cdw10);
case nvme_cmd_zone_mgmt_recv:
return nvme_trace_zone_mgmt_recv(p, cdw10);
+ case nvme_cmd_resv_register:
+ return nvme_trace_resv_reg(p, cdw10);
+ case nvme_cmd_resv_acquire:
+ return nvme_trace_resv_acq(p, cdw10);
+ case nvme_cmd_resv_release:
+ return nvme_trace_resv_rel(p, cdw10);
+ case nvme_cmd_resv_report:
+ return nvme_trace_resv_report(p, cdw10);
default:
return nvme_trace_common(p, cdw10);
}
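Aside, not part of the patch above: the new decoders share one defensive pattern - a sparse string table indexed by the raw field value, with table holes and out-of-range values printed as "reserved" so malformed commands still trace cleanly. The pattern in isolation, with illustrative names:

#include <linux/kernel.h>

static const char * const action_strs[] = {
        [0x01] = "close zone",
        [0x04] = "reset zone",
};

/* Map a raw field to a printable name without indexing past the table. */
static const char *action_name(u8 action)
{
        if (action < ARRAY_SIZE(action_strs) && action_strs[action])
                return action_strs[action];
        return "reserved";
}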
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index f2bb9d95ecf4..5b8c63e74639 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -53,7 +53,6 @@ struct nvmet_rdma_cmd {
enum {
NVMET_RDMA_REQ_INLINE_DATA = (1 << 0),
- NVMET_RDMA_REQ_INVALIDATE_RKEY = (1 << 1),
};
struct nvmet_rdma_rsp {
@@ -722,7 +721,7 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
struct rdma_cm_id *cm_id = rsp->queue->cm_id;
struct ib_send_wr *first_wr;
- if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) {
+ if (rsp->invalidate_rkey) {
rsp->send_wr.opcode = IB_WR_SEND_WITH_INV;
rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey;
} else {
@@ -905,10 +904,8 @@ static u16 nvmet_rdma_map_sgl_keyed(struct nvmet_rdma_rsp *rsp,
goto error_out;
rsp->n_rdma += ret;
- if (invalidate) {
+ if (invalidate)
rsp->invalidate_rkey = key;
- rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY;
- }
return 0;
@@ -1047,6 +1044,7 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->req.cmd = cmd->nvme_cmd;
rsp->req.port = queue->port;
rsp->n_rdma = 0;
+ rsp->invalidate_rkey = 0;
if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
unsigned long flags;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 2aa5762e9f50..a5422e2c979a 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -898,6 +898,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
pr_err("bad nvme-tcp pdu length (%d)\n",
le32_to_cpu(icreq->hdr.plen));
nvmet_tcp_fatal_error(queue);
+ return -EPROTO;
}
if (icreq->pfv != NVME_TCP_PFV_1_0) {
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 6ee1f3db81d0..8d1806a82887 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -119,6 +119,67 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
}
}
+static const char *nvmet_trace_zone_mgmt_send(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zsa_strs[] = {
+ [0x01] = "close zone",
+ [0x02] = "finish zone",
+ [0x03] = "open zone",
+ [0x04] = "reset zone",
+ [0x05] = "offline zone",
+ [0x10] = "set zone descriptor extension"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ const char *zsa_str;
+ u8 zsa = cdw10[12];
+ u8 all = cdw10[13];
+
+ if (zsa < ARRAY_SIZE(zsa_strs) && zsa_strs[zsa])
+ zsa_str = zsa_strs[zsa];
+ else
+ zsa_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, zsa=%u:%s, all=%u",
+ slba, zsa, zsa_str, all);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
+static const char *nvmet_trace_zone_mgmt_recv(struct trace_seq *p, u8 *cdw10)
+{
+ static const char * const zrasf_strs[] = {
+ [0x00] = "list all zones",
+ [0x01] = "list the zones in the ZSE: Empty state",
+ [0x02] = "list the zones in the ZSIO: Implicitly Opened state",
+ [0x03] = "list the zones in the ZSEO: Explicitly Opened state",
+ [0x04] = "list the zones in the ZSC: Closed state",
+ [0x05] = "list the zones in the ZSF: Full state",
+ [0x06] = "list the zones in the ZSRO: Read Only state",
+ [0x07] = "list the zones in the ZSO: Offline state",
+ [0x09] = "list the zones that have the zone attribute"
+ };
+ const char *ret = trace_seq_buffer_ptr(p);
+ u64 slba = get_unaligned_le64(cdw10);
+ u32 numd = get_unaligned_le32(&cdw10[8]);
+ u8 zra = cdw10[12];
+ u8 zrasf = cdw10[13];
+ const char *zrasf_str;
+ u8 pr = cdw10[14];
+
+ if (zrasf < ARRAY_SIZE(zrasf_strs) && zrasf_strs[zrasf])
+ zrasf_str = zrasf_strs[zrasf];
+ else
+ zrasf_str = "reserved";
+
+ trace_seq_printf(p, "slba=%llu, numd=%u, zra=%u, zrasf=%u:%s, pr=%u",
+ slba, numd, zra, zrasf, zrasf_str, pr);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
u8 opcode, u8 *cdw10)
{
@@ -126,9 +187,14 @@ const char *nvmet_trace_parse_nvm_cmd(struct trace_seq *p,
case nvme_cmd_read:
case nvme_cmd_write:
case nvme_cmd_write_zeroes:
+ case nvme_cmd_zone_append:
return nvmet_trace_read_write(p, cdw10);
case nvme_cmd_dsm:
return nvmet_trace_dsm(p, cdw10);
+ case nvme_cmd_zone_mgmt_send:
+ return nvmet_trace_zone_mgmt_send(p, cdw10);
+ case nvme_cmd_zone_mgmt_recv:
+ return nvmet_trace_zone_mgmt_recv(p, cdw10);
default:
return nvmet_trace_common(p, cdw10);
}
@@ -176,6 +242,34 @@ static const char *nvmet_trace_fabrics_property_get(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_fabrics_auth_send(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 tl = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, tl=%u",
+ spsp0, spsp1, secp, tl);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
+static const char *nvmet_trace_fabrics_auth_receive(struct trace_seq *p, u8 *spc)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 spsp0 = spc[1];
+ u8 spsp1 = spc[2];
+ u8 secp = spc[3];
+ u32 al = get_unaligned_le32(spc + 4);
+
+ trace_seq_printf(p, "spsp0=%02x, spsp1=%02x, secp=%02x, al=%u",
+ spsp0, spsp1, secp, al);
+ trace_seq_putc(p, 0);
+ return ret;
+}
+
static const char *nvmet_trace_fabrics_common(struct trace_seq *p, u8 *spc)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -195,6 +289,10 @@ const char *nvmet_trace_parse_fabrics_cmd(struct trace_seq *p,
return nvmet_trace_fabrics_connect(p, spc);
case nvme_fabrics_type_property_get:
return nvmet_trace_fabrics_property_get(p, spc);
+ case nvme_fabrics_type_auth_send:
+ return nvmet_trace_fabrics_auth_send(p, spc);
+ case nvme_fabrics_type_auth_receive:
+ return nvmet_trace_fabrics_auth_receive(p, spc);
default:
return nvmet_trace_fabrics_common(p, spc);
}
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index eb357ac2e54a..2c6b99402df8 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -807,6 +807,11 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod
if (addr && len == (2 * sizeof(u32))) {
info.bit_offset = be32_to_cpup(addr++);
info.nbits = be32_to_cpup(addr);
+ if (info.bit_offset >= BITS_PER_BYTE || info.nbits < 1) {
+ dev_err(dev, "nvmem: invalid bits on %pOF\n", child);
+ of_node_put(child);
+ return -EINVAL;
+ }
}
info.np = of_node_get(child);
diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c
index 6a6aa58369ff..8b5e2de138eb 100644
--- a/drivers/nvmem/layouts.c
+++ b/drivers/nvmem/layouts.c
@@ -45,7 +45,7 @@ static void nvmem_layout_bus_remove(struct device *dev)
return drv->remove(layout);
}
-static struct bus_type nvmem_layout_bus_type = {
+static const struct bus_type nvmem_layout_bus_type = {
.name = "nvmem-layout",
.match = nvmem_layout_bus_match,
.probe = nvmem_layout_bus_probe,
diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c
index b922df99f9bc..33678d0af2c2 100644
--- a/drivers/nvmem/meson-efuse.c
+++ b/drivers/nvmem/meson-efuse.c
@@ -47,7 +47,6 @@ static int meson_efuse_probe(struct platform_device *pdev)
struct nvmem_config *econfig;
struct clk *clk;
unsigned int size;
- int ret;
sm_np = of_parse_phandle(pdev->dev.of_node, "secure-monitor", 0);
if (!sm_np) {
@@ -60,27 +59,9 @@ static int meson_efuse_probe(struct platform_device *pdev)
if (!fw)
return -EPROBE_DEFER;
- clk = devm_clk_get(dev, NULL);
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to get efuse gate");
- return ret;
- }
-
- ret = clk_prepare_enable(clk);
- if (ret) {
- dev_err(dev, "failed to enable gate");
- return ret;
- }
-
- ret = devm_add_action_or_reset(dev,
- (void(*)(void *))clk_disable_unprepare,
- clk);
- if (ret) {
- dev_err(dev, "failed to add disable callback");
- return ret;
- }
+ clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get efuse gate");
if (meson_sm_call(fw, SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) {
dev_err(dev, "failed to get max user");
diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c
index 84f05b40a411..9caf04667341 100644
--- a/drivers/nvmem/mtk-efuse.c
+++ b/drivers/nvmem/mtk-efuse.c
@@ -68,6 +68,7 @@ static int mtk_efuse_probe(struct platform_device *pdev)
struct nvmem_config econfig = {};
struct mtk_efuse_priv *priv;
const struct mtk_efuse_pdata *pdata;
+ struct platform_device *socinfo;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -88,8 +89,16 @@ static int mtk_efuse_probe(struct platform_device *pdev)
if (pdata->uses_post_processing)
econfig.fixup_dt_cell_info = &mtk_efuse_fixup_dt_cell_info;
nvmem = devm_nvmem_register(dev, &econfig);
+ if (IS_ERR(nvmem))
+ return PTR_ERR(nvmem);
- return PTR_ERR_OR_ZERO(nvmem);
+ socinfo = platform_device_register_data(&pdev->dev, "mtk-socinfo",
+ PLATFORM_DEVID_AUTO, NULL, 0);
+ if (IS_ERR(socinfo))
+ dev_info(dev, "MediaTek SoC Information will be unavailable\n");
+
+ platform_set_drvdata(pdev, socinfo);
+ return 0;
}
static const struct mtk_efuse_pdata mtk_mt8186_efuse_pdata = {
@@ -108,8 +117,17 @@ static const struct of_device_id mtk_efuse_of_match[] = {
};
MODULE_DEVICE_TABLE(of, mtk_efuse_of_match);
+static void mtk_efuse_remove(struct platform_device *pdev)
+{
+ struct platform_device *socinfo = platform_get_drvdata(pdev);
+
+ if (!IS_ERR_OR_NULL(socinfo))
+ platform_device_unregister(socinfo);
+}
+
static struct platform_driver mtk_efuse_driver = {
.probe = mtk_efuse_probe,
+ .remove_new = mtk_efuse_remove,
.driver = {
.name = "mediatek,efuse",
.of_match_table = mtk_efuse_of_match,
diff --git a/drivers/nvmem/zynqmp_nvmem.c b/drivers/nvmem/zynqmp_nvmem.c
index 7f15aa89a9d0..8682adaacd69 100644
--- a/drivers/nvmem/zynqmp_nvmem.c
+++ b/drivers/nvmem/zynqmp_nvmem.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2019 Xilinx, Inc.
+ * Copyright (C) 2022 - 2023, Advanced Micro Devices, Inc.
*/
+#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
@@ -10,36 +12,190 @@
#include <linux/firmware/xlnx-zynqmp.h>
#define SILICON_REVISION_MASK 0xF
+#define P_USER_0_64_UPPER_MASK GENMASK(31, 16)
+#define P_USER_127_LOWER_4_BIT_MASK GENMASK(3, 0)
+#define WORD_INBYTES 4
+#define SOC_VER_SIZE 0x4
+#define EFUSE_MEMORY_SIZE 0x177
+#define UNUSED_SPACE 0x8
+#define ZYNQMP_NVMEM_SIZE (SOC_VER_SIZE + UNUSED_SPACE + \
+ EFUSE_MEMORY_SIZE)
+#define SOC_VERSION_OFFSET 0x0
+#define EFUSE_START_OFFSET 0xC
+#define EFUSE_END_OFFSET 0xFC
+#define EFUSE_PUF_START_OFFSET 0x100
+#define EFUSE_PUF_MID_OFFSET 0x140
+#define EFUSE_PUF_END_OFFSET 0x17F
+#define EFUSE_NOT_ENABLED 29
-struct zynqmp_nvmem_data {
- struct device *dev;
- struct nvmem_device *nvmem;
+/*
+ * efuse access type
+ */
+enum efuse_access {
+ EFUSE_READ = 0,
+ EFUSE_WRITE
+};
+
+/**
+ * struct xilinx_efuse - eFUSE access descriptor
+ * @src: address of the buffer holding the data to be written/read
+ * @size: read/write word count
+ * @offset: read/write offset
+ * @flag: 0 - eFUSE read, 1 - eFUSE write
+ * @pufuserfuse: 0 - non-PUF eFUSEs, @offset is used for the read/write;
+ *              1 - PUF user fuse row number
+ *
+ * This structure stores all the details required to
+ * read/write eFUSE memory.
+ */
+struct xilinx_efuse {
+ u64 src;
+ u32 size;
+ u32 offset;
+ enum efuse_access flag;
+ u32 pufuserfuse;
};
-static int zynqmp_nvmem_read(void *context, unsigned int offset,
- void *val, size_t bytes)
+static int zynqmp_efuse_access(void *context, unsigned int offset,
+ void *val, size_t bytes, enum efuse_access flag,
+ unsigned int pufflag)
{
+ struct device *dev = context;
+ struct xilinx_efuse *efuse;
+ dma_addr_t dma_addr;
+ dma_addr_t dma_buf;
+ size_t words = bytes / WORD_INBYTES;
int ret;
- int idcode, version;
- struct zynqmp_nvmem_data *priv = context;
-
- ret = zynqmp_pm_get_chipid(&idcode, &version);
- if (ret < 0)
- return ret;
+ int value;
+ char *data;
+
+ if (bytes % WORD_INBYTES != 0) {
+ dev_err(dev, "Bytes requested should be word aligned\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pufflag == 0 && offset % WORD_INBYTES) {
+ dev_err(dev, "Offset requested should be word aligned\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (pufflag == 1 && flag == EFUSE_WRITE) {
+ memcpy(&value, val, bytes);
+ if ((offset == EFUSE_PUF_START_OFFSET ||
+ offset == EFUSE_PUF_MID_OFFSET) &&
+ value & P_USER_0_64_UPPER_MASK) {
+ dev_err(dev, "Only lower 4 bytes are allowed to be programmed in P_USER_0 & P_USER_64\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (offset == EFUSE_PUF_END_OFFSET &&
+ (value & P_USER_127_LOWER_4_BIT_MASK)) {
+ dev_err(dev, "Only MSB 28 bits are allowed to be programmed for P_USER_127\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ efuse = dma_alloc_coherent(dev, sizeof(struct xilinx_efuse),
+ &dma_addr, GFP_KERNEL);
+ if (!efuse)
+ return -ENOMEM;
- dev_dbg(priv->dev, "Read chipid val %x %x\n", idcode, version);
- *(int *)val = version & SILICON_REVISION_MASK;
+ data = dma_alloc_coherent(dev, sizeof(bytes),
+ &dma_buf, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto efuse_data_fail;
+ }
+
+ if (flag == EFUSE_WRITE) {
+ memcpy(data, val, bytes);
+ efuse->flag = EFUSE_WRITE;
+ } else {
+ efuse->flag = EFUSE_READ;
+ }
+
+ efuse->src = dma_buf;
+ efuse->size = words;
+ efuse->offset = offset;
+ efuse->pufuserfuse = pufflag;
+
+ zynqmp_pm_efuse_access(dma_addr, (u32 *)&ret);
+ if (ret != 0) {
+ if (ret == EFUSE_NOT_ENABLED) {
+ dev_err(dev, "efuse access is not enabled\n");
+ ret = -EOPNOTSUPP;
+ } else {
+ dev_err(dev, "Error in efuse read %x\n", ret);
+ ret = -EPERM;
+ }
+ goto efuse_access_err;
+ }
+
+ if (flag == EFUSE_READ)
+ memcpy(val, data, bytes);
+efuse_access_err:
+ dma_free_coherent(dev, sizeof(bytes),
+ data, dma_buf);
+efuse_data_fail:
+ dma_free_coherent(dev, sizeof(struct xilinx_efuse),
+ efuse, dma_addr);
+
+ return ret;
+}
- return 0;
+static int zynqmp_nvmem_read(void *context, unsigned int offset, void *val, size_t bytes)
+{
+ struct device *dev = context;
+ int ret;
+ int pufflag = 0;
+ int idcode;
+ int version;
+
+ if (offset >= EFUSE_PUF_START_OFFSET && offset <= EFUSE_PUF_END_OFFSET)
+ pufflag = 1;
+
+ switch (offset) {
+ /* Soc version offset is zero */
+ case SOC_VERSION_OFFSET:
+ if (bytes != SOC_VER_SIZE)
+ return -EOPNOTSUPP;
+
+ ret = zynqmp_pm_get_chipid((u32 *)&idcode, (u32 *)&version);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "Read chipid val %x %x\n", idcode, version);
+ *(int *)val = version & SILICON_REVISION_MASK;
+ break;
+ /* Efuse offset starts from 0xc */
+ case EFUSE_START_OFFSET ... EFUSE_END_OFFSET:
+ case EFUSE_PUF_START_OFFSET ... EFUSE_PUF_END_OFFSET:
+ ret = zynqmp_efuse_access(context, offset, val,
+ bytes, EFUSE_READ, pufflag);
+ break;
+ default:
+ *(u32 *)val = 0xDEADBEEF;
+ ret = 0;
+ break;
+ }
+
+ return ret;
}
-static struct nvmem_config econfig = {
- .name = "zynqmp-nvmem",
- .owner = THIS_MODULE,
- .word_size = 1,
- .size = 1,
- .read_only = true,
-};
+static int zynqmp_nvmem_write(void *context,
+ unsigned int offset, void *val, size_t bytes)
+{
+ int pufflag = 0;
+
+ if (offset < EFUSE_START_OFFSET || offset > EFUSE_PUF_END_OFFSET)
+ return -EOPNOTSUPP;
+
+ if (offset >= EFUSE_PUF_START_OFFSET && offset <= EFUSE_PUF_END_OFFSET)
+ pufflag = 1;
+
+ return zynqmp_efuse_access(context, offset,
+ val, bytes, EFUSE_WRITE, pufflag);
+}
static const struct of_device_id zynqmp_nvmem_match[] = {
{ .compatible = "xlnx,zynqmp-nvmem-fw", },
@@ -50,21 +206,18 @@ MODULE_DEVICE_TABLE(of, zynqmp_nvmem_match);
static int zynqmp_nvmem_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct zynqmp_nvmem_data *priv;
-
- priv = devm_kzalloc(dev, sizeof(struct zynqmp_nvmem_data), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ struct nvmem_config econfig = {};
- priv->dev = dev;
+ econfig.name = "zynqmp-nvmem";
+ econfig.owner = THIS_MODULE;
+ econfig.word_size = 1;
+ econfig.size = ZYNQMP_NVMEM_SIZE;
econfig.dev = dev;
econfig.add_legacy_fixed_of_cells = true;
econfig.reg_read = zynqmp_nvmem_read;
- econfig.priv = priv;
-
- priv->nvmem = devm_nvmem_register(dev, &econfig);
+ econfig.reg_write = zynqmp_nvmem_write;
- return PTR_ERR_OR_ZERO(priv->nvmem);
+ return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &econfig));
}
static struct platform_driver zynqmp_nvmem_driver = {
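Aside, not part of the patch above: after this rework the device exposes a window spanning the SoC version word at offset 0x0 and the eFUSE range starting at 0xC, so the version can be read through the regular nvmem consumer API. A hedged consumer-side sketch, assuming a handle to this nvmem device has already been looked up:

#include <linux/types.h>
#include <linux/nvmem-consumer.h>

/* Read the 4-byte SoC version word at offset 0x0 (offset and size as
 * defined by the patch above).
 */
static int read_soc_version(struct nvmem_device *nvmem, u32 *version)
{
        int ret = nvmem_device_read(nvmem, 0x0, sizeof(*version), version);

        return ret < 0 ? ret : 0;
}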
diff --git a/drivers/of/property.c b/drivers/of/property.c
index c907478ef89e..a6358ee99b74 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -1072,7 +1072,8 @@ of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
}
static void of_link_to_phandle(struct device_node *con_np,
- struct device_node *sup_np)
+ struct device_node *sup_np,
+ u8 flags)
{
struct device_node *tmp_np = of_node_get(sup_np);
@@ -1091,7 +1092,7 @@ static void of_link_to_phandle(struct device_node *con_np,
tmp_np = of_get_next_parent(tmp_np);
}
- fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np));
+ fwnode_link_add(of_fwnode_handle(con_np), of_fwnode_handle(sup_np), flags);
}
/**
@@ -1204,6 +1205,8 @@ static struct device_node *parse_##fname(struct device_node *np, \
* to a struct device, implement this ops so fw_devlink can use it
* to find the true consumer.
* @optional: Describes whether a supplier is mandatory or not
+ * @fwlink_flags: Optional fwnode link flags to use when creating a fwnode link
+ * for this property.
*
* Returns:
* parse_prop() return values are
@@ -1216,6 +1219,7 @@ struct supplier_bindings {
const char *prop_name, int index);
struct device_node *(*get_con_dev)(struct device_node *np);
bool optional;
+ u8 fwlink_flags;
};
DEFINE_SIMPLE_PROP(clocks, "clocks", "#clock-cells")
@@ -1223,6 +1227,7 @@ DEFINE_SIMPLE_PROP(interconnects, "interconnects", "#interconnect-cells")
DEFINE_SIMPLE_PROP(iommus, "iommus", "#iommu-cells")
DEFINE_SIMPLE_PROP(mboxes, "mboxes", "#mbox-cells")
DEFINE_SIMPLE_PROP(io_channels, "io-channels", "#io-channel-cells")
+DEFINE_SIMPLE_PROP(io_backends, "io-backends", "#io-backend-cells")
DEFINE_SIMPLE_PROP(interrupt_parent, "interrupt-parent", NULL)
DEFINE_SIMPLE_PROP(dmas, "dmas", "#dma-cells")
DEFINE_SIMPLE_PROP(power_domains, "power-domains", "#power-domain-cells")
@@ -1246,6 +1251,7 @@ DEFINE_SIMPLE_PROP(leds, "leds", NULL)
DEFINE_SIMPLE_PROP(backlight, "backlight", NULL)
DEFINE_SIMPLE_PROP(panel, "panel", NULL)
DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells")
+DEFINE_SIMPLE_PROP(post_init_providers, "post-init-providers", NULL)
DEFINE_SUFFIX_PROP(regulators, "-supply", NULL)
DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells")
@@ -1323,6 +1329,7 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_iommu_maps, .optional = true, },
{ .parse_prop = parse_mboxes, },
{ .parse_prop = parse_io_channels, },
+ { .parse_prop = parse_io_backends, },
{ .parse_prop = parse_interrupt_parent, },
{ .parse_prop = parse_dmas, .optional = true, },
{ .parse_prop = parse_power_domains, },
@@ -1355,6 +1362,10 @@ static const struct supplier_bindings of_supplier_bindings[] = {
{ .parse_prop = parse_regulators, },
{ .parse_prop = parse_gpio, },
{ .parse_prop = parse_gpios, },
+ {
+ .parse_prop = parse_post_init_providers,
+ .fwlink_flags = FWLINK_FLAG_IGNORE,
+ },
{}
};
@@ -1399,7 +1410,7 @@ static int of_link_property(struct device_node *con_np, const char *prop_name)
: of_node_get(con_np);
matched = true;
i++;
- of_link_to_phandle(con_dev_np, phandle);
+ of_link_to_phandle(con_dev_np, phandle, s->fwlink_flags);
of_node_put(phandle);
of_node_put(con_dev_np);
}
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 84d5701d606c..e6dc857aac3f 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -219,7 +219,7 @@ out_irq:
return err;
}
-static int __exit amiga_parallel_remove(struct platform_device *pdev)
+static void __exit amiga_parallel_remove(struct platform_device *pdev)
{
struct parport *port = platform_get_drvdata(pdev);
@@ -227,11 +227,10 @@ static int __exit amiga_parallel_remove(struct platform_device *pdev)
if (port->irq != PARPORT_IRQ_NONE)
free_irq(IRQ_AMIGA_CIAA_FLG, port);
parport_put_port(port);
- return 0;
}
static struct platform_driver amiga_parallel_driver = {
- .remove = __exit_p(amiga_parallel_remove),
+ .remove_new = __exit_p(amiga_parallel_remove),
.driver = {
.name = "amiga-parallel",
},
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index c81d4d86994b..949236a7a27c 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -334,7 +334,7 @@ out_unmap:
return err;
}
-static int bpp_remove(struct platform_device *op)
+static void bpp_remove(struct platform_device *op)
{
struct parport *p = dev_get_drvdata(&op->dev);
struct parport_operations *ops = p->ops;
@@ -351,8 +351,6 @@ static int bpp_remove(struct platform_device *op)
kfree(ops);
dev_set_drvdata(&op->dev, NULL);
-
- return 0;
}
static const struct of_device_id bpp_match[] = {
@@ -370,7 +368,7 @@ static struct platform_driver bpp_sbus_driver = {
.of_match_table = bpp_match,
},
.probe = bpp_probe,
- .remove = bpp_remove,
+ .remove_new = bpp_remove,
};
module_platform_driver(bpp_sbus_driver);
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 004d86230aa6..7526a9e714fa 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -87,7 +87,8 @@ config RISCV_PMU_SBI
filtering, counter configuration.
config STARFIVE_STARLINK_PMU
- depends on ARCH_STARFIVE || (COMPILE_TEST && 64BIT)
+ depends on ARCH_STARFIVE || COMPILE_TEST
+ depends on 64BIT
bool "StarFive StarLink PMU"
help
Provide support for StarLink Performance Monitor Unit.
@@ -95,6 +96,20 @@ config STARFIVE_STARLINK_PMU
an L3 memory system. The L3 cache events are added into perf event
subsystem, allowing monitoring of various L3 cache perf events.
+config ANDES_CUSTOM_PMU
+ bool "Andes custom PMU support"
+ depends on ARCH_RENESAS && RISCV_ALTERNATIVE && RISCV_PMU_SBI
+ default y
+ help
+ The Andes cores implement the PMU overflow extension very
+ similarly to the standard Sscofpmf and Smcntrpmf extensions.
+
+ This will patch the overflow and pending CSRs and handle the
+ non-standard behaviour via the regular SBI PMU driver and
+ interface.
+
+ If you don't know what to do here, say "Y".
+
config ARM_PMU_ACPI
depends on ARM_PMU && ACPI
def_bool y
diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c
index 452aab49db1e..8cbe6e5f9c39 100644
--- a/drivers/perf/riscv_pmu_sbi.c
+++ b/drivers/perf/riscv_pmu_sbi.c
@@ -19,11 +19,33 @@
#include <linux/of.h>
#include <linux/cpu_pm.h>
#include <linux/sched/clock.h>
+#include <linux/soc/andes/irq.h>
#include <asm/errata_list.h>
#include <asm/sbi.h>
#include <asm/cpufeature.h>
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE_2( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU, \
+ "csrr %0, " __stringify(ANDES_CSR_SCOUNTEROF), \
+ 0, RISCV_ISA_EXT_XANDESPMU, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
+#define ALT_SBI_PMU_OVF_CLEAR_PENDING(__irq_mask) \
+asm volatile(ALTERNATIVE( \
+ "csrc " __stringify(CSR_IP) ", %0\n\t", \
+ "csrc " __stringify(ANDES_CSR_SLIP) ", %0\n\t", \
+ 0, RISCV_ISA_EXT_XANDESPMU, \
+ CONFIG_ANDES_CUSTOM_PMU) \
+ : : "r"(__irq_mask) \
+ : "memory")
+
#define SYSCTL_NO_USER_ACCESS 0
#define SYSCTL_USER_ACCESS 1
#define SYSCTL_LEGACY 2
@@ -61,6 +83,7 @@ static int sysctl_perf_user_access __read_mostly = SYSCTL_USER_ACCESS;
static union sbi_pmu_ctr_info *pmu_ctr_list;
static bool riscv_pmu_use_irq;
static unsigned int riscv_pmu_irq_num;
+static unsigned int riscv_pmu_irq_mask;
static unsigned int riscv_pmu_irq;
/* Cache the available counters in a bitmask */
@@ -694,7 +717,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
event = cpu_hw_evt->events[fidx];
if (!event) {
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
return IRQ_NONE;
}
@@ -708,7 +731,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
* Overflow interrupt pending bit should only be cleared after stopping
* all the counters to avoid any race condition.
*/
- csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
/* No overflow bit is set */
if (!overflow)
@@ -780,8 +803,7 @@ static int pmu_sbi_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (riscv_pmu_use_irq) {
cpu_hw_evt->irq = riscv_pmu_irq;
- csr_clear(CSR_IP, BIT(riscv_pmu_irq_num));
- csr_set(CSR_IE, BIT(riscv_pmu_irq_num));
+ ALT_SBI_PMU_OVF_CLEAR_PENDING(riscv_pmu_irq_mask);
enable_percpu_irq(riscv_pmu_irq, IRQ_TYPE_NONE);
}
@@ -792,7 +814,6 @@ static int pmu_sbi_dying_cpu(unsigned int cpu, struct hlist_node *node)
{
if (riscv_pmu_use_irq) {
disable_percpu_irq(riscv_pmu_irq);
- csr_clear(CSR_IE, BIT(riscv_pmu_irq_num));
}
/* Disable all counters access for user mode now */
@@ -816,8 +837,14 @@ static int pmu_sbi_setup_irqs(struct riscv_pmu *pmu, struct platform_device *pde
riscv_cached_mimpid(0) == 0) {
riscv_pmu_irq_num = THEAD_C9XX_RV_IRQ_PMU;
riscv_pmu_use_irq = true;
+ } else if (riscv_isa_extension_available(NULL, XANDESPMU) &&
+ IS_ENABLED(CONFIG_ANDES_CUSTOM_PMU)) {
+ riscv_pmu_irq_num = ANDES_SLI_CAUSE_BASE + ANDES_RV_IRQ_PMOVI;
+ riscv_pmu_use_irq = true;
}
+ riscv_pmu_irq_mask = BIT(riscv_pmu_irq_num % BITS_PER_LONG);
+
if (!riscv_pmu_use_irq)
return -EOPNOTSUPP;
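Aside, not part of the patch above: riscv_pmu_irq_mask is needed because the Andes overflow interrupt number is built from a cause base larger than XLEN, so the bit written to the custom pending/enable CSRs must be taken modulo BITS_PER_LONG. Purely illustrative arithmetic; the example constants are assumptions, the real values live in <linux/soc/andes/irq.h>:

#include <linux/bits.h>

static inline unsigned long pmu_irq_mask(unsigned int irq_num)
{
        /*
         * E.g. if irq_num were 256 + 18 = 274, this would yield BIT(18)
         * on rv64, i.e. the bit actually cleared in ANDES_CSR_SLIP above.
         */
        return BIT(irq_num % BITS_PER_LONG);
}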
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 4cef568231bf..787354b849c7 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -87,6 +87,7 @@ source "drivers/phy/motorola/Kconfig"
source "drivers/phy/mscc/Kconfig"
source "drivers/phy/qualcomm/Kconfig"
source "drivers/phy/ralink/Kconfig"
+source "drivers/phy/realtek/Kconfig"
source "drivers/phy/renesas/Kconfig"
source "drivers/phy/rockchip/Kconfig"
source "drivers/phy/samsung/Kconfig"
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index fb3dc9de6111..868a220ed0f6 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -26,6 +26,7 @@ obj-y += allwinner/ \
mscc/ \
qualcomm/ \
ralink/ \
+ realtek/ \
renesas/ \
rockchip/ \
samsung/ \
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 7f9b4de772ee..c5c8d70bc853 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -490,6 +490,53 @@ int phy_calibrate(struct phy *phy)
EXPORT_SYMBOL_GPL(phy_calibrate);
/**
+ * phy_notify_connect() - phy connect notification
+ * @phy: the phy returned by phy_get()
+ * @port: the port index for connect
+ *
+ * If the PHY needs to know the connection status, the consumer can use this
+ * function to notify the PHY that a device has been connected on @port.
+ *
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int phy_notify_connect(struct phy *phy, int port)
+{
+ int ret;
+
+ if (!phy || !phy->ops->connect)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->connect(phy, port);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_notify_connect);
+
+/**
+ * phy_notify_disconnect() - phy disconnect notification
+ * @phy: the phy returned by phy_get()
+ * @port: the port index for disconnect
+ *
+ * If the PHY needs to know the connection status, the consumer can use this
+ * function to notify the PHY that the device on @port has been disconnected.
+ *
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int phy_notify_disconnect(struct phy *phy, int port)
+{
+ int ret;
+
+ if (!phy || !phy->ops->disconnect)
+ return 0;
+
+ mutex_lock(&phy->mutex);
+ ret = phy->ops->disconnect(phy, port);
+ mutex_unlock(&phy->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phy_notify_disconnect);
+
+/**
* phy_configure() - Changes the phy parameters
* @phy: the phy returned by phy_get()
* @opts: New configuration to apply
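Aside, not part of the patch above: a sketch of how a host-controller driver might call the new notification hooks when a device appears on one of its ports. The surrounding names are hypothetical; only phy_notify_connect() and phy_notify_disconnect() come from the change above:

#include <linux/device.h>
#include <linux/phy/phy.h>

/* Hypothetical controller callback run when a device shows up on @port. */
static int example_port_connect(struct device *dev, struct phy *usb2_phy,
                                int port)
{
        int ret = phy_notify_connect(usb2_phy, port);

        if (ret)
                dev_warn(dev, "PHY connect notification failed: %d\n", ret);
        return ret;
}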
diff --git a/drivers/phy/realtek/Kconfig b/drivers/phy/realtek/Kconfig
new file mode 100644
index 000000000000..75ac7e7c31ae
--- /dev/null
+++ b/drivers/phy/realtek/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Phy drivers for Realtek platforms
+#
+
+if ARCH_REALTEK || COMPILE_TEST
+
+config PHY_RTK_RTD_USB2PHY
+ tristate "Realtek RTD USB2 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support the Realtek SoC USB 2.0 PHY transceiver.
+ The DHC (digital home center) RTD series SoCs use the Synopsys
+ DWC3 USB IP. This driver performs the initialization of the
+ PHY parameters.
+
+config PHY_RTK_RTD_USB3PHY
+ tristate "Realtek RTD USB3 PHY Transceiver Driver"
+ depends on USB_SUPPORT
+ select GENERIC_PHY
+ select USB_PHY
+ select USB_COMMON
+ help
+ Enable this to support the Realtek SoC USB 3.0 PHY transceiver.
+ The DHC (digital home center) RTD series SoCs use the Synopsys
+ DWC3 USB IP. This driver performs the initialization of the
+ PHY parameters.
+
+endif # ARCH_REALTEK || COMPILE_TEST
diff --git a/drivers/phy/realtek/Makefile b/drivers/phy/realtek/Makefile
new file mode 100644
index 000000000000..ed7b47ff8a26
--- /dev/null
+++ b/drivers/phy/realtek/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHY_RTK_RTD_USB2PHY) += phy-rtk-usb2.o
+obj-$(CONFIG_PHY_RTK_RTD_USB3PHY) += phy-rtk-usb3.o
diff --git a/drivers/phy/realtek/phy-rtk-usb2.c b/drivers/phy/realtek/phy-rtk-usb2.c
new file mode 100644
index 000000000000..e3ad7cea5109
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb2.c
@@ -0,0 +1,1312 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb2.c RTK usb2.0 PHY driver
+ *
+ * Copyright (C) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+
+/* GUSB2PHYACCn register */
+#define PHY_NEW_REG_REQ BIT(25)
+#define PHY_VSTS_BUSY BIT(23)
+#define PHY_VCTRL_SHIFT 8
+#define PHY_REG_DATA_MASK 0xff
+
+#define GET_LOW_NIBBLE(addr) ((addr) & 0x0f)
+#define GET_HIGH_NIBBLE(addr) (((addr) & 0xf0) >> 4)
+
+#define EFUS_USB_DC_CAL_RATE 2
+#define EFUS_USB_DC_CAL_MAX 7
+
+#define EFUS_USB_DC_DIS_RATE 1
+#define EFUS_USB_DC_DIS_MAX 7
+
+#define MAX_PHY_DATA_SIZE 20
+#define OFFEST_PHY_READ 0x20
+
+#define MAX_USB_PHY_NUM 4
+#define MAX_USB_PHY_PAGE0_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE1_DATA_SIZE 16
+#define MAX_USB_PHY_PAGE2_DATA_SIZE 8
+
+#define SET_PAGE_OFFSET 0xf4
+#define SET_PAGE_0 0x9b
+#define SET_PAGE_1 0xbb
+#define SET_PAGE_2 0xdb
+
+#define PAGE_START 0xe0
+#define PAGE0_0XE4 0xe4
+#define PAGE0_0XE6 0xe6
+#define PAGE0_0XE7 0xe7
+#define PAGE1_0XE0 0xe0
+#define PAGE1_0XE2 0xe2
+
+#define SENSITIVITY_CTRL (BIT(4) | BIT(5) | BIT(6))
+#define ENABLE_AUTO_SENSITIVITY_CALIBRATION BIT(2)
+#define DEFAULT_DC_DRIVING_VALUE (0x8)
+#define DEFAULT_DC_DISCONNECTION_VALUE (0x6)
+#define HS_CLK_SELECT BIT(6)
+
+struct phy_reg {
+ void __iomem *reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0;
+ int vstatus_index;
+};
+
+struct phy_data {
+ u8 addr;
+ u8 data;
+};
+
+struct phy_cfg {
+ int page0_size;
+ struct phy_data page0[MAX_USB_PHY_PAGE0_DATA_SIZE];
+ int page1_size;
+ struct phy_data page1[MAX_USB_PHY_PAGE1_DATA_SIZE];
+ int page2_size;
+ struct phy_data page2[MAX_USB_PHY_PAGE2_DATA_SIZE];
+
+ int num_phy;
+
+ bool check_efuse;
+ int check_efuse_version;
+#define CHECK_EFUSE_V1 1
+#define CHECK_EFUSE_V2 2
+ int efuse_dc_driving_rate;
+ int efuse_dc_disconnect_rate;
+ int dc_driving_mask;
+ int dc_disconnect_mask;
+ bool usb_dc_disconnect_at_page0;
+ int driving_updated_for_dev_dis;
+
+ bool do_toggle;
+ bool do_toggle_driving;
+ bool use_default_parameter;
+ bool is_double_sensitivity_mode;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ s8 efuse_usb_dc_cal;
+ s8 efuse_usb_dc_dis;
+
+ /* Get from dts */
+ bool inverse_hstx_sync_clock;
+ u32 driving_level;
+ s32 driving_level_compensate;
+ s32 disconnection_compensate;
+};
+
+struct rtk_phy {
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+/* mapping 0xE0 to 0 ... 0xE7 to 7, 0xF0 to 8 ... 0xF7 to 15 */
+static inline int page_addr_to_array_index(u8 addr)
+{
+ return (int)((((addr) - PAGE_START) & 0x7) +
+ ((((addr) - PAGE_START) & 0x10) >> 1));
+}
+
+static inline u8 array_index_to_page_addr(int index)
+{
+ return ((((index) + PAGE_START) & 0x7) +
+ ((((index) & 0x8) << 1) + PAGE_START));
+}
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static char rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ unsigned int val;
+ int ret = 0;
+
+ addr -= OFFEST_PHY_READ;
+
+ /* polling until VBusy == 0 */
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = low nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ /* VCtrl = high nibble of addr, and set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return (char)ret;
+
+ val = readl(reg_gusb2phyacc0);
+
+ return (char)(val & PHY_REG_DATA_MASK);
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, char data)
+{
+ unsigned int val;
+ void __iomem *reg_wrap_vstatus = phy_reg->reg_wrap_vstatus;
+ void __iomem *reg_gusb2phyacc0 = phy_reg->reg_gusb2phyacc0;
+ int shift_bits = phy_reg->vstatus_index * 8;
+ int ret = 0;
+
+ /* write data to VStatusOut2 (data output to phy) */
+ writel((u32)data << shift_bits, reg_wrap_vstatus);
+
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = low nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_LOW_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ /* VCtrl = high nibble of addr, set PHY_NEW_REG_REQ */
+ val = PHY_NEW_REG_REQ | (GET_HIGH_NIBBLE(addr) << PHY_VCTRL_SHIFT);
+
+ writel(val, reg_gusb2phyacc0);
+ ret = utmi_wait_register(reg_gusb2phyacc0, PHY_VSTS_BUSY, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtk_phy_set_page(struct phy_reg *phy_reg, int page)
+{
+ switch (page) {
+ case 0:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_0);
+ case 1:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_1);
+ case 2:
+ return rtk_phy_write(phy_reg, SET_PAGE_OFFSET, SET_PAGE_2);
+ default:
+ pr_err("%s error page=%d\n", __func__, page);
+ }
+
+ return -EINVAL;
+}
+
+static u8 __updated_dc_disconnect_level_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+ int offset = 4;
+
+ val = (s32)((data >> offset) & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~(dc_disconnect_mask << offset))) |
+ (val & dc_disconnect_mask) << offset;
+
+ return ret;
+}
+
+/* updated disconnect level at page0 */
+static void update_dc_disconnect_level_at_page0(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ u8 addr, data;
+ int offset = 4;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE0_0XE4;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page0_0xe4(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~(dc_disconnect_mask << offset)) |
+ (DEFAULT_DC_DISCONNECTION_VALUE << offset);
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static u8 __updated_dc_disconnect_level_page1_0xe2(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ u8 ret;
+ s32 val;
+ s32 dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_disconnect_mask)
+ + phy_parameter->efuse_usb_dc_dis
+ + phy_parameter->disconnection_compensate;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_dis)
+ val = (s32)(phy_parameter->efuse_usb_dc_dis +
+ phy_parameter->disconnection_compensate);
+ else
+ val = (s32)((data & dc_disconnect_mask) +
+ phy_parameter->disconnection_compensate);
+ }
+
+ if (val > dc_disconnect_mask)
+ val = dc_disconnect_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_disconnect_mask)) | (val & dc_disconnect_mask);
+
+ return ret;
+}
+
+/* updated disconnect level at page1 */
+static void update_dc_disconnect_level_at_page1(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_data *phy_data;
+ struct phy_reg *phy_reg;
+ u8 addr, data;
+ s32 dc_disconnect_mask;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ i = page_addr_to_array_index(PAGE1_0XE2);
+ phy_data = phy_data_page + i;
+ if (!phy_data->addr) {
+ phy_data->addr = PAGE1_0XE2;
+ phy_data->data = rtk_phy_read(phy_reg, PAGE1_0XE2);
+ }
+
+ addr = phy_data->addr;
+ data = phy_data->data;
+ dc_disconnect_mask = phy_cfg->dc_disconnect_mask;
+
+ if (update)
+ data = __updated_dc_disconnect_level_page1_0xe2(phy_cfg, phy_parameter, data);
+ else
+ data = (data & ~dc_disconnect_mask) | DEFAULT_DC_DISCONNECTION_VALUE;
+
+ if (rtk_phy_write(phy_reg, addr, data))
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+}
+
+static void update_dc_disconnect_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, bool update)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_cfg->usb_dc_disconnect_at_page0)
+ update_dc_disconnect_level_at_page0(rtk_phy, phy_parameter, update);
+ else
+ update_dc_disconnect_level_at_page1(rtk_phy, phy_parameter, update);
+}
+
+static u8 __update_dc_driving_page0_0xe4(struct phy_cfg *phy_cfg,
+ struct phy_parameter *phy_parameter, u8 data)
+{
+ s32 driving_level_compensate = phy_parameter->driving_level_compensate;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+ s32 val;
+ u8 ret;
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ val = (s32)(data & dc_driving_mask) + driving_level_compensate
+ + phy_parameter->efuse_usb_dc_cal;
+ } else { /* for CHECK_EFUSE_V2 or no efuse */
+ if (phy_parameter->efuse_usb_dc_cal)
+ val = (s32)((phy_parameter->efuse_usb_dc_cal & dc_driving_mask)
+ + driving_level_compensate);
+ else
+ val = (s32)(data & dc_driving_mask);
+ }
+
+ if (val > dc_driving_mask)
+ val = dc_driving_mask;
+ else if (val < 0)
+ val = 0;
+
+ ret = (data & (~dc_driving_mask)) | (val & dc_driving_mask);
+
+ return ret;
+}
+
+static void update_dc_driving_level(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+ if (!phy_cfg->page0[4].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[4].addr = PAGE0_0XE4;
+ phy_cfg->page0[4].data = rtk_phy_read(phy_reg, PAGE0_0XE4);
+ }
+
+ if (phy_parameter->driving_level != DEFAULT_DC_DRIVING_VALUE) {
+ u32 dc_driving_mask;
+ u8 driving_level;
+ u8 data;
+
+ data = phy_cfg->page0[4].data;
+ dc_driving_mask = phy_cfg->dc_driving_mask;
+ driving_level = data & dc_driving_mask;
+
+ dev_dbg(rtk_phy->dev, "%s driving_level=%d => dts driving_level=%d\n",
+ __func__, driving_level, phy_parameter->driving_level);
+
+ phy_cfg->page0[4].data = (data & (~dc_driving_mask)) |
+ (phy_parameter->driving_level & dc_driving_mask);
+ }
+
+ phy_cfg->page0[4].data = __update_dc_driving_page0_0xe4(phy_cfg,
+ phy_parameter,
+ phy_cfg->page0[4].data);
+}
+
+static void update_hs_clk_select(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_parameter->inverse_hstx_sync_clock) {
+ if (!phy_cfg->page0[6].addr) {
+ rtk_phy_set_page(phy_reg, 0);
+ phy_cfg->page0[6].addr = PAGE0_0XE6;
+ phy_cfg->page0[6].data = rtk_phy_read(phy_reg, PAGE0_0XE6);
+ }
+
+ phy_cfg->page0[6].data = phy_cfg->page0[6].data | HS_CLK_SELECT;
+ }
+}
+
+static void do_rtk_phy_toggle(struct rtk_phy *rtk_phy,
+ int index, bool connect)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+ u8 addr, data;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ goto out;
+
+ if (phy_cfg->is_double_sensitivity_mode)
+ goto do_toggle_driving;
+
+ /* Set page 0 */
+ rtk_phy_set_page(phy_reg, 0);
+
+ addr = PAGE0_0XE7;
+ data = rtk_phy_read(phy_reg, addr);
+
+ if (connect)
+ rtk_phy_write(phy_reg, addr, data & (~SENSITIVITY_CTRL));
+ else
+ rtk_phy_write(phy_reg, addr, data | (SENSITIVITY_CTRL));
+
+do_toggle_driving:
+
+ if (!phy_cfg->do_toggle_driving)
+ goto do_toggle;
+
+ /* Page 0 addr 0xE4 driving capability */
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ i = page_addr_to_array_index(PAGE0_0XE4);
+ addr = phy_data_page[i].addr;
+ data = phy_data_page[i].data;
+
+ if (connect) {
+ rtk_phy_write(phy_reg, addr, data);
+ } else {
+ u8 value;
+ s32 tmp;
+ s32 driving_updated =
+ phy_cfg->driving_updated_for_dev_dis;
+ s32 dc_driving_mask = phy_cfg->dc_driving_mask;
+
+ tmp = (s32)(data & dc_driving_mask) + driving_updated;
+
+ if (tmp > dc_driving_mask)
+ tmp = dc_driving_mask;
+ else if (tmp < 0)
+ tmp = 0;
+
+ value = (data & (~dc_driving_mask)) | (tmp & dc_driving_mask);
+
+ rtk_phy_write(phy_reg, addr, value);
+ }
+
+do_toggle:
+ /* restore dc disconnect level before toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, false);
+
+ /* Set page 1 */
+ rtk_phy_set_page(phy_reg, 1);
+
+ addr = PAGE1_0XE0;
+ data = rtk_phy_read(phy_reg, addr);
+
+ rtk_phy_write(phy_reg, addr, data &
+ (~ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data |
+ (ENABLE_AUTO_SENSITIVITY_CALIBRATION));
+
+ /* update dc disconnect level after toggle */
+ update_dc_disconnect_level(rtk_phy, phy_parameter, true);
+
+out:
+ return;
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_parameter *phy_parameter;
+ struct phy_cfg *phy_cfg;
+ struct phy_data *phy_data_page;
+ struct phy_reg *phy_reg;
+ int i;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter) {
+ dev_dbg(rtk_phy->dev, "%s phy#%d use default parameter\n",
+ __func__, index);
+ goto do_toggle;
+ }
+
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page0 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page1 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto do_toggle;
+
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = phy_data->addr;
+ u8 data = phy_data->data;
+
+ if (!addr)
+ continue;
+
+ if (rtk_phy_write(phy_reg, addr, data)) {
+ dev_err(rtk_phy->dev,
+ "%s: Error to set page2 parameter addr=0x%x value=0x%x\n",
+ __func__, addr, data);
+ return -EINVAL;
+ }
+ }
+
+do_toggle:
+ do_rtk_phy_toggle(rtk_phy, index, false);
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ unsigned long phy_init_time = jiffies;
+ int i, ret = 0;
+
+ if (!rtk_phy)
+ return -EINVAL;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 2.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static void rtk_phy_toggle(struct rtk_phy *rtk_phy, bool connect, int port)
+{
+ int index = port;
+
+ if (index > rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: The port=%d is not in usb phy (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_connect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, true, port);
+
+ return 0;
+}
+
+static int rtk_phy_disconnect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, false, port);
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .connect = rtk_phy_connect,
+ .disconnect = rtk_phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb2_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " check_efuse_version: %d\n",
+ phy_cfg->check_efuse_version);
+ seq_printf(s, " efuse_dc_driving_rate: %d\n",
+ phy_cfg->efuse_dc_driving_rate);
+ seq_printf(s, " dc_driving_mask: 0x%x\n",
+ phy_cfg->dc_driving_mask);
+ seq_printf(s, " efuse_dc_disconnect_rate: %d\n",
+ phy_cfg->efuse_dc_disconnect_rate);
+ seq_printf(s, " dc_disconnect_mask: 0x%x\n",
+ phy_cfg->dc_disconnect_mask);
+ seq_printf(s, " usb_dc_disconnect_at_page0: %s\n",
+ phy_cfg->usb_dc_disconnect_at_page0 ? "true" : "false");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_driving: %s\n",
+ phy_cfg->do_toggle_driving ? "Enable" : "Disable");
+ seq_printf(s, " driving_updated_for_dev_dis: 0x%x\n",
+ phy_cfg->driving_updated_for_dev_dis);
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+ seq_printf(s, " is_double_sensitivity_mode: %s\n",
+ phy_cfg->is_double_sensitivity_mode ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_parameter *phy_parameter;
+ struct phy_reg *phy_reg;
+ struct phy_data *phy_data_page;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ seq_puts(s, "Page 0:\n");
+ /* Set page 0 */
+ phy_data_page = phy_cfg->page0;
+ rtk_phy_set_page(phy_reg, 0);
+
+ for (i = 0; i < phy_cfg->page0_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 0: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 0: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ seq_puts(s, "Page 1:\n");
+ /* Set page 1 */
+ phy_data_page = phy_cfg->page1;
+ rtk_phy_set_page(phy_reg, 1);
+
+ for (i = 0; i < phy_cfg->page1_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 1: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 1: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+ if (phy_cfg->page2_size == 0)
+ goto out;
+
+ seq_puts(s, "Page 2:\n");
+ /* Set page 2 */
+ phy_data_page = phy_cfg->page2;
+ rtk_phy_set_page(phy_reg, 2);
+
+ for (i = 0; i < phy_cfg->page2_size; i++) {
+ struct phy_data *phy_data = phy_data_page + i;
+ u8 addr = array_index_to_page_addr(i);
+ u8 data = phy_data->data;
+ u8 value = rtk_phy_read(phy_reg, addr);
+
+ if (phy_data->addr)
+ seq_printf(s, " Page 2: addr=0x%x data=0x%02x ==> read value=0x%02x\n",
+ addr, data, value);
+ else
+ seq_printf(s, " Page 2: addr=0x%x data=none ==> read value=0x%02x\n",
+ addr, value);
+ }
+
+out:
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_dc_cal: %d\n",
+ (int)phy_parameter->efuse_usb_dc_cal);
+ seq_printf(s, " efuse_usb_dc_dis: %d\n",
+ (int)phy_parameter->efuse_usb_dc_dis);
+ seq_printf(s, " inverse_hstx_sync_clock: %s\n",
+ phy_parameter->inverse_hstx_sync_clock ? "Enable" : "Disable");
+ seq_printf(s, " driving_level: %d\n",
+ phy_parameter->driving_level);
+ seq_printf(s, " driving_level_compensate: %d\n",
+ phy_parameter->driving_level_compensate);
+ seq_printf(s, " disconnection_compensate: %d\n",
+ phy_parameter->disconnection_compensate);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb2_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev),
+ phy_debug_root);
+
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb2_parameter_fops);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+ struct soc_device_attribute rtk_soc_groot[] = {
+ { .family = "Realtek Groot",},
+ { /* empty */ } };
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ /* Read efuse for usb dc cal */
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-cal");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-cal: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_driving_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
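+ /*
+ * CHECK_EFUSE_V1 appears to use a sign-magnitude style encoding:
+ * values up to EFUS_USB_DC_CAL_MAX are positive, larger values keep
+ * only the bits under the mask as a negative magnitude. Both are
+ * scaled by efuse_dc_driving_rate; the disconnect level below uses
+ * the same scheme.
+ */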
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_driving_rate;
+
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_cal = -(int8_t)
+ ((EFUS_USB_DC_CAL_MAX & value) * rate);
+
+ if (soc_device_match(rtk_soc_groot)) {
+ dev_dbg(rtk_phy->dev, "For Groot IC we need a workaround to adjust efuse_usb_dc_cal\n");
+
+ /* Do not multiply by dc_cal_rate=2 for a positive dc cal compensation */
+ if (value <= EFUS_USB_DC_CAL_MAX)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value);
+
+ /* Use the max dc cal compensation of 0x8 when the OTP value is 0x7 */
+ if (value == 0x7)
+ phy_parameter->efuse_usb_dc_cal = (int8_t)(value + 1);
+ }
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_cal = value & phy_cfg->dc_driving_mask;
+ }
+
+ /* Read efuse for usb dc disconnect level */
+ value = 0;
+ cell = nvmem_cell_get(rtk_phy->dev, "usb-dc-dis");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb-dc-dis: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & phy_cfg->dc_disconnect_mask;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
+ if (phy_cfg->check_efuse_version == CHECK_EFUSE_V1) {
+ int rate = phy_cfg->efuse_dc_disconnect_rate;
+
+ if (value <= EFUS_USB_DC_DIS_MAX)
+ phy_parameter->efuse_usb_dc_dis = (int8_t)(value * rate);
+ else
+ phy_parameter->efuse_usb_dc_dis = -(int8_t)
+ ((EFUS_USB_DC_DIS_MAX & value) * rate);
+ } else { /* for CHECK_EFUSE_V2 */
+ phy_parameter->efuse_usb_dc_dis = value & phy_cfg->dc_disconnect_mask;
+ }
+
+out:
+ return 0;
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct device_node *np = dev->of_node;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_wrap_vstatus = of_iomap(np, 0);
+ phy_parameter->phy_reg.reg_gusb2phyacc0 = of_iomap(np, 1) + index;
+ phy_parameter->phy_reg.vstatus_index = index;
+
+ if (of_property_read_bool(np, "realtek,inverse-hstx-sync-clock"))
+ phy_parameter->inverse_hstx_sync_clock = true;
+ else
+ phy_parameter->inverse_hstx_sync_clock = false;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level",
+ index, &phy_parameter->driving_level))
+ phy_parameter->driving_level = DEFAULT_DC_DRIVING_VALUE;
+
+ if (of_property_read_u32_index(np, "realtek,driving-level-compensate",
+ index, &phy_parameter->driving_level_compensate))
+ phy_parameter->driving_level_compensate = 0;
+
+ if (of_property_read_u32_index(np, "realtek,disconnection-compensate",
+ index, &phy_parameter->disconnection_compensate))
+ phy_parameter->disconnection_compensate = 0;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_dc_driving_level(rtk_phy, phy_parameter);
+
+ update_hs_clk_select(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb2phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret = 0;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = phy_cfg->num_phy;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb2phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x90},
+ [3] = {0xe3, 0x3a},
+ [4] = {0xe4, 0x68},
+ [6] = {0xe6, 0x91},
+ [13] = {0xf5, 0x81},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1395_phy_cfg_2port = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0xac},
+ [13] = {0xf5, 0x00},
+ [15] = {0xf7, 0x02}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 2,
+ .check_efuse = false,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [4] = {0xe4, 0x68}, },
+ .page1_size = 8,
+ .page1 = { /* default parameter */ },
+ .page2_size = 0,
+ .page2 = { /* no parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x18},
+ [4] = {0xe4, 0x6a},
+ [7] = {0xe7, 0x71},
+ [13] = {0xf5, 0x15},
+ [15] = {0xf7, 0x32}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x44}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [0] = {0xe0, 0x01}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1312c_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0x14},
+ [4] = {0xe4, 0x67},
+ [5] = {0xe5, 0x55}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x23},
+ [6] = {0xe6, 0x58}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { /* default parameter */ },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = 1,
+ .dc_driving_mask = 0xf,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = true,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0xf,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0xa8},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = 8,
+ .page1 = { [3] = {0xe3, 0x64}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x45}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = true,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8e},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [14] = {0xf5, 0x1}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .check_efuse = true,
+ .num_phy = 1,
+ .check_efuse_version = CHECK_EFUSE_V1,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct phy_cfg rtd1315e_phy_cfg = {
+ .page0_size = MAX_USB_PHY_PAGE0_DATA_SIZE,
+ .page0 = { [0] = {0xe0, 0xa3},
+ [4] = {0xe4, 0x8c},
+ [5] = {0xe5, 0x4f},
+ [6] = {0xe6, 0x02}, },
+ .page1_size = MAX_USB_PHY_PAGE1_DATA_SIZE,
+ .page1 = { [3] = {0xe3, 0x7f},
+ [14] = {0xf5, 0x01}, },
+ .page2_size = MAX_USB_PHY_PAGE2_DATA_SIZE,
+ .page2 = { [7] = {0xe7, 0x44}, },
+ .num_phy = 1,
+ .check_efuse = true,
+ .check_efuse_version = CHECK_EFUSE_V2,
+ .efuse_dc_driving_rate = EFUS_USB_DC_CAL_RATE,
+ .dc_driving_mask = 0x1f,
+ .efuse_dc_disconnect_rate = EFUS_USB_DC_DIS_RATE,
+ .dc_disconnect_mask = 0xf,
+ .usb_dc_disconnect_at_page0 = false,
+ .do_toggle = true,
+ .do_toggle_driving = false,
+ .driving_updated_for_dev_dis = 0x8,
+ .use_default_parameter = false,
+ .is_double_sensitivity_mode = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb2phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1312c-usb2phy", .data = &rtd1312c_phy_cfg },
+ { .compatible = "realtek,rtd1315e-usb2phy", .data = &rtd1315e_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb2phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb2phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy", .data = &rtd1395_phy_cfg },
+ { .compatible = "realtek,rtd1395-usb2phy-2port", .data = &rtd1395_phy_cfg_2port },
+ { .compatible = "realtek,rtd1619-usb2phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb2phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb2phy_driver = {
+ .probe = rtk_usb2phy_probe,
+ .remove_new = rtk_usb2phy_remove,
+ .driver = {
+ .name = "rtk-usb2phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb2phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 2.0 phy driver");
diff --git a/drivers/phy/realtek/phy-rtk-usb3.c b/drivers/phy/realtek/phy-rtk-usb3.c
new file mode 100644
index 000000000000..dfcf4b921bba
--- /dev/null
+++ b/drivers/phy/realtek/phy-rtk-usb3.c
@@ -0,0 +1,748 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * phy-rtk-usb3.c RTK USB 3.0 PHY driver
+ *
+ * Copyright (c) 2023 Realtek Semiconductor Corporation
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/regmap.h>
+#include <linux/sys_soc.h>
+#include <linux/mfd/syscon.h>
+#include <linux/phy/phy.h>
+#include <linux/usb.h>
+
+#define USB_MDIO_CTRL_PHY_BUSY BIT(7)
+#define USB_MDIO_CTRL_PHY_WRITE BIT(0)
+#define USB_MDIO_CTRL_PHY_ADDR_SHIFT 8
+#define USB_MDIO_CTRL_PHY_DATA_SHIFT 16
+
+#define MAX_USB_PHY_DATA_SIZE 0x30
+#define PHY_ADDR_0X09 0x09
+#define PHY_ADDR_0X0B 0x0b
+#define PHY_ADDR_0X0D 0x0d
+#define PHY_ADDR_0X10 0x10
+#define PHY_ADDR_0X1F 0x1f
+#define PHY_ADDR_0X20 0x20
+#define PHY_ADDR_0X21 0x21
+#define PHY_ADDR_0X30 0x30
+
+#define REG_0X09_FORCE_CALIBRATION BIT(9)
+#define REG_0X0B_RX_OFFSET_RANGE_MASK 0xc
+#define REG_0X0D_RX_DEBUG_TEST_EN BIT(6)
+#define REG_0X10_DEBUG_MODE_SETTING 0x3c0
+#define REG_0X10_DEBUG_MODE_SETTING_MASK 0x3f8
+#define REG_0X1F_RX_OFFSET_CODE_MASK 0x1e
+
+#define USB_U3_TX_LFPS_SWING_TRIM_SHIFT 4
+#define USB_U3_TX_LFPS_SWING_TRIM_MASK 0xf
+#define AMPLITUDE_CONTROL_COARSE_MASK 0xff
+#define AMPLITUDE_CONTROL_FINE_MASK 0xffff
+#define AMPLITUDE_CONTROL_COARSE_DEFAULT 0xff
+#define AMPLITUDE_CONTROL_FINE_DEFAULT 0xffff
+
+#define PHY_ADDR_MAP_ARRAY_INDEX(addr) (addr)
+#define ARRAY_INDEX_MAP_PHY_ADDR(index) (index)
+
+struct phy_reg {
+ void __iomem *reg_mdio_ctl;
+};
+
+struct phy_data {
+ u8 addr;
+ u16 data;
+};
+
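+/*
+ * A zeroed entry (addr == 0 and data == 0) in a parameter table is treated
+ * as "not set" and is skipped when the PHY registers are programmed.
+ */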
+struct phy_cfg {
+ int param_size;
+ struct phy_data param[MAX_USB_PHY_DATA_SIZE];
+
+ bool check_efuse;
+ bool do_toggle;
+ bool do_toggle_once;
+ bool use_default_parameter;
+ bool check_rx_front_end_offset;
+};
+
+struct phy_parameter {
+ struct phy_reg phy_reg;
+
+ /* Get from efuse */
+ u8 efuse_usb_u3_tx_lfps_swing_trim;
+
+ /* Get from dts */
+ u32 amplitude_control_coarse;
+ u32 amplitude_control_fine;
+};
+
+struct rtk_phy {
+ struct device *dev;
+
+ struct phy_cfg *phy_cfg;
+ int num_phy;
+ struct phy_parameter *phy_parameter;
+
+ struct dentry *debug_dir;
+};
+
+#define PHY_IO_TIMEOUT_USEC (50000)
+#define PHY_IO_DELAY_US (100)
+
+static inline int utmi_wait_register(void __iomem *reg, u32 mask, u32 result)
+{
+ int ret;
+ unsigned int val;
+
+ ret = read_poll_timeout(readl, val, ((val & mask) == result),
+ PHY_IO_DELAY_US, PHY_IO_TIMEOUT_USEC, false, reg);
+ if (ret) {
+ pr_err("%s can't program USB phy\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int rtk_phy3_wait_vbusy(struct phy_reg *phy_reg)
+{
+ return utmi_wait_register(phy_reg->reg_mdio_ctl, USB_MDIO_CTRL_PHY_BUSY, 0);
+}
+
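+/*
+ * MDIO control register layout used below: bit 0 requests a write, bit 7
+ * reports busy, bits 8-15 carry the PHY register address and bits 16-31
+ * the 16-bit data value (see the USB_MDIO_CTRL_* definitions above).
+ */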
+static u16 rtk_phy_read(struct phy_reg *phy_reg, char addr)
+{
+ unsigned int tmp;
+ u32 value;
+
+ tmp = (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT);
+
+ writel(tmp, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ value = readl(phy_reg->reg_mdio_ctl);
+ value = value >> USB_MDIO_CTRL_PHY_DATA_SHIFT;
+
+ return (u16)value;
+}
+
+static int rtk_phy_write(struct phy_reg *phy_reg, char addr, u16 data)
+{
+ unsigned int val;
+
+ val = USB_MDIO_CTRL_PHY_WRITE |
+ (addr << USB_MDIO_CTRL_PHY_ADDR_SHIFT) |
+ (data << USB_MDIO_CTRL_PHY_DATA_SHIFT);
+
+ writel(val, phy_reg->reg_mdio_ctl);
+
+ rtk_phy3_wait_vbusy(phy_reg);
+
+ return 0;
+}
+
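+/*
+ * Pulse the force-calibration bit in register 0x09 (clear, wait 1 ms, set)
+ * so the PHY re-calibrates; called from init and from the connect and
+ * disconnect hooks when do_toggle is enabled.
+ */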
+static void do_rtk_usb3_phy_toggle(struct rtk_phy *rtk_phy, int index, bool connect)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ struct phy_data *phy_data;
+ u8 addr;
+ u16 data;
+ int i;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (!phy_cfg->do_toggle)
+ return;
+
+ i = PHY_ADDR_MAP_ARRAY_INDEX(PHY_ADDR_0X09);
+ phy_data = phy_cfg->param + i;
+ addr = phy_data->addr;
+ data = phy_data->data;
+
+ if (!addr && !data) {
+ addr = PHY_ADDR_0X09;
+ data = rtk_phy_read(phy_reg, addr);
+ phy_data->addr = addr;
+ phy_data->data = data;
+ }
+
+ rtk_phy_write(phy_reg, addr, data & (~REG_0X09_FORCE_CALIBRATION));
+ mdelay(1);
+ rtk_phy_write(phy_reg, addr, data | REG_0X09_FORCE_CALIBRATION);
+}
+
+static int do_rtk_phy_init(struct rtk_phy *rtk_phy, int index)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+ int i = 0;
+
+ phy_cfg = rtk_phy->phy_cfg;
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ if (phy_cfg->use_default_parameter)
+ goto do_toggle;
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = phy_data->addr;
+ u16 data = phy_data->data;
+
+ if (!addr && !data)
+ continue;
+
+ rtk_phy_write(phy_reg, addr, data);
+ }
+
+do_toggle:
+ if (phy_cfg->do_toggle_once)
+ phy_cfg->do_toggle = true;
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, false);
+
+ if (phy_cfg->do_toggle_once) {
+ u16 check_value = 0;
+ int count = 10;
+ u16 value_0x0d, value_0x10;
+
+ /* Enable debug mode by setting 0x0D and 0x10 */
+ value_0x0d = rtk_phy_read(phy_reg, PHY_ADDR_0X0D);
+ value_0x10 = rtk_phy_read(phy_reg, PHY_ADDR_0X10);
+
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D,
+ value_0x0d | REG_0X0D_RX_DEBUG_TEST_EN);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10,
+ (value_0x10 & ~REG_0X10_DEBUG_MODE_SETTING_MASK) |
+ REG_0X10_DEBUG_MODE_SETTING);
+
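+ /*
+ * Poll register 0x30 until bit 15 is set, for roughly 10 ms; a bit
+ * that stays clear is reported as a failed one-shot toggle. The
+ * exact meaning of bit 15 is not documented here.
+ */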
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+
+ while (!(check_value & BIT(15))) {
+ check_value = rtk_phy_read(phy_reg, PHY_ADDR_0X30);
+ mdelay(1);
+ if (count-- < 0)
+ break;
+ }
+
+ if (!(check_value & BIT(15)))
+ dev_info(rtk_phy->dev, "toggle fail addr=0x%02x, data=0x%04x\n",
+ PHY_ADDR_0X30, check_value);
+
+ /* Disable debug mode by restoring 0x0D and 0x10 to their defaults */
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0D, value_0x0d);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X10, value_0x10);
+
+ phy_cfg->do_toggle = false;
+ }
+
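+ /*
+ * If the RX front-end offset code has saturated (all masked bits
+ * clear or all set), bump the offset range field in register 0x0B
+ * by one step and redo the toggle, unless the range is already at
+ * its maximum.
+ */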
+ if (phy_cfg->check_rx_front_end_offset) {
+ u16 rx_offset_code, rx_offset_range;
+ u16 code_mask = REG_0X1F_RX_OFFSET_CODE_MASK;
+ u16 range_mask = REG_0X0B_RX_OFFSET_RANGE_MASK;
+ bool do_update = false;
+
+ rx_offset_code = rtk_phy_read(phy_reg, PHY_ADDR_0X1F);
+ if (((rx_offset_code & code_mask) == 0x0) ||
+ ((rx_offset_code & code_mask) == code_mask))
+ do_update = true;
+
+ rx_offset_range = rtk_phy_read(phy_reg, PHY_ADDR_0X0B);
+ if (((rx_offset_range & range_mask) == range_mask) && do_update) {
+ dev_warn(rtk_phy->dev, "Don't update rx_offset_range (rx_offset_code=0x%x, rx_offset_range=0x%x)\n",
+ rx_offset_code, rx_offset_range);
+ do_update = false;
+ }
+
+ if (do_update) {
+ u16 tmp1, tmp2;
+
+ tmp1 = rx_offset_range & (~range_mask);
+ tmp2 = rx_offset_range & range_mask;
+ tmp2 += (1 << 2);
+ rx_offset_range = tmp1 | (tmp2 & range_mask);
+ rtk_phy_write(phy_reg, PHY_ADDR_0X0B, rx_offset_range);
+ goto do_toggle;
+ }
+ }
+
+ return 0;
+}
+
+static int rtk_phy_init(struct phy *phy)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+ int ret = 0;
+ int i;
+ unsigned long phy_init_time = jiffies;
+
+ for (i = 0; i < rtk_phy->num_phy; i++)
+ ret = do_rtk_phy_init(rtk_phy, i);
+
+ dev_dbg(rtk_phy->dev, "Initialized RTK USB 3.0 PHY (take %dms)\n",
+ jiffies_to_msecs(jiffies - phy_init_time));
+
+ return ret;
+}
+
+static int rtk_phy_exit(struct phy *phy)
+{
+ return 0;
+}
+
+static void rtk_phy_toggle(struct rtk_phy *rtk_phy, bool connect, int port)
+{
+ int index = port;
+
+ if (index >= rtk_phy->num_phy) {
+ dev_err(rtk_phy->dev, "%s: port=%d is out of range (num_phy=%d)\n",
+ __func__, index, rtk_phy->num_phy);
+ return;
+ }
+
+ do_rtk_usb3_phy_toggle(rtk_phy, index, connect);
+}
+
+static int rtk_phy_connect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, true, port);
+
+ return 0;
+}
+
+static int rtk_phy_disconnect(struct phy *phy, int port)
+{
+ struct rtk_phy *rtk_phy = phy_get_drvdata(phy);
+
+ dev_dbg(rtk_phy->dev, "%s port=%d\n", __func__, port);
+ rtk_phy_toggle(rtk_phy, false, port);
+
+ return 0;
+}
+
+static const struct phy_ops ops = {
+ .init = rtk_phy_init,
+ .exit = rtk_phy_exit,
+ .connect = rtk_phy_connect,
+ .disconnect = rtk_phy_disconnect,
+ .owner = THIS_MODULE,
+};
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *create_phy_debug_root(void)
+{
+ struct dentry *phy_debug_root;
+
+ phy_debug_root = debugfs_lookup("phy", usb_debug_root);
+ if (!phy_debug_root)
+ phy_debug_root = debugfs_create_dir("phy", usb_debug_root);
+
+ return phy_debug_root;
+}
+
+static int rtk_usb3_parameter_show(struct seq_file *s, void *unused)
+{
+ struct rtk_phy *rtk_phy = s->private;
+ struct phy_cfg *phy_cfg;
+ int i, index;
+
+ phy_cfg = rtk_phy->phy_cfg;
+
+ seq_puts(s, "Property:\n");
+ seq_printf(s, " check_efuse: %s\n",
+ phy_cfg->check_efuse ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle: %s\n",
+ phy_cfg->do_toggle ? "Enable" : "Disable");
+ seq_printf(s, " do_toggle_once: %s\n",
+ phy_cfg->do_toggle_once ? "Enable" : "Disable");
+ seq_printf(s, " use_default_parameter: %s\n",
+ phy_cfg->use_default_parameter ? "Enable" : "Disable");
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ struct phy_reg *phy_reg;
+ struct phy_parameter *phy_parameter;
+
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+ phy_reg = &phy_parameter->phy_reg;
+
+ seq_printf(s, "PHY %d:\n", index);
+
+ for (i = 0; i < phy_cfg->param_size; i++) {
+ struct phy_data *phy_data = phy_cfg->param + i;
+ u8 addr = ARRAY_INDEX_MAP_PHY_ADDR(i);
+ u16 data = phy_data->data;
+
+ if (!phy_data->addr && !data)
+ seq_printf(s, " addr = 0x%02x, data = none ==> read value = 0x%04x\n",
+ addr, rtk_phy_read(phy_reg, addr));
+ else
+ seq_printf(s, " addr = 0x%02x, data = 0x%04x ==> read value = 0x%04x\n",
+ addr, data, rtk_phy_read(phy_reg, addr));
+ }
+
+ seq_puts(s, "PHY Property:\n");
+ seq_printf(s, " efuse_usb_u3_tx_lfps_swing_trim: 0x%x\n",
+ (int)phy_parameter->efuse_usb_u3_tx_lfps_swing_trim);
+ seq_printf(s, " amplitude_control_coarse: 0x%x\n",
+ (int)phy_parameter->amplitude_control_coarse);
+ seq_printf(s, " amplitude_control_fine: 0x%x\n",
+ (int)phy_parameter->amplitude_control_fine);
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(rtk_usb3_parameter);
+
+static inline void create_debug_files(struct rtk_phy *rtk_phy)
+{
+ struct dentry *phy_debug_root = NULL;
+
+ phy_debug_root = create_phy_debug_root();
+
+ if (!phy_debug_root)
+ return;
+
+ rtk_phy->debug_dir = debugfs_create_dir(dev_name(rtk_phy->dev), phy_debug_root);
+
+ debugfs_create_file("parameter", 0444, rtk_phy->debug_dir, rtk_phy,
+ &rtk_usb3_parameter_fops);
+}
+
+static inline void remove_debug_files(struct rtk_phy *rtk_phy)
+{
+ debugfs_remove_recursive(rtk_phy->debug_dir);
+}
+#else
+static inline void create_debug_files(struct rtk_phy *rtk_phy) { }
+static inline void remove_debug_files(struct rtk_phy *rtk_phy) { }
+#endif /* CONFIG_DEBUG_FS */
+
+static int get_phy_data_by_efuse(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter, int index)
+{
+ struct phy_cfg *phy_cfg = rtk_phy->phy_cfg;
+ u8 value = 0;
+ struct nvmem_cell *cell;
+
+ if (!phy_cfg->check_efuse)
+ goto out;
+
+ cell = nvmem_cell_get(rtk_phy->dev, "usb_u3_tx_lfps_swing_trim");
+ if (IS_ERR(cell)) {
+ dev_dbg(rtk_phy->dev, "%s no usb_u3_tx_lfps_swing_trim: %ld\n",
+ __func__, PTR_ERR(cell));
+ } else {
+ unsigned char *buf;
+ size_t buf_size;
+
+ buf = nvmem_cell_read(cell, &buf_size);
+ if (!IS_ERR(buf)) {
+ value = buf[0] & USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ kfree(buf);
+ }
+ nvmem_cell_put(cell);
+ }
+
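+ /* Clamp non-zero trims below 0x8 up to 0x8; 0 leaves the default. */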
+ if (value > 0 && value < 0x8)
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = 0x8;
+ else
+ phy_parameter->efuse_usb_u3_tx_lfps_swing_trim = (u8)value;
+
+out:
+ return 0;
+}
+
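+/*
+ * Fold the DT amplitude-control values and the efuse LFPS swing trim into
+ * the parameter table entries for registers 0x20 and 0x21, so that
+ * do_rtk_phy_init() writes them along with the rest of the table.
+ */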
+static void update_amplitude_control_value(struct rtk_phy *rtk_phy,
+ struct phy_parameter *phy_parameter)
+{
+ struct phy_cfg *phy_cfg;
+ struct phy_reg *phy_reg;
+
+ phy_reg = &phy_parameter->phy_reg;
+ phy_cfg = rtk_phy->phy_cfg;
+
+ if (phy_parameter->amplitude_control_coarse != AMPLITUDE_CONTROL_COARSE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_COARSE_MASK;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= (~val_mask);
+ data |= (phy_parameter->amplitude_control_coarse & val_mask);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->efuse_usb_u3_tx_lfps_swing_trim) {
+ u8 efuse_val = phy_parameter->efuse_usb_u3_tx_lfps_swing_trim;
+ u16 val_mask = USB_U3_TX_LFPS_SWING_TRIM_MASK;
+ int val_shift = USB_U3_TX_LFPS_SWING_TRIM_SHIFT;
+ u16 data;
+
+ if (!phy_cfg->param[PHY_ADDR_0X20].addr && !phy_cfg->param[PHY_ADDR_0X20].data) {
+ phy_cfg->param[PHY_ADDR_0X20].addr = PHY_ADDR_0X20;
+ data = rtk_phy_read(phy_reg, PHY_ADDR_0X20);
+ } else {
+ data = phy_cfg->param[PHY_ADDR_0X20].data;
+ }
+
+ data &= ~(val_mask << val_shift);
+ data |= ((efuse_val & val_mask) << val_shift);
+
+ phy_cfg->param[PHY_ADDR_0X20].data = data;
+ }
+
+ if (phy_parameter->amplitude_control_fine != AMPLITUDE_CONTROL_FINE_DEFAULT) {
+ u16 val_mask = AMPLITUDE_CONTROL_FINE_MASK;
+
+ if (!phy_cfg->param[PHY_ADDR_0X21].addr && !phy_cfg->param[PHY_ADDR_0X21].data)
+ phy_cfg->param[PHY_ADDR_0X21].addr = PHY_ADDR_0X21;
+
+ phy_cfg->param[PHY_ADDR_0X21].data =
+ phy_parameter->amplitude_control_fine & val_mask;
+ }
+}
+
+static int parse_phy_data(struct rtk_phy *rtk_phy)
+{
+ struct device *dev = rtk_phy->dev;
+ struct phy_parameter *phy_parameter;
+ int ret = 0;
+ int index;
+
+ rtk_phy->phy_parameter = devm_kzalloc(dev, sizeof(struct phy_parameter) *
+ rtk_phy->num_phy, GFP_KERNEL);
+ if (!rtk_phy->phy_parameter)
+ return -ENOMEM;
+
+ for (index = 0; index < rtk_phy->num_phy; index++) {
+ phy_parameter = &((struct phy_parameter *)rtk_phy->phy_parameter)[index];
+
+ phy_parameter->phy_reg.reg_mdio_ctl = of_iomap(dev->of_node, 0) + index;
+
+ /* Amplitude control address 0x20 bit 0 to bit 7 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-coarse-tuning",
+ &phy_parameter->amplitude_control_coarse))
+ phy_parameter->amplitude_control_coarse = AMPLITUDE_CONTROL_COARSE_DEFAULT;
+
+ /* Amplitude control address 0x21 bit 0 to bit 15 */
+ if (of_property_read_u32(dev->of_node, "realtek,amplitude-control-fine-tuning",
+ &phy_parameter->amplitude_control_fine))
+ phy_parameter->amplitude_control_fine = AMPLITUDE_CONTROL_FINE_DEFAULT;
+
+ get_phy_data_by_efuse(rtk_phy, phy_parameter, index);
+
+ update_amplitude_control_value(rtk_phy, phy_parameter);
+ }
+
+ return ret;
+}
+
+static int rtk_usb3phy_probe(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy;
+ struct device *dev = &pdev->dev;
+ struct phy *generic_phy;
+ struct phy_provider *phy_provider;
+ const struct phy_cfg *phy_cfg;
+ int ret;
+
+ phy_cfg = of_device_get_match_data(dev);
+ if (!phy_cfg) {
+ dev_err(dev, "phy config are not assigned!\n");
+ return -EINVAL;
+ }
+
+ rtk_phy = devm_kzalloc(dev, sizeof(*rtk_phy), GFP_KERNEL);
+ if (!rtk_phy)
+ return -ENOMEM;
+
+ rtk_phy->dev = &pdev->dev;
+ rtk_phy->phy_cfg = devm_kzalloc(dev, sizeof(*phy_cfg), GFP_KERNEL);
+ if (!rtk_phy->phy_cfg)
+ return -ENOMEM;
+
+ memcpy(rtk_phy->phy_cfg, phy_cfg, sizeof(*phy_cfg));
+
+ rtk_phy->num_phy = 1;
+
+ ret = parse_phy_data(rtk_phy);
+ if (ret)
+ goto err;
+
+ platform_set_drvdata(pdev, rtk_phy);
+
+ generic_phy = devm_phy_create(rtk_phy->dev, NULL, &ops);
+ if (IS_ERR(generic_phy))
+ return PTR_ERR(generic_phy);
+
+ phy_set_drvdata(generic_phy, rtk_phy);
+
+ phy_provider = devm_of_phy_provider_register(rtk_phy->dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
+ create_debug_files(rtk_phy);
+
+err:
+ return ret;
+}
+
+static void rtk_usb3phy_remove(struct platform_device *pdev)
+{
+ struct rtk_phy *rtk_phy = platform_get_drvdata(pdev);
+
+ remove_debug_files(rtk_phy);
+}
+
+static const struct phy_cfg rtd1295_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [0] = {0x01, 0x4008}, [1] = {0x01, 0xe046},
+ [2] = {0x02, 0x6046}, [3] = {0x03, 0x2779},
+ [4] = {0x04, 0x72f5}, [5] = {0x05, 0x2ad3},
+ [6] = {0x06, 0x000e}, [7] = {0x07, 0x2e00},
+ [8] = {0x08, 0x3591}, [9] = {0x09, 0x525c},
+ [10] = {0x0a, 0xa600}, [11] = {0x0b, 0xa904},
+ [12] = {0x0c, 0xc000}, [13] = {0x0d, 0xef1c},
+ [14] = {0x0e, 0x2000}, [15] = {0x0f, 0x0000},
+ [16] = {0x10, 0x000c}, [17] = {0x11, 0x4c00},
+ [18] = {0x12, 0xfc00}, [19] = {0x13, 0x0c81},
+ [20] = {0x14, 0xde01}, [21] = {0x15, 0x0000},
+ [22] = {0x16, 0x0000}, [23] = {0x17, 0x0000},
+ [24] = {0x18, 0x0000}, [25] = {0x19, 0x4004},
+ [26] = {0x1a, 0x1260}, [27] = {0x1b, 0xff00},
+ [28] = {0x1c, 0xcb00}, [29] = {0x1d, 0xa03f},
+ [30] = {0x1e, 0xc2e0}, [31] = {0x1f, 0x2807},
+ [32] = {0x20, 0x947a}, [33] = {0x21, 0x88aa},
+ [34] = {0x22, 0x0057}, [35] = {0x23, 0xab66},
+ [36] = {0x24, 0x0800}, [37] = {0x25, 0x0000},
+ [38] = {0x26, 0x040a}, [39] = {0x27, 0x01d6},
+ [40] = {0x28, 0xf8c2}, [41] = {0x29, 0x3080},
+ [42] = {0x2a, 0x3082}, [43] = {0x2b, 0x2078},
+ [44] = {0x2c, 0xffff}, [45] = {0x2d, 0xffff},
+ [46] = {0x2e, 0x0000}, [47] = {0x2f, 0x0040}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [8] = {0x08, 0x3591},
+ [38] = {0x26, 0x840b},
+ [40] = {0x28, 0xf842}, },
+ .check_efuse = false,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac86},
+ [6] = {0x06, 0x0003},
+ [9] = {0x09, 0x924c},
+ [10] = {0x0a, 0xa608},
+ [11] = {0x0b, 0xb905},
+ [14] = {0x0e, 0x2010},
+ [32] = {0x20, 0x705a},
+ [33] = {0x21, 0xf645},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xcb66},
+ [41] = {0x29, 0xff00}, },
+ .check_efuse = true,
+ .do_toggle = true,
+ .do_toggle_once = false,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1619b_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac8c},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x724c},
+ [10] = {0x0a, 0xb610},
+ [11] = {0x0b, 0xb90d},
+ [13] = {0x0d, 0xef2a},
+ [15] = {0x0f, 0x9050},
+ [16] = {0x10, 0x000c},
+ [32] = {0x20, 0x70ff},
+ [34] = {0x22, 0x0013},
+ [35] = {0x23, 0xdb66},
+ [38] = {0x26, 0x8609},
+ [41] = {0x29, 0xff13},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = false,
+};
+
+static const struct phy_cfg rtd1319d_phy_cfg = {
+ .param_size = MAX_USB_PHY_DATA_SIZE,
+ .param = { [1] = {0x01, 0xac89},
+ [4] = {0x04, 0xf2f5},
+ [6] = {0x06, 0x0017},
+ [9] = {0x09, 0x424c},
+ [10] = {0x0a, 0x9610},
+ [11] = {0x0b, 0x9901},
+ [12] = {0x0c, 0xf000},
+ [13] = {0x0d, 0xef2a},
+ [14] = {0x0e, 0x1000},
+ [15] = {0x0f, 0x9050},
+ [32] = {0x20, 0x7077},
+ [35] = {0x23, 0x0b62},
+ [37] = {0x25, 0x10ec},
+ [42] = {0x2a, 0x3070}, },
+ .check_efuse = true,
+ .do_toggle = false,
+ .do_toggle_once = true,
+ .use_default_parameter = false,
+ .check_rx_front_end_offset = true,
+};
+
+static const struct of_device_id usbphy_rtk_dt_match[] = {
+ { .compatible = "realtek,rtd1295-usb3phy", .data = &rtd1295_phy_cfg },
+ { .compatible = "realtek,rtd1319-usb3phy", .data = &rtd1319_phy_cfg },
+ { .compatible = "realtek,rtd1319d-usb3phy", .data = &rtd1319d_phy_cfg },
+ { .compatible = "realtek,rtd1619-usb3phy", .data = &rtd1619_phy_cfg },
+ { .compatible = "realtek,rtd1619b-usb3phy", .data = &rtd1619b_phy_cfg },
+ {},
+};
+MODULE_DEVICE_TABLE(of, usbphy_rtk_dt_match);
+
+static struct platform_driver rtk_usb3phy_driver = {
+ .probe = rtk_usb3phy_probe,
+ .remove_new = rtk_usb3phy_remove,
+ .driver = {
+ .name = "rtk-usb3phy",
+ .of_match_table = usbphy_rtk_dt_match,
+ },
+};
+
+module_platform_driver(rtk_usb3phy_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stanley Chang <stanley_chang@realtek.com>");
+MODULE_DESCRIPTION("Realtek usb 3.0 phy driver");
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c
index 0dc86a7740e3..cfdb54b6070a 100644
--- a/drivers/phy/tegra/xusb.c
+++ b/drivers/phy/tegra/xusb.c
@@ -1531,6 +1531,19 @@ int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
}
EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_usb3_companion);
+int tegra_xusb_padctl_get_port_number(struct phy *phy)
+{
+ struct tegra_xusb_lane *lane;
+
+ if (!phy)
+ return -ENODEV;
+
+ lane = phy_get_drvdata(phy);
+
+ return lane->index;
+}
+EXPORT_SYMBOL_GPL(tegra_xusb_padctl_get_port_number);
+
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("Tegra XUSB Pad Controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 2b2f14a1b711..4d305876ec08 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -24,6 +24,23 @@
#define DP_PORT_VDO (DP_CONF_SET_PIN_ASSIGN(BIT(DP_PIN_ASSIGN_C) | BIT(DP_PIN_ASSIGN_D)) | \
DP_CAP_DFP_D | DP_CAP_RECEPTACLE)
+static void cros_typec_role_switch_quirk(struct fwnode_handle *fwnode)
+{
+#ifdef CONFIG_ACPI
+ struct fwnode_handle *switch_fwnode;
+
+ /* Supply the USB role switch with the correct pld_crc if it's missing. */
+ switch_fwnode = fwnode_find_reference(fwnode, "usb-role-switch", 0);
+ if (!IS_ERR_OR_NULL(switch_fwnode)) {
+ struct acpi_device *adev = to_acpi_device_node(switch_fwnode);
+
+ if (adev && !adev->pld_crc)
+ adev->pld_crc = to_acpi_device_node(fwnode)->pld_crc;
+ fwnode_handle_put(switch_fwnode);
+ }
+#endif
+}
+
static int cros_typec_parse_port_props(struct typec_capability *cap,
struct fwnode_handle *fwnode,
struct device *dev)
@@ -66,6 +83,8 @@ static int cros_typec_parse_port_props(struct typec_capability *cap,
cap->prefer_role = ret;
}
+ cros_typec_role_switch_quirk(fwnode);
+
cap->fwnode = fwnode;
return 0;
diff --git a/drivers/platform/chrome/cros_ec_uart.c b/drivers/platform/chrome/cros_ec_uart.c
index 68d80559fddc..8ea867c2a01a 100644
--- a/drivers/platform/chrome/cros_ec_uart.c
+++ b/drivers/platform/chrome/cros_ec_uart.c
@@ -81,8 +81,8 @@ struct cros_ec_uart {
struct response_info response;
};
-static ssize_t cros_ec_uart_rx_bytes(struct serdev_device *serdev,
- const u8 *data, size_t count)
+static size_t cros_ec_uart_rx_bytes(struct serdev_device *serdev,
+ const u8 *data, size_t count)
{
struct ec_host_response *host_response;
struct cros_ec_device *ec_dev = serdev_device_get_drvdata(serdev);
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index f3d09b1631e3..03ca5bf19f98 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -2,6 +2,7 @@
menuconfig GOLDFISH
bool "Platform support for Goldfish virtual devices"
depends on HAS_IOMEM && HAS_DMA
+ default X86_GOLDFISH
help
Say Y here to get to see options for the Goldfish virtual platform.
This option alone does not add any kernel code.
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index 9591a28bc38a..ba550eaa06fc 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -227,8 +227,8 @@ EXPORT_SYMBOL_GPL(ssam_client_bind);
/* -- Glue layer (serdev_device -> ssam_controller). ------------------------ */
-static ssize_t ssam_receive_buf(struct serdev_device *dev, const u8 *buf,
- size_t n)
+static size_t ssam_receive_buf(struct serdev_device *dev, const u8 *buf,
+ size_t n)
{
struct ssam_controller *ctrl;
int ret;
diff --git a/drivers/powercap/intel_rapl_msr.c b/drivers/powercap/intel_rapl_msr.c
index b4b6930cacb0..35cb152fa9aa 100644
--- a/drivers/powercap/intel_rapl_msr.c
+++ b/drivers/powercap/intel_rapl_msr.c
@@ -197,11 +197,10 @@ out:
return ret;
}
-static int rapl_msr_remove(struct platform_device *pdev)
+static void rapl_msr_remove(struct platform_device *pdev)
{
cpuhp_remove_state(rapl_msr_priv->pcap_rapl_online);
powercap_unregister_control_type(rapl_msr_priv->control_type);
- return 0;
}
static const struct platform_device_id rapl_msr_ids[] = {
@@ -212,7 +211,7 @@ MODULE_DEVICE_TABLE(platform, rapl_msr_ids);
static struct platform_driver intel_rapl_msr_driver = {
.probe = rapl_msr_probe,
- .remove = rapl_msr_remove,
+ .remove_new = rapl_msr_remove,
.id_table = rapl_msr_ids,
.driver = {
.name = "intel_rapl_msr",
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
index 2d56dd0495d5..2589fd0f2481 100644
--- a/drivers/pps/generators/Makefile
+++ b/drivers/pps/generators/Makefile
@@ -5,6 +5,4 @@
obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
-ifeq ($(CONFIG_PPS_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index d019ca6dee9b..dabac9772741 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2274,6 +2274,17 @@ struct regulator *_regulator_get(struct device *dev, const char *id,
if (ret > 0) {
rdev->use_count = 1;
regulator->enable_count = 1;
+
+ /* Propagate the regulator state to its supply */
+ if (rdev->supply) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0) {
+ destroy_regulator(regulator);
+ module_put(rdev->owner);
+ put_device(&rdev->dev);
+ return ERR_PTR(ret);
+ }
+ }
} else {
rdev->use_count = 0;
regulator->enable_count = 0;
diff --git a/drivers/remoteproc/imx_dsp_rproc.c b/drivers/remoteproc/imx_dsp_rproc.c
index d73727a5828a..087506e21508 100644
--- a/drivers/remoteproc/imx_dsp_rproc.c
+++ b/drivers/remoteproc/imx_dsp_rproc.c
@@ -1040,8 +1040,8 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops, fw_name,
- sizeof(*priv));
+ rproc = devm_rproc_alloc(dev, "imx-dsp-rproc", &imx_dsp_rproc_ops,
+ fw_name, sizeof(*priv));
if (!rproc)
return -ENOMEM;
@@ -1061,14 +1061,14 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
ret = imx_dsp_rproc_detect_mode(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_rproc_detect_mode\n");
- goto err_put_rproc;
+ return ret;
}
/* There are multiple power domains required by DSP on some platform */
ret = imx_dsp_attach_pm_domains(priv);
if (ret) {
dev_err(dev, "failed on imx_dsp_attach_pm_domains\n");
- goto err_put_rproc;
+ return ret;
}
/* Get clocks */
ret = imx_dsp_rproc_clk_get(priv);
@@ -1091,8 +1091,6 @@ static int imx_dsp_rproc_probe(struct platform_device *pdev)
err_detach_domains:
dev_pm_domain_detach_list(priv->pd_list);
-err_put_rproc:
- rproc_free(rproc);
return ret;
}
@@ -1105,7 +1103,6 @@ static void imx_dsp_rproc_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
rproc_del(rproc);
dev_pm_domain_detach_list(priv->pd_list);
- rproc_free(rproc);
}
/* pm runtime functions */
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c
index 3161f14442bc..5a3fb902acc9 100644
--- a/drivers/remoteproc/imx_rproc.c
+++ b/drivers/remoteproc/imx_rproc.c
@@ -1049,16 +1049,14 @@ static int imx_rproc_probe(struct platform_device *pdev)
int ret;
/* set some other name then imx */
- rproc = rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
- NULL, sizeof(*priv));
+ rproc = devm_rproc_alloc(dev, "imx-rproc", &imx_rproc_ops,
+ NULL, sizeof(*priv));
if (!rproc)
return -ENOMEM;
dcfg = of_device_get_match_data(dev);
- if (!dcfg) {
- ret = -EINVAL;
- goto err_put_rproc;
- }
+ if (!dcfg)
+ return -EINVAL;
priv = rproc->priv;
priv->rproc = rproc;
@@ -1069,8 +1067,7 @@ static int imx_rproc_probe(struct platform_device *pdev)
priv->workqueue = create_workqueue(dev_name(dev));
if (!priv->workqueue) {
dev_err(dev, "cannot create workqueue\n");
- ret = -ENOMEM;
- goto err_put_rproc;
+ return -ENOMEM;
}
ret = imx_rproc_xtr_mbox_init(rproc);
@@ -1112,8 +1109,6 @@ err_put_mbox:
imx_rproc_free_mbox(rproc);
err_put_wkq:
destroy_workqueue(priv->workqueue);
-err_put_rproc:
- rproc_free(rproc);
return ret;
}
@@ -1128,7 +1123,6 @@ static void imx_rproc_remove(struct platform_device *pdev)
imx_rproc_put_scu(rproc);
imx_rproc_free_mbox(rproc);
destroy_workqueue(priv->workqueue);
- rproc_free(rproc);
}
static const struct of_device_id imx_rproc_of_match[] = {
diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c
index 93f9a1537ec6..1d24c9b656a8 100644
--- a/drivers/remoteproc/qcom_q6v5_adsp.c
+++ b/drivers/remoteproc/qcom_q6v5_adsp.c
@@ -674,8 +674,8 @@ static int adsp_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
- firmware_name, sizeof(*adsp));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &adsp_ops,
+ firmware_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -700,16 +700,16 @@ static int adsp_probe(struct platform_device *pdev)
ret = adsp_alloc_memory_region(adsp);
if (ret)
- goto free_rproc;
+ return ret;
ret = adsp_init_clock(adsp, desc->clk_ids);
if (ret)
- goto free_rproc;
+ return ret;
ret = qcom_rproc_pds_attach(adsp, desc->pd_names, desc->num_pds);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to attach proxy power domains\n");
- goto free_rproc;
+ return ret;
}
ret = adsp_init_reset(adsp);
@@ -744,9 +744,6 @@ static int adsp_probe(struct platform_device *pdev)
disable_pm:
qcom_rproc_pds_detach(adsp);
-free_rproc:
- rproc_free(rproc);
-
return ret;
}
@@ -761,7 +758,6 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_sysmon_subdev(adsp->sysmon);
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
qcom_rproc_pds_detach(adsp);
- rproc_free(adsp->rproc);
}
static const struct adsp_pil_data adsp_resource_init = {
diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c
index 394b2c1cb5e2..1779fc890e10 100644
--- a/drivers/remoteproc/qcom_q6v5_mss.c
+++ b/drivers/remoteproc/qcom_q6v5_mss.c
@@ -1990,8 +1990,8 @@ static int q6v5_probe(struct platform_device *pdev)
return ret;
}
- rproc = rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
- mba_image, sizeof(*qproc));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &q6v5_ops,
+ mba_image, sizeof(*qproc));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -2008,7 +2008,7 @@ static int q6v5_probe(struct platform_device *pdev)
1, &qproc->hexagon_mdt_image);
if (ret < 0 && ret != -EINVAL) {
dev_err(&pdev->dev, "unable to read mpss firmware-name\n");
- goto free_rproc;
+ return ret;
}
platform_set_drvdata(pdev, qproc);
@@ -2019,17 +2019,17 @@ static int q6v5_probe(struct platform_device *pdev)
qproc->has_spare_reg = desc->has_spare_reg;
ret = q6v5_init_mem(qproc, pdev);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_alloc_memory_region(qproc);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_init_clocks(&pdev->dev, qproc->proxy_clks,
desc->proxy_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get proxy clocks.\n");
- goto free_rproc;
+ return ret;
}
qproc->proxy_clk_count = ret;
@@ -2037,7 +2037,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->reset_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get reset clocks.\n");
- goto free_rproc;
+ return ret;
}
qproc->reset_clk_count = ret;
@@ -2045,7 +2045,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->active_clk_names);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get active clocks.\n");
- goto free_rproc;
+ return ret;
}
qproc->active_clk_count = ret;
@@ -2053,7 +2053,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->proxy_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get proxy regulators.\n");
- goto free_rproc;
+ return ret;
}
qproc->proxy_reg_count = ret;
@@ -2061,7 +2061,7 @@ static int q6v5_probe(struct platform_device *pdev)
desc->active_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get active regulators.\n");
- goto free_rproc;
+ return ret;
}
qproc->active_reg_count = ret;
@@ -2074,12 +2074,12 @@ static int q6v5_probe(struct platform_device *pdev)
desc->fallback_proxy_supply);
if (ret < 0) {
dev_err(&pdev->dev, "Failed to get fallback proxy regulators.\n");
- goto free_rproc;
+ return ret;
}
qproc->fallback_proxy_reg_count = ret;
} else if (ret < 0) {
dev_err(&pdev->dev, "Failed to init power domains\n");
- goto free_rproc;
+ return ret;
} else {
qproc->proxy_pd_count = ret;
}
@@ -2127,8 +2127,6 @@ remove_subdevs:
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
detach_proxy_pds:
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -2149,8 +2147,6 @@ static void q6v5_remove(struct platform_device *pdev)
qcom_remove_glink_subdev(rproc, &qproc->glink_subdev);
q6v5_pds_detach(qproc, qproc->proxy_pds, qproc->proxy_pd_count);
-
- rproc_free(rproc);
}
static const struct rproc_hexagon_res sc7180_mss = {
diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c
index a9dd58608052..54d8005d40a3 100644
--- a/drivers/remoteproc/qcom_q6v5_pas.c
+++ b/drivers/remoteproc/qcom_q6v5_pas.c
@@ -33,12 +33,15 @@
#define ADSP_DECRYPT_SHUTDOWN_DELAY_MS 100
+#define MAX_ASSIGN_COUNT 3
+
struct adsp_data {
int crash_reason_smem;
const char *firmware_name;
const char *dtb_firmware_name;
int pas_id;
int dtb_pas_id;
+ int lite_pas_id;
unsigned int minidump_id;
bool auto_boot;
bool decrypt_shutdown;
@@ -51,6 +54,9 @@ struct adsp_data {
int ssctl_id;
int region_assign_idx;
+ int region_assign_count;
+ bool region_assign_shared;
+ int region_assign_vmid;
};
struct qcom_adsp {
@@ -72,6 +78,7 @@ struct qcom_adsp {
const char *dtb_firmware_name;
int pas_id;
int dtb_pas_id;
+ int lite_pas_id;
unsigned int minidump_id;
int crash_reason_smem;
bool decrypt_shutdown;
@@ -87,15 +94,18 @@ struct qcom_adsp {
phys_addr_t dtb_mem_phys;
phys_addr_t mem_reloc;
phys_addr_t dtb_mem_reloc;
- phys_addr_t region_assign_phys;
+ phys_addr_t region_assign_phys[MAX_ASSIGN_COUNT];
void *mem_region;
void *dtb_mem_region;
size_t mem_size;
size_t dtb_mem_size;
- size_t region_assign_size;
+ size_t region_assign_size[MAX_ASSIGN_COUNT];
int region_assign_idx;
- u64 region_assign_perms;
+ int region_assign_count;
+ bool region_assign_shared;
+ int region_assign_vmid;
+ u64 region_assign_owners[MAX_ASSIGN_COUNT];
struct qcom_rproc_glink glink_subdev;
struct qcom_rproc_subdev smd_subdev;
@@ -210,6 +220,9 @@ static int adsp_load(struct rproc *rproc, const struct firmware *fw)
/* Store firmware handle to be used in adsp_start() */
adsp->firmware = fw;
+ if (adsp->lite_pas_id)
+ ret = qcom_scm_pas_shutdown(adsp->lite_pas_id);
+
if (adsp->dtb_pas_id) {
ret = request_firmware(&adsp->dtb_firmware, adsp->dtb_firmware_name, adsp->dev);
if (ret) {
@@ -590,37 +603,53 @@ static int adsp_alloc_memory_region(struct qcom_adsp *adsp)
static int adsp_assign_memory_region(struct qcom_adsp *adsp)
{
- struct reserved_mem *rmem = NULL;
- struct qcom_scm_vmperm perm;
+ struct qcom_scm_vmperm perm[MAX_ASSIGN_COUNT];
struct device_node *node;
+ unsigned int perm_size;
+ int offset;
int ret;
if (!adsp->region_assign_idx)
return 0;
- node = of_parse_phandle(adsp->dev->of_node, "memory-region", adsp->region_assign_idx);
- if (node)
- rmem = of_reserved_mem_lookup(node);
- of_node_put(node);
- if (!rmem) {
- dev_err(adsp->dev, "unable to resolve shareable memory-region\n");
- return -EINVAL;
- }
+ for (offset = 0; offset < adsp->region_assign_count; ++offset) {
+ struct reserved_mem *rmem = NULL;
+
+ node = of_parse_phandle(adsp->dev->of_node, "memory-region",
+ adsp->region_assign_idx + offset);
+ if (node)
+ rmem = of_reserved_mem_lookup(node);
+ of_node_put(node);
+ if (!rmem) {
+ dev_err(adsp->dev, "unable to resolve shareable memory-region index %d\n",
+ offset);
+ return -EINVAL;
+ }
- perm.vmid = QCOM_SCM_VMID_MSS_MSA;
- perm.perm = QCOM_SCM_PERM_RW;
+ if (adsp->region_assign_shared) {
+ perm[0].vmid = QCOM_SCM_VMID_HLOS;
+ perm[0].perm = QCOM_SCM_PERM_RW;
+ perm[1].vmid = adsp->region_assign_vmid;
+ perm[1].perm = QCOM_SCM_PERM_RW;
+ perm_size = 2;
+ } else {
+ perm[0].vmid = adsp->region_assign_vmid;
+ perm[0].perm = QCOM_SCM_PERM_RW;
+ perm_size = 1;
+ }
- adsp->region_assign_phys = rmem->base;
- adsp->region_assign_size = rmem->size;
- adsp->region_assign_perms = BIT(QCOM_SCM_VMID_HLOS);
+ adsp->region_assign_phys[offset] = rmem->base;
+ adsp->region_assign_size[offset] = rmem->size;
+ adsp->region_assign_owners[offset] = BIT(QCOM_SCM_VMID_HLOS);
- ret = qcom_scm_assign_mem(adsp->region_assign_phys,
- adsp->region_assign_size,
- &adsp->region_assign_perms,
- &perm, 1);
- if (ret < 0) {
- dev_err(adsp->dev, "assign memory failed\n");
- return ret;
+ ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset],
+ adsp->region_assign_size[offset],
+ &adsp->region_assign_owners[offset],
+ perm, perm_size);
+ if (ret < 0) {
+ dev_err(adsp->dev, "assign memory %d failed\n", offset);
+ return ret;
+ }
}
return 0;
@@ -629,20 +658,23 @@ static int adsp_assign_memory_region(struct qcom_adsp *adsp)
static void adsp_unassign_memory_region(struct qcom_adsp *adsp)
{
struct qcom_scm_vmperm perm;
+ int offset;
int ret;
- if (!adsp->region_assign_idx)
+ if (!adsp->region_assign_idx || adsp->region_assign_shared)
return;
- perm.vmid = QCOM_SCM_VMID_HLOS;
- perm.perm = QCOM_SCM_PERM_RW;
+ for (offset = 0; offset < adsp->region_assign_count; ++offset) {
+ perm.vmid = QCOM_SCM_VMID_HLOS;
+ perm.perm = QCOM_SCM_PERM_RW;
- ret = qcom_scm_assign_mem(adsp->region_assign_phys,
- adsp->region_assign_size,
- &adsp->region_assign_perms,
- &perm, 1);
- if (ret < 0)
- dev_err(adsp->dev, "unassign memory failed\n");
+ ret = qcom_scm_assign_mem(adsp->region_assign_phys[offset],
+ adsp->region_assign_size[offset],
+ &adsp->region_assign_owners[offset],
+ &perm, 1);
+ if (ret < 0)
+ dev_err(adsp->dev, "unassign memory %d failed\n", offset);
+ }
}
static int adsp_probe(struct platform_device *pdev)
@@ -678,7 +710,7 @@ static int adsp_probe(struct platform_device *pdev)
if (desc->minidump_id)
ops = &adsp_minidump_ops;
- rproc = rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, ops, fw_name, sizeof(*adsp));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
@@ -693,9 +725,13 @@ static int adsp_probe(struct platform_device *pdev)
adsp->rproc = rproc;
adsp->minidump_id = desc->minidump_id;
adsp->pas_id = desc->pas_id;
+ adsp->lite_pas_id = desc->lite_pas_id;
adsp->info_name = desc->sysmon_name;
adsp->decrypt_shutdown = desc->decrypt_shutdown;
adsp->region_assign_idx = desc->region_assign_idx;
+ adsp->region_assign_count = min_t(int, MAX_ASSIGN_COUNT, desc->region_assign_count);
+ adsp->region_assign_vmid = desc->region_assign_vmid;
+ adsp->region_assign_shared = desc->region_assign_shared;
if (dtb_fw_name) {
adsp->dtb_firmware_name = dtb_fw_name;
adsp->dtb_pas_id = desc->dtb_pas_id;
@@ -754,7 +790,6 @@ detach_proxy_pds:
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
free_rproc:
device_init_wakeup(adsp->dev, false);
- rproc_free(rproc);
return ret;
}
@@ -773,28 +808,27 @@ static void adsp_remove(struct platform_device *pdev)
qcom_remove_ssr_subdev(adsp->rproc, &adsp->ssr_subdev);
adsp_pds_detach(adsp, adsp->proxy_pds, adsp->proxy_pd_count);
device_init_wakeup(adsp->dev, false);
- rproc_free(adsp->rproc);
}
static const struct adsp_data adsp_resource_init = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data sdm845_adsp_resource_init = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .load_state = "adsp",
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data sm6350_adsp_resource = {
@@ -829,18 +863,18 @@ static const struct adsp_data sm6375_mpss_resource = {
};
static const struct adsp_data sm8150_adsp_resource = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "cx",
- NULL
- },
- .load_state = "adsp",
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data sm8250_adsp_resource = {
@@ -876,17 +910,17 @@ static const struct adsp_data sm8350_adsp_resource = {
};
static const struct adsp_data msm8996_adsp_resource = {
- .crash_reason_smem = 423,
- .firmware_name = "adsp.mdt",
- .pas_id = 1,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "cx",
- NULL
- },
- .ssr_name = "lpass",
- .sysmon_name = "adsp",
- .ssctl_id = 0x14,
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .pas_id = 1,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ NULL
+ },
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
};
static const struct adsp_data cdsp_resource_init = {
@@ -984,6 +1018,46 @@ static const struct adsp_data sc8280xp_nsp1_resource = {
.ssctl_id = 0x20,
};
+static const struct adsp_data x1e80100_adsp_resource = {
+ .crash_reason_smem = 423,
+ .firmware_name = "adsp.mdt",
+ .dtb_firmware_name = "adsp_dtb.mdt",
+ .pas_id = 1,
+ .dtb_pas_id = 0x24,
+ .lite_pas_id = 0x1f,
+ .minidump_id = 5,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "adsp",
+ .ssr_name = "lpass",
+ .sysmon_name = "adsp",
+ .ssctl_id = 0x14,
+};
+
+static const struct adsp_data x1e80100_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .dtb_firmware_name = "cdsp_dtb.mdt",
+ .pas_id = 18,
+ .dtb_pas_id = 0x25,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+};
+
static const struct adsp_data sm8350_cdsp_resource = {
.crash_reason_smem = 601,
.firmware_name = "cdsp.mdt",
@@ -1033,33 +1107,33 @@ static const struct adsp_data sc8180x_mpss_resource = {
};
static const struct adsp_data msm8996_slpi_resource_init = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "ssc_cx",
- NULL
- },
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "ssc_cx",
+ NULL
+ },
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
};
static const struct adsp_data sdm845_slpi_resource_init = {
- .crash_reason_smem = 424,
- .firmware_name = "slpi.mdt",
- .pas_id = 12,
- .auto_boot = true,
- .proxy_pd_names = (char*[]){
- "lcx",
- "lmx",
- NULL
- },
- .load_state = "slpi",
- .ssr_name = "dsps",
- .sysmon_name = "slpi",
- .ssctl_id = 0x16,
+ .crash_reason_smem = 424,
+ .firmware_name = "slpi.mdt",
+ .pas_id = 12,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "lcx",
+ "lmx",
+ NULL
+ },
+ .load_state = "slpi",
+ .ssr_name = "dsps",
+ .sysmon_name = "slpi",
+ .ssctl_id = 0x16,
};
static const struct adsp_data wcss_resource_init = {
@@ -1163,6 +1237,8 @@ static const struct adsp_data sm8550_mpss_resource = {
.sysmon_name = "modem",
.ssctl_id = 0x12,
.region_assign_idx = 2,
+ .region_assign_count = 1,
+ .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
};
static const struct adsp_data sc7280_wpss_resource = {
@@ -1181,6 +1257,53 @@ static const struct adsp_data sc7280_wpss_resource = {
.ssctl_id = 0x19,
};
+static const struct adsp_data sm8650_cdsp_resource = {
+ .crash_reason_smem = 601,
+ .firmware_name = "cdsp.mdt",
+ .dtb_firmware_name = "cdsp_dtb.mdt",
+ .pas_id = 18,
+ .dtb_pas_id = 0x25,
+ .minidump_id = 7,
+ .auto_boot = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mxc",
+ "nsp",
+ NULL
+ },
+ .load_state = "cdsp",
+ .ssr_name = "cdsp",
+ .sysmon_name = "cdsp",
+ .ssctl_id = 0x17,
+ .region_assign_idx = 2,
+ .region_assign_count = 1,
+ .region_assign_shared = true,
+ .region_assign_vmid = QCOM_SCM_VMID_CDSP,
+};
+
+static const struct adsp_data sm8650_mpss_resource = {
+ .crash_reason_smem = 421,
+ .firmware_name = "modem.mdt",
+ .dtb_firmware_name = "modem_dtb.mdt",
+ .pas_id = 4,
+ .dtb_pas_id = 0x26,
+ .minidump_id = 3,
+ .auto_boot = false,
+ .decrypt_shutdown = true,
+ .proxy_pd_names = (char*[]){
+ "cx",
+ "mss",
+ NULL
+ },
+ .load_state = "modem",
+ .ssr_name = "mpss",
+ .sysmon_name = "modem",
+ .ssctl_id = 0x12,
+ .region_assign_idx = 2,
+ .region_assign_count = 3,
+ .region_assign_vmid = QCOM_SCM_VMID_MSS_MSA,
+};
+
static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,msm8226-adsp-pil", .data = &adsp_resource_init},
{ .compatible = "qcom,msm8953-adsp-pil", .data = &msm8996_adsp_resource},
@@ -1236,6 +1359,11 @@ static const struct of_device_id adsp_of_match[] = {
{ .compatible = "qcom,sm8550-adsp-pas", .data = &sm8550_adsp_resource},
{ .compatible = "qcom,sm8550-cdsp-pas", .data = &sm8550_cdsp_resource},
{ .compatible = "qcom,sm8550-mpss-pas", .data = &sm8550_mpss_resource},
+ { .compatible = "qcom,sm8650-adsp-pas", .data = &sm8550_adsp_resource},
+ { .compatible = "qcom,sm8650-cdsp-pas", .data = &sm8650_cdsp_resource},
+ { .compatible = "qcom,sm8650-mpss-pas", .data = &sm8650_mpss_resource},
+ { .compatible = "qcom,x1e80100-adsp-pas", .data = &x1e80100_adsp_resource},
+ { .compatible = "qcom,x1e80100-cdsp-pas", .data = &x1e80100_cdsp_resource},
{ },
};
MODULE_DEVICE_TABLE(of, adsp_of_match);
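
A minimal sketch of the probe() shape once rproc_alloc()/rproc_free() are replaced by devm_rproc_alloc(), as done above for qcom_q6v5_pas.c; example_rproc_ops, example_init_resources() and struct example_priv are hypothetical placeholders, not part of this patch.

/* Minimal sketch, assuming hypothetical example_* helpers. */
static int example_rproc_probe(struct platform_device *pdev)
{
	struct example_priv *priv;
	struct rproc *rproc;
	int ret;

	/* Freed automatically when probe fails or the device is removed. */
	rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &example_rproc_ops,
				 "example_fw.mdt", sizeof(*priv));
	if (!rproc)
		return -ENOMEM;

	priv = rproc->priv;
	ret = example_init_resources(pdev, priv);
	if (ret)
		return ret;		/* no free_rproc label needed */

	return rproc_add(rproc);	/* undone by rproc_del() in remove() */
}
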
diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c
index cff1fa07d1de..94f68c919ee6 100644
--- a/drivers/remoteproc/qcom_q6v5_wcss.c
+++ b/drivers/remoteproc/qcom_q6v5_wcss.c
@@ -1011,8 +1011,8 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
if (!desc)
return -EINVAL;
- rproc = rproc_alloc(&pdev->dev, pdev->name, desc->ops,
- desc->firmware_name, sizeof(*wcss));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, desc->ops,
+ desc->firmware_name, sizeof(*wcss));
if (!rproc) {
dev_err(&pdev->dev, "failed to allocate rproc\n");
return -ENOMEM;
@@ -1027,29 +1027,29 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
ret = q6v5_wcss_init_mmio(wcss, pdev);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_alloc_memory_region(wcss);
if (ret)
- goto free_rproc;
+ return ret;
if (wcss->version == WCSS_QCS404) {
ret = q6v5_wcss_init_clock(wcss);
if (ret)
- goto free_rproc;
+ return ret;
ret = q6v5_wcss_init_regulator(wcss);
if (ret)
- goto free_rproc;
+ return ret;
}
ret = q6v5_wcss_init_reset(wcss, desc);
if (ret)
- goto free_rproc;
+ return ret;
ret = qcom_q6v5_init(&wcss->q6v5, pdev, rproc, desc->crash_reason_smem, NULL, NULL);
if (ret)
- goto free_rproc;
+ return ret;
qcom_add_glink_subdev(rproc, &wcss->glink_subdev, "q6wcss");
qcom_add_ssr_subdev(rproc, &wcss->ssr_subdev, "q6wcss");
@@ -1061,16 +1061,11 @@ static int q6v5_wcss_probe(struct platform_device *pdev)
ret = rproc_add(rproc);
if (ret)
- goto free_rproc;
+ return ret;
platform_set_drvdata(pdev, rproc);
return 0;
-
-free_rproc:
- rproc_free(rproc);
-
- return ret;
}
static void q6v5_wcss_remove(struct platform_device *pdev)
@@ -1080,7 +1075,6 @@ static void q6v5_wcss_remove(struct platform_device *pdev)
qcom_q6v5_deinit(&wcss->q6v5);
rproc_del(rproc);
- rproc_free(rproc);
}
static const struct wcss_data wcss_ipq8074_res_init = {
diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c
index 90de22c81da9..a7bb9da27029 100644
--- a/drivers/remoteproc/qcom_wcnss.c
+++ b/drivers/remoteproc/qcom_wcnss.c
@@ -555,8 +555,8 @@ static int wcnss_probe(struct platform_device *pdev)
if (ret < 0 && ret != -EINVAL)
return ret;
- rproc = rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
- fw_name, sizeof(*wcnss));
+ rproc = devm_rproc_alloc(&pdev->dev, pdev->name, &wcnss_ops,
+ fw_name, sizeof(*wcnss));
if (!rproc) {
dev_err(&pdev->dev, "unable to allocate remoteproc\n");
return -ENOMEM;
@@ -574,14 +574,12 @@ static int wcnss_probe(struct platform_device *pdev)
mutex_init(&wcnss->iris_lock);
mmio = devm_platform_ioremap_resource_byname(pdev, "pmu");
- if (IS_ERR(mmio)) {
- ret = PTR_ERR(mmio);
- goto free_rproc;
- }
+ if (IS_ERR(mmio))
+ return PTR_ERR(mmio);
ret = wcnss_alloc_memory_region(wcnss);
if (ret)
- goto free_rproc;
+ return ret;
wcnss->pmu_cfg = mmio + data->pmu_offset;
wcnss->spare_out = mmio + data->spare_offset;
@@ -592,7 +590,7 @@ static int wcnss_probe(struct platform_device *pdev)
*/
ret = wcnss_init_pds(wcnss, data->pd_names);
if (ret && (ret != -ENODATA || !data->num_pd_vregs))
- goto free_rproc;
+ return ret;
ret = wcnss_init_regulators(wcnss, data->vregs, data->num_vregs,
data->num_pd_vregs);
@@ -656,8 +654,6 @@ remove_iris:
qcom_iris_remove(wcnss->iris);
detach_pds:
wcnss_release_pds(wcnss);
-free_rproc:
- rproc_free(rproc);
return ret;
}
@@ -673,7 +669,6 @@ static void wcnss_remove(struct platform_device *pdev)
qcom_remove_sysmon_subdev(wcnss->sysmon);
qcom_remove_smd_subdev(wcnss->rproc, &wcnss->smd_subdev);
wcnss_release_pds(wcnss);
- rproc_free(wcnss->rproc);
}
static const struct of_device_id wcnss_of_match[] = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 695cce218e8c..f276956f2c5c 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -33,6 +33,7 @@
#include <linux/idr.h>
#include <linux/elf.h>
#include <linux/crc32.h>
+#include <linux/of_platform.h>
#include <linux/of_reserved_mem.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_ring.h>
@@ -2112,6 +2113,7 @@ EXPORT_SYMBOL(rproc_detach);
struct rproc *rproc_get_by_phandle(phandle phandle)
{
struct rproc *rproc = NULL, *r;
+ struct device_driver *driver;
struct device_node *np;
np = of_find_node_by_phandle(phandle);
@@ -2122,7 +2124,26 @@ struct rproc *rproc_get_by_phandle(phandle phandle)
list_for_each_entry_rcu(r, &rproc_list, node) {
if (r->dev.parent && device_match_of_node(r->dev.parent, np)) {
/* prevent underlying implementation from being removed */
- if (!try_module_get(r->dev.parent->driver->owner)) {
+
+ /*
+ * If the remoteproc's parent has a driver, the
+ * remoteproc is not part of a cluster and we can use
+ * that driver.
+ */
+ driver = r->dev.parent->driver;
+
+ /*
+ * If the remoteproc's parent does not have a driver,
+ * look for the driver associated with the cluster.
+ */
+ if (!driver) {
+ if (r->dev.parent->parent)
+ driver = r->dev.parent->parent->driver;
+ if (!driver)
+ break;
+ }
+
+ if (!try_module_get(driver->owner)) {
dev_err(&r->dev, "can't get owner\n");
break;
}
@@ -2533,7 +2554,11 @@ EXPORT_SYMBOL(rproc_free);
*/
void rproc_put(struct rproc *rproc)
{
- module_put(rproc->dev.parent->driver->owner);
+ if (rproc->dev.parent->driver)
+ module_put(rproc->dev.parent->driver->owner);
+ else
+ module_put(rproc->dev.parent->parent->driver->owner);
+
put_device(&rproc->dev);
}
EXPORT_SYMBOL(rproc_put);
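
A hedged usage sketch of the API whose lookup changed above; the "example,rproc" property name and the surrounding helper are illustrative, not from this series. The change only affects which driver's module refcount gets pinned when the rproc's parent device belongs to a cluster and has no driver bound to it directly.

/* Sketch of a consumer; property name is illustrative. */
static int example_attach(struct device *dev)
{
	struct rproc *rproc;
	u32 ph;
	int ret;

	ret = of_property_read_u32(dev->of_node, "example,rproc", &ph);
	if (ret)
		return ret;

	rproc = rproc_get_by_phandle(ph);	/* pins the provider module */
	if (!rproc)
		return -EPROBE_DEFER;

	/* ... boot or talk to the remote processor ... */

	rproc_put(rproc);			/* drops the module reference */
	return 0;
}
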
diff --git a/drivers/remoteproc/remoteproc_virtio.c b/drivers/remoteproc/remoteproc_virtio.c
index 83d76915a6ad..25b66b113b69 100644
--- a/drivers/remoteproc/remoteproc_virtio.c
+++ b/drivers/remoteproc/remoteproc_virtio.c
@@ -351,6 +351,9 @@ static void rproc_virtio_dev_release(struct device *dev)
kfree(vdev);
+ of_reserved_mem_device_release(&rvdev->pdev->dev);
+ dma_release_coherent_memory(&rvdev->pdev->dev);
+
put_device(&rvdev->pdev->dev);
}
@@ -584,9 +587,6 @@ static void rproc_virtio_remove(struct platform_device *pdev)
rproc_remove_subdev(rproc, &rvdev->subdev);
rproc_remove_rvdev(rvdev);
- of_reserved_mem_device_release(&pdev->dev);
- dma_release_coherent_memory(&pdev->dev);
-
put_device(&rproc->dev);
}
diff --git a/drivers/remoteproc/st_remoteproc.c b/drivers/remoteproc/st_remoteproc.c
index cb163766c56d..1340be9d0110 100644
--- a/drivers/remoteproc/st_remoteproc.c
+++ b/drivers/remoteproc/st_remoteproc.c
@@ -347,23 +347,21 @@ static int st_rproc_probe(struct platform_device *pdev)
int enabled;
int ret, i;
- rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
rproc->has_iommu = false;
ddata = rproc->priv;
ddata->config = (struct st_rproc_config *)device_get_match_data(dev);
- if (!ddata->config) {
- ret = -ENODEV;
- goto free_rproc;
- }
+ if (!ddata->config)
+ return -ENODEV;
platform_set_drvdata(pdev, rproc);
ret = st_rproc_parse_dt(pdev);
if (ret)
- goto free_rproc;
+ return ret;
enabled = st_rproc_state(pdev);
if (enabled < 0) {
@@ -439,8 +437,7 @@ free_mbox:
mbox_free_channel(ddata->mbox_chan[i]);
free_clk:
clk_unprepare(ddata->clk);
-free_rproc:
- rproc_free(rproc);
+
return ret;
}
@@ -456,8 +453,6 @@ static void st_rproc_remove(struct platform_device *pdev)
for (i = 0; i < ST_RPROC_MAX_VRING * MBOX_MAX; i++)
mbox_free_channel(ddata->mbox_chan[i]);
-
- rproc_free(rproc);
}
static struct platform_driver st_rproc_driver = {
diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c
index 4f469f0bcf8b..88623df7d0c3 100644
--- a/drivers/remoteproc/stm32_rproc.c
+++ b/drivers/remoteproc/stm32_rproc.c
@@ -120,7 +120,7 @@ static int stm32_rproc_mem_alloc(struct rproc *rproc,
void *va;
dev_dbg(dev, "map memory: %pad+%zx\n", &mem->dma, mem->len);
- va = ioremap_wc(mem->dma, mem->len);
+ va = (__force void *)ioremap_wc(mem->dma, mem->len);
if (IS_ERR_OR_NULL(va)) {
dev_err(dev, "Unable to map memory region: %pad+0x%zx\n",
&mem->dma, mem->len);
@@ -137,7 +137,7 @@ static int stm32_rproc_mem_release(struct rproc *rproc,
struct rproc_mem_entry *mem)
{
dev_dbg(rproc->dev.parent, "unmap memory: %pa\n", &mem->dma);
- iounmap(mem->va);
+ iounmap((__force __iomem void *)mem->va);
return 0;
}
@@ -657,7 +657,7 @@ done:
* entire area by overwriting it with the initial values stored in rproc->clean_table.
*/
*table_sz = RSC_TBL_SIZE;
- return (struct resource_table *)ddata->rsc_va;
+ return (__force struct resource_table *)ddata->rsc_va;
}
static const struct rproc_ops st_rproc_ops = {
@@ -843,7 +843,7 @@ static int stm32_rproc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rproc = rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
+ rproc = devm_rproc_alloc(dev, np->name, &st_rproc_ops, NULL, sizeof(*ddata));
if (!rproc)
return -ENOMEM;
@@ -897,7 +897,6 @@ free_rproc:
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
}
- rproc_free(rproc);
return ret;
}
@@ -918,7 +917,6 @@ static void stm32_rproc_remove(struct platform_device *pdev)
dev_pm_clear_wake_irq(dev);
device_init_wakeup(dev, false);
}
- rproc_free(rproc);
}
static int stm32_rproc_suspend(struct device *dev)
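
A brief sketch of why the __force casts above are needed (the struct and function names are illustrative): the carveout's va field is a plain void * shared with the remoteproc core, while ioremap_wc() returns a void __iomem *, so sparse insists on an explicit cast in both directions.

struct example_carveout {
	void *va;	/* generic pointer expected by the remoteproc core */
};

static int example_map(struct example_carveout *c, phys_addr_t pa, size_t len)
{
	c->va = (__force void *)ioremap_wc(pa, len);	/* silence sparse */
	return c->va ? 0 : -ENOMEM;
}

static void example_unmap(struct example_carveout *c)
{
	iounmap((__force void __iomem *)c->va);
}
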
diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
index ab882e3b7130..3555b535b168 100644
--- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c
+++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c
@@ -550,6 +550,13 @@ static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev,
return 0;
}
+static void k3_dsp_mem_release(void *data)
+{
+ struct device *dev = data;
+
+ of_reserved_mem_device_release(dev);
+}
+
static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
{
struct device *dev = kproc->dev;
@@ -579,27 +586,25 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
ERR_PTR(ret));
return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_dsp_mem_release, dev);
+ if (ret)
+ return ret;
num_rmems--;
- kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
- if (!kproc->rmem) {
- ret = -ENOMEM;
- goto release_rmem;
- }
+ kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
+ if (!kproc->rmem)
+ return -ENOMEM;
/* use remaining reserved memory regions for static carveouts */
for (i = 0; i < num_rmems; i++) {
rmem_np = of_parse_phandle(np, "memory-region", i + 1);
- if (!rmem_np) {
- ret = -EINVAL;
- goto unmap_rmem;
- }
+ if (!rmem_np)
+ return -EINVAL;
rmem = of_reserved_mem_lookup(rmem_np);
if (!rmem) {
of_node_put(rmem_np);
- ret = -EINVAL;
- goto unmap_rmem;
+ return -EINVAL;
}
of_node_put(rmem_np);
@@ -607,12 +612,11 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
/* 64-bit address regions currently not supported */
kproc->rmem[i].dev_addr = (u32)rmem->base;
kproc->rmem[i].size = rmem->size;
- kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
+ kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size);
if (!kproc->rmem[i].cpu_addr) {
dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
i + 1, &rmem->base, &rmem->size);
- ret = -ENOMEM;
- goto unmap_rmem;
+ return -ENOMEM;
}
dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
@@ -623,25 +627,13 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc)
kproc->num_rmems = num_rmems;
return 0;
-
-unmap_rmem:
- for (i--; i >= 0; i--)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-release_rmem:
- of_reserved_mem_device_release(kproc->dev);
- return ret;
}
-static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc)
+static void k3_dsp_release_tsp(void *data)
{
- int i;
+ struct ti_sci_proc *tsp = data;
- for (i = 0; i < kproc->num_rmems; i++)
- iounmap(kproc->rmem[i].cpu_addr);
- kfree(kproc->rmem);
-
- of_reserved_mem_device_release(kproc->dev);
+ ti_sci_proc_release(tsp);
}
static
@@ -657,7 +649,7 @@ struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev,
if (ret < 0)
return ERR_PTR(ret);
- tsp = kzalloc(sizeof(*tsp), GFP_KERNEL);
+ tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
if (!tsp)
return ERR_PTR(-ENOMEM);
@@ -680,7 +672,6 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
const char *fw_name;
bool p_state = false;
int ret = 0;
- int ret1;
data = of_device_get_match_data(dev);
if (!data)
@@ -690,8 +681,8 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (ret)
return dev_err_probe(dev, ret, "failed to parse firmware-name property\n");
- rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name,
- sizeof(*kproc));
+ rproc = devm_rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops,
+ fw_name, sizeof(*kproc));
if (!rproc)
return -ENOMEM;
@@ -706,56 +697,46 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
kproc->dev = dev;
kproc->data = data;
- kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci");
- if (IS_ERR(kproc->ti_sci)) {
- ret = dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
- "failed to get ti-sci handle\n");
- kproc->ti_sci = NULL;
- goto free_rproc;
- }
+ kproc->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
+ if (IS_ERR(kproc->ti_sci))
+ return dev_err_probe(dev, PTR_ERR(kproc->ti_sci),
+ "failed to get ti-sci handle\n");
ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id);
- if (ret) {
- dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
- goto put_sci;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "missing 'ti,sci-dev-id' property\n");
kproc->reset = devm_reset_control_get_exclusive(dev, NULL);
- if (IS_ERR(kproc->reset)) {
- ret = dev_err_probe(dev, PTR_ERR(kproc->reset),
- "failed to get reset\n");
- goto put_sci;
- }
+ if (IS_ERR(kproc->reset))
+ return dev_err_probe(dev, PTR_ERR(kproc->reset),
+ "failed to get reset\n");
kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci);
- if (IS_ERR(kproc->tsp)) {
- ret = dev_err_probe(dev, PTR_ERR(kproc->tsp),
- "failed to construct ti-sci proc control\n");
- goto put_sci;
- }
+ if (IS_ERR(kproc->tsp))
+ return dev_err_probe(dev, PTR_ERR(kproc->tsp),
+ "failed to construct ti-sci proc control\n");
ret = ti_sci_proc_request(kproc->tsp);
if (ret < 0) {
dev_err_probe(dev, ret, "ti_sci_proc_request failed\n");
- goto free_tsp;
+ return ret;
}
+ ret = devm_add_action_or_reset(dev, k3_dsp_release_tsp, kproc->tsp);
+ if (ret)
+ return ret;
ret = k3_dsp_rproc_of_get_memories(pdev, kproc);
if (ret)
- goto release_tsp;
+ return ret;
ret = k3_dsp_reserved_mem_init(kproc);
- if (ret) {
- dev_err_probe(dev, ret, "reserved memory init failed\n");
- goto release_tsp;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "reserved memory init failed\n");
ret = kproc->ti_sci->ops.dev_ops.is_on(kproc->ti_sci, kproc->ti_sci_id,
NULL, &p_state);
- if (ret) {
- dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
- goto release_mem;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get initial state, mode cannot be determined\n");
/* configure J721E devices for either remoteproc or IPC-only mode */
if (p_state) {
@@ -779,8 +760,7 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
if (data->uses_lreset) {
ret = reset_control_status(kproc->reset);
if (ret < 0) {
- dev_err_probe(dev, ret, "failed to get reset status\n");
- goto release_mem;
+ return dev_err_probe(dev, ret, "failed to get reset status\n");
} else if (ret == 0) {
dev_warn(dev, "local reset is deasserted for device\n");
k3_dsp_rproc_reset(kproc);
@@ -788,31 +768,13 @@ static int k3_dsp_rproc_probe(struct platform_device *pdev)
}
}
- ret = rproc_add(rproc);
- if (ret) {
- dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
- goto release_mem;
- }
+ ret = devm_rproc_add(dev, rproc);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add register device with remoteproc core\n");
platform_set_drvdata(pdev, kproc);
return 0;
-
-release_mem:
- k3_dsp_reserved_mem_exit(kproc);
-release_tsp:
- ret1 = ti_sci_proc_release(kproc->tsp);
- if (ret1)
- dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret1));
-free_tsp:
- kfree(kproc->tsp);
-put_sci:
- ret1 = ti_sci_put_handle(kproc->ti_sci);
- if (ret1)
- dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret1));
-free_rproc:
- rproc_free(rproc);
- return ret;
}
static void k3_dsp_rproc_remove(struct platform_device *pdev)
@@ -824,27 +786,9 @@ static void k3_dsp_rproc_remove(struct platform_device *pdev)
if (rproc->state == RPROC_ATTACHED) {
ret = rproc_detach(rproc);
- if (ret) {
- /* Note this error path leaks resources */
+ if (ret)
dev_err(dev, "failed to detach proc (%pe)\n", ERR_PTR(ret));
- return;
- }
}
-
- rproc_del(kproc->rproc);
-
- ret = ti_sci_proc_release(kproc->tsp);
- if (ret)
- dev_err(dev, "failed to release proc (%pe)\n", ERR_PTR(ret));
-
- kfree(kproc->tsp);
-
- ret = ti_sci_put_handle(kproc->ti_sci);
- if (ret)
- dev_err(dev, "failed to put ti_sci handle (%pe)\n", ERR_PTR(ret));
-
- k3_dsp_reserved_mem_exit(kproc);
- rproc_free(kproc->rproc);
}
static const struct k3_dsp_mem_data c66_mems[] = {
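
A minimal sketch of the devm_add_action_or_reset() idiom that replaces the unwind labels above; example_acquire(), example_teardown() and struct example_ctx are hypothetical. If registering the action fails, the callback is invoked immediately, so the caller can simply return the error.

static void example_release(void *data)
{
	struct example_ctx *ctx = data;

	example_teardown(ctx);	/* runs on probe failure and on remove */
}

static int example_setup(struct device *dev, struct example_ctx *ctx)
{
	int ret;

	ret = example_acquire(ctx);
	if (ret)
		return ret;

	/* On failure this calls example_release(ctx) before returning. */
	return devm_add_action_or_reset(dev, example_release, ctx);
}
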
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index ccd59ddd7610..85b27c42cf65 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -66,6 +66,15 @@ config RESET_BRCMSTB_RESCAL
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
BCM7216.
+config RESET_GPIO
+ tristate "GPIO reset controller"
+ help
+ This enables a generic reset controller for resets attached via
+	  GPIOs. For OF platforms, this driver typically expects a
+	  "reset-gpios" property.
+
+	  If compiled as a module, it will be called reset-gpio.
+
config RESET_HSDK
bool "Synopsys HSDK Reset Driver"
depends on HAS_IOMEM
@@ -213,7 +222,7 @@ config RESET_SCMI
config RESET_SIMPLE
bool "Simple Reset Controller Driver" if COMPILE_TEST || EXPERT
- default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
+ default ARCH_ASPEED || ARCH_BCMBCA || ARCH_BITMAIN || ARCH_REALTEK || ARCH_SOPHGO || ARCH_STM32 || (ARCH_INTEL_SOCFPGA && ARM64) || ARCH_SUNXI || ARC
depends on HAS_IOMEM
help
This enables a simple reset controller driver for reset lines that
@@ -228,6 +237,7 @@ config RESET_SIMPLE
- RCC reset controller in STM32 MCUs
- Allwinner SoCs
- SiFive FU740 SoCs
+ - Sophgo SoCs
config RESET_SOCFPGA
bool "SoCFPGA Reset Driver" if COMPILE_TEST && (!ARM || !ARCH_INTEL_SOCFPGA)
diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile
index 8270da8a4baa..fd8b49fa46fc 100644
--- a/drivers/reset/Makefile
+++ b/drivers/reset/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_RESET_BCM6345) += reset-bcm6345.o
obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o
obj-$(CONFIG_RESET_BRCMSTB) += reset-brcmstb.o
obj-$(CONFIG_RESET_BRCMSTB_RESCAL) += reset-brcmstb-rescal.o
+obj-$(CONFIG_RESET_GPIO) += reset-gpio.o
obj-$(CONFIG_RESET_HSDK) += reset-hsdk.o
obj-$(CONFIG_RESET_IMX7) += reset-imx7.o
obj-$(CONFIG_RESET_INTEL_GW) += reset-intel-gw.o
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index 4d5a78d3c085..dba74e857be6 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -5,14 +5,19 @@
* Copyright 2013 Philipp Zabel, Pengutronix
*/
#include <linux/atomic.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/kref.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/idr.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/acpi.h>
+#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -23,6 +28,11 @@ static LIST_HEAD(reset_controller_list);
static DEFINE_MUTEX(reset_lookup_mutex);
static LIST_HEAD(reset_lookup_list);
+/* Protects reset_gpio_lookup_list */
+static DEFINE_MUTEX(reset_gpio_lookup_mutex);
+static LIST_HEAD(reset_gpio_lookup_list);
+static DEFINE_IDA(reset_gpio_ida);
+
/**
* struct reset_control - a reset control
* @rcdev: a pointer to the reset controller device
@@ -63,6 +73,16 @@ struct reset_control_array {
struct reset_control *rstc[] __counted_by(num_rstcs);
};
+/**
+ * struct reset_gpio_lookup - lookup key for ad-hoc created reset-gpio devices
+ * @of_args: phandle to the reset controller with all the args like GPIO number
+ * @list: list entry for the reset_gpio_lookup_list
+ */
+struct reset_gpio_lookup {
+ struct of_phandle_args of_args;
+ struct list_head list;
+};
+
static const char *rcdev_name(struct reset_controller_dev *rcdev)
{
if (rcdev->dev)
@@ -71,6 +91,9 @@ static const char *rcdev_name(struct reset_controller_dev *rcdev)
if (rcdev->of_node)
return rcdev->of_node->full_name;
+ if (rcdev->of_args)
+ return rcdev->of_args->np->full_name;
+
return NULL;
}
@@ -99,6 +122,9 @@ static int of_reset_simple_xlate(struct reset_controller_dev *rcdev,
*/
int reset_controller_register(struct reset_controller_dev *rcdev)
{
+ if (rcdev->of_node && rcdev->of_args)
+ return -EINVAL;
+
if (!rcdev->of_xlate) {
rcdev->of_reset_n_cells = 1;
rcdev->of_xlate = of_reset_simple_xlate;
@@ -813,12 +839,171 @@ static void __reset_control_put_internal(struct reset_control *rstc)
kref_put(&rstc->refcnt, __reset_control_release);
}
+static int __reset_add_reset_gpio_lookup(int id, struct device_node *np,
+ unsigned int gpio,
+ unsigned int of_flags)
+{
+ const struct fwnode_handle *fwnode = of_fwnode_handle(np);
+ unsigned int lookup_flags;
+ const char *label_tmp;
+
+ /*
+	 * Later we map GPIO flags between OF and Linux; however, not all
+ * constants from include/dt-bindings/gpio/gpio.h and
+ * include/linux/gpio/machine.h match each other.
+ */
+ if (of_flags > GPIO_ACTIVE_LOW) {
+ pr_err("reset-gpio code does not support GPIO flags %u for GPIO %u\n",
+ of_flags, gpio);
+ return -EINVAL;
+ }
+
+ struct gpio_device *gdev __free(gpio_device_put) = gpio_device_find_by_fwnode(fwnode);
+ if (!gdev)
+ return -EPROBE_DEFER;
+
+ label_tmp = gpio_device_get_label(gdev);
+ if (!label_tmp)
+ return -EINVAL;
+
+ char *label __free(kfree) = kstrdup(label_tmp, GFP_KERNEL);
+ if (!label)
+ return -ENOMEM;
+
+ /* Size: one lookup entry plus sentinel */
+ struct gpiod_lookup_table *lookup __free(kfree) = kzalloc(struct_size(lookup, table, 2),
+ GFP_KERNEL);
+ if (!lookup)
+ return -ENOMEM;
+
+ lookup->dev_id = kasprintf(GFP_KERNEL, "reset-gpio.%d", id);
+ if (!lookup->dev_id)
+ return -ENOMEM;
+
+ lookup_flags = GPIO_PERSISTENT;
+ lookup_flags |= of_flags & GPIO_ACTIVE_LOW;
+ lookup->table[0] = GPIO_LOOKUP(no_free_ptr(label), gpio, "reset",
+ lookup_flags);
+
+	/* Not freed on success, because it is persistent subsystem data. */
+ gpiod_add_lookup_table(no_free_ptr(lookup));
+
+ return 0;
+}
+
+/*
+ * @args: phandle to the GPIO provider with all the args like GPIO number
+ */
+static int __reset_add_reset_gpio_device(const struct of_phandle_args *args)
+{
+ struct reset_gpio_lookup *rgpio_dev;
+ struct platform_device *pdev;
+ int id, ret;
+
+ /*
+ * Currently only #gpio-cells=2 is supported with the meaning of:
+ * args[0]: GPIO number
+ * args[1]: GPIO flags
+ * TODO: Handle other cases.
+ */
+ if (args->args_count != 2)
+ return -ENOENT;
+
+ /*
+	 * Registering the reset-gpio device might cause an immediate bind,
+	 * resulting in its probe() registering a new reset controller and
+	 * thus taking the reset_list_mutex lock via reset_controller_register().
+ */
+ lockdep_assert_not_held(&reset_list_mutex);
+
+ mutex_lock(&reset_gpio_lookup_mutex);
+
+ list_for_each_entry(rgpio_dev, &reset_gpio_lookup_list, list) {
+ if (args->np == rgpio_dev->of_args.np) {
+ if (of_phandle_args_equal(args, &rgpio_dev->of_args))
+ goto out; /* Already on the list, done */
+ }
+ }
+
+ id = ida_alloc(&reset_gpio_ida, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto err_unlock;
+ }
+
+	/* Not freed on success, because it is persistent subsystem data. */
+ rgpio_dev = kzalloc(sizeof(*rgpio_dev), GFP_KERNEL);
+ if (!rgpio_dev) {
+ ret = -ENOMEM;
+ goto err_ida_free;
+ }
+
+ ret = __reset_add_reset_gpio_lookup(id, args->np, args->args[0],
+ args->args[1]);
+ if (ret < 0)
+ goto err_kfree;
+
+ rgpio_dev->of_args = *args;
+ /*
+	 * We keep the device_node reference, but of_args.np is put at the end
+	 * of __of_reset_control_get(), so take it one more time here and hold
+	 * it for as long as the rgpio_dev memory is valid.
+ */
+ of_node_get(rgpio_dev->of_args.np);
+ pdev = platform_device_register_data(NULL, "reset-gpio", id,
+ &rgpio_dev->of_args,
+ sizeof(rgpio_dev->of_args));
+ ret = PTR_ERR_OR_ZERO(pdev);
+ if (ret)
+ goto err_put;
+
+ list_add(&rgpio_dev->list, &reset_gpio_lookup_list);
+
+out:
+ mutex_unlock(&reset_gpio_lookup_mutex);
+
+ return 0;
+
+err_put:
+ of_node_put(rgpio_dev->of_args.np);
+err_kfree:
+ kfree(rgpio_dev);
+err_ida_free:
+ ida_free(&reset_gpio_ida, id);
+err_unlock:
+ mutex_unlock(&reset_gpio_lookup_mutex);
+
+ return ret;
+}
+
+static struct reset_controller_dev *__reset_find_rcdev(const struct of_phandle_args *args,
+ bool gpio_fallback)
+{
+ struct reset_controller_dev *rcdev;
+
+ lockdep_assert_held(&reset_list_mutex);
+
+ list_for_each_entry(rcdev, &reset_controller_list, list) {
+ if (gpio_fallback) {
+ if (rcdev->of_args && of_phandle_args_equal(args,
+ rcdev->of_args))
+ return rcdev;
+ } else {
+ if (args->np == rcdev->of_node)
+ return rcdev;
+ }
+ }
+
+ return NULL;
+}
+
struct reset_control *
__of_reset_control_get(struct device_node *node, const char *id, int index,
bool shared, bool optional, bool acquired)
{
+ bool gpio_fallback = false;
struct reset_control *rstc;
- struct reset_controller_dev *r, *rcdev;
+ struct reset_controller_dev *rcdev;
struct of_phandle_args args;
int rstc_id;
int ret;
@@ -839,39 +1024,52 @@ __of_reset_control_get(struct device_node *node, const char *id, int index,
index, &args);
if (ret == -EINVAL)
return ERR_PTR(ret);
- if (ret)
- return optional ? NULL : ERR_PTR(ret);
+ if (ret) {
+ if (!IS_ENABLED(CONFIG_RESET_GPIO))
+ return optional ? NULL : ERR_PTR(ret);
- mutex_lock(&reset_list_mutex);
- rcdev = NULL;
- list_for_each_entry(r, &reset_controller_list, list) {
- if (args.np == r->of_node) {
- rcdev = r;
- break;
+ /*
+ * There can be only one reset-gpio for regular devices, so
+ * don't bother with the "reset-gpios" phandle index.
+ */
+ ret = of_parse_phandle_with_args(node, "reset-gpios", "#gpio-cells",
+ 0, &args);
+ if (ret)
+ return optional ? NULL : ERR_PTR(ret);
+
+ gpio_fallback = true;
+
+ ret = __reset_add_reset_gpio_device(&args);
+ if (ret) {
+ rstc = ERR_PTR(ret);
+ goto out_put;
}
}
+ mutex_lock(&reset_list_mutex);
+ rcdev = __reset_find_rcdev(&args, gpio_fallback);
if (!rcdev) {
rstc = ERR_PTR(-EPROBE_DEFER);
- goto out;
+ goto out_unlock;
}
if (WARN_ON(args.args_count != rcdev->of_reset_n_cells)) {
rstc = ERR_PTR(-EINVAL);
- goto out;
+ goto out_unlock;
}
rstc_id = rcdev->of_xlate(rcdev, &args);
if (rstc_id < 0) {
rstc = ERR_PTR(rstc_id);
- goto out;
+ goto out_unlock;
}
/* reset_list_mutex also protects the rcdev's reset_control list */
rstc = __reset_control_get_internal(rcdev, rstc_id, shared, acquired);
-out:
+out_unlock:
mutex_unlock(&reset_list_mutex);
+out_put:
of_node_put(args.np);
return rstc;
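
A hedged sketch of the scope-based cleanup used in __reset_add_reset_gpio_lookup() above; example_consume() is a hypothetical sink that takes ownership. A pointer annotated with __free(kfree) is freed automatically on every early return, and no_free_ptr() disarms the cleanup once ownership is handed over.

static void example_consume(char *name);	/* hypothetical, takes ownership */

static int example_build_name(const char *src)
{
	char *name __free(kfree) = kstrdup(src, GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	if (strlen(name) > 32)
		return -EINVAL;		/* name is kfree()d automatically */

	example_consume(no_free_ptr(name));	/* not freed here anymore */
	return 0;
}
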
diff --git a/drivers/reset/reset-gpio.c b/drivers/reset/reset-gpio.c
new file mode 100644
index 000000000000..2290b25b6703
--- /dev/null
+++ b/drivers/reset/reset-gpio.c
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+
+struct reset_gpio_priv {
+ struct reset_controller_dev rc;
+ struct gpio_desc *reset;
+};
+
+static inline struct reset_gpio_priv
+*rc_to_reset_gpio(struct reset_controller_dev *rc)
+{
+ return container_of(rc, struct reset_gpio_priv, rc);
+}
+
+static int reset_gpio_assert(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct reset_gpio_priv *priv = rc_to_reset_gpio(rc);
+
+ gpiod_set_value_cansleep(priv->reset, 1);
+
+ return 0;
+}
+
+static int reset_gpio_deassert(struct reset_controller_dev *rc,
+ unsigned long id)
+{
+ struct reset_gpio_priv *priv = rc_to_reset_gpio(rc);
+
+ gpiod_set_value_cansleep(priv->reset, 0);
+
+ return 0;
+}
+
+static int reset_gpio_status(struct reset_controller_dev *rc, unsigned long id)
+{
+ struct reset_gpio_priv *priv = rc_to_reset_gpio(rc);
+
+ return gpiod_get_value_cansleep(priv->reset);
+}
+
+static const struct reset_control_ops reset_gpio_ops = {
+ .assert = reset_gpio_assert,
+ .deassert = reset_gpio_deassert,
+ .status = reset_gpio_status,
+};
+
+static int reset_gpio_of_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ return reset_spec->args[0];
+}
+
+static void reset_gpio_of_node_put(void *data)
+{
+ of_node_put(data);
+}
+
+static int reset_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct of_phandle_args *platdata = dev_get_platdata(dev);
+ struct reset_gpio_priv *priv;
+ int ret;
+
+ if (!platdata)
+ return -EINVAL;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, &priv->rc);
+
+ priv->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset))
+ return dev_err_probe(dev, PTR_ERR(priv->reset),
+ "Could not get reset gpios\n");
+
+ priv->rc.ops = &reset_gpio_ops;
+ priv->rc.owner = THIS_MODULE;
+ priv->rc.dev = dev;
+ priv->rc.of_args = platdata;
+ ret = devm_add_action_or_reset(dev, reset_gpio_of_node_put,
+ priv->rc.of_node);
+ if (ret)
+ return ret;
+
+ /* Cells to match GPIO specifier, but it's not really used */
+ priv->rc.of_reset_n_cells = 2;
+ priv->rc.of_xlate = reset_gpio_of_xlate;
+ priv->rc.nr_resets = 1;
+
+ return devm_reset_controller_register(dev, &priv->rc);
+}
+
+static const struct platform_device_id reset_gpio_ids[] = {
+ { .name = "reset-gpio", },
+ {}
+};
+MODULE_DEVICE_TABLE(platform, reset_gpio_ids);
+
+static struct platform_driver reset_gpio_driver = {
+ .probe = reset_gpio_probe,
+ .id_table = reset_gpio_ids,
+ .driver = {
+ .name = "reset-gpio",
+ },
+};
+module_platform_driver(reset_gpio_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>");
+MODULE_DESCRIPTION("Generic GPIO reset driver");
+MODULE_LICENSE("GPL");
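
A hedged consumer-side sketch (device specifics and delays are illustrative, not from this series): with the reset-gpio driver above and the fallback added in reset/core.c, a device driver keeps using the regular reset_control API regardless of whether its node carries a "resets" phandle or only "reset-gpios".

static int example_hw_reset(struct device *dev)
{
	struct reset_control *rst;

	/* Resolves "resets", or falls back to "reset-gpios" via reset-gpio. */
	rst = devm_reset_control_get_optional_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	reset_control_assert(rst);
	usleep_range(1000, 2000);	/* illustrative reset pulse width */
	reset_control_deassert(rst);

	return 0;
}
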
diff --git a/drivers/reset/reset-simple.c b/drivers/reset/reset-simple.c
index 818cabcc9fb7..276067839830 100644
--- a/drivers/reset/reset-simple.c
+++ b/drivers/reset/reset-simple.c
@@ -151,6 +151,8 @@ static const struct of_device_id reset_simple_dt_ids[] = {
{ .compatible = "snps,dw-high-reset" },
{ .compatible = "snps,dw-low-reset",
.data = &reset_simple_active_low },
+ { .compatible = "sophgo,sg2042-reset",
+ .data = &reset_simple_active_low },
{ /* sentinel */ },
};
diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
index 09833ad05da7..1cb8d7474428 100644
--- a/drivers/rpmsg/rpmsg_char.c
+++ b/drivers/rpmsg/rpmsg_char.c
@@ -399,8 +399,8 @@ static void rpmsg_eptdev_release_device(struct device *dev)
{
struct rpmsg_eptdev *eptdev = dev_to_eptdev(dev);
- ida_simple_remove(&rpmsg_ept_ida, dev->id);
- ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
+ ida_free(&rpmsg_ept_ida, dev->id);
+ ida_free(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
kfree(eptdev);
}
@@ -441,12 +441,12 @@ static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_cha
eptdev->chinfo = chinfo;
- ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
+ ret = ida_alloc_max(&rpmsg_minor_ida, RPMSG_DEV_MAX - 1, GFP_KERNEL);
if (ret < 0)
goto free_eptdev;
dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
- ret = ida_simple_get(&rpmsg_ept_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&rpmsg_ept_ida, GFP_KERNEL);
if (ret < 0)
goto free_minor_ida;
dev->id = ret;
@@ -462,9 +462,9 @@ static int rpmsg_chrdev_eptdev_add(struct rpmsg_eptdev *eptdev, struct rpmsg_cha
return ret;
free_ept_ida:
- ida_simple_remove(&rpmsg_ept_ida, dev->id);
+ ida_free(&rpmsg_ept_ida, dev->id);
free_minor_ida:
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+ ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
free_eptdev:
put_device(dev);
kfree(eptdev);
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 8abc7d022ff7..4295c01a2861 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -605,7 +605,7 @@ static void rpmsg_dev_remove(struct device *dev)
rpmsg_destroy_ept(rpdev->ept);
}
-static struct bus_type rpmsg_bus = {
+static const struct bus_type rpmsg_bus = {
.name = "rpmsg",
.match = rpmsg_dev_match,
.dev_groups = rpmsg_dev_groups,
diff --git a/drivers/rpmsg/rpmsg_ctrl.c b/drivers/rpmsg/rpmsg_ctrl.c
index 433253835690..c312794ba4b3 100644
--- a/drivers/rpmsg/rpmsg_ctrl.c
+++ b/drivers/rpmsg/rpmsg_ctrl.c
@@ -130,8 +130,8 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
{
struct rpmsg_ctrldev *ctrldev = dev_to_ctrldev(dev);
- ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+ ida_free(&rpmsg_ctrl_ida, dev->id);
+ ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
kfree(ctrldev);
}
@@ -156,12 +156,12 @@ static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev)
cdev_init(&ctrldev->cdev, &rpmsg_ctrldev_fops);
ctrldev->cdev.owner = THIS_MODULE;
- ret = ida_simple_get(&rpmsg_minor_ida, 0, RPMSG_DEV_MAX, GFP_KERNEL);
+ ret = ida_alloc_max(&rpmsg_minor_ida, RPMSG_DEV_MAX - 1, GFP_KERNEL);
if (ret < 0)
goto free_ctrldev;
dev->devt = MKDEV(MAJOR(rpmsg_major), ret);
- ret = ida_simple_get(&rpmsg_ctrl_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&rpmsg_ctrl_ida, GFP_KERNEL);
if (ret < 0)
goto free_minor_ida;
dev->id = ret;
@@ -179,9 +179,9 @@ static int rpmsg_ctrldev_probe(struct rpmsg_device *rpdev)
return ret;
free_ctrl_ida:
- ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
+ ida_free(&rpmsg_ctrl_ida, dev->id);
free_minor_ida:
- ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
+ ida_free(&rpmsg_minor_ida, MINOR(dev->devt));
free_ctrldev:
put_device(dev);
kfree(ctrldev);
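
A short sketch of the IDA API mapping used in the rpmsg conversions above (the ida and the 128 range are illustrative): ida_simple_get(ida, 0, N, gfp) becomes ida_alloc_max(ida, N - 1, gfp) because the old "end" argument was exclusive while "max" is inclusive, the unbounded ida_simple_get(ida, 0, 0, gfp) becomes ida_alloc(ida, gfp), and ida_simple_remove() becomes ida_free().

static DEFINE_IDA(example_ida);		/* needs <linux/idr.h> */

static int example_get_minor(void)
{
	/* was: ida_simple_get(&example_ida, 0, 128, GFP_KERNEL) */
	return ida_alloc_max(&example_ida, 128 - 1, GFP_KERNEL);
}

static void example_put_minor(int minor)
{
	/* was: ida_simple_remove(&example_ida, minor) */
	ida_free(&example_ida, minor);
}
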
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e37a4341f442..c63e32d012f2 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1858,7 +1858,8 @@ config RTC_DRV_MT2712
config RTC_DRV_MT6397
tristate "MediaTek PMIC based RTC"
- depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN)
+ depends on MFD_MT6397 || COMPILE_TEST
+ select IRQ_DOMAIN
help
This selects the MediaTek(R) RTC driver. RTC is part of MediaTek
MT6397 PMIC. You should enable MT6397 PMIC MFD before select
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 921ee1827974..e31fa0ad127e 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -21,7 +21,6 @@
#include "rtc-core.h"
static DEFINE_IDA(rtc_ida);
-struct class *rtc_class;
static void rtc_device_release(struct device *dev)
{
@@ -199,6 +198,11 @@ static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume);
#define RTC_CLASS_DEV_PM_OPS NULL
#endif
+const struct class rtc_class = {
+ .name = "rtc",
+ .pm = RTC_CLASS_DEV_PM_OPS,
+};
+
/* Ensure the caller will set the id before releasing the device */
static struct rtc_device *rtc_allocate_device(void)
{
@@ -220,7 +224,7 @@ static struct rtc_device *rtc_allocate_device(void)
rtc->irq_freq = 1;
rtc->max_user_freq = 64;
- rtc->dev.class = rtc_class;
+ rtc->dev.class = &rtc_class;
rtc->dev.groups = rtc_get_dev_attribute_groups();
rtc->dev.release = rtc_device_release;
@@ -475,13 +479,14 @@ EXPORT_SYMBOL_GPL(devm_rtc_device_register);
static int __init rtc_init(void)
{
- rtc_class = class_create("rtc");
- if (IS_ERR(rtc_class)) {
- pr_err("couldn't create class\n");
- return PTR_ERR(rtc_class);
- }
- rtc_class->pm = RTC_CLASS_DEV_PM_OPS;
+ int err;
+
+ err = class_register(&rtc_class);
+ if (err)
+ return err;
+
rtc_dev_init();
+
return 0;
}
subsys_initcall(rtc_init);
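
A minimal sketch of the class conversion pattern applied above; the class name and EXAMPLE_CLASS_PM_OPS are placeholders. Instead of class_create() returning a heap-allocated struct class whose fields are patched afterwards, the class becomes a const static definition registered with class_register().

static const struct class example_class = {
	.name	= "example",
	.pm	= EXAMPLE_CLASS_PM_OPS,	/* placeholder dev_pm_ops, may be NULL */
};

static int __init example_init(void)
{
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}
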
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 1b63111cdda2..5faafb4aa55c 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -696,7 +696,7 @@ struct rtc_device *rtc_class_open(const char *name)
struct device *dev;
struct rtc_device *rtc = NULL;
- dev = class_find_device_by_name(rtc_class, name);
+ dev = class_find_device_by_name(&rtc_class, name);
if (dev)
rtc = to_rtc_device(dev);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 1109cad83838..8b087d9556be 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -22,26 +22,24 @@
#include <linux/io.h>
#include <linux/module.h>
-enum ds1511reg {
- DS1511_SEC = 0x0,
- DS1511_MIN = 0x1,
- DS1511_HOUR = 0x2,
- DS1511_DOW = 0x3,
- DS1511_DOM = 0x4,
- DS1511_MONTH = 0x5,
- DS1511_YEAR = 0x6,
- DS1511_CENTURY = 0x7,
- DS1511_AM1_SEC = 0x8,
- DS1511_AM2_MIN = 0x9,
- DS1511_AM3_HOUR = 0xa,
- DS1511_AM4_DATE = 0xb,
- DS1511_WD_MSEC = 0xc,
- DS1511_WD_SEC = 0xd,
- DS1511_CONTROL_A = 0xe,
- DS1511_CONTROL_B = 0xf,
- DS1511_RAMADDR_LSB = 0x10,
- DS1511_RAMDATA = 0x13
-};
+#define DS1511_SEC 0x0
+#define DS1511_MIN 0x1
+#define DS1511_HOUR 0x2
+#define DS1511_DOW 0x3
+#define DS1511_DOM 0x4
+#define DS1511_MONTH 0x5
+#define DS1511_YEAR 0x6
+#define DS1511_CENTURY 0x7
+#define DS1511_AM1_SEC 0x8
+#define DS1511_AM2_MIN 0x9
+#define DS1511_AM3_HOUR 0xa
+#define DS1511_AM4_DATE 0xb
+#define DS1511_WD_MSEC 0xc
+#define DS1511_WD_SEC 0xd
+#define DS1511_CONTROL_A 0xe
+#define DS1511_CONTROL_B 0xf
+#define DS1511_RAMADDR_LSB 0x10
+#define DS1511_RAMDATA 0x13
#define DS1511_BLF1 0x80
#define DS1511_BLF2 0x40
@@ -61,35 +59,10 @@ enum ds1511reg {
#define DS1511_WDS 0x01
#define DS1511_RAM_MAX 0x100
-#define RTC_CMD DS1511_CONTROL_B
-#define RTC_CMD1 DS1511_CONTROL_A
-
-#define RTC_ALARM_SEC DS1511_AM1_SEC
-#define RTC_ALARM_MIN DS1511_AM2_MIN
-#define RTC_ALARM_HOUR DS1511_AM3_HOUR
-#define RTC_ALARM_DATE DS1511_AM4_DATE
-
-#define RTC_SEC DS1511_SEC
-#define RTC_MIN DS1511_MIN
-#define RTC_HOUR DS1511_HOUR
-#define RTC_DOW DS1511_DOW
-#define RTC_DOM DS1511_DOM
-#define RTC_MON DS1511_MONTH
-#define RTC_YEAR DS1511_YEAR
-#define RTC_CENTURY DS1511_CENTURY
-
-#define RTC_TIE DS1511_TIE
-#define RTC_TE DS1511_TE
-
-struct rtc_plat_data {
+struct ds1511_data {
struct rtc_device *rtc;
void __iomem *ioaddr; /* virtual base address */
int irq;
- unsigned int irqen;
- int alrm_sec;
- int alrm_min;
- int alrm_hour;
- int alrm_mday;
spinlock_t lock;
};
@@ -98,95 +71,33 @@ static DEFINE_SPINLOCK(ds1511_lock);
static __iomem char *ds1511_base;
static u32 reg_spacing = 1;
-static noinline void
-rtc_write(uint8_t val, uint32_t reg)
+static void rtc_write(uint8_t val, uint32_t reg)
{
writeb(val, ds1511_base + (reg * reg_spacing));
}
-static noinline uint8_t
-rtc_read(enum ds1511reg reg)
+static uint8_t rtc_read(uint32_t reg)
{
return readb(ds1511_base + (reg * reg_spacing));
}
-static inline void
-rtc_disable_update(void)
+static void rtc_disable_update(void)
{
- rtc_write((rtc_read(RTC_CMD) & ~RTC_TE), RTC_CMD);
+ rtc_write((rtc_read(DS1511_CONTROL_B) & ~DS1511_TE), DS1511_CONTROL_B);
}
-static void
-rtc_enable_update(void)
+static void rtc_enable_update(void)
{
- rtc_write((rtc_read(RTC_CMD) | RTC_TE), RTC_CMD);
-}
-
-/*
- * #define DS1511_WDOG_RESET_SUPPORT
- *
- * Uncomment this if you want to use these routines in
- * some platform code.
- */
-#ifdef DS1511_WDOG_RESET_SUPPORT
-/*
- * just enough code to set the watchdog timer so that it
- * will reboot the system
- */
-void
-ds1511_wdog_set(unsigned long deciseconds)
-{
- /*
- * the wdog timer can take 99.99 seconds
- */
- deciseconds %= 10000;
- /*
- * set the wdog values in the wdog registers
- */
- rtc_write(bin2bcd(deciseconds % 100), DS1511_WD_MSEC);
- rtc_write(bin2bcd(deciseconds / 100), DS1511_WD_SEC);
- /*
- * set wdog enable and wdog 'steering' bit to issue a reset
- */
- rtc_write(rtc_read(RTC_CMD) | DS1511_WDE | DS1511_WDS, RTC_CMD);
-}
-
-void
-ds1511_wdog_disable(void)
-{
- /*
- * clear wdog enable and wdog 'steering' bits
- */
- rtc_write(rtc_read(RTC_CMD) & ~(DS1511_WDE | DS1511_WDS), RTC_CMD);
- /*
- * clear the wdog counter
- */
- rtc_write(0, DS1511_WD_MSEC);
- rtc_write(0, DS1511_WD_SEC);
+ rtc_write((rtc_read(DS1511_CONTROL_B) | DS1511_TE), DS1511_CONTROL_B);
}
-#endif
-/*
- * set the rtc chip's idea of the time.
- * stupidly, some callers call with year unmolested;
- * and some call with year = year - 1900. thanks.
- */
static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
{
u8 mon, day, dow, hrs, min, sec, yrs, cen;
unsigned long flags;
- /*
- * won't have to change this for a while
- */
- if (rtc_tm->tm_year < 1900)
- rtc_tm->tm_year += 1900;
-
- if (rtc_tm->tm_year < 1970)
- return -EINVAL;
-
yrs = rtc_tm->tm_year % 100;
- cen = rtc_tm->tm_year / 100;
+ cen = 19 + rtc_tm->tm_year / 100;
mon = rtc_tm->tm_mon + 1; /* tm_mon starts at zero */
day = rtc_tm->tm_mday;
dow = rtc_tm->tm_wday & 0x7; /* automatic BCD */
@@ -194,15 +105,6 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
min = rtc_tm->tm_min;
sec = rtc_tm->tm_sec;
- if ((mon > 12) || (day == 0))
- return -EINVAL;
-
- if (day > rtc_month_days(rtc_tm->tm_mon, rtc_tm->tm_year))
- return -EINVAL;
-
- if ((hrs >= 24) || (min >= 60) || (sec >= 60))
- return -EINVAL;
-
/*
* each register is a different number of valid bits
*/
@@ -216,14 +118,14 @@ static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
- rtc_write(cen, RTC_CENTURY);
- rtc_write(yrs, RTC_YEAR);
- rtc_write((rtc_read(RTC_MON) & 0xe0) | mon, RTC_MON);
- rtc_write(day, RTC_DOM);
- rtc_write(hrs, RTC_HOUR);
- rtc_write(min, RTC_MIN);
- rtc_write(sec, RTC_SEC);
- rtc_write(dow, RTC_DOW);
+ rtc_write(cen, DS1511_CENTURY);
+ rtc_write(yrs, DS1511_YEAR);
+ rtc_write((rtc_read(DS1511_MONTH) & 0xe0) | mon, DS1511_MONTH);
+ rtc_write(day, DS1511_DOM);
+ rtc_write(hrs, DS1511_HOUR);
+ rtc_write(min, DS1511_MIN);
+ rtc_write(sec, DS1511_SEC);
+ rtc_write(dow, DS1511_DOW);
rtc_enable_update();
spin_unlock_irqrestore(&ds1511_lock, flags);
@@ -238,14 +140,14 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
spin_lock_irqsave(&ds1511_lock, flags);
rtc_disable_update();
- rtc_tm->tm_sec = rtc_read(RTC_SEC) & 0x7f;
- rtc_tm->tm_min = rtc_read(RTC_MIN) & 0x7f;
- rtc_tm->tm_hour = rtc_read(RTC_HOUR) & 0x3f;
- rtc_tm->tm_mday = rtc_read(RTC_DOM) & 0x3f;
- rtc_tm->tm_wday = rtc_read(RTC_DOW) & 0x7;
- rtc_tm->tm_mon = rtc_read(RTC_MON) & 0x1f;
- rtc_tm->tm_year = rtc_read(RTC_YEAR) & 0x7f;
- century = rtc_read(RTC_CENTURY);
+ rtc_tm->tm_sec = rtc_read(DS1511_SEC) & 0x7f;
+ rtc_tm->tm_min = rtc_read(DS1511_MIN) & 0x7f;
+ rtc_tm->tm_hour = rtc_read(DS1511_HOUR) & 0x3f;
+ rtc_tm->tm_mday = rtc_read(DS1511_DOM) & 0x3f;
+ rtc_tm->tm_wday = rtc_read(DS1511_DOW) & 0x7;
+ rtc_tm->tm_mon = rtc_read(DS1511_MONTH) & 0x1f;
+ rtc_tm->tm_year = rtc_read(DS1511_YEAR) & 0x7f;
+ century = rtc_read(DS1511_CENTURY);
rtc_enable_update();
spin_unlock_irqrestore(&ds1511_lock, flags);
@@ -271,106 +173,67 @@ static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
return 0;
}
-/*
- * write the alarm register settings
- *
- * we only have the use to interrupt every second, otherwise
- * known as the update interrupt, or the interrupt if the whole
- * date/hours/mins/secs matches. the ds1511 has many more
- * permutations, but the kernel doesn't.
- */
-static void
-ds1511_rtc_update_alarm(struct rtc_plat_data *pdata)
+static void ds1511_rtc_alarm_enable(unsigned int enabled)
{
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->lock, flags);
- rtc_write(pdata->alrm_mday < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_mday) & 0x3f,
- RTC_ALARM_DATE);
- rtc_write(pdata->alrm_hour < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_hour) & 0x3f,
- RTC_ALARM_HOUR);
- rtc_write(pdata->alrm_min < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_min) & 0x7f,
- RTC_ALARM_MIN);
- rtc_write(pdata->alrm_sec < 0 || (pdata->irqen & RTC_UF) ?
- 0x80 : bin2bcd(pdata->alrm_sec) & 0x7f,
- RTC_ALARM_SEC);
- rtc_write(rtc_read(RTC_CMD) | (pdata->irqen ? RTC_TIE : 0), RTC_CMD);
- rtc_read(RTC_CMD1); /* clear interrupts */
- spin_unlock_irqrestore(&pdata->lock, flags);
+ rtc_write(rtc_read(DS1511_CONTROL_B) | (enabled ? DS1511_TIE : 0), DS1511_CONTROL_B);
}
-static int
-ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+static int ds1511_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+ struct ds1511_data *ds1511 = dev_get_drvdata(dev);
+ unsigned long flags;
- if (pdata->irq <= 0)
- return -EINVAL;
+ spin_lock_irqsave(&ds1511->lock, flags);
+ rtc_write(bin2bcd(alrm->time.tm_mday) & 0x3f, DS1511_AM4_DATE);
+ rtc_write(bin2bcd(alrm->time.tm_hour) & 0x3f, DS1511_AM3_HOUR);
+ rtc_write(bin2bcd(alrm->time.tm_min) & 0x7f, DS1511_AM2_MIN);
+ rtc_write(bin2bcd(alrm->time.tm_sec) & 0x7f, DS1511_AM1_SEC);
+ ds1511_rtc_alarm_enable(alrm->enabled);
- pdata->alrm_mday = alrm->time.tm_mday;
- pdata->alrm_hour = alrm->time.tm_hour;
- pdata->alrm_min = alrm->time.tm_min;
- pdata->alrm_sec = alrm->time.tm_sec;
- if (alrm->enabled)
- pdata->irqen |= RTC_AF;
+ rtc_read(DS1511_CONTROL_A); /* clear interrupts */
+ spin_unlock_irqrestore(&ds1511->lock, flags);
- ds1511_rtc_update_alarm(pdata);
return 0;
}
-static int
-ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+static int ds1511_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
+ alrm->time.tm_mday = bcd2bin(rtc_read(DS1511_AM4_DATE) & 0x3f);
+ alrm->time.tm_hour = bcd2bin(rtc_read(DS1511_AM3_HOUR) & 0x3f);
+ alrm->time.tm_min = bcd2bin(rtc_read(DS1511_AM2_MIN) & 0x7f);
+ alrm->time.tm_sec = bcd2bin(rtc_read(DS1511_AM1_SEC) & 0x7f);
+ alrm->enabled = !!(rtc_read(DS1511_CONTROL_B) & DS1511_TIE);
- alrm->time.tm_mday = pdata->alrm_mday < 0 ? 0 : pdata->alrm_mday;
- alrm->time.tm_hour = pdata->alrm_hour < 0 ? 0 : pdata->alrm_hour;
- alrm->time.tm_min = pdata->alrm_min < 0 ? 0 : pdata->alrm_min;
- alrm->time.tm_sec = pdata->alrm_sec < 0 ? 0 : pdata->alrm_sec;
- alrm->enabled = (pdata->irqen & RTC_AF) ? 1 : 0;
return 0;
}
-static irqreturn_t
-ds1511_interrupt(int irq, void *dev_id)
+static irqreturn_t ds1511_interrupt(int irq, void *dev_id)
{
struct platform_device *pdev = dev_id;
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ struct ds1511_data *ds1511 = platform_get_drvdata(pdev);
unsigned long events = 0;
- spin_lock(&pdata->lock);
+ spin_lock(&ds1511->lock);
/*
* read and clear interrupt
*/
- if (rtc_read(RTC_CMD1) & DS1511_IRQF) {
- events = RTC_IRQF;
- if (rtc_read(RTC_ALARM_SEC) & 0x80)
- events |= RTC_UF;
- else
- events |= RTC_AF;
- rtc_update_irq(pdata->rtc, 1, events);
+ if (rtc_read(DS1511_CONTROL_A) & DS1511_IRQF) {
+ events = RTC_IRQF | RTC_AF;
+ rtc_update_irq(ds1511->rtc, 1, events);
}
- spin_unlock(&pdata->lock);
+ spin_unlock(&ds1511->lock);
return events ? IRQ_HANDLED : IRQ_NONE;
}
static int ds1511_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
- struct rtc_plat_data *pdata = dev_get_drvdata(dev);
-
- if (pdata->irq <= 0)
- return -EINVAL;
- if (enabled)
- pdata->irqen |= RTC_AF;
- else
- pdata->irqen &= ~RTC_AF;
- ds1511_rtc_update_alarm(pdata);
+ struct ds1511_data *ds1511 = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ds1511->lock, flags);
+ ds1511_rtc_alarm_enable(enabled);
+ spin_unlock_irqrestore(&ds1511->lock, flags);
+
return 0;
}
@@ -408,7 +271,7 @@ static int ds1511_nvram_write(void *priv, unsigned int pos, void *buf,
static int ds1511_rtc_probe(struct platform_device *pdev)
{
- struct rtc_plat_data *pdata;
+ struct ds1511_data *ds1511;
int ret = 0;
struct nvmem_config ds1511_nvmem_cfg = {
.name = "ds1511_nvram",
@@ -420,21 +283,21 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
.priv = &pdev->dev,
};
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata)
+ ds1511 = devm_kzalloc(&pdev->dev, sizeof(*ds1511), GFP_KERNEL);
+ if (!ds1511)
return -ENOMEM;
ds1511_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ds1511_base))
return PTR_ERR(ds1511_base);
- pdata->ioaddr = ds1511_base;
- pdata->irq = platform_get_irq(pdev, 0);
+ ds1511->ioaddr = ds1511_base;
+ ds1511->irq = platform_get_irq(pdev, 0);
/*
* turn on the clock and the crystal, etc.
*/
- rtc_write(DS1511_BME, RTC_CMD);
- rtc_write(0, RTC_CMD1);
+ rtc_write(DS1511_BME, DS1511_CONTROL_B);
+ rtc_write(0, DS1511_CONTROL_A);
/*
* clear the wdog counter
*/
@@ -448,38 +311,43 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
/*
* check for a dying bat-tree
*/
- if (rtc_read(RTC_CMD1) & DS1511_BLF1)
+ if (rtc_read(DS1511_CONTROL_A) & DS1511_BLF1)
dev_warn(&pdev->dev, "voltage-low detected.\n");
- spin_lock_init(&pdata->lock);
- platform_set_drvdata(pdev, pdata);
-
- pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
- if (IS_ERR(pdata->rtc))
- return PTR_ERR(pdata->rtc);
-
- pdata->rtc->ops = &ds1511_rtc_ops;
+ spin_lock_init(&ds1511->lock);
+ platform_set_drvdata(pdev, ds1511);
- ret = devm_rtc_register_device(pdata->rtc);
- if (ret)
- return ret;
+ ds1511->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(ds1511->rtc))
+ return PTR_ERR(ds1511->rtc);
- devm_rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
+ ds1511->rtc->ops = &ds1511_rtc_ops;
+ ds1511->rtc->range_max = RTC_TIMESTAMP_END_2099;
+ ds1511->rtc->alarm_offset_max = 28 * 24 * 60 * 60 - 1;
/*
* if the platform has an interrupt in mind for this device,
* then by all means, set it
*/
- if (pdata->irq > 0) {
- rtc_read(RTC_CMD1);
- if (devm_request_irq(&pdev->dev, pdata->irq, ds1511_interrupt,
+ if (ds1511->irq > 0) {
+ rtc_read(DS1511_CONTROL_A);
+ if (devm_request_irq(&pdev->dev, ds1511->irq, ds1511_interrupt,
IRQF_SHARED, pdev->name, pdev) < 0) {
dev_warn(&pdev->dev, "interrupt not available.\n");
- pdata->irq = 0;
+ ds1511->irq = 0;
}
}
+ if (ds1511->irq == 0)
+ clear_bit(RTC_FEATURE_ALARM, ds1511->rtc->features);
+
+ ret = devm_rtc_register_device(ds1511->rtc);
+ if (ret)
+ return ret;
+
+ devm_rtc_nvmem_register(ds1511->rtc, &ds1511_nvmem_cfg);
+
return 0;
}
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 866489ad56d6..0013bff0447d 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -909,10 +909,7 @@ static int m41t80_probe(struct i2c_client *client)
if (IS_ERR(m41t80_data->rtc))
return PTR_ERR(m41t80_data->rtc);
-#ifdef CONFIG_OF
- wakeup_source = of_property_read_bool(client->dev.of_node,
- "wakeup-source");
-#endif
+ wakeup_source = device_property_read_bool(&client->dev, "wakeup-source");
if (client->irq > 0) {
unsigned long irqflags = IRQF_TRIGGER_LOW;
diff --git a/drivers/rtc/rtc-max31335.c b/drivers/rtc/rtc-max31335.c
index 402fda8fd548..a2441e5c2c74 100644
--- a/drivers/rtc/rtc-max31335.c
+++ b/drivers/rtc/rtc-max31335.c
@@ -204,7 +204,7 @@ static bool max31335_volatile_reg(struct device *dev, unsigned int reg)
return true;
/* interrupt status register */
- if (reg == MAX31335_INT_EN1_A1IE)
+ if (reg == MAX31335_STATUS1)
return true;
/* temperature registers */
diff --git a/drivers/rtc/rtc-nct3018y.c b/drivers/rtc/rtc-nct3018y.c
index f488a189a465..076d8b99f913 100644
--- a/drivers/rtc/rtc-nct3018y.c
+++ b/drivers/rtc/rtc-nct3018y.c
@@ -102,6 +102,8 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
if (flags < 0)
return flags;
*alarm_enable = flags & NCT3018Y_BIT_AIE;
+ dev_dbg(&client->dev, "%s:alarm_enable:%x\n", __func__, *alarm_enable);
+
}
if (alarm_flag) {
@@ -110,11 +112,9 @@ static int nct3018y_get_alarm_mode(struct i2c_client *client, unsigned char *ala
if (flags < 0)
return flags;
*alarm_flag = flags & NCT3018Y_BIT_AF;
+ dev_dbg(&client->dev, "%s:alarm_flag:%x\n", __func__, *alarm_flag);
}
- dev_dbg(&client->dev, "%s:alarm_enable:%x alarm_flag:%x\n",
- __func__, *alarm_enable, *alarm_flag);
-
return 0;
}
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index d1efde3e7a80..98b77f790b0c 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -370,6 +370,30 @@ static int pcf8523_rtc_set_offset(struct device *dev, long offset)
return regmap_write(pcf8523->regmap, PCF8523_REG_OFFSET, value);
}
+#ifdef CONFIG_PM_SLEEP
+static int pcf8523_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq > 0 && device_may_wakeup(dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int pcf8523_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (client->irq > 0 && device_may_wakeup(dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pcf8523_pm, pcf8523_suspend, pcf8523_resume);
+
static const struct rtc_class_ops pcf8523_rtc_ops = {
.read_time = pcf8523_rtc_read_time,
.set_time = pcf8523_rtc_set_time,
@@ -487,6 +511,7 @@ static struct i2c_driver pcf8523_driver = {
.driver = {
.name = "rtc-pcf8523",
.of_match_table = pcf8523_of_match,
+ .pm = &pcf8523_pm,
},
.probe = pcf8523_probe,
.id_table = pcf8523_id,
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index cead018c3f06..0a97cfedd706 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3976,7 +3976,7 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
@@ -4020,7 +4020,7 @@ char *dasd_get_sense(struct irb *irb)
if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
if (irb->scsw.tm.tcw)
- tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb && tsb->length == 64 && tsb->flags)
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 459b7f8ac883..bbbacfc386f2 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -216,7 +216,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = CCW_CMD_DCTL;
ccw->count = 4;
- ccw->cda = (__u32)virt_to_phys(DCTL_data);
+ ccw->cda = virt_to_dma32(DCTL_data);
dctl_cqr->flags = erp->flags;
dctl_cqr->function = dasd_3990_erp_DCTL;
dctl_cqr->refers = erp;
@@ -1589,7 +1589,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
{
struct dasd_device *device = default_erp->startdev;
- __u32 cpa = 0;
+ dma32_t cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
struct DE_eckd_data *DE_data;
@@ -1693,7 +1693,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(DE_data);
+ ccw->cda = virt_to_dma32(DE_data);
/* create LO ccw */
ccw++;
@@ -1701,7 +1701,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(LO_data);
+ ccw->cda = virt_to_dma32(LO_data);
/* TIC to the failed ccw */
ccw++;
@@ -1747,7 +1747,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
{
struct dasd_device *device = previous_erp->startdev;
- __u32 cpa = 0;
+ dma32_t cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
char *LO_data; /* struct LO_eckd_data */
@@ -2386,7 +2386,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
tcw = erp->cpaddr;
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
- tcw->tsb = virt_to_phys(tsb);
+ tcw->tsb = virt_to_dma64(tsb);
} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
/* PSF cannot be chained from NOOP/TIC */
erp->cpaddr = cqr->cpaddr;
@@ -2397,7 +2397,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
ccw->flags = CCW_FLAG_CC;
ccw++;
ccw->cmd_code = CCW_CMD_TIC;
- ccw->cda = (__u32)virt_to_phys(cqr->cpaddr);
+ ccw->cda = virt_to_dma32(cqr->cpaddr);
}
erp->flags = cqr->flags;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index e84cd5436556..f7e768d8ca76 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -435,7 +435,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
@@ -443,7 +443,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
- ccw->cda = (__u32)virt_to_phys(lcu->uac);
+ ccw->cda = virt_to_dma32(lcu->uac);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -739,7 +739,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 373c1a86c33e..180a008d38ea 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -283,7 +283,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(struct DE_eckd_data));
@@ -393,7 +393,7 @@ static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
ccw->count = 22;
else
ccw->count = 20;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(*data));
@@ -539,11 +539,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
ccw->count = sizeof(*pfxdata) + 2;
- ccw->cda = (__u32)virt_to_phys(pfxdata);
+ ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata) + 2);
} else {
ccw->count = sizeof(*pfxdata);
- ccw->cda = (__u32)virt_to_phys(pfxdata);
+ ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata));
}
@@ -610,7 +610,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof(struct LO_eckd_data));
sector = 0;
@@ -825,7 +825,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RCD;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(rcd_buffer);
+ ccw->cda = virt_to_dma32(rcd_buffer);
ccw->count = DASD_ECKD_RCD_DATA_SIZE;
cqr->magic = DASD_ECKD_MAGIC;
@@ -853,7 +853,7 @@ static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
if (cqr->status != DASD_CQR_DONE) {
ccw = cqr->cpaddr;
- rcd_buffer = phys_to_virt(ccw->cda);
+ rcd_buffer = dma32_to_virt(ccw->cda);
memset(rcd_buffer, 0, sizeof(*rcd_buffer));
rcd_buffer[0] = 0xE5;
@@ -1534,7 +1534,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
features = (struct dasd_rssd_features *) (prssdp + 1);
@@ -1543,7 +1543,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_features);
- ccw->cda = (__u32)virt_to_phys(features);
+ ccw->cda = virt_to_dma32(features);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1603,7 +1603,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Volume Storage Query */
vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
@@ -1613,7 +1613,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*vsq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(vsq);
+ ccw->cda = virt_to_dma32(vsq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1788,7 +1788,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
memset(lcq, 0, sizeof(*lcq));
@@ -1797,7 +1797,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*lcq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(lcq);
+ ccw->cda = virt_to_dma32(lcq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1894,7 +1894,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
+ ccw->cda = virt_to_dma32(psf_ssc_data);
ccw->count = 66;
cqr->startdev = device;
@@ -2250,7 +2250,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw->cda = virt_to_dma32(count_data);
ccw++;
count_data++;
}
@@ -2264,7 +2264,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw->cda = virt_to_dma32(count_data);
cqr->block = NULL;
cqr->startdev = device;
@@ -2635,7 +2635,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(fmt_buffer);
+ ccw->cda = virt_to_dma32(fmt_buffer);
ccw++;
fmt_buffer++;
}
@@ -2845,7 +2845,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
@@ -2860,7 +2860,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
@@ -2895,7 +2895,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
ccw++;
}
}
@@ -3836,7 +3836,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
}
ccw = cqr->cpaddr;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
ccw->cmd_code = DASD_ECKD_CCW_DSO;
ccw->count = size;
@@ -3961,7 +3961,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
unsigned int blksize)
{
struct dasd_eckd_private *private;
- unsigned long *idaws;
+ dma64_t *idaws;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -4039,8 +4039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
- idaws = (unsigned long *) (cqr->data +
- sizeof(struct PFX_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, 0) == -EAGAIN) {
@@ -4050,8 +4049,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
- idaws = (unsigned long *) (cqr->data +
- sizeof(struct DE_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data));
}
/* Build locate_record+read/write/ccws. */
LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -4105,11 +4103,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
ccw->cmd_code = rcmd;
ccw->count = count;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
@@ -4152,7 +4150,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
unsigned int blk_per_trk,
unsigned int blksize)
{
- unsigned long *idaws;
+ dma64_t *idaws;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
@@ -4222,7 +4220,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* (or 2K blocks on 31-bit)
* - the scope of a ccw and its idal ends with the track boundaries
*/
- idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
recid = first_rec;
new_track = 1;
end_idaw = 0;
@@ -4243,7 +4241,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
ccw->count = len_to_track_end;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
ccw++;
recid += count;
@@ -4259,7 +4257,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idaw ends
*/
if (!idaw_dst) {
- if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
+ if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
} else
@@ -4279,7 +4277,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idal_create_words will handle cases where idaw_len
* is larger than IDA_BLOCK_SIZE
*/
- if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
+ if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
end_idaw = 1;
/* We also need to end the idaw at track end */
if (!len_to_track_end) {
@@ -4738,11 +4736,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct req_iterator iter;
struct dasd_ccw_req *cqr;
unsigned int trkcount;
- unsigned long *idaws;
unsigned int size;
unsigned char cmd;
struct bio_vec bv;
struct ccw1 *ccw;
+ dma64_t *idaws;
int use_prefix;
void *data;
char *dst;
@@ -4823,7 +4821,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
trkcount, cmd, basedev, 0, 0);
}
- idaws = (unsigned long *)(cqr->data + size);
+ idaws = (dma64_t *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -4832,7 +4830,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4851,7 +4849,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4908,9 +4906,9 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)phys_to_virt(ccw->cda));
+ cda = *((char **)dma32_to_virt(ccw->cda));
else
- cda = phys_to_virt(ccw->cda);
+ cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -5060,7 +5058,7 @@ dasd_eckd_release(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5115,7 +5113,7 @@ dasd_eckd_reserve(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5169,7 +5167,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5230,7 +5228,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_SNID;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 12;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5297,7 +5295,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Performance Statistics */
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
@@ -5306,7 +5304,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
- ccw->cda = (__u32)virt_to_phys(stats);
+ ccw->cda = virt_to_dma32(stats);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -5450,7 +5448,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = usrparm.psf_data_len;
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(psf_data);
+ ccw->cda = virt_to_dma32(psf_data);
ccw++;
@@ -5458,7 +5456,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = usrparm.rssd_result_len;
ccw->flags = CCW_FLAG_SLI ;
- ccw->cda = (__u32)virt_to_phys(rssd_result);
+ ccw->cda = virt_to_dma32(rssd_result);
rc = dasd_sleep_on(cqr);
if (rc)
@@ -5527,9 +5525,9 @@ dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
- datap = (char *)*((addr_t *)phys_to_virt(from->cda));
+ datap = (char *)*((addr_t *)dma32_to_virt(from->cda));
else
- datap = phys_to_virt(from->cda);
+ datap = dma32_to_virt(from->cda);
/* dump data (max 128 bytes) */
for (count = 0; count < from->count && count < 128; count++) {
@@ -5598,7 +5596,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
len += sprintf(page + len, "Failing CCW: %px\n",
- phys_to_virt(irb->scsw.cmd.cpa));
+ dma32_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, "Sense(hex) %2d-%2d:",
@@ -5641,7 +5639,7 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
from = ++to;
- fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
+ fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
dev_err(dev, "......\n");
@@ -5691,12 +5689,12 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
len += sprintf(page + len, "Failing TCW: %px\n",
- phys_to_virt(irb->scsw.tm.tcw));
+ dma32_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
sense = NULL;
if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
- tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb) {
len += sprintf(page + len, "tsb->length %d\n", tsb->length);
@@ -5906,7 +5904,7 @@ retry:
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
@@ -5916,7 +5914,7 @@ retry:
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(message_buf);
+ ccw->cda = virt_to_dma32(message_buf);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -5997,14 +5995,14 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(host_access);
+ ccw->cda = virt_to_dma32(host_access);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6239,14 +6237,14 @@ static int dasd_eckd_query_pprc_status(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)prssdp;
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*pprc_data);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)pprc_data;
+ ccw->cda = virt_to_dma32(pprc_data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6340,7 +6338,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)virt_to_phys(psf_cuir);
+ ccw->cda = virt_to_dma32(psf_cuir);
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
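
The dasd_eckd.c hunks above all follow one mechanical pattern: CCW data addresses stop being raw (__u32)virt_to_phys() casts and become the typed dma32_t value returned by virt_to_dma32(), with dma32_to_virt() for the reverse lookup, so mixing CPU pointers with channel-program addresses now fails type checking. A rough sketch of that pattern with hypothetical helpers (assuming the s390 <asm/cio.h> and <asm/dma-types.h> definitions used throughout the patch):

#include <linux/types.h>
#include <asm/cio.h>		/* struct ccw1, CCW_FLAG_SLI */
#include <asm/dma-types.h>	/* dma32_t, virt_to_dma32(), dma32_to_virt() */

/* Hypothetical helpers, not part of the patch. */
static void foo_fill_ccw(struct ccw1 *ccw, u8 cmd, void *buf, u16 len)
{
	ccw->cmd_code = cmd;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = virt_to_dma32(buf);	/* was: (__u32)virt_to_phys(buf) */
}

static void *foo_ccw_buffer(struct ccw1 *ccw)
{
	return dma32_to_virt(ccw->cda);	/* was: phys_to_virt(ccw->cda) */
}
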
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 5064a616e041..194e9e2d9cb8 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -485,7 +485,7 @@ int dasd_eer_enable(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index bcbb2f8e91fe..361e9bd75257 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -78,7 +78,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
@@ -98,7 +98,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
@@ -257,7 +257,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
- ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page);
+ ccw->cda = virt_to_dma32(dasd_fba_zero_page);
}
/*
@@ -427,7 +427,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
struct request *req)
{
struct dasd_fba_private *private = block->base->private;
- unsigned long *idaws;
+ dma64_t *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -487,7 +487,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
- idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
/* Locate record for all blocks for smart devices. */
if (private->rdc_data.mode.bits.data_chain != 0) {
@@ -523,11 +523,11 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
@@ -585,9 +585,9 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)phys_to_virt(ccw->cda));
+ cda = *((char **)dma32_to_virt(ccw->cda));
else
- cda = phys_to_virt(ccw->cda);
+ cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -672,7 +672,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n",
req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
len += sprintf(page + len, "Failing CCW: %px\n",
- (void *) (addr_t) irb->scsw.cmd.cpa);
+ (void *)(u64)dma32_to_u32(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
len += sprintf(page + len, "Sense(hex) %2d-%2d:",
@@ -701,7 +701,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
@@ -710,18 +710,18 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print failing CCW area */
len = 0;
- if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
- act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
+ if (act < ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2) {
+ act = ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2;
len += sprintf(page + len, "......\n");
}
- end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
+ end = min((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) + 2, last);
while (act <= end) {
len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
@@ -738,7 +738,7 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ba66aa6a83c6..6d1689a2717e 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -920,7 +920,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
dev_sz = dev_info->end - dev_info->start + 1;
if (kaddr)
- *kaddr = (void *) dev_info->start + offset;
+ *kaddr = __va(dev_info->start + offset);
if (pfn)
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
PFN_DEV|PFN_SPECIAL);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 9f6fdd0daa74..1d456a5a3bfb 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -131,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
msb = &scmrq->aob->msb[i];
- aidaw = (u64)phys_to_virt(msb->data_addr);
+ aidaw = (u64)dma64_to_virt(msb->data_addr);
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
@@ -196,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
- msb->data_addr = (u64)virt_to_phys(aidaw);
+ msb->data_addr = virt_to_dma64(aidaw);
rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
- aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page));
aidaw++;
}
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 0b0324fe4aff..dcb3c32f027a 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -159,7 +159,7 @@ static void raw3215_mk_read_req(struct raw3215_info *raw)
ccw->cmd_code = 0x0A; /* read inquiry */
ccw->flags = 0x20; /* ignore incorrect length */
ccw->count = 160;
- ccw->cda = (__u32)__pa(raw->inbuf);
+ ccw->cda = virt_to_dma32(raw->inbuf);
}
/*
@@ -218,7 +218,7 @@ static void raw3215_mk_write_req(struct raw3215_info *raw)
ccw[-1].flags |= 0x40; /* use command chaining */
ccw->cmd_code = 0x01; /* write, auto carrier return */
ccw->flags = 0x20; /* ignore incorrect length ind. */
- ccw->cda = (__u32)__pa(raw->buffer + ix);
+ ccw->cda = virt_to_dma32(raw->buffer + ix);
count = len;
if (ix + count > RAW3215_BUFFER_SIZE)
count = RAW3215_BUFFER_SIZE - ix;
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 4f26b0a55620..4d824f86bbbb 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -126,7 +126,7 @@ static int fs3270_activate(struct raw3270_view *view)
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
- cp = fp->rdbuf->data[0];
+ cp = dma64_to_virt(fp->rdbuf->data[0]);
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
fp->init->ccw.count = 1;
@@ -164,7 +164,7 @@ static void fs3270_save_callback(struct raw3270_request *rq, void *data)
fp = (struct fs3270 *)rq->view;
/* Correct idal buffer element 0 address. */
- fp->rdbuf->data[0] -= 5;
+ fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], -5);
fp->rdbuf->size += 5;
/*
@@ -202,7 +202,7 @@ static void fs3270_deactivate(struct raw3270_view *view)
* room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
* in the activation command.
*/
- fp->rdbuf->data[0] += 5;
+ fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], 5);
fp->rdbuf->size -= 5;
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
@@ -521,13 +521,13 @@ static const struct file_operations fs3270_fops = {
static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
- device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+ device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
static void fs3270_destroy_cb(int minor)
{
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+ device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
@@ -546,7 +546,7 @@ static int __init fs3270_init(void)
rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
- device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+ device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
NULL, "3270/tub");
raw3270_register_notifier(&fs3270_notifier);
return 0;
@@ -555,7 +555,7 @@ static int __init fs3270_init(void)
static void __exit fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+ device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, 0));
__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7115c0f85650..37173cb0f5f5 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,9 @@
#include <linux/device.h>
#include <linux/mutex.h>
-struct class *class3270;
+const struct class class3270 = {
+ .name = "3270",
+};
EXPORT_SYMBOL(class3270);
/* The main 3270 data structure. */
@@ -160,7 +162,7 @@ struct raw3270_request *raw3270_request_alloc(size_t size)
/*
* Setup ccw.
*/
- rq->ccw.cda = __pa(rq->buffer);
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
@@ -186,7 +188,7 @@ int raw3270_request_reset(struct raw3270_request *rq)
return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
- rq->ccw.cda = __pa(rq->buffer);
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
@@ -221,7 +223,7 @@ EXPORT_SYMBOL(raw3270_request_add_data);
*/
void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
- rq->ccw.cda = __pa(data);
+ rq->ccw.cda = virt_to_dma32(data);
rq->ccw.count = size;
}
EXPORT_SYMBOL(raw3270_request_set_data);
@@ -231,7 +233,7 @@ EXPORT_SYMBOL(raw3270_request_set_data);
*/
void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
- rq->ccw.cda = __pa(ib->data);
+ rq->ccw.cda = virt_to_dma32(ib->data);
rq->ccw.count = ib->size;
rq->ccw.flags |= CCW_FLAG_IDA;
}
@@ -577,7 +579,7 @@ static void raw3270_read_modified(struct raw3270 *rp)
rp->init_readmod.ccw.cmd_code = TC_READMOD;
rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
rp->init_readmod.ccw.count = sizeof(rp->init_data);
- rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data);
+ rp->init_readmod.ccw.cda = virt_to_dma32(rp->init_data);
rp->init_readmod.callback = raw3270_read_modified_cb;
rp->init_readmod.callback_data = rp->init_data;
rp->state = RAW3270_STATE_READMOD;
@@ -597,7 +599,7 @@ static void raw3270_writesf_readpart(struct raw3270 *rp)
rp->init_readpart.ccw.cmd_code = TC_WRITESF;
rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
rp->init_readpart.ccw.count = sizeof(wbuf);
- rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data);
+ rp->init_readpart.ccw.cda = virt_to_dma32(&rp->init_data);
rp->state = RAW3270_STATE_W4ATTN;
raw3270_start_irq(&rp->init_view, &rp->init_readpart);
}
@@ -635,7 +637,7 @@ static int __raw3270_reset_device(struct raw3270 *rp)
rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI;
rp->init_reset.ccw.count = 1;
- rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data);
+ rp->init_reset.ccw.cda = virt_to_dma32(rp->init_data);
rp->init_reset.callback = raw3270_reset_device_cb;
rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
if (rc == 0 && rp->state == RAW3270_STATE_INIT)
@@ -1316,23 +1318,25 @@ static int raw3270_init(void)
return 0;
raw3270_registered = 1;
rc = ccw_driver_register(&raw3270_ccw_driver);
- if (rc == 0) {
- /* Create attributes for early (= console) device. */
- mutex_lock(&raw3270_mutex);
- class3270 = class_create("3270");
- list_for_each_entry(rp, &raw3270_devices, list) {
- get_device(&rp->cdev->dev);
- raw3270_create_attributes(rp);
- }
- mutex_unlock(&raw3270_mutex);
+ if (rc)
+ return rc;
+ rc = class_register(&class3270);
+ if (rc)
+ return rc;
+ /* Create attributes for early (= console) device. */
+ mutex_lock(&raw3270_mutex);
+ list_for_each_entry(rp, &raw3270_devices, list) {
+ get_device(&rp->cdev->dev);
+ raw3270_create_attributes(rp);
}
- return rc;
+ mutex_unlock(&raw3270_mutex);
+ return 0;
}
static void raw3270_exit(void)
{
ccw_driver_unregister(&raw3270_ccw_driver);
- class_destroy(class3270);
+ class_unregister(&class3270);
}
MODULE_LICENSE("GPL");
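
The raw3270.c change above is an instance of the generic move away from class_create(): the class becomes a statically defined const struct class registered with class_register()/class_unregister(), which removes the runtime allocation and the pointer that had to be NULL-checked. A hedged, stand-alone sketch of the same pattern (hypothetical module, not from this patch):

#include <linux/device/class.h>
#include <linux/module.h>

static const struct class example_class = {
	.name = "example",
};

static int __init example_init(void)
{
	/* Registration either succeeds or returns an error code. */
	return class_register(&example_class);
}

static void __exit example_exit(void)
{
	class_unregister(&example_class);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
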
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index b1beecc7a0a9..5040c7e0e051 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -14,7 +14,7 @@
struct raw3270;
struct raw3270_view;
-extern struct class *class3270;
+extern const struct class class3270;
/* 3270 CCW request */
struct raw3270_request {
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 4e5d5efa978f..0aba30efb483 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -305,7 +305,9 @@ tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = memsize;
- ccw->cda = (__u32)(addr_t) cda;
+ ccw->cda = 0;
+ if (cda)
+ ccw->cda = virt_to_dma32(cda);
return ccw + 1;
}
@@ -315,7 +317,9 @@ tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = memsize;
- ccw->cda = (__u32)(addr_t) cda;
+ ccw->cda = 0;
+ if (cda)
+ ccw->cda = virt_to_dma32(cda);
return ccw + 1;
}
@@ -325,7 +329,7 @@ tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = 0;
- ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+ ccw->cda = virt_to_dma32(&ccw->cmd_code);
return ccw + 1;
}
@@ -336,7 +340,7 @@ tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = 0;
- ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+ ccw->cda = virt_to_dma32(&ccw->cmd_code);
ccw++;
}
return ccw;
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 277a0f903d11..eae362bbfbb5 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -22,7 +22,9 @@ MODULE_DESCRIPTION(
);
MODULE_LICENSE("GPL");
-static struct class *tape_class;
+static const struct class tape_class = {
+ .name = "tape390",
+};
/*
* Register a tape device and return a pointer to the cdev structure.
@@ -74,7 +76,7 @@ struct tape_class_device *register_tape_dev(
if (rc)
goto fail_with_cdev;
- tcd->class_device = device_create(tape_class, device,
+ tcd->class_device = device_create(&tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = PTR_ERR_OR_ZERO(tcd->class_device);
@@ -91,7 +93,7 @@ struct tape_class_device *register_tape_dev(
return tcd;
fail_with_class_device:
- device_destroy(tape_class, tcd->char_device->dev);
+ device_destroy(&tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
@@ -107,7 +109,7 @@ void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
- device_destroy(tape_class, tcd->char_device->dev);
+ device_destroy(&tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
@@ -117,15 +119,12 @@ EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
- tape_class = class_create("tape390");
-
- return 0;
+ return class_register(&tape_class);
}
static void __exit tape_exit(void)
{
- class_destroy(tape_class);
- tape_class = NULL;
+ class_unregister(&tape_class);
}
postcore_initcall(tape_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6946ba9a9de2..d7e408c8d0b8 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -679,7 +679,9 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = {
NULL,
};
-static struct class *vmlogrdr_class;
+static const struct class vmlogrdr_class = {
+ .name = "vmlogrdr_class",
+};
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
@@ -699,12 +701,9 @@ static int vmlogrdr_register_driver(void)
if (ret)
goto out_iucv;
- vmlogrdr_class = class_create("vmlogrdr");
- if (IS_ERR(vmlogrdr_class)) {
- ret = PTR_ERR(vmlogrdr_class);
- vmlogrdr_class = NULL;
+ ret = class_register(&vmlogrdr_class);
+ if (ret)
goto out_driver;
- }
return 0;
out_driver:
@@ -718,8 +717,7 @@ out:
static void vmlogrdr_unregister_driver(void)
{
- class_destroy(vmlogrdr_class);
- vmlogrdr_class = NULL;
+ class_unregister(&vmlogrdr_class);
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
@@ -754,7 +752,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
return ret;
}
- priv->class_device = device_create(vmlogrdr_class, dev,
+ priv->class_device = device_create(&vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
priv, "%s", dev_name(dev));
@@ -771,7 +769,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
- device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+ device_destroy(&vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
device_unregister(priv->device);
priv->device=NULL;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 1d17a83569ce..fe94dec427b6 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -48,7 +48,9 @@ MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");
static dev_t ur_first_dev_maj_min;
-static struct class *vmur_class;
+static const struct class vmur_class = {
+ .name = "vmur",
+};
static struct debug_info *vmur_dbf;
/* We put the device's record length (for writes) in the driver_info field */
@@ -195,7 +197,7 @@ static void free_chan_prog(struct ccw1 *cpa)
struct ccw1 *ptr = cpa;
while (ptr->cda) {
- kfree(phys_to_virt(ptr->cda));
+ kfree(dma32_to_virt(ptr->cda));
ptr++;
}
kfree(cpa);
@@ -237,7 +239,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
free_chan_prog(cpa);
return ERR_PTR(-ENOMEM);
}
- cpa[i].cda = (u32)virt_to_phys(kbuf);
+ cpa[i].cda = virt_to_dma32(kbuf);
if (copy_from_user(kbuf, ubuf, reclen)) {
free_chan_prog(cpa);
return ERR_PTR(-EFAULT);
@@ -912,7 +914,7 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
}
- urd->device = device_create(vmur_class, &cdev->dev,
+ urd->device = device_create(&vmur_class, &cdev->dev,
urd->char_device->dev, NULL, "%s", node_id);
if (IS_ERR(urd->device)) {
rc = PTR_ERR(urd->device);
@@ -958,7 +960,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
/* Work not run yet - need to release reference here */
urdev_put(urd);
}
- device_destroy(vmur_class, urd->char_device->dev);
+ device_destroy(&vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
urd->char_device = NULL;
rc = 0;
@@ -1022,11 +1024,9 @@ static int __init ur_init(void)
debug_set_level(vmur_dbf, 6);
- vmur_class = class_create("vmur");
- if (IS_ERR(vmur_class)) {
- rc = PTR_ERR(vmur_class);
+ rc = class_register(&vmur_class);
+ if (rc)
goto fail_free_dbf;
- }
rc = ccw_driver_register(&ur_driver);
if (rc)
@@ -1046,7 +1046,7 @@ static int __init ur_init(void)
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
fail_class_destroy:
- class_destroy(vmur_class);
+ class_unregister(&vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
@@ -1056,7 +1056,7 @@ static void __exit ur_exit(void)
{
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
- class_destroy(vmur_class);
+ class_unregister(&vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 6eb8bcd948dc..b72f672a7720 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -240,7 +240,7 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
&gdev->dev.kobj, "group_device");
if (rc) {
- for (--i; i >= 0; i--)
+ while (i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
@@ -251,7 +251,7 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
rc = sysfs_create_link(&gdev->dev.kobj,
&gdev->cdev[i]->dev.kobj, str);
if (rc) {
- for (--i; i >= 0; i--) {
+ while (i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
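
The ccwgroup.c hunks above swap the descending for loop for while (i--), the usual unwind idiom: it walks back over the entries set up before index i failed and, unlike the i >= 0 form, stays correct when i has an unsigned type. A generic sketch of the idiom (all names hypothetical):

static int register_all(struct item *items, unsigned int n)
{
	unsigned int i;
	int rc;

	for (i = 0; i < n; i++) {
		rc = register_one(&items[i]);
		if (rc) {
			while (i--)	/* undo entries 0..i-1 in reverse order */
				unregister_one(&items[i]);
			return rc;
		}
	}
	return 0;
}
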
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 3d88899dff7c..44ea76f9e1de 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(chsc_ssqd);
* Returns 0 on success.
*/
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
+ dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc)
{
memset(scssc, 0, sizeof(*scssc));
scssc->request.length = 0x0fe0;
@@ -844,7 +844,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
}
return ret;
cleanup:
- for (--i; i >= 0; i--) {
+ while (i--) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
@@ -861,9 +861,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 key : 4;
u32 : 28;
u32 zeroes1;
- u32 cub_addr1;
+ dma32_t cub_addr1;
u32 zeroes2;
- u32 cub_addr2;
+ dma32_t cub_addr2;
u32 reserved[13];
struct chsc_header response;
u32 status : 8;
@@ -881,8 +881,8 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY >> 4;
- secm_area->cub_addr1 = virt_to_phys(css->cub_addr1);
- secm_area->cub_addr2 = virt_to_phys(css->cub_addr2);
+ secm_area->cub_addr1 = virt_to_dma32(css->cub_addr1);
+ secm_area->cub_addr2 = virt_to_dma32(css->cub_addr2);
secm_area->operation_code = enable ? 0 : 1;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1caacb08e67..03602295f335 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -91,8 +91,8 @@ struct chsc_scssc_area {
u16:16;
u32:32;
u32:32;
- u64 summary_indicator_addr;
- u64 subchannel_indicator_addr;
+ dma64_t summary_indicator_addr;
+ dma64_t subchannel_indicator_addr;
u32 ks:4;
u32 kc:4;
u32:21;
@@ -164,7 +164,7 @@ void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+ dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr,
u8 isc);
int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a5736b7357b2..7e759c21480e 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -148,7 +148,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
orb->cmd.i2k = 0;
orb->cmd.key = key >> 4;
/* issue "Start Subchannel" */
- orb->cmd.cpa = (u32)virt_to_phys(cpa);
+ orb->cmd.cpa = virt_to_dma32(cpa);
ccode = ssch(sch->schid, orb);
/* process condition code */
@@ -717,7 +717,7 @@ int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
orb->tm.key = key >> 4;
orb->tm.b = 1;
orb->tm.lpm = lpm ? lpm : sch->lpm;
- orb->tm.tcw = (u32)virt_to_phys(tcw);
+ orb->tm.tcw = virt_to_dma32(tcw);
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 094431a62ad5..1d68db1a3d4e 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1114,26 +1114,33 @@ static int cio_dma_pool_init(void)
return 0;
}
-void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
- size_t size)
+void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size, dma32_t *dma_handle)
{
dma_addr_t dma_addr;
- unsigned long addr;
size_t chunk_size;
+ void *addr;
if (!gp_dma)
return NULL;
- addr = gen_pool_alloc(gp_dma, size);
+ addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
while (!addr) {
chunk_size = round_up(size, PAGE_SIZE);
- addr = (unsigned long) dma_alloc_coherent(dma_dev,
- chunk_size, &dma_addr, CIO_DMA_GFP);
+ addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP);
if (!addr)
return NULL;
- gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
- addr = gen_pool_alloc(gp_dma, size);
+ gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL);
}
- return (void *) addr;
+ if (dma_handle)
+ *dma_handle = (__force dma32_t)dma_addr;
+ return addr;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL);
}
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c396ac3e3a32..65d8b2cfd626 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -64,13 +64,13 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: orb indicates transport mode\n");
printk(KERN_WARNING "cio: last tcw:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
- phys_to_virt(orb->tm.tcw),
+ dma32_to_virt(orb->tm.tcw),
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa ==
+ if (dma32_to_virt(orb->cmd.cpa) ==
&private->dma_area->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa ==
+ dma32_to_virt(orb->cmd.cpa) ==
cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
@@ -78,7 +78,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: last channel program:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
- phys_to_virt(orb->cmd.cpa),
+ dma32_to_virt(orb->cmd.cpa),
sizeof(struct ccw1), 0);
}
printk(KERN_WARNING "cio: ccw device state: %d\n",
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index ce99ee2457e6..a512eac83485 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -210,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->senseid);
+ cp->cda = virt_to_dma32(&cdev->private->dma_area->senseid);
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a5dba3829769..40c97f873075 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -823,13 +823,14 @@ EXPORT_SYMBOL_GPL(ccw_device_get_chid);
* the subchannels dma pool. Maximal size of allocation supported
* is PAGE_SIZE.
*/
-void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
+ dma32_t *dma_handle)
{
void *addr;
if (!get_device(&cdev->dev))
return NULL;
- addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+ addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle);
if (IS_ERR_OR_NULL(addr))
put_device(&cdev->dev);
return addr;
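
With the dma_handle argument added above, ccw_device_dma_zalloc() can hand back the 31-bit channel address together with the kernel virtual address, so callers no longer need a separate address conversion of their own. A hypothetical caller sketch (buffer size and error handling are illustrative only):

static int foo_setup_sense(struct ccw_device *cdev, struct ccw1 *ccw, size_t size)
{
	dma32_t cda;
	void *buf;

	buf = ccw_device_dma_zalloc(cdev, size, &cda);
	if (!buf)
		return -ENOMEM;

	ccw->cda = cda;		/* already a channel-program address */
	ccw->count = size;
	return 0;
}
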
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index ad90045873e2..b3afe283cc10 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -141,7 +141,7 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
- cp->cda = (u32)virt_to_phys(pgid);
+ cp->cda = virt_to_dma32(pgid);
cp->count = sizeof(*pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -442,7 +442,7 @@ static void snid_build_cp(struct ccw_device *cdev)
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]);
+ cp->cda = virt_to_dma32(&cdev->private->dma_area->pgid[i]);
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -632,11 +632,11 @@ static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
- cp[0].cda = (u32)virt_to_phys(buf1);
+ cp[0].cda = virt_to_dma32(buf1);
cp[0].count = 32;
cp[0].flags = CCW_FLAG_CC;
cp[1].cmd_code = CCW_CMD_RELEASE;
- cp[1].cda = (u32)virt_to_phys(buf2);
+ cp[1].cda = virt_to_dma32(buf2);
cp[1].count = 32;
cp[1].flags = 0;
req->cp = cp;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 6c2e35065fec..0ff8482a7b15 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -332,7 +332,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
*/
sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = virt_to_phys(cdev->private->dma_area->irb.ecw);
+ sense_ccw->cda = virt_to_dma32(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 1caedf931a5f..165de1552301 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -63,7 +63,7 @@ static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
int cc;
orb_init(orb);
- orb->eadm.aob = (u32)virt_to_phys(aob);
+ orb->eadm.aob = virt_to_dma32(aob);
orb->eadm.intparm = (u32)virt_to_phys(sch);
orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
@@ -147,7 +147,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
css_sched_sch_todo(sch, SCH_TODO_EVAL);
return;
}
- scm_irq_handler(phys_to_virt(scsw->aob), error);
+ scm_irq_handler(dma32_to_virt(scsw->aob), error);
private->state = EADM_IDLE;
if (private->completion)
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 84f24a2f46e4..ba35b64949d3 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -25,7 +25,7 @@
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
- return phys_to_virt(tcw->intrg);
+ return dma32_to_virt(tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
@@ -40,9 +40,9 @@ EXPORT_SYMBOL(tcw_get_intrg);
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
- return phys_to_virt(tcw->input);
+ return dma64_to_virt(tcw->input);
if (tcw->w)
- return phys_to_virt(tcw->output);
+ return dma64_to_virt(tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(tcw_get_data);
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
- return phys_to_virt(tcw->tccb);
+ return dma64_to_virt(tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(tcw_get_tccb);
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
- return phys_to_virt(tcw->tsb);
+ return dma64_to_virt(tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(tcw_finalize);
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
- tcw->intrg = (u32)virt_to_phys(intrg_tcw);
+ tcw->intrg = virt_to_dma32(intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
@@ -208,11 +208,11 @@ EXPORT_SYMBOL(tcw_set_intrg);
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
- tcw->input = virt_to_phys(data);
+ tcw->input = virt_to_dma64(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
- tcw->output = virt_to_phys(data);
+ tcw->output = virt_to_dma64(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
@@ -228,7 +228,7 @@ EXPORT_SYMBOL(tcw_set_data);
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
- tcw->tccb = virt_to_phys(tccb);
+ tcw->tccb = virt_to_dma64(tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
@@ -241,7 +241,7 @@ EXPORT_SYMBOL(tcw_set_tccb);
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
- tcw->tsb = virt_to_phys(tsb);
+ tcw->tsb = virt_to_dma64(tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
@@ -346,7 +346,7 @@ struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
- tidaw->addr = virt_to_phys(addr);
+ tidaw->addr = virt_to_dma64(addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index a2d3778b2c95..14d2a1822b50 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -12,6 +12,9 @@
#ifndef S390_ORB_H
#define S390_ORB_H
+#include <linux/types.h>
+#include <asm/dma-types.h>
+
/*
* Command-mode operation request block
*/
@@ -34,7 +37,7 @@ struct cmd_orb {
u32 ils:1; /* incorrect length */
u32 zero:6; /* reserved zeros */
u32 orbx:1; /* ORB extension control */
- u32 cpa; /* channel program address */
+ dma32_t cpa; /* channel program address */
} __packed __aligned(4);
/*
@@ -49,7 +52,7 @@ struct tm_orb {
u32 lpm:8;
u32:7;
u32 x:1;
- u32 tcw;
+ dma32_t tcw;
u32 prio:8;
u32:8;
u32 rsvpgm:8;
@@ -71,7 +74,7 @@ struct eadm_orb {
u32 compat2:1;
u32:21;
u32 x:1;
- u32 aob;
+ dma32_t aob;
u32 css_prio:8;
u32:8;
u32 scm_prio:8;
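
The cio hunks above capture the core of this series: channel-program address fields become the typed dma32_t/dma64_t from <asm/dma-types.h>, and every assignment goes through an explicit helper rather than a raw (u32)virt_to_phys() cast, so CPU pointers and channel addresses can no longer be mixed silently. Below is a minimal sketch of that pattern, using only helpers exercised in this diff (virt_to_dma32, virt_to_dma64, dma64_add, dma32_to_u32, dma64_to_virt); the demo_* structures are invented stand-ins for fields such as ccw1.cda and the qdio SBAL element address, not kernel types, and the sketch is not part of the patch.

/* Illustrative sketch only, not part of the patch. */
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/dma-types.h>

struct demo_ccw {
        dma32_t cda;            /* 31-bit channel data address */
};

struct demo_sbale {
        dma64_t addr;           /* 64-bit storage block address */
};

static void demo_fill(struct demo_ccw *ccw, struct demo_sbale *sbale,
                      void *buf, unsigned long offset)
{
        /* typed helpers replace the old (u32)virt_to_phys() casts */
        ccw->cda = virt_to_dma32(buf);
        sbale->addr = virt_to_dma64(buf);

        /* address arithmetic is explicit: dma64_add() instead of '+' */
        sbale->addr = dma64_add(sbale->addr, offset);

        /* converting back for CPU access or logging is explicit as well */
        pr_debug("cda=%x data=%p\n", dma32_to_u32(ccw->cda),
                 dma64_to_virt(sbale->addr));
}
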
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9cde55730b65..3d9f0834c78b 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -82,7 +82,7 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask,
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned long fc,
- unsigned long aob)
+ dma64_t aob)
{
int cc;
@@ -321,7 +321,7 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
}
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
- unsigned int *busy_bit, unsigned long aob)
+ unsigned int *busy_bit, dma64_t aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
@@ -628,7 +628,7 @@ int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
- unsigned long aob)
+ dma64_t aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -1070,7 +1070,7 @@ int qdio_establish(struct ccw_device *cdev,
irq_ptr->ccw->cmd_code = ciw->cmd;
irq_ptr->ccw->flags = CCW_FLAG_SLI;
irq_ptr->ccw->count = ciw->count;
- irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);
+ irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr);
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
@@ -1263,9 +1263,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int co
qperf_inc(q, outbound_queue_full);
if (queue_type(q) == QDIO_IQDIO_QFMT) {
- unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
+ dma64_t phys_aob = aob ? virt_to_dma64(aob) : 0;
- WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
+ WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256));
rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (qdio_need_siga_sync(q->irq_ptr)) {
rc = qdio_sync_output_queue(q);
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 714878e2acc4..99c0fd23022d 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -179,7 +179,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
+ q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]);
}
static void setup_queues(struct qdio_irq *irq_ptr,
@@ -291,9 +291,9 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
{
- desc->sliba = virt_to_phys(queue->slib);
- desc->sla = virt_to_phys(queue->sl);
- desc->slsba = virt_to_phys(&queue->slsb);
+ desc->sliba = virt_to_dma64(queue->slib);
+ desc->sla = virt_to_dma64(queue->sl);
+ desc->slsba = virt_to_dma64(&queue->slsb);
desc->akey = PAGE_DEFAULT_KEY >> 4;
desc->bkey = PAGE_DEFAULT_KEY >> 4;
@@ -315,7 +315,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
- irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
+ irq_ptr->qdr->qiba = virt_to_dma64(&irq_ptr->qib);
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 9b9335dd06db..ccd4ed93bd92 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -137,15 +137,15 @@ static struct airq_struct tiqdio_airq = {
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
- u64 summary_indicator_addr, subchannel_indicator_addr;
+ dma64_t summary_indicator_addr, subchannel_indicator_addr;
int rc;
if (reset) {
summary_indicator_addr = 0;
subchannel_indicator_addr = 0;
} else {
- summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
- subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
+ summary_indicator_addr = virt_to_dma64(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_dma64(irq_ptr->dsci);
}
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index aafd66305ead..6e5c508b1e07 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -190,7 +190,7 @@ static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
}
/* Create the list of IDAL words for a page_array. */
static inline void page_array_idal_create_words(struct page_array *pa,
- unsigned long *idaws)
+ dma64_t *idaws)
{
int i;
@@ -203,10 +203,10 @@ static inline void page_array_idal_create_words(struct page_array *pa,
*/
for (i = 0; i < pa->pa_nr; i++) {
- idaws[i] = page_to_phys(pa->pa_page[i]);
+ idaws[i] = virt_to_dma64(page_to_virt(pa->pa_page[i]));
/* Incorporate any offset from each starting address */
- idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
+ idaws[i] = dma64_add(idaws[i], pa->pa_iova[i] & ~PAGE_MASK);
}
}
@@ -227,7 +227,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
pccw1->flags = ccw0.flags;
pccw1->count = ccw0.count;
}
- pccw1->cda = ccw0.cda;
+ pccw1->cda = u32_to_dma32(ccw0.cda);
pccw1++;
}
}
@@ -299,11 +299,12 @@ static inline int ccw_does_data_transfer(struct ccw1 *ccw)
*
* Returns 1 if yes, 0 if no.
*/
-static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+static inline int is_cpa_within_range(dma32_t cpa, u32 head, int len)
{
u32 tail = head + (len - 1) * sizeof(struct ccw1);
+ u32 gcpa = dma32_to_u32(cpa);
- return (head <= cpa && cpa <= tail);
+ return head <= gcpa && gcpa <= tail;
}
static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
@@ -356,7 +357,7 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
if (ccw_is_tic(ccw))
return;
- kfree(phys_to_virt(ccw->cda));
+ kfree(dma32_to_virt(ccw->cda));
}
/**
@@ -417,15 +418,17 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
+static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain;
int len, ret;
+ u32 gcda;
+ gcda = dma32_to_u32(cda);
/* Copy 2K (the most we support today) of possible CCWs */
- ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
+ ret = vfio_dma_rw(vdev, gcda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
if (ret)
return ret;
@@ -434,7 +437,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
/* Count the CCWs in the current chain */
- len = ccwchain_calc_length(cda, cp);
+ len = ccwchain_calc_length(gcda, cp);
if (len < 0)
return len;
@@ -444,7 +447,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
return -ENOMEM;
chain->ch_len = len;
- chain->ch_iova = cda;
+ chain->ch_iova = gcda;
/* Copy the actual CCWs into the new chain */
memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
@@ -487,13 +490,13 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
struct ccwchain *iter;
- u32 ccw_head;
+ u32 cda, ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
- ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
- (ccw->cda - ccw_head));
+ cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
+ ccw->cda = u32_to_dma32(cda);
return 0;
}
}
@@ -501,14 +504,12 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
return -EFAULT;
}
-static unsigned long *get_guest_idal(struct ccw1 *ccw,
- struct channel_program *cp,
- int idaw_nr)
+static dma64_t *get_guest_idal(struct ccw1 *ccw, struct channel_program *cp, int idaw_nr)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- unsigned long *idaws;
- unsigned int *idaws_f1;
+ dma64_t *idaws;
+ dma32_t *idaws_f1;
int idal_len = idaw_nr * sizeof(*idaws);
int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
int idaw_mask = ~(idaw_size - 1);
@@ -520,7 +521,7 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
if (ccw_is_idal(ccw)) {
/* Copy IDAL from guest */
- ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
+ ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), idaws, idal_len, false);
if (ret) {
kfree(idaws);
return ERR_PTR(ret);
@@ -528,14 +529,18 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
} else {
/* Fabricate an IDAL based off CCW data address */
if (cp->orb.cmd.c64) {
- idaws[0] = ccw->cda;
- for (i = 1; i < idaw_nr; i++)
- idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
+ idaws[0] = u64_to_dma64(dma32_to_u32(ccw->cda));
+ for (i = 1; i < idaw_nr; i++) {
+ idaws[i] = dma64_add(idaws[i - 1], idaw_size);
+ idaws[i] = dma64_and(idaws[i], idaw_mask);
+ }
} else {
- idaws_f1 = (unsigned int *)idaws;
+ idaws_f1 = (dma32_t *)idaws;
idaws_f1[0] = ccw->cda;
- for (i = 1; i < idaw_nr; i++)
- idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
+ for (i = 1; i < idaw_nr; i++) {
+ idaws_f1[i] = dma32_add(idaws_f1[i - 1], idaw_size);
+ idaws_f1[i] = dma32_and(idaws_f1[i], idaw_mask);
+ }
}
}
@@ -572,7 +577,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
if (ccw_is_idal(ccw)) {
/* Read first IDAW to check its starting address. */
/* All subsequent IDAWs will be 2K- or 4K-aligned. */
- ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
+ ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), &iova, size, false);
if (ret)
return ret;
@@ -583,7 +588,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
if (!cp->orb.cmd.c64)
iova = iova >> 32;
} else {
- iova = ccw->cda;
+ iova = dma32_to_u32(ccw->cda);
}
/* Format-1 IDAWs operate on 2K each */
@@ -604,8 +609,8 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- unsigned long *idaws;
- unsigned int *idaws_f1;
+ dma64_t *idaws;
+ dma32_t *idaws_f1;
int ret;
int idaw_nr;
int i;
@@ -636,12 +641,12 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
* Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
- idaws_f1 = (unsigned int *)idaws;
+ idaws_f1 = (dma32_t *)idaws;
for (i = 0; i < idaw_nr; i++) {
if (cp->orb.cmd.c64)
- pa->pa_iova[i] = idaws[i];
+ pa->pa_iova[i] = dma64_to_u64(idaws[i]);
else
- pa->pa_iova[i] = idaws_f1[i];
+ pa->pa_iova[i] = dma32_to_u32(idaws_f1[i]);
}
if (ccw_does_data_transfer(ccw)) {
@@ -652,7 +657,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
pa->pa_nr = 0;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
/* Populate the IDAL with pinned/translated addresses from page */
@@ -874,7 +879,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
cpa = chain->ch_ccw;
- orb->cmd.cpa = (__u32)virt_to_phys(cpa);
+ orb->cmd.cpa = virt_to_dma32(cpa);
return orb;
}
@@ -896,7 +901,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
struct ccwchain *chain;
- u32 cpa = scsw->cmd.cpa;
+ dma32_t cpa = scsw->cmd.cpa;
u32 ccw_head;
if (!cp->initialized)
@@ -919,9 +924,10 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
* Adding this value to the guest physical ccw chain
- * head gets us the guest cpa.
+ * head gets us the guest cpa:
+ * cpa = chain->ch_iova + (cpa - ccw_head)
*/
- cpa = chain->ch_iova + (cpa - ccw_head);
+ cpa = dma32_add(cpa, chain->ch_iova - ccw_head);
break;
}
}
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 09877b46181d..4d7988ea47ef 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -378,7 +378,7 @@ static void fsm_open(struct vfio_ccw_private *private,
spin_lock_irq(&sch->lock);
sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret)
goto err_unlock;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 02c503f16bc2..eba07f8ef308 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -107,7 +107,11 @@ EXPORT_SYMBOL(zcrypt_msgtype);
struct zcdn_device;
-static struct class *zcrypt_class;
+static void zcdn_device_release(struct device *dev);
+static const struct class zcrypt_class = {
+ .name = ZCRYPT_NAME,
+ .dev_release = zcdn_device_release,
+};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;
@@ -130,7 +134,7 @@ static int zcdn_destroy(const char *name);
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
- struct device *dev = class_find_device_by_name(zcrypt_class, name);
+ struct device *dev = class_find_device_by_name(&zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -142,7 +146,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
- struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
+ struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -396,7 +400,7 @@ static int zcdn_create(const char *name)
goto unlockout;
}
zcdndev->device.release = zcdn_device_release;
- zcdndev->device.class = zcrypt_class;
+ zcdndev->device.class = &zcrypt_class;
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
@@ -573,6 +577,7 @@ static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
{
if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
return NULL;
+ zcrypt_card_get(zc);
zcrypt_queue_get(zq);
get_device(&zq->queue->ap_dev.device);
atomic_add(weight, &zc->load);
@@ -592,6 +597,7 @@ static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
atomic_sub(weight, &zq->load);
put_device(&zq->queue->ap_dev.device);
zcrypt_queue_put(zq);
+ zcrypt_card_put(zc);
module_put(mod);
}
@@ -2075,12 +2081,9 @@ static int __init zcdn_init(void)
int rc;
/* create a new class 'zcrypt' */
- zcrypt_class = class_create(ZCRYPT_NAME);
- if (IS_ERR(zcrypt_class)) {
- rc = PTR_ERR(zcrypt_class);
- goto out_class_create_failed;
- }
- zcrypt_class->dev_release = zcdn_device_release;
+ rc = class_register(&zcrypt_class);
+ if (rc)
+ goto out_class_register_failed;
/* alloc device minor range */
rc = alloc_chrdev_region(&zcrypt_devt,
@@ -2096,35 +2099,35 @@ static int __init zcdn_init(void)
goto out_cdev_add_failed;
/* need some class specific sysfs attributes */
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
if (rc)
goto out_class_create_file_1_failed;
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
if (rc)
goto out_class_create_file_2_failed;
return 0;
out_class_create_file_2_failed:
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
- class_destroy(zcrypt_class);
-out_class_create_failed:
+ class_unregister(&zcrypt_class);
+out_class_register_failed:
return rc;
}
static void zcdn_exit(void)
{
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
- class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
zcdn_destroy_all();
cdev_del(&zcrypt_cdev);
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
- class_destroy(zcrypt_class);
+ class_unregister(&zcrypt_class);
}
/*
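
The zcrypt_api.c hunks above are the now-standard move from a class_create()d pointer to a statically defined, const struct class registered with class_register(). A hedged sketch of the same flow for a hypothetical "demo" class follows; all demo_* names are invented, and the error handling simply mirrors the converted zcdn_init() above.

/* Illustrative sketch only, not part of the patch. */
#include <linux/device.h>
#include <linux/module.h>

static void demo_dev_release(struct device *dev)
{
        /* free per-device state here */
}

static const struct class demo_class = {
        .name           = "demo",
        .dev_release    = demo_dev_release,
};

static int __init demo_init(void)
{
        int rc;

        /* class_register() replaces class_create() plus manual field setup */
        rc = class_register(&demo_class);
        if (rc)
                return rc;

        return 0;
}

static void __exit demo_exit(void)
{
        class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Later calls such as class_create_file() and class_find_device_by_name() then take &demo_class directly, exactly as the converted zcrypt code does.
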
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 90ec477386a8..9678c6a2cda7 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1325,7 +1325,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
clear_normalized_cda(&ch->ccw[1]);
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
- (void *)(unsigned long)ch->ccw[1].cda,
+ (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
ch->trans_skb->data);
ch->ccw[1].count = ch->max_bufsize;
@@ -1340,7 +1340,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
}
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
- (void *)(unsigned long)ch->ccw[1].cda,
+ (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
ch->trans_skb->data);
ch->ccw[1].count = ch->trans_skb->len;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index ac15d7c2b200..878fe3ce53ad 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1389,7 +1389,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->ccw[15].cmd_code = CCW_CMD_WRITE;
ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[15].count = TH_HEADER_LENGTH;
- ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
+ ch->ccw[15].cda = virt_to_dma32(ch->discontact_th);
ch->ccw[16].cmd_code = CCW_CMD_NOOP;
ch->ccw[16].flags = CCW_FLAG_SLI;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 7a2f34a5e0e0..9e580ef69bda 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1708,57 +1708,57 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
ch->ccw[9].cmd_code = CCW_CMD_WRITE;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
- ch->ccw[9].cda = virt_to_phys(ch->xid_th);
+ ch->ccw[9].cda = virt_to_dma32(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[10].cmd_code = CCW_CMD_WRITE;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
- ch->ccw[10].cda = virt_to_phys(ch->xid);
+ ch->ccw[10].cda = virt_to_dma32(ch->xid);
ch->ccw[11].cmd_code = CCW_CMD_READ;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
- ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
+ ch->ccw[11].cda = virt_to_dma32(ch->rcvd_xid_th);
ch->ccw[12].cmd_code = CCW_CMD_READ;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
- ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
+ ch->ccw[12].cda = virt_to_dma32(ch->rcvd_xid);
ch->ccw[13].cmd_code = CCW_CMD_READ;
- ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
+ ch->ccw[13].cda = virt_to_dma32(ch->rcvd_xid_id);
} else { /* side == YSIDE : mpc_action_yside_xid */
ch->ccw[9].cmd_code = CCW_CMD_READ;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
- ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
+ ch->ccw[9].cda = virt_to_dma32(ch->rcvd_xid_th);
ch->ccw[10].cmd_code = CCW_CMD_READ;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
- ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
+ ch->ccw[10].cda = virt_to_dma32(ch->rcvd_xid);
if (ch->xid_th == NULL)
goto done;
ch->ccw[11].cmd_code = CCW_CMD_WRITE;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
- ch->ccw[11].cda = virt_to_phys(ch->xid_th);
+ ch->ccw[11].cda = virt_to_dma32(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[12].cmd_code = CCW_CMD_WRITE;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
- ch->ccw[12].cda = virt_to_phys(ch->xid);
+ ch->ccw[12].cda = virt_to_dma32(ch->xid);
if (ch->xid_id == NULL)
goto done;
ch->ccw[13].cmd_code = CCW_CMD_WRITE;
- ch->ccw[13].cda = virt_to_phys(ch->xid_id);
+ ch->ccw[13].cda = virt_to_dma32(ch->xid_id);
}
ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a1f2acd6fb8f..25d4e6376591 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -218,7 +218,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->read.ccws[cnt].cda =
- (__u32)virt_to_phys(card->read.iob[cnt].data);
+ virt_to_dma32(card->read.iob[cnt].data);
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
@@ -230,8 +230,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
/* Last ccw is a tic (transfer in channel). */
card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->read.ccws[LCS_NUM_BUFFS].cda =
- (__u32)virt_to_phys(card->read.ccws);
+ card->read.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->read.ccws);
/* Set initial state of the read channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -273,12 +272,11 @@ lcs_setup_write_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->write.ccws[cnt].cda =
- (__u32)virt_to_phys(card->write.iob[cnt].data);
+ virt_to_dma32(card->write.iob[cnt].data);
}
/* Last ccw is a tic (transfer in channel). */
card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->write.ccws[LCS_NUM_BUFFS].cda =
- (__u32)virt_to_phys(card->write.ccws);
+ card->write.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->write.ccws);
/* Set initial state of the write channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -1399,7 +1397,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.cmd.cpa != 0)) {
- index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
+ index = (struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)
- channel->ccws;
if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
(irb->scsw.cmd.cstat & SCHN_STAT_PCI))
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index cf8506d0f185..a0cce6872075 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -426,7 +426,7 @@ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
static int __qeth_issue_next_read(struct qeth_card *card)
@@ -1359,7 +1359,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_tx_complete_buf(queue, buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
- void *data = phys_to_virt(buf->buffer->element[i].addr);
+ void *data = dma64_to_virt(buf->buffer->element[i].addr);
if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache, data);
@@ -1404,7 +1404,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
for (i = 0;
i < aob->sb_count && i < queue->max_elements;
i++) {
- void *data = phys_to_virt(aob->sba[i]);
+ void *data = dma64_to_virt(aob->sba[i]);
if (test_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache,
@@ -2918,8 +2918,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
*/
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
- buf->buffer->element[i].addr =
- page_to_phys(pool_entry->elements[i]);
+ buf->buffer->element[i].addr = u64_to_dma64(
+ page_to_phys(pool_entry->elements[i]));
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
@@ -3765,9 +3765,9 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
- unsigned long phys_aob_addr = buffer->element[e].addr;
+ dma64_t phys_aob_addr = buffer->element[e].addr;
- qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
+ qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr));
++e;
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
@@ -4042,7 +4042,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
if (hd_len) {
is_first_elem = false;
- buffer->element[element].addr = virt_to_phys(hdr);
+ buffer->element[element].addr = virt_to_dma64(hdr);
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
@@ -4063,7 +4063,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = virt_to_phys(data);
+ buffer->element[element].addr = virt_to_dma64(data);
buffer->element[element].length = elem_length;
length -= elem_length;
if (is_first_elem) {
@@ -4093,7 +4093,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = virt_to_phys(data);
+ buffer->element[element].addr = virt_to_dma64(data);
buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
@@ -5569,7 +5569,7 @@ next_packet:
offset = 0;
}
- hdr = phys_to_virt(element->addr) + offset;
+ hdr = dma64_to_virt(element->addr) + offset;
offset += sizeof(*hdr);
skb = NULL;
@@ -5661,7 +5661,7 @@ use_skb:
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
- char *data = phys_to_virt(element->addr) + offset;
+ char *data = dma64_to_virt(element->addr) + offset;
skb_len -= data_len;
offset += data_len;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ceed1b6f7cb6..22e82000334a 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2742,7 +2742,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
- req_id = sbale->addr;
+ req_id = dma64_to_u64(sbale->addr);
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index f54f506b02d6..8cbc5e1711af 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -125,7 +125,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
memset(pl, 0,
ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
- req_id = sbale->addr;
+ req_id = dma64_to_u64(sbale->addr);
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
@@ -256,7 +256,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
q_req->sbal_number);
return -EINVAL;
}
- sbale->addr = sg_phys(sg);
+ sbale->addr = u64_to_dma64(sg_phys(sg));
sbale->length = sg->length;
}
return 0;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 90134d9b69a7..8f7d2ae94441 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -129,14 +129,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
% QDIO_MAX_BUFFERS_PER_Q;
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->addr = req_id;
+ sbale->addr = u64_to_dma64(req_id);
sbale->eflags = 0;
sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
if (unlikely(!data))
return;
sbale++;
- sbale->addr = virt_to_phys(data);
+ sbale->addr = virt_to_dma64(data);
sbale->length = len;
}
@@ -159,7 +159,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
q_req->sbale_curr++;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->addr = virt_to_phys(data);
+ sbale->addr = virt_to_dma64(data);
sbale->length = len;
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ac67576301bf..d7569f395559 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -72,6 +72,7 @@ struct virtio_ccw_device {
unsigned int config_ready;
void *airq_info;
struct vcdev_dma_area *dma_area;
+ dma32_t dma_area_addr;
};
static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
@@ -84,20 +85,50 @@ static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
return &vcdev->dma_area->indicators2;
}
+/* Spec stipulates a 64 bit address */
+static inline dma64_t indicators_dma(struct virtio_ccw_device *vcdev)
+{
+ u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
+
+ return dma64_add(u64_to_dma64(dma_area_addr),
+ offsetof(struct vcdev_dma_area, indicators));
+}
+
+/* Spec stipulates a 64 bit address */
+static inline dma64_t indicators2_dma(struct virtio_ccw_device *vcdev)
+{
+ u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
+
+ return dma64_add(u64_to_dma64(dma_area_addr),
+ offsetof(struct vcdev_dma_area, indicators2));
+}
+
+static inline dma32_t config_block_dma(struct virtio_ccw_device *vcdev)
+{
+ return dma32_add(vcdev->dma_area_addr,
+ offsetof(struct vcdev_dma_area, config_block));
+}
+
+static inline dma32_t status_dma(struct virtio_ccw_device *vcdev)
+{
+ return dma32_add(vcdev->dma_area_addr,
+ offsetof(struct vcdev_dma_area, status));
+}
+
struct vq_info_block_legacy {
- __u64 queue;
+ dma64_t queue;
__u32 align;
__u16 index;
__u16 num;
} __packed;
struct vq_info_block {
- __u64 desc;
+ dma64_t desc;
__u32 res0;
__u16 index;
__u16 num;
- __u64 avail;
- __u64 used;
+ dma64_t avail;
+ dma64_t used;
} __packed;
struct virtio_feature_desc {
@@ -106,8 +137,8 @@ struct virtio_feature_desc {
} __packed;
struct virtio_thinint_area {
- unsigned long summary_indicator;
- unsigned long indicator;
+ dma64_t summary_indicator;
+ dma64_t indicator;
u64 bit_nr;
u8 isc;
} __packed;
@@ -123,6 +154,7 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
+ dma32_t info_block_addr;
int num;
union {
struct vq_info_block s;
@@ -156,6 +188,11 @@ static inline u8 *get_summary_indicator(struct airq_info *info)
return summary_indicators + info->summary_indicator_idx;
}
+static inline dma64_t get_summary_indicator_dma(struct airq_info *info)
+{
+ return virt_to_dma64(get_summary_indicator(info));
+}
+
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
@@ -260,12 +297,12 @@ static struct airq_info *new_airq_info(int index)
return info;
}
-static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
- u64 *first, void **airq_info)
+static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ u64 *first, void **airq_info)
{
int i, j;
struct airq_info *info;
- unsigned long indicator_addr = 0;
+ unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
@@ -275,7 +312,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
info = airq_areas[i];
mutex_unlock(&airq_areas_lock);
if (!info)
- return 0;
+ return NULL;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
if (bit == -1UL) {
@@ -285,7 +322,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
}
*first = bit;
*airq_info = info;
- indicator_addr = (unsigned long)info->aiv->vector;
+ indicator_addr = info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
(unsigned long)vqs[j]);
@@ -348,31 +385,31 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw)
{
int ret;
- unsigned long *indicatorp = NULL;
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *airq_info = vcdev->airq_info;
+ dma64_t *indicatorp = NULL;
if (vcdev->is_thinint) {
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*thinint_area));
+ sizeof(*thinint_area),
+ &ccw->cda);
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) get_summary_indicator(airq_info);
+ get_summary_indicator_dma(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
- ccw->cda = (__u32)virt_to_phys(thinint_area);
} else {
/* payload is the address of the indicators */
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp),
+ &ccw->cda);
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(indicators(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
}
/* Deregister indicators from host. */
*indicators(vcdev) = 0;
@@ -386,7 +423,7 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
@@ -426,7 +463,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->config_block);
+ ccw->cda = config_block_dma(vcdev);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
@@ -463,7 +500,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(info->info_block);
+ ccw->cda = info->info_block_addr;
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VQ | index);
/*
@@ -486,7 +523,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -525,7 +562,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
goto out_err;
}
info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*info->info_block));
+ sizeof(*info->info_block),
+ &info->info_block_addr);
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -556,22 +594,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
/* Register it with the host. */
queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = queue;
+ info->info_block->l.queue = u64_to_dma64(queue);
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = queue;
+ info->info_block->s.desc = u64_to_dma64(queue);
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
+ info->info_block->s.avail = u64_to_dma64(virtqueue_get_avail_addr(vq));
+ info->info_block->s.used = u64_to_dma64(virtqueue_get_used_addr(vq));
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(info->info_block);
+ ccw->cda = info->info_block_addr;
err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
if (err) {
dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
@@ -605,11 +643,12 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
{
int ret;
struct virtio_thinint_area *thinint_area = NULL;
- unsigned long indicator_addr;
+ unsigned long *indicator_addr;
struct airq_info *info;
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*thinint_area));
+ sizeof(*thinint_area),
+ &ccw->cda);
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -622,15 +661,13 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
ret = -ENOSPC;
goto out;
}
- thinint_area->indicator = virt_to_phys((void *)indicator_addr);
+ thinint_area->indicator = virt_to_dma64(indicator_addr);
info = vcdev->airq_info;
- thinint_area->summary_indicator =
- virt_to_phys(get_summary_indicator(info));
+ thinint_area->summary_indicator = get_summary_indicator_dma(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(*thinint_area);
- ccw->cda = (__u32)virt_to_phys(thinint_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
if (ret) {
if (ret == -EOPNOTSUPP) {
@@ -658,11 +695,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- unsigned long *indicatorp = NULL;
+ dma64_t *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
@@ -687,10 +724,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* the address of the indicators.
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp),
+ &ccw->cda);
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) indicators(vcdev);
+ *indicatorp = indicators_dma(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -702,32 +740,30 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
*indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(indicators(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicatorp = indicators2_dma(vcdev);
*indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(indicators2(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
@@ -738,7 +774,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -762,11 +798,12 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return 0;
- features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+ &ccw->cda);
if (!features) {
rc = 0;
goto out_free;
@@ -776,7 +813,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret) {
rc = 0;
@@ -793,7 +829,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret == 0)
rc |= (u64)le32_to_cpu(features->features) << 32;
@@ -825,11 +860,12 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
- features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+ &ccw->cda);
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -846,7 +882,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
if (ret)
goto out_free;
@@ -860,7 +895,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
@@ -879,12 +913,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
- VIRTIO_CCW_CONFIG_SIZE);
+ VIRTIO_CCW_CONFIG_SIZE,
+ &ccw->cda);
if (!config_area)
goto out_free;
@@ -892,7 +927,6 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
ccw->cmd_code = CCW_CMD_READ_CONF;
ccw->flags = 0;
ccw->count = offset + len;
- ccw->cda = (__u32)virt_to_phys(config_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
if (ret)
goto out_free;
@@ -919,12 +953,13 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
- VIRTIO_CCW_CONFIG_SIZE);
+ VIRTIO_CCW_CONFIG_SIZE,
+ &ccw->cda);
if (!config_area)
goto out_free;
@@ -939,7 +974,6 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw->cmd_code = CCW_CMD_WRITE_CONF;
ccw->flags = 0;
ccw->count = offset + len;
- ccw->cda = (__u32)virt_to_phys(config_area);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
@@ -956,14 +990,14 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
if (vcdev->revision < 2)
return vcdev->dma_area->status;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
ccw->count = sizeof(vcdev->dma_area->status);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
+ ccw->cda = status_dma(vcdev);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
@@ -983,7 +1017,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
struct ccw1 *ccw;
int ret;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -992,11 +1026,11 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
/* We use ssch for setting the status which is a serializing
* instruction that guarantees the memory writes have
* completed before ssch.
*/
+ ccw->cda = status_dma(vcdev);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
@@ -1278,10 +1312,10 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
- rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev), &ccw->cda);
if (!rev) {
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
@@ -1291,7 +1325,6 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
ccw->flags = 0;
ccw->count = sizeof(*rev);
- ccw->cda = (__u32)virt_to_phys(rev);
vcdev->revision = VIRTIO_CCW_REV_MAX;
do {
@@ -1333,7 +1366,8 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->cdev = cdev;
vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*vcdev->dma_area));
+ sizeof(*vcdev->dma_area),
+ &vcdev->dma_area_addr);
if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;
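
The virtio_ccw conversion above stops recomputing CCW data addresses with virt_to_phys() at every call site. Instead, the DMA address is captured once via the three-argument ccw_device_dma_zalloc() used throughout these hunks (the extra parameter returns the buffer's 31-bit DMA address), and per-field addresses are then derived with dma32_add() plus offsetof(), as in status_dma() and config_block_dma(). A minimal sketch of that pattern, with an invented demo_area layout standing in for vcdev_dma_area; cmd_code/flags handling is omitted, see the hunks above.

/* Illustrative sketch only, not part of the patch. */
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/dma-types.h>

struct demo_area {
        u8      status;
        u8      config[15];
};

struct demo_dev {
        struct ccw_device *cdev;
        struct demo_area *area;         /* CPU view of the DMA area */
        dma32_t area_addr;              /* device view, captured at allocation */
};

static int demo_setup(struct demo_dev *d, struct ccw1 *ccw)
{
        /* third argument returns the DMA address of the new buffer */
        d->area = ccw_device_dma_zalloc(d->cdev, sizeof(*d->area),
                                        &d->area_addr);
        if (!d->area)
                return -ENOMEM;

        /* point the CCW at one member without another virt_to_phys() */
        ccw->cda = dma32_add(d->area_addr,
                             offsetof(struct demo_area, status));
        ccw->count = sizeof(d->area->status);
        return 0;
}
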
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 8b40f75fc9d7..634f2f501c6c 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -241,6 +241,11 @@ config SCSI_SCAN_ASYNC
Note that this setting also affects whether resuming from
system suspend will be performed asynchronously.
+config SCSI_PROTO_TEST
+ tristate "scsi_proto.h unit tests" if !KUNIT_ALL_TESTS
+ depends on SCSI && KUNIT
+ default KUNIT_ALL_TESTS
+
menu "SCSI Transports"
depends on SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index f055bfd54a68..1313ddf2fd1a 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -24,6 +24,8 @@ obj-$(CONFIG_SCSI_COMMON) += scsi_common.o
obj-$(CONFIG_RAID_ATTRS) += raid_class.o
+obj-$(CONFIG_SCSI_PROTO_TEST) += scsi_proto_test.o
+
# --- NOTE ORDERING HERE ---
# For kernel non-modular link, transport attributes need to
# be initialised before drivers
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 8cad9792a562..3e0c0381277a 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -517,6 +517,8 @@ void scsi_attach_vpd(struct scsi_device *sdev)
scsi_update_vpd_page(sdev, 0xb1, &sdev->vpd_pgb1);
if (vpd_buf->data[i] == 0xb2)
scsi_update_vpd_page(sdev, 0xb2, &sdev->vpd_pgb2);
+ if (vpd_buf->data[i] == 0xb7)
+ scsi_update_vpd_page(sdev, 0xb7, &sdev->vpd_pgb7);
}
kfree(vpd_buf);
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 914d9c12e741..acf0592d63da 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -43,6 +43,7 @@
#include <linux/prefetch.h>
#include <linux/debugfs.h>
#include <linux/async.h>
+#include <linux/cleanup.h>
#include <net/checksum.h>
@@ -532,6 +533,8 @@ static int resp_write_scat(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_start_stop(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_readcap16(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_get_lba_status(struct scsi_cmnd *, struct sdebug_dev_info *);
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip);
static int resp_report_tgtpgs(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_unmap(struct scsi_cmnd *, struct sdebug_dev_info *);
static int resp_rsup_opcodes(struct scsi_cmnd *, struct sdebug_dev_info *);
@@ -606,6 +609,9 @@ static const struct opcode_info_t sa_in_16_iarr[] = {
{0, 0x9e, 0x12, F_SA_LOW | F_D_IN, resp_get_lba_status, NULL,
{16, 0x12, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0, 0xc7} }, /* GET LBA STATUS(16) */
+ {0, 0x9e, 0x16, F_SA_LOW | F_D_IN, resp_get_stream_status, NULL,
+ {16, 0x16, 0, 0, 0xff, 0xff, 0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff,
+ 0, 0} }, /* GET STREAM STATUS */
};
static const struct opcode_info_t vl_iarr[] = { /* VARIABLE LENGTH */
@@ -896,6 +902,8 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
+static atomic_long_t writes_by_group_number[64];
+
static char sdebug_proc_name[] = MY_NAME;
static const char *my_name = MY_NAME;
@@ -1867,6 +1875,19 @@ static int inquiry_vpd_b6(struct sdebug_dev_info *devip, unsigned char *arr)
return 0x3c;
}
+#define SDEBUG_BLE_LEN_AFTER_B4 28 /* thus vpage 32 bytes long */
+
+enum { MAXIMUM_NUMBER_OF_STREAMS = 6, PERMANENT_STREAM_COUNT = 5 };
+
+/* Block limits extension VPD page (SBC-4) */
+static int inquiry_vpd_b7(unsigned char *arrb4)
+{
+ memset(arrb4, 0, SDEBUG_BLE_LEN_AFTER_B4);
+ arrb4[1] = 1; /* Reduced stream control support (RSCS) */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS, &arrb4[2]);
+ return SDEBUG_BLE_LEN_AFTER_B4;
+}
+
#define SDEBUG_LONG_INQ_SZ 96
#define SDEBUG_MAX_INQ_ARR_SZ 584
@@ -1903,7 +1924,8 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
u32 len;
char lu_id_str[6];
int host_no = devip->sdbg_host->shost->host_no;
-
+
+ arr[1] = cmd[2];
port_group_id = (((host_no + 1) & 0x7f) << 8) +
(devip->channel & 0x7f);
if (sdebug_vpd_use_hostno == 0)
@@ -1914,7 +1936,6 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
(devip->target * 1000) - 3;
len = scnprintf(lu_id_str, 6, "%d", lu_id_num);
if (0 == cmd[2]) { /* supported vital product data pages */
- arr[1] = cmd[2]; /*sanity */
n = 4;
arr[n++] = 0x0; /* this page */
arr[n++] = 0x80; /* unit serial number */
@@ -1932,26 +1953,22 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[n++] = 0xb2; /* LB Provisioning */
if (is_zbc)
arr[n++] = 0xb6; /* ZB dev. char. */
+ arr[n++] = 0xb7; /* Block limits extension */
}
arr[3] = n - 4; /* number of supported VPD pages */
} else if (0x80 == cmd[2]) { /* unit serial number */
- arr[1] = cmd[2]; /*sanity */
arr[3] = len;
memcpy(&arr[4], lu_id_str, len);
} else if (0x83 == cmd[2]) { /* device identification */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_83(&arr[4], port_group_id,
target_dev_id, lu_id_num,
lu_id_str, len,
&devip->lu_name);
} else if (0x84 == cmd[2]) { /* Software interface ident. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_84(&arr[4]);
} else if (0x85 == cmd[2]) { /* Management network addresses */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_85(&arr[4]);
} else if (0x86 == cmd[2]) { /* extended inquiry */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x3c; /* number of following entries */
if (sdebug_dif == T10_PI_TYPE3_PROTECTION)
arr[4] = 0x4; /* SPT: GRD_CHK:1 */
@@ -1959,33 +1976,32 @@ static int resp_inquiry(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
arr[4] = 0x5; /* SPT: GRD_CHK:1, REF_CHK:1 */
else
arr[4] = 0x0; /* no protection stuff */
- arr[5] = 0x7; /* head of q, ordered + simple q's */
+ /*
+ * GROUP_SUP=1; HEADSUP=1 (HEAD OF QUEUE); ORDSUP=1
+ * (ORDERED queuing); SIMPSUP=1 (SIMPLE queuing).
+ */
+ arr[5] = 0x17;
} else if (0x87 == cmd[2]) { /* mode page policy */
- arr[1] = cmd[2]; /*sanity */
arr[3] = 0x8; /* number of following entries */
arr[4] = 0x2; /* disconnect-reconnect mp */
arr[6] = 0x80; /* mlus, shared */
arr[8] = 0x18; /* protocol specific lu */
arr[10] = 0x82; /* mlus, per initiator port */
} else if (0x88 == cmd[2]) { /* SCSI Ports */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_88(&arr[4], target_dev_id);
} else if (is_disk_zbc && 0x89 == cmd[2]) { /* ATA info */
- arr[1] = cmd[2]; /*sanity */
n = inquiry_vpd_89(&arr[4]);
put_unaligned_be16(n, arr + 2);
} else if (is_disk_zbc && 0xb0 == cmd[2]) { /* Block limits */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b0(&arr[4]);
} else if (is_disk_zbc && 0xb1 == cmd[2]) { /* Block char. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b1(devip, &arr[4]);
} else if (is_disk && 0xb2 == cmd[2]) { /* LB Prov. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b2(&arr[4]);
} else if (is_zbc && cmd[2] == 0xb6) { /* ZB dev. charact. */
- arr[1] = cmd[2]; /*sanity */
arr[3] = inquiry_vpd_b6(devip, &arr[4]);
+ } else if (cmd[2] == 0xb7) { /* block limits extension page */
+ arr[3] = inquiry_vpd_b7(&arr[4]);
} else {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, -1);
kfree(arr);
@@ -2554,6 +2570,40 @@ static int resp_ctrl_m_pg(unsigned char *p, int pcontrol, int target)
return sizeof(ctrl_m_pg);
}
+/* IO Advice Hints Grouping mode page */
+static int resp_grouping_m_pg(unsigned char *p, int pcontrol, int target)
+{
+ /* IO Advice Hints Grouping mode page */
+ struct grouping_m_pg {
+ u8 page_code; /* OR 0x40 when subpage_code > 0 */
+ u8 subpage_code;
+ __be16 page_length;
+ u8 reserved[12];
+ struct scsi_io_group_descriptor descr[MAXIMUM_NUMBER_OF_STREAMS];
+ };
+ static const struct grouping_m_pg gr_m_pg = {
+ .page_code = 0xa | 0x40,
+ .subpage_code = 5,
+ .page_length = cpu_to_be16(sizeof(gr_m_pg) - 4),
+ .descr = {
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 1 },
+ { .st_enble = 0 },
+ }
+ };
+
+ BUILD_BUG_ON(sizeof(struct grouping_m_pg) !=
+ 16 + MAXIMUM_NUMBER_OF_STREAMS * 16);
+ memcpy(p, &gr_m_pg, sizeof(gr_m_pg));
+ if (1 == pcontrol) {
+ /* There are no changeable values so clear from byte 4 on. */
+ memset(p + 4, 0, sizeof(gr_m_pg) - 4);
+ }
+ return sizeof(gr_m_pg);
+}
static int resp_iec_m_pg(unsigned char *p, int pcontrol, int target)
{ /* Informational Exceptions control mode page for mode_sense */
@@ -2627,7 +2677,8 @@ static int resp_sas_sha_m_spg(unsigned char *p, int pcontrol)
return sizeof(sas_sha_m_pg);
}
-#define SDEBUG_MAX_MSENSE_SZ 256
+/* PAGE_SIZE is more than necessary but provides room for future expansion. */
+#define SDEBUG_MAX_MSENSE_SZ PAGE_SIZE
static int resp_mode_sense(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
@@ -2638,10 +2689,13 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
int target_dev_id;
int target = scp->device->id;
unsigned char *ap;
- unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
+ unsigned char *arr __free(kfree);
unsigned char *cmd = scp->cmnd;
- bool dbd, llbaa, msense_6, is_disk, is_zbc, bad_pcode;
+ bool dbd, llbaa, msense_6, is_disk, is_zbc;
+ arr = kzalloc(SDEBUG_MAX_MSENSE_SZ, GFP_ATOMIC);
+ if (!arr)
+ return -ENOMEM;
dbd = !!(cmd[1] & 0x8); /* disable block descriptors */
pcontrol = (cmd[2] & 0xc0) >> 6;
pcode = cmd[2] & 0x3f;
@@ -2699,45 +2753,63 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
ap = arr + offset;
}
- if ((subpcode > 0x0) && (subpcode < 0xff) && (0x19 != pcode)) {
- /* TODO: Control Extension page */
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
- bad_pcode = false;
-
+ /*
+ * N.B. If len>0 before resp_*_pg() call, then form of that call should be:
+ * len += resp_*_pg(ap + len, pcontrol, target);
+ */
switch (pcode) {
case 0x1: /* Read-Write error recovery page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_err_recov_pg(ap, pcontrol, target);
offset += len;
break;
case 0x2: /* Disconnect-Reconnect page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_disconnect_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3: /* Format device page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk) {
len = resp_format_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0x8: /* Caching page, direct access */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
if (is_disk || is_zbc) {
len = resp_caching_pg(ap, pcontrol, target);
offset += len;
- } else
- bad_pcode = true;
+ } else {
+ goto bad_pcode;
+ }
break;
case 0xa: /* Control Mode page, all devices */
- len = resp_ctrl_m_pg(ap, pcontrol, target);
+ switch (subpcode) {
+ case 0:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ break;
+ case 0x05:
+ len = resp_grouping_m_pg(ap, pcontrol, target);
+ break;
+ case 0xff:
+ len = resp_ctrl_m_pg(ap, pcontrol, target);
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ break;
+ default:
+ goto bad_subpcode;
+ }
offset += len;
break;
case 0x19: /* if spc==1 then sas phy, control+discover */
- if ((subpcode > 0x2) && (subpcode < 0xff)) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
- }
+ if (subpcode > 0x2 && subpcode < 0xff)
+ goto bad_subpcode;
len = 0;
if ((0x0 == subpcode) || (0xff == subpcode))
len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
@@ -2749,49 +2821,50 @@ static int resp_mode_sense(struct scsi_cmnd *scp,
offset += len;
break;
case 0x1c: /* Informational Exceptions Mode page, all devices */
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
len = resp_iec_m_pg(ap, pcontrol, target);
offset += len;
break;
case 0x3f: /* Read all Mode pages */
- if ((0 == subpcode) || (0xff == subpcode)) {
- len = resp_err_recov_pg(ap, pcontrol, target);
- len += resp_disconnect_pg(ap + len, pcontrol, target);
- if (is_disk) {
- len += resp_format_pg(ap + len, pcontrol,
- target);
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- } else if (is_zbc) {
- len += resp_caching_pg(ap + len, pcontrol,
- target);
- }
- len += resp_ctrl_m_pg(ap + len, pcontrol, target);
- len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
- if (0xff == subpcode) {
- len += resp_sas_pcd_m_spg(ap + len, pcontrol,
- target, target_dev_id);
- len += resp_sas_sha_m_spg(ap + len, pcontrol);
- }
- len += resp_iec_m_pg(ap + len, pcontrol, target);
- offset += len;
- } else {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
- return check_condition_result;
+ if (subpcode > 0x0 && subpcode < 0xff)
+ goto bad_subpcode;
+ len = resp_err_recov_pg(ap, pcontrol, target);
+ len += resp_disconnect_pg(ap + len, pcontrol, target);
+ if (is_disk) {
+ len += resp_format_pg(ap + len, pcontrol, target);
+ len += resp_caching_pg(ap + len, pcontrol, target);
+ } else if (is_zbc) {
+ len += resp_caching_pg(ap + len, pcontrol, target);
}
+ len += resp_ctrl_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode)
+ len += resp_grouping_m_pg(ap + len, pcontrol, target);
+ len += resp_sas_sf_m_pg(ap + len, pcontrol, target);
+ if (0xff == subpcode) {
+ len += resp_sas_pcd_m_spg(ap + len, pcontrol, target,
+ target_dev_id);
+ len += resp_sas_sha_m_spg(ap + len, pcontrol);
+ }
+ len += resp_iec_m_pg(ap + len, pcontrol, target);
+ offset += len;
break;
default:
- bad_pcode = true;
- break;
- }
- if (bad_pcode) {
- mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
- return check_condition_result;
+ goto bad_pcode;
}
if (msense_6)
arr[0] = offset - 1;
else
put_unaligned_be16((offset - 2), arr + 0);
return fill_from_dev_buffer(scp, arr, min_t(u32, alloc_len, offset));
+
+bad_pcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 2, 5);
+ return check_condition_result;
+
+bad_subpcode:
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 3, -1);
+ return check_condition_result;
}
#define SDEBUG_MAX_MSELECT_SZ 512
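As a worked example of the header fix-up at the end of resp_mode_sense(): MODE SENSE(6) stores a one-byte MODE DATA LENGTH that excludes itself (offset - 1), while MODE SENSE(10) stores a big-endian two-byte length that excludes its own two bytes (offset - 2). A small sketch of that arithmetic with assumed response sizes:

#include <stdint.h>
#include <stdio.h>

/* The MODE DATA LENGTH field excludes its own size. */
static void finish_header(uint8_t *arr, unsigned int offset, int msense_6)
{
	if (msense_6) {
		arr[0] = offset - 1;		/* 1-byte length field */
	} else {
		arr[0] = (offset - 2) >> 8;	/* 2-byte length field, */
		arr[1] = (offset - 2) & 0xff;	/* big endian */
	}
}

int main(void)
{
	uint8_t h6[4] = {0}, h10[8] = {0};

	finish_header(h6, 36, 1);	/* assumed 36-byte MODE SENSE(6) response */
	finish_header(h10, 60, 0);	/* assumed 60-byte MODE SENSE(10) response */
	printf("msense6 len=%u, msense10 len=%u\n",
	       h6[0], (h10[0] << 8) | h10[1]);
	return 0;
}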
@@ -3306,7 +3379,8 @@ static inline struct sdeb_store_info *devip2sip(struct sdebug_dev_info *devip,
/* Returns number of bytes copied or -1 if error. */
static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
- u32 sg_skip, u64 lba, u32 num, bool do_write)
+ u32 sg_skip, u64 lba, u32 num, bool do_write,
+ u8 group_number)
{
int ret;
u64 block, rest = 0;
@@ -3325,6 +3399,10 @@ static int do_device_access(struct sdeb_store_info *sip, struct scsi_cmnd *scp,
return 0;
if (scp->sc_data_direction != dir)
return -1;
+
+ if (do_write && group_number < ARRAY_SIZE(writes_by_group_number))
+ atomic_long_inc(&writes_by_group_number[group_number]);
+
fsp = sip->storep;
block = do_div(lba, sdebug_store_sectors);
@@ -3698,7 +3776,7 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, false);
+ ret = do_device_access(sip, scp, 0, lba, num, false, 0);
sdeb_read_unlock(sip);
if (unlikely(ret == -1))
return DID_ERROR << 16;
@@ -3883,6 +3961,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
{
bool check_prot;
u32 num;
+ u8 group = 0;
u32 ei_lba;
int ret;
u64 lba;
@@ -3894,11 +3973,13 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be64(cmd + 2);
num = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
check_prot = true;
break;
case WRITE_10:
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x3f;
num = get_unaligned_be16(cmd + 7);
check_prot = true;
break;
@@ -3913,15 +3994,18 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
num = get_unaligned_be32(cmd + 6);
+ group = cmd[6] & 0x3f;
check_prot = true;
break;
case 0x53: /* XDWRITEREAD(10) */
ei_lba = 0;
lba = get_unaligned_be32(cmd + 2);
+ group = cmd[6] & 0x1f;
num = get_unaligned_be16(cmd + 7);
check_prot = false;
break;
default: /* assume WRITE(32) */
+ group = cmd[6] & 0x3f;
lba = get_unaligned_be64(cmd + 12);
ei_lba = get_unaligned_be32(cmd + 20);
num = get_unaligned_be32(cmd + 28);
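The hunks above read the GROUP NUMBER out of different CDB bytes depending on the opcode: byte 14 for WRITE(16), byte 6 for WRITE(10), the XDWRITEREAD(10) case and WRITE(32), always masked down to the field width. A hedged sketch of that extraction for a WRITE(16) CDB, with the offsets taken from the code above rather than restated from SBC:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_be32(uint8_t *p, uint32_t v)
{
	p[0] = v >> 24; p[1] = v >> 16; p[2] = v >> 8; p[3] = v;
}

int main(void)
{
	uint8_t cdb[16];

	memset(cdb, 0, sizeof(cdb));
	cdb[0] = 0x8a;			/* WRITE(16) opcode */
	put_be32(cdb + 6, 0x1000);	/* low half of the 64-bit LBA */
	put_be32(cdb + 10, 8);		/* TRANSFER LENGTH = 8 blocks */
	cdb[14] = 0x40 | 0x07;		/* upper bits unrelated, group = 7 */

	printf("group number = %u\n", cdb[14] & 0x3f);	/* prints 7 */
	return 0;
}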
@@ -3976,7 +4060,7 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
}
}
- ret = do_device_access(sip, scp, 0, lba, num, true);
+ ret = do_device_access(sip, scp, 0, lba, num, true, group);
if (unlikely(scsi_debug_lbp()))
map_region(sip, lba, num);
/* If ZBC zone then bump its write pointer */
@@ -4028,12 +4112,14 @@ static int resp_write_scat(struct scsi_cmnd *scp,
u32 lb_size = sdebug_sector_size;
u32 ei_lba;
u64 lba;
+ u8 group;
int ret, res;
bool is_16;
static const u32 lrd_size = 32; /* + parameter list header size */
if (cmd[0] == VARIABLE_LENGTH_CMD) {
is_16 = false;
+ group = cmd[6] & 0x3f;
wrprotect = (cmd[10] >> 5) & 0x7;
lbdof = get_unaligned_be16(cmd + 12);
num_lrd = get_unaligned_be16(cmd + 16);
@@ -4044,6 +4130,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
lbdof = get_unaligned_be16(cmd + 4);
num_lrd = get_unaligned_be16(cmd + 8);
bt_len = get_unaligned_be32(cmd + 10);
+ group = cmd[14] & 0x3f;
if (unlikely(have_dif_prot)) {
if (sdebug_dif == T10_PI_TYPE2_PROTECTION &&
wrprotect) {
@@ -4132,7 +4219,7 @@ static int resp_write_scat(struct scsi_cmnd *scp,
}
}
- ret = do_device_access(sip, scp, sg_off, lba, num, true);
+ ret = do_device_access(sip, scp, sg_off, lba, num, true, group);
/* If ZBC zone then bump its write pointer */
if (sdebug_dev_is_zoned(devip))
zbc_inc_wp(devip, lba, num);
@@ -4507,6 +4594,51 @@ static int resp_get_lba_status(struct scsi_cmnd *scp,
return fill_from_dev_buffer(scp, arr, SDEBUG_GET_LBA_STATUS_LEN);
}
+static int resp_get_stream_status(struct scsi_cmnd *scp,
+ struct sdebug_dev_info *devip)
+{
+ u16 starting_stream_id, stream_id;
+ const u8 *cmd = scp->cmnd;
+ u32 alloc_len, offset;
+ u8 arr[256] = {};
+ struct scsi_stream_status_header *h = (void *)arr;
+
+ starting_stream_id = get_unaligned_be16(cmd + 4);
+ alloc_len = get_unaligned_be32(cmd + 10);
+
+ if (alloc_len < 8) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 10, -1);
+ return check_condition_result;
+ }
+
+ if (starting_stream_id >= MAXIMUM_NUMBER_OF_STREAMS) {
+ mk_sense_invalid_fld(scp, SDEB_IN_CDB, 4, -1);
+ return check_condition_result;
+ }
+
+ /*
+ * The GET STREAM STATUS command only reports status information
+	 * about open streams. Treat the non-permanent streams as open.
+ */
+ put_unaligned_be16(MAXIMUM_NUMBER_OF_STREAMS,
+ &h->number_of_open_streams);
+
+ for (offset = 8, stream_id = starting_stream_id;
+ offset + 8 <= min_t(u32, alloc_len, sizeof(arr)) &&
+ stream_id < MAXIMUM_NUMBER_OF_STREAMS;
+ offset += 8, stream_id++) {
+ struct scsi_stream_status *stream_status = (void *)arr + offset;
+
+ stream_status->perm = stream_id < PERMANENT_STREAM_COUNT;
+ put_unaligned_be16(stream_id,
+ &stream_status->stream_identifier);
+ stream_status->rel_lifetime = stream_id + 1;
+ }
+ put_unaligned_be32(offset - 8, &h->len); /* PARAMETER DATA LENGTH */
+
+ return fill_from_dev_buffer(scp, arr, min(offset, alloc_len));
+}
+
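The parameter data built above is an 8-byte header (big-endian PARAMETER DATA LENGTH at offset 0, NUMBER OF OPEN STREAMS at offset 6) followed by 8-byte stream status descriptors (PERM in bit 7 of byte 0, STREAM IDENTIFIER at bytes 2-3, relative lifetime in the low bits of byte 4). A small parser sketch over a buffer shaped like that response; the offsets mirror the structures exercised by the scsi_proto KUnit test added later in this series:

#include <stdint.h>
#include <stdio.h>

static uint32_t get_be32(const uint8_t *p)
{
	return (uint32_t)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
}

static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	/* Header: descriptor bytes = 16 (two descriptors), 6 open streams.
	 * Descriptor 0: permanent, id 0, lifetime 1.
	 * Descriptor 1: not permanent, id 1, lifetime 2. */
	const uint8_t buf[] = {
		0, 0, 0, 16,  0, 0,  0, 6,
		0x80, 0, 0, 0, 1, 0, 0, 0,
		0x00, 0, 0, 1, 2, 0, 0, 0,
	};
	uint32_t len = get_be32(buf);
	unsigned int off;

	printf("open streams: %u\n", get_be16(buf + 6));
	for (off = 8; off < 8 + len && off + 8 <= sizeof(buf); off += 8)
		printf("stream %u: perm=%u rel_lifetime=%u\n",
		       get_be16(buf + off + 2), buf[off] >> 7,
		       buf[off + 4] & 0x3f);
	return 0;
}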
static int resp_sync_cache(struct scsi_cmnd *scp,
struct sdebug_dev_info *devip)
{
@@ -7182,6 +7314,30 @@ static ssize_t tur_ms_to_ready_show(struct device_driver *ddp, char *buf)
}
static DRIVER_ATTR_RO(tur_ms_to_ready);
+static ssize_t group_number_stats_show(struct device_driver *ddp, char *buf)
+{
+ char *p = buf, *end = buf + PAGE_SIZE;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ p += scnprintf(p, end - p, "%d %ld\n", i,
+ atomic_long_read(&writes_by_group_number[i]));
+
+ return p - buf;
+}
+
+static ssize_t group_number_stats_store(struct device_driver *ddp,
+ const char *buf, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(writes_by_group_number); i++)
+ atomic_long_set(&writes_by_group_number[i], 0);
+
+ return count;
+}
+static DRIVER_ATTR_RW(group_number_stats);
+
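group_number_stats_show() accumulates one "group count" line per entry into a single page-sized buffer, with the end - p bound keeping the output inside PAGE_SIZE; writing anything to the attribute clears the counters. A userspace sketch of the same bounded-accumulation pattern, with snprintf() standing in for the kernel's scnprintf() and a plain array standing in for the atomic counters:

#include <stdio.h>

#define BUF_SIZE 4096	/* stand-in for PAGE_SIZE */

int main(void)
{
	long counters[64] = { [0] = 12, [5] = 3 };	/* hypothetical values */
	char buf[BUF_SIZE];
	char *p = buf, *end = buf + sizeof(buf);
	unsigned int i;

	for (i = 0; i < sizeof(counters) / sizeof(counters[0]); i++) {
		int n = snprintf(p, end - p, "%u %ld\n", i, counters[i]);

		if (n < 0 || n >= end - p)	/* buffer full: stop early */
			break;
		p += n;
	}
	fwrite(buf, 1, p - buf, stdout);
	return 0;
}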
/* Note: The following array creates attribute files in the
/sys/bus/pseudo/drivers/scsi_debug directory. The advantage of these
files (over those found in the /sys/module/scsi_debug/parameters
@@ -7228,6 +7384,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_cdb_len.attr,
&driver_attr_tur_ms_to_ready.attr,
&driver_attr_zbc.attr,
+ &driver_attr_group_number_stats.attr,
NULL,
};
ATTRIBUTE_GROUPS(sdebug_drv);
diff --git a/drivers/scsi/scsi_proto_test.c b/drivers/scsi/scsi_proto_test.c
new file mode 100644
index 000000000000..7fa0a78a2ad1
--- /dev/null
+++ b/drivers/scsi/scsi_proto_test.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2023 Google LLC
+ */
+#include <kunit/test.h>
+#include <asm-generic/unaligned.h>
+#include <scsi/scsi_proto.h>
+
+static void test_scsi_proto(struct kunit *test)
+{
+ static const union {
+ struct scsi_io_group_descriptor desc;
+ u8 arr[sizeof(struct scsi_io_group_descriptor)];
+ } d = { .arr = { 0x45, 0, 0, 0, 0xb0, 0xe4, 0xe3 } };
+ KUNIT_EXPECT_EQ(test, d.desc.io_advice_hints_mode + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.st_enble + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.cs_enble + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.ic_enable + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.acdlu + 0, 1);
+ KUNIT_EXPECT_EQ(test, d.desc.rlbsr + 0, 3);
+ KUNIT_EXPECT_EQ(test, d.desc.lbm_descriptor_type + 0, 0);
+ KUNIT_EXPECT_EQ(test, d.desc.params[0] + 0, 0xe4);
+ KUNIT_EXPECT_EQ(test, d.desc.params[1] + 0, 0xe3);
+
+ static const union {
+ struct scsi_stream_status s;
+ u8 arr[sizeof(struct scsi_stream_status)];
+ } ss = { .arr = { 0x80, 0, 0x12, 0x34, 0x3f } };
+ KUNIT_EXPECT_EQ(test, ss.s.perm + 0, 1);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&ss.s.stream_identifier),
+ 0x1234);
+ KUNIT_EXPECT_EQ(test, ss.s.rel_lifetime + 0, 0x3f);
+
+ static const union {
+ struct scsi_stream_status_header h;
+ u8 arr[sizeof(struct scsi_stream_status_header)];
+ } sh = { .arr = { 1, 2, 3, 4, 0, 0, 5, 6 } };
+ KUNIT_EXPECT_EQ(test, get_unaligned_be32(&sh.h.len), 0x1020304);
+ KUNIT_EXPECT_EQ(test, get_unaligned_be16(&sh.h.number_of_open_streams),
+ 0x506);
+}
+
+static struct kunit_case scsi_proto_test_cases[] = {
+ KUNIT_CASE(test_scsi_proto),
+ {}
+};
+
+static struct kunit_suite scsi_proto_test_suite = {
+ .name = "scsi_proto",
+ .test_cases = scsi_proto_test_cases,
+};
+kunit_test_suite(scsi_proto_test_suite);
+
+MODULE_DESCRIPTION("<scsi/scsi_proto.h> unit tests");
+MODULE_AUTHOR("Bart Van Assche");
+MODULE_LICENSE("GPL");
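The test above pins down the bit-field layout of the scsi_proto.h structures by overlaying them on a byte array through a union and comparing individual fields against hand-encoded bytes (the "+ 0" merely promotes each bit-field to int for KUNIT_EXPECT_EQ). A standalone sketch of the same technique for a made-up flags byte; as in the kernel structures, the expected positions depend on the compiler's bit-field rules and endianness, so this assumes a little-endian layout:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct flags {
	uint8_t low : 3;	/* expected in bits 0-2 on little endian */
	uint8_t mid : 4;	/* bits 3-6 */
	uint8_t top : 1;	/* bit 7 */
};

int main(void)
{
	union {
		struct flags f;
		uint8_t raw[sizeof(struct flags)];
	} u = { .raw = { 0x8d } };	/* 0b1000'1101 */

	assert(u.f.low == 5);
	assert(u.f.mid == 1);
	assert(u.f.top == 1);
	printf("layout as expected\n");
	return 0;
}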
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 49dd34426d5e..775df00021e4 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -449,6 +449,7 @@ static void scsi_device_dev_release(struct device *dev)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
struct scsi_vpd *vpd_pgb0 = NULL, *vpd_pgb1 = NULL, *vpd_pgb2 = NULL;
+ struct scsi_vpd *vpd_pgb7 = NULL;
unsigned long flags;
might_sleep();
@@ -494,6 +495,8 @@ static void scsi_device_dev_release(struct device *dev)
lockdep_is_held(&sdev->inquiry_mutex));
vpd_pgb2 = rcu_replace_pointer(sdev->vpd_pgb2, vpd_pgb2,
lockdep_is_held(&sdev->inquiry_mutex));
+ vpd_pgb7 = rcu_replace_pointer(sdev->vpd_pgb7, vpd_pgb7,
+ lockdep_is_held(&sdev->inquiry_mutex));
mutex_unlock(&sdev->inquiry_mutex);
if (vpd_pg0)
@@ -510,6 +513,8 @@ static void scsi_device_dev_release(struct device *dev)
kfree_rcu(vpd_pgb1, rcu);
if (vpd_pgb2)
kfree_rcu(vpd_pgb2, rcu);
+ if (vpd_pgb7)
+ kfree_rcu(vpd_pgb7, rcu);
kfree(sdev->inquiry);
kfree(sdev);
@@ -921,6 +926,7 @@ sdev_vpd_pg_attr(pg89);
sdev_vpd_pg_attr(pgb0);
sdev_vpd_pg_attr(pgb1);
sdev_vpd_pg_attr(pgb2);
+sdev_vpd_pg_attr(pgb7);
sdev_vpd_pg_attr(pg0);
static ssize_t show_inquiry(struct file *filep, struct kobject *kobj,
@@ -1295,6 +1301,9 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj,
if (attr == &dev_attr_vpd_pgb2 && !sdev->vpd_pgb2)
return 0;
+ if (attr == &dev_attr_vpd_pgb7 && !sdev->vpd_pgb7)
+ return 0;
+
return S_IRUGO;
}
@@ -1347,6 +1356,7 @@ static struct bin_attribute *scsi_sdev_bin_attrs[] = {
&dev_attr_vpd_pgb0,
&dev_attr_vpd_pgb1,
&dev_attr_vpd_pgb2,
+ &dev_attr_vpd_pgb7,
&dev_attr_inquiry,
NULL
};
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2cc73c650ca6..ccff8f2e2e75 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -47,6 +47,7 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/rw_hint.h>
#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
@@ -1080,12 +1081,38 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
return BLK_STS_OK;
}
+/**
+ * sd_group_number() - Compute the GROUP NUMBER field
+ * @cmd: SCSI command for which to compute the value of the six-bit GROUP NUMBER
+ * field.
+ *
+ * From SBC-5 r05 (https://www.t10.org/cgi-bin/ac.pl?t=f&f=sbc5r05.pdf):
+ * 0: no relative lifetime.
+ * 1: shortest relative lifetime.
+ * 2: second shortest relative lifetime.
+ * 3 - 0x3d: intermediate relative lifetimes.
+ * 0x3e: second longest relative lifetime.
+ * 0x3f: longest relative lifetime.
+ */
+static u8 sd_group_number(struct scsi_cmnd *cmd)
+{
+ const struct request *rq = scsi_cmd_to_rq(cmd);
+ struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
+
+ if (!sdkp->rscs)
+ return 0;
+
+ return min3((u32)rq->write_hint, (u32)sdkp->permanent_stream_count,
+ 0x3fu);
+}
+
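Concretely, sd_group_number() clamps the block layer's write hint to the device's permanent stream count and to the six-bit field maximum, and returns 0 whenever the device did not advertise RSCS. A small sketch of that mapping under assumed inputs (a hypothetical device with four permanent streams):

#include <stdio.h>

static unsigned int group_number(unsigned int write_hint,
				 unsigned int permanent_stream_count,
				 int rscs)
{
	unsigned int g = write_hint;

	if (!rscs)
		return 0;
	if (g > permanent_stream_count)
		g = permanent_stream_count;
	if (g > 0x3f)
		g = 0x3f;	/* GROUP NUMBER is a six-bit field */
	return g;
}

int main(void)
{
	printf("%u %u %u %u\n",
	       group_number(0, 4, 1),	/* no hint        -> 0 */
	       group_number(2, 4, 1),	/* short lifetime -> 2 */
	       group_number(9, 4, 1),	/* clamped        -> 4 */
	       group_number(9, 4, 0));	/* no RSCS        -> 0 */
	return 0;
}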
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
sector_t lba, unsigned int nr_blocks,
unsigned char flags, unsigned int dld)
{
cmd->cmd_len = SD_EXT_CDB_SIZE;
cmd->cmnd[0] = VARIABLE_LENGTH_CMD;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[7] = 0x18; /* Additional CDB len */
cmd->cmnd[9] = write ? WRITE_32 : READ_32;
cmd->cmnd[10] = flags;
@@ -1104,7 +1131,7 @@ static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 16;
cmd->cmnd[0] = write ? WRITE_16 : READ_16;
cmd->cmnd[1] = flags | ((dld >> 2) & 0x01);
- cmd->cmnd[14] = (dld & 0x03) << 6;
+ cmd->cmnd[14] = ((dld & 0x03) << 6) | sd_group_number(cmd);
cmd->cmnd[15] = 0;
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
@@ -1119,7 +1146,7 @@ static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
cmd->cmd_len = 10;
cmd->cmnd[0] = write ? WRITE_10 : READ_10;
cmd->cmnd[1] = flags;
- cmd->cmnd[6] = 0;
+ cmd->cmnd[6] = sd_group_number(cmd);
cmd->cmnd[9] = 0;
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
@@ -1256,7 +1283,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
protect | fua, dld);
} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
- sdp->use_10_for_rw || protect) {
+ sdp->use_10_for_rw || protect || rq->write_hint) {
ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
protect | fua);
} else {
@@ -3059,6 +3086,70 @@ defaults:
sdkp->DPOFUA = 0;
}
+static bool sd_is_perm_stream(struct scsi_disk *sdkp, unsigned int stream_id)
+{
+ u8 cdb[16] = { SERVICE_ACTION_IN_16, SAI_GET_STREAM_STATUS };
+ struct {
+ struct scsi_stream_status_header h;
+ struct scsi_stream_status s;
+ } buf;
+ struct scsi_device *sdev = sdkp->device;
+ struct scsi_sense_hdr sshdr;
+ const struct scsi_exec_args exec_args = {
+ .sshdr = &sshdr,
+ };
+ int res;
+
+ put_unaligned_be16(stream_id, &cdb[4]);
+ put_unaligned_be32(sizeof(buf), &cdb[10]);
+
+ res = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, &buf, sizeof(buf),
+ SD_TIMEOUT, sdkp->max_retries, &exec_args);
+ if (res < 0)
+ return false;
+ if (scsi_status_is_check_condition(res) && scsi_sense_valid(&sshdr))
+ sd_print_sense_hdr(sdkp, &sshdr);
+ if (res)
+ return false;
+ if (get_unaligned_be32(&buf.h.len) < sizeof(struct scsi_stream_status))
+ return false;
+ return buf.h.stream_status[0].perm;
+}
+
+static void sd_read_io_hints(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+ struct scsi_device *sdp = sdkp->device;
+ const struct scsi_io_group_descriptor *desc, *start, *end;
+ struct scsi_sense_hdr sshdr;
+ struct scsi_mode_data data;
+ int res;
+
+ res = scsi_mode_sense(sdp, /*dbd=*/0x8, /*modepage=*/0x0a,
+ /*subpage=*/0x05, buffer, SD_BUF_SIZE, SD_TIMEOUT,
+ sdkp->max_retries, &data, &sshdr);
+ if (res < 0)
+ return;
+ start = (void *)buffer + data.header_length + 16;
+ end = (void *)buffer + ALIGN_DOWN(data.header_length + data.length,
+ sizeof(*end));
+ /*
+ * From "SBC-5 Constrained Streams with Data Lifetimes": Device severs
+	 * From "SBC-5 Constrained Streams with Data Lifetimes": Device servers
+ * streams.
+ */
+ for (desc = start; desc < end; desc++)
+ if (!desc->st_enble || !sd_is_perm_stream(sdkp, desc - start))
+ break;
+ sdkp->permanent_stream_count = desc - start;
+ if (sdkp->rscs && sdkp->permanent_stream_count < 2)
+ sd_printk(KERN_INFO, sdkp,
+ "Unexpected: RSCS has been set and the permanent stream count is %u\n",
+ sdkp->permanent_stream_count);
+ else if (sdkp->permanent_stream_count)
+ sd_printk(KERN_INFO, sdkp, "permanent stream count = %d\n",
+ sdkp->permanent_stream_count);
+}
+
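sd_read_io_hints() walks the stream descriptors from the mode page and stops at the first one that is either not enabled or that GET STREAM STATUS does not report as permanent; the number of descriptors before that point becomes permanent_stream_count. A reduced sketch of that scan over plain arrays of flags (the real code issues one GET STREAM STATUS per descriptor via sd_is_perm_stream()):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* stand-ins for the mode-page descriptors and stream status replies */
	const bool st_enble[]  = { true, true, true, true, true, false };
	const bool permanent[] = { true, true, false, false, false, false };
	unsigned int n = sizeof(st_enble) / sizeof(st_enble[0]);
	unsigned int i;

	for (i = 0; i < n; i++)
		if (!st_enble[i] || !permanent[i])
			break;
	printf("permanent stream count = %u\n", i);	/* prints 2 */
	return 0;
}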
/*
* The ATO bit indicates whether the DIF application tag is available
* for use by the operating system.
@@ -3166,6 +3257,18 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
rcu_read_unlock();
}
+/* Parse the Block Limits Extension VPD page (0xb7) */
+static void sd_read_block_limits_ext(struct scsi_disk *sdkp)
+{
+ struct scsi_vpd *vpd;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdkp->device->vpd_pgb7);
+	if (vpd && vpd->len >= 6)
+ sdkp->rscs = vpd->data[5] & 1;
+ rcu_read_unlock();
+}
+
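The helper above reads RSCS (Reduced Stream Control Supported) from bit 0 of byte 5 of the Block Limits Extension VPD page, so the page must be at least six bytes long before that byte is dereferenced. A tiny sketch of the same guarded extraction against a raw VPD buffer; the page contents here are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool vpd_b7_rscs(const uint8_t *vpd, unsigned int len)
{
	if (len < 6)		/* byte 5 must be present */
		return false;
	return vpd[5] & 1;
}

int main(void)
{
	/* hypothetical page: device type, page code B7h, length, ..., RSCS=1 */
	const uint8_t page[] = { 0x00, 0xb7, 0x00, 0x3c, 0x00, 0x01 };

	printf("rscs = %d\n", vpd_b7_rscs(page, sizeof(page)));
	return 0;
}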
/**
* sd_read_block_characteristics - Query block dev. characteristics
* @sdkp: disk to query
@@ -3541,6 +3644,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
+ sd_read_block_limits_ext(sdkp);
sd_read_block_characteristics(sdkp);
sd_zbc_read_zones(sdkp, buffer);
sd_read_cpr(sdkp);
@@ -3550,6 +3654,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_write_protect_flag(sdkp, buffer);
sd_read_cache_type(sdkp, buffer);
+ sd_read_io_hints(sdkp, buffer);
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 409dda5350d1..5c4285a582b2 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -125,6 +125,8 @@ struct scsi_disk {
unsigned int physical_block_size;
unsigned int max_medium_access_timeouts;
unsigned int medium_access_timed_out;
+ /* number of permanent streams */
+ u16 permanent_stream_count;
u8 media_present;
u8 write_prot;
u8 protection_type;/* Data Integrity Field */
@@ -151,6 +153,7 @@ struct scsi_disk {
unsigned urswrz : 1;
unsigned security : 1;
unsigned ignore_medium_access_errors : 1;
+ unsigned rscs : 1; /* reduced stream control support */
};
#define to_scsi_disk(obj) container_of(obj, struct scsi_disk, disk_dev)
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index ca4f4ca413f1..74350b5871dc 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -455,7 +455,7 @@ struct syscore_ops intc_syscore_ops = {
.resume = intc_resume,
};
-struct bus_type intc_subsys = {
+const struct bus_type intc_subsys = {
.name = "intc",
.dev_name = "intc",
};
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index fa73c173b56a..9b6cd1bebb4e 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -160,7 +160,7 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
/* core.c */
extern struct list_head intc_list;
extern raw_spinlock_t intc_big_lock;
-extern struct bus_type intc_subsys;
+extern const struct bus_type intc_subsys;
unsigned int intc_get_dfl_prio_level(void);
unsigned int intc_get_prio_level(unsigned int irq);
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index aeefeb725524..9e01642e72de 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -91,63 +91,42 @@ static int siox_gpio_probe(struct platform_device *pdev)
int ret;
struct siox_master *smaster;
- smaster = siox_master_alloc(&pdev->dev, sizeof(*ddata));
- if (!smaster) {
- dev_err(dev, "failed to allocate siox master\n");
- return -ENOMEM;
- }
+ smaster = devm_siox_master_alloc(dev, sizeof(*ddata));
+ if (!smaster)
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to allocate siox master\n");
platform_set_drvdata(pdev, smaster);
ddata = siox_master_get_devdata(smaster);
ddata->din = devm_gpiod_get(dev, "din", GPIOD_IN);
- if (IS_ERR(ddata->din)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->din),
- "Failed to get din GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->din))
+ return dev_err_probe(dev, PTR_ERR(ddata->din),
+ "Failed to get din GPIO\n");
ddata->dout = devm_gpiod_get(dev, "dout", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dout)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dout),
- "Failed to get dout GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dout))
+ return dev_err_probe(dev, PTR_ERR(ddata->dout),
+ "Failed to get dout GPIO\n");
ddata->dclk = devm_gpiod_get(dev, "dclk", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dclk)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dclk),
- "Failed to get dclk GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dclk))
+ return dev_err_probe(dev, PTR_ERR(ddata->dclk),
+ "Failed to get dclk GPIO\n");
ddata->dld = devm_gpiod_get(dev, "dld", GPIOD_OUT_LOW);
- if (IS_ERR(ddata->dld)) {
- ret = dev_err_probe(dev, PTR_ERR(ddata->dld),
- "Failed to get dld GPIO\n");
- goto err;
- }
+ if (IS_ERR(ddata->dld))
+ return dev_err_probe(dev, PTR_ERR(ddata->dld),
+ "Failed to get dld GPIO\n");
smaster->pushpull = siox_gpio_pushpull;
/* XXX: determine automatically like spi does */
smaster->busno = 0;
- ret = siox_master_register(smaster);
- if (ret) {
- dev_err_probe(dev, ret,
- "Failed to register siox master\n");
-err:
- siox_master_put(smaster);
- }
-
- return ret;
-}
-
-static int siox_gpio_remove(struct platform_device *pdev)
-{
- struct siox_master *master = platform_get_drvdata(pdev);
-
- siox_master_unregister(master);
+ ret = devm_siox_master_register(dev, smaster);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to register siox master\n");
return 0;
}
@@ -160,7 +139,6 @@ MODULE_DEVICE_TABLE(of, siox_gpio_dt_ids);
static struct platform_driver siox_gpio_driver = {
.probe = siox_gpio_probe,
- .remove = siox_gpio_remove,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c
index 561408583b2b..24a45920a240 100644
--- a/drivers/siox/siox-core.c
+++ b/drivers/siox/siox-core.c
@@ -498,7 +498,7 @@ static void siox_device_release(struct device *dev)
kfree(sdevice);
}
-static struct device_type siox_device_type = {
+static const struct device_type siox_device_type = {
.groups = siox_device_groups,
.release = siox_device_release,
};
@@ -543,7 +543,7 @@ static void siox_shutdown(struct device *dev)
sdriver->shutdown(sdevice);
}
-static struct bus_type siox_bus_type = {
+static const struct bus_type siox_bus_type = {
.name = "siox",
.match = siox_match,
.probe = siox_probe,
@@ -676,7 +676,7 @@ static void siox_master_release(struct device *dev)
kfree(smaster);
}
-static struct device_type siox_master_type = {
+static const struct device_type siox_master_type = {
.groups = siox_master_groups,
.release = siox_master_release,
};
@@ -707,6 +707,31 @@ struct siox_master *siox_master_alloc(struct device *dev,
}
EXPORT_SYMBOL_GPL(siox_master_alloc);
+static void devm_siox_master_put(void *data)
+{
+ struct siox_master *smaster = data;
+
+ siox_master_put(smaster);
+}
+
+struct siox_master *devm_siox_master_alloc(struct device *dev,
+ size_t size)
+{
+ struct siox_master *smaster;
+ int ret;
+
+ smaster = siox_master_alloc(dev, size);
+ if (!smaster)
+ return NULL;
+
+ ret = devm_add_action_or_reset(dev, devm_siox_master_put, smaster);
+ if (ret)
+ return NULL;
+
+ return smaster;
+}
+EXPORT_SYMBOL_GPL(devm_siox_master_alloc);
+
int siox_master_register(struct siox_master *smaster)
{
int ret;
@@ -717,6 +742,8 @@ int siox_master_register(struct siox_master *smaster)
if (!smaster->pushpull)
return -EINVAL;
+ get_device(&smaster->dev);
+
dev_set_name(&smaster->dev, "siox-%d", smaster->busno);
mutex_init(&smaster->lock);
@@ -768,6 +795,25 @@ void siox_master_unregister(struct siox_master *smaster)
}
EXPORT_SYMBOL_GPL(siox_master_unregister);
+static void devm_siox_master_unregister(void *data)
+{
+ struct siox_master *smaster = data;
+
+ siox_master_unregister(smaster);
+}
+
+int devm_siox_master_register(struct device *dev, struct siox_master *smaster)
+{
+ int ret;
+
+ ret = siox_master_register(smaster);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_siox_master_unregister, smaster);
+}
+EXPORT_SYMBOL_GPL(devm_siox_master_register);
+
static struct siox_device *siox_device_add(struct siox_master *smaster,
const char *type, size_t inbytes,
size_t outbytes, u8 statustype)
diff --git a/drivers/siox/siox.h b/drivers/siox/siox.h
index f08b43b713c5..513f2c8312f7 100644
--- a/drivers/siox/siox.h
+++ b/drivers/siox/siox.h
@@ -45,5 +45,9 @@ static inline void siox_master_put(struct siox_master *smaster)
put_device(&smaster->dev);
}
+struct siox_master *devm_siox_master_alloc(struct device *dev, size_t size);
+
int siox_master_register(struct siox_master *smaster);
void siox_master_unregister(struct siox_master *smaster);
+
+int devm_siox_master_register(struct device *dev, struct siox_master *smaster);
diff --git a/drivers/slimbus/core.c b/drivers/slimbus/core.c
index d43873bb5fe6..41e62de1f91f 100644
--- a/drivers/slimbus/core.c
+++ b/drivers/slimbus/core.c
@@ -100,7 +100,7 @@ static int slim_device_uevent(const struct device *dev, struct kobj_uevent_env *
return add_uevent_var(env, "MODALIAS=slim:%s", dev_name(&sbdev->dev));
}
-struct bus_type slimbus_bus = {
+const struct bus_type slimbus_bus = {
.name = "slimbus",
.match = slim_device_match,
.probe = slim_device_probe,
@@ -436,8 +436,8 @@ static int slim_device_alloc_laddr(struct slim_device *sbdev,
if (ret < 0)
goto err;
} else if (report_present) {
- ret = ida_simple_get(&ctrl->laddr_ida,
- 0, SLIM_LA_MANAGER - 1, GFP_KERNEL);
+ ret = ida_alloc_max(&ctrl->laddr_ida,
+ SLIM_LA_MANAGER - 1, GFP_KERNEL);
if (ret < 0)
goto err;
diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
index 77aa6d26476c..efeba8275a66 100644
--- a/drivers/slimbus/qcom-ngd-ctrl.c
+++ b/drivers/slimbus/qcom-ngd-ctrl.c
@@ -220,7 +220,7 @@ struct slimbus_power_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
-static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -262,7 +262,7 @@ static struct qmi_elem_info slimbus_select_inst_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
@@ -284,7 +284,7 @@ static struct qmi_elem_info slimbus_select_inst_resp_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
@@ -324,7 +324,7 @@ static struct qmi_elem_info slimbus_power_req_msg_v01_ei[] = {
},
};
-static struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
+static const struct qmi_elem_info slimbus_power_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 1d2b27e3ea63..b811446e0fa5 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -523,7 +523,7 @@ int dpaa2_io_service_enqueue_multiple_desc_fq(struct dpaa2_io *d,
struct qbman_eq_desc *ed;
int i, ret;
- ed = kcalloc(sizeof(struct qbman_eq_desc), 32, GFP_KERNEL);
+ ed = kcalloc(32, sizeof(struct qbman_eq_desc), GFP_KERNEL);
if (!ed)
return -ENOMEM;
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 739e4eee6b75..7e9074519ad2 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -991,7 +991,7 @@ struct qman_portal {
/* linked-list of CSCN handlers. */
struct list_head cgr_cbs;
/* list lock */
- spinlock_t cgr_lock;
+ raw_spinlock_t cgr_lock;
struct work_struct congestion_work;
struct work_struct mr_work;
char irqname[MAX_IRQNAME];
@@ -1281,7 +1281,7 @@ static int qman_create_portal(struct qman_portal *portal,
/* if the given mask is NULL, assume all CGRs can be seen */
qman_cgrs_fill(&portal->cgrs[0]);
INIT_LIST_HEAD(&portal->cgr_cbs);
- spin_lock_init(&portal->cgr_lock);
+ raw_spin_lock_init(&portal->cgr_lock);
INIT_WORK(&portal->congestion_work, qm_congestion_task);
INIT_WORK(&portal->mr_work, qm_mr_process_task);
portal->bits = 0;
@@ -1456,11 +1456,14 @@ static void qm_congestion_task(struct work_struct *work)
union qm_mc_result *mcr;
struct qman_cgr *cgr;
- spin_lock(&p->cgr_lock);
+ /*
+ * FIXME: QM_MCR_TIMEOUT is 10ms, which is too long for a raw spinlock!
+ */
+ raw_spin_lock_irq(&p->cgr_lock);
qm_mc_start(&p->p);
qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
if (!qm_mc_result_timeout(&p->p, &mcr)) {
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
dev_crit(p->config->dev, "QUERYCONGESTION timeout\n");
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
return;
@@ -1476,7 +1479,7 @@ static void qm_congestion_task(struct work_struct *work)
list_for_each_entry(cgr, &p->cgr_cbs, node)
if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
qman_p_irqsource_add(p, QM_PIRQ_CSCI);
}
@@ -2440,7 +2443,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
preempt_enable();
cgr->chan = p->config->channel;
- spin_lock(&p->cgr_lock);
+ raw_spin_lock_irq(&p->cgr_lock);
if (opts) {
struct qm_mcc_initcgr local_opts = *opts;
@@ -2477,7 +2480,7 @@ int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
qman_cgrs_get(&p->cgrs[1], cgr->cgrid))
cgr->cb(p, cgr, 1);
out:
- spin_unlock(&p->cgr_lock);
+ raw_spin_unlock_irq(&p->cgr_lock);
put_affine_portal();
return ret;
}
@@ -2512,7 +2515,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
return -EINVAL;
memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
list_del(&cgr->node);
/*
* If there are no other CGR objects for this CGRID in the list,
@@ -2537,7 +2540,7 @@ int qman_delete_cgr(struct qman_cgr *cgr)
/* add back to the list */
list_add(&cgr->node, &p->cgr_cbs);
release_lock:
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
@@ -2577,9 +2580,9 @@ static int qman_update_cgr(struct qman_cgr *cgr, struct qm_mcc_initcgr *opts)
if (!p)
return -EINVAL;
- spin_lock_irqsave(&p->cgr_lock, irqflags);
+ raw_spin_lock_irqsave(&p->cgr_lock, irqflags);
ret = qm_modify_cgr(cgr, 0, opts);
- spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ raw_spin_unlock_irqrestore(&p->cgr_lock, irqflags);
put_affine_portal();
return ret;
}
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 4458b2e0562b..6eb6cf06278e 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -287,6 +287,7 @@ EXPORT_SYMBOL(sunxi_sram_release);
struct sunxi_sramc_variant {
int num_emac_clocks;
bool has_ldo_ctrl;
+ bool has_ths_offset;
};
static const struct sunxi_sramc_variant sun4i_a10_sramc_variant = {
@@ -308,8 +309,10 @@ static const struct sunxi_sramc_variant sun50i_a64_sramc_variant = {
static const struct sunxi_sramc_variant sun50i_h616_sramc_variant = {
.num_emac_clocks = 2,
+ .has_ths_offset = true,
};
+#define SUNXI_SRAM_THS_OFFSET_REG 0x0
#define SUNXI_SRAM_EMAC_CLOCK_REG 0x30
#define SUNXI_SYS_LDO_CTRL_REG 0x150
@@ -318,6 +321,8 @@ static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
{
const struct sunxi_sramc_variant *variant = dev_get_drvdata(dev);
+ if (reg == SUNXI_SRAM_THS_OFFSET_REG && variant->has_ths_offset)
+ return true;
if (reg >= SUNXI_SRAM_EMAC_CLOCK_REG &&
reg < SUNXI_SRAM_EMAC_CLOCK_REG + variant->num_emac_clocks * 4)
return true;
@@ -327,6 +332,20 @@ static bool sunxi_sram_regmap_accessible_reg(struct device *dev,
return false;
}
+static void sunxi_sram_lock(void *_lock)
+{
+ spinlock_t *lock = _lock;
+
+ spin_lock(lock);
+}
+
+static void sunxi_sram_unlock(void *_lock)
+{
+ spinlock_t *lock = _lock;
+
+ spin_unlock(lock);
+}
+
static struct regmap_config sunxi_sram_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
@@ -336,6 +355,9 @@ static struct regmap_config sunxi_sram_regmap_config = {
/* other devices have no business accessing other registers */
.readable_reg = sunxi_sram_regmap_accessible_reg,
.writeable_reg = sunxi_sram_regmap_accessible_reg,
+ .lock = sunxi_sram_lock,
+ .unlock = sunxi_sram_unlock,
+ .lock_arg = &sram_lock,
};
static int __init sunxi_sram_probe(struct platform_device *pdev)
diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c
index 11991eb12636..079035db7dd8 100644
--- a/drivers/spi/spi-fsl-lpspi.c
+++ b/drivers/spi/spi-fsl-lpspi.c
@@ -830,11 +830,11 @@ static int fsl_lpspi_probe(struct platform_device *pdev)
is_target = of_property_read_bool((&pdev->dev)->of_node, "spi-slave");
if (is_target)
- controller = spi_alloc_target(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_target(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
else
- controller = spi_alloc_host(&pdev->dev,
- sizeof(struct fsl_lpspi_data));
+ controller = devm_spi_alloc_host(&pdev->dev,
+ sizeof(struct fsl_lpspi_data));
if (!controller)
return -ENOMEM;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 833a1bb7a914..c3e5cee18bea 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -668,8 +668,8 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
ctrl |= (MX51_ECSPI_CTRL_MAX_BURST * BITS_PER_BYTE - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
else
- ctrl |= spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
- BITS_PER_BYTE) * spi_imx->bits_per_word
+ ctrl |= (spi_imx->count / DIV_ROUND_UP(spi_imx->bits_per_word,
+ BITS_PER_BYTE) * spi_imx->bits_per_word - 1)
<< MX51_ECSPI_CTRL_BL_OFFSET;
}
}
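The "- 1" added above matters because the ECSPI BURST_LENGTH field encodes the burst size in bits minus one. With, say, count = 6 bytes and bits_per_word = 16, the burst carries 6 / 2 * 16 = 48 bits, so the field must hold 47; the worked arithmetic under those assumed values:

#include <stdio.h>

int main(void)
{
	unsigned int count = 6;		/* bytes queued for the transfer */
	unsigned int bits_per_word = 16;
	unsigned int bytes_per_word = (bits_per_word + 7) / 8;	/* DIV_ROUND_UP */
	unsigned int bl = count / bytes_per_word * bits_per_word - 1;

	printf("BURST_LENGTH field = %u (i.e. %u bits)\n", bl, bl + 1);
	return 0;
}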
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
index f982bdebd028..3c0c24ed1f3d 100644
--- a/drivers/spi/spi-lm70llp.c
+++ b/drivers/spi/spi-lm70llp.c
@@ -29,10 +29,10 @@
*
* Datasheet and Schematic:
* The LM70 is a temperature sensor chip from National Semiconductor; its
- * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ * datasheet is available at https://www.ti.com/lit/gpn/lm70
* The schematic for this particular board (the LM70EVAL-LLP) is
* available (on page 4) here:
- * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+ * https://download.datasheets.com/pdfs/documentation/nat/kit&board/lm70llpevalmanual.pdf
*
* Also see Documentation/spi/spi-lm70llp.rst. The SPI<->parport code here is
* (heavily) based on spi-butterfly by David Brownell.
diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c
index c9d6d42a88f5..17b8baf749e6 100644
--- a/drivers/spi/spi-mem.c
+++ b/drivers/spi/spi-mem.c
@@ -382,7 +382,7 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
* read path) and expect the core to use the regular SPI
* interface in other cases.
*/
- if (!ret || ret != -ENOTSUPP || ret != -EOPNOTSUPP) {
+ if (!ret || (ret != -ENOTSUPP && ret != -EOPNOTSUPP)) {
spi_mem_add_op_stats(ctlr->pcpu_statistics, op, ret);
spi_mem_add_op_stats(mem->spi->pcpu_statistics, op, ret);
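The old condition "ret != -ENOTSUPP || ret != -EOPNOTSUPP" is true for every possible ret (any value differs from at least one of the two constants), so statistics were recorded even when the controller reported the op as unsupported. A minimal demonstration of the corrected predicate, with the kernel error numbers used only as stand-in constants:

#include <stdbool.h>
#include <stdio.h>

/* stand-ins for the kernel's -ENOTSUPP / -EOPNOTSUPP / -EIO */
enum { ERR_NOTSUPP = -524, ERR_OPNOTSUPP = -95, ERR_IO = -5 };

static bool buggy(int ret)
{
	return !ret || ret != ERR_NOTSUPP || ret != ERR_OPNOTSUPP;
}

static bool fixed(int ret)
{
	return !ret || (ret != ERR_NOTSUPP && ret != ERR_OPNOTSUPP);
}

int main(void)
{
	const int vals[] = { 0, ERR_IO, ERR_NOTSUPP, ERR_OPNOTSUPP };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		printf("ret=%4d buggy=%d fixed=%d\n",
		       vals[i], buggy(vals[i]), fixed(vals[i]));
	return 0;	/* buggy records stats in all four cases */
}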
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 8d4633b353ee..e4cb22fe0075 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -788,17 +788,19 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(host);
- cnt = mdata->xfer_len / 4;
- iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
- trans->tx_buf + mdata->num_xfered, cnt);
+ if (trans->tx_buf) {
+ cnt = mdata->xfer_len / 4;
+ iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
+ trans->tx_buf + mdata->num_xfered, cnt);
- remainder = mdata->xfer_len % 4;
- if (remainder > 0) {
- reg_val = 0;
- memcpy(&reg_val,
- trans->tx_buf + (cnt * 4) + mdata->num_xfered,
- remainder);
- writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ remainder = mdata->xfer_len % 4;
+ if (remainder > 0) {
+ reg_val = 0;
+ memcpy(&reg_val,
+ trans->tx_buf + (cnt * 4) + mdata->num_xfered,
+ remainder);
+ writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+ }
}
mtk_spi_enable_transfer(host);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index f18738ae95f8..ff75838c1b5d 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1063,10 +1063,14 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
- if (spi_is_csgpiod(spi)) {
- if (!spi->controller->set_cs_timing && !activate)
- spi_delay_exec(&spi->cs_hold, NULL);
+ /*
+ * Handle chip select delays for GPIO based CS or controllers without
+ * programmable chip select timing.
+ */
+ if ((spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) && !activate)
+ spi_delay_exec(&spi->cs_hold, NULL);
+ if (spi_is_csgpiod(spi)) {
if (!(spi->mode & SPI_NO_CS)) {
/*
* Historically ACPI has no means of the GPIO polarity and
@@ -1099,16 +1103,16 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
if ((spi->controller->flags & SPI_CONTROLLER_GPIO_SS) &&
spi->controller->set_cs)
spi->controller->set_cs(spi, !enable);
-
- if (!spi->controller->set_cs_timing) {
- if (activate)
- spi_delay_exec(&spi->cs_setup, NULL);
- else
- spi_delay_exec(&spi->cs_inactive, NULL);
- }
} else if (spi->controller->set_cs) {
spi->controller->set_cs(spi, !enable);
}
+
+ if (spi_is_csgpiod(spi) || !spi->controller->set_cs_timing) {
+ if (activate)
+ spi_delay_exec(&spi->cs_setup, NULL);
+ else
+ spi_delay_exec(&spi->cs_inactive, NULL);
+ }
}
#ifdef CONFIG_HAS_DMA
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 784b9f673ead..5175b1c4f161 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -46,14 +46,10 @@ source "drivers/staging/iio/Kconfig"
source "drivers/staging/sm750fb/Kconfig"
-source "drivers/staging/emxx_udc/Kconfig"
-
source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
-source "drivers/staging/board/Kconfig"
-
source "drivers/staging/gdm724x/Kconfig"
source "drivers/staging/fbtft/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 2ea99c7b05d9..67399c0ad871 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -14,9 +14,7 @@ obj-$(CONFIG_VT6656) += vt6656/
obj-$(CONFIG_VME_BUS) += vme_user/
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
-obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index 727b956aa231..c51818c56dd2 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -165,14 +165,9 @@ static ssize_t sysfs_read(struct device *dev, char *buf,
{
struct axis_fifo *fifo = dev_get_drvdata(dev);
unsigned int read_val;
- unsigned int len;
- char tmp[32];
read_val = ioread32(fifo->base_addr + addr_offset);
- len = snprintf(tmp, sizeof(tmp), "0x%x\n", read_val);
- memcpy(buf, tmp, len);
-
- return len;
+ return sysfs_emit(buf, "0x%x\n", read_val);
}
static ssize_t isr_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig
deleted file mode 100644
index b49216768ef6..000000000000
--- a/drivers/staging/board/Kconfig
+++ /dev/null
@@ -1,12 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config STAGING_BOARD
- bool "Staging Board Support"
- depends on OF_ADDRESS && OF_IRQ && HAVE_CLK
- help
- Staging board base is to support continuous upstream
- in-tree development and integration of platform devices.
-
- Helps developers integrate devices as platform devices for
- device drivers that only provide platform device bindings.
- This in turn allows for incremental development of both
- hardware feature support and DT binding work in parallel.
diff --git a/drivers/staging/board/Makefile b/drivers/staging/board/Makefile
deleted file mode 100644
index ed7839752e12..000000000000
--- a/drivers/staging/board/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-y := board.o
-obj-$(CONFIG_ARCH_EMEV2) += kzm9d.o
-obj-$(CONFIG_ARCH_R8A7740) += armadillo800eva.o
diff --git a/drivers/staging/board/TODO b/drivers/staging/board/TODO
deleted file mode 100644
index 8db70e10aa67..000000000000
--- a/drivers/staging/board/TODO
+++ /dev/null
@@ -1,2 +0,0 @@
-* replace platform device code with DT nodes once the driver supports DT
-* remove staging board code when no more platform devices are needed
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
deleted file mode 100644
index 0225234dd7aa..000000000000
--- a/drivers/staging/board/armadillo800eva.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Staging board support for Armadillo 800 eva.
- * Enable not-yet-DT-capable devices here.
- *
- * Based on board-armadillo800eva.c
- *
- * Copyright (C) 2012 Renesas Solutions Corp.
- * Copyright (C) 2012 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/fb.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/videodev2.h>
-
-#include <video/sh_mobile_lcdc.h>
-
-#include "board.h"
-
-static struct fb_videomode lcdc0_mode = {
- .name = "AMPIER/AM-800480",
- .xres = 800,
- .yres = 480,
- .left_margin = 88,
- .right_margin = 40,
- .hsync_len = 128,
- .upper_margin = 20,
- .lower_margin = 5,
- .vsync_len = 5,
- .sync = 0,
-};
-
-static struct sh_mobile_lcdc_info lcdc0_info = {
- .clock_source = LCDC_CLK_BUS,
- .ch[0] = {
- .chan = LCDC_CHAN_MAINLCD,
- .fourcc = V4L2_PIX_FMT_RGB565,
- .interface_type = RGB24,
- .clock_divider = 5,
- .flags = 0,
- .lcd_modes = &lcdc0_mode,
- .num_modes = 1,
- .panel_cfg = {
- .width = 111,
- .height = 68,
- },
- },
-};
-
-static struct resource lcdc0_resources[] = {
- DEFINE_RES_MEM_NAMED(0xfe940000, 0x4000, "LCD0"),
- DEFINE_RES_IRQ(177 + 32),
-};
-
-static struct platform_device lcdc0_device = {
- .name = "sh_mobile_lcdc_fb",
- .num_resources = ARRAY_SIZE(lcdc0_resources),
- .resource = lcdc0_resources,
- .id = 0,
- .dev = {
- .platform_data = &lcdc0_info,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
-};
-
-static const struct board_staging_clk lcdc0_clocks[] __initconst = {
- { "lcdc0", NULL, "sh_mobile_lcdc_fb.0" },
-};
-
-static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
- {
- .pdev = &lcdc0_device,
- .clocks = lcdc0_clocks,
- .nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
- },
-};
-
-static void __init armadillo800eva_init(void)
-{
- board_staging_gic_setup_xlate("arm,pl390", 32);
- board_staging_register_devices(armadillo800eva_devices,
- ARRAY_SIZE(armadillo800eva_devices));
-}
-
-board_staging("renesas,armadillo800eva", armadillo800eva_init);
diff --git a/drivers/staging/board/board.c b/drivers/staging/board/board.c
deleted file mode 100644
index f980af037345..000000000000
--- a/drivers/staging/board/board.c
+++ /dev/null
@@ -1,204 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (C) 2014 Magnus Damm
- * Copyright (C) 2015 Glider bvba
- */
-
-#define pr_fmt(fmt) "board_staging: " fmt
-
-#include <linux/clkdev.h>
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
-
-#include "board.h"
-
-static struct device_node *irqc_node __initdata;
-static unsigned int irqc_base __initdata;
-
-static bool find_by_address(u64 base_address)
-{
- struct device_node *dn = of_find_all_nodes(NULL);
- struct resource res;
-
- while (dn) {
- if (!of_address_to_resource(dn, 0, &res)) {
- if (res.start == base_address) {
- of_node_put(dn);
- return true;
- }
- }
- dn = of_find_all_nodes(dn);
- }
-
- return false;
-}
-
-bool __init board_staging_dt_node_available(const struct resource *resource,
- unsigned int num_resources)
-{
- unsigned int i;
-
- for (i = 0; i < num_resources; i++) {
- const struct resource *r = resource + i;
-
- if (resource_type(r) == IORESOURCE_MEM)
- if (find_by_address(r->start))
- return true; /* DT node available */
- }
-
- return false; /* Nothing found */
-}
-
-int __init board_staging_gic_setup_xlate(const char *gic_match,
- unsigned int base)
-{
- WARN_ON(irqc_node);
-
- irqc_node = of_find_compatible_node(NULL, NULL, gic_match);
-
- WARN_ON(!irqc_node);
- if (!irqc_node)
- return -ENOENT;
-
- irqc_base = base;
- return 0;
-}
-
-static void __init gic_fixup_resource(struct resource *res)
-{
- struct of_phandle_args irq_data;
- unsigned int hwirq = res->start;
- unsigned int virq;
-
- if (resource_type(res) != IORESOURCE_IRQ || !irqc_node)
- return;
-
- irq_data.np = irqc_node;
- irq_data.args_count = 3;
- irq_data.args[0] = 0;
- irq_data.args[1] = hwirq - irqc_base;
- switch (res->flags &
- (IORESOURCE_IRQ_LOWEDGE | IORESOURCE_IRQ_HIGHEDGE |
- IORESOURCE_IRQ_LOWLEVEL | IORESOURCE_IRQ_HIGHLEVEL)) {
- case IORESOURCE_IRQ_LOWEDGE:
- irq_data.args[2] = IRQ_TYPE_EDGE_FALLING;
- break;
- case IORESOURCE_IRQ_HIGHEDGE:
- irq_data.args[2] = IRQ_TYPE_EDGE_RISING;
- break;
- case IORESOURCE_IRQ_LOWLEVEL:
- irq_data.args[2] = IRQ_TYPE_LEVEL_LOW;
- break;
- case IORESOURCE_IRQ_HIGHLEVEL:
- default:
- irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
- break;
- }
-
- virq = irq_create_of_mapping(&irq_data);
- if (WARN_ON(!virq))
- return;
-
- pr_debug("hwirq %u -> virq %u\n", hwirq, virq);
- res->start = virq;
-}
-
-void __init board_staging_gic_fixup_resources(struct resource *res,
- unsigned int nres)
-{
- unsigned int i;
-
- for (i = 0; i < nres; i++)
- gic_fixup_resource(&res[i]);
-}
-
-int __init board_staging_register_clock(const struct board_staging_clk *bsc)
-{
- int error;
-
- pr_debug("Aliasing clock %s for con_id %s dev_id %s\n", bsc->clk,
- bsc->con_id, bsc->dev_id);
- error = clk_add_alias(bsc->con_id, bsc->dev_id, bsc->clk, NULL);
- if (error)
- pr_err("Failed to alias clock %s (%d)\n", bsc->clk, error);
-
- return error;
-}
-
-#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-static int board_staging_add_dev_domain(struct platform_device *pdev,
- const char *domain)
-{
- struct device *dev = &pdev->dev;
- struct of_phandle_args pd_args;
- struct device_node *np;
-
- np = of_find_node_by_path(domain);
- if (!np) {
- pr_err("Cannot find domain node %s\n", domain);
- return -ENOENT;
- }
-
- pd_args.np = np;
- pd_args.args_count = 0;
-
- /* Initialization similar to device_pm_init_common() */
- spin_lock_init(&dev->power.lock);
- dev->power.early_init = true;
-
- return of_genpd_add_device(&pd_args, dev);
-}
-#else
-static inline int board_staging_add_dev_domain(struct platform_device *pdev,
- const char *domain)
-{
- return 0;
-}
-#endif
-
-int __init board_staging_register_device(const struct board_staging_dev *dev)
-{
- struct platform_device *pdev = dev->pdev;
- unsigned int i;
- int error;
-
- pr_debug("Trying to register device %s\n", pdev->name);
- if (board_staging_dt_node_available(pdev->resource,
- pdev->num_resources)) {
- pr_warn("Skipping %s, already in DT\n", pdev->name);
- return -EEXIST;
- }
-
- board_staging_gic_fixup_resources(pdev->resource, pdev->num_resources);
-
- for (i = 0; i < dev->nclocks; i++)
- board_staging_register_clock(&dev->clocks[i]);
-
- if (dev->domain)
- board_staging_add_dev_domain(pdev, dev->domain);
-
- error = platform_device_register(pdev);
- if (error) {
- pr_err("Failed to register device %s (%d)\n", pdev->name,
- error);
- return error;
- }
-
- return error;
-}
-
-void __init board_staging_register_devices(const struct board_staging_dev *devs,
- unsigned int ndevs)
-{
- unsigned int i;
-
- for (i = 0; i < ndevs; i++)
- board_staging_register_device(&devs[i]);
-}
diff --git a/drivers/staging/board/board.h b/drivers/staging/board/board.h
deleted file mode 100644
index 5609daf4d869..000000000000
--- a/drivers/staging/board/board.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __BOARD_H__
-#define __BOARD_H__
-
-#include <linux/init.h>
-#include <linux/of.h>
-
-struct board_staging_clk {
- const char *clk;
- const char *con_id;
- const char *dev_id;
-};
-
-struct board_staging_dev {
- /* Platform Device */
- struct platform_device *pdev;
- /* Clocks (optional) */
- const struct board_staging_clk *clocks;
- unsigned int nclocks;
- /* Generic PM Domain (optional) */
- const char *domain;
-};
-
-struct resource;
-
-bool board_staging_dt_node_available(const struct resource *resource,
- unsigned int num_resources);
-int board_staging_gic_setup_xlate(const char *gic_match, unsigned int base);
-void board_staging_gic_fixup_resources(struct resource *res, unsigned int nres);
-int board_staging_register_clock(const struct board_staging_clk *bsc);
-int board_staging_register_device(const struct board_staging_dev *dev);
-void board_staging_register_devices(const struct board_staging_dev *devs,
- unsigned int ndevs);
-
-#define board_staging(str, fn) \
-static int __init runtime_board_check(void) \
-{ \
- if (of_machine_is_compatible(str)) \
- fn(); \
- \
- return 0; \
-} \
- \
-device_initcall(runtime_board_check)
-
-#endif /* __BOARD_H__ */
diff --git a/drivers/staging/board/kzm9d.c b/drivers/staging/board/kzm9d.c
deleted file mode 100644
index d449a837414e..000000000000
--- a/drivers/staging/board/kzm9d.c
+++ /dev/null
@@ -1,26 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Staging board support for KZM9D. Enable not-yet-DT-capable devices here. */
-
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include "board.h"
-
-static struct resource usbs1_res[] __initdata = {
- DEFINE_RES_MEM(0xe2800000, 0x2000),
- DEFINE_RES_IRQ(159),
-};
-
-static void __init kzm9d_init(void)
-{
- board_staging_gic_setup_xlate("arm,pl390", 32);
-
- if (!board_staging_dt_node_available(usbs1_res,
- ARRAY_SIZE(usbs1_res))) {
- board_staging_gic_fixup_resources(usbs1_res,
- ARRAY_SIZE(usbs1_res));
- platform_device_register_simple("emxx_udc", -1, usbs1_res,
- ARRAY_SIZE(usbs1_res));
- }
-}
-
-board_staging("renesas,kzm9d", kzm9d_init);
diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig
deleted file mode 100644
index e7a95b3b6a2f..000000000000
--- a/drivers/staging/emxx_udc/Kconfig
+++ /dev/null
@@ -1,11 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-config USB_EMXX
- tristate "EMXX USB Function Device Controller"
- depends on USB_GADGET && (ARCH_RENESAS || COMPILE_TEST)
- help
- The Emma Mobile series of SoCs from Renesas Electronics and
- former NEC Electronics include USB Function hardware.
-
- Say "y" to link the driver statically, or "m" to build a
- dynamically linked module called "emxx_udc" and force all
- gadget drivers to also be dynamically linked.
diff --git a/drivers/staging/emxx_udc/Makefile b/drivers/staging/emxx_udc/Makefile
deleted file mode 100644
index 569c5e9a9bae..000000000000
--- a/drivers/staging/emxx_udc/Makefile
+++ /dev/null
@@ -1,2 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_USB_EMXX) := emxx_udc.o
diff --git a/drivers/staging/emxx_udc/TODO b/drivers/staging/emxx_udc/TODO
deleted file mode 100644
index 471529a470c7..000000000000
--- a/drivers/staging/emxx_udc/TODO
+++ /dev/null
@@ -1,6 +0,0 @@
-* add clock framework support (platform device with CCF needs special care)
-* break out board-specific VBUS GPIO to work with multiplatform
-* convert VBUS GPIO to use GPIO descriptors from <linux/gpio/consumer.h>
- and stop using the old GPIO API
-* DT bindings
-* move driver into drivers/usb/gadget/
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
deleted file mode 100644
index eb63daaca702..000000000000
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ /dev/null
@@ -1,3223 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/usb/gadget/emxx_udc.c
- * EMXX FCD (Function Controller Driver) for USB.
- *
- * Copyright (C) 2010 Renesas Electronics Corporation
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/proc_fs.h>
-#include <linux/clk.h>
-#include <linux/ctype.h>
-#include <linux/string.h>
-#include <linux/dma-mapping.h>
-#include <linux/workqueue.h>
-#include <linux/device.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include <linux/irq.h>
-#include <linux/gpio/consumer.h>
-
-#include "emxx_udc.h"
-
-#define DRIVER_DESC "EMXX UDC driver"
-#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-
-static struct gpio_desc *vbus_gpio;
-static int vbus_irq;
-
-static const char driver_name[] = "emxx_udc";
-
-/*===========================================================================*/
-/* Prototype */
-static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *, struct nbu2ss_ep *);
-static void _nbu2ss_ep0_enable(struct nbu2ss_udc *);
-/*static void _nbu2ss_ep0_disable(struct nbu2ss_udc *);*/
-static void _nbu2ss_ep_done(struct nbu2ss_ep *, struct nbu2ss_req *, int);
-static void _nbu2ss_set_test_mode(struct nbu2ss_udc *, u32 mode);
-static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs);
-
-static int _nbu2ss_pullup(struct nbu2ss_udc *, int);
-static void _nbu2ss_fifo_flush(struct nbu2ss_udc *, struct nbu2ss_ep *);
-
-/*===========================================================================*/
-/* Macro */
-#define _nbu2ss_zero_len_pkt(udc, epnum) \
- _nbu2ss_ep_in_end(udc, epnum, 0, 0)
-
-/*===========================================================================*/
-/* Global */
-static struct nbu2ss_udc udc_controller;
-
-/*-------------------------------------------------------------------------*/
-/* Read */
-static inline u32 _nbu2ss_readl(void __iomem *address)
-{
- return __raw_readl(address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Write */
-static inline void _nbu2ss_writel(void __iomem *address, u32 udata)
-{
- __raw_writel(udata, address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Set Bit */
-static inline void _nbu2ss_bitset(void __iomem *address, u32 udata)
-{
- u32 reg_dt = __raw_readl(address) | (udata);
-
- __raw_writel(reg_dt, address);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Clear Bit */
-static inline void _nbu2ss_bitclr(void __iomem *address, u32 udata)
-{
- u32 reg_dt = __raw_readl(address) & ~(udata);
-
- __raw_writel(reg_dt, address);
-}
-
-#ifdef UDC_DEBUG_DUMP
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dump_register(struct nbu2ss_udc *udc)
-{
- int i;
- u32 reg_data;
-
- pr_info("=== %s()\n", __func__);
-
- if (!udc) {
- pr_err("%s udc == NULL\n", __func__);
- return;
- }
-
- spin_unlock(&udc->lock);
-
- dev_dbg(&udc->dev, "\n-USB REG-\n");
- for (i = 0x0 ; i < USB_BASE_SIZE ; i += 16) {
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i));
- dev_dbg(&udc->dev, "USB%04x =%08x", i, (int)reg_data);
-
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 4));
- dev_dbg(&udc->dev, " %08x", (int)reg_data);
-
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 8));
- dev_dbg(&udc->dev, " %08x", (int)reg_data);
-
- reg_data = _nbu2ss_readl(IO_ADDRESS(USB_BASE_ADDRESS + i + 12));
- dev_dbg(&udc->dev, " %08x\n", (int)reg_data);
- }
-
- spin_lock(&udc->lock);
-}
-#endif /* UDC_DEBUG_DUMP */
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 Callback (Complete) */
-static void _nbu2ss_ep0_complete(struct usb_ep *_ep, struct usb_request *_req)
-{
- u8 recipient;
- u16 selector;
- u16 wIndex;
- u32 test_mode;
- struct usb_ctrlrequest *p_ctrl;
- struct nbu2ss_udc *udc;
-
- if (!_ep || !_req)
- return;
-
- udc = (struct nbu2ss_udc *)_req->context;
- p_ctrl = &udc->ctrl;
- if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- if (p_ctrl->bRequest == USB_REQ_SET_FEATURE) {
- /*-------------------------------------------------*/
- /* SET_FEATURE */
- recipient = (u8)(p_ctrl->bRequestType & USB_RECIP_MASK);
- selector = le16_to_cpu(p_ctrl->wValue);
- if ((recipient == USB_RECIP_DEVICE) &&
- (selector == USB_DEVICE_TEST_MODE)) {
- wIndex = le16_to_cpu(p_ctrl->wIndex);
- test_mode = (u32)(wIndex >> 8);
- _nbu2ss_set_test_mode(udc, test_mode);
- }
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Initialization of usb_request */
-static void _nbu2ss_create_ep0_packet(struct nbu2ss_udc *udc,
- void *p_buf, unsigned int length)
-{
- udc->ep0_req.req.buf = p_buf;
- udc->ep0_req.req.length = length;
- udc->ep0_req.req.dma = 0;
- udc->ep0_req.req.zero = true;
- udc->ep0_req.req.complete = _nbu2ss_ep0_complete;
- udc->ep0_req.req.status = -EINPROGRESS;
- udc->ep0_req.req.context = udc;
- udc->ep0_req.req.actual = 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Acquisition of the first address of RAM(FIFO) */
-static u32 _nbu2ss_get_begin_ram_address(struct nbu2ss_udc *udc)
-{
- u32 num, buf_type;
- u32 data, last_ram_adr, use_ram_size;
-
- struct ep_regs __iomem *p_ep_regs;
-
- last_ram_adr = (D_RAM_SIZE_CTRL / sizeof(u32)) * 2;
- use_ram_size = 0;
-
- for (num = 0; num < NUM_ENDPOINTS - 1; num++) {
- p_ep_regs = &udc->p_regs->EP_REGS[num];
- data = _nbu2ss_readl(&p_ep_regs->EP_PCKT_ADRS);
- buf_type = _nbu2ss_readl(&p_ep_regs->EP_CONTROL) & EPN_BUF_TYPE;
- if (buf_type == 0) {
- /* Single Buffer */
- use_ram_size += (data & EPN_MPKT) / sizeof(u32);
- } else {
- /* Double Buffer */
- use_ram_size += ((data & EPN_MPKT) / sizeof(u32)) * 2;
- }
-
- if ((data >> 16) > last_ram_adr)
- last_ram_adr = data >> 16;
- }
-
- return last_ram_adr + use_ram_size;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Construction of Endpoint */
-static int _nbu2ss_ep_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
- u32 begin_adrs;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* RAM Transfer Address */
- begin_adrs = _nbu2ss_get_begin_ram_address(udc);
- data = (begin_adrs << 16) | ep->ep.maxpacket;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, data);
-
- /*-------------------------------------------------------------*/
- /* Interrupt Enable */
- data = 1 << (ep->epnum + 8);
- _nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, data);
-
- /*-------------------------------------------------------------*/
- /* Endpoint Type(Mode) */
- /* Bulk, Interrupt, ISO */
- switch (ep->ep_type) {
- case USB_ENDPOINT_XFER_BULK:
- data = EPN_BULK;
- break;
-
- case USB_ENDPOINT_XFER_INT:
- data = EPN_BUF_SINGLE | EPN_INTERRUPT;
- break;
-
- case USB_ENDPOINT_XFER_ISOC:
- data = EPN_ISO;
- break;
-
- default:
- data = 0;
- break;
- }
-
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
- _nbu2ss_endpoint_toggle_reset(udc, (ep->epnum | ep->direct));
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = EPN_EN | EPN_BCLR | EPN_DIR0;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_ONAK | EPN_OSTL_EN | EPN_OSTL;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_OUT_EN | EPN_OUT_END_EN;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- data = EPN_EN | EPN_BCLR | EPN_AUTO;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_ISTL;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_IN_EN | EPN_IN_END_EN;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Release of Endpoint */
-static int _nbu2ss_epn_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
-
- if ((ep->epnum == 0) || (udc->vbus_active == 0))
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* RAM Transfer Address */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_PCKT_ADRS, 0);
-
- /*-------------------------------------------------------------*/
- /* Interrupt Disable */
- data = 1 << (ep->epnum + 8);
- _nbu2ss_bitclr(&udc->p_regs->USB_INT_ENA, data);
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = EPN_ONAK | EPN_BCLR;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_EN | EPN_DIR0;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_OUT_EN | EPN_OUT_END_EN;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- data = EPN_BCLR;
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_EN | EPN_AUTO;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-
- data = EPN_IN_EN | EPN_IN_END_EN;
- _nbu2ss_bitclr(&udc->p_regs->EP_REGS[num].EP_INT_ENA, data);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* DMA setting (without Endpoint 0) */
-static void _nbu2ss_ep_dma_init(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->USBSSCONF);
- if (((ep->epnum == 0) || (data & (1 << ep->epnum)) == 0))
- return; /* Not Support DMA */
-
- num = ep->epnum - 1;
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- data = ep->ep.maxpacket;
- _nbu2ss_writel(&udc->p_regs->EP_DCR[num].EP_DCR2, data);
-
- /*---------------------------------------------------------*/
- /* Transfer Direct */
- data = DCR1_EPN_DIR0;
- _nbu2ss_bitset(&udc->p_regs->EP_DCR[num].EP_DCR1, data);
-
- /*---------------------------------------------------------*/
- /* DMA Mode etc. */
- data = EPN_STOP_MODE | EPN_STOP_SET | EPN_DMAMODE0;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, EPN_AUTO);
-
- /*---------------------------------------------------------*/
- /* DMA Mode etc. */
- data = EPN_BURST_SET | EPN_DMAMODE0;
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_DMA_CTRL, data);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* DMA setting release */
-static void _nbu2ss_ep_dma_exit(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- u32 num;
- u32 data;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (udc->vbus_active == 0)
- return; /* VBUS OFF */
-
- data = _nbu2ss_readl(&preg->USBSSCONF);
- if ((ep->epnum == 0) || ((data & (1 << ep->epnum)) == 0))
- return; /* Not Support DMA */
-
- num = ep->epnum - 1;
-
- _nbu2ss_ep_dma_abort(udc, ep);
-
- if (ep->direct == USB_DIR_OUT) {
- /*---------------------------------------------------------*/
- /* OUT */
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, 0);
- _nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_DIR0);
- _nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
- } else {
- /*---------------------------------------------------------*/
- /* IN */
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
- _nbu2ss_writel(&preg->EP_REGS[num].EP_DMA_CTRL, 0);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* Abort DMA */
-static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- struct fc_regs __iomem *preg = udc->p_regs;
-
- _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPN_REQEN);
- mdelay(DMA_DISABLE_TIME); /* DCR1_EPN_REQEN Clear */
- _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPN_DMA_EN);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Start IN Transfer */
-static void _nbu2ss_ep_in_end(struct nbu2ss_udc *udc,
- u32 epnum, u32 data32, u32 length)
-{
- u32 data;
- u32 num;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (length >= sizeof(u32))
- return;
-
- if (epnum == 0) {
- _nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_AUTO);
-
- /* Writing of 1-4 bytes */
- if (length)
- _nbu2ss_writel(&preg->EP0_WRITE, data32);
-
- data = ((length << 5) & EP0_DW) | EP0_DEND;
- _nbu2ss_writel(&preg->EP0_CONTROL, data);
-
- _nbu2ss_bitset(&preg->EP0_CONTROL, EP0_AUTO);
- } else {
- num = epnum - 1;
-
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
-
- /* Writing of 1-4 bytes */
- if (length)
- _nbu2ss_writel(&preg->EP_REGS[num].EP_WRITE, data32);
-
- data = (((length) << 5) & EPN_DW) | EPN_DEND;
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
-
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, EPN_AUTO);
- }
-}
-
-#ifdef USE_DMA
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dma_map_single(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u8 direct)
-{
- if (req->req.dma == DMA_ADDR_INVALID) {
- if (req->unaligned) {
- req->req.dma = ep->phys_buf;
- } else {
- req->req.dma = dma_map_single(udc->gadget.dev.parent,
- req->req.buf,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
- req->mapped = 1;
- } else {
- if (!req->unaligned)
- dma_sync_single_for_device(udc->gadget.dev.parent,
- req->req.dma,
- req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
-
- req->mapped = 0;
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_dma_unmap_single(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u8 direct)
-{
- u8 data[4];
- u8 *p;
- u32 count = 0;
-
- if (direct == USB_DIR_OUT) {
- count = req->req.actual % 4;
- if (count) {
- p = req->req.buf;
- p += (req->req.actual - count);
- memcpy(data, p, count);
- }
- }
-
- if (req->mapped) {
- if (req->unaligned) {
- if (direct == USB_DIR_OUT)
- memcpy(req->req.buf, ep->virt_buf,
- req->req.actual & 0xfffffffc);
- } else {
- dma_unmap_single(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
- req->req.dma = DMA_ADDR_INVALID;
- req->mapped = 0;
- } else {
- if (!req->unaligned)
- dma_sync_single_for_cpu(udc->gadget.dev.parent,
- req->req.dma, req->req.length,
- (direct == USB_DIR_IN)
- ? DMA_TO_DEVICE
- : DMA_FROM_DEVICE);
- }
-
- if (count) {
- p = req->req.buf;
- p += (req->req.actual - count);
- memcpy(p, data, count);
- }
-}
-#endif
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 OUT Transfer (PIO) */
-static int ep0_out_pio(struct nbu2ss_udc *udc, u8 *buf, u32 length)
-{
- u32 i;
- u32 numreads = length / sizeof(u32);
- union usb_reg_access *buf32 = (union usb_reg_access *)buf;
-
- if (!numreads)
- return 0;
-
- /* PIO Read */
- for (i = 0; i < numreads; i++) {
- buf32->dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
- buf32++;
- }
-
- return numreads * sizeof(u32);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 OUT Transfer (PIO, OverBytes) */
-static int ep0_out_overbytes(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
-{
- u32 i;
- u32 i_read_size = 0;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
-
- if ((length > 0) && (length < sizeof(u32))) {
- temp_32.dw = _nbu2ss_readl(&udc->p_regs->EP0_READ);
- for (i = 0 ; i < length ; i++)
- p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
- i_read_size += length;
- }
-
- return i_read_size;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 IN Transfer (PIO) */
-static int EP0_in_PIO(struct nbu2ss_udc *udc, u8 *p_buf, u32 length)
-{
- u32 i;
- u32 i_max_length = EP0_PACKETSIZE;
- u32 i_word_length = 0;
- u32 i_write_length = 0;
- union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
-
- /*------------------------------------------------------------*/
- /* Transfer Length */
- if (i_max_length < length)
- i_word_length = i_max_length / sizeof(u32);
- else
- i_word_length = length / sizeof(u32);
-
- /*------------------------------------------------------------*/
- /* PIO */
- for (i = 0; i < i_word_length; i++) {
- _nbu2ss_writel(&udc->p_regs->EP0_WRITE, p_buf_32->dw);
- p_buf_32++;
- i_write_length += sizeof(u32);
- }
-
- return i_write_length;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint 0 IN Transfer (PIO, OverBytes) */
-static int ep0_in_overbytes(struct nbu2ss_udc *udc,
- u8 *p_buf,
- u32 i_remain_size)
-{
- u32 i;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32 = (union usb_reg_access *)p_buf;
-
- if ((i_remain_size > 0) && (i_remain_size < sizeof(u32))) {
- for (i = 0 ; i < i_remain_size ; i++)
- temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
- _nbu2ss_ep_in_end(udc, 0, temp_32.dw, i_remain_size);
-
- return i_remain_size;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Transfer NULL Packet (Endpoint 0) */
-static int EP0_send_NULL(struct nbu2ss_udc *udc, bool pid_flag)
-{
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data &= ~(u32)EP0_INAK;
-
- if (pid_flag)
- data |= (EP0_INAK_EN | EP0_PIDCLR | EP0_DEND);
- else
- data |= (EP0_INAK_EN | EP0_DEND);
-
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Receive NULL Packet (Endpoint 0) */
-static int EP0_receive_NULL(struct nbu2ss_udc *udc, bool pid_flag)
-{
- u32 data;
-
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data &= ~(u32)EP0_ONAK;
-
- if (pid_flag)
- data |= EP0_PIDCLR;
-
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_ep0_in_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_req *req)
-{
- u8 *p_buffer; /* IN Data Buffer */
- u32 data;
- u32 i_remain_size = 0;
- int result = 0;
-
- /*-------------------------------------------------------------*/
- /* End confirmation */
- if (req->req.actual == req->req.length) {
- if ((req->req.actual % EP0_PACKETSIZE) == 0) {
- if (req->zero) {
- req->zero = false;
- EP0_send_NULL(udc, false);
- return 1;
- }
- }
-
- return 0; /* Transfer End */
- }
-
- /*-------------------------------------------------------------*/
- /* NAK release */
- data = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- data |= EP0_INAK_EN;
- data &= ~(u32)EP0_INAK;
- _nbu2ss_writel(&udc->p_regs->EP0_CONTROL, data);
-
- i_remain_size = req->req.length - req->req.actual;
- p_buffer = (u8 *)req->req.buf;
- p_buffer += req->req.actual;
-
- /*-------------------------------------------------------------*/
- /* Data transfer */
- result = EP0_in_PIO(udc, p_buffer, i_remain_size);
-
- req->div_len = result;
- i_remain_size -= result;
-
- if (i_remain_size == 0) {
- EP0_send_NULL(udc, false);
- return result;
- }
-
- if ((i_remain_size < sizeof(u32)) && (result != EP0_PACKETSIZE)) {
- p_buffer += result;
- result += ep0_in_overbytes(udc, p_buffer, i_remain_size);
- req->div_len = result;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_ep0_out_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_req *req)
-{
- u8 *p_buffer;
- u32 i_remain_size;
- u32 i_recv_length;
- int result = 0;
- int f_rcv_zero;
-
- /*-------------------------------------------------------------*/
- /* Receive data confirmation */
- i_recv_length = _nbu2ss_readl(&udc->p_regs->EP0_LENGTH) & EP0_LDATA;
- if (i_recv_length != 0) {
- f_rcv_zero = 0;
-
- i_remain_size = req->req.length - req->req.actual;
- p_buffer = (u8 *)req->req.buf;
- p_buffer += req->req.actual;
-
- result = ep0_out_pio(udc, p_buffer
- , min(i_remain_size, i_recv_length));
- if (result < 0)
- return result;
-
- req->req.actual += result;
- i_recv_length -= result;
-
- if ((i_recv_length > 0) && (i_recv_length < sizeof(u32))) {
- p_buffer += result;
- i_remain_size -= result;
-
- result = ep0_out_overbytes(udc, p_buffer
- , min(i_remain_size, i_recv_length));
- req->req.actual += result;
- }
- } else {
- f_rcv_zero = 1;
- }
-
- /*-------------------------------------------------------------*/
- /* End confirmation */
- if (req->req.actual == req->req.length) {
- if ((req->req.actual % EP0_PACKETSIZE) == 0) {
- if (req->zero) {
- req->zero = false;
- EP0_receive_NULL(udc, false);
- return 1;
- }
- }
-
- return 0; /* Transfer End */
- }
-
- if ((req->req.actual % EP0_PACKETSIZE) != 0)
- return 0; /* Short Packet Transfer End */
-
- if (req->req.actual > req->req.length) {
- dev_err(udc->dev, " *** Overrun Error\n");
- return -EOVERFLOW;
- }
-
- if (f_rcv_zero != 0) {
- i_remain_size = _nbu2ss_readl(&udc->p_regs->EP0_CONTROL);
- if (i_remain_size & EP0_ONAK) {
- /*---------------------------------------------------*/
- /* NACK release */
- _nbu2ss_bitclr(&udc->p_regs->EP0_CONTROL, EP0_ONAK);
- }
- result = 1;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_out_dma(struct nbu2ss_udc *udc, struct nbu2ss_req *req,
- u32 num, u32 length)
-{
- dma_addr_t p_buffer;
- u32 mpkt;
- u32 lmpkt;
- u32 dmacnt;
- u32 burst = 1;
- u32 data;
- int result;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- req->dma_flag = true;
- p_buffer = req->req.dma;
- p_buffer += req->req.actual;
-
- /* DMA Address */
- _nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
-
- /* Number of transfer packets */
- mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
- dmacnt = length / mpkt;
- lmpkt = (length % mpkt) & ~(u32)0x03;
-
- if (dmacnt > DMA_MAX_COUNT) {
- dmacnt = DMA_MAX_COUNT;
- lmpkt = 0;
- } else if (lmpkt != 0) {
- if (dmacnt == 0)
- burst = 0; /* Burst OFF */
- dmacnt++;
- }
-
- data = mpkt | (lmpkt << 16);
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
-
- data = ((dmacnt & 0xff) << 16) | DCR1_EPN_DIR0 | DCR1_EPN_REQEN;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
-
- if (burst == 0) {
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, 0);
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
- } else {
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT
- , (dmacnt << 16));
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_BURST_SET);
- }
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
-
- result = length & ~(u32)0x03;
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 length)
-{
- u8 *p_buffer;
- u32 i;
- u32 data;
- u32 i_word_length;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32;
- int result = 0;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- if (length == 0)
- return 0;
-
- p_buffer = (u8 *)req->req.buf;
- p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
-
- i_word_length = length / sizeof(u32);
- if (i_word_length > 0) {
- /*---------------------------------------------------------*/
- /* Copy of every four bytes */
- for (i = 0; i < i_word_length; i++) {
- p_buf_32->dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
- p_buf_32++;
- }
- result = i_word_length * sizeof(u32);
- }
-
- data = length - result;
- if (data > 0) {
- /*---------------------------------------------------------*/
- /* Copy of fraction byte */
- temp_32.dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
- for (i = 0 ; i < data ; i++)
- p_buf_32->byte.DATA[i] = temp_32.byte.DATA[i];
- result += data;
- }
-
- req->req.actual += result;
-
- if ((req->req.actual == req->req.length) ||
- ((req->req.actual % ep->ep.maxpacket) != 0)) {
- result = 0;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_data(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 data_size)
-{
- u32 num;
- u32 i_buf_size;
- int nret = 1;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- i_buf_size = min((req->req.length - req->req.actual), data_size);
-
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
- (i_buf_size >= sizeof(u32))) {
- nret = _nbu2ss_out_dma(udc, req, num, i_buf_size);
- } else {
- i_buf_size = min_t(u32, i_buf_size, ep->ep.maxpacket);
- nret = _nbu2ss_epn_out_pio(udc, ep, req, i_buf_size);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_out_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- u32 num;
- u32 i_recv_length;
- int result = 1;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- /*-------------------------------------------------------------*/
- /* Receive Length */
- i_recv_length =
- _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT) & EPN_LDATA;
-
- if (i_recv_length != 0) {
- result = _nbu2ss_epn_out_data(udc, ep, req, i_recv_length);
- if (i_recv_length < ep->ep.maxpacket) {
- if (i_recv_length == result) {
- req->req.actual += result;
- result = 0;
- }
- }
- } else {
- if ((req->req.actual == req->req.length) ||
- ((req->req.actual % ep->ep.maxpacket) != 0)) {
- result = 0;
- }
- }
-
- if (result == 0) {
- if ((req->req.actual % ep->ep.maxpacket) == 0) {
- if (req->zero) {
- req->zero = false;
- return 1;
- }
- }
- }
-
- if (req->req.actual > req->req.length) {
- dev_err(udc->dev, " Overrun Error\n");
- dev_err(udc->dev, " actual = %d, length = %d\n",
- req->req.actual, req->req.length);
- result = -EOVERFLOW;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_in_dma(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 num, u32 length)
-{
- dma_addr_t p_buffer;
- u32 mpkt; /* MaxPacketSize */
- u32 lmpkt; /* Last Packet Data Size */
- u32 dmacnt; /* IN Data Size */
- u32 i_write_length;
- u32 data;
- int result = -EINVAL;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
-#ifdef USE_DMA
- if (req->req.actual == 0)
- _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN);
-#endif
- req->dma_flag = true;
-
- /* MAX Packet Size */
- mpkt = _nbu2ss_readl(&preg->EP_REGS[num].EP_PCKT_ADRS) & EPN_MPKT;
-
- i_write_length = min(DMA_MAX_COUNT * mpkt, length);
-
- /*------------------------------------------------------------*/
- /* Number of transmission packets */
- if (mpkt < i_write_length) {
- dmacnt = i_write_length / mpkt;
- lmpkt = (i_write_length % mpkt) & ~(u32)0x3;
- if (lmpkt != 0)
- dmacnt++;
- else
- lmpkt = mpkt & ~(u32)0x3;
-
- } else {
- dmacnt = 1;
- lmpkt = i_write_length & ~(u32)0x3;
- }
-
- /* Packet setting */
- data = mpkt | (lmpkt << 16);
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR2, data);
-
- /* Address setting */
- p_buffer = req->req.dma;
- p_buffer += req->req.actual;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_TADR, (u32)p_buffer);
-
- /* Packet and DMA setting */
- data = ((dmacnt & 0xff) << 16) | DCR1_EPN_REQEN;
- _nbu2ss_writel(&preg->EP_DCR[num].EP_DCR1, data);
-
- /* Packet setting of EPC */
- data = dmacnt << 16;
- _nbu2ss_writel(&preg->EP_REGS[num].EP_LEN_DCNT, data);
-
-	/* DMA setting of EPC */
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_DMA_CTRL, EPN_DMA_EN);
-
- result = i_write_length & ~(u32)0x3;
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_pio(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 length)
-{
- u8 *p_buffer;
- u32 i;
- u32 data;
- u32 i_word_length;
- union usb_reg_access temp_32;
- union usb_reg_access *p_buf_32 = NULL;
- int result = 0;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return 1; /* DMA is forwarded */
-
- if (length > 0) {
- p_buffer = (u8 *)req->req.buf;
- p_buf_32 = (union usb_reg_access *)(p_buffer + req->req.actual);
-
- i_word_length = length / sizeof(u32);
- if (i_word_length > 0) {
- for (i = 0; i < i_word_length; i++) {
- _nbu2ss_writel(&preg->EP_REGS[ep->epnum - 1].EP_WRITE,
- p_buf_32->dw);
-
- p_buf_32++;
- }
- result = i_word_length * sizeof(u32);
- }
- }
-
- if (result != ep->ep.maxpacket) {
- data = length - result;
- temp_32.dw = 0;
- for (i = 0 ; i < data ; i++)
- temp_32.byte.DATA[i] = p_buf_32->byte.DATA[i];
-
- _nbu2ss_ep_in_end(udc, ep->epnum, temp_32.dw, data);
- result += data;
- }
-
- req->div_len = result;
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_data(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep,
- struct nbu2ss_req *req, u32 data_size)
-{
- u32 num;
- int nret = 1;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- if ((ep->ep_type != USB_ENDPOINT_XFER_INT) && (req->req.dma != 0) &&
- (data_size >= sizeof(u32))) {
- nret = _nbu2ss_in_dma(udc, ep, req, num, data_size);
- } else {
- data_size = min_t(u32, data_size, ep->ep.maxpacket);
- nret = _nbu2ss_epn_in_pio(udc, ep, req, data_size);
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_epn_in_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep, struct nbu2ss_req *req)
-{
- u32 num;
- u32 i_buf_size;
- int result = 0;
- u32 status;
-
- if (ep->epnum == 0)
- return -EINVAL;
-
- num = ep->epnum - 1;
-
- status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
-
- /*-------------------------------------------------------------*/
- /* State confirmation of FIFO */
- if (req->req.actual == 0) {
- if ((status & EPN_IN_EMPTY) == 0)
- return 1; /* Not Empty */
-
- } else {
- if ((status & EPN_IN_FULL) != 0)
- return 1; /* Not Empty */
- }
-
- /*-------------------------------------------------------------*/
- /* Start transfer */
- i_buf_size = req->req.length - req->req.actual;
- if (i_buf_size > 0)
- result = _nbu2ss_epn_in_data(udc, ep, req, i_buf_size);
- else if (req->req.length == 0)
- _nbu2ss_zero_len_pkt(udc, ep->epnum);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_start_transfer(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- bool bflag)
-{
- int nret = -EINVAL;
-
- req->dma_flag = false;
- req->div_len = 0;
-
- if (req->req.length == 0) {
- req->zero = false;
- } else {
- if ((req->req.length % ep->ep.maxpacket) == 0)
- req->zero = req->req.zero;
- else
- req->zero = false;
- }
-
- if (ep->epnum == 0) {
- /* EP0 */
- switch (udc->ep0state) {
- case EP0_IN_DATA_PHASE:
- nret = _nbu2ss_ep0_in_transfer(udc, req);
- break;
-
- case EP0_OUT_DATA_PHASE:
- nret = _nbu2ss_ep0_out_transfer(udc, req);
- break;
-
- case EP0_IN_STATUS_PHASE:
- nret = EP0_send_NULL(udc, true);
- break;
-
- default:
- break;
- }
-
- } else {
- /* EPN */
- if (ep->direct == USB_DIR_OUT) {
- /* OUT */
- if (!bflag)
- nret = _nbu2ss_epn_out_transfer(udc, ep, req);
- } else {
- /* IN */
- nret = _nbu2ss_epn_in_transfer(udc, ep, req);
- }
- }
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
-{
- u32 length;
- bool bflag = false;
- struct nbu2ss_req *req;
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req)
- return;
-
- if (ep->epnum > 0) {
- length = _nbu2ss_readl(&ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
-
- length &= EPN_LDATA;
- if (length < ep->ep.maxpacket)
- bflag = true;
- }
-
- _nbu2ss_start_transfer(ep->udc, ep, req, bflag);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint Toggle Reset */
-static void _nbu2ss_endpoint_toggle_reset(struct nbu2ss_udc *udc, u8 ep_adrs)
-{
- u8 num;
- u32 data;
-
- if ((ep_adrs == 0) || (ep_adrs == 0x80))
- return;
-
- num = (ep_adrs & 0x7F) - 1;
-
- if (ep_adrs & USB_DIR_IN)
- data = EPN_IPIDCLR;
- else
- data = EPN_BCLR | EPN_OPIDCLR;
-
- _nbu2ss_bitset(&udc->p_regs->EP_REGS[num].EP_CONTROL, data);
-}
-
-/*-------------------------------------------------------------------------*/
-/* Endpoint STALL set */
-static void _nbu2ss_set_endpoint_stall(struct nbu2ss_udc *udc,
- u8 ep_adrs, bool bstall)
-{
- u8 num, epnum;
- u32 data;
- struct nbu2ss_ep *ep;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if ((ep_adrs == 0) || (ep_adrs == 0x80)) {
- if (bstall) {
- /* Set STALL */
- _nbu2ss_bitset(&preg->EP0_CONTROL, EP0_STL);
- } else {
- /* Clear STALL */
- _nbu2ss_bitclr(&preg->EP0_CONTROL, EP0_STL);
- }
- } else {
- epnum = ep_adrs & USB_ENDPOINT_NUMBER_MASK;
- num = epnum - 1;
- ep = &udc->ep[epnum];
-
- if (bstall) {
- /* Set STALL */
- ep->halted = true;
-
- if (ep_adrs & USB_DIR_IN)
- data = EPN_BCLR | EPN_ISTL;
- else
- data = EPN_OSTL_EN | EPN_OSTL;
-
- _nbu2ss_bitset(&preg->EP_REGS[num].EP_CONTROL, data);
- } else {
- if (ep_adrs & USB_DIR_IN) {
- _nbu2ss_bitclr(&preg->EP_REGS[num].EP_CONTROL
- , EPN_ISTL);
- } else {
- data =
- _nbu2ss_readl(&preg->EP_REGS[num].EP_CONTROL);
-
- data &= ~EPN_OSTL;
- data |= EPN_OSTL_EN;
-
- _nbu2ss_writel(&preg->EP_REGS[num].EP_CONTROL
- , data);
- }
-
- /* Clear STALL */
- ep->stalled = false;
- if (ep->halted) {
- ep->halted = false;
- _nbu2ss_restert_transfer(ep);
- }
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_set_test_mode(struct nbu2ss_udc *udc, u32 mode)
-{
- u32 data;
-
- if (mode > MAX_TEST_MODE_NUM)
- return;
-
- dev_info(udc->dev, "SET FEATURE : test mode = %d\n", mode);
-
- data = _nbu2ss_readl(&udc->p_regs->USB_CONTROL);
- data &= ~TEST_FORCE_ENABLE;
- data |= mode << TEST_MODE_SHIFT;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, data);
- _nbu2ss_bitset(&udc->p_regs->TEST_CONTROL, CS_TESTMODEEN);
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_set_feature_device(struct nbu2ss_udc *udc,
- u16 selector, u16 wIndex)
-{
- int result = -EOPNOTSUPP;
-
- switch (selector) {
- case USB_DEVICE_REMOTE_WAKEUP:
- if (wIndex == 0x0000) {
- udc->remote_wakeup = U2F_ENABLE;
- result = 0;
- }
- break;
-
- case USB_DEVICE_TEST_MODE:
- wIndex >>= 8;
- if (wIndex <= MAX_TEST_MODE_NUM)
- result = 0;
- break;
-
- default:
- break;
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
-{
- u8 epnum;
- u32 data = 0, bit_data;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- epnum = ep_adrs & ~USB_ENDPOINT_DIR_MASK;
- if (epnum == 0) {
- data = _nbu2ss_readl(&preg->EP0_CONTROL);
- bit_data = EP0_STL;
-
- } else {
- data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
- if ((data & EPN_EN) == 0)
- return -1;
-
- if (ep_adrs & USB_ENDPOINT_DIR_MASK)
- bit_data = EPN_ISTL;
- else
- bit_data = EPN_OSTL;
- }
-
- if ((data & bit_data) == 0)
- return 0;
- return 1;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_req_feature(struct nbu2ss_udc *udc, bool bset)
-{
- u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
- u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
- u16 selector = le16_to_cpu(udc->ctrl.wValue);
- u16 wIndex = le16_to_cpu(udc->ctrl.wIndex);
- u8 ep_adrs;
- int result = -EOPNOTSUPP;
-
- if ((udc->ctrl.wLength != 0x0000) ||
- (direction != USB_DIR_OUT)) {
- return -EINVAL;
- }
-
- switch (recipient) {
- case USB_RECIP_DEVICE:
- if (bset)
- result =
- _nbu2ss_set_feature_device(udc, selector, wIndex);
- break;
-
- case USB_RECIP_ENDPOINT:
- if (0x0000 == (wIndex & 0xFF70)) {
- if (selector == USB_ENDPOINT_HALT) {
- ep_adrs = wIndex & 0xFF;
- if (!bset) {
- _nbu2ss_endpoint_toggle_reset(udc,
- ep_adrs);
- }
-
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, bset);
-
- result = 0;
- }
- }
- break;
-
- default:
- break;
- }
-
- if (result >= 0)
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline enum usb_device_speed _nbu2ss_get_speed(struct nbu2ss_udc *udc)
-{
- u32 data;
- enum usb_device_speed speed = USB_SPEED_FULL;
-
- data = _nbu2ss_readl(&udc->p_regs->USB_STATUS);
- if (data & HIGH_SPEED)
- speed = USB_SPEED_HIGH;
-
- return speed;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_epn_set_stall(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep)
-{
- u8 ep_adrs;
- u32 regdata;
- int limit_cnt = 0;
-
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (ep->direct == USB_DIR_IN) {
- for (limit_cnt = 0
- ; limit_cnt < IN_DATA_EMPTY_COUNT
- ; limit_cnt++) {
- regdata = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
-
- if ((regdata & EPN_IN_DATA) == 0)
- break;
-
- mdelay(1);
- }
- }
-
- ep_adrs = ep->epnum | ep->direct;
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, 1);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_get_status(struct nbu2ss_udc *udc)
-{
- u32 length;
- u16 status_data = 0;
- u8 recipient = (u8)(udc->ctrl.bRequestType & USB_RECIP_MASK);
- u8 direction = (u8)(udc->ctrl.bRequestType & USB_DIR_IN);
- u8 ep_adrs;
- int result = -EINVAL;
-
- if ((udc->ctrl.wValue != 0x0000) || (direction != USB_DIR_IN))
- return result;
-
- length =
- min_t(u16, le16_to_cpu(udc->ctrl.wLength), sizeof(status_data));
- switch (recipient) {
- case USB_RECIP_DEVICE:
- if (udc->ctrl.wIndex == 0x0000) {
- if (udc->gadget.is_selfpowered)
- status_data |= BIT(USB_DEVICE_SELF_POWERED);
-
- if (udc->remote_wakeup)
- status_data |= BIT(USB_DEVICE_REMOTE_WAKEUP);
-
- result = 0;
- }
- break;
-
- case USB_RECIP_ENDPOINT:
- if (0x0000 == (le16_to_cpu(udc->ctrl.wIndex) & 0xFF70)) {
- ep_adrs = (u8)(le16_to_cpu(udc->ctrl.wIndex) & 0xFF);
- result = _nbu2ss_get_ep_stall(udc, ep_adrs);
-
- if (result > 0)
- status_data |= BIT(USB_ENDPOINT_HALT);
- }
- break;
-
- default:
- break;
- }
-
- if (result >= 0) {
- memcpy(udc->ep0_buf, &status_data, length);
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, length);
- _nbu2ss_ep0_in_transfer(udc, &udc->ep0_req);
-
- } else {
- dev_err(udc->dev, " Error GET_STATUS\n");
- }
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_clear_feature(struct nbu2ss_udc *udc)
-{
- return _nbu2ss_req_feature(udc, false);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_feature(struct nbu2ss_udc *udc)
-{
- return _nbu2ss_req_feature(udc, true);
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_address(struct nbu2ss_udc *udc)
-{
- int result = 0;
- u32 wValue = le16_to_cpu(udc->ctrl.wValue);
-
- if ((udc->ctrl.bRequestType != 0x00) ||
- (udc->ctrl.wIndex != 0x0000) ||
- (udc->ctrl.wLength != 0x0000)) {
- return -EINVAL;
- }
-
- if (wValue != (wValue & 0x007F))
- return -EINVAL;
-
- wValue <<= USB_ADRS_SHIFT;
-
- _nbu2ss_writel(&udc->p_regs->USB_ADDRESS, wValue);
- _nbu2ss_create_ep0_packet(udc, udc->ep0_buf, 0);
-
- return result;
-}
-
-/*-------------------------------------------------------------------------*/
-static int std_req_set_configuration(struct nbu2ss_udc *udc)
-{
- u32 config_value = (u32)(le16_to_cpu(udc->ctrl.wValue) & 0x00ff);
-
- if ((udc->ctrl.wIndex != 0x0000) ||
- (udc->ctrl.wLength != 0x0000) ||
- (udc->ctrl.bRequestType != 0x00)) {
- return -EINVAL;
- }
-
- udc->curr_config = config_value;
-
- if (config_value > 0) {
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, CONF);
- udc->devstate = USB_STATE_CONFIGURED;
-
- } else {
- _nbu2ss_bitclr(&udc->p_regs->USB_CONTROL, CONF);
- udc->devstate = USB_STATE_ADDRESS;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_read_request_data(struct nbu2ss_udc *udc, u32 *pdata)
-{
- *pdata = _nbu2ss_readl(&udc->p_regs->SETUP_DATA0);
- pdata++;
- *pdata = _nbu2ss_readl(&udc->p_regs->SETUP_DATA1);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_decode_request(struct nbu2ss_udc *udc)
-{
- bool bcall_back = true;
- int nret = -EINVAL;
- struct usb_ctrlrequest *p_ctrl;
-
- p_ctrl = &udc->ctrl;
- _nbu2ss_read_request_data(udc, (u32 *)p_ctrl);
-
- /* ep0 state control */
- if (p_ctrl->wLength == 0) {
- udc->ep0state = EP0_IN_STATUS_PHASE;
-
- } else {
- if (p_ctrl->bRequestType & USB_DIR_IN)
- udc->ep0state = EP0_IN_DATA_PHASE;
- else
- udc->ep0state = EP0_OUT_DATA_PHASE;
- }
-
- if ((p_ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
- switch (p_ctrl->bRequest) {
- case USB_REQ_GET_STATUS:
- nret = std_req_get_status(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_CLEAR_FEATURE:
- nret = std_req_clear_feature(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_SET_FEATURE:
- nret = std_req_set_feature(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_SET_ADDRESS:
- nret = std_req_set_address(udc);
- bcall_back = false;
- break;
-
- case USB_REQ_SET_CONFIGURATION:
- nret = std_req_set_configuration(udc);
- break;
-
- default:
- break;
- }
- }
-
- if (!bcall_back) {
- if (udc->ep0state == EP0_IN_STATUS_PHASE) {
- if (nret >= 0) {
- /*--------------------------------------*/
- /* Status Stage */
- nret = EP0_send_NULL(udc, true);
- }
- }
-
- } else {
- spin_unlock(&udc->lock);
- nret = udc->driver->setup(&udc->gadget, &udc->ctrl);
- spin_lock(&udc->lock);
- }
-
- if (nret < 0)
- udc->ep0state = EP0_IDLE;
-
- return nret;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_in_data_stage(struct nbu2ss_udc *udc)
-{
- int nret;
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req)
- req = &udc->ep0_req;
-
- req->req.actual += req->div_len;
- req->div_len = 0;
-
- nret = _nbu2ss_ep0_in_transfer(udc, req);
- if (nret == 0) {
- udc->ep0state = EP0_OUT_STATUS_PAHSE;
- EP0_receive_NULL(udc, true);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_out_data_stage(struct nbu2ss_udc *udc)
-{
- int nret;
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req)
- req = &udc->ep0_req;
-
- nret = _nbu2ss_ep0_out_transfer(udc, req);
- if (nret == 0) {
- udc->ep0state = EP0_IN_STATUS_PHASE;
- EP0_send_NULL(udc, true);
-
- } else if (nret < 0) {
- _nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, EP0_BCLR);
- req->req.status = nret;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline int _nbu2ss_ep0_status_stage(struct nbu2ss_udc *udc)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[0];
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req) {
- req = &udc->ep0_req;
- if (req->req.complete)
- req->req.complete(&ep->ep, &req->req);
-
- } else {
- if (req->req.complete)
- _nbu2ss_ep_done(ep, req, 0);
- }
-
- udc->ep0state = EP0_IDLE;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_ep0_int(struct nbu2ss_udc *udc)
-{
- int i;
- u32 status;
- u32 intr;
- int nret = -1;
-
- status = _nbu2ss_readl(&udc->p_regs->EP0_STATUS);
- intr = status & EP0_STATUS_RW_BIT;
- _nbu2ss_writel(&udc->p_regs->EP0_STATUS, ~intr);
-
- status &= (SETUP_INT | EP0_IN_INT | EP0_OUT_INT
- | STG_END_INT | EP0_OUT_NULL_INT);
-
- if (status == 0) {
-		dev_info(udc->dev, "%s: undecoded interrupt\n", __func__);
- dev_info(udc->dev, "EP0_STATUS = 0x%08x\n", intr);
- return;
- }
-
- if (udc->gadget.speed == USB_SPEED_UNKNOWN)
- udc->gadget.speed = _nbu2ss_get_speed(udc);
-
- for (i = 0; i < EP0_END_XFER; i++) {
- switch (udc->ep0state) {
- case EP0_IDLE:
- if (status & SETUP_INT) {
- status = 0;
- nret = _nbu2ss_decode_request(udc);
- }
- break;
-
- case EP0_IN_DATA_PHASE:
- if (status & EP0_IN_INT) {
- status &= ~EP0_IN_INT;
- nret = _nbu2ss_ep0_in_data_stage(udc);
- }
- break;
-
- case EP0_OUT_DATA_PHASE:
- if (status & EP0_OUT_INT) {
- status &= ~EP0_OUT_INT;
- nret = _nbu2ss_ep0_out_data_stage(udc);
- }
- break;
-
- case EP0_IN_STATUS_PHASE:
- if ((status & STG_END_INT) || (status & SETUP_INT)) {
- status &= ~(STG_END_INT | EP0_IN_INT);
- nret = _nbu2ss_ep0_status_stage(udc);
- }
- break;
-
- case EP0_OUT_STATUS_PAHSE:
- if ((status & STG_END_INT) || (status & SETUP_INT) ||
- (status & EP0_OUT_NULL_INT)) {
- status &= ~(STG_END_INT
- | EP0_OUT_INT
- | EP0_OUT_NULL_INT);
-
- nret = _nbu2ss_ep0_status_stage(udc);
- }
-
- break;
-
- default:
- status = 0;
- break;
- }
-
- if (status == 0)
- break;
- }
-
- if (nret < 0) {
- /* Send Stall */
- _nbu2ss_set_endpoint_stall(udc, 0, true);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_ep_done(struct nbu2ss_ep *ep,
- struct nbu2ss_req *req,
- int status)
-{
- struct nbu2ss_udc *udc = ep->udc;
-
- list_del_init(&req->queue);
-
- if (status == -ECONNRESET)
- _nbu2ss_fifo_flush(udc, ep);
-
- if (likely(req->req.status == -EINPROGRESS))
- req->req.status = status;
-
- if (ep->stalled) {
- _nbu2ss_epn_set_stall(udc, ep);
- } else {
- if (!list_empty(&ep->queue))
- _nbu2ss_restert_transfer(ep);
- }
-
-#ifdef USE_DMA
- if ((ep->direct == USB_DIR_OUT) && (ep->epnum > 0) &&
- (req->req.dma != 0))
- _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT);
-#endif
-
- spin_unlock(&udc->lock);
- req->req.complete(&ep->ep, &req->req);
- spin_lock(&udc->lock);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_in_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int result = 0;
- u32 status;
-
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (req->dma_flag)
- return; /* DMA is forwarded */
-
- req->req.actual += req->div_len;
- req->div_len = 0;
-
- if (req->req.actual != req->req.length) {
- /*---------------------------------------------------------*/
- /* remainder of data */
- result = _nbu2ss_epn_in_transfer(udc, ep, req);
-
- } else {
- if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
- status =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
-
- if ((status & EPN_IN_FULL) == 0) {
- /*-----------------------------------------*/
- /* 0 Length Packet */
- req->zero = false;
- _nbu2ss_zero_len_pkt(udc, ep->epnum);
- }
- return;
- }
- }
-
- if (result <= 0) {
- /*---------------------------------------------------------*/
- /* Complete */
- _nbu2ss_ep_done(ep, req, result);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_out_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int result;
-
- result = _nbu2ss_epn_out_transfer(udc, ep, req);
- if (result <= 0)
- _nbu2ss_ep_done(ep, req, result);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_in_dma_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- u32 mpkt;
- u32 size;
- struct usb_request *preq;
-
- preq = &req->req;
-
- if (!req->dma_flag)
- return;
-
- preq->actual += req->div_len;
- req->div_len = 0;
- req->dma_flag = false;
-
-#ifdef USE_DMA
- _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN);
-#endif
-
- if (preq->actual != preq->length) {
- _nbu2ss_epn_in_transfer(udc, ep, req);
- } else {
- mpkt = ep->ep.maxpacket;
- size = preq->actual % mpkt;
- if (size > 0) {
- if (((preq->actual & 0x03) == 0) && (size < mpkt))
- _nbu2ss_ep_in_end(udc, ep->epnum, 0, 0);
- } else {
- _nbu2ss_epn_in_int(udc, ep, req);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_out_dma_int(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- struct nbu2ss_req *req)
-{
- int i;
- u32 num;
- u32 dmacnt, ep_dmacnt;
- u32 mpkt;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- num = ep->epnum - 1;
-
- if (req->req.actual == req->req.length) {
- if ((req->req.length % ep->ep.maxpacket) && !req->zero) {
- req->div_len = 0;
- req->dma_flag = false;
- _nbu2ss_ep_done(ep, req, 0);
- return;
- }
- }
-
- ep_dmacnt = _nbu2ss_readl(&preg->EP_REGS[num].EP_LEN_DCNT)
- & EPN_DMACNT;
- ep_dmacnt >>= 16;
-
- for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
- dmacnt = _nbu2ss_readl(&preg->EP_DCR[num].EP_DCR1)
- & DCR1_EPN_DMACNT;
- dmacnt >>= 16;
- if (ep_dmacnt == dmacnt)
- break;
- }
-
- _nbu2ss_bitclr(&preg->EP_DCR[num].EP_DCR1, DCR1_EPN_REQEN);
-
- if (dmacnt != 0) {
- mpkt = ep->ep.maxpacket;
- if ((req->div_len % mpkt) == 0)
- req->div_len -= mpkt * dmacnt;
- }
-
- if ((req->req.actual % ep->ep.maxpacket) > 0) {
- if (req->req.actual == req->div_len) {
- req->div_len = 0;
- req->dma_flag = false;
- _nbu2ss_ep_done(ep, req, 0);
- return;
- }
- }
-
- req->req.actual += req->div_len;
- req->div_len = 0;
- req->dma_flag = false;
-
- _nbu2ss_epn_out_int(udc, ep, req);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_epn_int(struct nbu2ss_udc *udc, u32 epnum)
-{
- u32 num;
- u32 status;
-
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep = &udc->ep[epnum];
-
- num = epnum - 1;
-
- /* Interrupt Status */
- status = _nbu2ss_readl(&udc->p_regs->EP_REGS[num].EP_STATUS);
-
- /* Interrupt Clear */
- _nbu2ss_writel(&udc->p_regs->EP_REGS[num].EP_STATUS, ~status);
-
- req = list_first_entry_or_null(&ep->queue, struct nbu2ss_req, queue);
- if (!req) {
- /* pr_warn("=== %s(%d) req == NULL\n", __func__, epnum); */
- return;
- }
-
- if (status & EPN_OUT_END_INT) {
- status &= ~EPN_OUT_INT;
- _nbu2ss_epn_out_dma_int(udc, ep, req);
- }
-
- if (status & EPN_OUT_INT)
- _nbu2ss_epn_out_int(udc, ep, req);
-
- if (status & EPN_IN_END_INT) {
- status &= ~EPN_IN_INT;
- _nbu2ss_epn_in_dma_int(udc, ep, req);
- }
-
- if (status & EPN_IN_INT)
- _nbu2ss_epn_in_int(udc, ep, req);
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_ep_int(struct nbu2ss_udc *udc, u32 epnum)
-{
- if (epnum == 0)
- _nbu2ss_ep0_int(udc);
- else
- _nbu2ss_epn_int(udc, epnum);
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_ep0_enable(struct nbu2ss_udc *udc)
-{
- _nbu2ss_bitset(&udc->p_regs->EP0_CONTROL, (EP0_AUTO | EP0_BCLR));
- _nbu2ss_writel(&udc->p_regs->EP0_INT_ENA, EP0_INT_EN_BIT);
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_nuke(struct nbu2ss_udc *udc,
- struct nbu2ss_ep *ep,
- int status)
-{
- struct nbu2ss_req *req, *n;
-
- /* Endpoint Disable */
- _nbu2ss_epn_exit(udc, ep);
-
- /* DMA Disable */
- _nbu2ss_ep_dma_exit(udc, ep);
-
- if (list_empty(&ep->queue))
- return 0;
-
- /* called with irqs blocked */
- list_for_each_entry_safe(req, n, &ep->queue, queue) {
- _nbu2ss_ep_done(ep, req, status);
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_quiesce(struct nbu2ss_udc *udc)
-{
- struct nbu2ss_ep *ep;
-
- udc->gadget.speed = USB_SPEED_UNKNOWN;
-
- _nbu2ss_nuke(udc, &udc->ep[0], -ESHUTDOWN);
-
- /* Endpoint n */
- list_for_each_entry(ep, &udc->gadget.ep_list, ep.ep_list) {
- _nbu2ss_nuke(udc, ep, -ESHUTDOWN);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_pullup(struct nbu2ss_udc *udc, int is_on)
-{
- u32 reg_dt;
-
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- if (is_on) {
- /* D+ Pullup */
- if (udc->driver) {
- reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL)
- | PUE2) & ~(u32)CONNECTB;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
- }
-
- } else {
- /* D+ Pulldown */
- reg_dt = (_nbu2ss_readl(&udc->p_regs->USB_CONTROL) | CONNECTB)
- & ~(u32)PUE2;
-
- _nbu2ss_writel(&udc->p_regs->USB_CONTROL, reg_dt);
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- }
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_fifo_flush(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
-{
- struct fc_regs __iomem *p = udc->p_regs;
-
- if (udc->vbus_active == 0)
- return;
-
- if (ep->epnum == 0) {
- /* EP0 */
- _nbu2ss_bitset(&p->EP0_CONTROL, EP0_BCLR);
-
- } else {
- /* EPN */
- _nbu2ss_ep_dma_abort(udc, ep);
- _nbu2ss_bitset(&p->EP_REGS[ep->epnum - 1].EP_CONTROL, EPN_BCLR);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int _nbu2ss_enable_controller(struct nbu2ss_udc *udc)
-{
- int waitcnt = 0;
-
- if (udc->udc_enabled)
- return 0;
-
- /* Reset */
- _nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
- udelay(EPC_RST_DISABLE_TIME); /* 1us wait */
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, DIRPD);
- mdelay(EPC_DIRPD_DISABLE_TIME); /* 1ms wait */
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
-
- _nbu2ss_writel(&udc->p_regs->AHBSCTR, WAIT_MODE);
-
- _nbu2ss_writel(&udc->p_regs->AHBMCTR,
- HBUSREQ_MODE | HTRANS_MODE | WBURST_TYPE);
-
- while (!(_nbu2ss_readl(&udc->p_regs->EPCTR) & PLL_LOCK)) {
- waitcnt++;
- udelay(1); /* 1us wait */
- if (waitcnt == EPC_PLL_LOCK_COUNT) {
- dev_err(udc->dev, "*** Reset Cancel failed\n");
- return -EINVAL;
- }
- }
-
- _nbu2ss_bitset(&udc->p_regs->UTMI_CHARACTER_1, USB_SQUSET);
-
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, (INT_SEL | SOF_RCV));
-
- /* EP0 */
- _nbu2ss_ep0_enable(udc);
-
- /* USB Interrupt Enable */
- _nbu2ss_bitset(&udc->p_regs->USB_INT_ENA, USB_INT_EN_BIT);
-
- udc->udc_enabled = true;
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_reset_controller(struct nbu2ss_udc *udc)
-{
- _nbu2ss_bitset(&udc->p_regs->EPCTR, EPC_RST);
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, EPC_RST);
-}
-
-/*-------------------------------------------------------------------------*/
-static void _nbu2ss_disable_controller(struct nbu2ss_udc *udc)
-{
- if (udc->udc_enabled) {
- udc->udc_enabled = false;
- _nbu2ss_reset_controller(udc);
- _nbu2ss_bitset(&udc->p_regs->EPCTR, (DIRPD | EPC_RST));
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_check_vbus(struct nbu2ss_udc *udc)
-{
- int nret;
- u32 reg_dt;
-
- /* chattering */
- mdelay(VBUS_CHATTERING_MDELAY); /* wait (ms) */
-
- /* VBUS ON Check*/
- reg_dt = gpiod_get_value(vbus_gpio);
- if (reg_dt == 0) {
- udc->linux_suspended = 0;
-
- _nbu2ss_reset_controller(udc);
- dev_info(udc->dev, " ----- VBUS OFF\n");
-
- if (udc->vbus_active == 1) {
- /* VBUS OFF */
- udc->vbus_active = 0;
- if (udc->usb_suspended) {
- udc->usb_suspended = 0;
- /* _nbu2ss_reset_controller(udc); */
- }
- udc->devstate = USB_STATE_NOTATTACHED;
-
- _nbu2ss_quiesce(udc);
- if (udc->driver) {
- spin_unlock(&udc->lock);
- udc->driver->disconnect(&udc->gadget);
- spin_lock(&udc->lock);
- }
-
- _nbu2ss_disable_controller(udc);
- }
- } else {
- mdelay(5); /* wait (5ms) */
- reg_dt = gpiod_get_value(vbus_gpio);
- if (reg_dt == 0)
- return;
-
- dev_info(udc->dev, " ----- VBUS ON\n");
-
- if (udc->linux_suspended)
- return;
-
- if (udc->vbus_active == 0) {
- /* VBUS ON */
- udc->vbus_active = 1;
- udc->devstate = USB_STATE_POWERED;
-
- nret = _nbu2ss_enable_controller(udc);
- if (nret < 0) {
- _nbu2ss_disable_controller(udc);
- udc->vbus_active = 0;
- return;
- }
-
- _nbu2ss_pullup(udc, 1);
-
-#ifdef UDC_DEBUG_DUMP
- _nbu2ss_dump_register(udc);
-#endif /* UDC_DEBUG_DUMP */
-
- } else {
- if (udc->devstate == USB_STATE_POWERED)
- _nbu2ss_pullup(udc, 1);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_bus_reset(struct nbu2ss_udc *udc)
-{
- udc->devstate = USB_STATE_DEFAULT;
- udc->remote_wakeup = 0;
-
- _nbu2ss_quiesce(udc);
-
- udc->ep0state = EP0_IDLE;
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_usb_resume(struct nbu2ss_udc *udc)
-{
- if (udc->usb_suspended == 1) {
- udc->usb_suspended = 0;
- if (udc->driver && udc->driver->resume) {
- spin_unlock(&udc->lock);
- udc->driver->resume(&udc->gadget);
- spin_lock(&udc->lock);
- }
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static inline void _nbu2ss_int_usb_suspend(struct nbu2ss_udc *udc)
-{
- u32 reg_dt;
-
- if (udc->usb_suspended == 0) {
- reg_dt = gpiod_get_value(vbus_gpio);
-
- if (reg_dt == 0)
- return;
-
- udc->usb_suspended = 1;
- if (udc->driver && udc->driver->suspend) {
- spin_unlock(&udc->lock);
- udc->driver->suspend(&udc->gadget);
- spin_lock(&udc->lock);
- }
-
- _nbu2ss_bitset(&udc->p_regs->USB_CONTROL, SUSPEND);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-/* VBUS (GPIO153) Interrupt */
-static irqreturn_t _nbu2ss_vbus_irq(int irq, void *_udc)
-{
- struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
-
- spin_lock(&udc->lock);
- _nbu2ss_check_vbus(udc);
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-/* Interrupt (udc) */
-static irqreturn_t _nbu2ss_udc_irq(int irq, void *_udc)
-{
- u8 suspend_flag = 0;
- u32 status;
- u32 epnum, int_bit;
-
- struct nbu2ss_udc *udc = (struct nbu2ss_udc *)_udc;
- struct fc_regs __iomem *preg = udc->p_regs;
-
- if (gpiod_get_value(vbus_gpio) == 0) {
- _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
- _nbu2ss_writel(&preg->USB_INT_ENA, 0);
- return IRQ_HANDLED;
- }
-
- spin_lock(&udc->lock);
-
- for (;;) {
- if (gpiod_get_value(vbus_gpio) == 0) {
- _nbu2ss_writel(&preg->USB_INT_STA, ~USB_INT_STA_RW);
- _nbu2ss_writel(&preg->USB_INT_ENA, 0);
- status = 0;
- } else {
- status = _nbu2ss_readl(&preg->USB_INT_STA);
- }
-
- if (status == 0)
- break;
-
- _nbu2ss_writel(&preg->USB_INT_STA, ~(status & USB_INT_STA_RW));
-
- if (status & USB_RST_INT) {
- /* USB Reset */
- _nbu2ss_int_bus_reset(udc);
- }
-
- if (status & RSUM_INT) {
- /* Resume */
- _nbu2ss_int_usb_resume(udc);
- }
-
- if (status & SPND_INT) {
- /* Suspend */
- suspend_flag = 1;
- }
-
- if (status & EPN_INT) {
- /* EP INT */
- int_bit = status >> 8;
-
- for (epnum = 0; epnum < NUM_ENDPOINTS; epnum++) {
- if (0x01 & int_bit)
- _nbu2ss_ep_int(udc, epnum);
-
- int_bit >>= 1;
-
- if (int_bit == 0)
- break;
- }
- }
- }
-
- if (suspend_flag)
- _nbu2ss_int_usb_suspend(udc);
-
- spin_unlock(&udc->lock);
-
- return IRQ_HANDLED;
-}
-
-/*-------------------------------------------------------------------------*/
-/* usb_ep_ops */
-static int nbu2ss_ep_enable(struct usb_ep *_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- u8 ep_type;
- unsigned long flags;
-
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
-
- if (!_ep || !desc) {
- pr_err(" *** %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep->udc) {
- pr_err(" *** %s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- ep_type = usb_endpoint_type(desc);
- if ((ep_type == USB_ENDPOINT_XFER_CONTROL) ||
- (ep_type == USB_ENDPOINT_XFER_ISOC)) {
- pr_err(" *** %s, bat bmAttributes\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- if ((!udc->driver) || (udc->gadget.speed == USB_SPEED_UNKNOWN)) {
- dev_err(ep->udc->dev, " *** %s, udc !!\n", __func__);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
- ep->desc = desc;
- ep->epnum = usb_endpoint_num(desc);
- ep->direct = desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK;
- ep->ep_type = ep_type;
- ep->wedged = 0;
- ep->halted = false;
- ep->stalled = false;
-
- ep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize);
-
- /* DMA setting */
- _nbu2ss_ep_dma_init(udc, ep);
-
- /* Endpoint setting */
- _nbu2ss_ep_init(udc, ep);
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_disable(struct usb_ep *_ep)
-{
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!_ep) {
- pr_err(" *** %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- if (!ep->udc) {
- pr_err("udc: *** %s, ep == NULL !!\n", __func__);
- return -EINVAL;
- }
-
- udc = ep->udc;
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_nuke(udc, ep, -EINPROGRESS); /* dequeue request */
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static struct usb_request *nbu2ss_ep_alloc_request(struct usb_ep *ep,
- gfp_t gfp_flags)
-{
- struct nbu2ss_req *req;
-
- req = kzalloc(sizeof(*req), gfp_flags);
- if (!req)
- return NULL;
-
-#ifdef USE_DMA
- req->req.dma = DMA_ADDR_INVALID;
-#endif
- INIT_LIST_HEAD(&req->queue);
-
- return &req->req;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_ep_free_request(struct usb_ep *_ep,
- struct usb_request *_req)
-{
- struct nbu2ss_req *req;
-
- if (_req) {
- req = container_of(_req, struct nbu2ss_req, req);
-
- kfree(req);
- }
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_queue(struct usb_ep *_ep,
- struct usb_request *_req, gfp_t gfp_flags)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
- bool bflag;
- int result = -EINVAL;
-
- /* catch various bogus parameters */
- if (!_ep || !_req) {
- if (!_ep)
- pr_err("udc: %s --- _ep == NULL\n", __func__);
-
- if (!_req)
- pr_err("udc: %s --- _req == NULL\n", __func__);
-
- return -EINVAL;
- }
-
- req = container_of(_req, struct nbu2ss_req, req);
- if (unlikely(!_req->complete ||
- !_req->buf ||
- !list_empty(&req->queue))) {
- if (!_req->complete)
- pr_err("udc: %s --- !_req->complete\n", __func__);
-
- if (!_req->buf)
- pr_err("udc:%s --- !_req->buf\n", __func__);
-
- if (!list_empty(&req->queue))
- pr_err("%s --- !list_empty(&req->queue)\n", __func__);
-
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
- udc = ep->udc;
-
- if (udc->vbus_active == 0) {
- dev_info(udc->dev, "Can't ep_queue (VBUS OFF)\n");
- return -ESHUTDOWN;
- }
-
- if (unlikely(!udc->driver)) {
- dev_err(udc->dev, "%s, bogus device state %p\n", __func__,
- udc->driver);
- return -ESHUTDOWN;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
-#ifdef USE_DMA
- if ((uintptr_t)req->req.buf & 0x3)
- req->unaligned = true;
- else
- req->unaligned = false;
-
- if (req->unaligned) {
- if (!ep->virt_buf) {
- ep->virt_buf = dma_alloc_coherent(udc->dev, PAGE_SIZE,
- &ep->phys_buf,
- GFP_ATOMIC | GFP_DMA);
- if (!ep->virt_buf) {
- spin_unlock_irqrestore(&udc->lock, flags);
- return -ENOMEM;
- }
- }
- if (ep->epnum > 0) {
- if (ep->direct == USB_DIR_IN)
- memcpy(ep->virt_buf, req->req.buf,
- req->req.length);
- }
- }
-
- if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT) &&
- (req->req.dma != 0))
- _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT);
-#endif
-
- _req->status = -EINPROGRESS;
- _req->actual = 0;
-
- bflag = list_empty(&ep->queue);
- list_add_tail(&req->queue, &ep->queue);
-
- if (bflag && !ep->stalled) {
- result = _nbu2ss_start_transfer(udc, ep, req, false);
- if (result < 0) {
- dev_err(udc->dev, " *** %s, result = %d\n", __func__,
- result);
- list_del(&req->queue);
- } else if ((ep->epnum > 0) && (ep->direct == USB_DIR_OUT)) {
-#ifdef USE_DMA
- if (req->req.length < 4 &&
- req->req.length == req->req.actual)
-#else
- if (req->req.length == req->req.actual)
-#endif
- _nbu2ss_ep_done(ep, req, result);
- }
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
-{
- struct nbu2ss_req *req;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- /* catch various bogus parameters */
- if (!_ep || !_req) {
- /* pr_err("%s, bad param(1)\n", __func__); */
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc)
- return -EINVAL;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(req, &ep->queue, queue) {
- if (&req->req == _req) {
- _nbu2ss_ep_done(ep, req, -ECONNRESET);
- spin_unlock_irqrestore(&udc->lock, flags);
- return 0;
- }
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- pr_debug("%s no queue(EINVAL)\n", __func__);
-
- return -EINVAL;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_set_halt(struct usb_ep *_ep, int value)
-{
- u8 ep_adrs;
- unsigned long flags;
-
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
-
- if (!_ep) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, " *** %s, bad udc\n", __func__);
- return -EINVAL;
- }
-
- spin_lock_irqsave(&udc->lock, flags);
-
- ep_adrs = ep->epnum | ep->direct;
- if (value == 0) {
- _nbu2ss_set_endpoint_stall(udc, ep_adrs, value);
- ep->stalled = false;
- } else {
- if (list_empty(&ep->queue))
- _nbu2ss_epn_set_stall(udc, ep);
- else
- ep->stalled = true;
- }
-
- if (value == 0)
- ep->wedged = 0;
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-static int nbu2ss_ep_set_wedge(struct usb_ep *_ep)
-{
- return nbu2ss_ep_set_halt(_ep, 1);
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
-{
- u32 data;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
- struct fc_regs __iomem *preg;
-
- if (!_ep) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, "%s, bad udc\n", __func__);
- return -EINVAL;
- }
-
- preg = udc->p_regs;
-
- data = gpiod_get_value(vbus_gpio);
- if (data == 0)
- return -EINVAL;
-
- spin_lock_irqsave(&udc->lock, flags);
-
- if (ep->epnum == 0) {
- data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
-
- } else {
- data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
- & EPN_LDATA;
- }
-
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_ep_fifo_flush(struct usb_ep *_ep)
-{
- u32 data;
- struct nbu2ss_ep *ep;
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!_ep) {
- pr_err("udc: %s, bad param\n", __func__);
- return;
- }
-
- ep = container_of(_ep, struct nbu2ss_ep, ep);
-
- udc = ep->udc;
- if (!udc) {
- dev_err(ep->udc->dev, "%s, bad udc\n", __func__);
- return;
- }
-
- data = gpiod_get_value(vbus_gpio);
- if (data == 0)
- return;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_fifo_flush(udc, ep);
- spin_unlock_irqrestore(&udc->lock, flags);
-}
-
-/*-------------------------------------------------------------------------*/
-static const struct usb_ep_ops nbu2ss_ep_ops = {
- .enable = nbu2ss_ep_enable,
- .disable = nbu2ss_ep_disable,
-
- .alloc_request = nbu2ss_ep_alloc_request,
- .free_request = nbu2ss_ep_free_request,
-
- .queue = nbu2ss_ep_queue,
- .dequeue = nbu2ss_ep_dequeue,
-
- .set_halt = nbu2ss_ep_set_halt,
- .set_wedge = nbu2ss_ep_set_wedge,
-
- .fifo_status = nbu2ss_ep_fifo_status,
- .fifo_flush = nbu2ss_ep_fifo_flush,
-};
-
-/*-------------------------------------------------------------------------*/
-/* usb_gadget_ops */
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_get_frame(struct usb_gadget *pgadget)
-{
- u32 data;
- struct nbu2ss_udc *udc;
-
- if (!pgadget) {
- pr_err("udc: %s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
- data = gpiod_get_value(vbus_gpio);
- if (data == 0)
- return -EINVAL;
-
- return _nbu2ss_readl(&udc->p_regs->USB_ADDRESS) & FRAME;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_wakeup(struct usb_gadget *pgadget)
-{
- int i;
- u32 data;
-
- struct nbu2ss_udc *udc;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- data = gpiod_get_value(vbus_gpio);
- if (data == 0) {
- dev_warn(&pgadget->dev, "VBUS LEVEL = %d\n", data);
- return -EINVAL;
- }
-
- _nbu2ss_bitset(&udc->p_regs->EPCTR, PLL_RESUME);
-
- for (i = 0; i < EPC_PLL_LOCK_COUNT; i++) {
- data = _nbu2ss_readl(&udc->p_regs->EPCTR);
-
- if (data & PLL_LOCK)
- break;
- }
-
- _nbu2ss_bitclr(&udc->p_regs->EPCTR, PLL_RESUME);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_set_selfpowered(struct usb_gadget *pgadget,
- int is_selfpowered)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
- pgadget->is_selfpowered = (is_selfpowered != 0);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
-{
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- spin_lock_irqsave(&udc->lock, flags);
- udc->mA = mA;
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
-{
- struct nbu2ss_udc *udc;
- unsigned long flags;
-
- if (!pgadget) {
- pr_err("%s, bad param\n", __func__);
- return -EINVAL;
- }
-
- udc = container_of(pgadget, struct nbu2ss_udc, gadget);
-
- if (!udc->driver) {
- pr_warn("%s, Not Regist Driver\n", __func__);
- return -EINVAL;
- }
-
- if (udc->vbus_active == 0)
- return -ESHUTDOWN;
-
- spin_lock_irqsave(&udc->lock, flags);
- _nbu2ss_pullup(udc, is_on);
- spin_unlock_irqrestore(&udc->lock, flags);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_ioctl(struct usb_gadget *pgadget,
- unsigned int code, unsigned long param)
-{
- return 0;
-}
-
-static const struct usb_gadget_ops nbu2ss_gadget_ops = {
- .get_frame = nbu2ss_gad_get_frame,
- .wakeup = nbu2ss_gad_wakeup,
- .set_selfpowered = nbu2ss_gad_set_selfpowered,
- .vbus_session = nbu2ss_gad_vbus_session,
- .vbus_draw = nbu2ss_gad_vbus_draw,
- .pullup = nbu2ss_gad_pullup,
- .ioctl = nbu2ss_gad_ioctl,
-};
-
-static const struct {
- const char *name;
- const struct usb_ep_caps caps;
-} ep_info[NUM_ENDPOINTS] = {
-#define EP_INFO(_name, _caps) \
- { \
- .name = _name, \
- .caps = _caps, \
- }
-
- EP_INFO("ep0",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep1-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep2-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep3in-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep4-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep5-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep6-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep7-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("ep8in-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
- EP_INFO("ep9-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epa-iso",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epb-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epc-bulk",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_ALL)),
- EP_INFO("epdin-int",
- USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)),
-
-#undef EP_INFO
-};
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
-{
- int i;
-
- INIT_LIST_HEAD(&udc->gadget.ep_list);
- udc->gadget.ep0 = &udc->ep[0].ep;
-
- for (i = 0; i < NUM_ENDPOINTS; i++) {
- struct nbu2ss_ep *ep = &udc->ep[i];
-
- ep->udc = udc;
- ep->desc = NULL;
-
- ep->ep.driver_data = NULL;
- ep->ep.name = ep_info[i].name;
- ep->ep.caps = ep_info[i].caps;
- ep->ep.ops = &nbu2ss_ep_ops;
-
- usb_ep_set_maxpacket_limit(&ep->ep,
- i == 0 ? EP0_PACKETSIZE
- : EP_PACKETSIZE);
-
- list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
- INIT_LIST_HEAD(&ep->queue);
- }
-
- list_del_init(&udc->ep[0].ep.ep_list);
-}
-
-/*-------------------------------------------------------------------------*/
-/* platform_driver */
-static int nbu2ss_drv_contest_init(struct platform_device *pdev,
- struct nbu2ss_udc *udc)
-{
- spin_lock_init(&udc->lock);
- udc->dev = &pdev->dev;
-
- udc->gadget.is_selfpowered = 1;
- udc->devstate = USB_STATE_NOTATTACHED;
- udc->pdev = pdev;
- udc->mA = 0;
-
- udc->pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-
- /* init Endpoint */
- nbu2ss_drv_ep_init(udc);
-
- /* init Gadget */
- udc->gadget.ops = &nbu2ss_gadget_ops;
- udc->gadget.ep0 = &udc->ep[0].ep;
- udc->gadget.speed = USB_SPEED_UNKNOWN;
- udc->gadget.name = driver_name;
- /* udc->gadget.is_dualspeed = 1; */
-
- device_initialize(&udc->gadget.dev);
-
- dev_set_name(&udc->gadget.dev, "gadget");
- udc->gadget.dev.parent = &pdev->dev;
- udc->gadget.dev.dma_mask = pdev->dev.dma_mask;
-
- return 0;
-}
-
-/*
- * probe - binds to the platform device
- */
-static int nbu2ss_drv_probe(struct platform_device *pdev)
-{
- int status;
- struct nbu2ss_udc *udc;
- int irq;
- void __iomem *mmio_base;
-
- udc = &udc_controller;
- memset(udc, 0, sizeof(struct nbu2ss_udc));
-
- platform_set_drvdata(pdev, udc);
-
- /* require I/O memory and IRQ to be provided as resources */
- mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base))
- return PTR_ERR(mmio_base);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- status = devm_request_irq(&pdev->dev, irq, _nbu2ss_udc_irq,
- 0, driver_name, udc);
-
- /* IO Memory */
- udc->p_regs = (struct fc_regs __iomem *)mmio_base;
-
- /* USB Function Controller Interrupt */
- if (status != 0) {
- dev_err(udc->dev, "request_irq(USB_UDC_IRQ_1) failed\n");
- return status;
- }
-
- /* Driver Initialization */
- status = nbu2ss_drv_contest_init(pdev, udc);
- if (status < 0) {
- /* Error */
- return status;
- }
-
- /* VBUS Interrupt */
- vbus_irq = gpiod_to_irq(vbus_gpio);
- irq_set_irq_type(vbus_irq, IRQ_TYPE_EDGE_BOTH);
- status = request_irq(vbus_irq,
- _nbu2ss_vbus_irq, IRQF_SHARED, driver_name, udc);
-
- if (status != 0) {
- dev_err(udc->dev, "request_irq(vbus_irq) failed\n");
- return status;
- }
-
- return status;
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_shutdown(struct platform_device *pdev)
-{
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return;
-
- _nbu2ss_disable_controller(udc);
-}
-
-/*-------------------------------------------------------------------------*/
-static void nbu2ss_drv_remove(struct platform_device *pdev)
-{
- struct nbu2ss_udc *udc;
- struct nbu2ss_ep *ep;
- int i;
-
- udc = &udc_controller;
-
- for (i = 0; i < NUM_ENDPOINTS; i++) {
- ep = &udc->ep[i];
- if (ep->virt_buf)
- dma_free_coherent(udc->dev, PAGE_SIZE, (void *)ep->virt_buf,
- ep->phys_buf);
- }
-
- /* Interrupt Handler - Release */
- free_irq(vbus_irq, udc);
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_drv_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return 0;
-
- if (udc->vbus_active) {
- udc->vbus_active = 0;
- udc->devstate = USB_STATE_NOTATTACHED;
- udc->linux_suspended = 1;
-
- if (udc->usb_suspended) {
- udc->usb_suspended = 0;
- _nbu2ss_reset_controller(udc);
- }
-
- _nbu2ss_quiesce(udc);
- }
- _nbu2ss_disable_controller(udc);
-
- return 0;
-}
-
-/*-------------------------------------------------------------------------*/
-static int nbu2ss_drv_resume(struct platform_device *pdev)
-{
- u32 data;
- struct nbu2ss_udc *udc;
-
- udc = platform_get_drvdata(pdev);
- if (!udc)
- return 0;
-
- data = gpiod_get_value(vbus_gpio);
- if (data) {
- udc->vbus_active = 1;
- udc->devstate = USB_STATE_POWERED;
- _nbu2ss_enable_controller(udc);
- _nbu2ss_pullup(udc, 1);
- }
-
- udc->linux_suspended = 0;
-
- return 0;
-}
-
-static struct platform_driver udc_driver = {
- .probe = nbu2ss_drv_probe,
- .shutdown = nbu2ss_drv_shutdown,
- .remove_new = nbu2ss_drv_remove,
- .suspend = nbu2ss_drv_suspend,
- .resume = nbu2ss_drv_resume,
- .driver = {
- .name = driver_name,
- },
-};
-
-module_platform_driver(udc_driver);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Renesas Electronics Corporation");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
deleted file mode 100644
index c9e37a1b8139..000000000000
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ /dev/null
@@ -1,554 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * EMXX FCD (Function Controller Driver) for USB.
- *
- * Copyright (C) 2010 Renesas Electronics Corporation
- */
-
-#ifndef _LINUX_EMXX_H
-#define _LINUX_EMXX_H
-
-/*---------------------------------------------------------------------------*/
-
-/*----------------- Default define */
-#define USE_DMA 1
-#define USE_SUSPEND_WAIT 1
-
-/*------------ Board dependence(Resource) */
-#define VBUS_VALUE GPIO_VBUS
-
-/* below hacked up for staging integration */
-#define GPIO_VBUS 0 /* GPIO_P153 on KZM9D */
-#define INT_VBUS 0 /* IRQ for GPIO_P153 */
-
-/*------------ Board dependence(Wait) */
-
-/* CHATTERING wait time ms */
-#define VBUS_CHATTERING_MDELAY 1
-/* DMA Abort wait time ms */
-#define DMA_DISABLE_TIME 10
-
-/*------------ Controller dependence */
-#define NUM_ENDPOINTS 14 /* Endpoint */
-#define REG_EP_NUM 15 /* Endpoint Register */
-#define DMA_MAX_COUNT 256 /* DMA Block */
-
-#define EPC_RST_DISABLE_TIME 1 /* 1 usec */
-#define EPC_DIRPD_DISABLE_TIME 1 /* 1 msec */
-#define EPC_PLL_LOCK_COUNT 1000 /* 1000 */
-#define IN_DATA_EMPTY_COUNT 1000 /* 1000 */
-
-#define CHATGER_TIME 700 /* 700msec */
-#define USB_SUSPEND_TIME 2000 /* 2 sec */
-
-/* U2F FLAG */
-#define U2F_ENABLE 1
-#define U2F_DISABLE 0
-
-#define TEST_FORCE_ENABLE (BIT(18) | BIT(16))
-
-#define INT_SEL BIT(10)
-#define CONSTFS BIT(9)
-#define SOF_RCV BIT(8)
-#define RSUM_IN BIT(7)
-#define SUSPEND BIT(6)
-#define CONF BIT(5)
-#define DEFAULT BIT(4)
-#define CONNECTB BIT(3)
-#define PUE2 BIT(2)
-
-#define MAX_TEST_MODE_NUM 0x05
-#define TEST_MODE_SHIFT 16
-
-/*------- (0x0004) USB Status Register */
-#define SPEED_MODE BIT(6)
-#define HIGH_SPEED BIT(6)
-
-#define CONF BIT(5)
-#define DEFAULT BIT(4)
-#define USB_RST BIT(3)
-#define SPND_OUT BIT(2)
-#define RSUM_OUT BIT(1)
-
-/*------- (0x0008) USB Address Register */
-#define USB_ADDR 0x007F0000
-#define SOF_STATUS BIT(15)
-#define UFRAME (BIT(14) | BIT(13) | BIT(12))
-#define FRAME 0x000007FF
-
-#define USB_ADRS_SHIFT 16
-
-/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT(7) | BIT(6) | BIT(5) | BIT(4))
-
-#define USB_SQUSET (BIT(6) | BIT(5) | BIT(4))
-
-/*------- (0x0010) TEST Control Register */
-#define FORCEHS BIT(2)
-#define CS_TESTMODEEN BIT(1)
-#define LOOPBACK BIT(0)
-
-/*------- (0x0018) Setup Data 0 Register */
-/*------- (0x001C) Setup Data 1 Register */
-
-/*------- (0x0020) USB Interrupt Status Register */
-#define EPN_INT 0x00FFFF00
-#define EP15_INT BIT(23)
-#define EP14_INT BIT(22)
-#define EP13_INT BIT(21)
-#define EP12_INT BIT(20)
-#define EP11_INT BIT(19)
-#define EP10_INT BIT(18)
-#define EP9_INT BIT(17)
-#define EP8_INT BIT(16)
-#define EP7_INT BIT(15)
-#define EP6_INT BIT(14)
-#define EP5_INT BIT(13)
-#define EP4_INT BIT(12)
-#define EP3_INT BIT(11)
-#define EP2_INT BIT(10)
-#define EP1_INT BIT(9)
-#define EP0_INT BIT(8)
-#define SPEED_MODE_INT BIT(6)
-#define SOF_ERROR_INT BIT(5)
-#define SOF_INT BIT(4)
-#define USB_RST_INT BIT(3)
-#define SPND_INT BIT(2)
-#define RSUM_INT BIT(1)
-
-#define USB_INT_STA_RW 0x7E
-
-/*------- (0x0024) USB Interrupt Enable Register */
-#define EP15_0_EN 0x00FFFF00
-#define EP15_EN BIT(23)
-#define EP14_EN BIT(22)
-#define EP13_EN BIT(21)
-#define EP12_EN BIT(20)
-#define EP11_EN BIT(19)
-#define EP10_EN BIT(18)
-#define EP9_EN BIT(17)
-#define EP8_EN BIT(16)
-#define EP7_EN BIT(15)
-#define EP6_EN BIT(14)
-#define EP5_EN BIT(13)
-#define EP4_EN BIT(12)
-#define EP3_EN BIT(11)
-#define EP2_EN BIT(10)
-#define EP1_EN BIT(9)
-#define EP0_EN BIT(8)
-#define SPEED_MODE_EN BIT(6)
-#define SOF_ERROR_EN BIT(5)
-#define SOF_EN BIT(4)
-#define USB_RST_EN BIT(3)
-#define SPND_EN BIT(2)
-#define RSUM_EN BIT(1)
-
-#define USB_INT_EN_BIT \
- (EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
-
-/*------- (0x0028) EP0 Control Register */
-#define EP0_STGSEL BIT(18)
-#define EP0_OVERSEL BIT(17)
-#define EP0_AUTO BIT(16)
-#define EP0_PIDCLR BIT(9)
-#define EP0_BCLR BIT(8)
-#define EP0_DEND BIT(7)
-#define EP0_DW (BIT(6) | BIT(5))
-#define EP0_DW4 0
-#define EP0_DW3 (BIT(6) | BIT(5))
-#define EP0_DW2 BIT(6)
-#define EP0_DW1 BIT(5)
-
-#define EP0_INAK_EN BIT(4)
-#define EP0_PERR_NAK_CLR BIT(3)
-#define EP0_STL BIT(2)
-#define EP0_INAK BIT(1)
-#define EP0_ONAK BIT(0)
-
-/*------- (0x002C) EP0 Status Register */
-#define EP0_PID BIT(18)
-#define EP0_PERR_NAK BIT(17)
-#define EP0_PERR_NAK_INT BIT(16)
-#define EP0_OUT_NAK_INT BIT(15)
-#define EP0_OUT_NULL BIT(14)
-#define EP0_OUT_FULL BIT(13)
-#define EP0_OUT_EMPTY BIT(12)
-#define EP0_IN_NAK_INT BIT(11)
-#define EP0_IN_DATA BIT(10)
-#define EP0_IN_FULL BIT(9)
-#define EP0_IN_EMPTY BIT(8)
-#define EP0_OUT_NULL_INT BIT(7)
-#define EP0_OUT_OR_INT BIT(6)
-#define EP0_OUT_INT BIT(5)
-#define EP0_IN_INT BIT(4)
-#define EP0_STALL_INT BIT(3)
-#define STG_END_INT BIT(2)
-#define STG_START_INT BIT(1)
-#define SETUP_INT BIT(0)
-
-#define EP0_STATUS_RW_BIT (BIT(16) | BIT(15) | BIT(11) | 0xFF)
-
-/*------- (0x0030) EP0 Interrupt Enable Register */
-#define EP0_PERR_NAK_EN BIT(16)
-#define EP0_OUT_NAK_EN BIT(15)
-
-#define EP0_IN_NAK_EN BIT(11)
-
-#define EP0_OUT_NULL_EN BIT(7)
-#define EP0_OUT_OR_EN BIT(6)
-#define EP0_OUT_EN BIT(5)
-#define EP0_IN_EN BIT(4)
-#define EP0_STALL_EN BIT(3)
-#define STG_END_EN BIT(2)
-#define STG_START_EN BIT(1)
-#define SETUP_EN BIT(0)
-
-#define EP0_INT_EN_BIT \
- (EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
-
-/*------- (0x0034) EP0 Length Register */
-#define EP0_LDATA 0x0000007F
-
-/*------- (0x0038) EP0 Read Register */
-/*------- (0x003C) EP0 Write Register */
-
-/*------- (0x0040:) EPN Control Register */
-#define EPN_EN BIT(31)
-#define EPN_BUF_TYPE BIT(30)
-#define EPN_BUF_SINGLE BIT(30)
-
-#define EPN_DIR0 BIT(26)
-#define EPN_MODE (BIT(25) | BIT(24))
-#define EPN_BULK 0
-#define EPN_INTERRUPT BIT(24)
-#define EPN_ISO BIT(25)
-
-#define EPN_OVERSEL BIT(17)
-#define EPN_AUTO BIT(16)
-
-#define EPN_IPIDCLR BIT(11)
-#define EPN_OPIDCLR BIT(10)
-#define EPN_BCLR BIT(9)
-#define EPN_CBCLR BIT(8)
-#define EPN_DEND BIT(7)
-#define EPN_DW (BIT(6) | BIT(5))
-#define EPN_DW4 0
-#define EPN_DW3 (BIT(6) | BIT(5))
-#define EPN_DW2 BIT(6)
-#define EPN_DW1 BIT(5)
-
-#define EPN_OSTL_EN BIT(4)
-#define EPN_ISTL BIT(3)
-#define EPN_OSTL BIT(2)
-
-#define EPN_ONAK BIT(0)
-
-/*------- (0x0044:) EPN Status Register */
-#define EPN_ISO_PIDERR BIT(29) /* R */
-#define EPN_OPID BIT(28) /* R */
-#define EPN_OUT_NOTKN BIT(27) /* R */
-#define EPN_ISO_OR BIT(26) /* R */
-
-#define EPN_ISO_CRC BIT(24) /* R */
-#define EPN_OUT_END_INT BIT(23) /* RW */
-#define EPN_OUT_OR_INT BIT(22) /* RW */
-#define EPN_OUT_NAK_ERR_INT BIT(21) /* RW */
-#define EPN_OUT_STALL_INT BIT(20) /* RW */
-#define EPN_OUT_INT BIT(19) /* RW */
-#define EPN_OUT_NULL_INT BIT(18) /* RW */
-#define EPN_OUT_FULL BIT(17) /* R */
-#define EPN_OUT_EMPTY BIT(16) /* R */
-
-#define EPN_IPID BIT(10) /* R */
-#define EPN_IN_NOTKN BIT(9) /* R */
-#define EPN_ISO_UR BIT(8) /* R */
-#define EPN_IN_END_INT BIT(7) /* RW */
-
-#define EPN_IN_NAK_ERR_INT BIT(5) /* RW */
-#define EPN_IN_STALL_INT BIT(4) /* RW */
-#define EPN_IN_INT BIT(3) /* RW */
-#define EPN_IN_DATA BIT(2) /* R */
-#define EPN_IN_FULL BIT(1) /* R */
-#define EPN_IN_EMPTY BIT(0) /* R */
-
-#define EPN_INT_EN \
- (EPN_OUT_END_INT | EPN_OUT_INT | EPN_IN_END_INT | EPN_IN_INT)
-
-/*------- (0x0048:) EPN Interrupt Enable Register */
-#define EPN_OUT_END_EN BIT(23) /* RW */
-#define EPN_OUT_OR_EN BIT(22) /* RW */
-#define EPN_OUT_NAK_ERR_EN BIT(21) /* RW */
-#define EPN_OUT_STALL_EN BIT(20) /* RW */
-#define EPN_OUT_EN BIT(19) /* RW */
-#define EPN_OUT_NULL_EN BIT(18) /* RW */
-
-#define EPN_IN_END_EN BIT(7) /* RW */
-
-#define EPN_IN_NAK_ERR_EN BIT(5) /* RW */
-#define EPN_IN_STALL_EN BIT(4) /* RW */
-#define EPN_IN_EN BIT(3) /* RW */
-
-/*------- (0x004C:) EPN Interrupt Enable Register */
-#define EPN_STOP_MODE BIT(11)
-#define EPN_DEND_SET BIT(10)
-#define EPN_BURST_SET BIT(9)
-#define EPN_STOP_SET BIT(8)
-
-#define EPN_DMA_EN BIT(4)
-
-#define EPN_DMAMODE0 BIT(0)
-
-/*------- (0x0050:) EPN MaxPacket & BaseAddress Register */
-#define EPN_BASEAD 0x1FFF0000
-#define EPN_MPKT 0x000007FF
-
-/*------- (0x0054:) EPN Length & DMA Count Register */
-#define EPN_DMACNT 0x01FF0000
-#define EPN_LDATA 0x000007FF
-
-/*------- (0x0058:) EPN Read Register */
-/*------- (0x005C:) EPN Write Register */
-
-/*------- (0x1000) AHBSCTR Register */
-#define WAIT_MODE BIT(0)
-
-/*------- (0x1004) AHBMCTR Register */
-#define ARBITER_CTR BIT(31) /* RW */
-#define MCYCLE_RST BIT(12) /* RW */
-
-#define ENDIAN_CTR (BIT(9) | BIT(8)) /* RW */
-#define ENDIAN_BYTE_SWAP BIT(9)
-#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
-
-#define HBUSREQ_MODE BIT(5) /* RW */
-#define HTRANS_MODE BIT(4) /* RW */
-
-#define WBURST_TYPE BIT(2) /* RW */
-#define BURST_TYPE (BIT(1) | BIT(0)) /* RW */
-#define BURST_MAX_16 0
-#define BURST_MAX_8 BIT(0)
-#define BURST_MAX_4 BIT(1)
-#define BURST_SINGLE BURST_TYPE
-
-/*------- (0x1008) AHBBINT Register */
-#define DMA_ENDINT 0xFFFE0000 /* RW */
-
-#define AHB_VBUS_INT BIT(13) /* RW */
-
-#define MBUS_ERRINT BIT(6) /* RW */
-
-#define SBUS_ERRINT0 BIT(4) /* RW */
-#define ERR_MASTER 0x0000000F /* R */
-
-/*------- (0x100C) AHBBINTEN Register */
-#define DMA_ENDINTEN 0xFFFE0000 /* RW */
-
-#define VBUS_INTEN BIT(13) /* RW */
-
-#define MBUS_ERRINTEN BIT(6) /* RW */
-
-#define SBUS_ERRINT0EN BIT(4) /* RW */
-
-/*------- (0x1010) EPCTR Register */
-#define DIRPD BIT(12) /* RW */
-
-#define VBUS_LEVEL BIT(8) /* R */
-
-#define PLL_RESUME BIT(5) /* RW */
-#define PLL_LOCK BIT(4) /* R */
-
-#define EPC_RST BIT(0) /* RW */
-
-/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT(9) | BIT(8)) /* R */
-#define DM_LEVEL BIT(9) /* R */
-#define DP_LEVEL BIT(8) /* R */
-
-#define PHY_TST BIT(1) /* RW */
-#define PHY_TSTCLK BIT(0) /* RW */
-
-/*------- (0x1020) USBSSVER Register */
-#define AHBB_VER 0x00FF0000 /* R */
-#define EPC_VER 0x0000FF00 /* R */
-#define SS_VER 0x000000FF /* R */
-
-/*------- (0x1024) USBSSCONF Register */
-#define EP_AVAILABLE 0xFFFF0000 /* R */
-#define DMA_AVAILABLE 0x0000FFFF /* R */
-
-/*------- (0x1110:) EPNDCR1 Register */
-#define DCR1_EPN_DMACNT 0x00FF0000 /* RW */
-
-#define DCR1_EPN_DIR0 BIT(1) /* RW */
-#define DCR1_EPN_REQEN BIT(0) /* RW */
-
-/*------- (0x1114:) EPNDCR2 Register */
-#define DCR2_EPN_LMPKT 0x07FF0000 /* RW */
-
-#define DCR2_EPN_MPKT 0x000007FF /* RW */
-
-/*------- (0x1118:) EPNTADR Register */
-#define EPN_TADR 0xFFFFFFFF /* RW */
-
-/*===========================================================================*/
-/* Struct */
-/*------- ep_regs */
-struct ep_regs {
- u32 EP_CONTROL; /* EP Control */
- u32 EP_STATUS; /* EP Status */
- u32 EP_INT_ENA; /* EP Interrupt Enable */
- u32 EP_DMA_CTRL; /* EP DMA Control */
- u32 EP_PCKT_ADRS; /* EP Maxpacket & BaseAddress */
- u32 EP_LEN_DCNT; /* EP Length & DMA count */
- u32 EP_READ; /* EP Read */
- u32 EP_WRITE; /* EP Write */
-};
-
-/*------- ep_dcr */
-struct ep_dcr {
- u32 EP_DCR1; /* EP_DCR1 */
- u32 EP_DCR2; /* EP_DCR2 */
- u32 EP_TADR; /* EP_TADR */
- u32 Reserved; /* Reserved */
-};
-
-/*------- Function Registers */
-struct fc_regs {
- u32 USB_CONTROL; /* (0x0000) USB Control */
- u32 USB_STATUS; /* (0x0004) USB Status */
- u32 USB_ADDRESS; /* (0x0008) USB Address */
- u32 UTMI_CHARACTER_1; /* (0x000C) UTMI Setting */
- u32 TEST_CONTROL; /* (0x0010) TEST Control */
- u32 reserved_14; /* (0x0014) Reserved */
- u32 SETUP_DATA0; /* (0x0018) Setup Data0 */
- u32 SETUP_DATA1; /* (0x001C) Setup Data1 */
- u32 USB_INT_STA; /* (0x0020) USB Interrupt Status */
- u32 USB_INT_ENA; /* (0x0024) USB Interrupt Enable */
- u32 EP0_CONTROL; /* (0x0028) EP0 Control */
- u32 EP0_STATUS; /* (0x002C) EP0 Status */
- u32 EP0_INT_ENA; /* (0x0030) EP0 Interrupt Enable */
- u32 EP0_LENGTH; /* (0x0034) EP0 Length */
- u32 EP0_READ; /* (0x0038) EP0 Read */
- u32 EP0_WRITE; /* (0x003C) EP0 Write */
-
- struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
-
- u8 reserved_220[0x1000 - 0x220]; /* (0x0220:0x0FFF) Reserved */
-
- u32 AHBSCTR; /* (0x1000) AHBSCTR */
- u32 AHBMCTR; /* (0x1004) AHBMCTR */
- u32 AHBBINT; /* (0x1008) AHBBINT */
- u32 AHBBINTEN; /* (0x100C) AHBBINTEN */
- u32 EPCTR; /* (0x1010) EPCTR */
- u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
-
- u8 reserved_1018[0x20 - 0x18]; /* (0x1018:0x101F) Reserved */
-
- u32 USBSSVER; /* (0x1020) USBSSVER */
- u32 USBSSCONF; /* (0x1024) USBSSCONF */
-
- u8 reserved_1028[0x110 - 0x28]; /* (0x1028:0x110F) Reserved */
-
- struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
-
- u8 reserved_1200[0x1000 - 0x200]; /* Reserved */
-} __aligned(32);
-
-#define EP0_PACKETSIZE 64
-#define EP_PACKETSIZE 1024
-
-/* EPN RAM SIZE */
-#define D_RAM_SIZE_CTRL 64
-
-/* EPN Bulk Endpoint Max Packet Size */
-#define D_FS_RAM_SIZE_BULK 64
-#define D_HS_RAM_SIZE_BULK 512
-
-struct nbu2ss_udc;
-
-enum ep0_state {
- EP0_IDLE,
- EP0_IN_DATA_PHASE,
- EP0_OUT_DATA_PHASE,
- EP0_IN_STATUS_PHASE,
- EP0_OUT_STATUS_PAHSE,
- EP0_END_XFER,
- EP0_SUSPEND,
- EP0_STALL,
-};
-
-struct nbu2ss_req {
- struct usb_request req;
- struct list_head queue;
-
- u32 div_len;
- bool dma_flag;
- bool zero;
-
- bool unaligned;
-
- unsigned mapped:1;
-};
-
-struct nbu2ss_ep {
- struct usb_ep ep;
- struct list_head queue;
-
- struct nbu2ss_udc *udc;
-
- const struct usb_endpoint_descriptor *desc;
-
- u8 epnum;
- u8 direct;
- u8 ep_type;
-
- unsigned wedged:1;
- unsigned halted:1;
- unsigned stalled:1;
-
- u8 *virt_buf;
- dma_addr_t phys_buf;
-};
-
-struct nbu2ss_udc {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct platform_device *pdev;
- struct device *dev;
- spinlock_t lock; /* Protects nbu2ss_udc structure fields */
- struct completion *pdone;
-
- enum ep0_state ep0state;
- enum usb_device_state devstate;
- struct usb_ctrlrequest ctrl;
- struct nbu2ss_req ep0_req;
- u8 ep0_buf[EP0_PACKETSIZE];
-
- struct nbu2ss_ep ep[NUM_ENDPOINTS];
-
- unsigned softconnect:1;
- unsigned vbus_active:1;
- unsigned linux_suspended:1;
- unsigned linux_resume:1;
- unsigned usb_suspended:1;
- unsigned remote_wakeup:1;
- unsigned udc_enabled:1;
-
- unsigned int mA;
-
- u32 curr_config; /* Current Configuration Number */
-
- struct fc_regs __iomem *p_regs;
-};
-
-/* USB register access structure */
-union usb_reg_access {
- struct {
- unsigned char DATA[4];
- } byte;
- unsigned int dw;
-};
-
-/*-------------------------------------------------------------------------*/
-
-#endif /* _LINUX_EMXX_H */
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 68add4d598ae..38845f23023f 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -327,7 +327,6 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis
unsigned int dirty_lines_start, dirty_lines_end;
struct fb_deferred_io_pageref *pageref;
unsigned int y_low = 0, y_high = 0;
- int count = 0;
spin_lock(&par->dirty_lock);
dirty_lines_start = par->dirty_lines_start;
@@ -339,7 +338,6 @@ static void fbtft_deferred_io(struct fb_info *info, struct list_head *pagereflis
/* Mark display lines as dirty */
list_for_each_entry(pageref, pagereflist, list) {
- count++;
y_low = pageref->offset / info->fix.line_length;
y_high = (pageref->offset + PAGE_SIZE - 1) / info->fix.line_length;
dev_dbg(info->device,
diff --git a/drivers/staging/fieldbus/anybuss/arcx-anybus.c b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
index 34d18b09bedd..fcd3e3722ae0 100644
--- a/drivers/staging/fieldbus/anybuss/arcx-anybus.c
+++ b/drivers/staging/fieldbus/anybuss/arcx-anybus.c
@@ -285,7 +285,7 @@ static int controller_probe(struct platform_device *pdev)
}
}
- id = ida_simple_get(&controller_index_ida, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&controller_index_ida, GFP_KERNEL);
if (id < 0) {
err = id;
goto out_reset;
@@ -318,7 +318,7 @@ static int controller_probe(struct platform_device *pdev)
out_dev:
put_device(cd->class_dev);
out_ida:
- ida_simple_remove(&controller_index_ida, id);
+ ida_free(&controller_index_ida, id);
out_reset:
gpiod_set_value_cansleep(cd->reset_gpiod, 1);
return err;
@@ -330,7 +330,7 @@ static void controller_remove(struct platform_device *pdev)
int id = cd->class_dev->id;
device_unregister(cd->class_dev);
- ida_simple_remove(&controller_index_ida, id);
+ ida_free(&controller_index_ida, id);
gpiod_set_value_cansleep(cd->reset_gpiod, 1);
}
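The ida_simple_get()/ida_simple_remove() pairs converted in this and the following staging hunks map directly onto the ida_alloc*()/ida_free() helpers from <linux/idr.h>; the only trap is that ida_simple_get() takes an exclusive upper bound while ida_alloc_max()/ida_alloc_range() take inclusive ones, hence the "NUM - 1" adjustments seen in these diffs. A minimal sketch of the mapping, with a hypothetical IDA used purely for illustration:

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);	/* illustrative only, not from any driver above */

    static int example_get_id(void)
    {
    	/* was: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL); 0 meant "no upper limit" */
    	return ida_alloc(&example_ida, GFP_KERNEL);
    }

    static int example_get_bounded_id(void)
    {
    	/* was: ida_simple_get(&example_ida, 1, 256, GFP_KERNEL); 256 was exclusive */
    	return ida_alloc_range(&example_ida, 1, 255, GFP_KERNEL);
    }

    static void example_put_id(int id)
    {
    	/* was: ida_simple_remove(&example_ida, id) */
    	ida_free(&example_ida, id);
    }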
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index cd86b9c9e345..410e6f8073c0 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1195,7 +1195,7 @@ static void anybus_bus_remove(struct device *dev)
adrv->remove(to_anybuss_client(dev));
}
-static struct bus_type anybus_bus = {
+static const struct bus_type anybus_bus = {
.name = "anybuss",
.match = anybus_bus_match,
.probe = anybus_bus_probe,
diff --git a/drivers/staging/fieldbus/dev_core.c b/drivers/staging/fieldbus/dev_core.c
index bf1812d8924f..0053ebd91442 100644
--- a/drivers/staging/fieldbus/dev_core.c
+++ b/drivers/staging/fieldbus/dev_core.c
@@ -152,7 +152,7 @@ static const struct attribute_group fieldbus_group = {
};
__ATTRIBUTE_GROUPS(fieldbus);
-static struct class fieldbus_class = {
+static const struct class fieldbus_class = {
.name = "fieldbus_dev",
.dev_groups = fieldbus_groups,
};
@@ -247,7 +247,7 @@ static void __fieldbus_dev_unregister(struct fieldbus_dev *fb)
return;
device_destroy(&fieldbus_class, fb->cdev.dev);
cdev_del(&fb->cdev);
- ida_simple_remove(&fieldbus_ida, fb->id);
+ ida_free(&fieldbus_ida, fb->id);
}
void fieldbus_dev_unregister(struct fieldbus_dev *fb)
@@ -267,7 +267,7 @@ static int __fieldbus_dev_register(struct fieldbus_dev *fb)
return -EINVAL;
if (!fb->read_area || !fb->write_area || !fb->fieldbus_id_get)
return -EINVAL;
- fb->id = ida_simple_get(&fieldbus_ida, 0, MAX_FIELDBUSES, GFP_KERNEL);
+ fb->id = ida_alloc_max(&fieldbus_ida, MAX_FIELDBUSES - 1, GFP_KERNEL);
if (fb->id < 0)
return fb->id;
devno = MKDEV(MAJOR(fieldbus_devt), fb->id);
@@ -290,7 +290,7 @@ static int __fieldbus_dev_register(struct fieldbus_dev *fb)
err_dev_create:
cdev_del(&fb->cdev);
err_cdev:
- ida_simple_remove(&fieldbus_ida, fb->id);
+ ida_free(&fieldbus_ida, fb->id);
return err;
}
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index 5703a9ddb6d0..eb754b231429 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -43,7 +43,7 @@ static struct {
struct sock *sock;
} lte_event;
-static struct device_type wwan_type = {
+static const struct device_type wwan_type = {
.name = "wwan",
};
diff --git a/drivers/staging/greybus/audio_apbridgea.h b/drivers/staging/greybus/audio_apbridgea.h
index efec0f815efd..ab707d310129 100644
--- a/drivers/staging/greybus/audio_apbridgea.h
+++ b/drivers/staging/greybus/audio_apbridgea.h
@@ -65,7 +65,6 @@
struct audio_apbridgea_hdr {
__u8 type;
__le16 i2s_port;
- __u8 data[];
} __packed;
struct audio_apbridgea_set_config_request {
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index 9a3f7c034ab4..fa43d35bbcec 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -44,14 +44,14 @@ int gb_audio_manager_add(struct gb_audio_manager_module_descriptor *desc)
int id;
int err;
- id = ida_simple_get(&module_id, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&module_id, GFP_KERNEL);
if (id < 0)
return id;
err = gb_audio_manager_module_create(&module, manager_kset,
id, desc);
if (err) {
- ida_simple_remove(&module_id, id);
+ ida_free(&module_id, id);
return err;
}
@@ -78,7 +78,7 @@ int gb_audio_manager_remove(int id)
list_del(&module->list);
kobject_put(&module->kobj);
up_write(&modules_rwsem);
- ida_simple_remove(&module_id, id);
+ ida_free(&module_id, id);
return 0;
}
EXPORT_SYMBOL_GPL(gb_audio_manager_remove);
@@ -92,7 +92,7 @@ void gb_audio_manager_remove_all(void)
list_for_each_entry_safe(module, next, &modules_list, list) {
list_del(&module->list);
- ida_simple_remove(&module_id, module->id);
+ ida_free(&module_id, module->id);
kobject_put(&module->kobj);
}
diff --git a/drivers/staging/greybus/audio_topology.c b/drivers/staging/greybus/audio_topology.c
index 08e6a807c132..5dc4721105d4 100644
--- a/drivers/staging/greybus/audio_topology.c
+++ b/drivers/staging/greybus/audio_topology.c
@@ -761,7 +761,6 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
{
int ret, wi, ctl_id;
unsigned int val, mux, change;
- unsigned int mask;
struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
struct snd_soc_dapm_widget *widget = wlist->widgets[0];
struct gb_audio_ctl_elem_value gbvalue;
@@ -802,7 +801,6 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
mux = ucontrol->value.enumerated.item[0];
val = mux << e->shift_l;
- mask = e->mask << e->shift_l;
if (le32_to_cpu(gbvalue.value.enumerated_item[0]) !=
ucontrol->value.enumerated.item[0]) {
@@ -815,7 +813,6 @@ static int gbcodec_enum_dapm_ctl_put(struct snd_kcontrol *kcontrol,
if (ucontrol->value.enumerated.item[1] > e->items - 1)
return -EINVAL;
val |= ucontrol->value.enumerated.item[1] << e->shift_r;
- mask |= e->mask << e->shift_r;
if (le32_to_cpu(gbvalue.value.enumerated_item[1]) !=
ucontrol->value.enumerated.item[1]) {
change = 1;
diff --git a/drivers/staging/greybus/authentication.c b/drivers/staging/greybus/authentication.c
index b67315641d18..d53e58f92e81 100644
--- a/drivers/staging/greybus/authentication.c
+++ b/drivers/staging/greybus/authentication.c
@@ -324,7 +324,7 @@ int gb_cap_connection_init(struct gb_connection *connection)
if (ret)
goto err_list_del;
- minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+ minor = ida_alloc_max(&cap_minors_map, NUM_MINORS - 1, GFP_KERNEL);
if (minor < 0) {
ret = minor;
goto err_connection_disable;
@@ -351,7 +351,7 @@ int gb_cap_connection_init(struct gb_connection *connection)
err_del_cdev:
cdev_del(&cap->cdev);
err_remove_ida:
- ida_simple_remove(&cap_minors_map, minor);
+ ida_free(&cap_minors_map, minor);
err_connection_disable:
gb_connection_disable(connection);
err_list_del:
@@ -375,7 +375,7 @@ void gb_cap_connection_exit(struct gb_connection *connection)
device_destroy(&cap_class, cap->dev_num);
cdev_del(&cap->cdev);
- ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));
+ ida_free(&cap_minors_map, MINOR(cap->dev_num));
/*
* Disallow any new ioctl operations on the char device and wait for
diff --git a/drivers/staging/greybus/bootrom.c b/drivers/staging/greybus/bootrom.c
index 79581457c4af..c0d338db6b52 100644
--- a/drivers/staging/greybus/bootrom.c
+++ b/drivers/staging/greybus/bootrom.c
@@ -243,10 +243,10 @@ static int gb_bootrom_get_firmware(struct gb_operation *op)
struct gb_bootrom *bootrom = gb_connection_get_data(op->connection);
const struct firmware *fw;
struct gb_bootrom_get_firmware_request *firmware_request;
- struct gb_bootrom_get_firmware_response *firmware_response;
struct device *dev = &op->connection->bundle->dev;
unsigned int offset, size;
enum next_request_type next_request;
+ u8 *firmware_response;
int ret = 0;
/* Disable timeouts */
@@ -280,15 +280,15 @@ static int gb_bootrom_get_firmware(struct gb_operation *op)
goto unlock;
}
- if (!gb_operation_response_alloc(op, sizeof(*firmware_response) + size,
- GFP_KERNEL)) {
+ /* gb_bootrom_get_firmware_response contains only a byte array */
+ if (!gb_operation_response_alloc(op, size, GFP_KERNEL)) {
dev_err(dev, "%s: error allocating response\n", __func__);
ret = -ENOMEM;
goto unlock;
}
firmware_response = op->response->payload;
- memcpy(firmware_response->data, fw->data + offset, size);
+ memcpy(firmware_response, fw->data + offset, size);
dev_dbg(dev, "responding with firmware (offs = %u, size = %u)\n",
offset, size);
diff --git a/drivers/staging/greybus/fw-download.c b/drivers/staging/greybus/fw-download.c
index 543692c567f9..9a09bd3af79b 100644
--- a/drivers/staging/greybus/fw-download.c
+++ b/drivers/staging/greybus/fw-download.c
@@ -63,8 +63,7 @@ static void fw_req_release(struct kref *kref)
* just hope that it never happens.
*/
if (!fw_req->timedout)
- ida_simple_remove(&fw_req->fw_download->id_map,
- fw_req->firmware_id);
+ ida_free(&fw_req->fw_download->id_map, fw_req->firmware_id);
kfree(fw_req);
}
@@ -171,7 +170,7 @@ static struct fw_request *find_firmware(struct fw_download *fw_download,
return ERR_PTR(-ENOMEM);
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
- ret = ida_simple_get(&fw_download->id_map, 1, 256, GFP_KERNEL);
+ ret = ida_alloc_range(&fw_download->id_map, 1, 255, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_download->parent,
"failed to allocate firmware id (%d)\n", ret);
@@ -212,7 +211,7 @@ static struct fw_request *find_firmware(struct fw_download *fw_download,
return fw_req;
err_free_id:
- ida_simple_remove(&fw_download->id_map, fw_req->firmware_id);
+ ida_free(&fw_download->id_map, fw_req->firmware_id);
err_free_req:
kfree(fw_req);
@@ -271,11 +270,11 @@ static int fw_download_fetch_firmware(struct gb_operation *op)
struct gb_connection *connection = op->connection;
struct fw_download *fw_download = gb_connection_get_data(connection);
struct gb_fw_download_fetch_firmware_request *request;
- struct gb_fw_download_fetch_firmware_response *response;
struct fw_request *fw_req;
const struct firmware *fw;
unsigned int offset, size;
u8 firmware_id;
+ u8 *response;
int ret = 0;
if (op->request->payload_size != sizeof(*request)) {
@@ -325,8 +324,8 @@ static int fw_download_fetch_firmware(struct gb_operation *op)
goto put_fw;
}
- if (!gb_operation_response_alloc(op, sizeof(*response) + size,
- GFP_KERNEL)) {
+ /* gb_fw_download_fetch_firmware_response contains only a byte array */
+ if (!gb_operation_response_alloc(op, size, GFP_KERNEL)) {
dev_err(fw_download->parent,
"error allocating fetch firmware response\n");
ret = -ENOMEM;
@@ -334,7 +333,7 @@ static int fw_download_fetch_firmware(struct gb_operation *op)
}
response = op->response->payload;
- memcpy(response->data, fw->data + offset, size);
+ memcpy(response, fw->data + offset, size);
dev_dbg(fw_download->parent,
"responding with firmware (offs = %u, size = %u)\n", offset,
diff --git a/drivers/staging/greybus/fw-management.c b/drivers/staging/greybus/fw-management.c
index 93137a3c4907..3054f084d777 100644
--- a/drivers/staging/greybus/fw-management.c
+++ b/drivers/staging/greybus/fw-management.c
@@ -165,7 +165,7 @@ static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
}
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
- ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+ ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
ret);
@@ -180,8 +180,7 @@ static int fw_mgmt_load_and_validate_operation(struct fw_mgmt *fw_mgmt,
GB_FW_MGMT_TYPE_LOAD_AND_VALIDATE_FW, &request,
sizeof(request), NULL, 0);
if (ret) {
- ida_simple_remove(&fw_mgmt->id_map,
- fw_mgmt->intf_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
fw_mgmt->intf_fw_request_id = 0;
dev_err(fw_mgmt->parent,
"load and validate firmware request failed (%d)\n",
@@ -220,7 +219,7 @@ static int fw_mgmt_interface_fw_loaded_operation(struct gb_operation *op)
return -ENODEV;
}
- ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->intf_fw_request_id);
fw_mgmt->intf_fw_request_id = 0;
fw_mgmt->intf_fw_status = request->status;
fw_mgmt->intf_fw_major = le16_to_cpu(request->major);
@@ -316,7 +315,7 @@ static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
}
/* Allocate ids from 1 to 255 (u8-max), 0 is an invalid id */
- ret = ida_simple_get(&fw_mgmt->id_map, 1, 256, GFP_KERNEL);
+ ret = ida_alloc_range(&fw_mgmt->id_map, 1, 255, GFP_KERNEL);
if (ret < 0) {
dev_err(fw_mgmt->parent, "failed to allocate request id (%d)\n",
ret);
@@ -330,8 +329,7 @@ static int fw_mgmt_backend_fw_update_operation(struct fw_mgmt *fw_mgmt,
GB_FW_MGMT_TYPE_BACKEND_FW_UPDATE, &request,
sizeof(request), NULL, 0);
if (ret) {
- ida_simple_remove(&fw_mgmt->id_map,
- fw_mgmt->backend_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
fw_mgmt->backend_fw_request_id = 0;
dev_err(fw_mgmt->parent,
"backend %s firmware update request failed (%d)\n", tag,
@@ -369,7 +367,7 @@ static int fw_mgmt_backend_fw_updated_operation(struct gb_operation *op)
return -ENODEV;
}
- ida_simple_remove(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
+ ida_free(&fw_mgmt->id_map, fw_mgmt->backend_fw_request_id);
fw_mgmt->backend_fw_request_id = 0;
fw_mgmt->backend_fw_status = request->status;
@@ -617,7 +615,7 @@ int gb_fw_mgmt_connection_init(struct gb_connection *connection)
if (ret)
goto err_list_del;
- minor = ida_simple_get(&fw_mgmt_minors_map, 0, NUM_MINORS, GFP_KERNEL);
+ minor = ida_alloc_max(&fw_mgmt_minors_map, NUM_MINORS - 1, GFP_KERNEL);
if (minor < 0) {
ret = minor;
goto err_connection_disable;
@@ -645,7 +643,7 @@ int gb_fw_mgmt_connection_init(struct gb_connection *connection)
err_del_cdev:
cdev_del(&fw_mgmt->cdev);
err_remove_ida:
- ida_simple_remove(&fw_mgmt_minors_map, minor);
+ ida_free(&fw_mgmt_minors_map, minor);
err_connection_disable:
gb_connection_disable(connection);
err_list_del:
@@ -669,7 +667,7 @@ void gb_fw_mgmt_connection_exit(struct gb_connection *connection)
device_destroy(&fw_mgmt_class, fw_mgmt->dev_num);
cdev_del(&fw_mgmt->cdev);
- ida_simple_remove(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
+ ida_free(&fw_mgmt_minors_map, MINOR(fw_mgmt->dev_num));
/*
* Disallow any new ioctl operations on the char device and wait for
diff --git a/drivers/staging/greybus/gbphy.c b/drivers/staging/greybus/gbphy.c
index 6a7d8cf2a1eb..d827f03f5253 100644
--- a/drivers/staging/greybus/gbphy.c
+++ b/drivers/staging/greybus/gbphy.c
@@ -46,7 +46,7 @@ static void gbphy_dev_release(struct device *dev)
{
struct gbphy_device *gbphy_dev = to_gbphy_dev(dev);
- ida_simple_remove(&gbphy_id, gbphy_dev->id);
+ ida_free(&gbphy_id, gbphy_dev->id);
kfree(gbphy_dev);
}
@@ -182,7 +182,7 @@ static void gbphy_dev_remove(struct device *dev)
pm_runtime_dont_use_autosuspend(dev);
}
-static struct bus_type gbphy_bus_type = {
+static const struct bus_type gbphy_bus_type = {
.name = "gbphy",
.match = gbphy_dev_match,
.probe = gbphy_dev_probe,
@@ -225,13 +225,13 @@ static struct gbphy_device *gb_gbphy_create_dev(struct gb_bundle *bundle,
int retval;
int id;
- id = ida_simple_get(&gbphy_id, 1, 0, GFP_KERNEL);
+ id = ida_alloc_min(&gbphy_id, 1, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
gbphy_dev = kzalloc(sizeof(*gbphy_dev), GFP_KERNEL);
if (!gbphy_dev) {
- ida_simple_remove(&gbphy_id, id);
+ ida_free(&gbphy_id, id);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/staging/greybus/greybus_authentication.h b/drivers/staging/greybus/greybus_authentication.h
index 48b4a9794d3c..ee88f880cfe3 100644
--- a/drivers/staging/greybus/greybus_authentication.h
+++ b/drivers/staging/greybus/greybus_authentication.h
@@ -44,7 +44,7 @@
/* IOCTL support */
struct cap_ioc_get_endpoint_uid {
__u8 uid[8];
-} __attribute__ ((__packed__));
+} __packed;
struct cap_ioc_get_ims_certificate {
__u32 certificate_class;
@@ -53,7 +53,7 @@ struct cap_ioc_get_ims_certificate {
__u8 result_code;
__u32 cert_size;
__u8 certificate[CAP_CERTIFICATE_MAX_SIZE];
-} __attribute__ ((__packed__));
+} __packed;
struct cap_ioc_authenticate {
__u32 auth_type;
@@ -64,7 +64,7 @@ struct cap_ioc_authenticate {
__u8 response[64];
__u32 signature_size;
__u8 signature[CAP_SIGNATURE_MAX_SIZE];
-} __attribute__ ((__packed__));
+} __packed;
#define CAP_IOCTL_BASE 'C'
#define CAP_IOC_GET_ENDPOINT_UID _IOR(CAP_IOCTL_BASE, 0, struct cap_ioc_get_endpoint_uid)
diff --git a/drivers/staging/greybus/greybus_firmware.h b/drivers/staging/greybus/greybus_firmware.h
index f68fd5e25321..b6042a82ada4 100644
--- a/drivers/staging/greybus/greybus_firmware.h
+++ b/drivers/staging/greybus/greybus_firmware.h
@@ -41,14 +41,14 @@ struct fw_mgmt_ioc_get_intf_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
-} __attribute__ ((__packed__));
+} __packed;
struct fw_mgmt_ioc_get_backend_version {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u16 major;
__u16 minor;
__u8 status;
-} __attribute__ ((__packed__));
+} __packed;
struct fw_mgmt_ioc_intf_load_and_validate {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
@@ -56,12 +56,12 @@ struct fw_mgmt_ioc_intf_load_and_validate {
__u8 status;
__u16 major;
__u16 minor;
-} __attribute__ ((__packed__));
+} __packed;
struct fw_mgmt_ioc_backend_fw_update {
__u8 firmware_tag[GB_FIRMWARE_U_TAG_MAX_SIZE];
__u8 status;
-} __attribute__ ((__packed__));
+} __packed;
#define FW_MGMT_IOCTL_BASE 'F'
#define FW_MGMT_IOC_GET_INTF_FW _IOR(FW_MGMT_IOCTL_BASE, 0, struct fw_mgmt_ioc_get_intf_version)
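The __packed shorthand substituted in the two greybus ioctl headers above is the kernel-internal macro (normally pulled in through <linux/compiler.h>) that expands to the same __attribute__((__packed__)), so the on-wire struct layout is unchanged. A minimal sketch with a hypothetical struct, assuming a kernel-internal header:

    #include <linux/types.h>	/* __u8, __u16; the attribute macros come in via linux/compiler.h */

    /* illustrative only -- not one of the greybus structures */
    struct example_ioc_header {
    	__u8  type;
    	__u16 length;	/* __packed removes the padding byte that would follow 'type' */
    } __packed;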
diff --git a/drivers/staging/greybus/light.c b/drivers/staging/greybus/light.c
index d62f97249aca..a5c2fe963866 100644
--- a/drivers/staging/greybus/light.c
+++ b/drivers/staging/greybus/light.c
@@ -95,15 +95,15 @@ static struct led_classdev *get_channel_cdev(struct gb_channel *channel)
static struct gb_channel *get_channel_from_mode(struct gb_light *light,
u32 mode)
{
- struct gb_channel *channel = NULL;
+ struct gb_channel *channel;
int i;
for (i = 0; i < light->channels_count; i++) {
channel = &light->channels[i];
- if (channel && channel->mode == mode)
- break;
+ if (channel->mode == mode)
+ return channel;
}
- return channel;
+ return NULL;
}
static int __gb_lights_flash_intensity_set(struct gb_channel *channel,
diff --git a/drivers/staging/greybus/loopback.c b/drivers/staging/greybus/loopback.c
index d7b39f3bb652..bb33379b5297 100644
--- a/drivers/staging/greybus/loopback.c
+++ b/drivers/staging/greybus/loopback.c
@@ -1028,7 +1028,7 @@ static int gb_loopback_probe(struct gb_bundle *bundle,
gb->file = debugfs_create_file(name, S_IFREG | 0444, gb_dev.root, gb,
&gb_loopback_dbgfs_latency_fops);
- gb->id = ida_simple_get(&loopback_ida, 0, 0, GFP_KERNEL);
+ gb->id = ida_alloc(&loopback_ida, GFP_KERNEL);
if (gb->id < 0) {
retval = gb->id;
goto out_debugfs_remove;
@@ -1079,7 +1079,7 @@ out_conn:
out_connection_disable:
gb_connection_disable(connection);
out_ida_remove:
- ida_simple_remove(&loopback_ida, gb->id);
+ ida_free(&loopback_ida, gb->id);
out_debugfs_remove:
debugfs_remove(gb->file);
out_connection_destroy:
@@ -1121,7 +1121,7 @@ static void gb_loopback_disconnect(struct gb_bundle *bundle)
spin_unlock_irqrestore(&gb_dev.lock, flags);
device_unregister(gb->dev);
- ida_simple_remove(&loopback_ida, gb->id);
+ ida_free(&loopback_ida, gb->id);
gb_connection_destroy(gb->connection);
kfree(gb);
diff --git a/drivers/staging/greybus/raw.c b/drivers/staging/greybus/raw.c
index b9c6eff7cdc1..836d35e5fa85 100644
--- a/drivers/staging/greybus/raw.c
+++ b/drivers/staging/greybus/raw.c
@@ -181,7 +181,7 @@ static int gb_raw_probe(struct gb_bundle *bundle,
raw->connection = connection;
greybus_set_drvdata(bundle, raw);
- minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+ minor = ida_alloc(&minors, GFP_KERNEL);
if (minor < 0) {
retval = minor;
goto error_connection_destroy;
@@ -214,7 +214,7 @@ error_connection_disable:
gb_connection_disable(connection);
error_remove_ida:
- ida_simple_remove(&minors, minor);
+ ida_free(&minors, minor);
error_connection_destroy:
gb_connection_destroy(connection);
@@ -235,7 +235,7 @@ static void gb_raw_disconnect(struct gb_bundle *bundle)
device_destroy(&raw_class, raw->dev);
cdev_del(&raw->cdev);
gb_connection_disable(connection);
- ida_simple_remove(&minors, MINOR(raw->dev));
+ ida_free(&minors, MINOR(raw->dev));
gb_connection_destroy(connection);
mutex_lock(&raw->list_lock);
diff --git a/drivers/staging/greybus/vibrator.c b/drivers/staging/greybus/vibrator.c
index 227e18d92a95..89bef8045549 100644
--- a/drivers/staging/greybus/vibrator.c
+++ b/drivers/staging/greybus/vibrator.c
@@ -153,7 +153,7 @@ static int gb_vibrator_probe(struct gb_bundle *bundle,
* there is a "real" device somewhere in the kernel for this, but I
* can't find it at the moment...
*/
- vib->minor = ida_simple_get(&minors, 0, 0, GFP_KERNEL);
+ vib->minor = ida_alloc(&minors, GFP_KERNEL);
if (vib->minor < 0) {
retval = vib->minor;
goto err_connection_disable;
@@ -173,7 +173,7 @@ static int gb_vibrator_probe(struct gb_bundle *bundle,
return 0;
err_ida_remove:
- ida_simple_remove(&minors, vib->minor);
+ ida_free(&minors, vib->minor);
err_connection_disable:
gb_connection_disable(connection);
err_connection_destroy:
@@ -197,7 +197,7 @@ static void gb_vibrator_disconnect(struct gb_bundle *bundle)
turn_off(vib);
device_unregister(vib->dev);
- ida_simple_remove(&minors, vib->minor);
+ ida_free(&minors, vib->minor);
gb_connection_disable(vib->connection);
gb_connection_destroy(vib->connection);
kfree(vib);
diff --git a/drivers/staging/nvec/TODO b/drivers/staging/nvec/TODO
index e4d85d9b4681..8afde3ccc960 100644
--- a/drivers/staging/nvec/TODO
+++ b/drivers/staging/nvec/TODO
@@ -1,6 +1,5 @@
ToDo list (incomplete, unordered)
- - add compile as module support
- - move half of the nvec init stuff to i2c-tegra.c
- - move event handling to nvec_events
+ - move the driver to the new i2c slave framework
- finish suspend/resume support
- - add support for more device implementations
+ - fix udelay in the isr
+ - add atomic ops in order to fix shutoff/reboot problems
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 2823cacde130..282a664c9176 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -709,10 +709,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
status & RNW ? " RNW" : "");
/*
- * TODO: A correct fix needs to be found for this.
+ * TODO: replace the udelay with a read back after each writel above
+ * in order to work around a hardware issue, see i2c-tegra.c
*
- * We experience less incomplete messages with this delay than without
- * it, but we don't know why. Help is appreciated.
+ * Unfortunately, this change causes an initialisation issue with the
+ * touchpad, which needs to be fixed first.
*/
udelay(100);
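
The read back mentioned in the TODO above flushes the posted MMIO write, guaranteeing it has reached the device before execution continues, which is what the fixed udelay() currently papers over. A hedged sketch of that pattern, with a purely illustrative helper name and register offset:

    #include <linux/io.h>

    static inline void example_writel_flushed(void __iomem *base, u32 val,
                                              unsigned long reg)
    {
            writel(val, base + reg);
            /* dummy read back of the same register flushes the posted write */
            (void)readl(base + reg);
    }
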
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index b3049108edc4..211423059e30 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -10,7 +10,6 @@
#include <linux/phy.h>
#include <linux/ratelimit.h>
#include <linux/of_mdio.h>
-#include <generated/utsrelease.h>
#include <net/dst.h>
#include "octeon-ethernet.h"
@@ -22,7 +21,6 @@ static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strscpy(info->version, UTS_RELEASE, sizeof(info->version));
strscpy(info->bus_info, "Builtin", sizeof(info->bus_info));
}
diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
index 3e7b92cd2e35..44cced319c11 100644
--- a/drivers/staging/octeon/octeon-stubs.h
+++ b/drivers/staging/octeon/octeon-stubs.h
@@ -1362,7 +1362,7 @@ static inline struct cvmx_wqe *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
}
static inline int cvmx_spi_restart_interface(int interface,
- cvmx_spi_mode_t mode, int timeout)
+ cvmx_spi_mode_t mode, int timeout)
{
return 0;
}
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index 0ec3130225db..b6c4917d515e 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -49,6 +49,7 @@
#define N_PI433_MINORS BIT(MINORBITS) /*32*/ /* ... up to 256 */
#define MAX_MSG_SIZE 900 /* min: FIFO_SIZE! */
#define MSG_FIFO_SIZE 65536 /* 65536 = 2^16 */
+#define FIFO_THRESHOLD 15 /* bytes */
#define NUM_DIO 2
static dev_t pi433_dev;
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c
index 8c7fab6a46bb..5a1c362badb6 100644
--- a/drivers/staging/pi433/rf69.c
+++ b/drivers/staging/pi433/rf69.c
@@ -8,12 +8,12 @@
#include <linux/types.h>
#include <linux/spi/spi.h>
+#include <linux/units.h>
#include "rf69.h"
#include "rf69_registers.h"
-#define F_OSC 32000000 /* in Hz */
-#define FIFO_SIZE 66 /* in byte */
+#define F_OSC (32 * HZ_PER_MHZ)
/*-------------------------------------------------------------------------*/
diff --git a/drivers/staging/pi433/rf69.h b/drivers/staging/pi433/rf69.h
index 78fa0b8bab8b..76f0f9896a52 100644
--- a/drivers/staging/pi433/rf69.h
+++ b/drivers/staging/pi433/rf69.h
@@ -11,11 +11,7 @@
#include "rf69_enum.h"
#include "rf69_registers.h"
-/* NOTE: Modifying FREQUENCY value impacts CE certification */
-#define F_OSC 32000000 /* Hz */
-#define FREQUENCY 433920000 /* Hz */
#define FIFO_SIZE 66 /* bytes */
-#define FIFO_THRESHOLD 15 /* bytes */
u8 rf69_read_reg(struct spi_device *spi, u8 addr);
int rf69_get_version(struct spi_device *spi);
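
The new F_OSC definition above uses HZ_PER_MHZ from <linux/units.h> (1000000UL), so the value stays numerically identical to the old 32000000 literal; a tiny, purely illustrative compile-time check:

    #include <linux/build_bug.h>
    #include <linux/units.h>

    #define F_OSC (32 * HZ_PER_MHZ)
    static_assert(F_OSC == 32000000, "F_OSC must remain 32 MHz");
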
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index c7a2eae2fdb9..e3ed709a7674 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -164,7 +164,7 @@ void rtl92e_set_reg(struct net_device *dev, u8 variable, u8 *val)
eACI);
break;
}
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACM_CTRL,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_ACM_CTRL,
&pAcParam);
break;
}
@@ -693,11 +693,10 @@ void rtl92e_link_change(struct net_device *dev)
u32 reg;
reg = rtl92e_readl(dev, RCR);
- if (priv->rtllib->link_state == MAC80211_LINKED) {
+ if (priv->rtllib->link_state == MAC80211_LINKED)
priv->receive_config = reg |= RCR_CBSSID;
- } else {
+ else
priv->receive_config = reg &= ~RCR_CBSSID;
- }
rtl92e_writel(dev, RCR, reg);
}
@@ -1296,7 +1295,6 @@ static void _rtl92e_query_rxphystatus(
pwdb_all = rtl92e_rx_db_to_percent(rx_pwr_all);
pstats->RxPWDBAll = precord_stats->RxPWDBAll = pwdb_all;
- pstats->RxPower = precord_stats->RxPower = rx_pwr_all;
pstats->RecvSignalPower = rx_pwr_all;
if (pdrvinfo->RxHT && pdrvinfo->RxRate >= DESC90_RATEMCS8 &&
pdrvinfo->RxRate <= DESC90_RATEMCS15)
@@ -1348,14 +1346,7 @@ static void _rtl92e_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
static u32 slide_beacon_adc_pwdb_index;
static u32 slide_beacon_adc_pwdb_statistics;
static u32 last_beacon_adc_pwdb;
- struct ieee80211_hdr_3addr *hdr;
- u16 sc;
- unsigned int seq;
- hdr = (struct ieee80211_hdr_3addr *)buffer;
- sc = le16_to_cpu(hdr->seq_ctrl);
- seq = WLAN_GET_SEQ_SEQ(sc);
- curr_st->Seq_Num = seq;
if (!prev_st->bIsAMPDU)
bcheck = true;
@@ -1536,7 +1527,7 @@ static void _rtl92e_update_received_rate_histogram_stats(
{
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
u32 rcvType = 1;
- u32 rateIndex;
+ u32 rate_index;
if (pstats->bCRC)
rcvType = 2;
@@ -1545,95 +1536,95 @@ static void _rtl92e_update_received_rate_histogram_stats(
switch (pstats->rate) {
case MGN_1M:
- rateIndex = 0;
+ rate_index = 0;
break;
case MGN_2M:
- rateIndex = 1;
+ rate_index = 1;
break;
case MGN_5_5M:
- rateIndex = 2;
+ rate_index = 2;
break;
case MGN_11M:
- rateIndex = 3;
+ rate_index = 3;
break;
case MGN_6M:
- rateIndex = 4;
+ rate_index = 4;
break;
case MGN_9M:
- rateIndex = 5;
+ rate_index = 5;
break;
case MGN_12M:
- rateIndex = 6;
+ rate_index = 6;
break;
case MGN_18M:
- rateIndex = 7;
+ rate_index = 7;
break;
case MGN_24M:
- rateIndex = 8;
+ rate_index = 8;
break;
case MGN_36M:
- rateIndex = 9;
+ rate_index = 9;
break;
case MGN_48M:
- rateIndex = 10;
+ rate_index = 10;
break;
case MGN_54M:
- rateIndex = 11;
+ rate_index = 11;
break;
case MGN_MCS0:
- rateIndex = 12;
+ rate_index = 12;
break;
case MGN_MCS1:
- rateIndex = 13;
+ rate_index = 13;
break;
case MGN_MCS2:
- rateIndex = 14;
+ rate_index = 14;
break;
case MGN_MCS3:
- rateIndex = 15;
+ rate_index = 15;
break;
case MGN_MCS4:
- rateIndex = 16;
+ rate_index = 16;
break;
case MGN_MCS5:
- rateIndex = 17;
+ rate_index = 17;
break;
case MGN_MCS6:
- rateIndex = 18;
+ rate_index = 18;
break;
case MGN_MCS7:
- rateIndex = 19;
+ rate_index = 19;
break;
case MGN_MCS8:
- rateIndex = 20;
+ rate_index = 20;
break;
case MGN_MCS9:
- rateIndex = 21;
+ rate_index = 21;
break;
case MGN_MCS10:
- rateIndex = 22;
+ rate_index = 22;
break;
case MGN_MCS11:
- rateIndex = 23;
+ rate_index = 23;
break;
case MGN_MCS12:
- rateIndex = 24;
+ rate_index = 24;
break;
case MGN_MCS13:
- rateIndex = 25;
+ rate_index = 25;
break;
case MGN_MCS14:
- rateIndex = 26;
+ rate_index = 26;
break;
case MGN_MCS15:
- rateIndex = 27;
+ rate_index = 27;
break;
default:
- rateIndex = 28;
+ rate_index = 28;
break;
}
- priv->stats.received_rate_histogram[0][rateIndex]++;
- priv->stats.received_rate_histogram[rcvType][rateIndex]++;
+ priv->stats.received_rate_histogram[0][rate_index]++;
+ priv->stats.received_rate_histogram[rcvType][rate_index]++;
}
bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
@@ -1650,7 +1641,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->bHwError |= 1;
if (stats->bHwError) {
- stats->bShift = false;
return false;
}
@@ -1662,7 +1652,6 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->rate = _rtl92e_rate_hw_to_mgn((bool)pDrvInfo->RxHT,
pDrvInfo->RxRate);
- stats->bShortPreamble = pDrvInfo->SPLCP;
_rtl92e_update_received_rate_histogram_stats(dev, stats);
@@ -1673,19 +1662,9 @@ bool rtl92e_get_rx_stats(struct net_device *dev, struct rtllib_rx_stats *stats,
stats->TimeStampLow = pDrvInfo->TSFL;
stats->TimeStampHigh = rtl92e_readl(dev, TSFR + 4);
- if ((stats->RxBufShift + stats->RxDrvInfoSize) > 0)
- stats->bShift = 1;
-
- stats->RxIs40MHzPacket = pDrvInfo->BW;
-
_rtl92e_translate_rx_signal_stats(dev, skb, stats, pdesc, pDrvInfo);
skb_trim(skb, skb->len - S_CRC_LEN);
-
- stats->packetlength = stats->Length - 4;
- stats->fraglength = stats->packetlength;
- stats->fragoffset = 0;
- stats->ntotalfrag = 1;
return true;
}
@@ -1698,7 +1677,7 @@ void rtl92e_stop_adapter(struct net_device *dev, bool reset)
u32 ulRegRead;
op_mode = RT_OP_MODE_NO_LINK;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_MEDIA_STATUS, &op_mode);
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_MEDIA_STATUS, &op_mode);
if (!priv->rtllib->bSupportRemoteWakeUp) {
u1bTmp = 0x0;
@@ -1852,7 +1831,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
u16 RegRxCounter = rtl92e_readw(dev, 0x130);
bool bStuck = false;
static u8 rx_chk_cnt;
- u32 SlotIndex = 0, TotalRxStuckCount = 0;
+ u32 slot_index = 0, TotalRxStuckCount = 0;
u8 i;
u8 SilentResetRxSoltNum = 4;
@@ -1882,10 +1861,10 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
}
- SlotIndex = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
+ slot_index = (priv->silent_reset_rx_slot_index++) % SilentResetRxSoltNum;
if (priv->rx_ctr == RegRxCounter) {
- priv->silent_reset_rx_stuck_event[SlotIndex] = 1;
+ priv->silent_reset_rx_stuck_event[slot_index] = 1;
for (i = 0; i < SilentResetRxSoltNum; i++)
TotalRxStuckCount += priv->silent_reset_rx_stuck_event[i];
@@ -1897,7 +1876,7 @@ bool rtl92e_is_rx_stuck(struct net_device *dev)
priv->silent_reset_rx_stuck_event[i];
}
} else {
- priv->silent_reset_rx_stuck_event[SlotIndex] = 0;
+ priv->silent_reset_rx_stuck_event[slot_index] = 0;
}
priv->rx_ctr = RegRxCounter;
@@ -1938,5 +1917,5 @@ bool rtl92e_is_halfn_supported_by_ap(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
struct rtllib_device *ieee = priv->rtllib;
- return ieee->bHalfWirelessN24GMode;
+ return ieee->half_wireless_n24g_mode;
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index e1bd4d67e862..18b948d4d86d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -714,7 +714,6 @@ void rtl92e_set_channel(struct net_device *dev, u8 channel)
if (priv->up)
_rtl92e_phy_switch_channel_work_item(dev);
priv->sw_chnl_in_progress = false;
- return;
}
static void _rtl92e_cck_tx_power_track_bw_switch_tssi(struct net_device *dev)
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 6815d18a7919..649b529657ba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -172,7 +172,7 @@ bool rtl92e_set_rf_state(struct net_device *dev,
priv->blinked_ingpio = true;
else
priv->blinked_ingpio = false;
- rtllib_MgntDisconnect(priv->rtllib,
+ rtllib_mgnt_disconnect(priv->rtllib,
WLAN_REASON_DISASSOC_STA_HAS_LEFT);
}
}
@@ -236,14 +236,14 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
if (priv->dot11_current_preamble_mode != PREAMBLE_SHORT) {
ShortPreamble = true;
priv->dot11_current_preamble_mode = PREAMBLE_SHORT;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
} else {
if (priv->dot11_current_preamble_mode != PREAMBLE_LONG) {
ShortPreamble = false;
priv->dot11_current_preamble_mode = PREAMBLE_LONG;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_ACK_PREAMBLE,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_ACK_PREAMBLE,
(unsigned char *)&ShortPreamble);
}
}
@@ -256,13 +256,13 @@ static void _rtl92e_update_cap(struct net_device *dev, u16 cap)
(!priv->rtllib->ht_info->current_rt2rt_long_slot_time)) {
if (cur_slot_time != SHORT_SLOT_TIME) {
slot_time_val = SHORT_SLOT_TIME;
- priv->rtllib->SetHwRegHandler(dev,
+ priv->rtllib->set_hw_reg_handler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
}
} else {
if (cur_slot_time != NON_SHORT_SLOT_TIME) {
slot_time_val = NON_SHORT_SLOT_TIME;
- priv->rtllib->SetHwRegHandler(dev,
+ priv->rtllib->set_hw_reg_handler(dev,
HW_VAR_SLOT_TIME, &slot_time_val);
}
}
@@ -301,7 +301,7 @@ static void _rtl92e_qos_activate(void *data)
goto success;
for (i = 0; i < QOS_QUEUE_NUM; i++)
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_AC_PARAM, (u8 *)(&i));
success:
mutex_unlock(&priv->mutex);
@@ -656,12 +656,12 @@ static void _rtl92e_init_priv_handler(struct net_device *dev)
priv->rtllib->enter_sleep_state = rtl92e_enter_sleep;
priv->rtllib->ps_is_queue_empty = _rtl92e_is_tx_queue_empty;
- priv->rtllib->GetNmodeSupportBySecCfg = rtl92e_get_nmode_support_by_sec;
- priv->rtllib->GetHalfNmodeSupportByAPsHandler =
+ priv->rtllib->get_nmode_support_by_sec_cfg = rtl92e_get_nmode_support_by_sec;
+ priv->rtllib->get_half_nmode_support_by_aps_handler =
rtl92e_is_halfn_supported_by_ap;
- priv->rtllib->SetHwRegHandler = rtl92e_set_reg;
- priv->rtllib->AllowAllDestAddrHandler = rtl92e_set_monitor_mode;
+ priv->rtllib->set_hw_reg_handler = rtl92e_set_reg;
+ priv->rtllib->allow_all_dest_addr_handler = rtl92e_set_monitor_mode;
priv->rtllib->init_gain_handler = rtl92e_init_gain;
priv->rtllib->rtllib_ips_leave_wq = rtl92e_rtllib_ips_leave_wq;
priv->rtllib->rtllib_ips_leave = rtl92e_rtllib_ips_leave;
@@ -705,7 +705,7 @@ static void _rtl92e_init_priv_variable(struct net_device *dev)
priv->hw_rf_off_action = 0;
priv->set_rf_pwr_state_in_progress = false;
priv->rtllib->pwr_save_ctrl.bLeisurePs = true;
- priv->rtllib->LPSDelayCnt = 0;
+ priv->rtllib->lps_delay_cnt = 0;
priv->rtllib->sta_sleep = LPS_IS_WAKE;
priv->rtllib->rf_power_state = rf_on;
@@ -909,25 +909,24 @@ static void _rtl92e_if_check_reset(struct net_device *dev)
netdev_info(dev, "%s(): TxResetType is %d, RxResetType is %d\n",
__func__, TxResetType, RxResetType);
}
- return;
}
static void _rtl92e_update_rxcounts(struct r8192_priv *priv, u32 *TotalRxBcnNum,
u32 *TotalRxDataNum)
{
- u16 SlotIndex;
+ u16 slot_index;
u8 i;
*TotalRxBcnNum = 0;
*TotalRxDataNum = 0;
- SlotIndex = (priv->rtllib->link_detect_info.SlotIndex++) %
- (priv->rtllib->link_detect_info.SlotNum);
- priv->rtllib->link_detect_info.RxBcnNum[SlotIndex] =
- priv->rtllib->link_detect_info.NumRecvBcnInPeriod;
- priv->rtllib->link_detect_info.RxDataNum[SlotIndex] =
- priv->rtllib->link_detect_info.NumRecvDataInPeriod;
- for (i = 0; i < priv->rtllib->link_detect_info.SlotNum; i++) {
+ slot_index = (priv->rtllib->link_detect_info.slot_index++) %
+ (priv->rtllib->link_detect_info.slot_num);
+ priv->rtllib->link_detect_info.RxBcnNum[slot_index] =
+ priv->rtllib->link_detect_info.num_recv_bcn_in_period;
+ priv->rtllib->link_detect_info.RxDataNum[slot_index] =
+ priv->rtllib->link_detect_info.num_recv_data_in_period;
+ for (i = 0; i < priv->rtllib->link_detect_info.slot_num; i++) {
*TotalRxBcnNum += priv->rtllib->link_detect_info.RxBcnNum[i];
*TotalRxDataNum += priv->rtllib->link_detect_info.RxDataNum[i];
}
@@ -943,7 +942,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
unsigned long flags;
struct rt_pwr_save_ctrl *psc = (struct rt_pwr_save_ctrl *)
(&priv->rtllib->pwr_save_ctrl);
- bool bBusyTraffic = false;
+ bool busy_traffic = false;
bool bHigherBusyTraffic = false;
bool bHigherBusyRxTraffic = false;
bool bEnterPS = false;
@@ -965,15 +964,14 @@ static void _rtl92e_watchdog_wq_cb(void *data)
MAC80211_NOLINK) &&
(ieee->rf_power_state == rf_on) && !ieee->is_set_key &&
(!ieee->proto_stoppping) && !ieee->wx_set_enc) {
- if (ieee->pwr_save_ctrl.ReturnPoint == IPS_CALLBACK_NONE) {
+ if (ieee->pwr_save_ctrl.ReturnPoint == IPS_CALLBACK_NONE)
rtl92e_ips_enter(dev);
- }
}
}
if ((ieee->link_state == MAC80211_LINKED) && (ieee->iw_mode == IW_MODE_INFRA)) {
if (ieee->link_detect_info.num_rx_ok_in_period > 100 ||
ieee->link_detect_info.num_tx_ok_in_period > 100)
- bBusyTraffic = true;
+ busy_traffic = true;
if (ieee->link_detect_info.num_rx_ok_in_period > 4000 ||
ieee->link_detect_info.num_tx_ok_in_period > 4000) {
@@ -984,9 +982,9 @@ static void _rtl92e_watchdog_wq_cb(void *data)
bHigherBusyRxTraffic = false;
}
- if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
+ if (((ieee->link_detect_info.num_rx_unicast_ok_in_period +
ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
- (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2))
+ (ieee->link_detect_info.num_rx_unicast_ok_in_period > 2))
bEnterPS = false;
else
bEnterPS = true;
@@ -1005,8 +1003,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->link_detect_info.num_rx_ok_in_period = 0;
ieee->link_detect_info.num_tx_ok_in_period = 0;
- ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
- ieee->link_detect_info.bBusyTraffic = bBusyTraffic;
+ ieee->link_detect_info.num_rx_unicast_ok_in_period = 0;
+ ieee->link_detect_info.busy_traffic = busy_traffic;
ieee->link_detect_info.bHigherBusyTraffic = bHigherBusyTraffic;
ieee->link_detect_info.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
@@ -1032,7 +1030,7 @@ static void _rtl92e_watchdog_wq_cb(void *data)
ieee->link_state = RTLLIB_ASSOCIATING;
- RemovePeerTS(priv->rtllib,
+ remove_peer_ts(priv->rtllib,
priv->rtllib->current_network.bssid);
ieee->is_roaming = true;
ieee->is_set_key = false;
@@ -1046,8 +1044,8 @@ static void _rtl92e_watchdog_wq_cb(void *data)
priv->check_roaming_cnt = 0;
}
- ieee->link_detect_info.NumRecvBcnInPeriod = 0;
- ieee->link_detect_info.NumRecvDataInPeriod = 0;
+ ieee->link_detect_info.num_recv_bcn_in_period = 0;
+ ieee->link_detect_info.num_recv_data_in_period = 0;
}
spin_lock_irqsave(&priv->tx_lock, flags);
@@ -1257,7 +1255,7 @@ static short _rtl92e_tx(struct net_device *dev, struct sk_buff *skb)
int idx;
u32 fwinfo_size = 0;
- priv->rtllib->bAwakePktSent = true;
+ priv->rtllib->awake_pkt_sent = true;
fwinfo_size = sizeof(struct tx_fwinfo_8190pci);
@@ -1502,8 +1500,6 @@ static void _rtl92e_rx_normal(struct net_device *dev)
};
unsigned int count = priv->rxringcount;
- stats.nic_type = NIC_8192E;
-
while (count--) {
struct rx_desc *pdesc = &priv->rx_ring
[priv->rx_idx];
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index 92143c50c149..c34087af973c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
@@ -354,7 +354,7 @@ static void _rtl92e_dm_check_rate_adaptive(struct net_device *dev)
}
}
- if (priv->rtllib->GetHalfNmodeSupportByAPsHandler(dev))
+ if (priv->rtllib->get_half_nmode_support_by_aps_handler(dev))
target_ratr &= 0xf00fffff;
current_ratr = rtl92e_readl(dev, RATR0);
@@ -1185,7 +1185,7 @@ static void _rtl92e_dm_check_edca_turbo(struct net_device *dev)
if (priv->bcurrent_turbo_EDCA) {
u8 tmp = AC0_BE;
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_AC_PARAM,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_AC_PARAM,
(u8 *)(&tmp));
priv->bcurrent_turbo_EDCA = false;
}
@@ -1523,7 +1523,7 @@ static void _rtl92e_dm_init_fsync(struct net_device *dev)
priv->rtllib->fsync_multiple_timeinterval = 3;
priv->rtllib->fsync_firstdiff_ratethreshold = 100;
priv->rtllib->fsync_seconddiff_ratethreshold = 200;
- priv->rtllib->fsync_state = Default_Fsync;
+ priv->rtllib->fsync_state = DEFAULT_FSYNC;
timer_setup(&priv->fsync_timer, _rtl92e_dm_fsync_timer_callback, 0);
}
@@ -1636,7 +1636,7 @@ static void _rtl92e_dm_start_hw_fsync(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c12cf);
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING,
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_RF_TIMING,
(u8 *)(&rf_timing));
rtl92e_writeb(dev, 0xc3b, 0x41);
}
@@ -1647,7 +1647,7 @@ static void _rtl92e_dm_end_hw_fsync(struct net_device *dev)
struct r8192_priv *priv = rtllib_priv(dev);
rtl92e_writel(dev, rOFDM0_RxDetector2, 0x465c52cd);
- priv->rtllib->SetHwRegHandler(dev, HW_VAR_RF_TIMING, (u8 *)
+ priv->rtllib->set_hw_reg_handler(dev, HW_VAR_RF_TIMING, (u8 *)
(&rf_timing));
rtl92e_writeb(dev, 0xc3b, 0x49);
}
@@ -1716,31 +1716,29 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
priv->rtllib->ht_info->iot_peer == HT_IOT_PEER_BROADCOM) {
if (priv->rtllib->bfsync_enable == 0) {
switch (priv->rtllib->fsync_state) {
- case Default_Fsync:
+ case DEFAULT_FSYNC:
_rtl92e_dm_start_hw_fsync(dev);
- priv->rtllib->fsync_state = HW_Fsync;
+ priv->rtllib->fsync_state = HW_FSYNC;
break;
- case SW_Fsync:
+ case SW_FSYNC:
_rtl92e_dm_end_sw_fsync(dev);
_rtl92e_dm_start_hw_fsync(dev);
- priv->rtllib->fsync_state = HW_Fsync;
+ priv->rtllib->fsync_state = HW_FSYNC;
break;
- case HW_Fsync:
default:
break;
}
} else {
switch (priv->rtllib->fsync_state) {
- case Default_Fsync:
+ case DEFAULT_FSYNC:
_rtl92e_dm_start_sw_fsync(dev);
- priv->rtllib->fsync_state = SW_Fsync;
+ priv->rtllib->fsync_state = SW_FSYNC;
break;
- case HW_Fsync:
+ case HW_FSYNC:
_rtl92e_dm_end_hw_fsync(dev);
_rtl92e_dm_start_sw_fsync(dev);
- priv->rtllib->fsync_state = SW_Fsync;
+ priv->rtllib->fsync_state = SW_FSYNC;
break;
- case SW_Fsync:
default:
break;
}
@@ -1752,15 +1750,14 @@ static void _rtl92e_dm_check_fsync(struct net_device *dev)
}
} else {
switch (priv->rtllib->fsync_state) {
- case HW_Fsync:
+ case HW_FSYNC:
_rtl92e_dm_end_hw_fsync(dev);
- priv->rtllib->fsync_state = Default_Fsync;
+ priv->rtllib->fsync_state = DEFAULT_FSYNC;
break;
- case SW_Fsync:
+ case SW_FSYNC:
_rtl92e_dm_end_sw_fsync(dev);
- priv->rtllib->fsync_state = Default_Fsync;
+ priv->rtllib->fsync_state = DEFAULT_FSYNC;
break;
- case Default_Fsync:
default:
break;
}
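
The renamed DEFAULT_FSYNC/HW_FSYNC/SW_FSYNC cases above form a small state machine; the transitions those switch statements encode are summarized below (a condensed reading of the hunks, not code taken from the driver):

    /*
     * Fsync state transitions in _rtl92e_dm_check_fsync(), as encoded above:
     *
     *   Broadcom HT peer linked, bfsync_enable == 0:
     *       DEFAULT_FSYNC -> HW_FSYNC      (start HW fsync)
     *       SW_FSYNC      -> HW_FSYNC      (end SW, start HW fsync)
     *   Broadcom HT peer linked, bfsync_enable != 0:
     *       DEFAULT_FSYNC -> SW_FSYNC      (start SW fsync)
     *       HW_FSYNC      -> SW_FSYNC      (end HW, start SW fsync)
     *   otherwise:
     *       HW_FSYNC      -> DEFAULT_FSYNC (end HW fsync)
     *       SW_FSYNC      -> DEFAULT_FSYNC (end SW fsync)
     */
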
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index 44a9fe831849..5aac9110bff6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -208,12 +208,12 @@ void rtl92e_leisure_ps_enter(struct net_device *dev)
return;
if (psc->bLeisurePs) {
- if (psc->LpsIdleCount >= RT_CHECK_FOR_HANG_PERIOD) {
+ if (psc->lps_idle_count >= RT_CHECK_FOR_HANG_PERIOD) {
if (priv->rtllib->ps == RTLLIB_PS_DISABLED)
_rtl92e_ps_set_mode(dev, RTLLIB_PS_MBCAST | RTLLIB_PS_UNICAST);
} else {
- psc->LpsIdleCount++;
+ psc->lps_idle_count++;
}
}
}
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 4c884c5277f9..d131ef525f46 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -253,7 +253,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
rt_state = priv->rtllib->rf_power_state;
if (!priv->up)
return -ENETDOWN;
- if (priv->rtllib->link_detect_info.bBusyTraffic)
+ if (priv->rtllib->link_detect_info.busy_traffic)
return -EAGAIN;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
@@ -269,7 +269,7 @@ static int _rtl92e_wx_set_scan(struct net_device *dev,
mutex_lock(&priv->wx_mutex);
- priv->rtllib->FirstIe_InScan = true;
+ priv->rtllib->first_ie_in_scan = true;
if (priv->rtllib->link_state != MAC80211_LINKED) {
if (rt_state == rf_off) {
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index ee9ce392155c..834329886ea2 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -125,7 +125,7 @@ static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *dst,
static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
struct ba_record *ba,
- enum tr_select TxRxSelect, u16 reason_code)
+ enum tr_select tx_rx_select, u16 reason_code)
{
union delba_param_set del_ba_param_set;
struct sk_buff *skb = NULL;
@@ -139,7 +139,7 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
memset(&del_ba_param_set, 0, 2);
- del_ba_param_set.field.initiator = (TxRxSelect == TX_DIR) ? 1 : 0;
+ del_ba_param_set.field.initiator = (tx_rx_select == TX_DIR) ? 1 : 0;
del_ba_param_set.field.tid = ba->ba_param_set.field.tid;
skb = dev_alloc_skb(len + sizeof(struct ieee80211_hdr_3addr));
@@ -173,8 +173,8 @@ static struct sk_buff *rtllib_DELBA(struct rtllib_device *ieee, u8 *dst,
return skb;
}
-static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *ba)
+static void rtllib_send_add_ba_req(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *ba)
{
struct sk_buff *skb;
@@ -186,8 +186,8 @@ static void rtllib_send_ADDBAReq(struct rtllib_device *ieee, u8 *dst,
netdev_dbg(ieee->dev, "Failed to generate ADDBAReq packet.\n");
}
-static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *ba, u16 status_code)
+static void rtllib_send_add_ba_rsp(struct rtllib_device *ieee, u8 *dst,
+ struct ba_record *ba, u16 status_code)
{
struct sk_buff *skb;
@@ -199,19 +199,19 @@ static void rtllib_send_ADDBARsp(struct rtllib_device *ieee, u8 *dst,
}
static void rtllib_send_DELBA(struct rtllib_device *ieee, u8 *dst,
- struct ba_record *ba, enum tr_select TxRxSelect,
+ struct ba_record *ba, enum tr_select tx_rx_select,
u16 reason_code)
{
struct sk_buff *skb;
- skb = rtllib_DELBA(ieee, dst, ba, TxRxSelect, reason_code);
+ skb = rtllib_DELBA(ieee, dst, ba, tx_rx_select, reason_code);
if (skb)
softmac_mgmt_xmit(skb, ieee);
else
netdev_dbg(ieee->dev, "Failed to generate DELBA packet.\n");
}
-int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
+int rtllib_rx_add_ba_req(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *req = NULL;
u16 rc = 0;
@@ -251,13 +251,13 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
"Failed to reply on ADDBA_REQ as some capability is not ready(%d, %d)\n",
ieee->current_network.qos_data.active,
ieee->ht_info->current_ht_support);
- goto OnADDBAReq_Fail;
+ goto on_add_ba_req_fail;
}
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)(ba_param_set->field.tid), RX_DIR, true)) {
+ (u8)(ba_param_set->field.tid), RX_DIR, true)) {
rc = ADDBA_STATUS_REFUSED;
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
- goto OnADDBAReq_Fail;
+ goto on_add_ba_req_fail;
}
ba = &ts->rx_admitted_ba_record;
@@ -265,10 +265,10 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
rc = ADDBA_STATUS_INVALID_PARAM;
netdev_warn(ieee->dev, "%s(): BA Policy is not correct\n",
__func__);
- goto OnADDBAReq_Fail;
+ goto on_add_ba_req_fail;
}
- rtllib_FlushRxTsPendingPkts(ieee, ts);
+ rtllib_flush_rx_ts_pending_pkts(ieee, ts);
deactivate_ba_entry(ieee, ba);
ba->dialog_token = *dialog_token;
@@ -276,18 +276,18 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
ba->ba_timeout_value = *ba_timeout_value;
ba->ba_start_seq_ctrl = *ba_start_seq_ctrl;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev) ||
- (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev) ||
+ (ieee->ht_info->iot_action & HT_IOT_ACT_ALLOW_PEER_AGG_ONE_PKT))
ba->ba_param_set.field.buffer_size = 1;
else
ba->ba_param_set.field.buffer_size = 32;
activate_ba_entry(ba, 0);
- rtllib_send_ADDBARsp(ieee, dst, ba, ADDBA_STATUS_SUCCESS);
+ rtllib_send_add_ba_rsp(ieee, dst, ba, ADDBA_STATUS_SUCCESS);
return 0;
-OnADDBAReq_Fail:
+on_add_ba_req_fail:
{
struct ba_record BA;
@@ -295,12 +295,12 @@ OnADDBAReq_Fail:
BA.ba_timeout_value = *ba_timeout_value;
BA.dialog_token = *dialog_token;
BA.ba_param_set.field.ba_policy = BA_POLICY_IMMEDIATE;
- rtllib_send_ADDBARsp(ieee, dst, &BA, rc);
+ rtllib_send_add_ba_rsp(ieee, dst, &BA, rc);
return 0;
}
}
-int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
+int rtllib_rx_add_ba_rsp(struct rtllib_device *ieee, struct sk_buff *skb)
{
struct ieee80211_hdr_3addr *rsp = NULL;
struct ba_record *pending_ba, *admitted_ba;
@@ -334,14 +334,14 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->ht_info->current_ht_support,
ieee->ht_info->current_ampdu_enable);
reason_code = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)(ba_param_set->field.tid), TX_DIR, false)) {
+ (u8)(ba_param_set->field.tid), TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS\n", __func__);
reason_code = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
ts->add_ba_req_in_progress = false;
@@ -358,7 +358,7 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
"%s(): ADDBA Rsp. BA invalid, DELBA!\n",
__func__);
reason_code = DELBA_REASON_UNKNOWN_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
} else {
netdev_dbg(ieee->dev,
"%s(): Recv ADDBA Rsp. BA is admitted! Status code:%X\n",
@@ -371,7 +371,7 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
ts->add_ba_req_delayed = true;
deactivate_ba_entry(ieee, admitted_ba);
reason_code = DELBA_REASON_END_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
admitted_ba->dialog_token = *dialog_token;
@@ -384,12 +384,12 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
ts->add_ba_req_delayed = true;
ts->disable_add_ba = true;
reason_code = DELBA_REASON_END_BA;
- goto OnADDBARsp_Reject;
+ goto on_add_ba_rsp_reject;
}
return 0;
-OnADDBARsp_Reject:
+on_add_ba_rsp_reject:
{
struct ba_record BA;
@@ -433,7 +433,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
struct rx_ts_record *ts;
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)del_ba_param_set->field.tid, RX_DIR, false)) {
+ (u8)del_ba_param_set->field.tid, RX_DIR, false)) {
netdev_warn(ieee->dev,
"%s(): can't get TS for RXTS. dst:%pM TID:%d\n",
__func__, dst,
@@ -446,7 +446,7 @@ int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb)
struct tx_ts_record *ts;
if (!rtllib_get_ts(ieee, (struct ts_common_info **)&ts, dst,
- (u8)del_ba_param_set->field.tid, TX_DIR, false)) {
+ (u8)del_ba_param_set->field.tid, TX_DIR, false)) {
netdev_warn(ieee->dev, "%s(): can't get TS for TXTS\n",
__func__);
return -1;
@@ -481,14 +481,14 @@ void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
activate_ba_entry(ba, BA_SETUP_TIMEOUT);
- rtllib_send_ADDBAReq(ieee, ts->ts_common_info.addr, ba);
+ rtllib_send_add_ba_req(ieee, ts->ts_common_info.addr, ba);
}
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
struct ts_common_info *ts_common_info,
- enum tr_select TxRxSelect)
+ enum tr_select tx_rx_select)
{
- if (TxRxSelect == TX_DIR) {
+ if (tx_rx_select == TX_DIR) {
struct tx_ts_record *ts =
(struct tx_ts_record *)ts_common_info;
@@ -497,14 +497,14 @@ void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
(ts->tx_admitted_ba_record.b_valid) ?
(&ts->tx_admitted_ba_record) :
(&ts->tx_pending_ba_record),
- TxRxSelect, DELBA_REASON_END_BA);
- } else if (TxRxSelect == RX_DIR) {
+ tx_rx_select, DELBA_REASON_END_BA);
+ } else if (tx_rx_select == RX_DIR) {
struct rx_ts_record *ts =
(struct rx_ts_record *)ts_common_info;
if (rx_ts_delete_ba(ieee, ts))
rtllib_send_DELBA(ieee, ts_common_info->addr,
&ts->rx_admitted_ba_record,
- TxRxSelect, DELBA_REASON_END_BA);
+ tx_rx_select, DELBA_REASON_END_BA);
}
}
diff --git a/drivers/staging/rtl8192e/rtl819x_HT.h b/drivers/staging/rtl8192e/rtl819x_HT.h
index 68577bffb936..a4580445305d 100644
--- a/drivers/staging/rtl8192e/rtl819x_HT.h
+++ b/drivers/staging/rtl8192e/rtl819x_HT.h
@@ -98,9 +98,9 @@ struct rt_hi_throughput {
u8 cur_short_gi_40mhz;
u8 cur_short_gi_20mhz;
enum ht_spec_ver peer_ht_spec_ver;
- struct ht_capab_ele SelfHTCap;
- u8 PeerHTCapBuf[32];
- u8 PeerHTInfoBuf[32];
+ struct ht_capab_ele self_ht_cap;
+ u8 peer_ht_cap_buf[32];
+ u8 peer_ht_info_buf[32];
u8 ampdu_enable;
u8 current_ampdu_enable;
u8 ampdu_factor;
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index 6d0912f90198..fa96a2c2c916 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -252,7 +252,7 @@ void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap,
}
cap_ele->AdvCoding = 0;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev))
cap_ele->ChlWidth = 0;
else
cap_ele->ChlWidth = 1;
@@ -301,7 +301,7 @@ void ht_construct_capability_element(struct rtllib_device *ieee, u8 *pos_ht_cap,
if (ht->iot_action & HT_IOT_ACT_DISABLE_RX_40MHZ_SHORT_GI)
cap_ele->ShortGI40Mhz = 0;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) {
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev)) {
cap_ele->ChlWidth = 0;
cap_ele->MCS[1] = 0;
}
@@ -408,7 +408,7 @@ static u8 ht_filter_mcs_rate(struct rtllib_device *ieee, u8 *pSupportMCS,
ht_pick_mcs_rate(ieee, pOperateMCS);
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev))
pOperateMCS[1] = 0;
for (i = 2; i <= 15; i++)
@@ -437,16 +437,16 @@ void ht_on_assoc_rsp(struct rtllib_device *ieee)
}
netdev_dbg(ieee->dev, "%s(): HT_ENABLE\n", __func__);
- if (!memcmp(ht_info->PeerHTCapBuf, EWC11NHTCap, sizeof(EWC11NHTCap)))
- pPeerHTCap = (struct ht_capab_ele *)(&ht_info->PeerHTCapBuf[4]);
+ if (!memcmp(ht_info->peer_ht_cap_buf, EWC11NHTCap, sizeof(EWC11NHTCap)))
+ pPeerHTCap = (struct ht_capab_ele *)(&ht_info->peer_ht_cap_buf[4]);
else
- pPeerHTCap = (struct ht_capab_ele *)(ht_info->PeerHTCapBuf);
+ pPeerHTCap = (struct ht_capab_ele *)(ht_info->peer_ht_cap_buf);
- if (!memcmp(ht_info->PeerHTInfoBuf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
+ if (!memcmp(ht_info->peer_ht_info_buf, EWC11NHTInfo, sizeof(EWC11NHTInfo)))
pPeerHTInfo = (struct ht_info_ele *)
- (&ht_info->PeerHTInfoBuf[4]);
+ (&ht_info->peer_ht_info_buf[4]);
else
- pPeerHTInfo = (struct ht_info_ele *)(ht_info->PeerHTInfoBuf);
+ pPeerHTInfo = (struct ht_info_ele *)(ht_info->peer_ht_info_buf);
#ifdef VERBOSE_DEBUG
print_hex_dump_bytes("%s: ", __func__, DUMP_PREFIX_NONE,
@@ -480,9 +480,9 @@ void ht_on_assoc_rsp(struct rtllib_device *ieee)
}
ht_info->current_mpdu_density = pPeerHTCap->MPDUDensity;
- if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K) {
+ if (ht_info->iot_action & HT_IOT_ACT_TX_USE_AMSDU_8K)
ht_info->current_ampdu_enable = false;
- }
+
ht_info->cur_rx_reorder_enable = 1;
if (pPeerHTCap->MCS[0] == 0)
@@ -516,12 +516,12 @@ void ht_initialize_ht_info(struct rtllib_device *ieee)
ht_info->current_mpdu_density = 0;
ht_info->CurrentAMPDUFactor = ht_info->ampdu_factor;
- memset((void *)(&ht_info->SelfHTCap), 0,
- sizeof(ht_info->SelfHTCap));
- memset((void *)(&ht_info->PeerHTCapBuf), 0,
- sizeof(ht_info->PeerHTCapBuf));
- memset((void *)(&ht_info->PeerHTInfoBuf), 0,
- sizeof(ht_info->PeerHTInfoBuf));
+ memset((void *)(&ht_info->self_ht_cap), 0,
+ sizeof(ht_info->self_ht_cap));
+ memset((void *)(&ht_info->peer_ht_cap_buf), 0,
+ sizeof(ht_info->peer_ht_cap_buf));
+ memset((void *)(&ht_info->peer_ht_info_buf), 0,
+ sizeof(ht_info->peer_ht_info_buf));
ht_info->sw_bw_in_progress = false;
@@ -572,15 +572,15 @@ void ht_reset_self_and_save_peer_setting(struct rtllib_device *ieee,
ht_info->peer_ht_spec_ver = pNetwork->bssht.bd_ht_spec_ver;
if (pNetwork->bssht.bd_ht_cap_len > 0 &&
- pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->PeerHTCapBuf))
- memcpy(ht_info->PeerHTCapBuf,
+ pNetwork->bssht.bd_ht_cap_len <= sizeof(ht_info->peer_ht_cap_buf))
+ memcpy(ht_info->peer_ht_cap_buf,
pNetwork->bssht.bd_ht_cap_buf,
pNetwork->bssht.bd_ht_cap_len);
if (pNetwork->bssht.bd_ht_info_len > 0 &&
pNetwork->bssht.bd_ht_info_len <=
- sizeof(ht_info->PeerHTInfoBuf))
- memcpy(ht_info->PeerHTInfoBuf,
+ sizeof(ht_info->peer_ht_info_buf))
+ memcpy(ht_info->peer_ht_info_buf,
pNetwork->bssht.bd_ht_info_buf,
pNetwork->bssht.bd_ht_info_len);
@@ -666,7 +666,7 @@ void ht_set_connect_bw_mode(struct rtllib_device *ieee,
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
- if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev))
+ if (ieee->get_half_nmode_support_by_aps_handler(ieee->dev))
bandwidth = HT_CHANNEL_WIDTH_20;
if (ht_info->sw_bw_in_progress) {
diff --git a/drivers/staging/rtl8192e/rtl819x_Qos.h b/drivers/staging/rtl8192e/rtl819x_Qos.h
index 50e01ca49a4c..dc991100742f 100644
--- a/drivers/staging/rtl8192e/rtl819x_Qos.h
+++ b/drivers/staging/rtl8192e/rtl819x_Qos.h
@@ -13,7 +13,7 @@ struct qos_tsinfo {
};
struct octet_string {
- u8 *Octet;
+ u8 *octet;
u16 Length;
};
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 7e73d31dcccf..9903fe3f3c77 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -171,14 +171,14 @@ void rtllib_ts_init(struct rtllib_device *ieee)
static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
u8 *addr, u8 TID,
- enum tr_select TxRxSelect)
+ enum tr_select tx_rx_select)
{
u8 dir;
bool search_dir[4] = {0};
struct list_head *psearch_list;
struct ts_common_info *pRet = NULL;
- if (TxRxSelect == TX_DIR) {
+ if (tx_rx_select == TX_DIR) {
search_dir[DIR_UP] = true;
search_dir[DIR_BI_DIR] = true;
search_dir[DIR_DIRECT] = true;
@@ -188,7 +188,7 @@ static struct ts_common_info *SearchAdmitTRStream(struct rtllib_device *ieee,
search_dir[DIR_DIRECT] = true;
}
- if (TxRxSelect == TX_DIR)
+ if (tx_rx_select == TX_DIR)
psearch_list = &ieee->Tx_TS_Admit_List;
else
psearch_list = &ieee->Rx_TS_Admit_List;
@@ -225,7 +225,7 @@ static void MakeTSEntry(struct ts_common_info *ts_common_info, u8 *addr,
}
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
- u8 *addr, u8 TID, enum tr_select TxRxSelect, bool bAddNewTs)
+ u8 *addr, u8 TID, enum tr_select tx_rx_select, bool bAddNewTs)
{
u8 UP = 0;
struct qos_tsinfo tspec;
@@ -265,7 +265,7 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
}
}
- *ppTS = SearchAdmitTRStream(ieee, addr, UP, TxRxSelect);
+ *ppTS = SearchAdmitTRStream(ieee, addr, UP, tx_rx_select);
if (*ppTS)
return true;
@@ -274,21 +274,21 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
return false;
}
- pUnusedList = (TxRxSelect == TX_DIR) ?
+ pUnusedList = (tx_rx_select == TX_DIR) ?
(&ieee->Tx_TS_Unused_List) :
(&ieee->Rx_TS_Unused_List);
- pAddmitList = (TxRxSelect == TX_DIR) ?
+ pAddmitList = (tx_rx_select == TX_DIR) ?
(&ieee->Tx_TS_Admit_List) :
(&ieee->Rx_TS_Admit_List);
- Dir = ((TxRxSelect == TX_DIR) ? DIR_UP : DIR_DOWN);
+ Dir = ((tx_rx_select == TX_DIR) ? DIR_UP : DIR_DOWN);
if (!list_empty(pUnusedList)) {
(*ppTS) = list_entry(pUnusedList->next,
struct ts_common_info, list);
list_del_init(&(*ppTS)->list);
- if (TxRxSelect == TX_DIR) {
+ if (tx_rx_select == TX_DIR) {
struct tx_ts_record *tmp =
container_of(*ppTS,
struct tx_ts_record,
@@ -321,11 +321,11 @@ bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS,
}
static void RemoveTsEntry(struct rtllib_device *ieee,
- struct ts_common_info *pTs, enum tr_select TxRxSelect)
+ struct ts_common_info *pTs, enum tr_select tx_rx_select)
{
- rtllib_ts_init_del_ba(ieee, pTs, TxRxSelect);
+ rtllib_ts_init_del_ba(ieee, pTs, tx_rx_select);
- if (TxRxSelect == RX_DIR) {
+ if (tx_rx_select == RX_DIR) {
struct rx_reorder_entry *pRxReorderEntry;
struct rx_ts_record *ts = (struct rx_ts_record *)pTs;
@@ -360,7 +360,7 @@ static void RemoveTsEntry(struct rtllib_device *ieee,
}
}
-void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
+void remove_peer_ts(struct rtllib_device *ieee, u8 *addr)
{
struct ts_common_info *ts, *pTmpTS;
@@ -400,9 +400,9 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *addr)
}
}
}
-EXPORT_SYMBOL(RemovePeerTS);
+EXPORT_SYMBOL(remove_peer_ts);
-void RemoveAllTS(struct rtllib_device *ieee)
+void remove_all_ts(struct rtllib_device *ieee)
{
struct ts_common_info *ts, *pTmpTS;
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 7b39a1987fdd..6fbf11ac168f 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -123,7 +123,7 @@ struct cb_desc {
u8 bPacketBW:1;
u8 bRTSUseShortPreamble:1;
u8 bRTSUseShortGI:1;
- u8 bMulticast:1;
+ u8 multicast:1;
u8 bBroadcast:1;
u8 drv_agg_enable:1;
u8 reserved2:1;
@@ -474,47 +474,30 @@ struct rtllib_rx_stats {
u8 control;
u8 mask;
u16 len;
- u64 tsf;
- u32 beacon_time;
- u8 nic_type;
u16 Length;
u8 SignalQuality;
s32 RecvSignalPower;
- s8 RxPower;
u8 SignalStrength;
u16 bHwError:1;
u16 bCRC:1;
u16 bICV:1;
- u16 bShortPreamble:1;
- u16 Antenna:1;
u16 Decrypted:1;
- u16 Wakeup:1;
- u16 Reserved0:1;
- u8 AGC;
u32 TimeStampLow;
u32 TimeStampHigh;
- bool bShift;
- bool bIsQosData;
u8 RxDrvInfoSize;
u8 RxBufShift;
bool bIsAMPDU;
bool bFirstMPDU;
bool bContainHTC;
- bool RxIs40MHzPacket;
u32 RxPWDBAll;
u8 RxMIMOSignalStrength[4];
s8 RxMIMOSignalQuality[2];
bool bPacketMatchBSSID;
bool bIsCCK;
bool bPacketToSelf;
- u16 packetlength;
- u16 fraglength;
- u16 fragoffset;
- u16 ntotalfrag;
bool bPacketBeacon;
bool bToSelfBA;
- u16 Seq_Num;
};
/* IEEE 802.11 requires that STA supports concurrent reception of at least
@@ -928,14 +911,14 @@ struct rtllib_network {
struct rtllib_qos_data qos_data;
bool bWithAironetIE;
- bool bCkipSupported;
- bool bCcxRmEnable;
+ bool ckip_supported;
+ bool ccx_rm_enable;
u8 CcxRmState[2];
bool bMBssidValid;
u8 MBssidMask;
u8 MBssid[ETH_ALEN];
bool bWithCcxVerNum;
- u8 BssCcxVerNumber;
+ u8 bss_ccx_ver_number;
/* These are network statistics */
struct rtllib_rx_stats stats;
u16 capability;
@@ -965,7 +948,7 @@ struct rtllib_network {
u8 wmm_info;
struct rtllib_wmm_ac_param wmm_param[4];
- u8 Turbo_Enable;
+ u8 turbo_enable;
u16 CountryIeLen;
u8 CountryIeBuf[MAX_IE_LEN];
struct bss_ht bssht;
@@ -1048,9 +1031,9 @@ struct rx_reorder_entry {
};
enum fsync_state {
- Default_Fsync,
- HW_Fsync,
- SW_Fsync
+ DEFAULT_FSYNC,
+ HW_FSYNC,
+ SW_FSYNC
};
enum ips_callback_function {
@@ -1071,8 +1054,8 @@ struct rt_pwr_save_ctrl {
enum ips_callback_function ReturnPoint;
bool bLeisurePs;
- u8 LpsIdleCount;
- u8 LPSAwakeIntvl;
+ u8 lps_idle_count;
+ u8 lps_awake_intvl;
u32 CurPsLevel;
};
@@ -1110,18 +1093,18 @@ enum scan_op_backup_opt {
#define RT_MAX_LD_SLOT_NUM 10
struct rt_link_detect {
- u32 NumRecvBcnInPeriod;
- u32 NumRecvDataInPeriod;
+ u32 num_recv_bcn_in_period;
+ u32 num_recv_data_in_period;
u32 RxBcnNum[RT_MAX_LD_SLOT_NUM];
u32 RxDataNum[RT_MAX_LD_SLOT_NUM];
- u16 SlotNum;
- u16 SlotIndex;
+ u16 slot_num;
+ u16 slot_index;
u32 num_tx_ok_in_period;
u32 num_rx_ok_in_period;
- u32 NumRxUnicastOkInPeriod;
- bool bBusyTraffic;
+ u32 num_rx_unicast_ok_in_period;
+ bool busy_traffic;
bool bHigherBusyTraffic;
bool bHigherBusyRxTraffic;
};
@@ -1161,7 +1144,7 @@ struct rate_adaptive {
#define NUM_PMKID_CACHE 16
struct rt_pmkid_list {
- u8 Bssid[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
u8 PMKID[16];
u8 SsidBuf[33];
u8 used;
@@ -1193,7 +1176,7 @@ struct rtllib_device {
u8 *assocreq_ies, *assocresp_ies;
size_t assocreq_ies_len, assocresp_ies_len;
- bool bForcedBgMode;
+ bool forced_bg_mode;
u8 hwsec_active;
bool is_roaming;
@@ -1201,7 +1184,7 @@ struct rtllib_device {
bool cannot_notify;
bool bSupportRemoteWakeUp;
bool actscanning;
- bool FirstIe_InScan;
+ bool first_ie_in_scan;
bool be_scan_inprogress;
bool beinretry;
enum rt_rf_power_state rf_power_state;
@@ -1264,7 +1247,7 @@ struct rtllib_device {
int ieee802_1x; /* is IEEE 802.1X used */
/* WPA data */
- bool bHalfWirelessN24GMode;
+ bool half_wireless_n24g_mode;
int wpa_enabled;
int drop_unencrypted;
int tkip_countermeasures;
@@ -1281,7 +1264,7 @@ struct rtllib_device {
struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
- struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
+ struct rt_pmkid_list pmkid_list[NUM_PMKID_CACHE];
/* Fragmentation structures */
struct rtllib_frag_entry frag_cache[17][RTLLIB_FRAG_CACHE_LEN];
@@ -1374,14 +1357,14 @@ struct rtllib_device {
/* for PS mode */
unsigned long last_rx_ps_time;
- bool bAwakePktSent;
- u8 LPSDelayCnt;
+ bool awake_pkt_sent;
+ u8 lps_delay_cnt;
/* used if IEEE_SOFTMAC_SINGLE_QUEUE is set */
struct sk_buff *mgmt_queue_ring[MGMT_QUEUE_NUM];
int mgmt_queue_head;
int mgmt_queue_tail;
- u8 AsocRetryCount;
+ u8 asoc_retry_count;
struct sk_buff_head skb_waitq[MAX_QUEUE_SIZE];
bool bdynamic_txpower_enable;
@@ -1484,17 +1467,18 @@ struct rtllib_device {
void (*set_bw_mode_handler)(struct net_device *dev,
enum ht_channel_width bandwidth,
enum ht_extchnl_offset Offset);
- bool (*GetNmodeSupportBySecCfg)(struct net_device *dev);
+ bool (*get_nmode_support_by_sec_cfg)(struct net_device *dev);
void (*set_wireless_mode)(struct net_device *dev, u8 wireless_mode);
- bool (*GetHalfNmodeSupportByAPsHandler)(struct net_device *dev);
+ bool (*get_half_nmode_support_by_aps_handler)(struct net_device *dev);
u8 (*rtllib_ap_sec_type)(struct rtllib_device *ieee);
void (*init_gain_handler)(struct net_device *dev, u8 Operation);
void (*ScanOperationBackupHandler)(struct net_device *dev,
u8 Operation);
- void (*SetHwRegHandler)(struct net_device *dev, u8 variable, u8 *val);
+ void (*set_hw_reg_handler)(struct net_device *dev, u8 variable, u8 *val);
- void (*AllowAllDestAddrHandler)(struct net_device *dev,
- bool bAllowAllDA, bool WriteIntoReg);
+ void (*allow_all_dest_addr_handler)(struct net_device *dev,
+ bool bAllowAllDA,
+ bool WriteIntoReg);
void (*rtllib_ips_leave_wq)(struct net_device *dev);
void (*rtllib_ips_leave)(struct net_device *dev);
@@ -1662,7 +1646,7 @@ int rtllib_rx_frame_softmac(struct rtllib_device *ieee, struct sk_buff *skb,
void rtllib_softmac_new_net(struct rtllib_device *ieee,
struct rtllib_network *net);
-void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn);
+void send_disassociation(struct rtllib_device *ieee, bool deauth, u16 rsn);
void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee);
int rtllib_softmac_init(struct rtllib_device *ieee);
@@ -1771,25 +1755,25 @@ u8 ht_c_check(struct rtllib_device *ieee, u8 *pFrame);
void ht_reset_iot_setting(struct rt_hi_throughput *ht_info);
bool is_ht_half_nmode_aps(struct rtllib_device *ieee);
u16 tx_count_to_data_rate(struct rtllib_device *ieee, u8 nDataRate);
-int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb);
-int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_add_ba_req(struct rtllib_device *ieee, struct sk_buff *skb);
+int rtllib_rx_add_ba_rsp(struct rtllib_device *ieee, struct sk_buff *skb);
int rtllib_rx_DELBA(struct rtllib_device *ieee, struct sk_buff *skb);
void rtllib_ts_init_add_ba(struct rtllib_device *ieee, struct tx_ts_record *ts,
u8 policy, u8 overwrite_pending);
void rtllib_ts_init_del_ba(struct rtllib_device *ieee,
struct ts_common_info *ts_common_info,
- enum tr_select TxRxSelect);
+ enum tr_select tx_rx_select);
void rtllib_ba_setup_timeout(struct timer_list *t);
void rtllib_tx_ba_inact_timeout(struct timer_list *t);
void rtllib_rx_ba_inact_timeout(struct timer_list *t);
void rtllib_reset_ba_entry(struct ba_record *ba);
bool rtllib_get_ts(struct rtllib_device *ieee, struct ts_common_info **ppTS, u8 *addr,
- u8 TID, enum tr_select TxRxSelect, bool bAddNewTs);
+ u8 TID, enum tr_select tx_rx_select, bool bAddNewTs);
void rtllib_ts_init(struct rtllib_device *ieee);
void TsStartAddBaProcess(struct rtllib_device *ieee,
struct tx_ts_record *pTxTS);
-void RemovePeerTS(struct rtllib_device *ieee, u8 *addr);
-void RemoveAllTS(struct rtllib_device *ieee);
+void remove_peer_ts(struct rtllib_device *ieee, u8 *addr);
+void remove_all_ts(struct rtllib_device *ieee);
static inline const char *escape_essid(const char *essid, u8 essid_len)
{
@@ -1805,13 +1789,13 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
}
/* fun with the built-in rtllib stack... */
-bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
+bool rtllib_mgnt_disconnect(struct rtllib_device *rtllib, u8 rsn);
/* For the function is more related to hardware setting, it's better to use the
* ieee handler to refer to it.
*/
-void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *ts);
+void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
+ struct rx_ts_record *ts);
int rtllib_parse_info_param(struct rtllib_device *ieee,
struct rtllib_info_element *info_element,
u16 length,
@@ -1821,6 +1805,6 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
void rtllib_indicate_packets(struct rtllib_device *ieee,
struct rtllib_rxb **prxbIndicateArray, u8 index);
#define RT_ASOC_RETRY_LIMIT 5
-u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee);
+u8 mgnt_query_tx_rate_exclude_cck_rates(struct rtllib_device *ieee);
#endif /* RTLLIB_H */
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 4df20f4d6bf9..ebf8a2fd36d3 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -487,8 +487,8 @@ void rtllib_indicate_packets(struct rtllib_device *ieee,
}
}
-void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee,
- struct rx_ts_record *ts)
+void rtllib_flush_rx_ts_pending_pkts(struct rtllib_device *ieee,
+ struct rx_ts_record *ts)
{
struct rx_reorder_entry *pRxReorderEntry;
u8 RfdCnt = 0;
@@ -865,9 +865,6 @@ static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee,
rx_stats->bContainHTC = true;
}
- if (RTLLIB_QOS_HAS_SEQ(fc))
- rx_stats->bIsQosData = true;
-
return hdrlen;
}
@@ -943,10 +940,9 @@ static void rtllib_rx_extract_addr(struct rtllib_device *ieee,
static int rtllib_rx_data_filter(struct rtllib_device *ieee, struct ieee80211_hdr *hdr,
u8 *dst, u8 *src, u8 *bssid, u8 *addr2)
{
- u8 type, stype;
u16 fc = le16_to_cpu(hdr->frame_control);
- type = WLAN_FC_GET_TYPE(fc);
- stype = WLAN_FC_GET_STYPE(fc);
+ u8 type = WLAN_FC_GET_TYPE(fc);
+ u8 stype = WLAN_FC_GET_STYPE(fc);
/* Filter frames from different BSS */
if (ieee80211_has_a4(hdr->frame_control) &&
@@ -1149,9 +1145,9 @@ static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
{
if (unicast) {
if (ieee->link_state == MAC80211_LINKED) {
- if (((ieee->link_detect_info.NumRxUnicastOkInPeriod +
+ if (((ieee->link_detect_info.num_rx_unicast_ok_in_period +
ieee->link_detect_info.num_tx_ok_in_period) > 8) ||
- (ieee->link_detect_info.NumRxUnicastOkInPeriod > 2)) {
+ (ieee->link_detect_info.num_rx_unicast_ok_in_period > 2)) {
ieee->leisure_ps_leave(ieee->dev);
}
}
@@ -1284,7 +1280,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
/* Filter WAPI DATA Frame */
/* Update statstics for AP roaming */
- ieee->link_detect_info.NumRecvDataInPeriod++;
+ ieee->link_detect_info.num_recv_data_in_period++;
ieee->link_detect_info.num_rx_ok_in_period++;
/* Data frame - extract src/dst addresses */
@@ -1363,7 +1359,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
else
nr_subframes = 1;
if (unicast)
- ieee->link_detect_info.NumRxUnicastOkInPeriod += nr_subframes;
+ ieee->link_detect_info.num_rx_unicast_ok_in_period += nr_subframes;
rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
/* Indicate packets to upper layer or Rx Reorder */
@@ -1689,7 +1685,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[2] == 0x4c &&
info_element->data[3] == 0x01 &&
info_element->data[4] == 0x02)
- network->Turbo_Enable = 1;
+ network->turbo_enable = 1;
if (*tmp_htcap_len == 0) {
if (info_element->len >= 4 &&
@@ -1819,9 +1815,9 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
if (info_element->len == 6) {
memcpy(network->CcxRmState, &info_element->data[4], 2);
if (network->CcxRmState[0] != 0)
- network->bCcxRmEnable = true;
+ network->ccx_rm_enable = true;
else
- network->bCcxRmEnable = false;
+ network->ccx_rm_enable = false;
network->MBssidMask = network->CcxRmState[1] & 0x07;
if (network->MBssidMask != 0) {
network->bMBssidValid = true;
@@ -1834,7 +1830,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
network->bMBssidValid = false;
}
} else {
- network->bCcxRmEnable = false;
+ network->ccx_rm_enable = false;
}
}
if (info_element->len > 4 &&
@@ -1844,10 +1840,10 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
info_element->data[3] == 0x03) {
if (info_element->len == 5) {
network->bWithCcxVerNum = true;
- network->BssCcxVerNumber = info_element->data[4];
+ network->bss_ccx_ver_number = info_element->data[4];
} else {
network->bWithCcxVerNum = false;
- network->BssCcxVerNumber = 0;
+ network->bss_ccx_ver_number = 0;
}
}
if (info_element->len > 4 &&
@@ -2100,12 +2096,12 @@ int rtllib_parse_info_param(struct rtllib_device *ieee,
& SUPPORT_CKIP_MIC) ||
(info_element->data[IE_CISCO_FLAG_POSITION]
& SUPPORT_CKIP_PK))
- network->bCkipSupported = true;
+ network->ckip_supported = true;
else
- network->bCkipSupported = false;
+ network->ckip_supported = false;
} else {
network->bWithAironetIE = false;
- network->bCkipSupported = false;
+ network->ckip_supported = false;
}
break;
case MFIE_TYPE_QOS_PARAMETER:
@@ -2184,7 +2180,7 @@ static inline int rtllib_network_init(
network->realtek_cap_exit = false;
network->marvell_cap_exist = false;
network->airgo_cap_exist = false;
- network->Turbo_Enable = 0;
+ network->turbo_enable = 0;
network->SignalStrength = stats->SignalStrength;
network->RSSI = stats->SignalStrength;
network->CountryIeLen = 0;
@@ -2344,20 +2340,20 @@ static inline void update_network(struct rtllib_device *ieee,
dst->SignalStrength = src->SignalStrength;
dst->RSSI = src->RSSI;
- dst->Turbo_Enable = src->Turbo_Enable;
+ dst->turbo_enable = src->turbo_enable;
dst->CountryIeLen = src->CountryIeLen;
memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);
dst->bWithAironetIE = src->bWithAironetIE;
- dst->bCkipSupported = src->bCkipSupported;
+ dst->ckip_supported = src->ckip_supported;
memcpy(dst->CcxRmState, src->CcxRmState, 2);
- dst->bCcxRmEnable = src->bCcxRmEnable;
+ dst->ccx_rm_enable = src->ccx_rm_enable;
dst->MBssidMask = src->MBssidMask;
dst->bMBssidValid = src->bMBssidValid;
memcpy(dst->MBssid, src->MBssid, 6);
dst->bWithCcxVerNum = src->bWithCcxVerNum;
- dst->BssCcxVerNumber = src->BssCcxVerNumber;
+ dst->bss_ccx_ver_number = src->bss_ccx_ver_number;
}
static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
@@ -2470,7 +2466,7 @@ static inline void rtllib_process_probe_response(
}
if (ieee80211_is_beacon(frame_ctl)) {
if (ieee->link_state >= MAC80211_LINKED)
- ieee->link_detect_info.NumRecvBcnInPeriod++;
+ ieee->link_detect_info.num_recv_bcn_in_period++;
}
}
list_for_each_entry(target, &ieee->network_list, list) {
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b9278b26accd..97fdca828da7 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -138,7 +138,7 @@ static void init_mgmt_queue(struct rtllib_device *ieee)
ieee->mgmt_queue_head = 0;
}
-u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
+u8 mgnt_query_tx_rate_exclude_cck_rates(struct rtllib_device *ieee)
{
u16 i;
u8 query_rate = 0;
@@ -163,7 +163,7 @@ u8 MgntQuery_TxRateExcludeCCKRates(struct rtllib_device *ieee)
return query_rate;
}
-static u8 MgntQuery_MgntFrameTxRate(struct rtllib_device *ieee)
+static u8 mgnt_query_mgnt_frame_tx_rate(struct rtllib_device *ieee)
{
struct rt_hi_throughput *ht_info = ieee->ht_info;
u8 rate;
@@ -201,7 +201,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct rtllib_device *ieee)
if (ieee->disable_mgnt_queue)
tcb_desc->queue_index = HIGH_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
+ tcb_desc->data_rate = mgnt_query_mgnt_frame_tx_rate(ieee);
tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
@@ -277,7 +277,7 @@ softmac_ps_mgmt_xmit(struct sk_buff *skb,
if (ieee->disable_mgnt_queue)
tcb_desc->queue_index = HIGH_QUEUE;
- tcb_desc->data_rate = MgntQuery_MgntFrameTxRate(ieee);
+ tcb_desc->data_rate = mgnt_query_mgnt_frame_tx_rate(ieee);
tcb_desc->ratr_index = 7;
tcb_desc->tx_dis_rate_fallback = 1;
tcb_desc->tx_use_drv_assinged_rate = 1;
@@ -355,20 +355,19 @@ void rtllib_enable_net_monitor_mode(struct net_device *dev,
netdev_info(dev, "========>Enter Monitor Mode\n");
- ieee->AllowAllDestAddrHandler(dev, true, !init_state);
+ ieee->allow_all_dest_addr_handler(dev, true, !init_state);
}
/* Disables network monitor mode. Only packets destined to
* us will be received.
*/
-void rtllib_disable_net_monitor_mode(struct net_device *dev,
- bool init_state)
+void rtllib_disable_net_monitor_mode(struct net_device *dev, bool init_state)
{
struct rtllib_device *ieee = netdev_priv_rsl(dev);
netdev_info(dev, "========>Exit Monitor Mode\n");
- ieee->AllowAllDestAddrHandler(dev, false, !init_state);
+ ieee->allow_all_dest_addr_handler(dev, false, !init_state);
}
static void rtllib_send_probe(struct rtllib_device *ieee)
@@ -665,13 +664,13 @@ static struct sk_buff *rtllib_pspoll_func(struct rtllib_device *ieee)
return skb;
}
-static inline int SecIsInPMKIDList(struct rtllib_device *ieee, u8 *bssid)
+static inline int sec_is_in_pmkid_list(struct rtllib_device *ieee, u8 *bssid)
{
int i = 0;
do {
- if ((ieee->PMKIDList[i].used) &&
- (memcmp(ieee->PMKIDList[i].Bssid, bssid, ETH_ALEN) == 0))
+ if ((ieee->pmkid_list[i].used) &&
+ (memcmp(ieee->pmkid_list[i].bssid, bssid, ETH_ALEN) == 0))
break;
i++;
} while (i < NUM_PMKID_CACHE);
@@ -700,7 +699,7 @@ rtllib_association_req(struct rtllib_network *beacon,
unsigned int cxvernum_ie_len = 0;
struct lib80211_crypt_data *crypt;
int encrypt;
- int PMKCacheIdx;
+ int pmk_cache_idx;
unsigned int rate_len = (beacon->rates_len ?
(beacon->rates_len + 2) : 0) +
@@ -708,7 +707,7 @@ rtllib_association_req(struct rtllib_network *beacon,
2 : 0);
unsigned int wmm_info_len = beacon->qos_data.supported ? 9 : 0;
- unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
+ unsigned int turbo_info_len = beacon->turbo_enable ? 9 : 0;
int len = 0;
@@ -722,14 +721,14 @@ rtllib_association_req(struct rtllib_network *beacon,
if ((ieee->rtllib_ap_sec_type &&
(ieee->rtllib_ap_sec_type(ieee) & SEC_ALG_TKIP)) ||
- ieee->bForcedBgMode) {
+ ieee->forced_bg_mode) {
ieee->ht_info->enable_ht = 0;
ieee->mode = WIRELESS_MODE_G;
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
- ht_cap_buf = (u8 *)&ieee->ht_info->SelfHTCap;
- ht_cap_len = sizeof(ieee->ht_info->SelfHTCap);
+ ht_cap_buf = (u8 *)&ieee->ht_info->self_ht_cap;
+ ht_cap_len = sizeof(ieee->ht_info->self_ht_cap);
ht_construct_capability_element(ieee, ht_cap_buf, &ht_cap_len,
encrypt, true);
if (ieee->ht_info->current_rt2rt_aggregation) {
@@ -741,15 +740,15 @@ rtllib_association_req(struct rtllib_network *beacon,
}
}
- if (beacon->bCkipSupported)
+ if (beacon->ckip_supported)
ckip_ie_len = 30 + 2;
- if (beacon->bCcxRmEnable)
+ if (beacon->ccx_rm_enable)
ccxrm_ie_len = 6 + 2;
- if (beacon->BssCcxVerNumber >= 2)
+ if (beacon->bss_ccx_ver_number >= 2)
cxvernum_ie_len = 5 + 2;
- PMKCacheIdx = SecIsInPMKIDList(ieee, ieee->current_network.bssid);
- if (PMKCacheIdx >= 0) {
+ pmk_cache_idx = sec_is_in_pmkid_list(ieee, ieee->current_network.bssid);
+ if (pmk_cache_idx >= 0) {
wpa_ie_len += 18;
netdev_info(ieee->dev, "[PMK cache]: WPA2 IE length: %x\n",
wpa_ie_len);
@@ -818,52 +817,52 @@ rtllib_association_req(struct rtllib_network *beacon,
*tag++ = beacon->rates_ex[i];
}
- if (beacon->bCkipSupported) {
- static const u8 AironetIeOui[] = {0x00, 0x01, 0x66};
- u8 CcxAironetBuf[30];
- struct octet_string osCcxAironetIE;
+ if (beacon->ckip_supported) {
+ static const u8 aironet_ie_oui[] = {0x00, 0x01, 0x66};
+ u8 ccx_aironet_buf[30];
+ struct octet_string os_ccx_aironet_ie;
- memset(CcxAironetBuf, 0, 30);
- osCcxAironetIE.Octet = CcxAironetBuf;
- osCcxAironetIE.Length = sizeof(CcxAironetBuf);
- memcpy(osCcxAironetIE.Octet, AironetIeOui,
- sizeof(AironetIeOui));
+ memset(ccx_aironet_buf, 0, 30);
+ os_ccx_aironet_ie.octet = ccx_aironet_buf;
+ os_ccx_aironet_ie.Length = sizeof(ccx_aironet_buf);
+ memcpy(os_ccx_aironet_ie.octet, aironet_ie_oui,
+ sizeof(aironet_ie_oui));
- osCcxAironetIE.Octet[IE_CISCO_FLAG_POSITION] |=
+ os_ccx_aironet_ie.octet[IE_CISCO_FLAG_POSITION] |=
(SUPPORT_CKIP_PK | SUPPORT_CKIP_MIC);
tag = skb_put(skb, ckip_ie_len);
*tag++ = MFIE_TYPE_AIRONET;
- *tag++ = osCcxAironetIE.Length;
- memcpy(tag, osCcxAironetIE.Octet, osCcxAironetIE.Length);
- tag += osCcxAironetIE.Length;
+ *tag++ = os_ccx_aironet_ie.Length;
+ memcpy(tag, os_ccx_aironet_ie.octet, os_ccx_aironet_ie.Length);
+ tag += os_ccx_aironet_ie.Length;
}
- if (beacon->bCcxRmEnable) {
- static const u8 CcxRmCapBuf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
+ if (beacon->ccx_rm_enable) {
+ static const u8 ccx_rm_cap_buf[] = {0x00, 0x40, 0x96, 0x01, 0x01,
0x00};
- struct octet_string osCcxRmCap;
+ struct octet_string os_ccx_rm_cap;
- osCcxRmCap.Octet = (u8 *)CcxRmCapBuf;
- osCcxRmCap.Length = sizeof(CcxRmCapBuf);
+ os_ccx_rm_cap.octet = (u8 *)ccx_rm_cap_buf;
+ os_ccx_rm_cap.Length = sizeof(ccx_rm_cap_buf);
tag = skb_put(skb, ccxrm_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxRmCap.Length;
- memcpy(tag, osCcxRmCap.Octet, osCcxRmCap.Length);
- tag += osCcxRmCap.Length;
+ *tag++ = os_ccx_rm_cap.Length;
+ memcpy(tag, os_ccx_rm_cap.octet, os_ccx_rm_cap.Length);
+ tag += os_ccx_rm_cap.Length;
}
- if (beacon->BssCcxVerNumber >= 2) {
- u8 CcxVerNumBuf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
- struct octet_string osCcxVerNum;
+ if (beacon->bss_ccx_ver_number >= 2) {
+ u8 ccx_ver_num_buf[] = {0x00, 0x40, 0x96, 0x03, 0x00};
+ struct octet_string os_ccx_ver_num;
- CcxVerNumBuf[4] = beacon->BssCcxVerNumber;
- osCcxVerNum.Octet = CcxVerNumBuf;
- osCcxVerNum.Length = sizeof(CcxVerNumBuf);
+ ccx_ver_num_buf[4] = beacon->bss_ccx_ver_number;
+ os_ccx_ver_num.octet = ccx_ver_num_buf;
+ os_ccx_ver_num.Length = sizeof(ccx_ver_num_buf);
tag = skb_put(skb, cxvernum_ie_len);
*tag++ = MFIE_TYPE_GENERIC;
- *tag++ = osCcxVerNum.Length;
- memcpy(tag, osCcxVerNum.Octet, osCcxVerNum.Length);
- tag += osCcxVerNum.Length;
+ *tag++ = os_ccx_ver_num.Length;
+ memcpy(tag, os_ccx_ver_num.octet, os_ccx_ver_num.Length);
+ tag += os_ccx_ver_num.Length;
}
if (ieee->ht_info->current_ht_support && ieee->ht_info->enable_ht) {
if (ieee->ht_info->peer_ht_spec_ver != HT_SPEC_VER_EWC) {
@@ -878,11 +877,11 @@ rtllib_association_req(struct rtllib_network *beacon,
if (wpa_ie_len) {
skb_put_data(skb, ieee->wpa_ie, ieee->wpa_ie_len);
- if (PMKCacheIdx >= 0) {
+ if (pmk_cache_idx >= 0) {
tag = skb_put(skb, 18);
*tag = 1;
*(tag + 1) = 0;
- memcpy((tag + 2), &ieee->PMKIDList[PMKCacheIdx].PMKID,
+ memcpy((tag + 2), &ieee->pmkid_list[pmk_cache_idx].PMKID,
16);
}
}
@@ -1072,17 +1071,16 @@ static void rtllib_associate_complete_wq(void *data)
ieee->ht_info->enable_ht);
memset(ieee->dot11ht_oper_rate_set, 0, 16);
}
- ieee->link_detect_info.SlotNum = 2 * (1 +
+ ieee->link_detect_info.slot_num = 2 * (1 +
ieee->current_network.beacon_interval /
500);
- if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 ||
- ieee->link_detect_info.NumRecvDataInPeriod == 0) {
- ieee->link_detect_info.NumRecvBcnInPeriod = 1;
- ieee->link_detect_info.NumRecvDataInPeriod = 1;
+ if (ieee->link_detect_info.num_recv_bcn_in_period == 0 ||
+ ieee->link_detect_info.num_recv_data_in_period == 0) {
+ ieee->link_detect_info.num_recv_bcn_in_period = 1;
+ ieee->link_detect_info.num_recv_data_in_period = 1;
}
- psc->LpsIdleCount = 0;
+ psc->lps_idle_count = 0;
ieee->link_change(ieee->dev);
-
}
static void rtllib_sta_send_associnfo(struct rtllib_device *ieee)
@@ -1209,18 +1207,18 @@ inline void rtllib_softmac_new_net(struct rtllib_device *ieee,
ieee->current_network.flags);
if ((rtllib_act_scanning(ieee, false)) &&
- !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
+ !(ieee->softmac_features & IEEE_SOFTMAC_SCAN))
rtllib_stop_scan_syncro(ieee);
ht_reset_iot_setting(ieee->ht_info);
ieee->wmm_acm = 0;
if (ieee->iw_mode == IW_MODE_INFRA) {
/* Join the network for the first time */
- ieee->AsocRetryCount = 0;
+ ieee->asoc_retry_count = 0;
if ((ieee->current_network.qos_data.supported == 1) &&
ieee->current_network.bssht.bd_support_ht)
ht_reset_self_and_save_peer_setting(ieee,
- &(ieee->current_network));
+ &ieee->current_network);
else
ieee->ht_info->current_ht_support = false;
@@ -1319,10 +1317,10 @@ static inline u16 assoc_parse(struct rtllib_device *ieee, struct sk_buff *skb,
status_code == WLAN_STATUS_CAPS_UNSUPPORTED) &&
((ieee->mode == WIRELESS_MODE_G) &&
(ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
- (ieee->AsocRetryCount++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
+ (ieee->asoc_retry_count++ < (RT_ASOC_RETRY_LIMIT - 1)))) {
ieee->ht_info->iot_action |= HT_IOT_ACT_PURE_N_MODE;
} else {
- ieee->AsocRetryCount = 0;
+ ieee->asoc_retry_count = 0;
}
return le16_to_cpu(response_head->status);
@@ -1351,8 +1349,8 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
u8 dtim;
struct rt_pwr_save_ctrl *psc = &ieee->pwr_save_ctrl;
- if (ieee->LPSDelayCnt) {
- ieee->LPSDelayCnt--;
+ if (ieee->lps_delay_cnt) {
+ ieee->lps_delay_cnt--;
return 0;
}
@@ -1378,45 +1376,45 @@ static short rtllib_sta_ps_sleep(struct rtllib_device *ieee, u64 *time)
return 0;
if (time) {
- if (ieee->bAwakePktSent) {
- psc->LPSAwakeIntvl = 1;
+ if (ieee->awake_pkt_sent) {
+ psc->lps_awake_intvl = 1;
} else {
- u8 MaxPeriod = 5;
+ u8 max_period = 5;
- if (psc->LPSAwakeIntvl == 0)
- psc->LPSAwakeIntvl = 1;
- psc->LPSAwakeIntvl = (psc->LPSAwakeIntvl >=
- MaxPeriod) ? MaxPeriod :
- (psc->LPSAwakeIntvl + 1);
+ if (psc->lps_awake_intvl == 0)
+ psc->lps_awake_intvl = 1;
+ psc->lps_awake_intvl = (psc->lps_awake_intvl >=
+ max_period) ? max_period :
+ (psc->lps_awake_intvl + 1);
}
{
- u8 LPSAwakeIntvl_tmp = 0;
+ u8 lps_awake_intvl_tmp = 0;
u8 period = ieee->current_network.dtim_period;
u8 count = ieee->current_network.tim.tim_count;
if (count == 0) {
- if (psc->LPSAwakeIntvl > period)
- LPSAwakeIntvl_tmp = period +
- (psc->LPSAwakeIntvl -
+ if (psc->lps_awake_intvl > period)
+ lps_awake_intvl_tmp = period +
+ (psc->lps_awake_intvl -
period) -
- ((psc->LPSAwakeIntvl - period) %
+ ((psc->lps_awake_intvl - period) %
period);
else
- LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
+ lps_awake_intvl_tmp = psc->lps_awake_intvl;
} else {
- if (psc->LPSAwakeIntvl >
+ if (psc->lps_awake_intvl >
ieee->current_network.tim.tim_count)
- LPSAwakeIntvl_tmp = count +
- (psc->LPSAwakeIntvl - count) -
- ((psc->LPSAwakeIntvl - count) % period);
+ lps_awake_intvl_tmp = count +
+ (psc->lps_awake_intvl - count) -
+ ((psc->lps_awake_intvl - count) % period);
else
- LPSAwakeIntvl_tmp = psc->LPSAwakeIntvl;
+ lps_awake_intvl_tmp = psc->lps_awake_intvl;
}
*time = ieee->current_network.last_dtim_sta_time
+ msecs_to_jiffies(ieee->current_network.beacon_interval *
- LPSAwakeIntvl_tmp);
+ lps_awake_intvl_tmp);
}
}
@@ -1461,7 +1459,7 @@ static inline void rtllib_sta_ps(struct work_struct *work)
spin_unlock_irqrestore(&ieee->mgmt_tx_lock, flags2);
}
- ieee->bAwakePktSent = false;
+ ieee->awake_pkt_sent = false;
} else if (sleep == 2) {
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags2);
@@ -1553,10 +1551,10 @@ static void rtllib_process_action(struct rtllib_device *ieee,
case ACT_CAT_BA:
switch (*act) {
case ACT_ADDBAREQ:
- rtllib_rx_ADDBAReq(ieee, skb);
+ rtllib_rx_add_ba_req(ieee, skb);
break;
case ACT_ADDBARSP:
- rtllib_rx_ADDBARsp(ieee, skb);
+ rtllib_rx_add_ba_rsp(ieee, skb);
break;
case ACT_DELBA:
rtllib_rx_DELBA(ieee, skb);
@@ -1606,10 +1604,10 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
kfree(network);
return 1;
}
- memcpy(ieee->ht_info->PeerHTCapBuf,
+ memcpy(ieee->ht_info->peer_ht_cap_buf,
network->bssht.bd_ht_cap_buf,
network->bssht.bd_ht_cap_len);
- memcpy(ieee->ht_info->PeerHTInfoBuf,
+ memcpy(ieee->ht_info->peer_ht_info_buf,
network->bssht.bd_ht_info_buf,
network->bssht.bd_ht_info_len);
ieee->handle_assoc_response(ieee->dev,
@@ -1634,7 +1632,7 @@ rtllib_rx_assoc_resp(struct rtllib_device *ieee, struct sk_buff *skb,
netdev_info(ieee->dev,
"Association response status code 0x%x\n",
errcode);
- if (ieee->AsocRetryCount < RT_ASOC_RETRY_LIMIT)
+ if (ieee->asoc_retry_count < RT_ASOC_RETRY_LIMIT)
schedule_delayed_work(&ieee->associate_procedure_wq, 0);
else
rtllib_associate_abort(ieee);
@@ -1648,7 +1646,7 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
int errcode;
u8 *challenge;
int chlen = 0;
- bool bSupportNmode = true, bHalfSupportNmode = false;
+ bool support_nmode = true, half_support_nmode = false;
errcode = auth_parse(ieee->dev, skb, &challenge, &chlen);
@@ -1664,18 +1662,18 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->link_state = RTLLIB_ASSOCIATING_AUTHENTICATED;
ieee->softmac_stats.rx_auth_rs_ok++;
if (!(ieee->ht_info->iot_action & HT_IOT_ACT_PURE_N_MODE)) {
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev)) {
+ if (!ieee->get_nmode_support_by_sec_cfg(ieee->dev)) {
if (is_ht_half_nmode_aps(ieee)) {
- bSupportNmode = true;
- bHalfSupportNmode = true;
+ support_nmode = true;
+ half_support_nmode = true;
} else {
- bSupportNmode = false;
- bHalfSupportNmode = false;
+ support_nmode = false;
+ half_support_nmode = false;
}
}
}
/* Dummy wireless mode setting to avoid encryption issue */
- if (bSupportNmode) {
+ if (support_nmode) {
ieee->set_wireless_mode(ieee->dev,
ieee->current_network.mode);
} else {
@@ -1684,11 +1682,11 @@ static void rtllib_rx_auth_resp(struct rtllib_device *ieee, struct sk_buff *skb)
}
if ((ieee->current_network.mode == WIRELESS_MODE_N_24G) &&
- bHalfSupportNmode) {
+ half_support_nmode) {
netdev_info(ieee->dev, "======>enter half N mode\n");
- ieee->bHalfWirelessN24GMode = true;
+ ieee->half_wireless_n24g_mode = true;
} else {
- ieee->bHalfWirelessN24GMode = false;
+ ieee->half_wireless_n24g_mode = false;
}
rtllib_associate_step2(ieee);
} else {
@@ -1734,13 +1732,11 @@ rtllib_rx_deauth(struct rtllib_device *ieee, struct sk_buff *skb)
ieee->link_state = RTLLIB_ASSOCIATING;
ieee->softmac_stats.reassoc++;
ieee->is_roaming = true;
- ieee->link_detect_info.bBusyTraffic = false;
+ ieee->link_detect_info.busy_traffic = false;
rtllib_disassociate(ieee);
- RemovePeerTS(ieee, header->addr2);
- if (!(ieee->rtllib_ap_sec_type(ieee) &
- (SEC_ALG_CCMP | SEC_ALG_TKIP)))
- schedule_delayed_work(
- &ieee->associate_procedure_wq, 5);
+ remove_peer_ts(ieee, header->addr2);
+ if (!(ieee->rtllib_ap_sec_type(ieee) & (SEC_ALG_CCMP | SEC_ALG_TKIP)))
+ schedule_delayed_work(&ieee->associate_procedure_wq, 5);
}
return 0;
}
@@ -1816,7 +1812,7 @@ void rtllib_softmac_xmit(struct rtllib_txb *txb, struct rtllib_device *ieee)
/* update the tx status */
tcb_desc = (struct cb_desc *)(txb->fragments[0]->cb +
MAX_DEV_ADDR_SIZE);
- if (tcb_desc->bMulticast)
+ if (tcb_desc->multicast)
ieee->stats.multicast++;
/* if xmit available, just xmit it immediately, else just insert it to
@@ -1998,11 +1994,11 @@ void rtllib_stop_protocol(struct rtllib_device *ieee)
if (ieee->link_state == MAC80211_LINKED) {
if (ieee->iw_mode == IW_MODE_INFRA)
- SendDisassociation(ieee, 1, WLAN_REASON_DEAUTH_LEAVING);
+ send_disassociation(ieee, 1, WLAN_REASON_DEAUTH_LEAVING);
rtllib_disassociate(ieee);
}
- RemoveAllTS(ieee);
+ remove_all_ts(ieee);
ieee->proto_stoppping = 0;
kfree(ieee->assocreq_ies);
@@ -2072,13 +2068,13 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->link_detect_info.SlotIndex = 0;
- ieee->link_detect_info.SlotNum = 2;
- ieee->link_detect_info.NumRecvBcnInPeriod = 0;
- ieee->link_detect_info.NumRecvDataInPeriod = 0;
+ ieee->link_detect_info.slot_index = 0;
+ ieee->link_detect_info.slot_num = 2;
+ ieee->link_detect_info.num_recv_bcn_in_period = 0;
+ ieee->link_detect_info.num_recv_data_in_period = 0;
ieee->link_detect_info.num_tx_ok_in_period = 0;
ieee->link_detect_info.num_rx_ok_in_period = 0;
- ieee->link_detect_info.NumRxUnicastOkInPeriod = 0;
+ ieee->link_detect_info.num_rx_unicast_ok_in_period = 0;
ieee->is_aggregate_frame = false;
ieee->assoc_id = 0;
ieee->queue_stop = 0;
@@ -2101,7 +2097,7 @@ int rtllib_softmac_init(struct rtllib_device *ieee)
ieee->reg_dot11tx_ht_oper_rate_set[1] = 0xff;
ieee->reg_dot11tx_ht_oper_rate_set[4] = 0x01;
- ieee->FirstIe_InScan = false;
+ ieee->first_ie_in_scan = false;
ieee->actscanning = false;
ieee->beinretry = false;
ieee->is_set_key = false;
@@ -2148,7 +2144,7 @@ void rtllib_softmac_free(struct rtllib_device *ieee)
static inline struct sk_buff *
rtllib_disauth_skb(struct rtllib_network *beacon,
- struct rtllib_device *ieee, u16 asRsn)
+ struct rtllib_device *ieee, u16 rsn)
{
struct sk_buff *skb;
struct rtllib_disauth *disauth;
@@ -2168,13 +2164,13 @@ rtllib_disauth_skb(struct rtllib_network *beacon,
ether_addr_copy(disauth->header.addr2, ieee->dev->dev_addr);
ether_addr_copy(disauth->header.addr3, beacon->bssid);
- disauth->reason = cpu_to_le16(asRsn);
+ disauth->reason = cpu_to_le16(rsn);
return skb;
}
static inline struct sk_buff *
rtllib_disassociate_skb(struct rtllib_network *beacon,
- struct rtllib_device *ieee, u16 asRsn)
+ struct rtllib_device *ieee, u16 rsn)
{
struct sk_buff *skb;
struct rtllib_disassoc *disass;
@@ -2195,19 +2191,19 @@ rtllib_disassociate_skb(struct rtllib_network *beacon,
ether_addr_copy(disass->header.addr2, ieee->dev->dev_addr);
ether_addr_copy(disass->header.addr3, beacon->bssid);
- disass->reason = cpu_to_le16(asRsn);
+ disass->reason = cpu_to_le16(rsn);
return skb;
}
-void SendDisassociation(struct rtllib_device *ieee, bool deauth, u16 asRsn)
+void send_disassociation(struct rtllib_device *ieee, bool deauth, u16 rsn)
{
struct rtllib_network *beacon = &ieee->current_network;
struct sk_buff *skb;
if (deauth)
- skb = rtllib_disauth_skb(beacon, ieee, asRsn);
+ skb = rtllib_disauth_skb(beacon, ieee, rsn);
else
- skb = rtllib_disassociate_skb(beacon, ieee, asRsn);
+ skb = rtllib_disassociate_skb(beacon, ieee, rsn);
if (skb)
softmac_mgmt_xmit(skb, ieee);
@@ -2241,56 +2237,56 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
}
}
-static void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib,
- u8 *asSta, u8 asRsn)
+static void rtllib_mlme_disassociate_request(struct rtllib_device *rtllib,
+ u8 *addr, u8 rsn)
{
u8 i;
u8 op_mode;
- RemovePeerTS(rtllib, asSta);
+ remove_peer_ts(rtllib, addr);
- if (memcmp(rtllib->current_network.bssid, asSta, 6) == 0) {
+ if (memcmp(rtllib->current_network.bssid, addr, 6) == 0) {
rtllib->link_state = MAC80211_NOLINK;
for (i = 0; i < 6; i++)
rtllib->current_network.bssid[i] = 0x22;
op_mode = RT_OP_MODE_NO_LINK;
rtllib->op_mode = RT_OP_MODE_NO_LINK;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_MEDIA_STATUS,
+ rtllib->set_hw_reg_handler(rtllib->dev, HW_VAR_MEDIA_STATUS,
(u8 *)(&op_mode));
rtllib_disassociate(rtllib);
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_BSSID,
+ rtllib->set_hw_reg_handler(rtllib->dev, HW_VAR_BSSID,
rtllib->current_network.bssid);
}
}
-static void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn)
+static void rtllib_mgnt_disconnect_ap(struct rtllib_device *rtllib, u8 rsn)
{
- bool bFilterOutNonAssociatedBSSID = false;
+ bool filter_out_nonassociated_bssid = false;
- bFilterOutNonAssociatedBSSID = false;
- rtllib->SetHwRegHandler(rtllib->dev, HW_VAR_CECHK_BSSID,
- (u8 *)(&bFilterOutNonAssociatedBSSID));
- rtllib_MlmeDisassociateRequest(rtllib, rtllib->current_network.bssid,
- asRsn);
+ filter_out_nonassociated_bssid = false;
+ rtllib->set_hw_reg_handler(rtllib->dev, HW_VAR_CECHK_BSSID,
+ (u8 *)(&filter_out_nonassociated_bssid));
+ rtllib_mlme_disassociate_request(rtllib, rtllib->current_network.bssid,
+ rsn);
rtllib->link_state = MAC80211_NOLINK;
}
-bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
+bool rtllib_mgnt_disconnect(struct rtllib_device *rtllib, u8 rsn)
{
if (rtllib->ps != RTLLIB_PS_DISABLED)
rtllib->sta_wake_up(rtllib->dev);
if (rtllib->link_state == MAC80211_LINKED) {
if (rtllib->iw_mode == IW_MODE_INFRA)
- rtllib_MgntDisconnectAP(rtllib, asRsn);
+ rtllib_mgnt_disconnect_ap(rtllib, rsn);
}
return true;
}
-EXPORT_SYMBOL(rtllib_MgntDisconnect);
+EXPORT_SYMBOL(rtllib_mgnt_disconnect);
void notify_wx_assoc_event(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 2afa701e5445..d6bc74ba9092 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -347,10 +347,10 @@ void rtllib_wx_sync_scan_wq(void *data)
/* Notify AP that I wake up again */
rtllib_sta_ps_send_null_frame(ieee, 0);
- if (ieee->link_detect_info.NumRecvBcnInPeriod == 0 ||
- ieee->link_detect_info.NumRecvDataInPeriod == 0) {
- ieee->link_detect_info.NumRecvBcnInPeriod = 1;
- ieee->link_detect_info.NumRecvDataInPeriod = 1;
+ if (ieee->link_detect_info.num_recv_bcn_in_period == 0 ||
+ ieee->link_detect_info.num_recv_data_in_period == 0) {
+ ieee->link_detect_info.num_recv_bcn_in_period = 1;
+ ieee->link_detect_info.num_recv_data_in_period = 1;
}
rtllib_wake_all_queues(ieee);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index f7098a2ba8b0..54100dd81505 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -286,7 +286,7 @@ static void rtllib_tx_query_agg_cap(struct rtllib_device *ieee,
if (ht_info->iot_action & HT_IOT_ACT_TX_NO_AGGREGATION)
return;
- if (!ieee->GetNmodeSupportBySecCfg(ieee->dev))
+ if (!ieee->get_nmode_support_by_sec_cfg(ieee->dev))
return;
if (ht_info->current_ampdu_enable) {
if (!rtllib_get_ts(ieee, (struct ts_common_info **)(&ts), hdr->addr1,
@@ -356,7 +356,7 @@ static void rtllib_query_BandwidthMode(struct rtllib_device *ieee,
if (!ht_info->current_ht_support || !ht_info->enable_ht)
return;
- if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ if (tcb_desc->multicast || tcb_desc->bBroadcast)
return;
if ((tcb_desc->data_rate & 0x80) == 0)
@@ -378,7 +378,7 @@ static void rtllib_query_protectionmode(struct rtllib_device *ieee,
tcb_desc->RTSSC = 0;
tcb_desc->bRTSBW = false;
- if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
+ if (tcb_desc->bBroadcast || tcb_desc->multicast)
return;
if (is_broadcast_ether_addr(skb->data + 16))
@@ -595,14 +595,14 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
((((u8 *)udp)[1] == 67) &&
(((u8 *)udp)[3] == 68))) {
bdhcp = true;
- ieee->LPSDelayCnt = 200;
+ ieee->lps_delay_cnt = 200;
}
}
} else if (ether_type == ETH_P_ARP) {
netdev_info(ieee->dev,
"=================>DHCP Protocol start tx ARP pkt!!\n");
bdhcp = true;
- ieee->LPSDelayCnt =
+ ieee->lps_delay_cnt =
ieee->current_network.tim.tim_count;
}
}
@@ -832,7 +832,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (ieee->ht_info->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
- MgntQuery_TxRateExcludeCCKRates(ieee);
+ mgnt_query_tx_rate_exclude_cck_rates(ieee);
tcb_desc->tx_dis_rate_fallback = false;
} else {
tcb_desc->data_rate = ieee->basic_rate;
@@ -843,11 +843,11 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
tcb_desc->tx_use_drv_assinged_rate = 1;
} else {
if (is_multicast_ether_addr(header.addr1))
- tcb_desc->bMulticast = 1;
+ tcb_desc->multicast = 1;
if (is_broadcast_ether_addr(header.addr1))
tcb_desc->bBroadcast = 1;
rtllib_txrate_selectmode(ieee, tcb_desc);
- if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
+ if (tcb_desc->multicast || tcb_desc->bBroadcast)
tcb_desc->data_rate = ieee->basic_rate;
else
tcb_desc->data_rate = rtllib_current_rate(ieee);
@@ -856,7 +856,7 @@ static int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
if (ieee->ht_info->iot_action &
HT_IOT_ACT_WA_IOT_Broadcom) {
tcb_desc->data_rate =
- MgntQuery_TxRateExcludeCCKRates(ieee);
+ mgnt_query_tx_rate_exclude_cck_rates(ieee);
tcb_desc->tx_dis_rate_fallback = false;
} else {
tcb_desc->data_rate = MGN_1M;
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index f92ec0faf4d5..55a3e4222cd6 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -636,7 +636,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
ieee->cannot_notify = true;
- SendDisassociation(ieee, deauth, mlme->reason_code);
+ send_disassociation(ieee, deauth, mlme->reason_code);
rtllib_disassociate(ieee);
ieee->wap_set = 0;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 30e7457a9c31..b89e88d6a82d 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -1035,8 +1035,8 @@ void rtw_get_bcn_info(struct wlan_network *pnetwork)
u16 wpa_len = 0, rsn_len = 0;
struct HT_info_element *pht_info = NULL;
struct ieee80211_ht_cap *pht_cap = NULL;
- unsigned int len;
- unsigned char *p;
+ unsigned int len;
+ unsigned char *p;
__le16 le_cap;
memcpy((u8 *)&le_cap, rtw_get_capability_from_ie(pnetwork->network.ies), 2);
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index b221913733fb..bfb27f902753 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -169,7 +169,6 @@ void _rtw_free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwor
{
unsigned int delta_time;
u32 lifetime = SCANQUEUE_LIFETIME;
-/* _irqL irqL; */
struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
if (!pnetwork)
@@ -389,7 +388,6 @@ int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst, u8 fea
d_cap = le16_to_cpu(tmpd);
return (src->ssid.ssid_length == dst->ssid.ssid_length) &&
- /* (src->configuration.ds_config == dst->configuration.ds_config) && */
((!memcmp(src->mac_address, dst->mac_address, ETH_ALEN))) &&
((!memcmp(src->ssid.ssid, dst->ssid.ssid, src->ssid.ssid_length))) &&
((s_cap & WLAN_CAPABILITY_IBSS) ==
@@ -1548,9 +1546,9 @@ void _rtw_join_timeout_handler(struct timer_list *t)
int do_join_r;
do_join_r = rtw_do_join(adapter);
- if (do_join_r != _SUCCESS) {
+ if (do_join_r != _SUCCESS)
continue;
- }
+
break;
} else {
rtw_indicate_disconnect(adapter);
@@ -2432,9 +2430,8 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len, u8 channe
return;
/* maybe needs check if ap supports rx ampdu. */
- if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1) {
+ if (!(phtpriv->ampdu_enable) && pregistrypriv->ampdu_enable == 1)
phtpriv->ampdu_enable = true;
- }
/* check Max Rx A-MPDU Size */
len = 0;
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 1593980d2c6a..0145c4da5ac0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -127,9 +127,8 @@ void kfree_all_stainfo(struct sta_priv *pstapriv)
phead = get_list_head(&pstapriv->free_sta_queue);
plist = get_next(phead);
- while (phead != plist) {
+ while (phead != plist)
plist = get_next(plist);
- }
spin_unlock_bh(&pstapriv->sta_hash_lock);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index c5219a4a4919..7a5c3a98183b 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -954,7 +954,7 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
#endif
u16 efuse_addr = 0;
u16 start_addr = 0; /* for debug */
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
u32 count = 0; /* for debug */
@@ -1001,16 +1001,13 @@ static u16 hal_EfuseGetCurrentSize_WiFi(
}
if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
efuse_addr++;
efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
if (ALL_WORDS_DISABLED(efuse_data))
continue;
- hoffset |= ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
} else {
- hoffset = (efuse_data>>4) & 0x0F;
hworden = efuse_data & 0x0F;
}
@@ -1047,7 +1044,7 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
u16 btusedbytes;
u16 efuse_addr;
u8 bank, startBank;
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
u16 retU2 = 0;
@@ -1085,7 +1082,6 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
break;
if (EXT_HEADER(efuse_data)) {
- hoffset = GET_HDR_OFFSET_2_0(efuse_data);
efuse_addr++;
efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
@@ -1094,11 +1090,8 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
continue;
}
-/* hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1); */
- hoffset |= ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
} else {
- hoffset = (efuse_data>>4) & 0x0F;
hworden = efuse_data & 0x0F;
}
@@ -1114,18 +1107,15 @@ static u16 hal_EfuseGetCurrentSize_BT(struct adapter *padapter, u8 bPseudoTest)
) {
if (efuse_data != 0xFF) {
if ((efuse_data&0x1F) == 0x0F) { /* extended header */
- hoffset = efuse_data;
efuse_addr++;
efuse_OneByteRead(padapter, efuse_addr, &efuse_data, bPseudoTest);
if ((efuse_data & 0x0F) == 0x0F) {
efuse_addr++;
continue;
} else {
- hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
}
} else {
- hoffset = (efuse_data>>4) & 0x0F;
hworden = efuse_data & 0x0F;
}
word_cnts = Efuse_CalculateWordCnts(hworden);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 1ff763c10064..65a450fcdce7 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -1259,8 +1259,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
goto check_need_indicate_scan_done;
}
- ssid = kzalloc(RTW_SSID_SCAN_AMOUNT * sizeof(struct ndis_802_11_ssid),
- GFP_KERNEL);
+ ssid = kcalloc(RTW_SSID_SCAN_AMOUNT, sizeof(*ssid), GFP_KERNEL);
if (!ssid) {
ret = -ENOMEM;
goto check_need_indicate_scan_done;
diff --git a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
index e6e89784d84b..c3ba490e53cb 100644
--- a/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
+++ b/drivers/staging/vc04_services/bcm2835-camera/bcm2835-camera.c
@@ -350,12 +350,11 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
if (is_capturing(dev)) {
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Grab another frame");
- vchiq_mmal_port_parameter_set(
- instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.frame_count,
- sizeof(dev->capture.frame_count));
+ vchiq_mmal_port_parameter_set(instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
}
if (vchiq_mmal_submit_buffer(instance, port,
&buf->mmal))
@@ -406,12 +405,11 @@ static void buffer_cb(struct vchiq_mmal_instance *instance,
is_capturing(dev)) {
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Grab another frame as buffer has EOS");
- vchiq_mmal_port_parameter_set(
- instance,
- dev->capture.camera_port,
- MMAL_PARAMETER_CAPTURE,
- &dev->capture.frame_count,
- sizeof(dev->capture.frame_count));
+ vchiq_mmal_port_parameter_set(instance,
+ dev->capture.camera_port,
+ MMAL_PARAMETER_CAPTURE,
+ &dev->capture.frame_count,
+ sizeof(dev->capture.frame_count));
}
}
@@ -420,11 +418,10 @@ static int enable_camera(struct bcm2835_mmal_dev *dev)
int ret;
if (!dev->camera_use_count) {
- ret = vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[COMP_CAMERA]->control,
- MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
- sizeof(dev->camera_num));
+ ret = vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[COMP_CAMERA]->control,
+ MMAL_PARAMETER_CAMERA_NUM, &dev->camera_num,
+ sizeof(dev->camera_num));
if (ret < 0) {
v4l2_err(&dev->v4l2_dev,
"Failed setting camera num, ret %d\n", ret);
@@ -468,11 +465,11 @@ static int disable_camera(struct bcm2835_mmal_dev *dev)
"Failed disabling camera, ret %d\n", ret);
return -EINVAL;
}
- vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[COMP_CAMERA]->control,
- MMAL_PARAMETER_CAMERA_NUM, &i,
- sizeof(i));
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[COMP_CAMERA]->control,
+ MMAL_PARAMETER_CAMERA_NUM,
+ &i,
+ sizeof(i));
}
v4l2_dbg(1, bcm2835_v4l2_debug, &dev->v4l2_dev,
"Camera refcount now %d\n", dev->camera_use_count);
@@ -786,9 +783,8 @@ static int vidioc_overlay(struct file *file, void *f, unsigned int on)
ret = vchiq_mmal_port_connect_tunnel(dev->instance, src,
NULL);
if (ret >= 0)
- ret = vchiq_mmal_component_disable(
- dev->instance,
- dev->component[COMP_PREVIEW]);
+ ret = vchiq_mmal_component_disable(dev->instance,
+ dev->component[COMP_PREVIEW]);
disable_camera(dev);
return ret;
@@ -1006,7 +1002,6 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
return 0;
}
-
static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
struct v4l2_format *f)
{
@@ -1042,8 +1037,8 @@ static int mmal_setup_video_component(struct bcm2835_mmal_dev *dev,
if (overlay_enabled) {
ret = vchiq_mmal_port_connect_tunnel(dev->instance,
- preview_port,
- &dev->component[COMP_PREVIEW]->input[0]);
+ preview_port,
+ &dev->component[COMP_PREVIEW]->input[0]);
if (ret)
return ret;
@@ -1720,11 +1715,11 @@ static int mmal_init(struct bcm2835_mmal_dev *dev)
{
unsigned int enable = 1;
- vchiq_mmal_port_parameter_set(
- dev->instance,
- &dev->component[COMP_VIDEO_ENCODE]->control,
- MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
- &enable, sizeof(enable));
+ vchiq_mmal_port_parameter_set(dev->instance,
+ &dev->component[COMP_VIDEO_ENCODE]->control,
+ MMAL_PARAMETER_VIDEO_IMMUTABLE_INPUT,
+ &enable,
+ sizeof(enable));
vchiq_mmal_port_parameter_set(dev->instance,
&dev->component[COMP_VIDEO_ENCODE]->control,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
index 933027e0011e..68f830d75531 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.c
@@ -37,7 +37,7 @@ static int vchiq_bus_probe(struct device *dev)
return driver->probe(device);
}
-struct bus_type vchiq_bus_type = {
+const struct bus_type vchiq_bus_type = {
.name = "vchiq-bus",
.match = vchiq_bus_type_match,
.uevent = vchiq_bus_uevent,
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
index caa6fdf25bb1..4db86e76edbd 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_bus.h
@@ -34,7 +34,7 @@ static inline struct vchiq_driver *to_vchiq_driver(struct device_driver *d)
return container_of(d, struct vchiq_driver, driver);
}
-extern struct bus_type vchiq_bus_type;
+extern const struct bus_type vchiq_bus_type;
struct vchiq_device *
vchiq_device_register(struct device *parent, const char *name);
diff --git a/drivers/staging/vme_user/vme.c b/drivers/staging/vme_user/vme.c
index e9461a7a7ab8..0cd370ab1008 100644
--- a/drivers/staging/vme_user/vme.c
+++ b/drivers/staging/vme_user/vme.c
@@ -1970,7 +1970,7 @@ static void vme_bus_remove(struct device *dev)
driver->remove(vdev);
}
-struct bus_type vme_bus_type = {
+const struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
diff --git a/drivers/staging/vme_user/vme.h b/drivers/staging/vme_user/vme.h
index 06504dccd5ff..26aa40f78a74 100644
--- a/drivers/staging/vme_user/vme.h
+++ b/drivers/staging/vme_user/vme.h
@@ -81,7 +81,7 @@ struct vme_resource {
struct list_head *entry;
};
-extern struct bus_type vme_bus_type;
+extern const struct bus_type vme_bus_type;
/* Number of VME interrupt vectors */
#define VME_NUM_STATUSID 256
diff --git a/drivers/staging/vme_user/vme_tsi148.h b/drivers/staging/vme_user/vme_tsi148.h
index 4dd224d0b86e..db246cbc54c3 100644
--- a/drivers/staging/vme_user/vme_tsi148.h
+++ b/drivers/staging/vme_user/vme_tsi148.h
@@ -691,8 +691,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VMCTRL_RMWEN BIT(20) /* RMW Enable */
-#define TSI148_LCSR_VMCTRL_ATO_M (7 << 16) /* Master Access Time-out Mask
- */
+#define TSI148_LCSR_VMCTRL_ATO_M (7 << 16) /* Master Access Time-out Mask */
#define TSI148_LCSR_VMCTRL_ATO_32 (0 << 16) /* 32 us */
#define TSI148_LCSR_VMCTRL_ATO_128 BIT(16) /* 128 us */
#define TSI148_LCSR_VMCTRL_ATO_512 (2 << 16) /* 512 us */
@@ -753,8 +752,7 @@ static const int TSI148_GCSR_MBOX[4] = { TSI148_GCSR_MBOX0,
#define TSI148_LCSR_VCTRL_DLT_16384 (0xB << 24) /* 16384 VCLKS */
#define TSI148_LCSR_VCTRL_DLT_32768 (0xC << 24) /* 32768 VCLKS */
-#define TSI148_LCSR_VCTRL_NERBB BIT(20) /* No Early Release of Bus Busy
- */
+#define TSI148_LCSR_VCTRL_NERBB BIT(20) /* No Early Release of Bus Busy */
#define TSI148_LCSR_VCTRL_SRESET BIT(17) /* System Reset */
#define TSI148_LCSR_VCTRL_LRESET BIT(16) /* Local Reset */
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 36183f2a64c1..688c870d89bc 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -81,9 +81,9 @@ static void vt6655_mac_set_bb_type(void __iomem *iobase, u32 mask)
* Return Value: none
*/
static void calculate_ofdmr_parameter(unsigned char rate,
- u8 bb_type,
- unsigned char *tx_rate,
- unsigned char *rsv_time)
+ u8 bb_type,
+ unsigned char *tx_rate,
+ unsigned char *rsv_time)
{
switch (rate) {
case RATE_6M:
@@ -288,7 +288,7 @@ bool card_set_phy_parameter(struct vnt_private *priv, u8 bb_type)
* Return Value: none
*/
bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
- u64 bss_timestamp)
+ u64 bss_timestamp)
{
u64 local_tsf;
u64 tsf_offset = 0;
@@ -297,7 +297,7 @@ bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
if (bss_timestamp != local_tsf) {
tsf_offset = card_get_tsf_offset(rx_rate, bss_timestamp,
- local_tsf);
+ local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
tsf_offset = le64_to_cpu(tsf_offset);
iowrite32((u32)tsf_offset, priv->port_offset + MAC_REG_TSFOFST);
@@ -321,7 +321,7 @@ bool card_update_tsf(struct vnt_private *priv, unsigned char rx_rate,
* Return Value: true if succeed; otherwise false
*/
bool card_set_beacon_period(struct vnt_private *priv,
- unsigned short beacon_interval)
+ unsigned short beacon_interval)
{
u64 next_tbtt;
@@ -586,61 +586,61 @@ void card_set_rspinf(struct vnt_private *priv, u8 bb_type)
/* RSPINF_a_6 */
calculate_ofdmr_parameter(RATE_6M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_6);
/* RSPINF_a_9 */
calculate_ofdmr_parameter(RATE_9M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_9);
/* RSPINF_a_12 */
calculate_ofdmr_parameter(RATE_12M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_12);
/* RSPINF_a_18 */
calculate_ofdmr_parameter(RATE_18M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_18);
/* RSPINF_a_24 */
calculate_ofdmr_parameter(RATE_24M,
- bb_type,
- &byTxRate,
- &byRsvTime);
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_24);
/* RSPINF_a_36 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_36M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_36M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_36);
/* RSPINF_a_48 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_48M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_48M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_48);
/* RSPINF_a_54 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_54M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_54M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_54);
/* RSPINF_a_72 */
calculate_ofdmr_parameter(CARDwGetOFDMControlRate((void *)priv,
- RATE_54M),
- bb_type,
- &byTxRate,
- &byRsvTime);
+ RATE_54M),
+ bb_type,
+ &byTxRate,
+ &byRsvTime);
iowrite16(MAKEWORD(byTxRate, byRsvTime), priv->port_offset + MAC_REG_RSPINF_A_72);
/* Set to Page0 */
VT6655_MAC_SELECT_PAGE0(priv->port_offset);
diff --git a/drivers/staging/vt6655/rxtx.h b/drivers/staging/vt6655/rxtx.h
index a67757c9bb5c..be1e5180d57b 100644
--- a/drivers/staging/vt6655/rxtx.h
+++ b/drivers/staging/vt6655/rxtx.h
@@ -19,7 +19,6 @@
#define DEFAULT_MSDU_LIFETIME_RES_64us 8000 /* 64us */
#define DEFAULT_MGN_LIFETIME_RES_64us 125 /* 64us */
-
/*--------------------- Export Definitions -------------------------*/
/*--------------------- Export Variables --------------------------*/
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index d77d7fe99a84..5cdf7d68687f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -43,7 +43,7 @@ obj-$(CONFIG_RCAR_GEN3_THERMAL) += rcar_gen3_thermal.o
obj-$(CONFIG_RZG2L_THERMAL) += rzg2l_thermal.o
obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o
obj-y += samsung/
-obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
+obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o
obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
diff --git a/drivers/thermal/mediatek/auxadc_thermal.c b/drivers/thermal/mediatek/auxadc_thermal.c
index 8b0edb204844..9ee2e7283435 100644
--- a/drivers/thermal/mediatek/auxadc_thermal.c
+++ b/drivers/thermal/mediatek/auxadc_thermal.c
@@ -690,6 +690,9 @@ static const struct mtk_thermal_data mt7986_thermal_data = {
.adcpnp = mt7986_adcpnp,
.sensor_mux_values = mt7986_mux_values,
.version = MTK_THERMAL_V3,
+ .apmixed_buffer_ctl_reg = APMIXED_SYS_TS_CON1,
+ .apmixed_buffer_ctl_mask = GENMASK(31, 6) | BIT(3),
+ .apmixed_buffer_ctl_set = BIT(0),
};
static bool mtk_thermal_temp_is_valid(int temp)
diff --git a/drivers/thermal/mediatek/lvts_thermal.c b/drivers/thermal/mediatek/lvts_thermal.c
index 98d9c80bd4c6..fd4bd650c77a 100644
--- a/drivers/thermal/mediatek/lvts_thermal.c
+++ b/drivers/thermal/mediatek/lvts_thermal.c
@@ -719,8 +719,10 @@ static int lvts_calibration_read(struct device *dev, struct lvts_domain *lvts_td
lvts_td->calib = devm_krealloc(dev, lvts_td->calib,
lvts_td->calib_len + len, GFP_KERNEL);
- if (!lvts_td->calib)
+ if (!lvts_td->calib) {
+ kfree(efuse);
return -ENOMEM;
+ }
memcpy(lvts_td->calib + lvts_td->calib_len, efuse, len);
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index ccc2eea7f9f5..404f01cca4da 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -57,6 +57,9 @@
#define REGS_TTRnCR(n) (0xf10 + 4 * (n)) /* Temperature Range n
* Control Register
*/
+#define NUM_TTRCR_V1 4
+#define NUM_TTRCR_MAX 16
+
#define REGS_IPBRR(n) (0xbf8 + 4 * (n)) /* IP Block Revision
* Register n
*/
@@ -71,6 +74,7 @@ struct qoriq_sensor {
struct qoriq_tmu_data {
int ver;
+ u32 ttrcr[NUM_TTRCR_MAX];
struct regmap *regmap;
struct clk *clk;
struct qoriq_sensor sensor[SITES_MAX];
@@ -182,17 +186,17 @@ static int qoriq_tmu_calibration(struct device *dev,
struct qoriq_tmu_data *data)
{
int i, val, len;
- u32 range[4];
const u32 *calibration;
struct device_node *np = dev->of_node;
len = of_property_count_u32_elems(np, "fsl,tmu-range");
- if (len < 0 || len > 4) {
+ if (len < 0 || (data->ver == TMU_VER1 && len > NUM_TTRCR_V1) ||
+ (data->ver > TMU_VER1 && len > NUM_TTRCR_MAX)) {
dev_err(dev, "invalid range data.\n");
return len;
}
- val = of_property_read_u32_array(np, "fsl,tmu-range", range, len);
+ val = of_property_read_u32_array(np, "fsl,tmu-range", data->ttrcr, len);
if (val != 0) {
dev_err(dev, "failed to read range data.\n");
return val;
@@ -200,7 +204,7 @@ static int qoriq_tmu_calibration(struct device *dev,
/* Init temperature range registers */
for (i = 0; i < len; i++)
- regmap_write(data->regmap, REGS_TTRnCR(i), range[i]);
+ regmap_write(data->regmap, REGS_TTRnCR(i), data->ttrcr[i]);
calibration = of_get_property(np, "fsl,tmu-calibration", &len);
if (calibration == NULL || len % 8) {
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index cafcb6d6e235..a764cb1115a5 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -428,6 +428,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.compatible = "renesas,r8a779g0-thermal",
.data = &rcar_gen4_thermal_info,
},
+ {
+ .compatible = "renesas,r8a779h0-thermal",
+ .data = &rcar_gen4_thermal_info,
+ },
{},
};
MODULE_DEVICE_TABLE(of, rcar_gen3_thermal_dt_ids);
diff --git a/drivers/thermal/st/st_thermal.h b/drivers/thermal/st/st_thermal.h
index 75a84e6ec6a7..8639d9165c9b 100644
--- a/drivers/thermal/st/st_thermal.h
+++ b/drivers/thermal/st/st_thermal.h
@@ -38,10 +38,10 @@ struct st_thermal_sensor;
*
* @power_ctrl: Function for powering on/off a sensor. Clock to the
* sensor is also controlled from this function.
- * @alloc_regfields: Allocate regmap register fields, specific to a sensor.
- * @do_memmap_regmap: Memory map the thermal register space and init regmap
+ * @alloc_regfields: Allocate regmap register fields, specific to a sensor.
+ * @do_memmap_regmap: Memory map the thermal register space and init regmap
* instance or find regmap instance.
- * @register_irq: Register an interrupt handler for a sensor.
+ * @register_irq: Register an interrupt handler for a sensor.
*/
struct st_thermal_sensor_ops {
int (*power_ctrl)(struct st_thermal_sensor *, enum st_thermal_power_state);
@@ -56,15 +56,15 @@ struct st_thermal_sensor_ops {
*
* @reg_fields: Pointer to the regfields array for a sensor.
* @sys_compat: Pointer to the syscon node compatible string.
- * @ops: Pointer to private thermal ops for a sensor.
- * @calibration_val: Default calibration value to be written to the DCORRECT
+ * @ops: Pointer to private thermal ops for a sensor.
+ * @calibration_val: Default calibration value to be written to the DCORRECT
* register field for a sensor.
- * @temp_adjust_val: Value to be added/subtracted from the data read from
+ * @temp_adjust_val: Value to be added/subtracted from the data read from
* the sensor. If value needs to be added please provide a
* positive value and if it is to be subtracted please
- * provide a negative value.
- * @crit_temp: The temperature beyond which the SoC should be shutdown
- * to prevent damage.
+ * provide a negative value.
+ * @crit_temp:		The temperature beyond which the SoC should be shut down
+ * to prevent damage.
*/
struct st_thermal_compat_data {
char *sys_compat;
diff --git a/drivers/thermal/st/st_thermal_memmap.c b/drivers/thermal/st/st_thermal_memmap.c
index e8cfa83b724a..29c2269b0fb3 100644
--- a/drivers/thermal/st/st_thermal_memmap.c
+++ b/drivers/thermal/st/st_thermal_memmap.c
@@ -27,7 +27,7 @@ static const struct reg_field st_mmap_thermal_regfields[MAX_REGFIELDS] = {
* written simultaneously for powering on and off the temperature
* sensor. regmap_update_bits() will be used to update the register.
*/
- [INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7),
+ [INT_THRESH_HI] = REG_FIELD(STIH416_MPE_INT_THRESH, 0, 7),
[DCORRECT] = REG_FIELD(STIH416_MPE_CONF, 5, 9),
[OVERFLOW] = REG_FIELD(STIH416_MPE_STATUS, 9, 9),
[DATA] = REG_FIELD(STIH416_MPE_STATUS, 11, 18),
diff --git a/drivers/thermal/sun8i_thermal.c b/drivers/thermal/sun8i_thermal.c
index 6a8e386dbc8d..3203d8bd13a8 100644
--- a/drivers/thermal/sun8i_thermal.c
+++ b/drivers/thermal/sun8i_thermal.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -50,7 +51,8 @@
#define SUN8I_THS_CTRL2_T_ACQ1(x) ((GENMASK(15, 0) & (x)) << 16)
#define SUN8I_THS_DATA_IRQ_STS(x) BIT(x + 8)
-#define SUN50I_THS_CTRL0_T_ACQ(x) ((GENMASK(15, 0) & (x)) << 16)
+#define SUN50I_THS_CTRL0_T_ACQ(x) (GENMASK(15, 0) & ((x) - 1))
+#define SUN50I_THS_CTRL0_T_SAMPLE_PER(x) ((GENMASK(15, 0) & ((x) - 1)) << 16)
#define SUN50I_THS_FILTER_EN BIT(2)
#define SUN50I_THS_FILTER_TYPE(x) (GENMASK(1, 0) & (x))
#define SUN50I_H6_THS_PC_TEMP_PERIOD(x) ((GENMASK(19, 0) & (x)) << 12)
@@ -65,6 +67,7 @@ struct tsensor {
struct ths_thermal_chip {
bool has_mod_clk;
bool has_bus_clk_reset;
+ bool needs_sram;
int sensor_num;
int offset;
int scale;
@@ -82,12 +85,16 @@ struct ths_device {
const struct ths_thermal_chip *chip;
struct device *dev;
struct regmap *regmap;
+ struct regmap_field *sram_regmap_field;
struct reset_control *reset;
struct clk *bus_clk;
struct clk *mod_clk;
struct tsensor sensor[MAX_SENSOR_NUM];
};
+/* The H616 needs to have bit 16 in the SRAM control register cleared. */
+static const struct reg_field sun8i_ths_sram_reg_field = REG_FIELD(0x0, 16, 16);
+
/* Temp Unit: millidegree Celsius */
static int sun8i_ths_calc_temp(struct ths_device *tmdev,
int id, int reg)
@@ -188,6 +195,9 @@ static irqreturn_t sun8i_irq_thread(int irq, void *data)
int i;
for_each_set_bit(i, &irq_bitmap, tmdev->chip->sensor_num) {
+ /* We allow some zones to not register. */
+ if (IS_ERR(tmdev->sensor[i].tzd))
+ continue;
thermal_zone_device_update(tmdev->sensor[i].tzd,
THERMAL_EVENT_UNSPECIFIED);
}
@@ -221,16 +231,21 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
struct device *dev = tmdev->dev;
int i, ft_temp;
- if (!caldata[0] || callen < 2 + 2 * tmdev->chip->sensor_num)
+ if (!caldata[0])
return -EINVAL;
/*
* efuse layout:
*
- * 0 11 16 32
- * +-------+-------+-------+
- * |temp| |sensor0|sensor1|
- * +-------+-------+-------+
+ * 0 11 16 27 32 43 48 57
+ * +----------+-----------+-----------+-----------+
+ * | temp | |sensor0| |sensor1| |sensor2| |
+ * +----------+-----------+-----------+-----------+
+ * ^ ^ ^
+ * | | |
+ * | | sensor3[11:8]
+ * | sensor3[7:4]
+ * sensor3[3:0]
*
* The calibration data on the H6 is the ambient temperature and
* sensor values that are filled during the factory test stage.
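The split sensor3 calibration word described in the layout above is reassembled in the next hunk from the top nibble of caldata[1..3]. A minimal standalone sketch of that reassembly, with hypothetical efuse words and assuming the 12-bit per-sensor mask the diagram implies (TEMP_CALIB_MASK == 0xfff); this is illustration only, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 16-bit efuse words; only the bit layout matters here. */
	uint16_t caldata[4] = { 0x0aaa, 0x3111, 0x5222, 0x7333 };
	int i;

	for (i = 0; i < 4; i++) {
		int sensor_reg;

		if (i == 3)
			/* sensor3 is built from the top nibbles of words 1..3 */
			sensor_reg = (caldata[1] >> 12)
				   | ((caldata[2] >> 12) << 4)
				   | ((caldata[3] >> 12) << 8);
		else
			/* sensors 0-2 use the low 12 bits of their own word */
			sensor_reg = caldata[i + 1] & 0xfff;

		printf("sensor%d calibration reg = 0x%03x\n", i, sensor_reg);
	}
	/* Prints 0x111, 0x222, 0x333 and, for sensor3, 0x753 (nibbles 7/5/3). */
	return 0;
}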
@@ -243,9 +258,16 @@ static int sun50i_h6_ths_calibrate(struct ths_device *tmdev,
ft_temp = (caldata[0] & FT_TEMP_MASK) * 100;
for (i = 0; i < tmdev->chip->sensor_num; i++) {
- int sensor_reg = caldata[i + 1] & TEMP_CALIB_MASK;
- int cdata, offset;
- int sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
+ int sensor_reg, sensor_temp, cdata, offset;
+
+ if (i == 3)
+ sensor_reg = (caldata[1] >> 12)
+ | ((caldata[2] >> 12) << 4)
+ | ((caldata[3] >> 12) << 8);
+ else
+ sensor_reg = caldata[i + 1] & TEMP_CALIB_MASK;
+
+ sensor_temp = tmdev->chip->calc_temp(tmdev, i, sensor_reg);
/*
* Calibration data is CALIBRATE_DEFAULT - (calculated
@@ -324,6 +346,34 @@ static void sun8i_ths_reset_control_assert(void *data)
reset_control_assert(data);
}
+static struct regmap *sun8i_ths_get_sram_regmap(struct device_node *node)
+{
+ struct device_node *sram_node;
+ struct platform_device *sram_pdev;
+ struct regmap *regmap = NULL;
+
+ sram_node = of_parse_phandle(node, "allwinner,sram", 0);
+ if (!sram_node)
+ return ERR_PTR(-ENODEV);
+
+ sram_pdev = of_find_device_by_node(sram_node);
+ if (!sram_pdev) {
+ /* platform device might not be probed yet */
+ regmap = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_node;
+ }
+
+ /* If no regmap is found then the other device driver is at fault */
+ regmap = dev_get_regmap(&sram_pdev->dev, NULL);
+ if (!regmap)
+ regmap = ERR_PTR(-EINVAL);
+
+ platform_device_put(sram_pdev);
+out_put_node:
+ of_node_put(sram_node);
+ return regmap;
+}
+
static int sun8i_ths_resource_init(struct ths_device *tmdev)
{
struct device *dev = tmdev->dev;
@@ -368,6 +418,19 @@ static int sun8i_ths_resource_init(struct ths_device *tmdev)
if (ret)
return ret;
+ if (tmdev->chip->needs_sram) {
+ struct regmap *regmap;
+
+ regmap = sun8i_ths_get_sram_regmap(dev->of_node);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+ tmdev->sram_regmap_field = devm_regmap_field_alloc(dev,
+ regmap,
+ sun8i_ths_sram_reg_field);
+ if (IS_ERR(tmdev->sram_regmap_field))
+ return PTR_ERR(tmdev->sram_regmap_field);
+ }
+
ret = sun8i_ths_calibrate(tmdev);
if (ret)
return ret;
@@ -410,25 +473,31 @@ static int sun8i_h3_thermal_init(struct ths_device *tmdev)
return 0;
}
-/*
- * Without this undocumented value, the returned temperatures would
- * be higher than real ones by about 20C.
- */
-#define SUN50I_H6_CTRL0_UNK 0x0000002f
-
static int sun50i_h6_thermal_init(struct ths_device *tmdev)
{
int val;
+ /* The H616 needs to have a bit in the SRAM control register cleared. */
+ if (tmdev->sram_regmap_field)
+ regmap_field_write(tmdev->sram_regmap_field, 0);
+
/*
- * T_acq = 20us
- * clkin = 24MHz
- *
- * x = T_acq * clkin - 1
- * = 479
+ * The manual recommends an overall sample frequency of 50 kHz (20us,
+ * 480 cycles at 24 MHz), which provides plenty of time for both the
+ * acquisition time (>24 cycles) and the actual conversion time
+ * (>14 cycles).
+ * The lower half of the CTRL register holds the "acquire time", in
+ * clock cycles, which the manual recommends to be 2us:
+ * 24MHz * 2us = 48 cycles.
+ * The high half of THS_CTRL encodes the sample frequency, in clock
+ * cycles: 24MHz * 20us = 480 cycles.
+ * This is explained in the H616 manual, but apparently wrongly
+ * described in the H6 manual, although the BSP code does the same
+ * for both SoCs.
*/
regmap_write(tmdev->regmap, SUN50I_THS_CTRL0,
- SUN50I_H6_CTRL0_UNK | SUN50I_THS_CTRL0_T_ACQ(479));
+ SUN50I_THS_CTRL0_T_ACQ(48) |
+ SUN50I_THS_CTRL0_T_SAMPLE_PER(480));
/* average over 4 samples */
regmap_write(tmdev->regmap, SUN50I_H6_THS_MFC,
SUN50I_THS_FILTER_EN |
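A quick sanity check of the reworked CTRL0 programming above: with the new "(x) - 1" field encodings, the value written on the H6 is unchanged, and the old undocumented 0x2f constant turns out to be the 48-cycle (2us) acquire time. A minimal standalone C sketch of the arithmetic (illustration only, with GENMASK(15, 0) written out as 0xffff):

#include <stdint.h>
#include <stdio.h>

/* Same field encodings as the patch above; GENMASK(15, 0) == 0xffff. */
#define T_ACQ(x)	(0xffffu & ((x) - 1))		/* acquire time, cycles */
#define T_SAMPLE_PER(x)	((0xffffu & ((x) - 1)) << 16)	/* sample period, cycles */

int main(void)
{
	/* New code: 48-cycle (2us) acquire time, 480-cycle (20us) sample period. */
	uint32_t new_val = T_ACQ(48) | T_SAMPLE_PER(480);
	/* Old code: 0x2f magic constant plus the previous T_ACQ(479), i.e. 479 << 16. */
	uint32_t old_val = 0x0000002fu | (479u << 16);

	printf("new CTRL0 = 0x%08x, old CTRL0 = 0x%08x\n", new_val, old_val);
	/* Both print 0x01df002f, so the register value on the H6 does not change. */
	return 0;
}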
@@ -465,8 +534,17 @@ static int sun8i_ths_register(struct ths_device *tmdev)
i,
&tmdev->sensor[i],
&ths_ops);
- if (IS_ERR(tmdev->sensor[i].tzd))
- return PTR_ERR(tmdev->sensor[i].tzd);
+
+ /*
+ * If an individual zone fails to register for reasons
+ * other than probe deferral (eg, a bad DT) then carry
+ * on, other zones might register successfully.
+ */
+ if (IS_ERR(tmdev->sensor[i].tzd)) {
+ if (PTR_ERR(tmdev->sensor[i].tzd) == -EPROBE_DEFER)
+ return PTR_ERR(tmdev->sensor[i].tzd);
+ continue;
+ }
devm_thermal_add_hwmon_sysfs(tmdev->dev, tmdev->sensor[i].tzd);
}
@@ -618,6 +696,20 @@ static const struct ths_thermal_chip sun20i_d1_ths = {
.calc_temp = sun8i_ths_calc_temp,
};
+static const struct ths_thermal_chip sun50i_h616_ths = {
+ .sensor_num = 4,
+ .has_bus_clk_reset = true,
+ .needs_sram = true,
+ .ft_deviation = 8000,
+ .offset = 263655,
+ .scale = 810,
+ .temp_data_base = SUN50I_H6_THS_TEMP_DATA,
+ .calibrate = sun50i_h6_ths_calibrate,
+ .init = sun50i_h6_thermal_init,
+ .irq_ack = sun50i_h6_irq_ack,
+ .calc_temp = sun8i_ths_calc_temp,
+};
+
static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun8i-a83t-ths", .data = &sun8i_a83t_ths },
{ .compatible = "allwinner,sun8i-h3-ths", .data = &sun8i_h3_ths },
@@ -627,6 +719,7 @@ static const struct of_device_id of_ths_match[] = {
{ .compatible = "allwinner,sun50i-h5-ths", .data = &sun50i_h5_ths },
{ .compatible = "allwinner,sun50i-h6-ths", .data = &sun50i_h6_ths },
{ .compatible = "allwinner,sun20i-d1-ths", .data = &sun20i_d1_ths },
+ { .compatible = "allwinner,sun50i-h616-ths", .data = &sun50i_h616_ths },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, of_ths_match);
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index f1cbf9aa62cf..aa34b6e82e26 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -227,14 +227,18 @@ static int thermal_of_monitor_init(struct device_node *np, int *delay, int *pdel
int ret;
ret = of_property_read_u32(np, "polling-delay-passive", pdelay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay-passive property\n", np);
+ if (ret == -EINVAL) {
+ *pdelay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay-passive: %d\n", np, ret);
return ret;
}
ret = of_property_read_u32(np, "polling-delay", delay);
- if (ret < 0) {
- pr_err("%pOFn: missing polling-delay property\n", np);
+ if (ret == -EINVAL) {
+ *delay = 0;
+ } else if (ret < 0) {
+ pr_err("%pOFn: Couldn't get polling-delay: %d\n", np, ret);
return ret;
}
@@ -460,7 +464,7 @@ static void thermal_of_zone_unregister(struct thermal_zone_device *tz)
* @ops: A set of thermal sensor ops
*
* Return: a valid thermal zone structure pointer on success.
- * - EINVAL: if the device tree thermal description is malformed
+ * - EINVAL: if the device tree thermal description is malformed
* - ENOMEM: if one structure can not be allocated
* - Other negative errors are returned by the underlying called functions
*/
diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile
index c8b3d7b78098..b44b32dcb832 100644
--- a/drivers/thunderbolt/Makefile
+++ b/drivers/thunderbolt/Makefile
@@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y := -I$(src)
obj-${CONFIG_USB4} := thunderbolt.o
thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o
thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index d997a4c545f7..4bdb2d45e0bf 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -15,6 +15,8 @@
#include "ctl.h"
+#define CREATE_TRACE_POINTS
+#include "trace.h"
#define TB_CTL_RX_PKG_COUNT 10
#define TB_CTL_RETRIES 4
@@ -32,6 +34,7 @@
* @timeout_msec: Default timeout for non-raw control messages
* @callback: Callback called when hotplug message is received
* @callback_data: Data passed to @callback
+ * @index: Domain number. This will be output with the trace record.
*/
struct tb_ctl {
struct tb_nhi *nhi;
@@ -47,6 +50,8 @@ struct tb_ctl {
int timeout_msec;
event_cb callback;
void *callback_data;
+
+ int index;
};
@@ -369,6 +374,9 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
pkg->frame.size = len + 4;
pkg->frame.sof = type;
pkg->frame.eof = type;
+
+ trace_tb_tx(ctl->index, type, data, len);
+
cpu_to_be32_array(pkg->buffer, data, len / 4);
*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);
@@ -384,6 +392,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len,
static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type,
struct ctl_pkg *pkg, size_t size)
{
+ trace_tb_event(ctl->index, type, pkg->buffer, size);
return ctl->callback(ctl->callback_data, type, pkg->buffer, size);
}
@@ -489,6 +498,9 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
* triggered from messing with the active requests.
*/
req = tb_cfg_request_find(pkg->ctl, pkg);
+
+ trace_tb_rx(pkg->ctl->index, frame->eof, pkg->buffer, frame->size, !req);
+
if (req) {
if (req->copy(req, pkg))
schedule_work(&req->work);
@@ -614,6 +626,7 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
/**
* tb_ctl_alloc() - allocate a control channel
* @nhi: Pointer to NHI
+ * @index: Domain number
* @timeout_msec: Default timeout used with non-raw control messages
* @cb: Callback called for plug events
* @cb_data: Data passed to @cb
@@ -622,14 +635,16 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl,
*
* Return: Returns a pointer on success or NULL on failure.
*/
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data)
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data)
{
int i;
struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
if (!ctl)
return NULL;
+
ctl->nhi = nhi;
+ ctl->index = index;
ctl->timeout_msec = timeout_msec;
ctl->callback = cb;
ctl->callback_data = cb_data;
diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h
index eec5c953c743..bf930a191472 100644
--- a/drivers/thunderbolt/ctl.h
+++ b/drivers/thunderbolt/ctl.h
@@ -21,8 +21,8 @@ struct tb_ctl;
typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type,
const void *buf, size_t size);
-struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb,
- void *cb_data);
+struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int index, int timeout_msec,
+ event_cb cb, void *cb_data);
void tb_ctl_start(struct tb_ctl *ctl);
void tb_ctl_stop(struct tb_ctl *ctl);
void tb_ctl_free(struct tb_ctl *ctl);
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 9fb1a64f3300..0023017299f7 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -321,12 +321,12 @@ static void tb_domain_release(struct device *dev)
tb_ctl_free(tb->ctl);
destroy_workqueue(tb->wq);
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
mutex_destroy(&tb->lock);
kfree(tb);
}
-struct device_type tb_domain_type = {
+const struct device_type tb_domain_type = {
.name = "thunderbolt_domain",
.release = tb_domain_release,
};
@@ -389,7 +389,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
tb->nhi = nhi;
mutex_init(&tb->lock);
- tb->index = ida_simple_get(&tb_domain_ida, 0, 0, GFP_KERNEL);
+ tb->index = ida_alloc(&tb_domain_ida, GFP_KERNEL);
if (tb->index < 0)
goto err_free;
@@ -397,7 +397,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
if (!tb->wq)
goto err_remove_ida;
- tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb);
+ tb->ctl = tb_ctl_alloc(nhi, tb->index, timeout_msec, tb_domain_event_cb, tb);
if (!tb->ctl)
goto err_destroy_wq;
@@ -413,7 +413,7 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize
err_destroy_wq:
destroy_workqueue(tb->wq);
err_remove_ida:
- ida_simple_remove(&tb_domain_ida, tb->index);
+ ida_free(&tb_domain_ida, tb->index);
err_free:
kfree(tb);
@@ -423,6 +423,7 @@ err_free:
/**
* tb_domain_add() - Add domain to the system
* @tb: Domain to add
+ * @reset: Issue reset to the host router
*
* Starts the domain and adds it to the system. Hotplugging devices will
* work after this has been returned successfully. In order to remove
@@ -431,7 +432,7 @@ err_free:
*
* Return: %0 in case of success and negative errno in case of error
*/
-int tb_domain_add(struct tb *tb)
+int tb_domain_add(struct tb *tb, bool reset)
{
int ret;
@@ -460,7 +461,7 @@ int tb_domain_add(struct tb *tb)
/* Start the domain */
if (tb->cm_ops->start) {
- ret = tb->cm_ops->start(tb);
+ ret = tb->cm_ops->start(tb, reset);
if (ret)
goto err_domain_del;
}
@@ -505,6 +506,10 @@ void tb_domain_remove(struct tb *tb)
mutex_unlock(&tb->lock);
flush_workqueue(tb->wq);
+
+ if (tb->cm_ops->deinit)
+ tb->cm_ops->deinit(tb);
+
device_unregister(&tb->dev);
}
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 56790d50f9e3..baf10d099c77 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -2144,7 +2144,7 @@ static int icm_runtime_resume(struct tb *tb)
return 0;
}
-static int icm_start(struct tb *tb)
+static int icm_start(struct tb *tb, bool not_used)
{
struct icm *icm = tb_priv(tb);
int ret;
diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c
index 633970fbe9b0..63cb4b6afb71 100644
--- a/drivers/thunderbolt/lc.c
+++ b/drivers/thunderbolt/lc.c
@@ -6,6 +6,8 @@
* Author: Mika Westerberg <mika.westerberg@linux.intel.com>
*/
+#include <linux/delay.h>
+
#include "tb.h"
/**
@@ -45,6 +47,49 @@ static int find_port_lc_cap(struct tb_port *port)
return sw->cap_lc + start + phys * size;
}
+/**
+ * tb_lc_reset_port() - Trigger downstream port reset through LC
+ * @port: Port that is reset
+ *
+ * Triggers downstream port reset through link controller registers.
+ * Returns %0 in case of success, negative errno otherwise. Only supports
+ * non-USB4 routers with a link controller (that's Thunderbolt 2 and
+ * Thunderbolt 3).
+ */
+int tb_lc_reset_port(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+ int cap, ret;
+ u32 mode;
+
+ if (sw->generation < 2)
+ return -EINVAL;
+
+ cap = find_port_lc_cap(port);
+ if (cap < 0)
+ return cap;
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode |= TB_LC_PORT_MODE_DPR;
+
+ ret = tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_sw_read(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+ if (ret)
+ return ret;
+
+ mode &= ~TB_LC_PORT_MODE_DPR;
+
+ return tb_sw_write(sw, &mode, TB_CFG_SWITCH, cap + TB_LC_PORT_MODE, 1);
+}
+
static int tb_lc_set_port_configured(struct tb_port *port, bool configured)
{
bool upstream = tb_is_upstream_port(port);
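
As a usage sketch, the helper above is meant for downstream lane adapters of pre-USB4 routers; the host reset path added to switch.c below does essentially the following (the wrapper name here is illustrative only):

/* Illustrative only: reset every downstream lane adapter of a
 * Thunderbolt 2/3 router through its link controller.
 */
static int reset_downstream_lane_adapters(struct tb_switch *sw)
{
	struct tb_port *port;
	int ret;

	tb_switch_for_each_port(sw, port) {
		if (!tb_port_is_null(port) || tb_is_upstream_port(port))
			continue;

		ret = tb_lc_reset_port(port);
		if (ret)
			return ret;
	}
	return 0;
}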
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index fb4f46e51753..7af2642b97cb 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -48,7 +48,7 @@
static bool host_reset = true;
module_param(host_reset, bool, 0444);
-MODULE_PARM_DESC(host_reset, "reset USBv2 host router (default: true)");
+MODULE_PARM_DESC(host_reset, "reset USB4 host router (default: true)");
static int ring_interrupt_index(const struct tb_ring *ring)
{
@@ -465,7 +465,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
if (!nhi->pdev->msix_enabled)
return 0;
- ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
+ ret = ida_alloc_max(&nhi->msix_ida, MSIX_MAX_VECS - 1, GFP_KERNEL);
if (ret < 0)
return ret;
@@ -485,7 +485,7 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
return 0;
err_ida_remove:
- ida_simple_remove(&nhi->msix_ida, ring->vector);
+ ida_free(&nhi->msix_ida, ring->vector);
return ret;
}
@@ -496,7 +496,7 @@ static void ring_release_msix(struct tb_ring *ring)
return;
free_irq(ring->irq, ring);
- ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
+ ida_free(&ring->nhi->msix_ida, ring->vector);
ring->vector = 0;
ring->irq = 0;
}
@@ -1364,7 +1364,6 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
nhi_check_quirks(nhi);
nhi_check_iommu(nhi);
-
nhi_reset(nhi);
res = nhi_init_msi(nhi);
@@ -1392,7 +1391,7 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_dbg(dev, "NHI initialized, starting thunderbolt\n");
- res = tb_domain_add(tb);
+ res = tb_domain_add(tb, host_reset);
if (res) {
/*
* At this point the RX/TX rings might already have been
diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c
index 69fb3b0fa34f..8901db2de327 100644
--- a/drivers/thunderbolt/nvm.c
+++ b/drivers/thunderbolt/nvm.c
@@ -330,7 +330,7 @@ struct tb_nvm *tb_nvm_alloc(struct device *dev)
if (!nvm)
return ERR_PTR(-ENOMEM);
- ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL);
+ ret = ida_alloc(&nvm_ida, GFP_KERNEL);
if (ret < 0) {
kfree(nvm);
return ERR_PTR(ret);
@@ -528,7 +528,7 @@ void tb_nvm_free(struct tb_nvm *nvm)
nvmem_unregister(nvm->non_active);
nvmem_unregister(nvm->active);
vfree(nvm->buf);
- ida_simple_remove(&nvm_ida, nvm->id);
+ ida_free(&nvm_ida, nvm->id);
}
kfree(nvm);
}
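
The ida_simple_*() conversions in domain.c, nhi.c and above follow the standard replacement mapping; the only subtlety is that ida_simple_get() takes an exclusive end while the ida_alloc_*() helpers take an inclusive maximum. A comment-only summary (illustrative, not part of the patch):

/*
 * Replacement mapping used by these conversions:
 *
 *   ida_simple_get(ida, 0, 0, gfp)      ->  ida_alloc(ida, gfp)
 *   ida_simple_get(ida, 0, end, gfp)    ->  ida_alloc_max(ida, end - 1, gfp)
 *   ida_simple_get(ida, min, end, gfp)  ->  ida_alloc_range(ida, min, end - 1, gfp)
 *   ida_simple_remove(ida, id)          ->  ida_free(ida, id)
 *
 * ida_simple_get()'s "end" is exclusive, whereas ida_alloc_max() and
 * ida_alloc_range() take an inclusive maximum -- hence MSIX_MAX_VECS - 1
 * in nhi.c and max_hopid (instead of max_hopid + 1) in switch.c.
 */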
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index 091a81bbdbdc..f760e54cd9bd 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -446,6 +446,19 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index,
return -ETIMEDOUT;
}
+/**
+ * tb_path_deactivate_hop() - Deactivate one path in path config space
+ * @port: Lane or protocol adapter
+ * @hop_index: HopID of the path to be cleared
+ *
+ * This deactivates or clears a single path config space entry at
+ * @hop_index. Returns %0 on success and negative errno otherwise.
+ */
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index)
+{
+ return __tb_path_deactivate_hop(port, hop_index, true);
+}
+
static void __tb_path_deactivate_hops(struct tb_path *path, int first_hop)
{
int i, res;
diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c
index e6bfa63b40ae..e81de9c30eac 100644
--- a/drivers/thunderbolt/quirks.c
+++ b/drivers/thunderbolt/quirks.c
@@ -43,6 +43,12 @@ static void quirk_usb3_maximum_bandwidth(struct tb_switch *sw)
}
}
+static void quirk_block_rpm_in_redrive(struct tb_switch *sw)
+{
+ sw->quirks |= QUIRK_KEEP_POWER_IN_DP_REDRIVE;
+ tb_sw_dbg(sw, "preventing runtime PM in DP redrive mode\n");
+}
+
struct tb_quirk {
u16 hw_vendor_id;
u16 hw_device_id;
@@ -87,6 +93,14 @@ static const struct tb_quirk tb_quirks[] = {
{ 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HUB_40G_BRIDGE, 0x0000, 0x0000,
quirk_usb3_maximum_bandwidth },
/*
+ * Block Runtime PM in DP redrive mode for Intel Barlow Ridge host
+ * controllers.
+ */
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_80G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ { 0x8087, PCI_DEVICE_ID_INTEL_BARLOW_RIDGE_HOST_40G_NHI, 0x0000, 0x0000,
+ quirk_block_rpm_in_redrive },
+ /*
* CLx is not supported on AMD USB4 Yellow Carp and Pink Sardine platforms.
*/
{ 0x0438, 0x0208, 0x0000, 0x0000, quirk_clx_disable },
diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c
index d49d6628dbf2..6bb49bdcd6c1 100644
--- a/drivers/thunderbolt/retimer.c
+++ b/drivers/thunderbolt/retimer.c
@@ -356,7 +356,7 @@ static void tb_retimer_release(struct device *dev)
kfree(rt);
}
-struct device_type tb_retimer_type = {
+const struct device_type tb_retimer_type = {
.name = "thunderbolt_retimer",
.groups = retimer_groups,
.release = tb_retimer_release,
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index fad40c4bc710..6ffc4e81ffed 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -676,6 +676,13 @@ int tb_port_disable(struct tb_port *port)
return __tb_port_enable(port, false);
}
+static int tb_port_reset(struct tb_port *port)
+{
+ if (tb_switch_is_usb4(port->sw))
+ return port->cap_usb4 ? usb4_port_reset(port) : 0;
+ return tb_lc_reset_port(port);
+}
+
/*
* tb_init_port() - initialize a port
*
@@ -771,7 +778,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
if (max_hopid < 0 || max_hopid > port_max_hopid)
max_hopid = port_max_hopid;
- return ida_simple_get(ida, min_hopid, max_hopid + 1, GFP_KERNEL);
+ return ida_alloc_range(ida, min_hopid, max_hopid, GFP_KERNEL);
}
/**
@@ -809,7 +816,7 @@ int tb_port_alloc_out_hopid(struct tb_port *port, int min_hopid, int max_hopid)
*/
void tb_port_release_in_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->in_hopids, hopid);
+ ida_free(&port->in_hopids, hopid);
}
/**
@@ -819,7 +826,7 @@ void tb_port_release_in_hopid(struct tb_port *port, int hopid)
*/
void tb_port_release_out_hopid(struct tb_port *port, int hopid)
{
- ida_simple_remove(&port->out_hopids, hopid);
+ ida_free(&port->out_hopids, hopid);
}
static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
@@ -1120,7 +1127,7 @@ int tb_port_lane_bonding_enable(struct tb_port *port)
ret = tb_port_set_link_width(port->dual_link_port,
TB_LINK_WIDTH_DUAL);
if (ret)
- goto err_lane0;
+ goto err_lane1;
}
/*
@@ -1534,29 +1541,124 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw)
regs->__unknown1, regs->__unknown4);
}
+static int tb_switch_reset_host(struct tb_switch *sw)
+{
+ if (sw->generation > 1) {
+ struct tb_port *port;
+
+ tb_switch_for_each_port(sw, port) {
+ int i, ret;
+
+ /*
+ * For lane adapters we issue downstream port
+ * reset and clear up path config spaces.
+ *
+ * For protocol adapters we disable the path and
+ * clear path config space one by one (from 8 to
+ * Max Input HopID of the adapter).
+ */
+ if (tb_port_is_null(port) && !tb_is_upstream_port(port)) {
+ ret = tb_port_reset(port);
+ if (ret)
+ return ret;
+ } else if (tb_port_is_usb3_down(port) ||
+ tb_port_is_usb3_up(port)) {
+ tb_usb3_port_enable(port, false);
+ } else if (tb_port_is_dpin(port) ||
+ tb_port_is_dpout(port)) {
+ tb_dp_port_enable(port, false);
+ } else if (tb_port_is_pcie_down(port) ||
+ tb_port_is_pcie_up(port)) {
+ tb_pci_port_enable(port, false);
+ } else {
+ continue;
+ }
+
+ /* Cleanup path config space of protocol adapter */
+ for (i = TB_PATH_MIN_HOPID;
+ i <= port->config.max_in_hop_id; i++) {
+ ret = tb_path_deactivate_hop(port, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ struct tb_cfg_result res;
+
+ /* Thunderbolt 1 uses the "reset" config space packet */
+ res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
+ TB_CFG_SWITCH, 2, 2);
+ if (res.err)
+ return res.err;
+ res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
+ if (res.err > 0)
+ return -EIO;
+ else if (res.err < 0)
+ return res.err;
+ }
+
+ return 0;
+}
+
+static int tb_switch_reset_device(struct tb_switch *sw)
+{
+ return tb_port_reset(tb_switch_downstream_port(sw));
+}
+
+static bool tb_switch_enumerated(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Read directly from the hardware because we use this also
+ * during system sleep where sw->config.enabled is already set
+ * by us.
+ */
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_3, 1);
+ if (ret)
+ return false;
+
+ return !!(val & ROUTER_CS_3_V);
+}
+
/**
- * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET
- * @sw: Switch to reset
+ * tb_switch_reset() - Perform reset to the router
+ * @sw: Router to reset
*
- * Return: Returns 0 on success or an error code on failure.
+ * Issues reset to the router @sw. Can be used for any router. For host
+ * routers, resets all the downstream ports and cleans up path config
+ * spaces accordingly. For device routers, issues a downstream port reset
+ * through the parent router, so as a side effect there will be an unplug
+ * soon after this is finished.
+ *
+ * If the router is not enumerated, does nothing.
+ *
+ * Returns %0 on success or negative errno in case of failure.
*/
int tb_switch_reset(struct tb_switch *sw)
{
- struct tb_cfg_result res;
+ int ret;
- if (sw->generation > 1)
+ /*
+ * We cannot access the port config spaces unless the router is
+ * already enumerated. If the router is not enumerated it is
+ * equal to being reset so we can skip that here.
+ */
+ if (!tb_switch_enumerated(sw))
return 0;
- tb_sw_dbg(sw, "resetting switch\n");
+ tb_sw_dbg(sw, "resetting\n");
- res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2,
- TB_CFG_SWITCH, 2, 2);
- if (res.err)
- return res.err;
- res = tb_cfg_reset(sw->tb->ctl, tb_route(sw));
- if (res.err > 0)
- return -EIO;
- return res.err;
+ if (tb_route(sw))
+ ret = tb_switch_reset_device(sw);
+ else
+ ret = tb_switch_reset_host(sw);
+
+ if (ret)
+ tb_sw_warn(sw, "failed to reset\n");
+
+ return ret;
}
/**
@@ -2228,7 +2330,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = {
NULL)
};
-struct device_type tb_switch_type = {
+const struct device_type tb_switch_type = {
.name = "thunderbolt_device",
.release = tb_switch_release,
.uevent = tb_switch_uevent,
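
With the rework above, tb_switch_reset() can be called for either end of the topology; a hedged sketch of the two call patterns (the wrapper below is illustrative, the real callers are in tb.c):

/* Illustrative only: the same entry point now handles both cases. */
static int example_reset(struct tb *tb, struct tb_switch *device_sw)
{
	int ret;

	/*
	 * Host router: resets downstream ports and cleans up path
	 * config spaces. No-op if the router is not enumerated.
	 */
	ret = tb_switch_reset(tb->root_switch);
	if (ret)
		return ret;

	/*
	 * Device router: issues a downstream port reset through the
	 * parent, which shows up as an unplug shortly afterwards.
	 */
	return tb_switch_reset(device_sw);
}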
diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index 846d2813bb1a..c5ce7a694b27 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -17,6 +17,7 @@
#include "tunnel.h"
#define TB_TIMEOUT 100 /* ms */
+#define TB_RELEASE_BW_TIMEOUT 10000 /* ms */
/*
* Minimum bandwidth (in Mb/s) that is needed in the single transmitter/receiver
@@ -75,112 +76,6 @@ struct tb_hotplug_event {
bool unplug;
};
-static void tb_init_bandwidth_groups(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- group->tb = tcm_to_tb(tcm);
- group->index = i + 1;
- INIT_LIST_HEAD(&group->ports);
- }
-}
-
-static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
- struct tb_port *in)
-{
- if (!group || WARN_ON(in->group))
- return;
-
- in->group = group;
- list_add_tail(&in->group_list, &group->ports);
-
- tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
-}
-
-static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (list_empty(&group->ports))
- return group;
- }
-
- return NULL;
-}
-
-static struct tb_bandwidth_group *
-tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- struct tb_bandwidth_group *group;
- struct tb_tunnel *tunnel;
-
- /*
- * Find all DP tunnels that go through all the same USB4 links
- * as this one. Because we always setup tunnels the same way we
- * can just check for the routers at both ends of the tunnels
- * and if they are the same we have a match.
- */
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (!tb_tunnel_is_dp(tunnel))
- continue;
-
- if (tunnel->src_port->sw == in->sw &&
- tunnel->dst_port->sw == out->sw) {
- group = tunnel->src_port->group;
- if (group) {
- tb_bandwidth_group_attach_port(group, in);
- return group;
- }
- }
- }
-
- /* Pick up next available group then */
- group = tb_find_free_bandwidth_group(tcm);
- if (group)
- tb_bandwidth_group_attach_port(group, in);
- else
- tb_port_warn(in, "no available bandwidth groups\n");
-
- return group;
-}
-
-static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
- struct tb_port *out)
-{
- if (usb4_dp_port_bandwidth_mode_enabled(in)) {
- int index, i;
-
- index = usb4_dp_port_group_id(in);
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- if (tcm->groups[i].index == index) {
- tb_bandwidth_group_attach_port(&tcm->groups[i], in);
- return;
- }
- }
- }
-
- tb_attach_bandwidth_group(tcm, in, out);
-}
-
-static void tb_detach_bandwidth_group(struct tb_port *in)
-{
- struct tb_bandwidth_group *group = in->group;
-
- if (group) {
- in->group = NULL;
- list_del_init(&in->group_list);
-
- tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
- }
-}
-
static void tb_handle_hotplug(struct work_struct *work);
static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug)
@@ -472,34 +367,6 @@ static void tb_switch_discover_tunnels(struct tb_switch *sw,
}
}
-static void tb_discover_tunnels(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- struct tb_tunnel *tunnel;
-
- tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
-
- list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
- if (tb_tunnel_is_pci(tunnel)) {
- struct tb_switch *parent = tunnel->dst_port->sw;
-
- while (parent != tunnel->src_port->sw) {
- parent->boot = true;
- parent = tb_switch_parent(parent);
- }
- } else if (tb_tunnel_is_dp(tunnel)) {
- struct tb_port *in = tunnel->src_port;
- struct tb_port *out = tunnel->dst_port;
-
- /* Keep the domain from powering down */
- pm_runtime_get_sync(&in->sw->dev);
- pm_runtime_get_sync(&out->sw->dev);
-
- tb_discover_bandwidth_group(tcm, in, out);
- }
- }
-}
-
static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd)
{
if (tb_switch_is_usb4(port->sw))
@@ -681,6 +548,10 @@ static int tb_consumed_usb3_pcie_bandwidth(struct tb *tb,
* Calculates consumed DP bandwidth at @port between path from @src_port
* to @dst_port. Does not take tunnel starting from @src_port and ending
* from @src_port into account.
+ *
+ * If there is bandwidth reserved for any of the groups between
+ * @src_port and @dst_port (but not yet used) that is also taken into
+ * account in the returned consumed bandwidth.
*/
static int tb_consumed_dp_bandwidth(struct tb *tb,
struct tb_port *src_port,
@@ -689,9 +560,11 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
int *consumed_up,
int *consumed_down)
{
+ int group_reserved[MAX_GROUPS] = {};
struct tb_cm *tcm = tb_priv(tb);
struct tb_tunnel *tunnel;
- int ret;
+ bool downstream;
+ int i, ret;
*consumed_up = *consumed_down = 0;
@@ -700,6 +573,7 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
* their consumed bandwidth from the available.
*/
list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ const struct tb_bandwidth_group *group;
int dp_consumed_up, dp_consumed_down;
if (tb_tunnel_is_invalid(tunnel))
@@ -712,6 +586,15 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
continue;
/*
+ * Calculate what is reserved for groups crossing the
+ * same ports only once (as that is reserved for all the
+ * tunnels in the group).
+ */
+ group = tunnel->src_port->group;
+ if (group && group->reserved && !group_reserved[group->index])
+ group_reserved[group->index] = group->reserved;
+
+ /*
* Ignore the DP tunnel between src_port and dst_port
* because it is the same tunnel and we may be
* re-calculating estimated bandwidth.
@@ -729,6 +612,14 @@ static int tb_consumed_dp_bandwidth(struct tb *tb,
*consumed_down += dp_consumed_down;
}
+ downstream = tb_port_path_direction_downstream(src_port, dst_port);
+ for (i = 0; i < ARRAY_SIZE(group_reserved); i++) {
+ if (downstream)
+ *consumed_down += group_reserved[i];
+ else
+ *consumed_up += group_reserved[i];
+ }
+
return 0;
}
@@ -1181,8 +1072,6 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* @tb: Domain structure
* @src_port: Source adapter to start the transition
* @dst_port: Destination adapter
- * @requested_up: New lower bandwidth request upstream (Mb/s)
- * @requested_down: New lower bandwidth request downstream (Mb/s)
* @keep_asym: Keep asymmetric link if preferred
*
* Goes over each link from @src_port to @dst_port and tries to
@@ -1190,8 +1079,7 @@ static int tb_configure_asym(struct tb *tb, struct tb_port *src_port,
* allows and link asymmetric preference is ignored (if @keep_asym is %false).
*/
static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
- struct tb_port *dst_port, int requested_up,
- int requested_down, bool keep_asym)
+ struct tb_port *dst_port, bool keep_asym)
{
bool clx = false, clx_disabled = false, downstream;
struct tb_switch *sw;
@@ -1230,10 +1118,10 @@ static int tb_configure_sym(struct tb *tb, struct tb_port *src_port,
* guard band 10%) as the link was configured asymmetric
* already.
*/
- if (consumed_down + requested_down >= asym_threshold)
+ if (consumed_down >= asym_threshold)
continue;
} else {
- if (consumed_up + requested_up >= asym_threshold)
+ if (consumed_up >= asym_threshold)
continue;
}
@@ -1306,7 +1194,7 @@ static void tb_configure_link(struct tb_port *down, struct tb_port *up,
struct tb_port *host_port;
host_port = tb_port_at(tb_route(sw), tb->root_switch);
- tb_configure_sym(tb, host_port, up, 0, 0, false);
+ tb_configure_sym(tb, host_port, up, false);
}
/* Set the link configured */
@@ -1464,6 +1352,297 @@ out_rpm_put:
}
}
+static void
+tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *first_tunnel;
+ struct tb *tb = group->tb;
+ struct tb_port *in;
+ int ret;
+
+ tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
+ group->index);
+
+ first_tunnel = NULL;
+ list_for_each_entry(in, &group->ports, group_list) {
+ int estimated_bw, estimated_up, estimated_down;
+ struct tb_tunnel *tunnel;
+ struct tb_port *out;
+
+ if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ continue;
+
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (WARN_ON(!tunnel))
+ break;
+
+ if (!first_tunnel) {
+ /*
+ * Since USB3 bandwidth is shared by all DP
+ * tunnels under the host router USB4 port, even
+ * if they do not begin from the host router, we
+ * can release USB3 bandwidth just once and not
+ * for each tunnel separately.
+ */
+ first_tunnel = tunnel;
+ ret = tb_release_unused_usb3_bandwidth(tb,
+ first_tunnel->src_port, first_tunnel->dst_port);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to release unused bandwidth\n");
+ break;
+ }
+ }
+
+ out = tunnel->dst_port;
+ ret = tb_available_bandwidth(tb, in, out, &estimated_up,
+ &estimated_down, true);
+ if (ret) {
+ tb_tunnel_warn(tunnel,
+ "failed to re-calculate estimated bandwidth\n");
+ break;
+ }
+
+ /*
+ * Estimated bandwidth includes:
+ * - already allocated bandwidth for the DP tunnel
+ * - available bandwidth along the path
+ * - bandwidth allocated for USB 3.x but not used.
+ */
+ if (tb_tunnel_direction_downstream(tunnel))
+ estimated_bw = estimated_down;
+ else
+ estimated_bw = estimated_up;
+
+ /*
+ * If there is reserved bandwidth for the group that is
+ * not yet released we report that too.
+ */
+ tb_tunnel_dbg(tunnel,
+ "re-calculated estimated bandwidth %u (+ %u reserved) = %u Mb/s\n",
+ estimated_bw, group->reserved,
+ estimated_bw + group->reserved);
+
+ if (usb4_dp_port_set_estimated_bandwidth(in,
+ estimated_bw + group->reserved))
+ tb_tunnel_warn(tunnel,
+ "failed to update estimated bandwidth\n");
+ }
+
+ if (first_tunnel)
+ tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
+ first_tunnel->dst_port);
+
+ tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
+}
+
+static void tb_recalc_estimated_bandwidth(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (!list_empty(&group->ports))
+ tb_recalc_estimated_bandwidth_for_group(group);
+ }
+
+ tb_dbg(tb, "bandwidth re-calculation done\n");
+}
+
+static bool __release_group_bandwidth(struct tb_bandwidth_group *group)
+{
+ if (group->reserved) {
+ tb_dbg(group->tb, "group %d released total %d Mb/s\n", group->index,
+ group->reserved);
+ group->reserved = 0;
+ return true;
+ }
+ return false;
+}
+
+static void __configure_group_sym(struct tb_bandwidth_group *group)
+{
+ struct tb_tunnel *tunnel;
+ struct tb_port *in;
+
+ if (list_empty(&group->ports))
+ return;
+
+ /*
+ * All the tunnels in the group go through the same USB4 links
+ * so we find the first one here and pass the IN and OUT
+ * adapters to tb_configure_sym() which now transitions the
+ * links back to symmetric if bandwidth requirement < asym_threshold.
+ *
+ * We do this here to avoid unnecessary transitions (for example
+ * if the graphics released bandwidth for another tunnel in the
+ * same group).
+ */
+ in = list_first_entry(&group->ports, struct tb_port, group_list);
+ tunnel = tb_find_tunnel(group->tb, TB_TUNNEL_DP, in, NULL);
+ if (tunnel)
+ tb_configure_sym(group->tb, in, tunnel->dst_port, true);
+}
+
+static void tb_bandwidth_group_release_work(struct work_struct *work)
+{
+ struct tb_bandwidth_group *group =
+ container_of(work, typeof(*group), release_work.work);
+ struct tb *tb = group->tb;
+
+ mutex_lock(&tb->lock);
+ if (__release_group_bandwidth(group))
+ tb_recalc_estimated_bandwidth(tb);
+ __configure_group_sym(group);
+ mutex_unlock(&tb->lock);
+}
+
+static void tb_init_bandwidth_groups(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ group->tb = tcm_to_tb(tcm);
+ group->index = i + 1;
+ INIT_LIST_HEAD(&group->ports);
+ INIT_DELAYED_WORK(&group->release_work,
+ tb_bandwidth_group_release_work);
+ }
+}
+
+static void tb_bandwidth_group_attach_port(struct tb_bandwidth_group *group,
+ struct tb_port *in)
+{
+ if (!group || WARN_ON(in->group))
+ return;
+
+ in->group = group;
+ list_add_tail(&in->group_list, &group->ports);
+
+ tb_port_dbg(in, "attached to bandwidth group %d\n", group->index);
+}
+
+static struct tb_bandwidth_group *tb_find_free_bandwidth_group(struct tb_cm *tcm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ struct tb_bandwidth_group *group = &tcm->groups[i];
+
+ if (list_empty(&group->ports))
+ return group;
+ }
+
+ return NULL;
+}
+
+static struct tb_bandwidth_group *
+tb_attach_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ struct tb_bandwidth_group *group;
+ struct tb_tunnel *tunnel;
+
+ /*
+ * Find all DP tunnels that go through all the same USB4 links
+ * as this one. Because we always setup tunnels the same way we
+ * can just check for the routers at both ends of the tunnels
+ * and if they are the same we have a match.
+ */
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (!tb_tunnel_is_dp(tunnel))
+ continue;
+
+ if (tunnel->src_port->sw == in->sw &&
+ tunnel->dst_port->sw == out->sw) {
+ group = tunnel->src_port->group;
+ if (group) {
+ tb_bandwidth_group_attach_port(group, in);
+ return group;
+ }
+ }
+ }
+
+ /* Pick up next available group then */
+ group = tb_find_free_bandwidth_group(tcm);
+ if (group)
+ tb_bandwidth_group_attach_port(group, in);
+ else
+ tb_port_warn(in, "no available bandwidth groups\n");
+
+ return group;
+}
+
+static void tb_discover_bandwidth_group(struct tb_cm *tcm, struct tb_port *in,
+ struct tb_port *out)
+{
+ if (usb4_dp_port_bandwidth_mode_enabled(in)) {
+ int index, i;
+
+ index = usb4_dp_port_group_id(in);
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
+ if (tcm->groups[i].index == index) {
+ tb_bandwidth_group_attach_port(&tcm->groups[i], in);
+ return;
+ }
+ }
+ }
+
+ tb_attach_bandwidth_group(tcm, in, out);
+}
+
+static void tb_detach_bandwidth_group(struct tb_port *in)
+{
+ struct tb_bandwidth_group *group = in->group;
+
+ if (group) {
+ in->group = NULL;
+ list_del_init(&in->group_list);
+
+ tb_port_dbg(in, "detached from bandwidth group %d\n", group->index);
+
+ /* No more tunnels so release the reserved bandwidth if any */
+ if (list_empty(&group->ports)) {
+ cancel_delayed_work(&group->release_work);
+ __release_group_bandwidth(group);
+ }
+ }
+}
+
+static void tb_discover_tunnels(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_tunnel *tunnel;
+
+ tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true);
+
+ list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ if (tb_tunnel_is_pci(tunnel)) {
+ struct tb_switch *parent = tunnel->dst_port->sw;
+
+ while (parent != tunnel->src_port->sw) {
+ parent->boot = true;
+ parent = tb_switch_parent(parent);
+ }
+ } else if (tb_tunnel_is_dp(tunnel)) {
+ struct tb_port *in = tunnel->src_port;
+ struct tb_port *out = tunnel->dst_port;
+
+ /* Keep the domain from powering down */
+ pm_runtime_get_sync(&in->sw->dev);
+ pm_runtime_get_sync(&out->sw->dev);
+
+ tb_discover_bandwidth_group(tcm, in, out);
+ }
+ }
+}
+
static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
{
struct tb_port *src_port, *dst_port;
@@ -1491,7 +1670,7 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel)
* If bandwidth on a link is < asym_threshold
* transition the link to symmetric.
*/
- tb_configure_sym(tb, src_port, dst_port, 0, 0, true);
+ tb_configure_sym(tb, src_port, dst_port, true);
/* Now we can allow the domain to runtime suspend again */
pm_runtime_mark_last_busy(&dst_port->sw->dev);
pm_runtime_put_autosuspend(&dst_port->sw->dev);
@@ -1605,101 +1784,6 @@ out:
return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN);
}
-static void
-tb_recalc_estimated_bandwidth_for_group(struct tb_bandwidth_group *group)
-{
- struct tb_tunnel *first_tunnel;
- struct tb *tb = group->tb;
- struct tb_port *in;
- int ret;
-
- tb_dbg(tb, "re-calculating bandwidth estimation for group %u\n",
- group->index);
-
- first_tunnel = NULL;
- list_for_each_entry(in, &group->ports, group_list) {
- int estimated_bw, estimated_up, estimated_down;
- struct tb_tunnel *tunnel;
- struct tb_port *out;
-
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
- continue;
-
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (WARN_ON(!tunnel))
- break;
-
- if (!first_tunnel) {
- /*
- * Since USB3 bandwidth is shared by all DP
- * tunnels under the host router USB4 port, even
- * if they do not begin from the host router, we
- * can release USB3 bandwidth just once and not
- * for each tunnel separately.
- */
- first_tunnel = tunnel;
- ret = tb_release_unused_usb3_bandwidth(tb,
- first_tunnel->src_port, first_tunnel->dst_port);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to release unused bandwidth\n");
- break;
- }
- }
-
- out = tunnel->dst_port;
- ret = tb_available_bandwidth(tb, in, out, &estimated_up,
- &estimated_down, true);
- if (ret) {
- tb_tunnel_warn(tunnel,
- "failed to re-calculate estimated bandwidth\n");
- break;
- }
-
- /*
- * Estimated bandwidth includes:
- * - already allocated bandwidth for the DP tunnel
- * - available bandwidth along the path
- * - bandwidth allocated for USB 3.x but not used.
- */
- tb_tunnel_dbg(tunnel,
- "re-calculated estimated bandwidth %u/%u Mb/s\n",
- estimated_up, estimated_down);
-
- if (tb_port_path_direction_downstream(in, out))
- estimated_bw = estimated_down;
- else
- estimated_bw = estimated_up;
-
- if (usb4_dp_port_set_estimated_bandwidth(in, estimated_bw))
- tb_tunnel_warn(tunnel,
- "failed to update estimated bandwidth\n");
- }
-
- if (first_tunnel)
- tb_reclaim_usb3_bandwidth(tb, first_tunnel->src_port,
- first_tunnel->dst_port);
-
- tb_dbg(tb, "bandwidth estimation for group %u done\n", group->index);
-}
-
-static void tb_recalc_estimated_bandwidth(struct tb *tb)
-{
- struct tb_cm *tcm = tb_priv(tb);
- int i;
-
- tb_dbg(tb, "bandwidth consumption changed, re-calculating estimated bandwidth\n");
-
- for (i = 0; i < ARRAY_SIZE(tcm->groups); i++) {
- struct tb_bandwidth_group *group = &tcm->groups[i];
-
- if (!list_empty(&group->ports))
- tb_recalc_estimated_bandwidth_for_group(group);
- }
-
- tb_dbg(tb, "bandwidth re-calculation done\n");
-}
-
static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
{
struct tb_port *host_port, *port;
@@ -1737,49 +1821,15 @@ static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in)
return NULL;
}
-static bool tb_tunnel_one_dp(struct tb *tb)
+static bool tb_tunnel_one_dp(struct tb *tb, struct tb_port *in,
+ struct tb_port *out)
{
int available_up, available_down, ret, link_nr;
struct tb_cm *tcm = tb_priv(tb);
- struct tb_port *port, *in, *out;
int consumed_up, consumed_down;
struct tb_tunnel *tunnel;
/*
- * Find pair of inactive DP IN and DP OUT adapters and then
- * establish a DP tunnel between them.
- */
- tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
-
- in = NULL;
- out = NULL;
- list_for_each_entry(port, &tcm->dp_resources, list) {
- if (!tb_port_is_dpin(port))
- continue;
-
- if (tb_port_is_enabled(port)) {
- tb_port_dbg(port, "DP IN in use\n");
- continue;
- }
-
- in = port;
- tb_port_dbg(in, "DP IN available\n");
-
- out = tb_find_dp_out(tb, port);
- if (out)
- break;
- }
-
- if (!in) {
- tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
- return false;
- }
- if (!out) {
- tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n");
- return false;
- }
-
- /*
* This is only applicable to links that are not bonded (so
* when Thunderbolt 1 hardware is involved somewhere in the
* topology). For these try to share the DP bandwidth between
@@ -1839,15 +1889,19 @@ static bool tb_tunnel_one_dp(struct tb *tb)
goto err_free;
}
+ /* If reading the tunnel's consumed bandwidth fails, tear it down */
+ ret = tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down);
+ if (ret)
+ goto err_deactivate;
+
list_add_tail(&tunnel->list, &tcm->tunnel_list);
- tb_reclaim_usb3_bandwidth(tb, in, out);
+ tb_reclaim_usb3_bandwidth(tb, in, out);
/*
* Transition the links to asymmetric if the consumption exceeds
* the threshold.
*/
- if (!tb_tunnel_consumed_bandwidth(tunnel, &consumed_up, &consumed_down))
- tb_configure_asym(tb, in, out, consumed_up, consumed_down);
+ tb_configure_asym(tb, in, out, consumed_up, consumed_down);
/* Update the domain with the new bandwidth estimation */
tb_recalc_estimated_bandwidth(tb);
@@ -1859,6 +1913,8 @@ static bool tb_tunnel_one_dp(struct tb *tb)
tb_increase_tmu_accuracy(tunnel);
return true;
+err_deactivate:
+ tb_tunnel_deactivate(tunnel);
err_free:
tb_tunnel_free(tunnel);
err_reclaim_usb:
@@ -1878,13 +1934,86 @@ err_rpm_put:
static void tb_tunnel_dp(struct tb *tb)
{
+ struct tb_cm *tcm = tb_priv(tb);
+ struct tb_port *port, *in, *out;
+
if (!tb_acpi_may_tunnel_dp()) {
tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n");
return;
}
- while (tb_tunnel_one_dp(tb))
- ;
+ /*
+ * Find pair of inactive DP IN and DP OUT adapters and then
+ * establish a DP tunnel between them.
+ */
+ tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n");
+
+ in = NULL;
+ out = NULL;
+ list_for_each_entry(port, &tcm->dp_resources, list) {
+ if (!tb_port_is_dpin(port))
+ continue;
+
+ if (tb_port_is_enabled(port)) {
+ tb_port_dbg(port, "DP IN in use\n");
+ continue;
+ }
+
+ in = port;
+ tb_port_dbg(in, "DP IN available\n");
+
+ out = tb_find_dp_out(tb, port);
+ if (out)
+ tb_tunnel_one_dp(tb, in, out);
+ else
+ tb_port_dbg(in, "no suitable DP OUT adapter available, not tunneling\n");
+ }
+
+ if (!in)
+ tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n");
+}
+
+static void tb_enter_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ /*
+ * If we get hot-unplug for the DP IN port of the host router
+ * and the DP resource is not available anymore it means there
+ * is a monitor connected directly to the Type-C port and we are
+ * in "redrive" mode. For this to work we cannot enter RTD3 so
+ * we bump up the runtime PM reference count here.
+ */
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (!tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = true;
+ pm_runtime_get(&sw->dev);
+ tb_port_dbg(port, "enter redrive mode, keeping powered\n");
+ }
+}
+
+static void tb_exit_redrive(struct tb_port *port)
+{
+ struct tb_switch *sw = port->sw;
+
+ if (!(sw->quirks & QUIRK_KEEP_POWER_IN_DP_REDRIVE))
+ return;
+
+ if (!tb_port_is_dpin(port))
+ return;
+ if (tb_route(sw))
+ return;
+ if (port->redrive && tb_switch_query_dp_resource(sw, port)) {
+ port->redrive = false;
+ pm_runtime_put(&sw->dev);
+ tb_port_dbg(port, "exit redrive mode\n");
+ }
}
static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
@@ -1903,7 +2032,10 @@ static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port)
}
tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out);
- tb_deactivate_and_free_tunnel(tunnel);
+ if (tunnel)
+ tb_deactivate_and_free_tunnel(tunnel);
+ else
+ tb_enter_redrive(port);
list_del_init(&port->list);
/*
@@ -1930,6 +2062,7 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port)
tb_port_dbg(port, "DP %s resource available after hotplug\n",
tb_port_is_dpin(port) ? "IN" : "OUT");
list_add_tail(&port->list, &tcm->dp_resources);
+ tb_exit_redrive(port);
/* Look for suitable DP IN <-> DP OUT pairs now */
tb_tunnel_dp(tb);
@@ -2243,8 +2376,10 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
int allocated_up, allocated_down, available_up, available_down, ret;
int requested_up_corrected, requested_down_corrected, granularity;
int max_up, max_down, max_up_rounded, max_down_rounded;
+ struct tb_bandwidth_group *group;
struct tb *tb = tunnel->tb;
struct tb_port *in, *out;
+ bool downstream;
ret = tb_tunnel_allocated_bandwidth(tunnel, &allocated_up, &allocated_down);
if (ret)
@@ -2270,11 +2405,11 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_tunnel_maximum_bandwidth(tunnel, &max_up, &max_down);
if (ret)
- return ret;
+ goto fail;
ret = usb4_dp_port_granularity(in);
if (ret < 0)
- return ret;
+ goto fail;
granularity = ret;
max_up_rounded = roundup(max_up, granularity);
@@ -2304,24 +2439,48 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
"bandwidth request too high (%d/%d Mb/s > %d/%d Mb/s)\n",
requested_up_corrected, requested_down_corrected,
max_up_rounded, max_down_rounded);
- return -ENOBUFS;
+ ret = -ENOBUFS;
+ goto fail;
}
+ downstream = tb_tunnel_direction_downstream(tunnel);
+ group = in->group;
+
if ((*requested_up >= 0 && requested_up_corrected <= allocated_up) ||
(*requested_down >= 0 && requested_down_corrected <= allocated_down)) {
- /*
- * If bandwidth on a link is < asym_threshold transition
- * the link to symmetric.
- */
- tb_configure_sym(tb, in, out, *requested_up, *requested_down, true);
- /*
- * If requested bandwidth is less or equal than what is
- * currently allocated to that tunnel we simply change
- * the reservation of the tunnel. Since all the tunnels
- * going out from the same USB4 port are in the same
- * group the released bandwidth will be taken into
- * account for the other tunnels automatically below.
- */
+ if (tunnel->bw_mode) {
+ int reserved;
+ /*
+ * If the requested bandwidth is less than or
+ * equal to what is currently allocated to that
+ * tunnel, we simply change the reservation of
+ * the tunnel and add the released bandwidth to
+ * the group for the next 10s. Then we release it for
+ * others to use.
+ */
+ if (downstream)
+ reserved = allocated_down - *requested_down;
+ else
+ reserved = allocated_up - *requested_up;
+
+ if (reserved > 0) {
+ group->reserved += reserved;
+ tb_dbg(tb, "group %d reserved %d total %d Mb/s\n",
+ group->index, reserved, group->reserved);
+
+ /*
+ * If it was not already pending,
+ * schedule the release now. If it is, then
+ * postpone it for the next 10s (unless
+ * it is already running, in which case
+ * the 10s already expired and we should
+ * give the reserved bandwidth back to others).
+ */
+ mod_delayed_work(system_wq, &group->release_work,
+ msecs_to_jiffies(TB_RELEASE_BW_TIMEOUT));
+ }
+ }
+
return tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
}
@@ -2332,7 +2491,7 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
*/
ret = tb_release_unused_usb3_bandwidth(tb, in, out);
if (ret)
- return ret;
+ goto fail;
/*
* Then go over all tunnels that cross the same USB4 ports (they
@@ -2344,11 +2503,15 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
if (ret)
goto reclaim;
- tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d Mb/s\n",
- available_up, available_down);
+ tb_tunnel_dbg(tunnel, "bandwidth available for allocation %d/%d (+ %u reserved) Mb/s\n",
+ available_up, available_down, group->reserved);
+
+ if ((*requested_up >= 0 &&
+ available_up + group->reserved >= requested_up_corrected) ||
+ (*requested_down >= 0 &&
+ available_down + group->reserved >= requested_down_corrected)) {
+ int released = 0;
- if ((*requested_up >= 0 && available_up >= requested_up_corrected) ||
- (*requested_down >= 0 && available_down >= requested_down_corrected)) {
/*
* If bandwidth on a link is >= asym_threshold
* transition the link to asymmetric.
@@ -2356,15 +2519,28 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
ret = tb_configure_asym(tb, in, out, *requested_up,
*requested_down);
if (ret) {
- tb_configure_sym(tb, in, out, 0, 0, true);
- return ret;
+ tb_configure_sym(tb, in, out, true);
+ goto fail;
}
ret = tb_tunnel_alloc_bandwidth(tunnel, requested_up,
requested_down);
if (ret) {
tb_tunnel_warn(tunnel, "failed to allocate bandwidth\n");
- tb_configure_sym(tb, in, out, 0, 0, true);
+ tb_configure_sym(tb, in, out, true);
+ }
+
+ if (downstream) {
+ if (*requested_down > available_down)
+ released = *requested_down - available_down;
+ } else {
+ if (*requested_up > available_up)
+ released = *requested_up - available_up;
+ }
+ if (released) {
+ group->reserved -= released;
+ tb_dbg(tb, "group %d released %d total %d Mb/s\n",
+ group->index, released, group->reserved);
}
} else {
ret = -ENOBUFS;
@@ -2372,6 +2548,18 @@ static int tb_alloc_dp_bandwidth(struct tb_tunnel *tunnel, int *requested_up,
reclaim:
tb_reclaim_usb3_bandwidth(tb, in, out);
+fail:
+ if (ret && ret != -ENODEV) {
+ /*
+ * Write back the same allocation (so no change); this
+ * makes the DPTX request fail on the graphics side.
+ */
+ tb_tunnel_dbg(tunnel,
+ "failing the request by rewriting allocated %d/%d Mb/s\n",
+ allocated_up, allocated_down);
+ tb_tunnel_alloc_bandwidth(tunnel, &allocated_up, &allocated_down);
+ }
+
return ret;
}
@@ -2379,11 +2567,11 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
{
struct tb_hotplug_event *ev = container_of(work, typeof(*ev), work);
int requested_bw, requested_up, requested_down, ret;
- struct tb_port *in, *out;
struct tb_tunnel *tunnel;
struct tb *tb = ev->tb;
struct tb_cm *tcm = tb_priv(tb);
struct tb_switch *sw;
+ struct tb_port *in;
pm_runtime_get_sync(&tb->dev);
@@ -2406,32 +2594,48 @@ static void tb_handle_dp_bandwidth_request(struct work_struct *work)
tb_port_dbg(in, "handling bandwidth allocation request\n");
+ tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
+ if (!tunnel) {
+ tb_port_warn(in, "failed to find tunnel\n");
+ goto put_sw;
+ }
+
if (!usb4_dp_port_bandwidth_mode_enabled(in)) {
- tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ if (tunnel->bw_mode) {
+ /*
+ * Reset the tunnel back to use the legacy
+ * allocation.
+ */
+ tunnel->bw_mode = false;
+ tb_port_dbg(in, "DPTX disabled bandwidth allocation mode\n");
+ } else {
+ tb_port_warn(in, "bandwidth allocation mode not enabled\n");
+ }
goto put_sw;
}
ret = usb4_dp_port_requested_bandwidth(in);
if (ret < 0) {
- if (ret == -ENODATA)
- tb_port_dbg(in, "no bandwidth request active\n");
- else
+ if (ret == -ENODATA) {
+ /*
+ * There is no request active so this means the
+ * BW allocation mode was enabled from the
+ * graphics side. At this point we know that the
+ * graphics driver has read the DPRX capabilities
+ * so we can offer a better bandwidth estimation.
+ */
+ tb_port_dbg(in, "DPTX enabled bandwidth allocation mode, updating estimated bandwidth\n");
+ tb_recalc_estimated_bandwidth(tb);
+ } else {
tb_port_warn(in, "failed to read requested bandwidth\n");
+ }
goto put_sw;
}
requested_bw = ret;
tb_port_dbg(in, "requested bandwidth %d Mb/s\n", requested_bw);
- tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
- if (!tunnel) {
- tb_port_warn(in, "failed to find tunnel\n");
- goto put_sw;
- }
-
- out = tunnel->dst_port;
-
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
requested_up = -1;
requested_down = requested_bw;
} else {
@@ -2560,6 +2764,16 @@ static void tb_stop(struct tb *tb)
tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */
}
+static void tb_deinit(struct tb *tb)
+{
+ struct tb_cm *tcm = tb_priv(tb);
+ int i;
+
+ /* Cancel all the release bandwidth workers */
+ for (i = 0; i < ARRAY_SIZE(tcm->groups); i++)
+ cancel_delayed_work_sync(&tcm->groups[i].release_work);
+}
+
static int tb_scan_finalize_switch(struct device *dev, void *data)
{
if (tb_is_switch(dev)) {
@@ -2581,9 +2795,10 @@ static int tb_scan_finalize_switch(struct device *dev, void *data)
return 0;
}
-static int tb_start(struct tb *tb)
+static int tb_start(struct tb *tb, bool reset)
{
struct tb_cm *tcm = tb_priv(tb);
+ bool discover = true;
int ret;
tb->root_switch = tb_switch_alloc(tb, &tb->dev, 0);
@@ -2622,12 +2837,28 @@ static int tb_start(struct tb *tb)
tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_MODE_LOWRES);
/* Enable TMU if it is off */
tb_switch_tmu_enable(tb->root_switch);
- /* Full scan to discover devices added before the driver was loaded. */
- tb_scan_switch(tb->root_switch);
- /* Find out tunnels created by the boot firmware */
- tb_discover_tunnels(tb);
- /* Add DP resources from the DP tunnels created by the boot firmware */
- tb_discover_dp_resources(tb);
+
+ /*
+ * Boot firmware might have created tunnels of its own. Since we
+ * cannot be sure they are usable for us, tear them down and
+ * reset the ports to handle it as a new hotplug for USB4 v1
+ * routers (for USB4 v2 and beyond we already do host reset).
+ */
+ if (reset && tb_switch_is_usb4(tb->root_switch)) {
+ discover = false;
+ if (usb4_switch_version(tb->root_switch) == 1)
+ tb_switch_reset(tb->root_switch);
+ }
+
+ if (discover) {
+ /* Full scan to discover devices added before the driver was loaded. */
+ tb_scan_switch(tb->root_switch);
+ /* Find out tunnels created by the boot firmware */
+ tb_discover_tunnels(tb);
+ /* Add DP resources from the DP tunnels created by the boot firmware */
+ tb_discover_dp_resources(tb);
+ }
+
/*
* If the boot firmware did not create USB 3.x tunnels create them
* now for the whole topology.
@@ -2698,8 +2929,12 @@ static int tb_resume_noirq(struct tb *tb)
tb_dbg(tb, "resuming...\n");
- /* remove any pci devices the firmware might have setup */
- tb_switch_reset(tb->root_switch);
+ /*
+ * For non-USB4 hosts (Apple systems) remove any PCIe devices
+ * the firmware might have setup.
+ */
+ if (!tb_switch_is_usb4(tb->root_switch))
+ tb_switch_reset(tb->root_switch);
tb_switch_resume(tb->root_switch);
tb_free_invalid_tunnels(tb);
@@ -2847,6 +3082,7 @@ static int tb_runtime_resume(struct tb *tb)
static const struct tb_cm_ops tb_cm_ops = {
.start = tb_start,
.stop = tb_stop,
+ .deinit = tb_deinit,
.suspend_noirq = tb_suspend_noirq,
.resume_noirq = tb_resume_noirq,
.freeze_noirq = tb_freeze_noirq,
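
The 10-second reservation window in tb_alloc_dp_bandwidth() relies on mod_delayed_work(): an idle release worker gets scheduled, a pending one has its timer pushed out, and one that is already executing gets queued again so a reservation added meanwhile is still released later. A stripped-down, standalone sketch of that pattern (names and the 10 s constant here are illustrative; the driver's real state lives in struct tb_bandwidth_group):

#include <linux/container_of.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Illustrative reserve-then-release state, not the driver's own. */
struct bw_reservation {
	struct mutex lock;
	int reserved_mbps;
	struct delayed_work release_work;
};

static void bw_release_fn(struct work_struct *work)
{
	struct bw_reservation *r =
		container_of(work, typeof(*r), release_work.work);

	mutex_lock(&r->lock);
	r->reserved_mbps = 0;		/* give it back to others */
	mutex_unlock(&r->lock);
}

static void bw_reservation_init(struct bw_reservation *r)
{
	mutex_init(&r->lock);
	r->reserved_mbps = 0;
	INIT_DELAYED_WORK(&r->release_work, bw_release_fn);
}

static void bw_reserve(struct bw_reservation *r, int mbps)
{
	mutex_lock(&r->lock);
	r->reserved_mbps += mbps;
	/* (Re)arm the release; a new reservation postpones the give-back */
	mod_delayed_work(system_wq, &r->release_work,
			 msecs_to_jiffies(10000));
	mutex_unlock(&r->lock);
}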
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 997c5a536905..feed8ecaf712 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -23,6 +23,8 @@
#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0)
/* Disable CLx if not supported */
#define QUIRK_NO_CLX BIT(1)
+/* Need to keep power on while USB4 port is in redrive mode */
+#define QUIRK_KEEP_POWER_IN_DP_REDRIVE BIT(2)
/**
* struct tb_nvm - Structure holding NVM information
@@ -217,6 +219,11 @@ struct tb_switch {
* @tb: Pointer to the domain the group belongs to
* @index: Index of the group (aka Group_ID). Valid values %1-%7
* @ports: DP IN adapters belonging to this group are linked here
+ * @reserved: Bandwidth released by one tunnel in the group, available
+ * to others. This is reported as part of estimated_bw for
+ * the group.
+ * @release_work: Worker to release the @reserved if it is not used by
+ * any of the tunnels.
*
* Any tunnel that requires isochronous bandwidth (that's DP for now) is
* attached to a bandwidth group. All tunnels going through the same
@@ -227,6 +234,8 @@ struct tb_bandwidth_group {
struct tb *tb;
int index;
struct list_head ports;
+ int reserved;
+ struct delayed_work release_work;
};
/**
@@ -258,6 +267,7 @@ struct tb_bandwidth_group {
* @group_list: The adapter is linked to the group's list of ports through this
* @max_bw: Maximum possible bandwidth through this adapter if set to
* non-zero.
+ * @redrive: For DP IN, if true the adapter is in redrive mode.
*
* In USB4 terminology this structure represents an adapter (protocol or
* lane adapter).
@@ -286,6 +296,7 @@ struct tb_port {
struct tb_bandwidth_group *group;
struct list_head group_list;
unsigned int max_bw;
+ bool redrive;
};
/**
@@ -452,6 +463,8 @@ struct tb_path {
* ICM to send driver ready message to the firmware.
* @start: Starts the domain
* @stop: Stops the domain
+ * @deinit: Perform any cleanup after the domain is stopped but before
+ * it is unregistered. Called without @tb->lock taken. Optional.
* @suspend_noirq: Connection manager specific suspend_noirq
* @resume_noirq: Connection manager specific resume_noirq
* @suspend: Connection manager specific suspend
@@ -483,8 +496,9 @@ struct tb_path {
*/
struct tb_cm_ops {
int (*driver_ready)(struct tb *tb);
- int (*start)(struct tb *tb);
+ int (*start)(struct tb *tb, bool reset);
void (*stop)(struct tb *tb);
+ void (*deinit)(struct tb *tb);
int (*suspend_noirq)(struct tb *tb);
int (*resume_noirq)(struct tb *tb);
int (*suspend)(struct tb *tb);
@@ -735,10 +749,10 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer,
struct tb *icm_probe(struct tb_nhi *nhi);
struct tb *tb_probe(struct tb_nhi *nhi);
-extern struct device_type tb_domain_type;
-extern struct device_type tb_retimer_type;
-extern struct device_type tb_switch_type;
-extern struct device_type usb4_port_device_type;
+extern const struct device_type tb_domain_type;
+extern const struct device_type tb_retimer_type;
+extern const struct device_type tb_switch_type;
+extern const struct device_type usb4_port_device_type;
int tb_domain_init(void);
void tb_domain_exit(void);
@@ -746,7 +760,7 @@ int tb_xdomain_init(void);
void tb_xdomain_exit(void);
struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize);
-int tb_domain_add(struct tb *tb);
+int tb_domain_add(struct tb *tb, bool reset);
void tb_domain_remove(struct tb *tb);
int tb_domain_suspend_noirq(struct tb *tb);
int tb_domain_resume_noirq(struct tb *tb);
@@ -1150,6 +1164,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid,
void tb_path_free(struct tb_path *path);
int tb_path_activate(struct tb_path *path);
void tb_path_deactivate(struct tb_path *path);
+int tb_path_deactivate_hop(struct tb_port *port, int hop_index);
bool tb_path_is_invalid(struct tb_path *path);
bool tb_path_port_on_path(const struct tb_path *path,
const struct tb_port *port);
@@ -1169,6 +1184,7 @@ int tb_drom_read(struct tb_switch *sw);
int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid);
int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid);
+int tb_lc_reset_port(struct tb_port *port);
int tb_lc_configure_port(struct tb_port *port);
void tb_lc_unconfigure_port(struct tb_port *port);
int tb_lc_configure_xdomain(struct tb_port *port);
@@ -1301,6 +1317,7 @@ void usb4_switch_remove_ports(struct tb_switch *sw);
int usb4_port_unlock(struct tb_port *port);
int usb4_port_hotplug_enable(struct tb_port *port);
+int usb4_port_reset(struct tb_port *port);
int usb4_port_configure(struct tb_port *port);
void usb4_port_unconfigure(struct tb_port *port);
int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd);
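
To summarize the tb_cm_ops changes above: ->start() now tells the connection manager whether the host router should be reset, and the new optional ->deinit() runs after ->stop() but before the domain is unregistered, without @tb->lock held. A hedged sketch of a minimal ops table (names are illustrative; tb.c and icm.c are the real implementations):

/* Illustrative only. */
static int example_cm_start(struct tb *tb, bool reset)
{
	/* Optionally reset the host router when @reset is set, then scan. */
	return 0;
}

static void example_cm_stop(struct tb *tb)
{
}

static void example_cm_deinit(struct tb *tb)
{
	/* Cancel deferred work here; called without tb->lock held. */
}

static const struct tb_cm_ops example_cm_ops = {
	.start = example_cm_start,
	.stop = example_cm_stop,
	.deinit = example_cm_deinit,
};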
diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h
index 6f798f6a2b84..4e43b47f9f11 100644
--- a/drivers/thunderbolt/tb_regs.h
+++ b/drivers/thunderbolt/tb_regs.h
@@ -194,6 +194,8 @@ struct tb_regs_switch_header {
#define USB4_VERSION_MAJOR_MASK GENMASK(7, 5)
#define ROUTER_CS_1 0x01
+#define ROUTER_CS_3 0x03
+#define ROUTER_CS_3_V BIT(31)
#define ROUTER_CS_4 0x04
/* Used with the router cmuv field */
#define ROUTER_CS_4_CMUV_V1 0x10
@@ -389,6 +391,7 @@ struct tb_regs_port_header {
#define PORT_CS_18_CSA BIT(22)
#define PORT_CS_18_TIP BIT(24)
#define PORT_CS_19 0x13
+#define PORT_CS_19_DPR BIT(0)
#define PORT_CS_19_PC BIT(3)
#define PORT_CS_19_PID BIT(4)
#define PORT_CS_19_WOC BIT(16)
@@ -584,6 +587,9 @@ struct tb_regs_hop {
#define TB_LC_POWER 0x740
/* Link controller registers */
+#define TB_LC_PORT_MODE 0x26
+#define TB_LC_PORT_MODE_DPR BIT(0)
+
#define TB_LC_CS_42 0x2a
#define TB_LC_CS_42_USB_PLUGGED BIT(31)
diff --git a/drivers/thunderbolt/trace.h b/drivers/thunderbolt/trace.h
new file mode 100644
index 000000000000..4dccfcf7af6a
--- /dev/null
+++ b/drivers/thunderbolt/trace.h
@@ -0,0 +1,188 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Thunderbolt tracing support
+ *
+ * Copyright (C) 2024, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ * Gil Fine <gil.fine@intel.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM thunderbolt
+
+#if !defined(TB_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define TB_TRACE_H_
+
+#include <linux/trace_seq.h>
+#include <linux/tracepoint.h>
+
+#include "tb_msgs.h"
+
+#define tb_cfg_type_name(type) { type, #type }
+#define show_type_name(val) \
+ __print_symbolic(val, \
+ tb_cfg_type_name(TB_CFG_PKG_READ), \
+ tb_cfg_type_name(TB_CFG_PKG_WRITE), \
+ tb_cfg_type_name(TB_CFG_PKG_ERROR), \
+ tb_cfg_type_name(TB_CFG_PKG_NOTIFY_ACK), \
+ tb_cfg_type_name(TB_CFG_PKG_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_REQ), \
+ tb_cfg_type_name(TB_CFG_PKG_XDOMAIN_RESP), \
+ tb_cfg_type_name(TB_CFG_PKG_OVERRIDE), \
+ tb_cfg_type_name(TB_CFG_PKG_RESET), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_EVENT), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_CMD), \
+ tb_cfg_type_name(TB_CFG_PKG_ICM_RESP))
+
+#ifndef TB_TRACE_HELPERS
+#define TB_TRACE_HELPERS
+static inline const char *show_data_read_write(struct trace_seq *p,
+ const u32 *data)
+{
+ const struct cfg_read_pkg *msg = (const struct cfg_read_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "offset=%#x, len=%u, port=%d, config=%#x, seq=%d, ",
+ msg->addr.offset, msg->addr.length, msg->addr.port,
+ msg->addr.space, msg->addr.seq);
+
+ return ret;
+}
+
+static inline const char *show_data_error(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_error_pkg *msg = (const struct cfg_error_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "error=%#x, port=%d, plug=%#x, ", msg->error,
+ msg->port, msg->pg);
+
+ return ret;
+}
+
+static inline const char *show_data_event(struct trace_seq *p, const u32 *data)
+{
+ const struct cfg_event_pkg *msg = (const struct cfg_event_pkg *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "port=%d, unplug=%#x, ", msg->port, msg->unplug);
+
+ return ret;
+}
+
+static inline const char *show_route(struct trace_seq *p, const u32 *data)
+{
+ const struct tb_cfg_header *header = (const struct tb_cfg_header *)data;
+ const char *ret = trace_seq_buffer_ptr(p);
+
+ trace_seq_printf(p, "route=%llx, ", tb_cfg_get_route(header));
+
+ return ret;
+}
+
+static inline const char *show_data(struct trace_seq *p, u8 type,
+ const u32 *data, u32 length)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ const char *prefix = "";
+ int i;
+
+ show_route(p, data);
+
+ switch (type) {
+ case TB_CFG_PKG_READ:
+ case TB_CFG_PKG_WRITE:
+ show_data_read_write(p, data);
+ break;
+
+ case TB_CFG_PKG_ERROR:
+ show_data_error(p, data);
+ break;
+
+ case TB_CFG_PKG_EVENT:
+ show_data_event(p, data);
+ break;
+
+ default:
+ break;
+ }
+
+ trace_seq_printf(p, "data=[");
+ for (i = 0; i < length; i++) {
+ trace_seq_printf(p, "%s0x%08x", prefix, data[i]);
+ prefix = ", ";
+ }
+ trace_seq_printf(p, "]");
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+#endif
+
+DECLARE_EVENT_CLASS(tb_raw,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ ),
+ TP_printk("type=%s, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+DEFINE_EVENT(tb_raw, tb_tx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+DEFINE_EVENT(tb_raw, tb_event,
+ TP_PROTO(int index, u8 type, const void *data, size_t size),
+ TP_ARGS(index, type, data, size)
+);
+
+TRACE_EVENT(tb_rx,
+ TP_PROTO(int index, u8 type, const void *data, size_t size, bool dropped),
+ TP_ARGS(index, type, data, size, dropped),
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(u8, type)
+ __field(size_t, size)
+ __dynamic_array(u32, data, size / 4)
+ __field(bool, dropped)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->type = type;
+ __entry->size = size / 4;
+ memcpy(__get_dynamic_array(data), data, size);
+ __entry->dropped = dropped;
+ ),
+ TP_printk("type=%s, dropped=%u, size=%zd, domain=%d, %s",
+ show_type_name(__entry->type), __entry->dropped,
+ __entry->size, __entry->index,
+ show_data(p, __entry->type, __get_dynamic_array(data),
+ __entry->size)
+ )
+);
+
+#endif /* TB_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
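For reference, a minimal sketch of how these events would typically be emitted, assuming a hypothetical caller that is not part of this patch: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, and the events then take the domain index, packet type, raw payload and size declared in TP_PROTO above.

/* Hypothetical caller sketch, not part of this patch. */
#define CREATE_TRACE_POINTS
#include "trace.h"

static void example_emit_tx(int domain_index, u8 type,
			    const void *buf, size_t len)
{
	/* Raw TX frame as handed to the hardware. */
	trace_tb_tx(domain_index, type, buf, len);
}

static void example_emit_rx(int domain_index, u8 type,
			    const void *buf, size_t len, bool dropped)
{
	/* RX additionally records whether the frame was dropped. */
	trace_tb_rx(domain_index, type, buf, len, dropped);
}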
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index 6fffb2c82d3d..cb6609a56a03 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -706,7 +706,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel)
"DP OUT maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n",
out_rate, out_lanes, bw);
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
max_bw = tunnel->max_down;
else
max_bw = tunnel->max_up;
@@ -831,7 +831,7 @@ static int tb_dp_bandwidth_alloc_mode_enable(struct tb_tunnel *tunnel)
* max_up/down fields. For discovery we just read what the
* estimation was set to.
*/
- if (tb_port_path_direction_downstream(in, out))
+ if (tb_tunnel_direction_downstream(tunnel))
estimated_bw = tunnel->max_down;
else
estimated_bw = tunnel->max_up;
@@ -926,12 +926,18 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active)
return 0;
}
-/* max_bw is rounded up to next granularity */
+/**
+ * tb_dp_bandwidth_mode_maximum_bandwidth() - Maximum possible bandwidth
+ * @tunnel: DP tunnel to check
+ * @max_bw_rounded: Maximum bandwidth in Mb/s rounded up to the next granularity
+ *
+ * Returns maximum possible bandwidth for this tunnel in Mb/s.
+ */
static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
- int *max_bw)
+ int *max_bw_rounded)
{
struct tb_port *in = tunnel->src_port;
- int ret, rate, lanes, nrd_bw;
+ int ret, rate, lanes, max_bw;
u32 cap;
/*
@@ -947,41 +953,26 @@ static int tb_dp_bandwidth_mode_maximum_bandwidth(struct tb_tunnel *tunnel,
return ret;
rate = tb_dp_cap_get_rate_ext(cap);
- if (tb_dp_is_uhbr_rate(rate)) {
- /*
- * When UHBR is used there is no reduction in lanes so
- * we can use this directly.
- */
- lanes = tb_dp_cap_get_lanes(cap);
- } else {
- /*
- * If there is no UHBR supported then check the
- * non-reduced rate and lanes.
- */
- ret = usb4_dp_port_nrd(in, &rate, &lanes);
- if (ret)
- return ret;
- }
+ lanes = tb_dp_cap_get_lanes(cap);
- nrd_bw = tb_dp_bandwidth(rate, lanes);
+ max_bw = tb_dp_bandwidth(rate, lanes);
- if (max_bw) {
+ if (max_bw_rounded) {
ret = usb4_dp_port_granularity(in);
if (ret < 0)
return ret;
- *max_bw = roundup(nrd_bw, ret);
+ *max_bw_rounded = roundup(max_bw, ret);
}
- return nrd_bw;
+ return max_bw;
}
static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
int *consumed_up,
int *consumed_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
@@ -995,13 +986,13 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = allocated_bw;
} else {
@@ -1015,7 +1006,6 @@ static int tb_dp_bandwidth_mode_consumed_bandwidth(struct tb_tunnel *tunnel,
static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up,
int *allocated_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
/*
@@ -1023,20 +1013,21 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
* Otherwise we read it from the DPRX.
*/
if (usb4_dp_port_bandwidth_mode_enabled(in) && tunnel->bw_mode) {
- int ret, allocated_bw, max_bw;
+ int ret, allocated_bw, max_bw_rounded;
ret = usb4_dp_port_allocated_bandwidth(in);
if (ret < 0)
return ret;
allocated_bw = ret;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel,
+ &max_bw_rounded);
if (ret < 0)
return ret;
- if (allocated_bw == max_bw)
+ if (allocated_bw == max_bw_rounded)
allocated_bw = ret;
- if (tb_port_path_direction_downstream(in, out)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*allocated_up = 0;
*allocated_down = allocated_bw;
} else {
@@ -1053,26 +1044,25 @@ static int tb_dp_allocated_bandwidth(struct tb_tunnel *tunnel, int *allocated_up
static int tb_dp_alloc_bandwidth(struct tb_tunnel *tunnel, int *alloc_up,
int *alloc_down)
{
- struct tb_port *out = tunnel->dst_port;
struct tb_port *in = tunnel->src_port;
- int max_bw, ret, tmp;
+ int max_bw_rounded, ret, tmp;
if (!usb4_dp_port_bandwidth_mode_enabled(in))
return -EOPNOTSUPP;
- ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw);
+ ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, &max_bw_rounded);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, out)) {
- tmp = min(*alloc_down, max_bw);
+ if (tb_tunnel_direction_downstream(tunnel)) {
+ tmp = min(*alloc_down, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
*alloc_down = tmp;
*alloc_up = 0;
} else {
- tmp = min(*alloc_up, max_bw);
+ tmp = min(*alloc_up, max_bw_rounded);
ret = usb4_dp_port_allocate_bandwidth(in, tmp);
if (ret)
return ret;
@@ -1150,17 +1140,16 @@ static int tb_dp_read_cap(struct tb_tunnel *tunnel, unsigned int cap, u32 *rate,
static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
int *max_down)
{
- struct tb_port *in = tunnel->src_port;
int ret;
- if (!usb4_dp_port_bandwidth_mode_enabled(in))
+ if (!usb4_dp_port_bandwidth_mode_enabled(tunnel->src_port))
return -EOPNOTSUPP;
ret = tb_dp_bandwidth_mode_maximum_bandwidth(tunnel, NULL);
if (ret < 0)
return ret;
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*max_up = 0;
*max_down = ret;
} else {
@@ -1174,8 +1163,7 @@ static int tb_dp_maximum_bandwidth(struct tb_tunnel *tunnel, int *max_up,
static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
int *consumed_down)
{
- struct tb_port *in = tunnel->src_port;
- const struct tb_switch *sw = in->sw;
+ const struct tb_switch *sw = tunnel->src_port->sw;
u32 rate = 0, lanes = 0;
int ret;
@@ -1196,17 +1184,13 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
/*
* Then see if the DPRX negotiation is ready and if yes
* return that bandwidth (it may be smaller than the
- * reduced one). Otherwise return the remote (possibly
- * reduced) caps.
+ * reduced one). According to the VESA spec, the DPRX
+ * negotiation shall complete within 5 seconds after the
+ * tunnel is established. We give it 100ms extra just in case.
*/
- ret = tb_dp_wait_dprx(tunnel, 150);
- if (ret) {
- if (ret == -ETIMEDOUT)
- ret = tb_dp_read_cap(tunnel, DP_REMOTE_CAP,
- &rate, &lanes);
- if (ret)
- return ret;
- }
+ ret = tb_dp_wait_dprx(tunnel, 5100);
+ if (ret)
+ return ret;
ret = tb_dp_read_cap(tunnel, DP_COMMON_CAP, &rate, &lanes);
if (ret)
return ret;
@@ -1221,7 +1205,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
return 0;
}
- if (tb_port_path_direction_downstream(in, tunnel->dst_port)) {
+ if (tb_tunnel_direction_downstream(tunnel)) {
*consumed_up = 0;
*consumed_down = tb_dp_bandwidth(rate, lanes);
} else {
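The renamed @max_bw_rounded output above is the raw DPRX capability rounded up to the DP IN adapter's allocation granularity. A small sketch of that rounding with made-up numbers, neither of which comes from the patch:

#include <linux/math.h>

static int example_round_to_granularity(void)
{
	int max_bw = 25920;	/* assumed raw DPRX capability, Mb/s */
	int granularity = 250;	/* assumed usb4_dp_port_granularity() result */

	/* 25920 / 250 = 103.68, so the rounded value is 104 * 250 */
	return roundup(max_bw, granularity);	/* 26000 Mb/s */
}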
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index b4cff5482112..1a27ccd08b86 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -139,6 +139,12 @@ static inline bool tb_tunnel_is_usb3(const struct tb_tunnel *tunnel)
return tunnel->type == TB_TUNNEL_USB3;
}
+static inline bool tb_tunnel_direction_downstream(const struct tb_tunnel *tunnel)
+{
+ return tb_port_path_direction_downstream(tunnel->src_port,
+ tunnel->dst_port);
+}
+
const char *tb_tunnel_type_name(const struct tb_tunnel *tunnel);
#define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \
diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c
index 1515eff8cc3e..9860b49d7a2b 100644
--- a/drivers/thunderbolt/usb4.c
+++ b/drivers/thunderbolt/usb4.c
@@ -1113,6 +1113,45 @@ int usb4_port_hotplug_enable(struct tb_port *port)
return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1);
}
+/**
+ * usb4_port_reset() - Issue downstream port reset
+ * @port: USB4 port to reset
+ *
+ * Issues downstream port reset to @port.
+ */
+int usb4_port_reset(struct tb_port *port)
+{
+ int ret;
+ u32 val;
+
+ if (!port->cap_usb4)
+ return -EINVAL;
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val |= PORT_CS_19_DPR;
+
+ ret = tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ fsleep(10000);
+
+ ret = tb_port_read(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+ if (ret)
+ return ret;
+
+ val &= ~PORT_CS_19_DPR;
+
+ return tb_port_write(port, &val, TB_CFG_PORT,
+ port->cap_usb4 + PORT_CS_19, 1);
+}
+
static int usb4_port_set_configured(struct tb_port *port, bool configured)
{
int ret;
@@ -2819,8 +2858,10 @@ static int usb4_dp_port_wait_and_clear_cm_ack(struct tb_port *port,
usleep_range(50, 100);
} while (ktime_before(ktime_get(), end));
- if (val & ADP_DP_CS_8_DR)
+ if (val & ADP_DP_CS_8_DR) {
+ tb_port_warn(port, "timeout waiting for DPTX request to clear\n");
return -ETIMEDOUT;
+ }
ret = tb_port_read(port, &val, TB_CFG_PORT,
port->cap_adap + ADP_DP_CS_2, 1);
diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c
index e355bfd6343f..5150879888ca 100644
--- a/drivers/thunderbolt/usb4_port.c
+++ b/drivers/thunderbolt/usb4_port.c
@@ -243,7 +243,7 @@ static void usb4_port_device_release(struct device *dev)
kfree(usb4);
}
-struct device_type usb4_port_device_type = {
+const struct device_type usb4_port_device_type = {
.name = "usb4_port",
.groups = usb4_port_device_groups,
.release = usb4_port_device_release,
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 9495742913d5..940ae97987ff 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -997,12 +997,12 @@ static void tb_service_release(struct device *dev)
struct tb_xdomain *xd = tb_service_parent(svc);
tb_service_debugfs_remove(svc);
- ida_simple_remove(&xd->service_ids, svc->id);
+ ida_free(&xd->service_ids, svc->id);
kfree(svc->key);
kfree(svc);
}
-struct device_type tb_service_type = {
+const struct device_type tb_service_type = {
.name = "thunderbolt_service",
.groups = tb_service_attr_groups,
.uevent = tb_service_uevent,
@@ -1099,7 +1099,7 @@ static void enumerate_services(struct tb_xdomain *xd)
break;
}
- id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
+ id = ida_alloc(&xd->service_ids, GFP_KERNEL);
if (id < 0) {
kfree(svc->key);
kfree(svc);
@@ -1791,13 +1791,13 @@ static ssize_t rx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 3;
break;
default:
@@ -1817,13 +1817,13 @@ static ssize_t tx_lanes_show(struct device *dev, struct device_attribute *attr,
switch (xd->link_width) {
case TB_LINK_WIDTH_SINGLE:
- case TB_LINK_WIDTH_ASYM_TX:
+ case TB_LINK_WIDTH_ASYM_RX:
width = 1;
break;
case TB_LINK_WIDTH_DUAL:
width = 2;
break;
- case TB_LINK_WIDTH_ASYM_RX:
+ case TB_LINK_WIDTH_ASYM_TX:
width = 3;
break;
default:
@@ -1893,7 +1893,7 @@ static const struct dev_pm_ops tb_xdomain_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tb_xdomain_suspend, tb_xdomain_resume)
};
-struct device_type tb_xdomain_type = {
+const struct device_type tb_xdomain_type = {
.name = "thunderbolt_xdomain",
.release = tb_xdomain_release,
.pm = &tb_xdomain_pm_ops,
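The ida_simple_*() calls replaced above are the deprecated wrappers around the plain IDA API: ida_simple_get(ida, 0, 0, gfp) is equivalent to ida_alloc(ida, gfp), and ida_simple_remove() to ida_free(). A minimal sketch of the replacement pattern with placeholder names:

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ids);

static int example_alloc_id(void)
{
	/* Smallest free ID >= 0, or a negative errno. */
	return ida_alloc(&example_ids, GFP_KERNEL);
}

static void example_free_id(int id)
{
	/* Hand the ID back for reuse. */
	ida_free(&example_ids, id);
}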
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 5646dc6242cd..a45d423ad10f 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -75,14 +75,9 @@ config VT_CONSOLE_SLEEP
def_bool y
depends on VT_CONSOLE && PM_SLEEP
-config HW_CONSOLE
- bool
- depends on VT
- default y
-
config VT_HW_CONSOLE_BINDING
bool "Support for binding and unbinding console drivers"
- depends on HW_CONSOLE
+ depends on VT
help
The virtual terminal is the device that interacts with the physical
terminal through console drivers. On these systems, at least one
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 7716ce0d35bc..e27360652d9b 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1566,7 +1566,7 @@ fail_tty_driver_kref_put:
return error;
}
-static int __exit amiga_serial_remove(struct platform_device *pdev)
+static void __exit amiga_serial_remove(struct platform_device *pdev)
{
struct serial_state *state = platform_get_drvdata(pdev);
@@ -1576,12 +1576,10 @@ static int __exit amiga_serial_remove(struct platform_device *pdev)
free_irq(IRQ_AMIGA_TBE, state);
free_irq(IRQ_AMIGA_RBF, state);
-
- return 0;
}
static struct platform_driver amiga_serial_driver = {
- .remove = __exit_p(amiga_serial_remove),
+ .remove_new = __exit_p(amiga_serial_remove),
.driver = {
.name = "amiga-serial",
},
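This hunk, like the goldfish_tty one below, is part of the tree-wide move from the int-returning .remove() platform callback to the void-returning .remove_new(): the driver core ignored the return value anyway, so cleanup paths simply stop pretending they can fail. A stripped-down sketch of the pattern with placeholder names:

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static void example_remove(struct platform_device *pdev)
{
	/* Cleanup only; no return value to propagate. */
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.remove_new = example_remove,	/* was .remove returning int */
	.driver = {
		.name = "example-uart",
	},
};
module_platform_driver(example_driver);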
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index d27979eabfdf..34421ec06a69 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -408,7 +408,7 @@ err_unmap:
return ret;
}
-static int goldfish_tty_remove(struct platform_device *pdev)
+static void goldfish_tty_remove(struct platform_device *pdev)
{
struct goldfish_tty *qtty = platform_get_drvdata(pdev);
@@ -424,7 +424,6 @@ static int goldfish_tty_remove(struct platform_device *pdev)
if (goldfish_tty_current_line_count == 0)
goldfish_tty_delete_driver();
mutex_unlock(&goldfish_tty_lock);
- return 0;
}
#ifdef CONFIG_GOLDFISH_TTY_EARLY_CONSOLE
@@ -462,7 +461,7 @@ MODULE_DEVICE_TABLE(of, goldfish_tty_of_match);
static struct platform_driver goldfish_tty_platform_driver = {
.probe = goldfish_tty_probe,
- .remove = goldfish_tty_remove,
+ .remove_new = goldfish_tty_remove,
.driver = {
.name = "goldfish_tty",
.of_match_table = goldfish_tty_of_match,
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index fdecc0d63731..b1149bc62ca1 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -1035,6 +1035,10 @@ static const struct attribute_group *hvc_iucv_dev_attr_groups[] = {
NULL,
};
+static void hvc_iucv_free(struct device *data)
+{
+ kfree(data);
+}
/**
* hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
@@ -1097,7 +1101,7 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console)
priv->dev->bus = &iucv_bus;
priv->dev->parent = iucv_root;
priv->dev->groups = hvc_iucv_dev_attr_groups;
- priv->dev->release = (void (*)(struct device *)) kfree;
+ priv->dev->release = hvc_iucv_free;
rc = device_register(priv->dev);
if (rc) {
put_device(priv->dev);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 822a5cd05566..613cb356b918 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -431,7 +431,7 @@ static void serdev_drv_remove(struct device *dev)
dev_pm_domain_detach(dev, true);
}
-static struct bus_type serdev_bus_type = {
+static const struct bus_type serdev_bus_type = {
.name = "serial",
.match = serdev_device_match,
.probe = serdev_drv_probe,
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index e94e090cf0a1..3d7ae7fa5018 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -27,19 +27,17 @@ static size_t ttyport_receive_buf(struct tty_port *port, const u8 *cp,
{
struct serdev_controller *ctrl = port->client_data;
struct serport *serport = serdev_controller_get_drvdata(ctrl);
- int ret;
+ size_t ret;
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
return 0;
ret = serdev_controller_receive_buf(ctrl, cp, count);
- dev_WARN_ONCE(&ctrl->dev, ret < 0 || ret > count,
- "receive_buf returns %d (count = %zu)\n",
+ dev_WARN_ONCE(&ctrl->dev, ret > count,
+ "receive_buf returns %zu (count = %zu)\n",
ret, count);
- if (ret < 0)
- return 0;
- else if (ret > count)
+ if (ret > count)
return count;
return ret;
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index 8c2aaf7af7b7..53d8eee9b1c8 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -419,8 +419,8 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
struct aspeed_vuart *vuart;
struct device_node *np;
struct resource *res;
- u32 clk, prop, sirq[2];
int rc, sirq_polarity;
+ u32 prop, sirq[2];
struct clk *vclk;
np = pdev->dev.of_node;
@@ -447,53 +447,35 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.status = UPSTAT_SYNC_FIFO;
port.port.dev = &pdev->dev;
port.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
+ port.port.flags = UPF_BOOT_AUTOCONF | UPF_IOREMAP | UPF_FIXED_PORT | UPF_FIXED_TYPE |
+ UPF_NO_THRE_TEST;
port.bugs |= UART_BUG_TXRACE;
rc = sysfs_create_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
if (rc < 0)
return rc;
- if (of_property_read_u32(np, "clock-frequency", &clk)) {
+ rc = uart_read_port_properties(&port.port);
+ if (rc)
+ goto err_sysfs_remove;
+
+ /* Get clk rate through clk driver if present */
+ if (!port.port.uartclk) {
vclk = devm_clk_get_enabled(dev, NULL);
if (IS_ERR(vclk)) {
rc = dev_err_probe(dev, PTR_ERR(vclk), "clk or clock-frequency not defined\n");
goto err_sysfs_remove;
}
- clk = clk_get_rate(vclk);
+ port.port.uartclk = clk_get_rate(vclk);
}
/* If current-speed was set, then try not to change it. */
if (of_property_read_u32(np, "current-speed", &prop) == 0)
- port.port.custom_divisor = clk / (16 * prop);
-
- /* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0)
- port.port.mapbase += prop;
-
- /* Check for registers offset within the devices address range */
- if (of_property_read_u32(np, "reg-shift", &prop) == 0)
- port.port.regshift = prop;
+ port.port.custom_divisor = port.port.uartclk / (16 * prop);
- /* Check for fifo size */
- if (of_property_read_u32(np, "fifo-size", &prop) == 0)
- port.port.fifosize = prop;
-
- /* Check for a fixed line number */
- rc = of_alias_get_id(np, "serial");
- if (rc >= 0)
- port.port.line = rc;
-
- port.port.irq = irq_of_parse_and_map(np, 0);
port.port.handle_irq = aspeed_vuart_handle_irq;
- port.port.iotype = UPIO_MEM;
port.port.type = PORT_ASPEED_VUART;
- port.port.uartclk = clk;
- port.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_IOREMAP
- | UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_NO_THRE_TEST;
-
- if (of_property_read_bool(np, "no-loopback-test"))
- port.port.flags |= UPF_SKIP_TEST;
if (port.port.fifosize)
port.capabilities = UART_CAP_FIFO;
@@ -503,7 +485,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = serial8250_register_8250_port(&port);
if (rc < 0)
- goto err_clk_disable;
+ goto err_sysfs_remove;
vuart->line = rc;
vuart->port = serial8250_get_port(vuart->line);
@@ -529,7 +511,7 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = aspeed_vuart_set_lpc_address(vuart, prop);
if (rc < 0) {
dev_err_probe(dev, rc, "invalid value in aspeed,lpc-io-reg property\n");
- goto err_clk_disable;
+ goto err_sysfs_remove;
}
rc = of_property_read_u32_array(np, "aspeed,lpc-interrupts", sirq, 2);
@@ -541,14 +523,14 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
rc = aspeed_vuart_set_sirq(vuart, sirq[0]);
if (rc < 0) {
dev_err_probe(dev, rc, "invalid sirq number in aspeed,lpc-interrupts property\n");
- goto err_clk_disable;
+ goto err_sysfs_remove;
}
sirq_polarity = aspeed_vuart_map_irq_polarity(sirq[1]);
if (sirq_polarity < 0) {
rc = dev_err_probe(dev, sirq_polarity,
"invalid sirq polarity in aspeed,lpc-interrupts property\n");
- goto err_clk_disable;
+ goto err_sysfs_remove;
}
aspeed_vuart_set_sirq_polarity(vuart, sirq_polarity);
@@ -559,8 +541,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
return 0;
-err_clk_disable:
- irq_dispose_mapping(port.port.irq);
err_sysfs_remove:
sysfs_remove_group(&vuart->dev->kobj, &aspeed_vuart_attr_group);
return rc;
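Several of the 8250 probe conversions in this series (aspeed_vuart above; bcm2835aux, bcm7271, dw, ingenic, lpc18xx, of and omap below) follow the same recipe: the hand-rolled parsing of clock-frequency, reg-shift, reg-offset, reg-io-width, fifo-size, interrupts and serial aliases is delegated to uart_read_port_properties(), and the driver only fills in what the generic helper cannot know. A condensed sketch of the resulting probe shape, with placeholder names and trimmed error handling:

#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>

static int example_8250_probe(struct platform_device *pdev)
{
	struct uart_8250_port up = { };
	int ret;

	up.port.dev = &pdev->dev;
	up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;

	/* Derives iotype, regshift, fifosize, irq, uartclk, line, ... from
	 * firmware-provided device properties (DT, ACPI or software nodes).
	 */
	ret = uart_read_port_properties(&up.port);
	if (ret)
		return ret;

	/* Driver-specific knowledge still goes on top of the defaults. */
	if (!up.port.uartclk)
		up.port.uartclk = 1843200;	/* placeholder fallback */

	ret = serial8250_register_8250_port(&up);
	return ret < 0 ? ret : 0;
}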
diff --git a/drivers/tty/serial/8250/8250_bcm2835aux.c b/drivers/tty/serial/8250/8250_bcm2835aux.c
index beac6b340ace..121a5ce86050 100644
--- a/drivers/tty/serial/8250/8250_bcm2835aux.c
+++ b/drivers/tty/serial/8250/8250_bcm2835aux.c
@@ -45,10 +45,6 @@ struct bcm2835aux_data {
u32 cntl;
};
-struct bcm2835_aux_serial_driver_data {
- resource_size_t offset;
-};
-
static void bcm2835aux_rs485_start_tx(struct uart_8250_port *up)
{
if (!(up->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
@@ -85,10 +81,9 @@ static void bcm2835aux_rs485_stop_tx(struct uart_8250_port *up)
static int bcm2835aux_serial_probe(struct platform_device *pdev)
{
- const struct bcm2835_aux_serial_driver_data *bcm_data;
+ const struct software_node *bcm2835_swnode;
struct uart_8250_port up = { };
struct bcm2835aux_data *data;
- resource_size_t offset = 0;
struct resource *res;
unsigned int uartclk;
int ret;
@@ -101,12 +96,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
/* initialize data */
up.capabilities = UART_CAP_FIFO | UART_CAP_MINI;
up.port.dev = &pdev->dev;
- up.port.regshift = 2;
up.port.type = PORT_16550;
- up.port.iotype = UPIO_MEM;
- up.port.fifosize = 8;
- up.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT | UPF_FIXED_TYPE |
- UPF_SKIP_TEST | UPF_IOREMAP;
+ up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST | UPF_IOREMAP;
up.port.rs485_config = serial8250_em485_config;
up.port.rs485_supported = serial8250_em485_supported;
up.rs485_start_tx = bcm2835aux_rs485_start_tx;
@@ -122,12 +113,6 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
if (IS_ERR(data->clk))
return dev_err_probe(&pdev->dev, PTR_ERR(data->clk), "could not get clk\n");
- /* get the interrupt */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
- up.port.irq = ret;
-
/* map the main registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -135,52 +120,40 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
return -EINVAL;
}
- bcm_data = device_get_match_data(&pdev->dev);
-
- /* Some UEFI implementations (e.g. tianocore/edk2 for the Raspberry Pi)
- * describe the miniuart with a base address that encompasses the auxiliary
- * registers shared between the miniuart and spi.
- *
- * This is due to historical reasons, see discussion here :
- * https://edk2.groups.io/g/devel/topic/87501357#84349
- *
- * We need to add the offset between the miniuart and auxiliary
- * registers to get the real miniuart base address.
- */
- if (bcm_data)
- offset = bcm_data->offset;
+ up.port.mapbase = res->start;
+ up.port.mapsize = resource_size(res);
+
+ bcm2835_swnode = device_get_match_data(&pdev->dev);
+ if (bcm2835_swnode) {
+ ret = device_add_software_node(&pdev->dev, bcm2835_swnode);
+ if (ret)
+ return ret;
+ }
- up.port.mapbase = res->start + offset;
- up.port.mapsize = resource_size(res) - offset;
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
+ goto rm_swnode;
- /* Check for a fixed line number */
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- up.port.line = ret;
+ up.port.regshift = 2;
+ up.port.fifosize = 8;
/* enable the clock as a last step */
ret = clk_prepare_enable(data->clk);
if (ret) {
- dev_err(&pdev->dev, "unable to enable uart clock - %d\n",
- ret);
- return ret;
+ dev_err_probe(&pdev->dev, ret, "unable to enable uart clock\n");
+ goto rm_swnode;
}
uartclk = clk_get_rate(data->clk);
- if (!uartclk) {
- ret = device_property_read_u32(&pdev->dev, "clock-frequency", &uartclk);
- if (ret) {
- dev_err_probe(&pdev->dev, ret, "could not get clk rate\n");
- goto dis_clk;
- }
- }
+ if (uartclk)
+ up.port.uartclk = uartclk;
/* the HW-clock divider for bcm2835aux is 8,
* but 8250 expects a divider of 16,
* so we have to multiply the actual clock by 2
* to get identical baudrates.
*/
- up.port.uartclk = uartclk * 2;
+ up.port.uartclk *= 2;
/* register the port */
ret = serial8250_register_8250_port(&up);
@@ -194,6 +167,8 @@ static int bcm2835aux_serial_probe(struct platform_device *pdev)
dis_clk:
clk_disable_unprepare(data->clk);
+rm_swnode:
+ device_remove_software_node(&pdev->dev);
return ret;
}
@@ -203,10 +178,27 @@ static void bcm2835aux_serial_remove(struct platform_device *pdev)
serial8250_unregister_port(data->line);
clk_disable_unprepare(data->clk);
+ device_remove_software_node(&pdev->dev);
}
-static const struct bcm2835_aux_serial_driver_data bcm2835_acpi_data = {
- .offset = 0x40,
+/*
+ * Some UEFI implementations (e.g. tianocore/edk2 for the Raspberry Pi)
+ * describe the miniuart with a base address that encompasses the auxiliary
+ * registers shared between the miniuart and spi.
+ *
+ * This is due to historical reasons, see discussion here:
+ * https://edk2.groups.io/g/devel/topic/87501357#84349
+ *
+ * We need to add the offset between the miniuart and auxiliary registers
+ * to get the real miniuart base address.
+ */
+static const struct property_entry bcm2835_acpi_properties[] = {
+ PROPERTY_ENTRY_U32("reg-offset", 0x40),
+ { }
+};
+
+static const struct software_node bcm2835_acpi_node = {
+ .properties = bcm2835_acpi_properties,
};
static const struct of_device_id bcm2835aux_serial_match[] = {
@@ -216,7 +208,7 @@ static const struct of_device_id bcm2835aux_serial_match[] = {
MODULE_DEVICE_TABLE(of, bcm2835aux_serial_match);
static const struct acpi_device_id bcm2835aux_serial_acpi_match[] = {
- { "BCM2836", (kernel_ulong_t)&bcm2835_acpi_data },
+ { "BCM2836", (kernel_ulong_t)&bcm2835_acpi_node },
{ }
};
MODULE_DEVICE_TABLE(acpi, bcm2835aux_serial_acpi_match);
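The ACPI quirk now rides on a software node instead of a private driver_data struct: attaching bcm2835_acpi_node to the device exposes reg-offset through the unified device-property API, so the 0x40 correction can be picked up by the same generic property parsing that a DT reg-offset would go through. A minimal sketch of the mechanism with generic names:

#include <linux/device.h>
#include <linux/property.h>

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_U32("reg-offset", 0x40),
	{ }
};

static const struct software_node example_node = {
	.properties = example_props,
};

static int example_attach(struct device *dev)
{
	/* After this, device_property_read_u32(dev, "reg-offset", &val)
	 * reports 0x40 exactly as if the firmware had provided it.
	 */
	return device_add_software_node(dev, &example_node);
}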
diff --git a/drivers/tty/serial/8250/8250_bcm7271.c b/drivers/tty/serial/8250/8250_bcm7271.c
index 504c4c020857..5daa38d9c64e 100644
--- a/drivers/tty/serial/8250/8250_bcm7271.c
+++ b/drivers/tty/serial/8250/8250_bcm7271.c
@@ -22,6 +22,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
+#include <linux/units.h>
#include "8250.h"
@@ -187,21 +188,19 @@
#define TX_BUF_SIZE 4096
#define RX_BUF_SIZE 4096
#define RX_BUFS_COUNT 2
-#define KHZ 1000
-#define MHZ(x) ((x) * KHZ * KHZ)
static const u32 brcmstb_rate_table[] = {
- MHZ(81),
- MHZ(108),
- MHZ(64), /* Actually 64285715 for some chips */
- MHZ(48),
+ 81 * HZ_PER_MHZ,
+ 108 * HZ_PER_MHZ,
+ 64 * HZ_PER_MHZ, /* Actually 64285715 for some chips */
+ 48 * HZ_PER_MHZ,
};
static const u32 brcmstb_rate_table_7278[] = {
- MHZ(81),
- MHZ(108),
+ 81 * HZ_PER_MHZ,
+ 108 * HZ_PER_MHZ,
0,
- MHZ(48),
+ 48 * HZ_PER_MHZ,
};
struct brcmuart_priv {
@@ -936,17 +935,14 @@ static void brcmuart_init_debugfs(struct brcmuart_priv *priv,
static int brcmuart_probe(struct platform_device *pdev)
{
struct resource *regs;
- struct device_node *np = pdev->dev.of_node;
const struct of_device_id *of_id = NULL;
struct uart_8250_port *new_port;
struct device *dev = &pdev->dev;
struct brcmuart_priv *priv;
struct clk *baud_mux_clk;
struct uart_8250_port up;
- int irq;
void __iomem *membase = NULL;
resource_size_t mapbase = 0;
- u32 clk_rate = 0;
int ret;
int x;
int dma_irq;
@@ -954,15 +950,12 @@ static int brcmuart_probe(struct platform_device *pdev)
"uart", "dma_rx", "dma_tx", "dma_intr2", "dma_arb"
};
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
priv = devm_kzalloc(dev, sizeof(struct brcmuart_priv),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
- of_id = of_match_node(brcmuart_dt_ids, np);
+ of_id = of_match_node(brcmuart_dt_ids, dev->of_node);
if (!of_id || !of_id->data)
priv->rate_table = brcmstb_rate_table;
else
@@ -1012,7 +1005,23 @@ static int brcmuart_probe(struct platform_device *pdev)
}
}
- of_property_read_u32(np, "clock-frequency", &clk_rate);
+ dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
+
+ memset(&up, 0, sizeof(up));
+ up.port.type = PORT_BCM7271;
+ up.port.dev = dev;
+ up.port.mapbase = mapbase;
+ up.port.membase = membase;
+ up.port.handle_irq = brcmuart_handle_irq;
+ up.port.flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
+ up.port.private_data = priv;
+
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
+ goto release_dma;
+
+ up.port.regshift = 2;
+ up.port.iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
/* See if a Baud clock has been specified */
baud_mux_clk = devm_clk_get_optional_enabled(dev, "sw_baud");
@@ -1024,39 +1033,11 @@ static int brcmuart_probe(struct platform_device *pdev)
priv->baud_mux_clk = baud_mux_clk;
init_real_clk_rates(dev, priv);
- clk_rate = priv->default_mux_rate;
+ up.port.uartclk = priv->default_mux_rate;
} else {
dev_dbg(dev, "BAUD MUX clock not specified\n");
}
- if (clk_rate == 0) {
- ret = dev_err_probe(dev, -EINVAL, "clock-frequency or clk not defined\n");
- goto release_dma;
- }
-
- dev_dbg(dev, "DMA is %senabled\n", priv->dma_enabled ? "" : "not ");
-
- memset(&up, 0, sizeof(up));
- up.port.type = PORT_BCM7271;
- up.port.uartclk = clk_rate;
- up.port.dev = dev;
- up.port.mapbase = mapbase;
- up.port.membase = membase;
- up.port.irq = irq;
- up.port.handle_irq = brcmuart_handle_irq;
- up.port.regshift = 2;
- up.port.iotype = of_device_is_big_endian(np) ?
- UPIO_MEM32BE : UPIO_MEM32;
- up.port.flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF
- | UPF_FIXED_PORT | UPF_FIXED_TYPE;
- up.port.dev = dev;
- up.port.private_data = priv;
-
- /* Check for a fixed line number */
- ret = of_alias_get_id(np, "serial");
- if (ret >= 0)
- up.port.line = ret;
-
/* setup HR timer */
hrtimer_init(&priv->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
priv->hrt.function = brcmuart_hrtimer_func;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index c1d43f040c43..a3acbf0f5da1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -9,7 +9,6 @@
* LCR is written whilst busy. If it is, then a busy detect interrupt is
* raised, the LCR needs to be rewritten and the uart status register read.
*/
-#include <linux/acpi.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -17,7 +16,6 @@
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/notifier.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
@@ -56,6 +54,7 @@
#define DW_UART_QUIRK_ARMADA_38X BIT(1)
#define DW_UART_QUIRK_SKIP_SET_RATE BIT(2)
#define DW_UART_QUIRK_IS_DMA_FC BIT(3)
+#define DW_UART_QUIRK_APMC0D08 BIT(4)
static inline struct dw8250_data *clk_to_dw8250_data(struct notifier_block *nb)
{
@@ -445,44 +444,29 @@ static void dw8250_prepare_rx_dma(struct uart_8250_port *p)
static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
{
- struct device_node *np = p->dev->of_node;
+ unsigned int quirks = data->pdata ? data->pdata->quirks : 0;
- if (np) {
- unsigned int quirks = data->pdata->quirks;
- int id;
-
- /* get index of serial line, if found in DT aliases */
- id = of_alias_get_id(np, "serial");
- if (id >= 0)
- p->line = id;
#ifdef CONFIG_64BIT
- if (quirks & DW_UART_QUIRK_OCTEON) {
- p->serial_in = dw8250_serial_inq;
- p->serial_out = dw8250_serial_outq;
- p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
- p->type = PORT_OCTEON;
- data->skip_autocfg = true;
- }
+ if (quirks & DW_UART_QUIRK_OCTEON) {
+ p->serial_in = dw8250_serial_inq;
+ p->serial_out = dw8250_serial_outq;
+ p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
+ p->type = PORT_OCTEON;
+ data->skip_autocfg = true;
+ }
#endif
- if (of_device_is_big_endian(np)) {
- p->iotype = UPIO_MEM32BE;
- p->serial_in = dw8250_serial_in32be;
- p->serial_out = dw8250_serial_out32be;
- }
-
- if (quirks & DW_UART_QUIRK_ARMADA_38X)
- p->serial_out = dw8250_serial_out38x;
- if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
- p->set_termios = dw8250_do_set_termios;
- if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
- data->data.dma.txconf.device_fc = 1;
- data->data.dma.rxconf.device_fc = 1;
- data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
- data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
- }
-
- } else if (acpi_dev_present("APMC0D08", NULL, -1)) {
+ if (quirks & DW_UART_QUIRK_ARMADA_38X)
+ p->serial_out = dw8250_serial_out38x;
+ if (quirks & DW_UART_QUIRK_SKIP_SET_RATE)
+ p->set_termios = dw8250_do_set_termios;
+ if (quirks & DW_UART_QUIRK_IS_DMA_FC) {
+ data->data.dma.txconf.device_fc = 1;
+ data->data.dma.rxconf.device_fc = 1;
+ data->data.dma.prepare_tx_dma = dw8250_prepare_tx_dma;
+ data->data.dma.prepare_rx_dma = dw8250_prepare_rx_dma;
+ }
+ if (quirks & DW_UART_QUIRK_APMC0D08) {
p->iotype = UPIO_MEM32;
p->regshift = 2;
p->serial_in = dw8250_serial_in32;
@@ -510,39 +494,21 @@ static int dw8250_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct dw8250_data *data;
struct resource *regs;
- int irq;
int err;
- u32 val;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return dev_err_probe(dev, -EINVAL, "no registers defined\n");
- irq = platform_get_irq_optional(pdev, 0);
- /* no interrupt -> fall back to polling */
- if (irq == -ENXIO)
- irq = 0;
- if (irq < 0)
- return irq;
-
spin_lock_init(&p->lock);
- p->mapbase = regs->start;
- p->irq = irq;
p->handle_irq = dw8250_handle_irq;
p->pm = dw8250_do_pm;
p->type = PORT_8250;
- p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
+ p->flags = UPF_FIXED_PORT;
p->dev = dev;
- p->iotype = UPIO_MEM;
- p->serial_in = dw8250_serial_in;
- p->serial_out = dw8250_serial_out;
p->set_ldisc = dw8250_set_ldisc;
p->set_termios = dw8250_set_termios;
- p->membase = devm_ioremap(dev, regs->start, resource_size(regs));
- if (!p->membase)
- return -ENOMEM;
-
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -554,15 +520,35 @@ static int dw8250_probe(struct platform_device *pdev)
data->uart_16550_compatible = device_property_read_bool(dev,
"snps,uart-16550-compatible");
- err = device_property_read_u32(dev, "reg-shift", &val);
- if (!err)
- p->regshift = val;
+ p->mapbase = regs->start;
+ p->mapsize = resource_size(regs);
- err = device_property_read_u32(dev, "reg-io-width", &val);
- if (!err && val == 4) {
- p->iotype = UPIO_MEM32;
+ p->membase = devm_ioremap(dev, p->mapbase, p->mapsize);
+ if (!p->membase)
+ return -ENOMEM;
+
+ err = uart_read_port_properties(p);
+ /* no interrupt -> fall back to polling */
+ if (err == -ENXIO)
+ err = 0;
+ if (err)
+ return err;
+
+ switch (p->iotype) {
+ case UPIO_MEM:
+ p->serial_in = dw8250_serial_in;
+ p->serial_out = dw8250_serial_out;
+ break;
+ case UPIO_MEM32:
p->serial_in = dw8250_serial_in32;
p->serial_out = dw8250_serial_out32;
+ break;
+ case UPIO_MEM32BE:
+ p->serial_in = dw8250_serial_in32be;
+ p->serial_out = dw8250_serial_out32be;
+ break;
+ default:
+ return -ENODEV;
}
if (device_property_read_bool(dev, "dcd-override")) {
@@ -589,15 +575,13 @@ static int dw8250_probe(struct platform_device *pdev)
data->msr_mask_off |= UART_MSR_TERI;
}
- /* Always ask for fixed clock rate from a property. */
- device_property_read_u32(dev, "clock-frequency", &p->uartclk);
-
/* If there is separate baudclk, get the rate from it. */
data->clk = devm_clk_get_optional_enabled(dev, "baudclk");
if (data->clk == NULL)
data->clk = devm_clk_get_optional_enabled(dev, NULL);
if (IS_ERR(data->clk))
- return PTR_ERR(data->clk);
+ return dev_err_probe(dev, PTR_ERR(data->clk),
+ "failed to get baudclk\n");
INIT_WORK(&data->clk_work, dw8250_clk_work_cb);
data->clk_notifier.notifier_call = dw8250_clk_notifier_cb;
@@ -762,13 +746,18 @@ static const struct of_device_id dw8250_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dw8250_of_match);
+static const struct dw8250_platform_data dw8250_apmc0d08 = {
+ .usr_reg = DW_UART_USR,
+ .quirks = DW_UART_QUIRK_APMC0D08,
+};
+
static const struct acpi_device_id dw8250_acpi_match[] = {
{ "80860F0A", (kernel_ulong_t)&dw8250_dw_apb },
{ "8086228A", (kernel_ulong_t)&dw8250_dw_apb },
{ "AMD0020", (kernel_ulong_t)&dw8250_dw_apb },
{ "AMDI0020", (kernel_ulong_t)&dw8250_dw_apb },
{ "AMDI0022", (kernel_ulong_t)&dw8250_dw_apb },
- { "APMC0D08", (kernel_ulong_t)&dw8250_dw_apb},
+ { "APMC0D08", (kernel_ulong_t)&dw8250_apmc0d08 },
{ "BRCM2032", (kernel_ulong_t)&dw8250_dw_apb },
{ "HISI0031", (kernel_ulong_t)&dw8250_dw_apb },
{ "INT33C4", (kernel_ulong_t)&dw8250_dw_apb },
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 23366f868ae3..0440df7de1ed 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -6,23 +6,29 @@
*
* Copyright (C) 2017 Sudip Mukherjee, All Rights Reserved.
*/
-#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
#include <linux/dmi.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/kernel.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/property.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/tty.h>
-#include <linux/delay.h>
#include <asm/byteorder.h>
#include "8250.h"
+#include "8250_pcilib.h"
#define PCI_DEVICE_ID_ACCESSIO_COM_2S 0x1052
#define PCI_DEVICE_ID_ACCESSIO_COM_4S 0x105d
@@ -229,13 +235,12 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
struct uart_8250_port *port)
{
const struct exar8250_board *board = priv->board;
- unsigned int bar = 0;
unsigned char status;
+ int err;
- port->port.iotype = UPIO_MEM;
- port->port.mapbase = pci_resource_start(pcidev, bar) + offset;
- port->port.membase = priv->virt + offset;
- port->port.regshift = board->reg_shift;
+ err = serial8250_pci_setup_port(pcidev, port, 0, offset, board->reg_shift);
+ if (err)
+ return err;
/*
* XR17V35x UARTs have an extra divisor register, DLD that gets enabled
@@ -375,7 +380,7 @@ static struct platform_device *__xr17v35x_register_gpio(struct pci_dev *pcidev,
return NULL;
pdev->dev.parent = &pcidev->dev;
- ACPI_COMPANION_SET(&pdev->dev, ACPI_COMPANION(&pcidev->dev));
+ device_set_node(&pdev->dev, dev_fwnode(&pcidev->dev));
if (device_add_software_node(&pdev->dev, node) < 0 ||
platform_device_add(pdev) < 0) {
@@ -713,14 +718,14 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
uart.port.irq = pci_irq_vector(pcidev, 0);
uart.port.dev = &pcidev->dev;
+ /* Clear interrupts */
+ exar_misc_clear(priv);
+
rc = devm_request_irq(&pcidev->dev, uart.port.irq, exar_misc_handler,
IRQF_SHARED, "exar_uart", priv);
if (rc)
return rc;
- /* Clear interrupts */
- exar_misc_clear(priv);
-
for (i = 0; i < nr_ports && i < maxnr; i++) {
rc = board->setup(priv, pcidev, &uart, i);
if (rc) {
@@ -753,28 +758,24 @@ static void exar_pci_remove(struct pci_dev *pcidev)
for (i = 0; i < priv->nr; i++)
serial8250_unregister_port(priv->line[i]);
+ /* Ensure that every init quirk is properly torn down */
if (priv->board->exit)
priv->board->exit(pcidev);
}
-static int __maybe_unused exar_suspend(struct device *dev)
+static int exar_suspend(struct device *dev)
{
- struct pci_dev *pcidev = to_pci_dev(dev);
- struct exar8250 *priv = pci_get_drvdata(pcidev);
+ struct exar8250 *priv = dev_get_drvdata(dev);
unsigned int i;
for (i = 0; i < priv->nr; i++)
if (priv->line[i] >= 0)
serial8250_suspend_port(priv->line[i]);
- /* Ensure that every init quirk is properly torn down */
- if (priv->board->exit)
- priv->board->exit(pcidev);
-
return 0;
}
-static int __maybe_unused exar_resume(struct device *dev)
+static int exar_resume(struct device *dev)
{
struct exar8250 *priv = dev_get_drvdata(dev);
unsigned int i;
@@ -788,7 +789,7 @@ static int __maybe_unused exar_resume(struct device *dev)
return 0;
}
-static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
static const struct exar8250_board pbn_fastcom335_2 = {
.num_ports = 2,
@@ -938,12 +939,13 @@ static struct pci_driver exar_pci_driver = {
.probe = exar_pci_probe,
.remove = exar_pci_remove,
.driver = {
- .pm = &exar_pci_pm,
+ .pm = pm_sleep_ptr(&exar_pci_pm),
},
.id_table = exar_pci_tbl,
};
module_pci_driver(exar_pci_driver);
+MODULE_IMPORT_NS(SERIAL_8250_PCI);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Exar Serial Driver");
MODULE_AUTHOR("Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>");
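The PM rework above follows the current idiom: DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() let the suspend/resume callbacks drop their __maybe_unused annotations, because the ops structure and its references are discarded when CONFIG_PM_SLEEP is disabled. A stripped-down sketch with placeholder names:

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	/* Quiesce the hardware. */
	return 0;
}

static int example_resume(struct device *dev)
{
	/* Bring the hardware back up. */
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

/*
 * In the driver definition:
 *	.driver = {
 *		.pm = pm_sleep_ptr(&example_pm_ops),
 *	},
 * resolves to NULL (and lets the ops be dropped) when CONFIG_PM_SLEEP=n.
 */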
diff --git a/drivers/tty/serial/8250/8250_ingenic.c b/drivers/tty/serial/8250/8250_ingenic.c
index a12f737924c0..a2783e38a2e3 100644
--- a/drivers/tty/serial/8250/8250_ingenic.c
+++ b/drivers/tty/serial/8250/8250_ingenic.c
@@ -234,7 +234,7 @@ static int ingenic_uart_probe(struct platform_device *pdev)
struct ingenic_uart_data *data;
const struct ingenic_uart_config *cdata;
struct resource *regs;
- int irq, err, line;
+ int err;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
@@ -242,10 +242,6 @@ static int ingenic_uart_probe(struct platform_device *pdev)
return -ENODEV;
}
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
dev_err(&pdev->dev, "no registers defined\n");
@@ -259,21 +255,19 @@ static int ingenic_uart_probe(struct platform_device *pdev)
spin_lock_init(&uart.port.lock);
uart.port.type = PORT_16550A;
uart.port.flags = UPF_SKIP_TEST | UPF_IOREMAP | UPF_FIXED_TYPE;
- uart.port.iotype = UPIO_MEM;
uart.port.mapbase = regs->start;
- uart.port.regshift = 2;
uart.port.serial_out = ingenic_uart_serial_out;
uart.port.serial_in = ingenic_uart_serial_in;
- uart.port.irq = irq;
uart.port.dev = &pdev->dev;
- uart.port.fifosize = cdata->fifosize;
uart.tx_loadsz = cdata->tx_loadsz;
uart.capabilities = UART_CAP_FIFO | UART_CAP_RTOIE;
- /* Check for a fixed line number */
- line = of_alias_get_id(pdev->dev.of_node, "serial");
- if (line >= 0)
- uart.port.line = line;
+ err = uart_read_port_properties(&uart.port);
+ if (err)
+ return err;
+
+ uart.port.regshift = 2;
+ uart.port.fifosize = cdata->fifosize;
uart.port.membase = devm_ioremap(&pdev->dev, regs->start,
resource_size(regs));
diff --git a/drivers/tty/serial/8250/8250_lpc18xx.c b/drivers/tty/serial/8250/8250_lpc18xx.c
index 8d728a6a5991..7984ee05af1d 100644
--- a/drivers/tty/serial/8250/8250_lpc18xx.c
+++ b/drivers/tty/serial/8250/8250_lpc18xx.c
@@ -92,11 +92,7 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
struct lpc18xx_uart_data *data;
struct uart_8250_port uart;
struct resource *res;
- int irq, ret;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -139,19 +135,12 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
goto dis_clk_reg;
}
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- uart.port.line = ret;
-
data->dma.rx_param = data;
data->dma.tx_param = data;
spin_lock_init(&uart.port.lock);
uart.port.dev = &pdev->dev;
- uart.port.irq = irq;
- uart.port.iotype = UPIO_MEM32;
uart.port.mapbase = res->start;
- uart.port.regshift = 2;
uart.port.type = PORT_16550A;
uart.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST;
uart.port.uartclk = clk_get_rate(data->clk_uart);
@@ -160,6 +149,13 @@ static int lpc18xx_serial_probe(struct platform_device *pdev)
uart.port.rs485_supported = lpc18xx_rs485_supported;
uart.port.serial_out = lpc18xx_uart_serial_out;
+ ret = uart_read_port_properties(&uart.port);
+ if (ret)
+ return ret;
+
+ uart.port.iotype = UPIO_MEM32;
+ uart.port.regshift = 2;
+
uart.dma = &data->dma;
uart.dma->rxconf.src_maxburst = 1;
uart.dma->txconf.dst_maxburst = 1;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 34f17a9785e7..5d1dd992d8fb 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -4,7 +4,10 @@
*
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>, IBM Corp.
*/
+
+#include <linux/bits.h>
#include <linux/console.h>
+#include <linux/math.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/serial_core.h>
@@ -25,6 +28,36 @@ struct of_serial_info {
int line;
};
+/* Nuvoton NPCM timeout register */
+#define UART_NPCM_TOR 7
+#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
+
+static int npcm_startup(struct uart_port *port)
+{
+ /*
+ * Nuvoton calls the scratch register 'UART_TOR' (timeout
+ * register). Enable it, and set TIOC (timeout interrupt
+ * comparator) to be 0x20 for correct operation.
+ */
+ serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
+
+ return serial8250_do_startup(port);
+}
+
+/* Nuvoton NPCM UARTs have a custom divisor calculation */
+static unsigned int npcm_get_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int *frac)
+{
+ return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
+}
+
+static int npcm_setup(struct uart_port *port)
+{
+ port->get_divisor = npcm_get_divisor;
+ port->startup = npcm_startup;
+ return 0;
+}
+
/*
* Fill a struct uart_port for a given device node
*/
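npcm_get_divisor() above encodes the controller's non-standard divisor formula; a quick illustrative check of the arithmetic, with an assumed 24 MHz clock that is not taken from the patch:

#include <linux/math.h>

/*
 * Illustrative check only, with an assumed 24 MHz uartclk at 115200 baud:
 *   16 * 115200 + 2    = 1843202
 *   24000000 / 1843202 ~ 13.02 -> DIV_ROUND_CLOSEST() = 13
 *   13 - 2             = 11
 */
static unsigned int example_npcm_divisor(unsigned int uartclk, unsigned int baud)
{
	return DIV_ROUND_CLOSEST(uartclk, 16 * baud + 2) - 2;
}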
@@ -36,37 +69,22 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
struct device *dev = &ofdev->dev;
struct device_node *np = dev->of_node;
struct uart_port *port = &up->port;
- u32 clk, spd, prop;
- int ret, irq;
+ u32 spd;
+ int ret;
memset(port, 0, sizeof *port);
pm_runtime_enable(&ofdev->dev);
pm_runtime_get_sync(&ofdev->dev);
- if (of_property_read_u32(np, "clock-frequency", &clk)) {
-
- /* Get clk rate through clk driver if present */
- info->clk = devm_clk_get_enabled(dev, NULL);
- if (IS_ERR(info->clk)) {
- ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n");
- goto err_pmruntime;
- }
-
- clk = clk_get_rate(info->clk);
- }
- /* If current-speed was set, then try not to change it. */
- if (of_property_read_u32(np, "current-speed", &spd) == 0)
- port->custom_divisor = clk / (16 * spd);
-
ret = of_address_to_resource(np, 0, &resource);
if (ret) {
dev_err_probe(dev, ret, "invalid address\n");
goto err_pmruntime;
}
- port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
- UPF_FIXED_TYPE;
+ port->dev = &ofdev->dev;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
spin_lock_init(&port->lock);
if (resource_type(&resource) == IORESOURCE_IO) {
@@ -75,70 +93,31 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
} else {
port->mapbase = resource.start;
port->mapsize = resource_size(&resource);
+ port->flags |= UPF_IOREMAP;
+ }
- /* Check for shifted address mapping */
- if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
- if (prop >= port->mapsize) {
- ret = dev_err_probe(dev, -EINVAL, "reg-offset %u exceeds region size %pa\n",
- prop, &port->mapsize);
- goto err_pmruntime;
- }
+ ret = uart_read_and_validate_port_properties(port);
+ if (ret)
+ goto err_pmruntime;
- port->mapbase += prop;
- port->mapsize -= prop;
+ /* Get clk rate through clk driver if present */
+ if (!port->uartclk) {
+ info->clk = devm_clk_get_enabled(dev, NULL);
+ if (IS_ERR(info->clk)) {
+ ret = dev_err_probe(dev, PTR_ERR(info->clk), "failed to get clock\n");
+ goto err_pmruntime;
}
- port->iotype = UPIO_MEM;
- if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
- switch (prop) {
- case 1:
- port->iotype = UPIO_MEM;
- break;
- case 2:
- port->iotype = UPIO_MEM16;
- break;
- case 4:
- port->iotype = of_device_is_big_endian(np) ?
- UPIO_MEM32BE : UPIO_MEM32;
- break;
- default:
- ret = dev_err_probe(dev, -EINVAL, "unsupported reg-io-width (%u)\n",
- prop);
- goto err_pmruntime;
- }
- }
- port->flags |= UPF_IOREMAP;
+ port->uartclk = clk_get_rate(info->clk);
}
+ /* If current-speed was set, then try not to change it. */
+ if (of_property_read_u32(np, "current-speed", &spd) == 0)
+ port->custom_divisor = port->uartclk / (16 * spd);
/* Compatibility with the deprecated pxa driver and 8250_pxa drivers. */
if (of_device_is_compatible(np, "mrvl,mmp-uart"))
port->regshift = 2;
- /* Check for registers offset within the devices address range */
- if (of_property_read_u32(np, "reg-shift", &prop) == 0)
- port->regshift = prop;
-
- /* Check for fifo size */
- if (of_property_read_u32(np, "fifo-size", &prop) == 0)
- port->fifosize = prop;
-
- /* Check for a fixed line number */
- ret = of_alias_get_id(np, "serial");
- if (ret >= 0)
- port->line = ret;
-
- irq = of_irq_get(np, 0);
- if (irq < 0) {
- if (irq == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto err_pmruntime;
- }
- /* IRQ support not mandatory */
- irq = 0;
- }
-
- port->irq = irq;
-
info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
if (IS_ERR(info->rst)) {
ret = PTR_ERR(info->rst);
@@ -150,12 +129,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
goto err_pmruntime;
port->type = type;
- port->uartclk = clk;
-
- if (of_property_read_bool(np, "no-loopback-test"))
- port->flags |= UPF_SKIP_TEST;
-
- port->dev = &ofdev->dev;
port->rs485_config = serial8250_em485_config;
port->rs485_supported = serial8250_em485_supported;
up->rs485_start_tx = serial8250_em485_start_tx;
@@ -164,10 +137,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
switch (type) {
case PORT_RT2880:
ret = rt288x_setup(port);
- if (ret)
- goto err_pmruntime;
+ break;
+ case PORT_NPCM:
+ ret = npcm_setup(port);
+ break;
+ default:
+ /* Nothing to do */
+ ret = 0;
break;
}
+ if (ret)
+ goto err_pmruntime;
if (IS_REACHABLE(CONFIG_SERIAL_8250_FSL) &&
(of_device_is_compatible(np, "fsl,ns16550") ||
@@ -240,7 +220,6 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
platform_set_drvdata(ofdev, info);
return 0;
err_dispose:
- irq_dispose_mapping(port8250.port.irq);
pm_runtime_put_sync(&ofdev->dev);
pm_runtime_disable(&ofdev->dev);
err_free:
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index 6942990a333c..66901d93089a 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1394,11 +1394,7 @@ static int omap8250_probe(struct platform_device *pdev)
struct uart_8250_port up;
struct resource *regs;
void __iomem *membase;
- int irq, ret;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ int ret;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs) {
@@ -1419,7 +1415,6 @@ static int omap8250_probe(struct platform_device *pdev)
up.port.dev = &pdev->dev;
up.port.mapbase = regs->start;
up.port.membase = membase;
- up.port.irq = irq;
/*
* It claims to be 16C750 compatible however it is a little different.
* It has EFR and has no FCR7_64byte bit. The AFE (which it claims to
@@ -1429,13 +1424,9 @@ static int omap8250_probe(struct platform_device *pdev)
* or pm callback.
*/
up.port.type = PORT_8250;
- up.port.iotype = UPIO_MEM;
- up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW |
- UPF_HARD_FLOW;
+ up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW | UPF_HARD_FLOW;
up.port.private_data = priv;
- up.port.regshift = OMAP_UART_REGSHIFT;
- up.port.fifosize = 64;
up.tx_loadsz = 64;
up.capabilities = UART_CAP_FIFO;
#ifdef CONFIG_PM
@@ -1461,14 +1452,14 @@ static int omap8250_probe(struct platform_device *pdev)
up.rs485_stop_tx = serial8250_em485_stop_tx;
up.port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE);
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to get alias\n");
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
return ret;
- }
- up.port.line = ret;
- if (of_property_read_u32(np, "clock-frequency", &up.port.uartclk)) {
+ up.port.regshift = OMAP_UART_REGSHIFT;
+ up.port.fifosize = 64;
+
+ if (!up.port.uartclk) {
struct clk *clk;
clk = devm_clk_get(&pdev->dev, NULL);
@@ -1560,8 +1551,8 @@ static int omap8250_probe(struct platform_device *pdev)
}
#endif
- irq_set_status_flags(irq, IRQ_NOAUTOEN);
- ret = devm_request_irq(&pdev->dev, irq, omap8250_irq, 0,
+ irq_set_status_flags(up.port.irq, IRQ_NOAUTOEN);
+ ret = devm_request_irq(&pdev->dev, up.port.irq, omap8250_irq, 0,
dev_name(&pdev->dev), priv);
if (ret < 0)
return ret;
diff --git a/drivers/tty/serial/8250/8250_pci1xxxx.c b/drivers/tty/serial/8250/8250_pci1xxxx.c
index 2dda737b1660..2fbb5851f788 100644
--- a/drivers/tty/serial/8250/8250_pci1xxxx.c
+++ b/drivers/tty/serial/8250/8250_pci1xxxx.c
@@ -7,23 +7,31 @@
* Copyright (C) 2022 Microchip Technology Inc., All Rights Reserved.
*/
+#include <linux/array_size.h>
#include <linux/bitfield.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
+#include <linux/bits.h>
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp_types.h>
#include <linux/io.h>
#include <linux/iopoll.h>
-#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/overflow.h>
#include <linux/pci.h>
+#include <linux/pm.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
-#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
-#include <linux/units.h>
+#include <linux/time.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/8250_pci.h>
+#include <linux/types.h>
+#include <linux/units.h>
#include <asm/byteorder.h>
@@ -67,6 +75,7 @@
#define SYSLOCK_RETRY_CNT 1000
#define UART_RX_BYTE_FIFO 0x00
+#define UART_TX_BYTE_FIFO 0x00
#define UART_FIFO_CTL 0x02
#define UART_ACTV_REG 0x11
@@ -81,10 +90,10 @@
#define ADCL_CFG_PIN_SEL BIT(1)
#define ADCL_CFG_EN BIT(0)
-#define UART_BIT_SAMPLE_CNT 16
+#define UART_BIT_SAMPLE_CNT_8 8
+#define UART_BIT_SAMPLE_CNT_16 16
#define BAUD_CLOCK_DIV_INT_MSK GENMASK(31, 8)
#define ADCL_CFG_RTS_DELAY_MASK GENMASK(11, 8)
-#define UART_CLOCK_DEFAULT (62500 * HZ_PER_KHZ)
#define UART_WAKE_REG 0x8C
#define UART_WAKE_MASK_REG 0x90
@@ -95,13 +104,19 @@
(UART_WAKE_N_PIN | UART_WAKE_NCTS | UART_WAKE_INT)
#define UART_BAUD_CLK_DIVISOR_REG 0x54
+#define FRAC_DIV_CFG_REG 0x58
#define UART_RESET_REG 0x94
#define UART_RESET_D3_RESET_DISABLE BIT(16)
#define UART_BURST_STATUS_REG 0x9C
+#define UART_TX_BURST_FIFO 0xA0
#define UART_RX_BURST_FIFO 0xA4
+#define UART_BIT_DIVISOR_8 0x26731000
+#define UART_BIT_DIVISOR_16 0x6ef71000
+#define UART_BAUD_4MBPS 4000000
+
#define MAX_PORTS 4
#define PORT_OFFSET 0x100
#define RX_BUF_SIZE 512
@@ -109,6 +124,7 @@
#define UART_BURST_SIZE 4
#define UART_BST_STAT_RX_COUNT_MASK 0x00FF
+#define UART_BST_STAT_TX_COUNT_MASK 0xFF00
#define UART_BST_STAT_IIR_INT_PEND 0x100000
#define UART_LSR_OVERRUN_ERR_CLR 0x43
#define UART_BST_STAT_LSR_RX_MASK 0x9F000000
@@ -116,6 +132,7 @@
#define UART_BST_STAT_LSR_OVERRUN_ERR 0x2000000
#define UART_BST_STAT_LSR_PARITY_ERR 0x4000000
#define UART_BST_STAT_LSR_FRAME_ERR 0x8000000
+#define UART_BST_STAT_LSR_THRE 0x20000000
struct pci1xxxx_8250 {
unsigned int nr;
@@ -206,15 +223,21 @@ static int pci1xxxx_get_num_ports(struct pci_dev *dev)
static unsigned int pci1xxxx_get_divisor(struct uart_port *port,
unsigned int baud, unsigned int *frac)
{
+ unsigned int uart_sample_cnt;
unsigned int quot;
+ if (baud >= UART_BAUD_4MBPS)
+ uart_sample_cnt = UART_BIT_SAMPLE_CNT_8;
+ else
+ uart_sample_cnt = UART_BIT_SAMPLE_CNT_16;
+
/*
* Calculate baud rate sampling period in nanoseconds.
* Fractional part x denotes x/255 parts of a nanosecond.
*/
- quot = NSEC_PER_SEC / (baud * UART_BIT_SAMPLE_CNT);
- *frac = (NSEC_PER_SEC - quot * baud * UART_BIT_SAMPLE_CNT) *
- 255 / UART_BIT_SAMPLE_CNT / baud;
+ quot = NSEC_PER_SEC / (baud * uart_sample_cnt);
+ *frac = (NSEC_PER_SEC - quot * baud * uart_sample_cnt) *
+ 255 / uart_sample_cnt / baud;
return quot;
}
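
A minimal stand-alone sketch of the divisor arithmetic above, assuming 115200 baud and the 16x sampling mode (the values are illustrative, not taken from the patch):

#include <stdio.h>

#define NSEC_PER_SEC	1000000000U

int main(void)
{
	unsigned int baud = 115200, sample_cnt = 16;	/* assumed example values */
	unsigned int quot, frac;

	/* Integer part of the bit-sample period, in nanoseconds. */
	quot = NSEC_PER_SEC / (baud * sample_cnt);
	/* Remainder, expressed in 1/255ths of a nanosecond. */
	frac = (NSEC_PER_SEC - quot * baud * sample_cnt) * 255 / sample_cnt / baud;

	printf("quot=%u frac=%u\n", quot, frac);	/* prints quot=542 frac=136 */
	return 0;
}
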
@@ -222,6 +245,11 @@ static unsigned int pci1xxxx_get_divisor(struct uart_port *port,
static void pci1xxxx_set_divisor(struct uart_port *port, unsigned int baud,
unsigned int quot, unsigned int frac)
{
+ if (baud >= UART_BAUD_4MBPS)
+ writel(UART_BIT_DIVISOR_8, port->membase + FRAC_DIV_CFG_REG);
+ else
+ writel(UART_BIT_DIVISOR_16, port->membase + FRAC_DIV_CFG_REG);
+
writel(FIELD_PREP(BAUD_CLOCK_DIV_INT_MSK, quot) | frac,
port->membase + UART_BAUD_CLK_DIVISOR_REG);
}
@@ -233,7 +261,16 @@ static int pci1xxxx_rs485_config(struct uart_port *port,
u32 delay_in_baud_periods;
u32 baud_period_in_ns;
u32 mode_cfg = 0;
+ u32 sample_cnt;
u32 clock_div;
+ u32 frac_div;
+
+ frac_div = readl(port->membase + FRAC_DIV_CFG_REG);
+
+ if (frac_div == UART_BIT_DIVISOR_16)
+ sample_cnt = UART_BIT_SAMPLE_CNT_16;
+ else
+ sample_cnt = UART_BIT_SAMPLE_CNT_8;
/*
* pci1xxxx's uart hardware supports only RTS delay after
@@ -249,7 +286,7 @@ static int pci1xxxx_rs485_config(struct uart_port *port,
clock_div = readl(port->membase + UART_BAUD_CLK_DIVISOR_REG);
baud_period_in_ns =
FIELD_GET(BAUD_CLOCK_DIV_INT_MSK, clock_div) *
- UART_BIT_SAMPLE_CNT;
+ sample_cnt;
delay_in_baud_periods =
rs485->delay_rts_after_send * NSEC_PER_MSEC /
baud_period_in_ns;
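
A small worked example of the RTS-delay conversion above, using the same assumed 115200 baud / 16x sampling figures (quot = 542):

#include <stdio.h>

#define NSEC_PER_MSEC	1000000U

int main(void)
{
	unsigned int quot = 542, sample_cnt = 16;	/* assumed example values */
	unsigned int delay_rts_after_send = 1;		/* 1 ms */
	unsigned int baud_period_in_ns = quot * sample_cnt;	/* 8672 ns per bit */
	unsigned int delay_in_baud_periods =
		delay_rts_after_send * NSEC_PER_MSEC / baud_period_in_ns;

	printf("%u baud periods\n", delay_in_baud_periods);	/* prints 115 */
	return 0;
}
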
@@ -344,6 +381,105 @@ static void pci1xxxx_rx_burst(struct uart_port *port, u32 uart_status)
}
}
+static void pci1xxxx_process_write_data(struct uart_port *port,
+ struct circ_buf *xmit,
+ int *data_empty_count,
+ u32 *valid_byte_count)
+{
+ u32 valid_burst_count = *valid_byte_count / UART_BURST_SIZE;
+
+	/*
+	 * Each burst transaction transfers data in DWORDs. If fewer than
+	 * four valid bytes remain to be transferred, or if the circular
+	 * buffer does not have a full DWORD left before it wraps, the data
+	 * is transferred one byte at a time.
+	 */
+ while (valid_burst_count) {
+ if (*data_empty_count - UART_BURST_SIZE < 0)
+ break;
+ if (xmit->tail > (UART_XMIT_SIZE - UART_BURST_SIZE))
+ break;
+ writel(*(unsigned int *)&xmit->buf[xmit->tail],
+ port->membase + UART_TX_BURST_FIFO);
+ *valid_byte_count -= UART_BURST_SIZE;
+ *data_empty_count -= UART_BURST_SIZE;
+ valid_burst_count -= UART_BYTE_SIZE;
+
+ xmit->tail = (xmit->tail + UART_BURST_SIZE) &
+ (UART_XMIT_SIZE - 1);
+ }
+
+ while (*valid_byte_count) {
+ if (*data_empty_count - UART_BYTE_SIZE < 0)
+ break;
+ writeb(xmit->buf[xmit->tail], port->membase +
+ UART_TX_BYTE_FIFO);
+ *data_empty_count -= UART_BYTE_SIZE;
+ *valid_byte_count -= UART_BYTE_SIZE;
+
+		/*
+		 * When the end of the circular buffer is reached, the tail
+		 * wraps around so that the next byte is read from the
+		 * beginning of the buffer.
+		 */
+ xmit->tail = (xmit->tail + UART_BYTE_SIZE) &
+ (UART_XMIT_SIZE - 1);
+
+		/*
+		 * If a burst count is still pending, the remaining data is
+		 * handed back to the DWORD transfer path.
+		 */
+ if (valid_burst_count && (xmit->tail <
+ (UART_XMIT_SIZE - UART_BURST_SIZE)))
+ break;
+ }
+}
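
For the DWORD path above, a tiny sketch of how the single 32-bit write packs four queued bytes on a little-endian machine (illustrative only, outside the driver):

#include <stdio.h>

int main(void)
{
	unsigned char buf[4] = { 'A', 'B', 'C', 'D' };	/* four queued TX bytes */
	unsigned int dword;

	/* Same access pattern as the burst write to UART_TX_BURST_FIFO. */
	dword = *(unsigned int *)buf;
	printf("0x%08x\n", dword);	/* 0x44434241 on little-endian */
	return 0;
}
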
+
+static void pci1xxxx_tx_burst(struct uart_port *port, u32 uart_status)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+ u32 valid_byte_count;
+ int data_empty_count;
+ struct circ_buf *xmit;
+
+ xmit = &port->state->xmit;
+
+ if (port->x_char) {
+ writeb(port->x_char, port->membase + UART_TX);
+ port->icount.tx++;
+ port->x_char = 0;
+ return;
+ }
+
+ if ((uart_tx_stopped(port)) || (uart_circ_empty(xmit))) {
+ port->ops->stop_tx(port);
+ } else {
+ data_empty_count = (pci1xxxx_read_burst_status(port) &
+ UART_BST_STAT_TX_COUNT_MASK) >> 8;
+ do {
+ valid_byte_count = uart_circ_chars_pending(xmit);
+
+ pci1xxxx_process_write_data(port, xmit,
+ &data_empty_count,
+ &valid_byte_count);
+
+ port->icount.tx++;
+ if (uart_circ_empty(xmit))
+ break;
+ } while (data_empty_count && valid_byte_count);
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ /*
+	 * With RPM enabled, we have to wait until the FIFO is empty before
+	 * the HW can go idle. So we get here once again with an empty FIFO
+	 * and disable the interrupt and RPM in __stop_tx().
+ */
+ if (uart_circ_empty(xmit) && !(up->capabilities & UART_CAP_RPM))
+ port->ops->stop_tx(port);
+}
+
static int pci1xxxx_handle_irq(struct uart_port *port)
{
unsigned long flags;
@@ -359,6 +495,9 @@ static int pci1xxxx_handle_irq(struct uart_port *port)
if (status & UART_BST_STAT_LSR_RX_MASK)
pci1xxxx_rx_burst(port, status);
+ if (status & UART_BST_STAT_LSR_THRE)
+ pci1xxxx_tx_burst(port, status);
+
spin_unlock_irqrestore(&port->lock, flags);
return 1;
@@ -481,6 +620,17 @@ static int pci1xxxx_setup(struct pci_dev *pdev,
port->port.flags |= UPF_FIXED_TYPE | UPF_SKIP_TEST;
port->port.type = PORT_MCHP16550A;
+	/*
+	 * The 8250 core assumes the prescaler value is always 16.
+	 * The MCHP ports support a downscaled sampling mode, so the
+	 * functional UART clock can be lower (62.5 MHz) than software
+	 * expects while still reaching higher baud rates.
+	 * Assign 64 MHz here so that 4 Mbps is attainable.
+	 *
+	 * The value itself is not really used anywhere except in baud
+	 * rate calculations, so we can mangle it as we wish.
+	 */
+ port->port.uartclk = 64 * HZ_PER_MHZ;
port->port.set_termios = serial8250_do_set_termios;
port->port.get_divisor = pci1xxxx_get_divisor;
port->port.set_divisor = pci1xxxx_set_divisor;
@@ -594,7 +744,6 @@ static int pci1xxxx_serial_probe(struct pci_dev *pdev,
memset(&uart, 0, sizeof(uart));
uart.port.flags = UPF_SHARE_IRQ | UPF_FIXED_PORT;
- uart.port.uartclk = UART_CLOCK_DEFAULT;
uart.port.dev = dev;
if (num_vectors == max_vec_reqd)
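
A quick check of the 64 MHz choice above, assuming the serial core caps the requested rate at uartclk / 16 (as the surrounding comment implies):

#include <stdio.h>

int main(void)
{
	unsigned int real_clk = 62500000;	/* functional clock, 62.5 MHz */
	unsigned int fake_clk = 64000000;	/* value assigned to uartclk */

	printf("max baud with real clock:     %u\n", real_clk / 16);	/* 3906250 */
	printf("max baud with reported clock: %u\n", fake_clk / 16);	/* 4000000 */
	return 0;
}
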
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 8ca061d3bbb9..fc9dd5d45295 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -38,10 +38,6 @@
#include "8250.h"
-/* Nuvoton NPCM timeout register */
-#define UART_NPCM_TOR 7
-#define UART_NPCM_TOIE BIT(7) /* Timeout Interrupt Enable */
-
/*
* Debugging.
*/
@@ -1329,9 +1325,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
inb_p(ICP);
}
- if (uart_console(port))
- console_lock();
-
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
save_mcr = serial8250_in_MCR(up);
@@ -1371,9 +1364,6 @@ static void autoconfig_irq(struct uart_8250_port *up)
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
- if (uart_console(port))
- console_unlock();
-
port->irq = (irq > 0) ? irq : 0;
}
@@ -2235,15 +2225,6 @@ int serial8250_do_startup(struct uart_port *port)
UART_DA830_PWREMU_MGMT_FREE);
}
- if (port->type == PORT_NPCM) {
- /*
- * Nuvoton calls the scratch register 'UART_TOR' (timeout
- * register). Enable it, and set TIOC (timeout interrupt
- * comparator) to be 0x20 for correct operation.
- */
- serial_port_out(port, UART_NPCM_TOR, UART_NPCM_TOIE | 0x20);
- }
-
#ifdef CONFIG_SERIAL_8250_RSA
/*
* If this is an RSA port, see if we can kick it up to the
@@ -2545,15 +2526,6 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
-/* Nuvoton NPCM UARTs have a custom divisor calculation */
-static unsigned int npcm_get_divisor(struct uart_8250_port *up,
- unsigned int baud)
-{
- struct uart_port *port = &up->port;
-
- return DIV_ROUND_CLOSEST(port->uartclk, 16 * baud + 2) - 2;
-}
-
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@@ -2598,8 +2570,6 @@ static unsigned int serial8250_do_get_divisor(struct uart_port *port,
quot = 0x8001;
else if (magic_multiplier && baud >= port->uartclk / 12)
quot = 0x8002;
- else if (up->port.type == PORT_NPCM)
- quot = npcm_get_divisor(up, baud);
else
quot = uart_get_divisor(port, baud);
@@ -2714,12 +2684,8 @@ static unsigned int serial8250_get_baud_rate(struct uart_port *port,
*/
void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
{
- struct uart_8250_port *up = up_to_u8250p(port);
struct tty_port *tport = &port->state->port;
- unsigned int baud, quot, frac = 0;
- struct ktermios *termios;
struct tty_struct *tty;
- unsigned long flags;
tty = tty_port_tty_get(tport);
if (!tty) {
@@ -2740,21 +2706,7 @@ void serial8250_update_uartclk(struct uart_port *port, unsigned int uartclk)
if (!tty_port_initialized(tport))
goto out_unlock;
- termios = &tty->termios;
-
- baud = serial8250_get_baud_rate(port, termios, NULL);
- quot = serial8250_get_divisor(port, baud, &frac);
-
- serial8250_rpm_get(up);
- uart_port_lock_irqsave(port, &flags);
-
- uart_update_timeout(port, termios->c_cflag, baud);
-
- serial8250_set_divisor(port, baud, quot, frac);
- serial_port_out(port, UART_LCR, up->lcr);
-
- uart_port_unlock_irqrestore(port, flags);
- serial8250_rpm_put(up);
+ serial8250_do_set_termios(port, &tty->termios, NULL);
out_unlock:
mutex_unlock(&tport->mutex);
diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
index 77686da42ce8..f1a51b00b1b9 100644
--- a/drivers/tty/serial/8250/8250_pxa.c
+++ b/drivers/tty/serial/8250/8250_pxa.c
@@ -92,11 +92,7 @@ static int serial_pxa_probe(struct platform_device *pdev)
struct uart_8250_port uart = {};
struct pxa8250_data *data;
struct resource *mmres;
- int irq, ret;
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
+ int ret;
mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mmres)
@@ -114,21 +110,21 @@ static int serial_pxa_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- uart.port.line = ret;
-
uart.port.type = PORT_XSCALE;
- uart.port.iotype = UPIO_MEM32;
uart.port.mapbase = mmres->start;
- uart.port.regshift = 2;
- uart.port.irq = irq;
- uart.port.fifosize = 64;
uart.port.flags = UPF_IOREMAP | UPF_SKIP_TEST | UPF_FIXED_TYPE;
uart.port.dev = &pdev->dev;
uart.port.uartclk = clk_get_rate(data->clk);
uart.port.pm = serial_pxa_pm;
uart.port.private_data = data;
+
+ ret = uart_read_port_properties(&uart.port);
+ if (ret)
+ return ret;
+
+ uart.port.iotype = UPIO_MEM32;
+ uart.port.regshift = 2;
+ uart.port.fifosize = 64;
uart.dl_write = serial_pxa_dl_write;
ret = serial8250_register_8250_port(&uart);
diff --git a/drivers/tty/serial/8250/8250_tegra.c b/drivers/tty/serial/8250/8250_tegra.c
index ba352262df75..60a80d00d251 100644
--- a/drivers/tty/serial/8250/8250_tegra.c
+++ b/drivers/tty/serial/8250/8250_tegra.c
@@ -57,25 +57,11 @@ static int tegra_uart_probe(struct platform_device *pdev)
port = &port8250.port;
spin_lock_init(&port->lock);
- port->flags = UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF | UPF_FIXED_PORT |
- UPF_FIXED_TYPE;
- port->iotype = UPIO_MEM32;
- port->regshift = 2;
+ port->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_PORT | UPF_FIXED_TYPE;
port->type = PORT_TEGRA;
- port->irqflags |= IRQF_SHARED;
port->dev = &pdev->dev;
port->handle_break = tegra_uart_handle_break;
- ret = of_alias_get_id(pdev->dev.of_node, "serial");
- if (ret >= 0)
- port->line = ret;
-
- ret = platform_get_irq(pdev, 0);
- if (ret < 0)
- return ret;
-
- port->irq = ret;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
return -ENODEV;
@@ -88,12 +74,18 @@ static int tegra_uart_probe(struct platform_device *pdev)
port->mapbase = res->start;
port->mapsize = resource_size(res);
+ ret = uart_read_port_properties(port);
+ if (ret)
+ return ret;
+
+ port->iotype = UPIO_MEM32;
+ port->regshift = 2;
+
uart->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
if (IS_ERR(uart->rst))
return PTR_ERR(uart->rst);
- if (device_property_read_u32(&pdev->dev, "clock-frequency",
- &port->uartclk)) {
+ if (!port->uartclk) {
uart->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(uart->clk)) {
dev_err(&pdev->dev, "failed to get clock!\n");
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index 6399a38ecce2..670d2ca0f757 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -162,7 +162,6 @@ static int uniphier_uart_probe(struct platform_device *pdev)
struct uniphier8250_priv *priv;
struct resource *regs;
void __iomem *membase;
- int irq;
int ret;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -175,23 +174,12 @@ static int uniphier_uart_probe(struct platform_device *pdev)
if (!membase)
return -ENOMEM;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
memset(&up, 0, sizeof(up));
- ret = of_alias_get_id(dev->of_node, "serial");
- if (ret < 0) {
- dev_err(dev, "failed to get alias id\n");
- return ret;
- }
- up.port.line = ret;
-
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get clock\n");
@@ -211,7 +199,10 @@ static int uniphier_uart_probe(struct platform_device *pdev)
up.port.mapbase = regs->start;
up.port.mapsize = resource_size(regs);
up.port.membase = membase;
- up.port.irq = irq;
+
+ ret = uart_read_port_properties(&up.port);
+ if (ret)
+ return ret;
up.port.type = PORT_16550A;
up.port.iotype = UPIO_MEM32;
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 8b9a2c4902e2..47ff50763c04 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -149,6 +149,7 @@ config SERIAL_8250_PCI
config SERIAL_8250_EXAR
tristate "8250/16550 Exar/Commtech PCI/PCIe device support"
depends on SERIAL_8250 && PCI
+ select SERIAL_8250_PCILIB
default SERIAL_8250
help
This builds support for XR17C1xx, XR17V3xx and some Commtech
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index cf2c890a560f..2fa3fb30dc6c 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -348,10 +348,7 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
flag = TTY_FRAME;
}
- uart_port_unlock(&uap->port);
- sysrq = uart_handle_sysrq_char(&uap->port, ch & 255);
- uart_port_lock(&uap->port);
-
+ sysrq = uart_prepare_sysrq_char(&uap->port, ch & 255);
if (!sysrq)
uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
}
@@ -1017,7 +1014,7 @@ static void pl011_dma_rx_callback(void *data)
ret = pl011_dma_rx_trigger_dma(uap);
pl011_dma_rx_chars(uap, pending, lastbuf, false);
- uart_port_unlock_irq(&uap->port);
+ uart_unlock_and_check_sysrq(&uap->port);
/*
* Do this check after we picked the DMA chars so we don't
* get some IRQ immediately from RX.
@@ -1540,11 +1537,10 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
static irqreturn_t pl011_int(int irq, void *dev_id)
{
struct uart_amba_port *uap = dev_id;
- unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
- uart_port_lock_irqsave(&uap->port, &flags);
+ uart_port_lock(&uap->port);
status = pl011_read(uap, REG_RIS) & uap->im;
if (status) {
do {
@@ -1573,7 +1569,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
handled = 1;
}
- uart_port_unlock_irqrestore(&uap->port, flags);
+ uart_unlock_and_check_sysrq(&uap->port);
return IRQ_RETVAL(handled);
}
@@ -2322,13 +2318,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
clk_enable(uap->clk);
- local_irq_save(flags);
- if (uap->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&uap->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&uap->port, &flags);
else
- uart_port_lock(&uap->port);
+ uart_port_lock_irqsave(&uap->port, &flags);
/*
* First save the CR then disable the interrupts
@@ -2354,8 +2347,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
pl011_write(old_cr, uap, REG_CR);
if (locked)
- uart_port_unlock(&uap->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&uap->port, flags);
clk_disable(uap->clk);
}
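
The same console-write and IRQ locking conversion repeats in most of the drivers below; a minimal sketch of the target pattern (the function names are illustrative, not from any one driver):

#include <linux/interrupt.h>
#include <linux/printk.h>
#include <linux/serial_core.h>

static void example_console_write(struct uart_port *port, const char *s,
				  unsigned int count)
{
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	/* ... emit the characters here ... */

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}

static irqreturn_t example_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;

	uart_port_lock(port);

	/*
	 * In the RX loop, a sysrq character is only recorded via
	 * uart_prepare_sysrq_char(port, ch) ...
	 */

	/* ... and is acted upon only once the port lock is dropped. */
	uart_unlock_and_check_sysrq(port);

	return IRQ_HANDLED;
}
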
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 8d09ace062e5..7790cbc57391 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -378,7 +378,7 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
up->port.icount.rx++;
ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
- if (uart_handle_sysrq_char(&up->port, ch))
+ if (uart_prepare_sysrq_char(&up->port, ch))
continue;
if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
@@ -468,7 +468,7 @@ static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
ar933x_uart_tx_chars(up);
}
- uart_port_unlock(&up->port);
+ uart_unlock_and_check_sysrq(&up->port);
return IRQ_HANDLED;
}
@@ -627,14 +627,10 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
unsigned int int_en;
int locked = 1;
- local_irq_save(flags);
-
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -654,9 +650,7 @@ static void ar933x_uart_console_write(struct console *co, const char *s,
ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
if (locked)
- uart_port_unlock(&up->port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int ar933x_uart_console_setup(struct console *co, char *options)
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index a3cefa153456..34801a6f300b 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -285,10 +285,9 @@ static void bcm_uart_do_rx(struct uart_port *port)
flag = TTY_PARITY;
}
- if (uart_handle_sysrq_char(port, c))
+ if (uart_prepare_sysrq_char(port, c))
continue;
-
if ((cstat & port->ignore_status_mask) == 0)
tty_insert_flip_char(tty_port, c, flag);
@@ -353,7 +352,7 @@ static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
estat & UART_EXTINP_DCD_MASK);
}
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -703,20 +702,14 @@ static void bcm_console_write(struct console *co, const char *s,
{
struct uart_port *port;
unsigned long flags;
- int locked;
+ int locked = 1;
port = &ports[co->index];
- local_irq_save(flags);
- if (port->sysrq) {
- /* bcm_uart_interrupt() already took the lock */
- locked = 0;
- } else if (oops_in_progress) {
- locked = uart_port_trylock(port);
- } else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
/* call helper to deal with \r\n */
uart_console_write(port, s, count, bcm_console_putchar);
@@ -725,8 +718,7 @@ static void bcm_console_write(struct console *co, const char *s,
wait_for_xmitr(port);
if (locked)
- uart_port_unlock(port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
/*
diff --git a/drivers/tty/serial/fsl_linflexuart.c b/drivers/tty/serial/fsl_linflexuart.c
index 52c87876a88d..5426322b5f0c 100644
--- a/drivers/tty/serial/fsl_linflexuart.c
+++ b/drivers/tty/serial/fsl_linflexuart.c
@@ -837,7 +837,6 @@ static int linflex_probe(struct platform_device *pdev)
return ret;
sport->dev = &pdev->dev;
- sport->type = PORT_LINFLEXUART;
sport->iotype = UPIO_MEM;
sport->irq = ret;
sport->ops = &linflex_pops;
diff --git a/drivers/tty/serial/jsm/jsm_cls.c b/drivers/tty/serial/jsm/jsm_cls.c
index 1eda48964c0b..ddbd42c09637 100644
--- a/drivers/tty/serial/jsm/jsm_cls.c
+++ b/drivers/tty/serial/jsm/jsm_cls.c
@@ -395,7 +395,6 @@ static void cls_copy_data_from_uart_to_queue(struct jsm_channel *ch)
* which in this case is the break signal.
*/
if (linestatus & error_mask) {
- linestatus = 0;
readb(&ch->ch_cls_uart->txrx);
continue;
}
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index ec20329f0603..3e4ac46de1bc 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -136,20 +136,16 @@ static void lpc32xx_hsuart_console_write(struct console *co, const char *s,
int locked = 1;
touch_nmi_watchdog();
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
uart_console_write(&up->port, s, count, lpc32xx_hsuart_console_putchar);
wait_for_xmit_empty(&up->port);
if (locked)
- uart_port_unlock(&up->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init lpc32xx_hsuart_console_setup(struct console *co,
@@ -233,8 +229,6 @@ static unsigned int __serial_get_clock_div(unsigned long uartclk,
hsu_rate++;
}
- if (hsu_rate > 0xFF)
- hsu_rate = 0xFF;
return goodrate;
}
@@ -268,7 +262,8 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
tty_insert_flip_char(tport, 0, TTY_FRAME);
}
- tty_insert_flip_char(tport, (tmp & 0xFF), flag);
+ if (!uart_prepare_sysrq_char(port, tmp & 0xff))
+ tty_insert_flip_char(tport, (tmp & 0xFF), flag);
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
}
@@ -333,7 +328,7 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
__serial_lpc32xx_tx(port);
}
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 10bf6d75bf9e..14dd9cfaa9f7 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -30,6 +30,7 @@
#define MAX310X_MAJOR 204
#define MAX310X_MINOR 209
#define MAX310X_UART_NRMAX 16
+#define MAX310X_MAX_PORTS 4 /* Maximum number of UART ports per IC. */
/* MAX310X register definitions */
#define MAX310X_RHR_REG (0x00) /* RX FIFO */
@@ -66,6 +67,7 @@
#define MAX310X_BRGDIVMSB_REG (0x1d) /* Baud rate divisor MSB */
#define MAX310X_CLKSRC_REG (0x1e) /* Clock source */
#define MAX310X_REG_1F (0x1f)
+#define MAX310X_EXTREG_START (0x20) /* Only relevant in SPI mode. */
#define MAX310X_REVID_REG MAX310X_REG_1F /* Revision ID */
@@ -73,9 +75,9 @@
#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
/* Extended registers */
-#define MAX310X_SPI_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
-#define MAX310X_I2C_REVID_EXTREG (0x25) /* Revision ID */
-
+#define MAX310X_REVID_EXTREG (0x25) /* Revision ID
+ * (extended addressing space)
+ */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
#define MAX310X_IRQ_SPCHR_BIT (1 << 1) /* Special char interrupt */
@@ -160,14 +162,14 @@
#define MAX310X_IRDA_SIR_BIT (1 << 1) /* SIR mode enable */
/* Flow control trigger level register masks */
-#define MAX310X_FLOWLVL_HALT_MASK (0x000f) /* Flow control halt level */
-#define MAX310X_FLOWLVL_RES_MASK (0x00f0) /* Flow control resume level */
+#define MAX310X_FLOWLVL_HALT_MASK GENMASK(3, 0) /* Flow control halt level */
+#define MAX310X_FLOWLVL_RES_MASK GENMASK(7, 4) /* Flow control resume level */
#define MAX310X_FLOWLVL_HALT(words) ((words / 8) & 0x0f)
#define MAX310X_FLOWLVL_RES(words) (((words / 8) & 0x0f) << 4)
/* FIFO interrupt trigger level register masks */
-#define MAX310X_FIFOTRIGLVL_TX_MASK (0x0f) /* TX FIFO trigger level */
-#define MAX310X_FIFOTRIGLVL_RX_MASK (0xf0) /* RX FIFO trigger level */
+#define MAX310X_FIFOTRIGLVL_TX_MASK GENMASK(3, 0) /* TX FIFO trigger level */
+#define MAX310X_FIFOTRIGLVL_RX_MASK GENMASK(7, 4) /* RX FIFO trigger level */
#define MAX310X_FIFOTRIGLVL_TX(words) ((words / 8) & 0x0f)
#define MAX310X_FIFOTRIGLVL_RX(words) (((words / 8) & 0x0f) << 4)
@@ -177,7 +179,8 @@
#define MAX310X_FLOWCTRL_GPIADDR_BIT (1 << 2) /* Enables that GPIO inputs
* are used in conjunction with
* XOFF2 for definition of
- * special character */
+ * special character
+ */
#define MAX310X_FLOWCTRL_SWFLOWEN_BIT (1 << 3) /* Auto SW flow ctrl enable */
#define MAX310X_FLOWCTRL_SWFLOW0_BIT (1 << 4) /* SWFLOW bit 0 */
#define MAX310X_FLOWCTRL_SWFLOW1_BIT (1 << 5) /* SWFLOW bit 1
@@ -214,8 +217,8 @@
*/
/* PLL configuration register masks */
-#define MAX310X_PLLCFG_PREDIV_MASK (0x3f) /* PLL predivision value */
-#define MAX310X_PLLCFG_PLLFACTOR_MASK (0xc0) /* PLL multiplication factor */
+#define MAX310X_PLLCFG_PREDIV_MASK GENMASK(5, 0) /* PLL predivision value */
+#define MAX310X_PLLCFG_PLLFACTOR_MASK GENMASK(7, 6) /* PLL multiplication factor */
/* Baud rate generator configuration register bits */
#define MAX310X_BRGCFG_2XMODE_BIT (1 << 4) /* Double baud rate */
@@ -234,7 +237,7 @@
/* Misc definitions */
#define MAX310X_FIFO_SIZE (128)
-#define MAX310x_REV_MASK (0xf8)
+#define MAX310x_REV_MASK GENMASK(7, 3)
#define MAX310X_WRITE_BIT 0x80
/* Port startup definitions */
@@ -257,20 +260,21 @@
struct max310x_if_cfg {
int (*extended_reg_enable)(struct device *dev, bool enable);
-
- unsigned int rev_id_reg;
+ u8 rev_id_offset;
};
struct max310x_devtype {
struct {
unsigned short min;
unsigned short max;
- } slave_addr;
- char name[9];
+ } slave_addr; /* Relevant only in I2C mode. */
int nr;
+ char name[9];
u8 mode1;
- int (*detect)(struct device *);
- void (*power)(struct uart_port *, int);
+ u8 rev_id_val;
+ u8 rev_id_reg; /* Relevant only if rev_id_val is defined. */
+ u8 power_reg; /* Register address for power/sleep control. */
+ u8 power_bit; /* Bit for sleep or power-off mode (active high). */
};
struct max310x_one {
@@ -331,62 +335,52 @@ static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
regmap_update_bits(one->regmap, reg, mask, val);
}
-static int max3107_detect(struct device *dev)
+static int max310x_detect(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
unsigned int val = 0;
int ret;
- ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val);
- if (ret)
- return ret;
+ /* Check if variant supports REV ID register: */
+ if (s->devtype->rev_id_val) {
+ u8 rev_id_reg = s->devtype->rev_id_reg;
- if (((val & MAX310x_REV_MASK) != MAX3107_REV_ID)) {
- dev_err(dev,
- "%s ID 0x%02x does not match\n", s->devtype->name, val);
- return -ENODEV;
- }
-
- return 0;
-}
+ /* Check if REV ID is in extended addressing space: */
+ if (s->devtype->rev_id_reg >= MAX310X_EXTREG_START) {
+ ret = s->if_cfg->extended_reg_enable(dev, true);
+ if (ret)
+ return ret;
-static int max3108_detect(struct device *dev)
-{
- struct max310x_port *s = dev_get_drvdata(dev);
- unsigned int val = 0;
- int ret;
-
- /* MAX3108 have not REV ID register, we just check default value
- * from clocksource register to make sure everything works.
- */
- ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
- if (ret)
- return ret;
-
- if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT)) {
- dev_err(dev, "%s not present\n", s->devtype->name);
- return -ENODEV;
- }
+			/* Adjust the REV ID address for the extended addressing space: */
+			if (s->if_cfg->rev_id_offset)
+				rev_id_reg -= s->if_cfg->rev_id_offset;
+ }
- return 0;
-}
+ regmap_read(s->regmap, rev_id_reg, &val);
-static int max3109_detect(struct device *dev)
-{
- struct max310x_port *s = dev_get_drvdata(dev);
- unsigned int val = 0;
- int ret;
-
- ret = s->if_cfg->extended_reg_enable(dev, true);
- if (ret)
- return ret;
+ if (s->devtype->rev_id_reg >= MAX310X_EXTREG_START) {
+ ret = s->if_cfg->extended_reg_enable(dev, false);
+ if (ret)
+ return ret;
+ }
- regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
- s->if_cfg->extended_reg_enable(dev, false);
- if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
- dev_err(dev,
- "%s ID 0x%02x does not match\n", s->devtype->name, val);
- return -ENODEV;
+ if (((val & MAX310x_REV_MASK) != s->devtype->rev_id_val))
+ return dev_err_probe(dev, -ENODEV,
+ "%s ID 0x%02x does not match\n",
+ s->devtype->name, val);
+ } else {
+ /*
+		 * For variants without a REV ID register, just check the default
+		 * value of the clock source register to make sure everything works.
+ */
+ ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT))
+ return dev_err_probe(dev, -ENODEV,
+ "%s not present\n",
+ s->devtype->name);
}
return 0;
@@ -394,39 +388,10 @@ static int max3109_detect(struct device *dev)
static void max310x_power(struct uart_port *port, int on)
{
- max310x_port_update(port, MAX310X_MODE1_REG,
- MAX310X_MODE1_FORCESLEEP_BIT,
- on ? 0 : MAX310X_MODE1_FORCESLEEP_BIT);
- if (on)
- msleep(50);
-}
-
-static int max14830_detect(struct device *dev)
-{
- struct max310x_port *s = dev_get_drvdata(dev);
- unsigned int val = 0;
- int ret;
-
- ret = s->if_cfg->extended_reg_enable(dev, true);
- if (ret)
- return ret;
-
- regmap_read(s->regmap, s->if_cfg->rev_id_reg, &val);
- s->if_cfg->extended_reg_enable(dev, false);
- if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
- dev_err(dev,
- "%s ID 0x%02x does not match\n", s->devtype->name, val);
- return -ENODEV;
- }
-
- return 0;
-}
+ struct max310x_port *s = dev_get_drvdata(port->dev);
-static void max14830_power(struct uart_port *port, int on)
-{
- max310x_port_update(port, MAX310X_BRGCFG_REG,
- MAX14830_BRGCFG_CLKDIS_BIT,
- on ? 0 : MAX14830_BRGCFG_CLKDIS_BIT);
+ max310x_port_update(port, s->devtype->power_reg, s->devtype->power_bit,
+ on ? 0 : s->devtype->power_bit);
if (on)
msleep(50);
}
@@ -435,8 +400,10 @@ static const struct max310x_devtype max3107_devtype = {
.name = "MAX3107",
.nr = 1,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT | MAX310X_MODE1_IRQSEL_BIT,
- .detect = max3107_detect,
- .power = max310x_power,
+ .rev_id_val = MAX3107_REV_ID,
+ .rev_id_reg = MAX310X_REVID_REG,
+ .power_reg = MAX310X_MODE1_REG,
+ .power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
.slave_addr = {
.min = 0x2c,
.max = 0x2f,
@@ -447,8 +414,10 @@ static const struct max310x_devtype max3108_devtype = {
.name = "MAX3108",
.nr = 1,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
- .detect = max3108_detect,
- .power = max310x_power,
+ .rev_id_val = 0, /* Unsupported. */
+ .rev_id_reg = 0, /* Irrelevant when rev_id_val is not defined. */
+ .power_reg = MAX310X_MODE1_REG,
+ .power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
@@ -459,8 +428,10 @@ static const struct max310x_devtype max3109_devtype = {
.name = "MAX3109",
.nr = 2,
.mode1 = MAX310X_MODE1_AUTOSLEEP_BIT,
- .detect = max3109_detect,
- .power = max310x_power,
+ .rev_id_val = MAX3109_REV_ID,
+ .rev_id_reg = MAX310X_REVID_EXTREG,
+ .power_reg = MAX310X_MODE1_REG,
+ .power_bit = MAX310X_MODE1_FORCESLEEP_BIT,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
@@ -471,8 +442,10 @@ static const struct max310x_devtype max14830_devtype = {
.name = "MAX14830",
.nr = 4,
.mode1 = MAX310X_MODE1_IRQSEL_BIT,
- .detect = max14830_detect,
- .power = max14830_power,
+ .rev_id_val = MAX14830_REV_ID,
+ .rev_id_reg = MAX310X_REVID_EXTREG,
+ .power_reg = MAX310X_BRGCFG_REG,
+ .power_bit = MAX14830_BRGCFG_CLKDIS_BIT,
.slave_addr = {
.min = 0x60,
.max = 0x6f,
@@ -490,10 +463,8 @@ static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
case MAX310X_RXFIFOLVL_REG:
return false;
default:
- break;
+ return true;
}
-
- return true;
}
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
@@ -512,10 +483,8 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
case MAX310X_REG_1F:
return true;
default:
- break;
+ return false;
}
-
- return false;
}
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
@@ -527,10 +496,8 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
case MAX310X_STS_IRQSTS_REG:
return true;
default:
- break;
+ return false;
}
-
- return false;
}
static bool max310x_reg_noinc(struct device *dev, unsigned int reg)
@@ -689,7 +656,8 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
u8 ch, flag;
if (port->read_status_mask == MAX310X_LSR_RXOVR_BIT) {
- /* We are just reading, happily ignoring any error conditions.
+ /*
+ * We are just reading, happily ignoring any error conditions.
* Break condition, parity checking, framing errors -- they
* are all ignored. That means that we can do a batch-read.
*
@@ -698,7 +666,7 @@ static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
* that the LSR register applies to the "current" character.
* That's also the reason why we cannot do batched reads when
* asked to check the individual statuses.
- * */
+ */
sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
max310x_batch_read(port, one->rx_buf, rxlen);
@@ -802,8 +770,10 @@ static void max310x_handle_tx(struct uart_port *port)
to_send = (to_send > txlen) ? txlen : to_send;
if (until_end < to_send) {
- /* It's a circ buffer -- wrap around.
- * We could do that in one SPI transaction, but meh. */
+ /*
+ * It's a circ buffer -- wrap around.
+ * We could do that in one SPI transaction, but meh.
+ */
max310x_batch_write(port, xmit->buf + xmit->tail, until_end);
max310x_batch_write(port, xmit->buf, to_send - until_end);
} else {
@@ -848,6 +818,7 @@ static irqreturn_t max310x_port_irq(struct max310x_port *s, int portno)
if (ists & MAX310X_IRQ_TXEMPTY_BIT)
max310x_start_tx(port);
} while (1);
+
return res;
}
@@ -892,7 +863,8 @@ static unsigned int max310x_tx_empty(struct uart_port *port)
static unsigned int max310x_get_mctrl(struct uart_port *port)
{
- /* DCD and DSR are not wired and CTS/RTS is handled automatically
+ /*
+ * DCD and DSR are not wired and CTS/RTS is handled automatically
* so just indicate DSR and CAR asserted
*/
return TIOCM_DSR | TIOCM_CAR;
@@ -984,7 +956,8 @@ static void max310x_set_termios(struct uart_port *port,
max310x_port_write(port, MAX310X_XON1_REG, termios->c_cc[VSTART]);
max310x_port_write(port, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]);
- /* Disable transmitter before enabling AutoCTS or auto transmitter
+ /*
+ * Disable transmitter before enabling AutoCTS or auto transmitter
* flow control
*/
if (termios->c_cflag & CRTSCTS || termios->c_iflag & IXOFF) {
@@ -1011,7 +984,8 @@ static void max310x_set_termios(struct uart_port *port,
}
max310x_port_write(port, MAX310X_FLOWCTRL_REG, flow);
- /* Enable transmitter after disabling AutoCTS and auto transmitter
+ /*
+ * Enable transmitter after disabling AutoCTS and auto transmitter
* flow control
*/
if (!(termios->c_cflag & CRTSCTS) && !(termios->c_iflag & IXOFF)) {
@@ -1072,10 +1046,9 @@ static int max310x_rs485_config(struct uart_port *port, struct ktermios *termios
static int max310x_startup(struct uart_port *port)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
unsigned int val;
- s->devtype->power(port, 1);
+ max310x_power(port, 1);
/* Configure MODE1 register */
max310x_port_update(port, MAX310X_MODE1_REG,
@@ -1103,8 +1076,11 @@ static int max310x_startup(struct uart_port *port)
MAX310X_MODE2_ECHOSUPR_BIT);
}
- /* Configure flow control levels */
- /* Flow control halt level 96, resume level 48 */
+ /*
+ * Configure flow control levels:
+ * resume: 48
+ * halt: 96
+ */
max310x_port_write(port, MAX310X_FLOWLVL_REG,
MAX310X_FLOWLVL_RES(48) | MAX310X_FLOWLVL_HALT(96));
@@ -1120,12 +1096,10 @@ static int max310x_startup(struct uart_port *port)
static void max310x_shutdown(struct uart_port *port)
{
- struct max310x_port *s = dev_get_drvdata(port->dev);
-
/* Disable all interrupts */
max310x_port_write(port, MAX310X_IRQEN_REG, 0);
- s->devtype->power(port, 0);
+ max310x_power(port, 0);
}
static const char *max310x_type(struct uart_port *port)
@@ -1187,7 +1161,7 @@ static int __maybe_unused max310x_suspend(struct device *dev)
for (i = 0; i < s->devtype->nr; i++) {
uart_suspend_port(&max310x_uart, &s->p[i].port);
- s->devtype->power(&s->p[i].port, 0);
+ max310x_power(&s->p[i].port, 0);
}
return 0;
@@ -1199,7 +1173,7 @@ static int __maybe_unused max310x_resume(struct device *dev)
int i;
for (i = 0; i < s->devtype->nr; i++) {
- s->devtype->power(&s->p[i].port, 1);
+ max310x_power(&s->p[i].port, 1);
uart_resume_port(&max310x_uart, &s->p[i].port);
}
@@ -1209,7 +1183,7 @@ static int __maybe_unused max310x_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
#ifdef CONFIG_GPIOLIB
-static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
+static int max310x_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
unsigned int val;
struct max310x_port *s = gpiochip_get_data(chip);
@@ -1220,7 +1194,7 @@ static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
return !!((val >> 4) & (1 << (offset % 4)));
}
-static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+static void max310x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
@@ -1229,7 +1203,7 @@ static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
value ? 1 << (offset % 4) : 0);
}
-static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
@@ -1240,7 +1214,7 @@ static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
}
static int max310x_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
+ unsigned int offset, int value)
{
struct max310x_port *s = gpiochip_get_data(chip);
struct uart_port *port = &s->p[offset / 4].port;
@@ -1296,10 +1270,9 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
/* Alloc port structure */
s = devm_kzalloc(dev, struct_size(s, p, devtype->nr), GFP_KERNEL);
- if (!s) {
- dev_err(dev, "Error allocating port structure\n");
- return -ENOMEM;
- }
+ if (!s)
+ return dev_err_probe(dev, -ENOMEM,
+ "Error allocating port structure\n");
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
@@ -1320,8 +1293,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
if (freq == 0)
freq = uartclk;
if (freq == 0) {
- dev_err(dev, "Cannot get clock rate\n");
- ret = -EINVAL;
+ ret = dev_err_probe(dev, -EINVAL, "Cannot get clock rate\n");
goto out_clk;
}
@@ -1345,7 +1317,7 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
dev_set_drvdata(dev, s);
/* Check device to ensure we are talking to what we expect */
- ret = devtype->detect(dev);
+ ret = max310x_detect(dev);
if (ret)
goto out_clk;
@@ -1427,14 +1399,13 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
/* Register port */
ret = uart_add_one_port(&max310x_uart, &s->p[i].port);
- if (ret) {
- s->p[i].port.dev = NULL;
+ if (ret)
goto out_uart;
- }
+
set_bit(line, max310x_lines);
/* Go to suspend mode */
- devtype->power(&s->p[i].port, 0);
+ max310x_power(&s->p[i].port, 0);
}
#ifdef CONFIG_GPIOLIB
@@ -1461,14 +1432,12 @@ static int max310x_probe(struct device *dev, const struct max310x_devtype *devty
if (!ret)
return 0;
- dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+ dev_err(dev, "Unable to request IRQ %i\n", irq);
out_uart:
for (i = 0; i < devtype->nr; i++) {
- if (s->p[i].port.dev) {
+ if (test_and_clear_bit(s->p[i].port.line, max310x_lines))
uart_remove_one_port(&max310x_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, max310x_lines);
- }
}
out_clk:
@@ -1486,9 +1455,11 @@ static void max310x_remove(struct device *dev)
cancel_work_sync(&s->p[i].tx_work);
cancel_work_sync(&s->p[i].md_work);
cancel_work_sync(&s->p[i].rs_work);
- uart_remove_one_port(&max310x_uart, &s->p[i].port);
- clear_bit(s->p[i].port.line, max310x_lines);
- s->devtype->power(&s->p[i].port, 0);
+
+ if (test_and_clear_bit(s->p[i].port.line, max310x_lines))
+ uart_remove_one_port(&max310x_uart, &s->p[i].port);
+
+ max310x_power(&s->p[i].port, 0);
}
clk_disable_unprepare(s->clk);
@@ -1518,6 +1489,19 @@ static struct regmap_config regcfg = {
.max_raw_write = MAX310X_FIFO_SIZE,
};
+static const char *max310x_regmap_name(u8 port_id)
+{
+ switch (port_id) {
+ case 0: return "port0";
+ case 1: return "port1";
+ case 2: return "port2";
+ case 3: return "port3";
+ default:
+ WARN_ON(true);
+ return NULL;
+ }
+}
+
#ifdef CONFIG_SPI_MASTER
static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
{
@@ -1529,13 +1513,13 @@ static int max310x_spi_extended_reg_enable(struct device *dev, bool enable)
static const struct max310x_if_cfg __maybe_unused max310x_spi_if_cfg = {
.extended_reg_enable = max310x_spi_extended_reg_enable,
- .rev_id_reg = MAX310X_SPI_REVID_EXTREG,
+ .rev_id_offset = MAX310X_EXTREG_START,
};
static int max310x_spi_probe(struct spi_device *spi)
{
const struct max310x_devtype *devtype;
- struct regmap *regmaps[4];
+ struct regmap *regmaps[MAX310X_MAX_PORTS];
unsigned int i;
int ret;
@@ -1547,12 +1531,14 @@ static int max310x_spi_probe(struct spi_device *spi)
if (ret)
return ret;
- devtype = device_get_match_data(&spi->dev);
+ devtype = spi_get_device_match_data(spi);
if (!devtype)
- devtype = (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
+ return dev_err_probe(&spi->dev, -ENODEV, "Failed to match device\n");
for (i = 0; i < devtype->nr; i++) {
u8 port_mask = i * 0x20;
+
+ regcfg.name = max310x_regmap_name(i);
regcfg.read_flag_mask = port_mask;
regcfg.write_flag_mask = port_mask | MAX310X_WRITE_BIT;
regmaps[i] = devm_regmap_init_spi(spi, &regcfg);
@@ -1600,7 +1586,7 @@ static struct regmap_config regcfg_i2c = {
.writeable_reg = max310x_reg_writeable,
.volatile_reg = max310x_reg_volatile,
.precious_reg = max310x_reg_precious,
- .max_register = MAX310X_I2C_REVID_EXTREG,
+ .max_register = MAX310X_REVID_EXTREG,
.writeable_noinc_reg = max310x_reg_noinc,
.readable_noinc_reg = max310x_reg_noinc,
.max_raw_read = MAX310X_FIFO_SIZE,
@@ -1609,7 +1595,7 @@ static struct regmap_config regcfg_i2c = {
static const struct max310x_if_cfg max310x_i2c_if_cfg = {
.extended_reg_enable = max310x_i2c_extended_reg_enable,
- .rev_id_reg = MAX310X_I2C_REVID_EXTREG,
+ .rev_id_offset = 0, /* No offset in I2C mode. */
};
static unsigned short max310x_i2c_slave_addr(unsigned short addr,
@@ -1619,10 +1605,10 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
* For MAX14830 and MAX3109, the slave address depends on what the
* A0 and A1 pins are tied to.
* See Table I2C Address Map of the datasheet.
- * Based on that table, the following formulas were determined.
- * UART1 - UART0 = 0x10
- * UART2 - UART1 = 0x20 + 0x10
- * UART3 - UART2 = 0x10
+ * Based on that table, the following formulas were determined:
+ * UART1 - UART0 = 0x10
+ * UART2 - UART1 = 0x20 + 0x10
+ * UART3 - UART2 = 0x10
*/
addr -= nr * 0x10;
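
Applying those per-port gaps cumulatively: UART1 ends up 0x10 away from the UART0 address the device was probed at, UART2 0x40 away (0x10 + 0x30), and UART3 0x50 away, which is what the nr * 0x10 adjustment, together with an additional correction for the larger UART1/UART2 gap (presumably handled further down in this helper), works out to.
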
@@ -1635,20 +1621,24 @@ static unsigned short max310x_i2c_slave_addr(unsigned short addr,
static int max310x_i2c_probe(struct i2c_client *client)
{
- const struct max310x_devtype *devtype =
- device_get_match_data(&client->dev);
+ const struct max310x_devtype *devtype;
struct i2c_client *port_client;
- struct regmap *regmaps[4];
+ struct regmap *regmaps[MAX310X_MAX_PORTS];
unsigned int i;
u8 port_addr;
+ devtype = i2c_get_match_data(client);
+ if (!devtype)
+ return dev_err_probe(&client->dev, -ENODEV, "Failed to match device\n");
+
if (client->addr < devtype->slave_addr.min ||
- client->addr > devtype->slave_addr.max)
+ client->addr > devtype->slave_addr.max)
return dev_err_probe(&client->dev, -EINVAL,
"Slave addr 0x%x outside of range [0x%x, 0x%x]\n",
client->addr, devtype->slave_addr.min,
devtype->slave_addr.max);
+ regcfg_i2c.name = max310x_regmap_name(0);
regmaps[0] = devm_regmap_init_i2c(client, &regcfg_i2c);
for (i = 1; i < devtype->nr; i++) {
@@ -1657,6 +1647,7 @@ static int max310x_i2c_probe(struct i2c_client *client)
client->adapter,
port_addr);
+ regcfg_i2c.name = max310x_regmap_name(i);
regmaps[i] = devm_regmap_init_i2c(port_client, &regcfg_i2c);
}
@@ -1669,6 +1660,15 @@ static void max310x_i2c_remove(struct i2c_client *client)
max310x_remove(&client->dev);
}
+static const struct i2c_device_id max310x_i2c_id_table[] = {
+ { "max3107", (kernel_ulong_t)&max3107_devtype, },
+ { "max3108", (kernel_ulong_t)&max3108_devtype, },
+ { "max3109", (kernel_ulong_t)&max3109_devtype, },
+ { "max14830", (kernel_ulong_t)&max14830_devtype, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max310x_i2c_id_table);
+
static struct i2c_driver max310x_i2c_driver = {
.driver = {
.name = MAX310X_NAME,
@@ -1677,6 +1677,7 @@ static struct i2c_driver max310x_i2c_driver = {
},
.probe = max310x_i2c_probe,
.remove = max310x_i2c_remove,
+ .id_table = max310x_i2c_id_table,
};
#endif
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 8690a45239e0..b0604d6da025 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -470,33 +470,6 @@ static struct mcf_uart mcf_ports[4];
#if defined(CONFIG_SERIAL_MCF_CONSOLE)
/****************************************************************************/
-int __init early_mcf_setup(struct mcf_platform_uart *platp)
-{
- struct uart_port *port;
- int i;
-
- for (i = 0; ((i < MCF_MAXPORTS) && (platp[i].mapbase)); i++) {
- port = &mcf_ports[i].port;
-
- port->line = i;
- port->type = PORT_MCF;
- port->mapbase = platp[i].mapbase;
- port->membase = (platp[i].membase) ? platp[i].membase :
- (unsigned char __iomem *) port->mapbase;
- port->iotype = SERIAL_IO_MEM;
- port->irq = platp[i].irq;
- port->uartclk = MCF_BUSCLK;
- port->flags = UPF_BOOT_AUTOCONF;
- port->rs485_config = mcf_config_rs485;
- port->rs485_supported = mcf_rs485_supported;
- port->ops = &mcf_uart_ops;
- }
-
- return 0;
-}
-
-/****************************************************************************/
-
static void mcf_console_putc(struct console *co, const char c)
{
struct uart_port *port = &(mcf_ports + co->index)->port;
diff --git a/drivers/tty/serial/meson_uart.c b/drivers/tty/serial/meson_uart.c
index 8395688f5ee9..6feac459c0cf 100644
--- a/drivers/tty/serial/meson_uart.c
+++ b/drivers/tty/serial/meson_uart.c
@@ -220,7 +220,7 @@ static void meson_receive_chars(struct uart_port *port)
continue;
}
- if (uart_handle_sysrq_char(port, ch))
+ if (uart_prepare_sysrq_char(port, ch))
continue;
if ((status & port->ignore_status_mask) == 0)
@@ -248,7 +248,7 @@ static irqreturn_t meson_uart_interrupt(int irq, void *dev_id)
meson_uart_start_tx(port);
}
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -556,18 +556,13 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
u_int count)
{
unsigned long flags;
- int locked;
+ int locked = 1;
u32 val, tmp;
- local_irq_save(flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = uart_port_trylock(port);
- } else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
val = readl(port->membase + AML_UART_CONTROL);
tmp = val & ~(AML_UART_TX_INT_EN | AML_UART_RX_INT_EN);
@@ -577,8 +572,7 @@ static void meson_serial_port_write(struct uart_port *port, const char *s,
writel(val, port->membase + AML_UART_CONTROL);
if (locked)
- uart_port_unlock(port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void meson_serial_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index e24204ad35de..d27c4c8c84e1 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -588,16 +588,14 @@ static void msm_complete_rx_dma(void *args)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- uart_port_unlock_irqrestore(port, flags);
- sysrq = uart_handle_sysrq_char(port, dma->virt[i]);
- uart_port_lock_irqsave(port, &flags);
+ sysrq = uart_prepare_sysrq_char(port, dma->virt[i]);
if (!sysrq)
tty_insert_flip_char(tport, dma->virt[i], flag);
}
msm_start_rx_dma(msm_port);
done:
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq_irqrestore(port, flags);
if (count)
tty_flip_buffer_push(tport);
@@ -763,9 +761,7 @@ static void msm_handle_rx_dm(struct uart_port *port, unsigned int misr)
if (!(port->read_status_mask & MSM_UART_SR_RX_BREAK))
flag = TTY_NORMAL;
- uart_port_unlock(port);
- sysrq = uart_handle_sysrq_char(port, buf[i]);
- uart_port_lock(port);
+ sysrq = uart_prepare_sysrq_char(port, buf[i]);
if (!sysrq)
tty_insert_flip_char(tport, buf[i], flag);
}
@@ -825,9 +821,7 @@ static void msm_handle_rx(struct uart_port *port)
else if (sr & MSM_UART_SR_PAR_FRAME_ERR)
flag = TTY_FRAME;
- uart_port_unlock(port);
- sysrq = uart_handle_sysrq_char(port, c);
- uart_port_lock(port);
+ sysrq = uart_prepare_sysrq_char(port, c);
if (!sysrq)
tty_insert_flip_char(tport, c, flag);
}
@@ -948,11 +942,10 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
struct uart_port *port = dev_id;
struct msm_port *msm_port = to_msm_port(port);
struct msm_dma *dma = &msm_port->rx_dma;
- unsigned long flags;
unsigned int misr;
u32 val;
- uart_port_lock_irqsave(port, &flags);
+ uart_port_lock(port);
misr = msm_read(port, MSM_UART_MISR);
msm_write(port, 0, MSM_UART_IMR); /* disable interrupt */
@@ -984,7 +977,7 @@ static irqreturn_t msm_uart_irq(int irq, void *dev_id)
msm_handle_delta_cts(port);
msm_write(port, msm_port->imr, MSM_UART_IMR); /* restore interrupt */
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -1621,14 +1614,10 @@ static void __msm_console_write(struct uart_port *port, const char *s,
num_newlines++;
count += num_newlines;
- local_irq_save(flags);
-
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
else
- uart_port_lock(port);
+ uart_port_lock_irqsave(port, &flags);
if (is_uartdm)
msm_reset_dm_count(port, count);
@@ -1667,9 +1656,7 @@ static void __msm_console_write(struct uart_port *port, const char *s,
}
if (locked)
- uart_port_unlock(port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void msm_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index f5a0b401af63..9be1c871cf11 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -508,7 +508,7 @@ static void serial_omap_rdi(struct uart_omap_port *up, unsigned int lsr)
up->port.icount.rx++;
- if (uart_handle_sysrq_char(&up->port, ch))
+ if (uart_prepare_sysrq_char(&up->port, ch))
return;
uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, TTY_NORMAL);
@@ -563,7 +563,7 @@ static irqreturn_t serial_omap_irq(int irq, void *dev_id)
}
} while (max_count--);
- uart_port_unlock(&up->port);
+ uart_unlock_and_check_sysrq(&up->port);
tty_flip_buffer_push(&up->port.state->port);
@@ -1212,13 +1212,10 @@ serial_omap_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -1245,8 +1242,7 @@ serial_omap_console_write(struct console *co, const char *s,
check_modem_status(up);
if (locked)
- uart_port_unlock(&up->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
}
static int __init
diff --git a/drivers/tty/serial/owl-uart.c b/drivers/tty/serial/owl-uart.c
index d9fe85397741..8b60ac0ad7cd 100644
--- a/drivers/tty/serial/owl-uart.c
+++ b/drivers/tty/serial/owl-uart.c
@@ -199,6 +199,7 @@ static void owl_uart_receive_chars(struct uart_port *port)
stat = owl_uart_read(port, OWL_UART_STAT);
while (!(stat & OWL_UART_STAT_RFEM)) {
char flag = TTY_NORMAL;
+ bool sysrq;
if (stat & OWL_UART_STAT_RXER)
port->icount.overrun++;
@@ -217,7 +218,9 @@ static void owl_uart_receive_chars(struct uart_port *port)
val = owl_uart_read(port, OWL_UART_RXDAT);
val &= 0xff;
- if ((stat & port->ignore_status_mask) == 0)
+ sysrq = uart_prepare_sysrq_char(port, val);
+
+ if (!sysrq && (stat & port->ignore_status_mask) == 0)
tty_insert_flip_char(&port->state->port, val, flag);
stat = owl_uart_read(port, OWL_UART_STAT);
@@ -229,10 +232,9 @@ static void owl_uart_receive_chars(struct uart_port *port)
static irqreturn_t owl_uart_irq(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned long flags;
u32 stat;
- uart_port_lock_irqsave(port, &flags);
+ uart_port_lock(port);
stat = owl_uart_read(port, OWL_UART_STAT);
@@ -246,7 +248,7 @@ static irqreturn_t owl_uart_irq(int irq, void *dev_id)
stat |= OWL_UART_STAT_RIP | OWL_UART_STAT_TIP;
owl_uart_write(port, stat, OWL_UART_STAT);
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -508,18 +510,12 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
{
u32 old_ctl, val;
unsigned long flags;
- int locked;
+ int locked = 1;
- local_irq_save(flags);
-
- if (port->sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(port);
- else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
old_ctl = owl_uart_read(port, OWL_UART_CTL);
val = old_ctl | OWL_UART_CTL_TRFS_TX;
@@ -541,9 +537,7 @@ static void owl_uart_port_write(struct uart_port *port, const char *s,
owl_uart_write(port, old_ctl, OWL_UART_CTL);
if (locked)
- uart_port_unlock(port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void owl_uart_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 436cc6d52a11..89257cddf540 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -237,9 +237,6 @@ struct eg20t_port {
#define IRQ_NAME_SIZE 17
char irq_name[IRQ_NAME_SIZE];
-
- /* protect the eg20t_port private structure and io access to membase */
- spinlock_t lock;
};
/**
@@ -567,7 +564,7 @@ static int pch_uart_hal_read(struct eg20t_port *priv, unsigned char *buf,
if (uart_handle_break(port))
continue;
}
- if (uart_handle_sysrq_char(port, rbr))
+ if (uart_prepare_sysrq_char(port, rbr))
continue;
buf[i++] = rbr;
@@ -599,16 +596,14 @@ static void pch_uart_hal_set_break(struct eg20t_port *priv, int on)
iowrite8(lcr, priv->membase + UART_LCR);
}
-static int push_rx(struct eg20t_port *priv, const unsigned char *buf,
- int size)
+static void push_rx(struct eg20t_port *priv, const unsigned char *buf,
+ int size)
{
struct uart_port *port = &priv->port;
struct tty_port *tport = &port->state->port;
tty_insert_flip_string(tport, buf, size);
tty_flip_buffer_push(tport);
-
- return 0;
}
static int dma_push_rx(struct eg20t_port *priv, int size)
@@ -761,7 +756,7 @@ static int handle_rx_to(struct eg20t_port *priv)
{
struct pch_uart_buffer *buf;
int rx_size;
- int ret;
+
if (!priv->start_rx) {
pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_RX_INT |
PCH_UART_HAL_RX_ERR_INT);
@@ -770,19 +765,12 @@ static int handle_rx_to(struct eg20t_port *priv)
buf = &priv->rxbuf;
do {
rx_size = pch_uart_hal_read(priv, buf->buf, buf->size);
- ret = push_rx(priv, buf->buf, rx_size);
- if (ret)
- return 0;
+ push_rx(priv, buf->buf, rx_size);
} while (rx_size == buf->size);
return PCH_UART_HANDLED_RX_INT;
}
-static int handle_rx(struct eg20t_port *priv)
-{
- return handle_rx_to(priv);
-}
-
static int dma_handle_rx(struct eg20t_port *priv)
{
struct uart_port *port = &priv->port;
@@ -1019,11 +1007,10 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
u8 lsr;
int ret = 0;
unsigned char iid;
- unsigned long flags;
int next = 1;
u8 msr;
- spin_lock_irqsave(&priv->lock, flags);
+ uart_port_lock(&priv->port);
handled = 0;
while (next) {
iid = pch_uart_hal_get_iid(priv);
@@ -1051,7 +1038,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
PCH_UART_HAL_RX_INT |
PCH_UART_HAL_RX_ERR_INT);
} else {
- ret = handle_rx(priv);
+ ret = handle_rx_to(priv);
}
break;
case PCH_UART_IID_RDR_TO: /* Received Data Ready
@@ -1083,7 +1070,7 @@ static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
handled |= (unsigned int)ret;
}
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_unlock_and_check_sysrq(&priv->port);
return IRQ_RETVAL(handled);
}
@@ -1194,9 +1181,9 @@ static void pch_uart_break_ctl(struct uart_port *port, int ctl)
unsigned long flags;
priv = container_of(port, struct eg20t_port, port);
- spin_lock_irqsave(&priv->lock, flags);
+ uart_port_lock_irqsave(&priv->port, &flags);
pch_uart_hal_set_break(priv, ctl);
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_port_unlock_irqrestore(&priv->port, flags);
}
/* Grab any interrupt resources and initialise any low level driver state. */
@@ -1346,8 +1333,7 @@ static void pch_uart_set_termios(struct uart_port *port,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16);
- spin_lock_irqsave(&priv->lock, flags);
- uart_port_lock(port);
+ uart_port_lock_irqsave(port, &flags);
uart_update_timeout(port, termios->c_cflag, baud);
rtn = pch_uart_hal_set_line(priv, baud, parity, bits, stb);
@@ -1360,8 +1346,7 @@ static void pch_uart_set_termios(struct uart_port *port,
tty_termios_encode_baud_rate(termios, baud, baud);
out:
- uart_port_unlock(port);
- spin_unlock_irqrestore(&priv->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static const char *pch_uart_type(struct uart_port *port)
@@ -1565,27 +1550,17 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
{
struct eg20t_port *priv;
unsigned long flags;
- int priv_locked = 1;
- int port_locked = 1;
+ int locked = 1;
u8 ier;
priv = pch_uart_ports[co->index];
touch_nmi_watchdog();
- local_irq_save(flags);
- if (priv->port.sysrq) {
- /* call to uart_handle_sysrq_char already took the priv lock */
- priv_locked = 0;
- /* serial8250_handle_port() already took the port lock */
- port_locked = 0;
- } else if (oops_in_progress) {
- priv_locked = spin_trylock(&priv->lock);
- port_locked = uart_port_trylock(&priv->port);
- } else {
- spin_lock(&priv->lock);
- uart_port_lock(&priv->port);
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&priv->port, &flags);
+ else
+ uart_port_lock_irqsave(&priv->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -1603,11 +1578,8 @@ pch_console_write(struct console *co, const char *s, unsigned int count)
wait_for_xmitr(priv, UART_LSR_BOTH_EMPTY);
iowrite8(ier, priv->membase + UART_IER);
- if (port_locked)
- uart_port_unlock(&priv->port);
- if (priv_locked)
- spin_unlock(&priv->lock);
- local_irq_restore(flags);
+ if (locked)
+ uart_port_unlock_irqrestore(&priv->port, flags);
}
static int __init pch_console_setup(struct console *co, char *options)
@@ -1704,8 +1676,6 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
pci_enable_msi(pdev);
pci_set_master(pdev);
- spin_lock_init(&priv->lock);
-
iobase = pci_resource_start(pdev, 0);
mapbase = pci_resource_start(pdev, 1);
priv->mapbase = mapbase;
@@ -1735,8 +1705,6 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
KBUILD_MODNAME ":" PCH_UART_DRIVER_DEVICE "%d",
priv->port.line);
- spin_lock_init(&priv->port.lock);
-
pci_set_drvdata(pdev, priv);
priv->trigger_level = 1;
priv->fcr = 0;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 732d821db4f8..05d97e89511e 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1714,18 +1714,13 @@ static int __init pmz_attach(struct platform_device *pdev)
return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
-static int __exit pmz_detach(struct platform_device *pdev)
+static void __exit pmz_detach(struct platform_device *pdev)
{
struct uart_pmac_port *uap = platform_get_drvdata(pdev);
- if (!uap)
- return -ENODEV;
-
uart_remove_one_port(&pmz_uart_reg, &uap->port);
uap->port.dev = NULL;
-
- return 0;
}
#endif /* !CONFIG_PPC_PMAC */
@@ -1794,7 +1789,7 @@ static struct macio_driver pmz_driver = {
#else
static struct platform_driver pmz_driver = {
- .remove = __exit_p(pmz_detach),
+ .remove_new = __exit_p(pmz_detach),
.driver = {
.name = "scc",
},
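
The pmz_detach change is part of the tree-wide move to the void-returning platform remove callback. A standalone sketch (driver name and contents hypothetical) of the shape .remove_new expects:

#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static void foo_remove(struct platform_device *pdev)
{
	/*
	 * Returning an error from remove was never useful - the device is
	 * going away regardless - so the new callback returns void and any
	 * failure has to be handled (or at least logged) right here.
	 */
}

static struct platform_driver foo_driver = {
	.probe		= foo_probe,
	.remove_new	= foo_remove,
	.driver = {
		.name = "foo-uart",
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");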
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 46e70e155aab..e395ff29c1a2 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -151,7 +151,7 @@ static inline void receive_chars(struct uart_pxa_port *up, int *status)
flag = TTY_FRAME;
}
- if (uart_handle_sysrq_char(&up->port, ch))
+ if (uart_prepare_sysrq_char(&up->port, ch))
goto ignore_char;
uart_insert_char(&up->port, *status, UART_LSR_OE, ch, flag);
@@ -232,7 +232,7 @@ static inline irqreturn_t serial_pxa_irq(int irq, void *dev_id)
check_modem_status(up);
if (lsr & UART_LSR_THRE)
transmit_chars(up);
- uart_port_unlock(&up->port);
+ uart_unlock_and_check_sysrq(&up->port);
return IRQ_HANDLED;
}
@@ -604,13 +604,10 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
int locked = 1;
clk_enable(up->clk);
- local_irq_save(flags);
- if (up->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&up->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&up->port, &flags);
else
- uart_port_lock(&up->port);
+ uart_port_lock_irqsave(&up->port, &flags);
/*
* First save the IER then disable the interrupts
@@ -628,10 +625,8 @@ serial_pxa_console_write(struct console *co, const char *s, unsigned int count)
serial_out(up, UART_IER, ier);
if (locked)
- uart_port_unlock(&up->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&up->port, flags);
clk_disable(up->clk);
-
}
#ifdef CONFIG_CONSOLE_POLL
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 99e08737f293..f9f7ac1a10df 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -488,18 +488,16 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
geni_status = readl(uport->membase + SE_GENI_STATUS);
- /* Cancel the current write to log the fault */
if (!locked) {
- geni_se_cancel_m_cmd(&port->se);
- if (!qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_CANCEL_EN, true)) {
- geni_se_abort_m_cmd(&port->se);
- qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
- M_CMD_ABORT_EN, true);
- writel(M_CMD_ABORT_EN, uport->membase +
- SE_GENI_M_IRQ_CLEAR);
- }
- writel(M_CMD_CANCEL_EN, uport->membase + SE_GENI_M_IRQ_CLEAR);
+ /*
+ * We can only get here if an oops is in progress and we were
+ * unable to get the lock. This means we can't safely access
+ * our state variables like tx_remaining. About the best we
+ * can do is wait for the FIFO to be empty before we start our
+ * transfer, so we'll do that.
+ */
+ qcom_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+ M_TX_FIFO_NOT_EMPTY_EN, false);
} else if ((geni_status & M_GENI_CMD_ACTIVE) && !port->tx_remaining) {
/*
* It seems we can't interrupt existing transfers if all data
@@ -516,11 +514,12 @@ static void qcom_geni_serial_console_write(struct console *co, const char *s,
__qcom_geni_serial_console_write(uport, s, count);
- if (port->tx_remaining)
- qcom_geni_serial_setup_tx(uport, port->tx_remaining);
- if (locked)
+ if (locked) {
+ if (port->tx_remaining)
+ qcom_geni_serial_setup_tx(uport, port->tx_remaining);
uart_port_unlock_irqrestore(uport, flags);
+ }
}
static void handle_rx_console(struct uart_port *uport, u32 bytes, bool drop)
diff --git a/drivers/tty/serial/rda-uart.c b/drivers/tty/serial/rda-uart.c
index 13deb355cf1b..82def9b8632a 100644
--- a/drivers/tty/serial/rda-uart.c
+++ b/drivers/tty/serial/rda-uart.c
@@ -394,7 +394,8 @@ static void rda_uart_receive_chars(struct uart_port *port)
val &= 0xff;
port->icount.rx++;
- tty_insert_flip_char(&port->state->port, val, flag);
+ if (!uart_prepare_sysrq_char(port, val))
+ tty_insert_flip_char(&port->state->port, val, flag);
status = rda_uart_read(port, RDA_UART_STATUS);
}
@@ -405,10 +406,9 @@ static void rda_uart_receive_chars(struct uart_port *port)
static irqreturn_t rda_interrupt(int irq, void *dev_id)
{
struct uart_port *port = dev_id;
- unsigned long flags;
u32 val, irq_mask;
- uart_port_lock_irqsave(port, &flags);
+ uart_port_lock(port);
/* Clear IRQ cause */
val = rda_uart_read(port, RDA_UART_IRQ_CAUSE);
@@ -425,7 +425,7 @@ static irqreturn_t rda_interrupt(int irq, void *dev_id)
rda_uart_send_chars(port);
}
- uart_port_unlock_irqrestore(port, flags);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -590,18 +590,12 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
{
u32 old_irq_mask;
unsigned long flags;
- int locked;
-
- local_irq_save(flags);
+ int locked = 1;
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = uart_port_trylock(port);
- } else {
- uart_port_lock(port);
- locked = 1;
- }
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(port, &flags);
+ else
+ uart_port_lock_irqsave(port, &flags);
old_irq_mask = rda_uart_read(port, RDA_UART_IRQ_MASK);
rda_uart_write(port, 0, RDA_UART_IRQ_MASK);
@@ -615,9 +609,7 @@ static void rda_uart_port_write(struct uart_port *port, const char *s,
rda_uart_write(port, old_irq_mask, RDA_UART_IRQ_MASK);
if (locked)
- uart_port_unlock(port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(port, flags);
}
static void rda_uart_console_write(struct console *co, const char *s,
diff --git a/drivers/tty/serial/samsung_tty.c b/drivers/tty/serial/samsung_tty.c
index 71d17d804fda..a2d07e05c502 100644
--- a/drivers/tty/serial/samsung_tty.c
+++ b/drivers/tty/serial/samsung_tty.c
@@ -21,26 +21,28 @@
* BJD, 04-Nov-2004
*/
-#include <linux/dmaengine.h>
+#include <linux/console.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
-#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/math.h>
#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/serial_s3c.h>
+#include <linux/slab.h>
#include <linux/sysrq.h>
-#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/serial_s3c.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
+#include <linux/types.h>
+
#include <asm/irq.h>
/* UART name and device definitions */
@@ -73,21 +75,21 @@ struct s3c24xx_uart_info {
enum s3c24xx_port_type type;
unsigned int port_type;
unsigned int fifosize;
- unsigned long rx_fifomask;
- unsigned long rx_fifoshift;
- unsigned long rx_fifofull;
- unsigned long tx_fifomask;
- unsigned long tx_fifoshift;
- unsigned long tx_fifofull;
- unsigned int def_clk_sel;
- unsigned long num_clks;
- unsigned long clksel_mask;
- unsigned long clksel_shift;
- unsigned long ucon_mask;
+ u32 rx_fifomask;
+ u32 rx_fifoshift;
+ u32 rx_fifofull;
+ u32 tx_fifomask;
+ u32 tx_fifoshift;
+ u32 tx_fifofull;
+ u32 clksel_mask;
+ u32 clksel_shift;
+ u32 ucon_mask;
+ u8 def_clk_sel;
+ u8 num_clks;
+ u8 iotype;
/* uart port features */
-
- unsigned int has_divslot:1;
+ bool has_divslot;
};
struct s3c24xx_serial_drv_data {
@@ -196,7 +198,7 @@ static void wr_reg(const struct uart_port *port, u32 reg, u32 val)
/* Byte-order aware bit setting/clearing functions. */
static inline void s3c24xx_set_bit(const struct uart_port *port, int idx,
- unsigned int reg)
+ u32 reg)
{
unsigned long flags;
u32 val;
@@ -209,7 +211,7 @@ static inline void s3c24xx_set_bit(const struct uart_port *port, int idx,
}
static inline void s3c24xx_clear_bit(const struct uart_port *port, int idx,
- unsigned int reg)
+ u32 reg)
{
unsigned long flags;
u32 val;
@@ -233,7 +235,7 @@ static inline const char *s3c24xx_serial_portname(const struct uart_port *port)
return to_platform_device(port->dev)->name;
}
-static int s3c24xx_serial_txempty_nofifo(const struct uart_port *port)
+static bool s3c24xx_serial_txempty_nofifo(const struct uart_port *port)
{
return rd_regl(port, S3C2410_UTRSTAT) & S3C2410_UTRSTAT_TXE;
}
@@ -242,8 +244,8 @@ static void s3c24xx_serial_rx_enable(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ucon, ufcon;
int count = 10000;
+ u32 ucon, ufcon;
uart_port_lock_irqsave(port, &flags);
@@ -266,7 +268,7 @@ static void s3c24xx_serial_rx_disable(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ucon;
+ u32 ucon;
uart_port_lock_irqsave(port, &flags);
@@ -587,8 +589,8 @@ static inline const struct s3c2410_uartcfg
return ourport->cfg;
}
-static int s3c24xx_serial_rx_fifocnt(const struct s3c24xx_uart_port *ourport,
- unsigned long ufstat)
+static unsigned int
+s3c24xx_serial_rx_fifocnt(const struct s3c24xx_uart_port *ourport, u32 ufstat)
{
const struct s3c24xx_uart_info *info = ourport->info;
@@ -660,7 +662,7 @@ static void s3c64xx_start_rx_dma(struct s3c24xx_uart_port *ourport)
static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ucon;
+ u32 ucon;
/* set Rx mode to DMA mode */
ucon = rd_regl(port, S3C2410_UCON);
@@ -683,7 +685,7 @@ static void enable_rx_dma(struct s3c24xx_uart_port *ourport)
static void enable_rx_pio(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ucon;
+ u32 ucon;
/* set Rx mode to DMA mode */
ucon = rd_regl(port, S3C2410_UCON);
@@ -708,13 +710,14 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport);
static irqreturn_t s3c24xx_serial_rx_chars_dma(void *dev_id)
{
- unsigned int utrstat, received;
struct s3c24xx_uart_port *ourport = dev_id;
struct uart_port *port = &ourport->port;
struct s3c24xx_uart_dma *dma = ourport->dma;
struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);
struct tty_port *t = &port->state->port;
struct dma_tx_state state;
+ unsigned int received;
+ u32 utrstat;
utrstat = rd_regl(port, S3C2410_UTRSTAT);
rd_regl(port, S3C2410_UFSTAT);
@@ -756,9 +759,9 @@ finish:
static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
{
struct uart_port *port = &ourport->port;
- unsigned int ufcon, ufstat, uerstat;
+ unsigned int max_count = port->fifosize;
unsigned int fifocnt = 0;
- int max_count = port->fifosize;
+ u32 ufcon, ufstat, uerstat;
u8 ch, flag;
while (max_count-- > 0) {
@@ -778,7 +781,7 @@ static void s3c24xx_serial_rx_drain_fifo(struct s3c24xx_uart_port *ourport)
ch = rd_reg(port, S3C2410_URXH);
if (port->flags & UPF_CONS_FLOW) {
- int txe = s3c24xx_serial_txempty_nofifo(port);
+ bool txe = s3c24xx_serial_txempty_nofifo(port);
if (ourport->rx_enabled) {
if (!txe) {
@@ -942,7 +945,7 @@ static irqreturn_t s3c64xx_serial_handle_irq(int irq, void *id)
{
const struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
- unsigned int pend = rd_regl(port, S3C64XX_UINTP);
+ u32 pend = rd_regl(port, S3C64XX_UINTP);
irqreturn_t ret = IRQ_HANDLED;
if (pend & S3C64XX_UINTM_RXD_MSK) {
@@ -961,7 +964,7 @@ static irqreturn_t apple_serial_handle_irq(int irq, void *id)
{
const struct s3c24xx_uart_port *ourport = id;
const struct uart_port *port = &ourport->port;
- unsigned int pend = rd_regl(port, S3C2410_UTRSTAT);
+ u32 pend = rd_regl(port, S3C2410_UTRSTAT);
irqreturn_t ret = IRQ_NONE;
if (pend & (APPLE_S5L_UTRSTAT_RXTHRESH | APPLE_S5L_UTRSTAT_RXTO)) {
@@ -980,24 +983,23 @@ static irqreturn_t apple_serial_handle_irq(int irq, void *id)
static unsigned int s3c24xx_serial_tx_empty(struct uart_port *port)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned long ufstat = rd_regl(port, S3C2410_UFSTAT);
- unsigned long ufcon = rd_regl(port, S3C2410_UFCON);
+ u32 ufstat = rd_regl(port, S3C2410_UFSTAT);
+ u32 ufcon = rd_regl(port, S3C2410_UFCON);
if (ufcon & S3C2410_UFCON_FIFOMODE) {
- if ((ufstat & info->tx_fifomask) != 0 ||
+ if ((ufstat & info->tx_fifomask) ||
(ufstat & info->tx_fifofull))
return 0;
-
- return 1;
+ return TIOCSER_TEMT;
}
- return s3c24xx_serial_txempty_nofifo(port);
+ return s3c24xx_serial_txempty_nofifo(port) ? TIOCSER_TEMT : 0;
}
/* no modem control lines */
static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
{
- unsigned int umstat = rd_reg(port, S3C2410_UMSTAT);
+ u32 umstat = rd_reg(port, S3C2410_UMSTAT);
if (umstat & S3C2410_UMSTAT_CTS)
return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
@@ -1007,8 +1009,8 @@ static unsigned int s3c24xx_serial_get_mctrl(struct uart_port *port)
static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- unsigned int umcon = rd_regl(port, S3C2410_UMCON);
- unsigned int ucon = rd_regl(port, S3C2410_UCON);
+ u32 umcon = rd_regl(port, S3C2410_UMCON);
+ u32 ucon = rd_regl(port, S3C2410_UCON);
if (mctrl & TIOCM_RTS)
umcon |= S3C2410_UMCOM_RTS_LOW;
@@ -1028,7 +1030,7 @@ static void s3c24xx_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void s3c24xx_serial_break_ctl(struct uart_port *port, int break_state)
{
unsigned long flags;
- unsigned int ucon;
+ u32 ucon;
uart_port_lock_irqsave(port, &flags);
@@ -1186,7 +1188,7 @@ static void apple_s5l_serial_shutdown(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned int ucon;
+ u32 ucon;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
@@ -1212,7 +1214,7 @@ static int s3c64xx_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ufcon;
+ u32 ufcon;
int ret;
wr_regl(port, S3C64XX_UINTM, 0xf);
@@ -1257,7 +1259,7 @@ static int apple_s5l_serial_startup(struct uart_port *port)
{
struct s3c24xx_uart_port *ourport = to_ourport(port);
unsigned long flags;
- unsigned int ufcon;
+ u32 ufcon;
int ret;
wr_regl(port, S3C2410_UTRSTAT, APPLE_S5L_UTRSTAT_ALL_FLAGS);
@@ -1292,8 +1294,6 @@ static int apple_s5l_serial_startup(struct uart_port *port)
return ret;
}
-/* power power management control */
-
static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
unsigned int old)
{
@@ -1339,10 +1339,10 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
#define MAX_CLK_NAME_LENGTH 15
-static inline int s3c24xx_serial_getsource(struct uart_port *port)
+static inline u8 s3c24xx_serial_getsource(struct uart_port *port)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned int ucon;
+ u32 ucon;
if (info->num_clks == 1)
return 0;
@@ -1352,11 +1352,10 @@ static inline int s3c24xx_serial_getsource(struct uart_port *port)
return ucon >> info->clksel_shift;
}
-static void s3c24xx_serial_setsource(struct uart_port *port,
- unsigned int clk_sel)
+static void s3c24xx_serial_setsource(struct uart_port *port, u8 clk_sel)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned int ucon;
+ u32 ucon;
if (info->num_clks == 1)
return;
@@ -1372,14 +1371,15 @@ static void s3c24xx_serial_setsource(struct uart_port *port,
static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
unsigned int req_baud, struct clk **best_clk,
- unsigned int *clk_num)
+ u8 *clk_num)
{
const struct s3c24xx_uart_info *info = ourport->info;
struct clk *clk;
unsigned long rate;
- unsigned int cnt, baud, quot, best_quot = 0;
+ unsigned int baud, quot, best_quot = 0;
char clkname[MAX_CLK_NAME_LENGTH];
int calc_deviation, deviation = (1 << 30) - 1;
+ u8 cnt;
for (cnt = 0; cnt < info->num_clks; cnt++) {
/* Keep selected clock if provided */
@@ -1472,10 +1472,10 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
struct s3c24xx_uart_port *ourport = to_ourport(port);
struct clk *clk = ERR_PTR(-EINVAL);
unsigned long flags;
- unsigned int baud, quot, clk_sel = 0;
- unsigned int ulcon;
- unsigned int umcon;
+ unsigned int baud, quot;
unsigned int udivslot = 0;
+ u32 ulcon, umcon;
+ u8 clk_sel = 0;
/*
* We don't support modem control lines.
@@ -1737,12 +1737,12 @@ static struct uart_driver s3c24xx_uart_drv = {
static struct s3c24xx_uart_port s3c24xx_serial_ports[UART_NR];
-static void s3c24xx_serial_init_port_default(int index) {
+static void s3c24xx_serial_init_port_default(int index)
+{
struct uart_port *port = &s3c24xx_serial_ports[index].port;
spin_lock_init(&port->lock);
- port->iotype = UPIO_MEM;
port->uartclk = 0;
port->fifosize = 16;
port->flags = UPF_BOOT_AUTOCONF;
@@ -1758,7 +1758,7 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
const struct s3c2410_uartcfg *cfg)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ u32 ucon = rd_regl(port, S3C2410_UCON);
ucon &= (info->clksel_mask | info->ucon_mask);
wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
@@ -1776,10 +1776,9 @@ static int s3c24xx_serial_enable_baudclk(struct s3c24xx_uart_port *ourport)
struct device *dev = ourport->port.dev;
const struct s3c24xx_uart_info *info = ourport->info;
char clk_name[MAX_CLK_NAME_LENGTH];
- unsigned int clk_sel;
struct clk *clk;
- int clk_num;
int ret;
+ u8 clk_sel, clk_num;
clk_sel = ourport->cfg->clk_sel ? : info->def_clk_sel;
for (clk_num = 0; clk_num < info->num_clks; clk_num++) {
@@ -1904,7 +1903,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
wr_regl(port, S3C64XX_UINTSP, 0xf);
break;
case TYPE_APPLE_S5L: {
- unsigned int ucon;
+ u32 ucon;
ucon = rd_regl(port, S3C2410_UCON);
ucon &= ~(APPLE_S5L_UCON_TXTHRESH_ENA_MSK |
@@ -1952,7 +1951,7 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct s3c24xx_uart_port *ourport;
int index = probe_index;
- int ret, prop = 0;
+ int ret, prop = 0, fifosize_prop = 1;
if (np) {
ret = of_alias_get_id(np, "serial");
@@ -1989,9 +1988,11 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
break;
}
+ ourport->port.iotype = ourport->info->iotype;
+
if (np) {
- of_property_read_u32(np,
- "samsung,uart-fifosize", &ourport->port.fifosize);
+ fifosize_prop = of_property_read_u32(np, "samsung,uart-fifosize",
+ &ourport->port.fifosize);
if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
switch (prop) {
@@ -2009,10 +2010,13 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
}
}
- if (ourport->drv_data->fifosize[index])
- ourport->port.fifosize = ourport->drv_data->fifosize[index];
- else if (ourport->info->fifosize)
- ourport->port.fifosize = ourport->info->fifosize;
+ if (fifosize_prop) {
+ if (ourport->drv_data->fifosize[index])
+ ourport->port.fifosize = ourport->drv_data->fifosize[index];
+ else if (ourport->info->fifosize)
+ ourport->port.fifosize = ourport->info->fifosize;
+ }
+
ourport->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_SAMSUNG_CONSOLE);
/*
@@ -2058,9 +2062,8 @@ static void s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
- if (port) {
+ if (port)
uart_remove_one_port(&s3c24xx_uart_drv, port);
- }
uart_unregister_driver(&s3c24xx_uart_drv);
}
@@ -2106,7 +2109,7 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
/* restore IRQ mask */
switch (ourport->info->type) {
case TYPE_S3C6400: {
- unsigned int uintm = 0xf;
+ u32 uintm = 0xf;
if (ourport->tx_enabled)
uintm &= ~S3C64XX_UINTM_TXD_MSK;
@@ -2122,7 +2125,7 @@ static int s3c24xx_serial_resume_noirq(struct device *dev)
break;
}
case TYPE_APPLE_S5L: {
- unsigned int ucon;
+ u32 ucon;
int ret;
ret = clk_prepare_enable(ourport->clk);
@@ -2183,27 +2186,27 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
static struct uart_port *cons_uart;
-static int
-s3c24xx_serial_console_txrdy(struct uart_port *port, unsigned int ufcon)
+static bool
+s3c24xx_serial_console_txrdy(struct uart_port *port, u32 ufcon)
{
const struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
- unsigned long ufstat, utrstat;
+ u32 ufstat, utrstat;
if (ufcon & S3C2410_UFCON_FIFOMODE) {
/* fifo mode - check amount of data in fifo registers... */
ufstat = rd_regl(port, S3C2410_UFSTAT);
- return (ufstat & info->tx_fifofull) ? 0 : 1;
+ return !(ufstat & info->tx_fifofull);
}
/* in non-fifo mode, we go and use the tx buffer empty */
utrstat = rd_regl(port, S3C2410_UTRSTAT);
- return (utrstat & S3C2410_UTRSTAT_TXE) ? 1 : 0;
+ return utrstat & S3C2410_UTRSTAT_TXE;
}
static bool
-s3c24xx_port_configured(unsigned int ucon)
+s3c24xx_port_configured(u32 ucon)
{
/* consider the serial port configured if the tx/rx mode set */
return (ucon & 0xf) != 0;
@@ -2218,7 +2221,7 @@ s3c24xx_port_configured(unsigned int ucon)
static int s3c24xx_serial_get_poll_char(struct uart_port *port)
{
const struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned int ufstat;
+ u32 ufstat;
ufstat = rd_regl(port, S3C2410_UFSTAT);
if (s3c24xx_serial_rx_fifocnt(ourport, ufstat) == 0)
@@ -2230,8 +2233,8 @@ static int s3c24xx_serial_get_poll_char(struct uart_port *port)
static void s3c24xx_serial_put_poll_char(struct uart_port *port,
unsigned char c)
{
- unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
- unsigned int ucon = rd_regl(port, S3C2410_UCON);
+ u32 ufcon = rd_regl(port, S3C2410_UFCON);
+ u32 ucon = rd_regl(port, S3C2410_UCON);
/* not possible to xmit on unconfigured port */
if (!s3c24xx_port_configured(ucon))
@@ -2247,7 +2250,7 @@ static void s3c24xx_serial_put_poll_char(struct uart_port *port,
static void
s3c24xx_serial_console_putchar(struct uart_port *port, unsigned char ch)
{
- unsigned int ufcon = rd_regl(port, S3C2410_UFCON);
+ u32 ufcon = rd_regl(port, S3C2410_UFCON);
while (!s3c24xx_serial_console_txrdy(port, ufcon))
cpu_relax();
@@ -2258,7 +2261,7 @@ static void
s3c24xx_serial_console_write(struct console *co, const char *s,
unsigned int count)
{
- unsigned int ucon = rd_regl(cons_uart, S3C2410_UCON);
+ u32 ucon = rd_regl(cons_uart, S3C2410_UCON);
unsigned long flags;
bool locked = true;
@@ -2285,12 +2288,10 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
struct clk *clk;
- unsigned int ulcon;
- unsigned int ucon;
- unsigned int ubrdiv;
unsigned long rate;
- unsigned int clk_sel;
+ u32 ulcon, ucon, ubrdiv;
char clk_name[MAX_CLK_NAME_LENGTH];
+ u8 clk_sel;
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
@@ -2399,8 +2400,9 @@ static const struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
.name = "Samsung S3C6400 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
+ .iotype = UPIO_MEM,
.fifosize = 64,
- .has_divslot = 1,
+ .has_divslot = true,
.rx_fifomask = S3C2440_UFSTAT_RXMASK,
.rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
.rx_fifofull = S3C2440_UFSTAT_RXFULL,
@@ -2428,7 +2430,8 @@ static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
.name = "Samsung S5PV210 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
- .has_divslot = 1,
+ .iotype = UPIO_MEM,
+ .has_divslot = true,
.rx_fifomask = S5PV210_UFSTAT_RXMASK,
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
.rx_fifofull = S5PV210_UFSTAT_RXFULL,
@@ -2452,12 +2455,13 @@ static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
-#define EXYNOS_COMMON_SERIAL_DRV_DATA() \
+#define EXYNOS_COMMON_SERIAL_DRV_DATA \
.info = { \
.name = "Samsung Exynos UART", \
.type = TYPE_S3C6400, \
.port_type = PORT_S3C6400, \
- .has_divslot = 1, \
+ .iotype = UPIO_MEM, \
+ .has_divslot = true, \
.rx_fifomask = S5PV210_UFSTAT_RXMASK, \
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
.rx_fifofull = S5PV210_UFSTAT_RXFULL, \
@@ -2476,39 +2480,57 @@ static const struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
} \
static const struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 16, 16 },
};
static const struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 64, 256, 16, 256 },
};
static const struct s3c24xx_serial_drv_data exynos850_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 64, 64 },
};
-/*
- * Common drv_data struct for platforms that specify samsung,uart-fifosize in
- * device tree.
- */
-static const struct s3c24xx_serial_drv_data exynos_fifoszdt_serial_drv_data = {
- EXYNOS_COMMON_SERIAL_DRV_DATA(),
+static const struct s3c24xx_serial_drv_data gs101_serial_drv_data = {
+ .info = {
+ .name = "Google GS101 UART",
+ .type = TYPE_S3C6400,
+ .port_type = PORT_S3C6400,
+ .iotype = UPIO_MEM32,
+ .has_divslot = true,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 1,
+ .clksel_mask = 0,
+ .clksel_shift = 0,
+ },
+ .def_cfg = {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ .has_fracval = 1,
+ },
+ /* samsung,uart-fifosize must be specified in the device tree. */
.fifosize = { 0 },
};
#define EXYNOS4210_SERIAL_DRV_DATA (&exynos4210_serial_drv_data)
#define EXYNOS5433_SERIAL_DRV_DATA (&exynos5433_serial_drv_data)
#define EXYNOS850_SERIAL_DRV_DATA (&exynos850_serial_drv_data)
-#define EXYNOS_FIFOSZDT_DRV_DATA (&exynos_fifoszdt_serial_drv_data)
+#define GS101_SERIAL_DRV_DATA (&gs101_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA NULL
#define EXYNOS5433_SERIAL_DRV_DATA NULL
#define EXYNOS850_SERIAL_DRV_DATA NULL
-#define EXYNOS_FIFOSZDT_DRV_DATA NULL
+#define GS101_SERIAL_DRV_DATA NULL
#endif
#ifdef CONFIG_ARCH_APPLE
@@ -2517,6 +2539,7 @@ static const struct s3c24xx_serial_drv_data s5l_serial_drv_data = {
.name = "Apple S5L UART",
.type = TYPE_APPLE_S5L,
.port_type = PORT_8250,
+ .iotype = UPIO_MEM,
.fifosize = 16,
.rx_fifomask = S3C2410_UFSTAT_RXMASK,
.rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
@@ -2546,8 +2569,9 @@ static const struct s3c24xx_serial_drv_data artpec8_serial_drv_data = {
.name = "Axis ARTPEC-8 UART",
.type = TYPE_S3C6400,
.port_type = PORT_S3C6400,
+ .iotype = UPIO_MEM,
.fifosize = 64,
- .has_divslot = 1,
+ .has_divslot = true,
.rx_fifomask = S5PV210_UFSTAT_RXMASK,
.rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
.rx_fifofull = S5PV210_UFSTAT_RXFULL,
@@ -2594,7 +2618,7 @@ static const struct platform_device_id s3c24xx_serial_driver_ids[] = {
.driver_data = (kernel_ulong_t)ARTPEC8_SERIAL_DRV_DATA,
}, {
.name = "gs101-uart",
- .driver_data = (kernel_ulong_t)EXYNOS_FIFOSZDT_DRV_DATA,
+ .driver_data = (kernel_ulong_t)GS101_SERIAL_DRV_DATA,
},
{ },
};
@@ -2617,7 +2641,7 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
{ .compatible = "axis,artpec8-uart",
.data = ARTPEC8_SERIAL_DRV_DATA },
{ .compatible = "google,gs101-uart",
- .data = EXYNOS_FIFOSZDT_DRV_DATA },
+ .data = GS101_SERIAL_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
@@ -2716,7 +2740,8 @@ static int samsung_early_read(struct console *con, char *s, unsigned int n)
{
struct earlycon_device *dev = con->data;
const struct samsung_early_console_data *data = dev->port.private_data;
- int ch, ufstat, num_read = 0;
+ int num_read = 0;
+ u32 ch, ufstat;
while (num_read < n) {
ufstat = rd_regl(&dev->port, S3C2410_UFSTAT);
@@ -2785,6 +2810,17 @@ OF_EARLYCON_DECLARE(exynos4210, "samsung,exynos4210-uart",
OF_EARLYCON_DECLARE(artpec8, "axis,artpec8-uart",
s5pv210_early_console_setup);
+static int __init gs101_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ /* gs101 always expects MMIO32 register accesses. */
+ device->port.iotype = UPIO_MEM32;
+
+ return s5pv210_early_console_setup(device, opt);
+}
+
+OF_EARLYCON_DECLARE(gs101, "google,gs101-uart", gs101_early_console_setup);
+
/* Apple S5L */
static int __init apple_s5l_early_console_setup(struct earlycon_device *device,
const char *opt)
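
The iotype now comes from the per-SoC drv_data (and from the gs101 earlycon setup above), because that field decides the access width used by the register helpers. Illustrative only - not samsung_tty's own helper - this is roughly what an iotype-aware read looks like, and why gs101 must run with UPIO_MEM32:

#include <linux/io.h>
#include <linux/serial_core.h>

static u32 foo_rd_reg(const struct uart_port *port, u32 reg)
{
	switch (port->iotype) {
	case UPIO_MEM:
		return readb_relaxed(port->membase + reg);
	case UPIO_MEM32:
		return readl_relaxed(port->membase + reg);
	default:
		return 0;
	}
}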
diff --git a/drivers/tty/serial/serial_base_bus.c b/drivers/tty/serial/serial_base_bus.c
index 3dfcf20c4eb6..4df2a4b10445 100644
--- a/drivers/tty/serial/serial_base_bus.c
+++ b/drivers/tty/serial/serial_base_bus.c
@@ -41,7 +41,7 @@ static int serial_base_match(struct device *dev, struct device_driver *drv)
return 0;
}
-static struct bus_type serial_base_bus_type = {
+static const struct bus_type serial_base_bus_type = {
.name = "serial-base",
.match = serial_base_match,
};
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index d6a58a9e072a..ff85ebd3a007 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2608,7 +2608,12 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
port->type = PORT_UNKNOWN;
flags |= UART_CONFIG_TYPE;
}
+ /* Synchronize with possible boot console. */
+ if (uart_console(port))
+ console_lock();
port->ops->config_port(port, flags);
+ if (uart_console(port))
+ console_unlock();
}
if (port->type != PORT_UNKNOWN) {
@@ -2616,6 +2621,10 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_report_port(drv, port);
+ /* Synchronize with possible boot console. */
+ if (uart_console(port))
+ console_lock();
+
/* Power up port for set_mctrl() */
uart_change_pm(state, UART_PM_STATE_ON);
@@ -2632,6 +2641,9 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_rs485_config(port);
+ if (uart_console(port))
+ console_unlock();
+
/*
* If this driver supports console, and it hasn't been
* successfully registered yet, try to re-register it.
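
A minimal sketch of the idea behind the two hunks above (not the serial_core code itself): any reconfiguration of a port that is also a registered console is bracketed by the console lock, so a boot console cannot print through the port while it is being reprogrammed.

#include <linux/console.h>
#include <linux/serial_core.h>

static void foo_config_port_synced(struct uart_port *port, int flags)
{
	if (uart_console(port))
		console_lock();

	port->ops->config_port(port, flags);

	if (uart_console(port))
		console_unlock();
}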
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index 72b6f4f326e2..22b9eeb23e68 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -8,7 +8,10 @@
#include <linux/device.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/property.h>
#include <linux/serial_core.h>
#include <linux/spinlock.h>
@@ -105,6 +108,148 @@ void uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
}
EXPORT_SYMBOL(uart_remove_one_port);
+/**
+ * __uart_read_properties - read firmware properties of the given UART port
+ * @port: corresponding port
+ * @use_defaults: apply defaults (when %true) or validate the values (when %false)
+ *
+ * The following device properties are supported:
+ * - clock-frequency (optional)
+ * - fifo-size (optional)
+ * - no-loopback-test (optional)
+ * - reg-shift (defaults may apply)
+ * - reg-offset (value may be validated)
+ * - reg-io-width (defaults may apply or value may be validated)
+ * - interrupts (OF only)
+ * - serial [alias ID] (OF only)
+ *
+ * If port->dev is a struct platform_device, the interrupt line will be
+ * retrieved via a platform_get_irq() call against that device; otherwise
+ * it will be obtained via fwnode_irq_get(). In both cases index 0 of the
+ * resource is used.
+ *
+ * The caller is responsible for initializing the following fields of the @port
+ * ->dev (must be valid)
+ * ->flags
+ * ->mapbase
+ * ->mapsize
+ * ->regshift (if @use_defaults is false)
+ * before calling this function. Alternatively, the above-mentioned fields
+ * may be zeroed; in that case only the ones that have associated properties
+ * found will be set to the respective values.
+ *
+ * If no error occurred, ->irq, ->mapbase and ->mapsize will be altered.
+ * The ->iotype is always altered.
+ *
+ * When @use_defaults is true and the respective property is not found
+ * the following values will be applied:
+ * ->regshift = 0
+ * In this case IRQ must be provided, otherwise an error will be returned.
+ *
+ * When @use_defaults is false and the respective property is found
+ * the following values will be validated:
+ * - reg-io-width (->iotype)
+ * - reg-offset (->mapsize against ->mapbase)
+ *
+ * Returns: 0 on success or negative errno on failure
+ */
+static int __uart_read_properties(struct uart_port *port, bool use_defaults)
+{
+ struct device *dev = port->dev;
+ u32 value;
+ int ret;
+
+ /* Read optional UART functional clock frequency */
+ device_property_read_u32(dev, "clock-frequency", &port->uartclk);
+
+ /* Read the registers alignment (default: 8-bit) */
+ ret = device_property_read_u32(dev, "reg-shift", &value);
+ if (ret)
+ port->regshift = use_defaults ? 0 : port->regshift;
+ else
+ port->regshift = value;
+
+ /* Read the registers I/O access type (default: MMIO 8-bit) */
+ ret = device_property_read_u32(dev, "reg-io-width", &value);
+ if (ret) {
+ port->iotype = UPIO_MEM;
+ } else {
+ switch (value) {
+ case 1:
+ port->iotype = UPIO_MEM;
+ break;
+ case 2:
+ port->iotype = UPIO_MEM16;
+ break;
+ case 4:
+ port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
+ break;
+ default:
+ if (!use_defaults) {
+ dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ return -EINVAL;
+ }
+ port->iotype = UPIO_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Read the address mapping base offset (default: no offset) */
+ ret = device_property_read_u32(dev, "reg-offset", &value);
+ if (ret)
+ value = 0;
+
+ /* Check for shifted address mapping overflow */
+ if (!use_defaults && port->mapsize < value) {
+ dev_err(dev, "reg-offset %u exceeds region size %pa\n", value, &port->mapsize);
+ return -EINVAL;
+ }
+
+ port->mapbase += value;
+ port->mapsize -= value;
+
+ /* Read optional FIFO size */
+ device_property_read_u32(dev, "fifo-size", &port->fifosize);
+
+ if (device_property_read_bool(dev, "no-loopback-test"))
+ port->flags |= UPF_SKIP_TEST;
+
+ /* Get index of serial line, if found in DT aliases */
+ ret = of_alias_get_id(dev_of_node(dev), "serial");
+ if (ret >= 0)
+ port->line = ret;
+
+ if (dev_is_platform(dev))
+ ret = platform_get_irq(to_platform_device(dev), 0);
+ else
+ ret = fwnode_irq_get(dev_fwnode(dev), 0);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ if (ret > 0)
+ port->irq = ret;
+ else if (use_defaults)
+ /* By default IRQ support is mandatory */
+ return ret;
+ else
+ port->irq = 0;
+
+ port->flags |= UPF_SHARE_IRQ;
+
+ return 0;
+}
+
+int uart_read_port_properties(struct uart_port *port)
+{
+ return __uart_read_properties(port, true);
+}
+EXPORT_SYMBOL_GPL(uart_read_port_properties);
+
+int uart_read_and_validate_port_properties(struct uart_port *port)
+{
+ return __uart_read_properties(port, false);
+}
+EXPORT_SYMBOL_GPL(uart_read_and_validate_port_properties);
+
static struct device_driver serial_port_driver = {
.name = "port",
.suppress_bind_attrs = true,
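
The kernel-doc above describes the contract; a hedged sketch of how a platform driver might use the new helper in its probe path (the foo_* names, error handling and remaining registration steps are placeholders, not part of this patch):

#include <linux/platform_device.h>
#include <linux/serial_core.h>

static int foo_uart_probe(struct platform_device *pdev)
{
	struct uart_port *port;
	struct resource *res;
	int ret;

	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Fields the helper expects the caller to have filled in. */
	port->dev = &pdev->dev;
	port->mapbase = res->start;
	port->mapsize = resource_size(res);

	/* Reads clock-frequency, reg-shift/offset/io-width, fifo-size, the IRQ... */
	ret = uart_read_port_properties(port);
	if (ret)
		return ret;

	/* ... ioremap the region and register with a uart_driver here ... */
	return 0;
}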
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index e1897894a4ef..abba39722958 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -23,9 +23,10 @@
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-
#include <linux/io.h>
+#include <asm/txx9/generic.h>
+
#define PASS_LIMIT 256
#if !defined(CONFIG_SERIAL_TXX9_STDSERIAL)
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index a85e7b9a2e49..e512eaa57ed5 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -576,13 +576,13 @@ static void sci_start_tx(struct uart_port *port)
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 new, scr = serial_port_in(port, SCSCR);
+ u16 new, scr = sci_serial_in(port, SCSCR);
if (s->chan_tx)
new = scr | SCSCR_TDRQE;
else
new = scr & ~SCSCR_TDRQE;
if (new != scr)
- serial_port_out(port, SCSCR, new);
+ sci_serial_out(port, SCSCR, new);
}
if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
@@ -599,7 +599,7 @@ static void sci_start_tx(struct uart_port *port)
if (!s->chan_tx || s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE ||
port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
/*
* For SCI, TE (transmit enable) must be set after setting TIE
@@ -609,7 +609,7 @@ static void sci_start_tx(struct uart_port *port)
if (port->type == PORT_SCI)
ctrl |= SCSCR_TE;
- serial_port_out(port, SCSCR, ctrl | SCSCR_TIE);
+ sci_serial_out(port, SCSCR, ctrl | SCSCR_TIE);
}
}
@@ -618,14 +618,14 @@ static void sci_stop_tx(struct uart_port *port)
unsigned short ctrl;
/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~SCSCR_TDRQE;
ctrl &= ~SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (to_sci_port(port)->chan_tx &&
@@ -640,41 +640,40 @@ static void sci_start_rx(struct uart_port *port)
{
unsigned short ctrl;
- ctrl = serial_port_in(port, SCSCR) | port_rx_irq_mask(port);
+ ctrl = sci_serial_in(port, SCSCR) | port_rx_irq_mask(port);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~SCSCR_RDRQE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
}
static void sci_stop_rx(struct uart_port *port)
{
unsigned short ctrl;
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
ctrl &= ~SCSCR_RDRQE;
ctrl &= ~port_rx_irq_mask(port);
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
}
static void sci_clear_SCxSR(struct uart_port *port, unsigned int mask)
{
if (port->type == PORT_SCI) {
/* Just store the mask */
- serial_port_out(port, SCxSR, mask);
+ sci_serial_out(port, SCxSR, mask);
} else if (to_sci_port(port)->params->overrun_mask == SCIFA_ORER) {
/* SCIFA/SCIFB and SCIF on SH7705/SH7720/SH7721 */
/* Only clear the status bits we want to clear */
- serial_port_out(port, SCxSR,
- serial_port_in(port, SCxSR) & mask);
+ sci_serial_out(port, SCxSR, sci_serial_in(port, SCxSR) & mask);
} else {
/* Store the mask, clear parity/framing errors */
- serial_port_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
+ sci_serial_out(port, SCxSR, mask & ~(SCIF_FERC | SCIF_PERC));
}
}
@@ -688,7 +687,7 @@ static int sci_poll_get_char(struct uart_port *port)
int c;
do {
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
if (status & SCxSR_ERRORS(port)) {
sci_clear_SCxSR(port, SCxSR_ERROR_CLEAR(port));
continue;
@@ -699,10 +698,10 @@ static int sci_poll_get_char(struct uart_port *port)
if (!(status & SCxSR_RDxF(port)))
return NO_POLL_CHAR;
- c = serial_port_in(port, SCxRDR);
+ c = sci_serial_in(port, SCxRDR);
/* Dummy read */
- serial_port_in(port, SCxSR);
+ sci_serial_in(port, SCxSR);
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
return c;
@@ -714,10 +713,10 @@ static void sci_poll_put_char(struct uart_port *port, unsigned char c)
unsigned short status;
do {
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
} while (!(status & SCxSR_TDxE(port)));
- serial_port_out(port, SCxTDR, c);
+ sci_serial_out(port, SCxTDR, c);
sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port) & ~SCxSR_TEND(port));
}
#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_SH_SCI_CONSOLE ||
@@ -736,8 +735,8 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
}
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 data = serial_port_in(port, SCPDR);
- u16 ctrl = serial_port_in(port, SCPCR);
+ u16 data = sci_serial_in(port, SCPDR);
+ u16 ctrl = sci_serial_in(port, SCPCR);
/* Enable RXD and TXD pin functions */
ctrl &= ~(SCPCR_RXDC | SCPCR_TXDC);
@@ -756,10 +755,10 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
/* Enable CTS# pin function */
ctrl &= ~SCPCR_CTSC;
}
- serial_port_out(port, SCPDR, data);
- serial_port_out(port, SCPCR, ctrl);
+ sci_serial_out(port, SCPDR, data);
+ sci_serial_out(port, SCPCR, ctrl);
} else if (sci_getreg(port, SCSPTR)->size) {
- u16 status = serial_port_in(port, SCSPTR);
+ u16 status = sci_serial_in(port, SCSPTR);
/* RTS# is always output; and active low, unless autorts */
status |= SCSPTR_RTSIO;
@@ -769,7 +768,7 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
status &= ~SCSPTR_RTSDT;
/* CTS# and SCK are inputs */
status &= ~(SCSPTR_CTSIO | SCSPTR_SCKIO);
- serial_port_out(port, SCSPTR, status);
+ sci_serial_out(port, SCSPTR, status);
}
}
@@ -781,13 +780,13 @@ static int sci_txfill(struct uart_port *port)
reg = sci_getreg(port, SCTFDR);
if (reg->size)
- return serial_port_in(port, SCTFDR) & fifo_mask;
+ return sci_serial_in(port, SCTFDR) & fifo_mask;
reg = sci_getreg(port, SCFDR);
if (reg->size)
- return serial_port_in(port, SCFDR) >> 8;
+ return sci_serial_in(port, SCFDR) >> 8;
- return !(serial_port_in(port, SCxSR) & SCI_TDRE);
+ return !(sci_serial_in(port, SCxSR) & SCI_TDRE);
}
static int sci_txroom(struct uart_port *port)
@@ -803,13 +802,13 @@ static int sci_rxfill(struct uart_port *port)
reg = sci_getreg(port, SCRFDR);
if (reg->size)
- return serial_port_in(port, SCRFDR) & fifo_mask;
+ return sci_serial_in(port, SCRFDR) & fifo_mask;
reg = sci_getreg(port, SCFDR);
if (reg->size)
- return serial_port_in(port, SCFDR) & fifo_mask;
+ return sci_serial_in(port, SCFDR) & fifo_mask;
- return (serial_port_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
+ return (sci_serial_in(port, SCxSR) & SCxSR_RDxF(port)) != 0;
}
/* ********************************************************************** *
@@ -824,14 +823,14 @@ static void sci_transmit_chars(struct uart_port *port)
unsigned short ctrl;
int count;
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
if (!(status & SCxSR_TDxE(port))) {
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
if (uart_circ_empty(xmit))
ctrl &= ~SCSCR_TIE;
else
ctrl |= SCSCR_TIE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
return;
}
@@ -847,15 +846,15 @@ static void sci_transmit_chars(struct uart_port *port)
c = xmit->buf[xmit->tail];
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
} else if (port->type == PORT_SCI && uart_circ_empty(xmit)) {
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl &= ~SCSCR_TE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
return;
} else {
break;
}
- serial_port_out(port, SCxTDR, c);
+ sci_serial_out(port, SCxTDR, c);
port->icount.tx++;
} while (--count > 0);
@@ -866,10 +865,10 @@ static void sci_transmit_chars(struct uart_port *port)
uart_write_wakeup(port);
if (uart_circ_empty(xmit)) {
if (port->type == PORT_SCI) {
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl &= ~SCSCR_TIE;
ctrl |= SCSCR_TEIE;
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
}
sci_stop_tx(port);
@@ -883,7 +882,7 @@ static void sci_receive_chars(struct uart_port *port)
unsigned short status;
unsigned char flag;
- status = serial_port_in(port, SCxSR);
+ status = sci_serial_in(port, SCxSR);
if (!(status & SCxSR_RDxF(port)))
return;
@@ -896,7 +895,7 @@ static void sci_receive_chars(struct uart_port *port)
break;
if (port->type == PORT_SCI) {
- char c = serial_port_in(port, SCxRDR);
+ char c = sci_serial_in(port, SCxRDR);
if (uart_handle_sysrq_char(port, c))
count = 0;
else
@@ -907,11 +906,11 @@ static void sci_receive_chars(struct uart_port *port)
if (port->type == PORT_SCIF ||
port->type == PORT_HSCIF) {
- status = serial_port_in(port, SCxSR);
- c = serial_port_in(port, SCxRDR);
+ status = sci_serial_in(port, SCxSR);
+ c = sci_serial_in(port, SCxRDR);
} else {
- c = serial_port_in(port, SCxRDR);
- status = serial_port_in(port, SCxSR);
+ c = sci_serial_in(port, SCxRDR);
+ status = sci_serial_in(port, SCxSR);
}
if (uart_handle_sysrq_char(port, c)) {
count--; i--;
@@ -932,7 +931,7 @@ static void sci_receive_chars(struct uart_port *port)
}
}
- serial_port_in(port, SCxSR); /* dummy read */
+ sci_serial_in(port, SCxSR); /* dummy read */
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
copied += count;
@@ -944,8 +943,8 @@ static void sci_receive_chars(struct uart_port *port)
tty_flip_buffer_push(tport);
} else {
/* TTY buffers full; read from RX reg to prevent lockup */
- serial_port_in(port, SCxRDR);
- serial_port_in(port, SCxSR); /* dummy read */
+ sci_serial_in(port, SCxRDR);
+ sci_serial_in(port, SCxSR); /* dummy read */
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
}
@@ -953,7 +952,7 @@ static void sci_receive_chars(struct uart_port *port)
static int sci_handle_errors(struct uart_port *port)
{
int copied = 0;
- unsigned short status = serial_port_in(port, SCxSR);
+ unsigned short status = sci_serial_in(port, SCxSR);
struct tty_port *tport = &port->state->port;
struct sci_port *s = to_sci_port(port);
@@ -1000,10 +999,10 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
if (!reg->size)
return 0;
- status = serial_port_in(port, s->params->overrun_reg);
+ status = sci_serial_in(port, s->params->overrun_reg);
if (status & s->params->overrun_mask) {
status &= ~s->params->overrun_mask;
- serial_port_out(port, s->params->overrun_reg, status);
+ sci_serial_out(port, s->params->overrun_reg, status);
port->icount.overrun++;
@@ -1018,7 +1017,7 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
static int sci_handle_breaks(struct uart_port *port)
{
int copied = 0;
- unsigned short status = serial_port_in(port, SCxSR);
+ unsigned short status = sci_serial_in(port, SCxSR);
struct tty_port *tport = &port->state->port;
if (uart_handle_break(port))
@@ -1051,7 +1050,7 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
/* HSCIF can be set to an arbitrary level. */
if (sci_getreg(port, HSRTRGR)->size) {
- serial_port_out(port, HSRTRGR, rx_trig);
+ sci_serial_out(port, HSRTRGR, rx_trig);
return rx_trig;
}
@@ -1092,9 +1091,9 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
return 1;
}
- serial_port_out(port, SCFCR,
- (serial_port_in(port, SCFCR) &
- ~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);
+ sci_serial_out(port, SCFCR,
+ (sci_serial_in(port, SCFCR) &
+ ~(SCFCR_RTRG1 | SCFCR_RTRG0)) | bits);
return rx_trig;
}
@@ -1102,9 +1101,9 @@ static int scif_set_rtrg(struct uart_port *port, int rx_trig)
static int scif_rtrg_enabled(struct uart_port *port)
{
if (sci_getreg(port, HSRTRGR)->size)
- return serial_port_in(port, HSRTRGR) != 0;
+ return sci_serial_in(port, HSRTRGR) != 0;
else
- return (serial_port_in(port, SCFCR) &
+ return (sci_serial_in(port, SCFCR) &
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
}
@@ -1219,8 +1218,8 @@ static void sci_dma_tx_complete(void *arg)
s->cookie_tx = -EINVAL;
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB ||
s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
- u16 ctrl = serial_port_in(port, SCSCR);
- serial_port_out(port, SCSCR, ctrl & ~SCSCR_TIE);
+ u16 ctrl = sci_serial_in(port, SCSCR);
+ sci_serial_out(port, SCSCR, ctrl & ~SCSCR_TIE);
if (s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
/* Switch irq from DMA to SCIF */
dmaengine_pause(s->chan_tx_saved);
@@ -1296,7 +1295,7 @@ static void sci_dma_rx_reenable_irq(struct sci_port *s)
u16 scr;
/* Direct new serial port interrupts back to CPU */
- scr = serial_port_in(port, SCSCR);
+ scr = sci_serial_in(port, SCSCR);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB ||
s->cfg->regtype == SCIx_RZ_SCIFA_REGTYPE) {
enable_irq(s->irqs[SCIx_RXI_IRQ]);
@@ -1305,7 +1304,7 @@ static void sci_dma_rx_reenable_irq(struct sci_port *s)
else
scr &= ~SCSCR_RDRQE;
}
- serial_port_out(port, SCSCR, scr | SCSCR_RIE);
+ sci_serial_out(port, SCSCR, scr | SCSCR_RIE);
}
static void sci_dma_rx_complete(void *arg)
@@ -1714,8 +1713,8 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
#ifdef CONFIG_SERIAL_SH_SCI_DMA
if (s->chan_rx) {
- u16 scr = serial_port_in(port, SCSCR);
- u16 ssr = serial_port_in(port, SCxSR);
+ u16 scr = sci_serial_in(port, SCSCR);
+ u16 ssr = sci_serial_in(port, SCxSR);
/* Disable future Rx interrupts */
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB ||
@@ -1733,10 +1732,10 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
scr &= ~SCSCR_RIE;
}
- serial_port_out(port, SCSCR, scr);
+ sci_serial_out(port, SCSCR, scr);
/* Clear current interrupt */
- serial_port_out(port, SCxSR,
- ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
+ sci_serial_out(port, SCxSR,
+ ssr & ~(SCIF_DR | SCxSR_RDxF(port)));
dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u us\n",
jiffies, s->rx_timeout);
start_hrtimer_us(&s->rx_timer, s->rx_timeout);
@@ -1786,9 +1785,9 @@ static irqreturn_t sci_tx_end_interrupt(int irq, void *ptr)
return sci_tx_interrupt(irq, ptr);
uart_port_lock_irqsave(port, &flags);
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl &= ~(SCSCR_TE | SCSCR_TEIE);
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
uart_port_unlock_irqrestore(port, flags);
return IRQ_HANDLED;
@@ -1802,7 +1801,7 @@ static irqreturn_t sci_br_interrupt(int irq, void *ptr)
sci_handle_breaks(port);
/* drop invalid character received before break was detected */
- serial_port_in(port, SCxRDR);
+ sci_serial_in(port, SCxRDR);
sci_clear_SCxSR(port, SCxSR_BREAK_CLEAR(port));
@@ -1816,7 +1815,7 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr)
if (s->irqs[SCIx_ERI_IRQ] == s->irqs[SCIx_BRI_IRQ]) {
/* Break and Error interrupts are muxed */
- unsigned short ssr_status = serial_port_in(port, SCxSR);
+ unsigned short ssr_status = sci_serial_in(port, SCxSR);
/* Break Interrupt */
if (ssr_status & SCxSR_BRK(port))
@@ -1831,7 +1830,7 @@ static irqreturn_t sci_er_interrupt(int irq, void *ptr)
if (port->type == PORT_SCI) {
if (sci_handle_errors(port)) {
/* discard character in rx buffer */
- serial_port_in(port, SCxSR);
+ sci_serial_in(port, SCxSR);
sci_clear_SCxSR(port, SCxSR_RDxF_CLEAR(port));
}
} else {
@@ -1856,12 +1855,12 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
struct sci_port *s = to_sci_port(port);
irqreturn_t ret = IRQ_NONE;
- ssr_status = serial_port_in(port, SCxSR);
- scr_status = serial_port_in(port, SCSCR);
+ ssr_status = sci_serial_in(port, SCxSR);
+ scr_status = sci_serial_in(port, SCSCR);
if (s->params->overrun_reg == SCxSR)
orer_status = ssr_status;
else if (sci_getreg(port, s->params->overrun_reg)->size)
- orer_status = serial_port_in(port, s->params->overrun_reg);
+ orer_status = sci_serial_in(port, s->params->overrun_reg);
err_enabled = scr_status & port_rx_irq_mask(port);
@@ -2038,7 +2037,7 @@ static void sci_free_irq(struct sci_port *port)
static unsigned int sci_tx_empty(struct uart_port *port)
{
- unsigned short status = serial_port_in(port, SCxSR);
+ unsigned short status = sci_serial_in(port, SCxSR);
unsigned short in_tx_fifo = sci_txfill(port);
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
@@ -2047,27 +2046,27 @@ static unsigned int sci_tx_empty(struct uart_port *port)
static void sci_set_rts(struct uart_port *port, bool state)
{
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
- u16 data = serial_port_in(port, SCPDR);
+ u16 data = sci_serial_in(port, SCPDR);
/* Active low */
if (state)
data &= ~SCPDR_RTSD;
else
data |= SCPDR_RTSD;
- serial_port_out(port, SCPDR, data);
+ sci_serial_out(port, SCPDR, data);
/* RTS# is output */
- serial_port_out(port, SCPCR,
- serial_port_in(port, SCPCR) | SCPCR_RTSC);
+ sci_serial_out(port, SCPCR,
+ sci_serial_in(port, SCPCR) | SCPCR_RTSC);
} else if (sci_getreg(port, SCSPTR)->size) {
- u16 ctrl = serial_port_in(port, SCSPTR);
+ u16 ctrl = sci_serial_in(port, SCSPTR);
/* Active low */
if (state)
ctrl &= ~SCSPTR_RTSDT;
else
ctrl |= SCSPTR_RTSDT;
- serial_port_out(port, SCSPTR, ctrl);
+ sci_serial_out(port, SCSPTR, ctrl);
}
}
@@ -2075,10 +2074,10 @@ static bool sci_get_cts(struct uart_port *port)
{
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Active low */
- return !(serial_port_in(port, SCPDR) & SCPDR_CTSD);
+ return !(sci_serial_in(port, SCPDR) & SCPDR_CTSD);
} else if (sci_getreg(port, SCSPTR)->size) {
/* Active low */
- return !(serial_port_in(port, SCSPTR) & SCSPTR_CTSDT);
+ return !(sci_serial_in(port, SCSPTR) & SCSPTR_CTSDT);
}
return true;
@@ -2108,9 +2107,8 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
*/
reg = sci_getreg(port, SCFCR);
if (reg->size)
- serial_port_out(port, SCFCR,
- serial_port_in(port, SCFCR) |
- SCFCR_LOOP);
+ sci_serial_out(port, SCFCR,
+ sci_serial_in(port, SCFCR) | SCFCR_LOOP);
}
mctrl_gpio_set(s->gpios, mctrl);
@@ -2120,21 +2118,21 @@ static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (!(mctrl & TIOCM_RTS)) {
/* Disable Auto RTS */
- serial_port_out(port, SCFCR,
- serial_port_in(port, SCFCR) & ~SCFCR_MCE);
+ sci_serial_out(port, SCFCR,
+ sci_serial_in(port, SCFCR) & ~SCFCR_MCE);
/* Clear RTS */
sci_set_rts(port, 0);
} else if (s->autorts) {
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) {
/* Enable RTS# pin function */
- serial_port_out(port, SCPCR,
- serial_port_in(port, SCPCR) & ~SCPCR_RTSC);
+ sci_serial_out(port, SCPCR,
+ sci_serial_in(port, SCPCR) & ~SCPCR_RTSC);
}
/* Enable Auto RTS */
- serial_port_out(port, SCFCR,
- serial_port_in(port, SCFCR) | SCFCR_MCE);
+ sci_serial_out(port, SCFCR,
+ sci_serial_in(port, SCFCR) | SCFCR_MCE);
} else {
/* Set RTS */
sci_set_rts(port, 1);
@@ -2187,8 +2185,8 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
}
uart_port_lock_irqsave(port, &flags);
- scsptr = serial_port_in(port, SCSPTR);
- scscr = serial_port_in(port, SCSCR);
+ scsptr = sci_serial_in(port, SCSPTR);
+ scscr = sci_serial_in(port, SCSCR);
if (break_state == -1) {
scsptr = (scsptr | SCSPTR_SPB2IO) & ~SCSPTR_SPB2DT;
@@ -2198,8 +2196,8 @@ static void sci_break_ctl(struct uart_port *port, int break_state)
scscr |= SCSCR_TE;
}
- serial_port_out(port, SCSPTR, scsptr);
- serial_port_out(port, SCSCR, scscr);
+ sci_serial_out(port, SCSPTR, scsptr);
+ sci_serial_out(port, SCSCR, scscr);
uart_port_unlock_irqrestore(port, flags);
}
@@ -2239,9 +2237,9 @@ static void sci_shutdown(struct uart_port *port)
* Stop RX and TX, disable related interrupts, keep clock source
* and HSCIF TOT bits
*/
- scr = serial_port_in(port, SCSCR);
- serial_port_out(port, SCSCR, scr &
- (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
+ scr = sci_serial_in(port, SCSCR);
+ sci_serial_out(port, SCSCR,
+ scr & (SCSCR_CKE1 | SCSCR_CKE0 | s->hscif_tot));
uart_port_unlock_irqrestore(port, flags);
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -2390,19 +2388,19 @@ static void sci_reset(struct uart_port *port)
unsigned int status;
struct sci_port *s = to_sci_port(port);
- serial_port_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */
+ sci_serial_out(port, SCSCR, s->hscif_tot); /* TE=0, RE=0, CKE1=0 */
reg = sci_getreg(port, SCFCR);
if (reg->size)
- serial_port_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+ sci_serial_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
sci_clear_SCxSR(port,
SCxSR_RDxF_CLEAR(port) & SCxSR_ERROR_CLEAR(port) &
SCxSR_BREAK_CLEAR(port));
if (sci_getreg(port, SCLSR)->size) {
- status = serial_port_in(port, SCLSR);
+ status = sci_serial_in(port, SCLSR);
status &= ~(SCLSR_TO | SCLSR_ORER);
- serial_port_out(port, SCLSR, status);
+ sci_serial_out(port, SCLSR, status);
}
if (s->rx_trigger > 1) {
@@ -2540,8 +2538,8 @@ done:
* It controls the mux to select (H)SCK or frequency divided clock.
*/
if (best_clk >= 0 && sci_getreg(port, SCCKS)->size) {
- serial_port_out(port, SCDL, dl);
- serial_port_out(port, SCCKS, sccks);
+ sci_serial_out(port, SCDL, dl);
+ sci_serial_out(port, SCCKS, sccks);
}
uart_port_lock_irqsave(port, &flags);
@@ -2554,7 +2552,7 @@ done:
bits = tty_get_frame_size(termios->c_cflag);
if (sci_getreg(port, SEMR)->size)
- serial_port_out(port, SEMR, 0);
+ sci_serial_out(port, SEMR, 0);
if (best_clk >= 0) {
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
@@ -2569,9 +2567,9 @@ done:
case 27: smr_val |= SCSMR_SRC_27; break;
}
smr_val |= cks;
- serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
- serial_port_out(port, SCSMR, smr_val);
- serial_port_out(port, SCBRR, brr);
+ sci_serial_out(port, SCSCR, scr_val | s->hscif_tot);
+ sci_serial_out(port, SCSMR, smr_val);
+ sci_serial_out(port, SCBRR, brr);
if (sci_getreg(port, HSSRR)->size) {
unsigned int hssrr = srr | HSCIF_SRE;
/* Calculate deviation from intended rate at the
@@ -2593,7 +2591,7 @@ done:
HSCIF_SRHP_MASK;
hssrr |= HSCIF_SRDE;
}
- serial_port_out(port, HSSRR, hssrr);
+ sci_serial_out(port, HSSRR, hssrr);
}
/* Wait one bit interval */
@@ -2601,10 +2599,10 @@ done:
} else {
/* Don't touch the bit rate configuration */
scr_val = s->cfg->scscr & (SCSCR_CKE1 | SCSCR_CKE0);
- smr_val |= serial_port_in(port, SCSMR) &
+ smr_val |= sci_serial_in(port, SCSMR) &
(SCSMR_CKEDG | SCSMR_SRC_MASK | SCSMR_CKS);
- serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
- serial_port_out(port, SCSMR, smr_val);
+ sci_serial_out(port, SCSCR, scr_val | s->hscif_tot);
+ sci_serial_out(port, SCSMR, smr_val);
}
sci_init_pins(port, termios->c_cflag);
@@ -2613,7 +2611,7 @@ done:
s->autorts = false;
reg = sci_getreg(port, SCFCR);
if (reg->size) {
- unsigned short ctrl = serial_port_in(port, SCFCR);
+ unsigned short ctrl = sci_serial_in(port, SCFCR);
if ((port->flags & UPF_HARD_FLOW) &&
(termios->c_cflag & CRTSCTS)) {
@@ -2630,7 +2628,7 @@ done:
*/
ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
- serial_port_out(port, SCFCR, ctrl);
+ sci_serial_out(port, SCFCR, ctrl);
}
if (port->flags & UPF_HARD_FLOW) {
/* Refresh (Auto) RTS */
@@ -2645,7 +2643,7 @@ done:
if (port->type != PORT_SCI)
scr_val |= SCSCR_TE;
scr_val |= SCSCR_RE | (s->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0));
- serial_port_out(port, SCSCR, scr_val | s->hscif_tot);
+ sci_serial_out(port, SCSCR, scr_val | s->hscif_tot);
if ((srr + 1 == 5) &&
(port->type == PORT_SCIFA || port->type == PORT_SCIFB)) {
/*
@@ -3017,9 +3015,6 @@ static int sci_init_single(struct platform_device *dev,
port->irq = sci_port->irqs[SCIx_RXI_IRQ];
port->irqflags = 0;
- port->serial_in = sci_serial_in;
- port->serial_out = sci_serial_out;
-
return 0;
}
@@ -3056,21 +3051,21 @@ static void serial_console_write(struct console *co, const char *s,
uart_port_lock_irqsave(port, &flags);
/* first save SCSCR then disable interrupts, keep clock source */
- ctrl = serial_port_in(port, SCSCR);
+ ctrl = sci_serial_in(port, SCSCR);
ctrl_temp = SCSCR_RE | SCSCR_TE |
(sci_port->cfg->scscr & ~(SCSCR_CKE1 | SCSCR_CKE0)) |
(ctrl & (SCSCR_CKE1 | SCSCR_CKE0));
- serial_port_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
+ sci_serial_out(port, SCSCR, ctrl_temp | sci_port->hscif_tot);
uart_console_write(port, s, count, serial_console_putchar);
/* wait until fifo is empty and last bit has been transmitted */
bits = SCxSR_TDxE(port) | SCxSR_TEND(port);
- while ((serial_port_in(port, SCxSR) & bits) != bits)
+ while ((sci_serial_in(port, SCxSR) & bits) != bits)
cpu_relax();
/* restore the SCSCR */
- serial_port_out(port, SCSCR, ctrl);
+ sci_serial_out(port, SCSCR, ctrl);
if (locked)
uart_port_unlock_irqrestore(port, flags);
@@ -3503,8 +3498,6 @@ static int __init early_console_setup(struct earlycon_device *device,
if (!device->port.membase)
return -ENODEV;
- device->port.serial_in = sci_serial_in;
- device->port.serial_out = sci_serial_out;
device->port.type = type;
memcpy(&sci_ports[0].port, &device->port, sizeof(struct uart_port));
port_cfg.type = type;
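The sh-sci hunks above replace every serial_port_in()/serial_port_out() call with the driver-local sci_serial_in()/sci_serial_out() helpers and stop wiring those helpers into struct uart_port. A minimal sketch of what a size-aware accessor of this kind can look like, built only on the sci_getreg(port, ...)->size lookups already visible above; the descriptor type name and the regshift handling are assumptions, not the driver's verbatim code:

/* Illustrative sketch only: descriptor type and regshift use are assumed. */
static unsigned int sci_serial_in_sketch(struct uart_port *p, int offset)
{
	const struct plat_sci_reg *reg = sci_getreg(p, offset);	/* offset + size */

	if (reg->size == 8)
		return ioread8(p->membase + (reg->offset << p->regshift));
	if (reg->size == 16)
		return ioread16(p->membase + (reg->offset << p->regshift));

	WARN(1, "invalid register access\n");
	return 0;
}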
diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c
index a4cc569a78a2..0670fd9f8496 100644
--- a/drivers/tty/serial/sifive.c
+++ b/drivers/tty/serial/sifive.c
@@ -412,7 +412,8 @@ static void __ssp_receive_chars(struct sifive_serial_port *ssp)
break;
ssp->port.icount.rx++;
- uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
+ if (!uart_prepare_sysrq_char(&ssp->port, ch))
+ uart_insert_char(&ssp->port, 0, 0, ch, TTY_NORMAL);
}
tty_flip_buffer_push(&ssp->port.state->port);
@@ -534,7 +535,7 @@ static irqreturn_t sifive_serial_irq(int irq, void *dev_id)
if (ip & SIFIVE_SERIAL_IP_TXWM_MASK)
__ssp_transmit_chars(ssp);
- uart_port_unlock(&ssp->port);
+ uart_unlock_and_check_sysrq(&ssp->port);
return IRQ_HANDLED;
}
@@ -791,13 +792,10 @@ static void sifive_serial_console_write(struct console *co, const char *s,
if (!ssp)
return;
- local_irq_save(flags);
- if (ssp->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&ssp->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&ssp->port, &flags);
else
- uart_port_lock(&ssp->port);
+ uart_port_lock_irqsave(&ssp->port, &flags);
ier = __ssp_readl(ssp, SIFIVE_SERIAL_IE_OFFS);
__ssp_writel(0, SIFIVE_SERIAL_IE_OFFS, ssp);
@@ -807,8 +805,7 @@ static void sifive_serial_console_write(struct console *co, const char *s,
__ssp_writel(ier, SIFIVE_SERIAL_IE_OFFS, ssp);
if (locked)
- uart_port_unlock(&ssp->port);
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&ssp->port, flags);
}
static int sifive_serial_console_setup(struct console *co, char *options)
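The sifive conversion above illustrates the deferred-sysrq pattern: uart_prepare_sysrq_char() only records a possible sysrq character while the port lock is held, and uart_unlock_and_check_sysrq() runs the sysrq handler after the lock has been dropped. A generic sketch of an Rx interrupt handler using this pattern; rx_data_ready() and read_rx_char() are placeholders for a driver's own FIFO accessors:

static irqreturn_t example_rx_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	u8 ch;

	uart_port_lock(port);
	while (rx_data_ready(port)) {			/* placeholder FIFO check */
		ch = read_rx_char(port);		/* placeholder register read */
		port->icount.rx++;
		if (!uart_prepare_sysrq_char(port, ch))	/* records sysrq, if any */
			uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
	}
	tty_flip_buffer_push(&port->state->port);
	uart_unlock_and_check_sysrq(port);		/* unlock, then handle sysrq */
	return IRQ_HANDLED;
}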
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
index bbb5595d7e24..a23e59551848 100644
--- a/drivers/tty/serial/st-asc.c
+++ b/drivers/tty/serial/st-asc.c
@@ -465,6 +465,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
const struct ktermios *old)
{
struct asc_port *ascport = to_asc_port(port);
+ bool manual_rts, toggle_rts = false;
struct gpio_desc *gpiod;
unsigned int baud;
u32 ctrl_val;
@@ -518,25 +519,13 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
/* If flow-control selected, stop handling RTS manually */
if (ascport->rts) {
- devm_gpiod_put(port->dev, ascport->rts);
- ascport->rts = NULL;
-
- pinctrl_select_state(ascport->pinctrl,
- ascport->states[DEFAULT]);
+ toggle_rts = true;
+ manual_rts = false;
}
} else {
/* If flow-control disabled, it's safe to handle RTS manually */
- if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL]) {
- pinctrl_select_state(ascport->pinctrl,
- ascport->states[NO_HW_FLOWCTRL]);
-
- gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
- if (!IS_ERR(gpiod)) {
- gpiod_set_consumer_name(gpiod,
- port->dev->of_node->name);
- ascport->rts = gpiod;
- }
- }
+ if (!ascport->rts && ascport->states[NO_HW_FLOWCTRL])
+ manual_rts = toggle_rts = true;
}
if ((baud < 19200) && !ascport->force_m1) {
@@ -595,6 +584,25 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
uart_port_unlock_irqrestore(port, flags);
+
+ if (toggle_rts) {
+ if (manual_rts) {
+ pinctrl_select_state(ascport->pinctrl,
+ ascport->states[NO_HW_FLOWCTRL]);
+
+ gpiod = devm_gpiod_get(port->dev, "rts", GPIOD_OUT_LOW);
+ if (!IS_ERR(gpiod)) {
+ gpiod_set_consumer_name(gpiod,
+ port->dev->of_node->name);
+ ascport->rts = gpiod;
+ }
+ } else {
+ devm_gpiod_put(port->dev, ascport->rts);
+ ascport->rts = NULL;
+ pinctrl_select_state(ascport->pinctrl,
+ ascport->states[DEFAULT]);
+ }
+ }
}
static const char *asc_type(struct uart_port *port)
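The st-asc rework above only decides under the port lock whether the RTS handling must change, and performs the pinctrl_select_state() / devm_gpiod_get() / devm_gpiod_put() calls after uart_port_unlock_irqrestore(), since those calls may sleep and must not run with the port spinlock held and interrupts disabled. A condensed sketch of that split; asc_decide_rts() stands in for the CRTSCTS checks done in set_termios and is not a real function:

static void asc_update_rts_sketch(struct uart_port *port, struct asc_port *ascport)
{
	unsigned long flags;
	bool manual_rts, toggle_rts;

	uart_port_lock_irqsave(port, &flags);
	toggle_rts = asc_decide_rts(port, &manual_rts);	/* placeholder decision */
	uart_port_unlock_irqrestore(port, flags);

	if (!toggle_rts)
		return;

	/* Sleeping calls are now safe. */
	if (manual_rts)
		pinctrl_select_state(ascport->pinctrl,
				     ascport->states[NO_HW_FLOWCTRL]);
	else
		pinctrl_select_state(ascport->pinctrl,
				     ascport->states[DEFAULT]);
}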
diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
index 693e932d6feb..58d169e5c1db 100644
--- a/drivers/tty/serial/stm32-usart.c
+++ b/drivers/tty/serial/stm32-usart.c
@@ -9,6 +9,7 @@
* Inspired by st-asc.c from STMicroelectronics (c)
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
@@ -39,60 +40,64 @@
/* Register offsets */
static struct stm32_usart_info __maybe_unused stm32f4_info = {
.ofs = {
- .isr = 0x00,
- .rdr = 0x04,
- .tdr = 0x04,
- .brr = 0x08,
- .cr1 = 0x0c,
- .cr2 = 0x10,
- .cr3 = 0x14,
- .gtpr = 0x18,
- .rtor = UNDEF_REG,
- .rqr = UNDEF_REG,
- .icr = UNDEF_REG,
+ .isr = 0x00,
+ .rdr = 0x04,
+ .tdr = 0x04,
+ .brr = 0x08,
+ .cr1 = 0x0c,
+ .cr2 = 0x10,
+ .cr3 = 0x14,
+ .gtpr = 0x18,
+ .rtor = UNDEF_REG,
+ .rqr = UNDEF_REG,
+ .icr = UNDEF_REG,
+ .presc = UNDEF_REG,
+ .hwcfgr1 = UNDEF_REG,
},
.cfg = {
.uart_enable_bit = 13,
.has_7bits_data = false,
- .fifosize = 1,
}
};
static struct stm32_usart_info __maybe_unused stm32f7_info = {
.ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ .presc = UNDEF_REG,
+ .hwcfgr1 = UNDEF_REG,
},
.cfg = {
.uart_enable_bit = 0,
.has_7bits_data = true,
.has_swap = true,
- .fifosize = 1,
}
};
static struct stm32_usart_info __maybe_unused stm32h7_info = {
.ofs = {
- .cr1 = 0x00,
- .cr2 = 0x04,
- .cr3 = 0x08,
- .brr = 0x0c,
- .gtpr = 0x10,
- .rtor = 0x14,
- .rqr = 0x18,
- .isr = 0x1c,
- .icr = 0x20,
- .rdr = 0x24,
- .tdr = 0x28,
+ .cr1 = 0x00,
+ .cr2 = 0x04,
+ .cr3 = 0x08,
+ .brr = 0x0c,
+ .gtpr = 0x10,
+ .rtor = 0x14,
+ .rqr = 0x18,
+ .isr = 0x1c,
+ .icr = 0x20,
+ .rdr = 0x24,
+ .tdr = 0x28,
+ .presc = 0x2c,
+ .hwcfgr1 = 0x3f0,
},
.cfg = {
.uart_enable_bit = 0,
@@ -100,7 +105,6 @@ static struct stm32_usart_info __maybe_unused stm32h7_info = {
.has_swap = true,
.has_wakeup = true,
.has_fifo = true,
- .fifosize = 16,
}
};
@@ -1147,6 +1151,8 @@ static void stm32_usart_shutdown(struct uart_port *port)
free_irq(port->irq, port);
}
+static const unsigned int stm32_usart_presc_val[] = {1, 2, 4, 6, 8, 10, 12, 16, 32, 64, 128, 256};
+
static void stm32_usart_set_termios(struct uart_port *port,
struct ktermios *termios,
const struct ktermios *old)
@@ -1155,17 +1161,19 @@ static void stm32_usart_set_termios(struct uart_port *port,
const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
struct serial_rs485 *rs485conf = &port->rs485;
- unsigned int baud, bits;
+ unsigned int baud, bits, uart_clk, uart_clk_pres;
u32 usartdiv, mantissa, fraction, oversampling;
tcflag_t cflag = termios->c_cflag;
- u32 cr1, cr2, cr3, isr;
+ u32 cr1, cr2, cr3, isr, brr, presc;
unsigned long flags;
int ret;
if (!stm32_port->hw_flow_control)
cflag &= ~CRTSCTS;
- baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);
+ uart_clk = clk_get_rate(stm32_port->clk);
+
+ baud = uart_get_baud_rate(port, termios, old, 0, uart_clk / 8);
uart_port_lock_irqsave(port, &flags);
@@ -1267,27 +1275,48 @@ static void stm32_usart_set_termios(struct uart_port *port,
cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
}
- usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);
+ for (presc = 0; presc <= USART_PRESC_MAX; presc++) {
+ uart_clk_pres = DIV_ROUND_CLOSEST(uart_clk, stm32_usart_presc_val[presc]);
+ usartdiv = DIV_ROUND_CLOSEST(uart_clk_pres, baud);
- /*
- * The USART supports 16 or 8 times oversampling.
- * By default we prefer 16 times oversampling, so that the receiver
- * has a better tolerance to clock deviations.
- * 8 times oversampling is only used to achieve higher speeds.
- */
- if (usartdiv < 16) {
- oversampling = 8;
- cr1 |= USART_CR1_OVER8;
- stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
- } else {
- oversampling = 16;
- cr1 &= ~USART_CR1_OVER8;
- stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
+ /*
+ * The USART supports 16 or 8 times oversampling.
+ * By default we prefer 16 times oversampling, so that the receiver
+ * has a better tolerance to clock deviations.
+ * 8 times oversampling is only used to achieve higher speeds.
+ */
+ if (usartdiv < 16) {
+ oversampling = 8;
+ cr1 |= USART_CR1_OVER8;
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
+ } else {
+ oversampling = 16;
+ cr1 &= ~USART_CR1_OVER8;
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
+ }
+
+ mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
+ fraction = usartdiv % oversampling;
+ brr = mantissa | fraction;
+
+ if (FIELD_FIT(USART_BRR_MASK, brr)) {
+ if (ofs->presc != UNDEF_REG) {
+ port->uartclk = uart_clk_pres;
+ writel_relaxed(presc, port->membase + ofs->presc);
+ } else if (presc) {
+ /* We need a prescaler but we don't have it (STM32F4, STM32F7) */
+ dev_err(port->dev,
+ "unable to set baudrate, input clock is too high");
+ }
+ break;
+ } else if (presc == USART_PRESC_MAX) {
+ /* Even with prescaler and brr at max value we can't set baudrate */
+ dev_err(port->dev, "unable to set baudrate, input clock is too high");
+ break;
+ }
}
- mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
- fraction = usartdiv % oversampling;
- writel_relaxed(mantissa | fraction, port->membase + ofs->brr);
+ writel_relaxed(brr, port->membase + ofs->brr);
uart_update_timeout(port, cflag, baud);
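The new loop above walks the PRESC divider table until the computed divisor fits the 16-bit BRR field. A stand-alone sketch of that search, using the same divider table and the kernel's DIV_ROUND_CLOSEST()/ARRAY_SIZE() helpers; it mirrors the logic shown in the hunk rather than quoting the driver verbatim:

static const unsigned int presc_val[] = {1, 2, 4, 6, 8, 10, 12, 16, 32, 64, 128, 256};

static int pick_presc(unsigned int uart_clk, unsigned int baud,
		      unsigned int *brr, unsigned int *kernel_clk)
{
	unsigned int i, clk_pres, div, over, mant, frac;

	for (i = 0; i < ARRAY_SIZE(presc_val); i++) {
		clk_pres = DIV_ROUND_CLOSEST(uart_clk, presc_val[i]);
		div = DIV_ROUND_CLOSEST(clk_pres, baud);
		over = div < 16 ? 8 : 16;		/* OVER8 vs. OVER16 */
		mant = (div / over) << 4;		/* USART_BRR_DIV_M_SHIFT */
		frac = div % over;
		if ((mant | frac) <= 0xffff) {		/* fits USART_BRR_MASK */
			*brr = mant | frac;
			*kernel_clk = clk_pres;
			return i;			/* PRESC register value */
		}
	}
	return -1;	/* baud rate unreachable even with the largest divider */
}

/* Worked numbers: uart_clk = 200 MHz, baud = 1200 -> /1 gives 166667 and /2
 * gives 83333 (both overflow BRR), /4 gives 41667 = (2604 << 4) | 3 = 0xa2c3,
 * so PRESC = 2 is programmed and port->uartclk becomes 50 MHz. */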
@@ -1471,37 +1500,57 @@ static const struct uart_ops stm32_uart_ops = {
#endif /* CONFIG_CONSOLE_POLL */
};
-/*
- * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
- * Note: 1 isn't a valid value in RXFTCFG / TXFTCFG. In this case,
- * RXNEIE / TXEIE can be used instead of threshold irqs: RXFTIE / TXFTIE.
- * So, RXFTCFG / TXFTCFG bitfields values are encoded as array index + 1.
- */
-static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };
+struct stm32_usart_thresh_ratio {
+ int mul;
+ int div;
+};
+
+static const struct stm32_usart_thresh_ratio stm32h7_usart_fifo_thresh_cfg[] = {
+ {1, 8}, {1, 4}, {1, 2}, {3, 4}, {7, 8}, {1, 1} };
+
+static int stm32_usart_get_thresh_value(u32 fifo_size, int index)
+{
+ return fifo_size * stm32h7_usart_fifo_thresh_cfg[index].mul /
+ stm32h7_usart_fifo_thresh_cfg[index].div;
+}
-static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
- int *ftcfg)
+static int stm32_usart_get_ftcfg(struct platform_device *pdev, struct stm32_port *stm32port,
+ const char *p, int *ftcfg)
{
- u32 bytes, i;
+ const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
+ u32 bytes, i, cfg8;
+ int fifo_size;
+
+ if (WARN_ON(ofs->hwcfgr1 == UNDEF_REG))
+ return 1;
+
+ cfg8 = FIELD_GET(USART_HWCFGR1_CFG8,
+ readl_relaxed(stm32port->port.membase + ofs->hwcfgr1));
+
+ /* On STM32H7, hwcfgr is not present, so returned value will be 0 */
+ fifo_size = cfg8 ? 1 << cfg8 : STM32H7_USART_FIFO_SIZE;
- /* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
+ /* DT option to get RX & TX FIFO threshold (default to half fifo size) */
if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
- bytes = 8;
+ bytes = fifo_size / 2;
- for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
- if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
+ if (bytes < stm32_usart_get_thresh_value(fifo_size, 0)) {
+ *ftcfg = -EINVAL;
+ return fifo_size;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++) {
+ if (stm32_usart_get_thresh_value(fifo_size, i) >= bytes)
break;
+ }
if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;
- dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
- stm32h7_usart_fifo_thresh_cfg[i]);
+ dev_dbg(&pdev->dev, "%s set to %d/%d bytes\n", p,
+ stm32_usart_get_thresh_value(fifo_size, i), fifo_size);
- /* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
- if (i)
- *ftcfg = i - 1;
- else
- *ftcfg = -EINVAL;
+ *ftcfg = i;
+ return fifo_size;
}
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
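The reworked stm32_usart_get_ftcfg() above derives the FIFO size from HWCFGR1 (falling back to the 16-byte STM32H7 FIFO when CFG8 reads 0) and expresses the RXFTCFG/TXFTCFG thresholds as fractions of that size, so the FTCFG field value is now simply the index into the ratio table. A stand-alone sketch of the selection; it restates the hunk's logic under the same {1/8, 1/4, 1/2, 3/4, 7/8, 1} table rather than copying the driver:

static const struct { int mul, div; } thresh_ratio[] = {
	{1, 8}, {1, 4}, {1, 2}, {3, 4}, {7, 8}, {1, 1}
};

static int pick_ftcfg(unsigned int fifo_size, unsigned int bytes)
{
	unsigned int i;

	if (bytes < fifo_size * thresh_ratio[0].mul / thresh_ratio[0].div)
		return -EINVAL;	/* below 1/8: threshold IRQ unused, use RXNE/TXE */

	for (i = 0; i < ARRAY_SIZE(thresh_ratio) - 1; i++)
		if (fifo_size * thresh_ratio[i].mul / thresh_ratio[i].div >= bytes)
			break;
	return i;		/* value programmed into RXFTCFG/TXFTCFG */
}

/* For a 16-byte FIFO the candidate thresholds are {2, 4, 8, 12, 14, 16}
 * bytes, so the default request of fifo_size / 2 = 8 selects index 2. */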
@@ -1531,7 +1580,6 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
port->flags = UPF_BOOT_AUTOCONF;
port->ops = &stm32_uart_ops;
port->dev = &pdev->dev;
- port->fifosize = stm32port->info->cfg.fifosize;
port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
port->irq = irq;
port->rs485_config = stm32_usart_config_rs485;
@@ -1547,14 +1595,6 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
stm32port->swap = stm32port->info->cfg.has_swap &&
of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");
- stm32port->fifoen = stm32port->info->cfg.has_fifo;
- if (stm32port->fifoen) {
- stm32_usart_get_ftcfg(pdev, "rx-threshold",
- &stm32port->rxftcfg);
- stm32_usart_get_ftcfg(pdev, "tx-threshold",
- &stm32port->txftcfg);
- }
-
port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(port->membase))
return PTR_ERR(port->membase);
@@ -1577,6 +1617,15 @@ static int stm32_usart_init_port(struct stm32_port *stm32port,
goto err_clk;
}
+ stm32port->fifoen = stm32port->info->cfg.has_fifo;
+ if (stm32port->fifoen) {
+ stm32_usart_get_ftcfg(pdev, stm32port, "rx-threshold", &stm32port->rxftcfg);
+ port->fifosize = stm32_usart_get_ftcfg(pdev, stm32port, "tx-threshold",
+ &stm32port->txftcfg);
+ } else {
+ port->fifosize = 1;
+ }
+
stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
if (IS_ERR(stm32port->gpios)) {
ret = PTR_ERR(stm32port->gpios);
diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
index f59f831b2a10..af20258ccc7a 100644
--- a/drivers/tty/serial/stm32-usart.h
+++ b/drivers/tty/serial/stm32-usart.h
@@ -9,17 +9,19 @@
#define DRIVER_NAME "stm32-usart"
struct stm32_usart_offsets {
- u8 cr1;
- u8 cr2;
- u8 cr3;
- u8 brr;
- u8 gtpr;
- u8 rtor;
- u8 rqr;
- u8 isr;
- u8 icr;
- u8 rdr;
- u8 tdr;
+ u16 cr1;
+ u16 cr2;
+ u16 cr3;
+ u16 brr;
+ u16 gtpr;
+ u16 rtor;
+ u16 rqr;
+ u16 isr;
+ u16 icr;
+ u16 rdr;
+ u16 tdr;
+ u16 presc;
+ u16 hwcfgr1;
};
struct stm32_usart_config {
@@ -28,7 +30,6 @@ struct stm32_usart_config {
bool has_swap;
bool has_wakeup;
bool has_fifo;
- int fifosize;
};
struct stm32_usart_info {
@@ -36,7 +37,7 @@ struct stm32_usart_info {
struct stm32_usart_config cfg;
};
-#define UNDEF_REG 0xff
+#define UNDEF_REG 0xffff
/* USART_SR (F4) / USART_ISR (F7) */
#define USART_SR_PE BIT(0)
@@ -71,6 +72,7 @@ struct stm32_usart_info {
#define USART_BRR_DIV_M_MASK GENMASK(15, 4)
#define USART_BRR_DIV_M_SHIFT 4
#define USART_BRR_04_R_SHIFT 1
+#define USART_BRR_MASK (USART_BRR_DIV_M_MASK | USART_BRR_DIV_F_MASK)
/* USART_CR1 */
#define USART_CR1_SBK BIT(0)
@@ -176,8 +178,16 @@ struct stm32_usart_info {
#define USART_ICR_CMCF BIT(17) /* F7 */
#define USART_ICR_WUCF BIT(20) /* H7 */
+/* USART_PRESC */
+#define USART_PRESC GENMASK(3, 0) /* H7 */
+#define USART_PRESC_MAX 0b1011
+
+/* USART_HWCFCR1 */
+#define USART_HWCFGR1_CFG8 GENMASK(31, 28) /* MP1 */
+
#define STM32_SERIAL_NAME "ttySTM"
-#define STM32_MAX_PORTS 8
+#define STM32_MAX_PORTS 9
+#define STM32H7_USART_FIFO_SIZE 16
#define RX_BUF_L 4096 /* dma rx buffer length */
#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */
diff --git a/drivers/tty/serial/sunplus-uart.c b/drivers/tty/serial/sunplus-uart.c
index 99f5285819d4..f5e29eb4a4ce 100644
--- a/drivers/tty/serial/sunplus-uart.c
+++ b/drivers/tty/serial/sunplus-uart.c
@@ -260,7 +260,7 @@ static void receive_chars(struct uart_port *port)
if (port->ignore_status_mask & SUP_DUMMY_READ)
goto ignore_char;
- if (uart_handle_sysrq_char(port, ch))
+ if (uart_prepare_sysrq_char(port, ch))
goto ignore_char;
uart_insert_char(port, lsr, SUP_UART_LSR_OE, ch, flag);
@@ -287,7 +287,7 @@ static irqreturn_t sunplus_uart_irq(int irq, void *args)
if (isc & SUP_UART_ISC_TX)
transmit_chars(port);
- uart_port_unlock(port);
+ uart_unlock_and_check_sysrq(port);
return IRQ_HANDLED;
}
@@ -512,22 +512,16 @@ static void sunplus_console_write(struct console *co,
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
-
- if (sunplus_console_ports[co->index]->port.sysrq)
- locked = 0;
- else if (oops_in_progress)
- locked = uart_port_trylock(&sunplus_console_ports[co->index]->port);
+ if (oops_in_progress)
+ locked = uart_port_trylock_irqsave(&sunplus_console_ports[co->index]->port, &flags);
else
- uart_port_lock(&sunplus_console_ports[co->index]->port);
+ uart_port_lock_irqsave(&sunplus_console_ports[co->index]->port, &flags);
uart_console_write(&sunplus_console_ports[co->index]->port, s, count,
sunplus_uart_console_putchar);
if (locked)
- uart_port_unlock(&sunplus_console_ports[co->index]->port);
-
- local_irq_restore(flags);
+ uart_port_unlock_irqrestore(&sunplus_console_ports[co->index]->port, flags);
}
static int __init sunplus_console_setup(struct console *co, char *options)
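Both the sifive and sunplus console write paths above drop the open-coded local_irq_save() and the port->sysrq special case in favour of uart_port_trylock_irqsave() when an oops is in progress and uart_port_lock_irqsave() otherwise; the sysrq case needs no exception any more because sysrq handling is deferred in the interrupt handlers. The resulting generic shape, with example_console_port() and example_putchar() as placeholders:

static void example_console_write(struct console *co, const char *s,
				  unsigned int count)
{
	struct uart_port *port = example_console_port(co);	/* placeholder lookup */
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		locked = uart_port_trylock_irqsave(port, &flags);
	else
		uart_port_lock_irqsave(port, &flags);

	uart_console_write(port, s, count, example_putchar);	/* placeholder putchar */

	if (locked)
		uart_port_unlock_irqrestore(port, flags);
}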
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index 920762d7b4a4..5f48ec37cb25 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -22,7 +22,9 @@
#include <linux/of.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
-#include <linux/iopoll.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
#define CDNS_UART_TTY_NAME "ttyPS"
#define CDNS_UART_NAME "xuartps"
@@ -193,6 +195,9 @@ MODULE_PARM_DESC(rx_timeout, "Rx timeout, 1-255");
* @clk_rate_change_nb: Notifier block for clock changes
* @quirks: Flags for RXBS support.
* @cts_override: Modem control state override
+ * @gpiod_rts: Pointer to the gpio descriptor
+ * @rs485_tx_started: RS485 tx state
+ * @tx_timer: Timer for tx
*/
struct cdns_uart {
struct uart_port *port;
@@ -203,10 +208,21 @@ struct cdns_uart {
struct notifier_block clk_rate_change_nb;
u32 quirks;
bool cts_override;
+ struct gpio_desc *gpiod_rts;
+ bool rs485_tx_started;
+ struct hrtimer tx_timer;
};
struct cdns_platform_data {
u32 quirks;
};
+
+struct serial_rs485 cdns_rs485_supported = {
+ .flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND |
+ SER_RS485_RTS_AFTER_SEND,
+ .delay_rts_before_send = 1,
+ .delay_rts_after_send = 1,
+};
+
#define to_cdns_uart(_nb) container_of(_nb, struct cdns_uart, \
clk_rate_change_nb)
@@ -306,17 +322,114 @@ static void cdns_uart_handle_rx(void *dev_id, unsigned int isrstatus)
}
/**
- * cdns_uart_handle_tx - Handle the bytes to be Txed.
+ * cdns_rts_gpio_enable - Configure RTS/GPIO to high/low
+ * @cdns_uart: Handle to the cdns_uart
+ * @enable: Value to be set to RTS/GPIO
+ */
+static void cdns_rts_gpio_enable(struct cdns_uart *cdns_uart, bool enable)
+{
+ u32 val;
+
+ if (cdns_uart->gpiod_rts) {
+ gpiod_set_value(cdns_uart->gpiod_rts, enable);
+ } else {
+ val = readl(cdns_uart->port->membase + CDNS_UART_MODEMCR);
+ if (enable)
+ val |= CDNS_UART_MODEMCR_RTS;
+ else
+ val &= ~CDNS_UART_MODEMCR_RTS;
+ writel(val, cdns_uart->port->membase + CDNS_UART_MODEMCR);
+ }
+}
+
+/**
+ * cdns_rs485_tx_setup - Tx setup specific to rs485
+ * @cdns_uart: Handle to the cdns_uart
+ */
+static void cdns_rs485_tx_setup(struct cdns_uart *cdns_uart)
+{
+ bool enable;
+
+ enable = cdns_uart->port->rs485.flags & SER_RS485_RTS_ON_SEND;
+ cdns_rts_gpio_enable(cdns_uart, enable);
+
+ cdns_uart->rs485_tx_started = true;
+}
+
+/**
+ * cdns_rs485_rx_setup - Rx setup specific to rs485
+ * @cdns_uart: Handle to the cdns_uart
+ */
+static void cdns_rs485_rx_setup(struct cdns_uart *cdns_uart)
+{
+ bool enable;
+
+ enable = cdns_uart->port->rs485.flags & SER_RS485_RTS_AFTER_SEND;
+ cdns_rts_gpio_enable(cdns_uart, enable);
+
+ cdns_uart->rs485_tx_started = false;
+}
+
+/**
+ * cdns_uart_tx_empty - Check whether TX is empty
+ * @port: Handle to the uart port structure
+ *
+ * Return: TIOCSER_TEMT on success, 0 otherwise
+ */
+static unsigned int cdns_uart_tx_empty(struct uart_port *port)
+{
+ unsigned int status;
+
+ status = readl(port->membase + CDNS_UART_SR);
+ status &= (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE);
+ return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0;
+}
+
+/**
+ * cdns_rs485_rx_callback - Timer rx callback handler for rs485.
+ * @t: Handle to the hrtimer structure
+ */
+static enum hrtimer_restart cdns_rs485_rx_callback(struct hrtimer *t)
+{
+ struct cdns_uart *cdns_uart = container_of(t, struct cdns_uart, tx_timer);
+
+ /*
+	 * Switch back to the default Rx setup, because the Rx signalling
+	 * path needs to be enabled again to receive data.
+ */
+ cdns_rs485_rx_setup(cdns_uart);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * cdns_calc_after_tx_delay - calculate the delay required after tx completes.
+ * @cdns_uart: Handle to the cdns_uart
+ */
+static u64 cdns_calc_after_tx_delay(struct cdns_uart *cdns_uart)
+{
+ /*
+ * Frame time + stop bit time + rs485.delay_rts_after_send
+ */
+ return cdns_uart->port->frame_time
+ + DIV_ROUND_UP(cdns_uart->port->frame_time, 7)
+ + (u64)cdns_uart->port->rs485.delay_rts_after_send * NSEC_PER_MSEC;
+}
+
+/**
+ * cdns_uart_handle_tx - Handle the bytes to be transmitted.
* @dev_id: Id of the UART port
* Return: None
*/
static void cdns_uart_handle_tx(void *dev_id)
{
struct uart_port *port = (struct uart_port *)dev_id;
+ struct cdns_uart *cdns_uart = port->private_data;
struct circ_buf *xmit = &port->state->xmit;
unsigned int numbytes;
- if (uart_circ_empty(xmit)) {
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ /* Disable the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IDR);
return;
}
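cdns_calc_after_tx_delay() above keeps RTS at the transmit level for one character frame, an extra stop-bit sized margin (frame_time / 7, rounded up) and the user-supplied delay_rts_after_send converted from milliseconds to nanoseconds. A rough worked example, assuming port->frame_time holds the nanosecond duration of one frame as maintained by the serial core:

/* 115200 baud, 8N1 -> 10 bits per frame, frame_time ~= 86806 ns. */
u64 delay = 86806				/* last frame        ~86.8 us */
	  + DIV_ROUND_UP(86806, 7)		/* stop-bit margin   ~12.4 us */
	  + 1 * NSEC_PER_MSEC;			/* delay_rts_after_send = 1 ms */
/* -> roughly 1.1 ms before cdns_rs485_rx_callback() restores the Rx level. */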
@@ -332,6 +445,16 @@ static void cdns_uart_handle_tx(void *dev_id)
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
+
+ /* Enable the TX Empty interrupt */
+ writel(CDNS_UART_IXR_TXEMPTY, cdns_uart->port->membase + CDNS_UART_IER);
+
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED &&
+ (uart_circ_empty(xmit) || uart_tx_stopped(port))) {
+ cdns_uart->tx_timer.function = &cdns_rs485_rx_callback;
+ hrtimer_start(&cdns_uart->tx_timer,
+ ns_to_ktime(cdns_calc_after_tx_delay(cdns_uart)), HRTIMER_MODE_REL);
+ }
}
/**
@@ -565,12 +688,28 @@ static int cdns_uart_clk_notifier_cb(struct notifier_block *nb,
#endif
/**
+ * cdns_rs485_tx_callback - Timer tx callback handler for rs485.
+ * @t: Handle to the hrtimer structure
+ */
+static enum hrtimer_restart cdns_rs485_tx_callback(struct hrtimer *t)
+{
+ struct cdns_uart *cdns_uart = container_of(t, struct cdns_uart, tx_timer);
+
+ uart_port_lock(cdns_uart->port);
+ cdns_uart_handle_tx(cdns_uart->port);
+ uart_port_unlock(cdns_uart->port);
+
+ return HRTIMER_NORESTART;
+}
+
+/**
* cdns_uart_start_tx - Start transmitting bytes
* @port: Handle to the uart port structure
*/
static void cdns_uart_start_tx(struct uart_port *port)
{
unsigned int status;
+ struct cdns_uart *cdns_uart = port->private_data;
if (uart_tx_stopped(port))
return;
@@ -587,12 +726,19 @@ static void cdns_uart_start_tx(struct uart_port *port)
if (uart_circ_empty(&port->state->xmit))
return;
+ /* Clear the TX Empty interrupt */
writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_ISR);
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED) {
+ if (!cdns_uart->rs485_tx_started) {
+ cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+ cdns_rs485_tx_setup(cdns_uart);
+ return hrtimer_start(&cdns_uart->tx_timer,
+ ms_to_ktime(port->rs485.delay_rts_before_send),
+ HRTIMER_MODE_REL);
+ }
+ }
cdns_uart_handle_tx(port);
-
- /* Enable the TX Empty interrupt */
- writel(CDNS_UART_IXR_TXEMPTY, port->membase + CDNS_UART_IER);
}
/**
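Taken together, the hunks above sequence an RS485 transmission through the single tx_timer: cdns_uart_start_tx() drives RTS to the transmit level and arms the timer with delay_rts_before_send, cdns_rs485_tx_callback() then feeds the FIFO under the port lock, and once the FIFO drains cdns_uart_handle_tx() re-arms the timer with cdns_calc_after_tx_delay() so that cdns_rs485_rx_callback() finally returns RTS to the receive level. A condensed sketch of the start-of-transmission step only, using the names introduced above:

static void rs485_start_tx_sketch(struct cdns_uart *cdns_uart)
{
	if (cdns_uart->rs485_tx_started)
		return;				/* already in the Tx phase */

	cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
	cdns_rs485_tx_setup(cdns_uart);		/* RTS to the Tx level */
	hrtimer_start(&cdns_uart->tx_timer,
		      ms_to_ktime(cdns_uart->port->rs485.delay_rts_before_send),
		      HRTIMER_MODE_REL);
}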
@@ -602,6 +748,10 @@ static void cdns_uart_start_tx(struct uart_port *port)
static void cdns_uart_stop_tx(struct uart_port *port)
{
unsigned int regval;
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED)
+ cdns_rs485_rx_setup(cdns_uart);
regval = readl(port->membase + CDNS_UART_CR);
regval |= CDNS_UART_CR_TX_DIS;
@@ -627,21 +777,6 @@ static void cdns_uart_stop_rx(struct uart_port *port)
}
/**
- * cdns_uart_tx_empty - Check whether TX is empty
- * @port: Handle to the uart port structure
- *
- * Return: TIOCSER_TEMT on success, 0 otherwise
- */
-static unsigned int cdns_uart_tx_empty(struct uart_port *port)
-{
- unsigned int status;
-
- status = readl(port->membase + CDNS_UART_SR) &
- (CDNS_UART_SR_TXEMPTY | CDNS_UART_SR_TACTIVE);
- return (status == CDNS_UART_SR_TXEMPTY) ? TIOCSER_TEMT : 0;
-}
-
-/**
* cdns_uart_break_ctl - Based on the input ctl we have to start or stop
* transmitting char breaks
* @port: Handle to the uart port structure
@@ -829,6 +964,9 @@ static int cdns_uart_startup(struct uart_port *port)
(CDNS_UART_CR_TXRST | CDNS_UART_CR_RXRST))
cpu_relax();
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED)
+ cdns_rs485_rx_setup(cdns_uart);
+
/*
* Clear the RX disable bit and then set the RX enable bit to enable
* the receiver.
@@ -888,6 +1026,10 @@ static void cdns_uart_shutdown(struct uart_port *port)
{
int status;
unsigned long flags;
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ if (cdns_uart->port->rs485.flags & SER_RS485_ENABLED)
+ hrtimer_cancel(&cdns_uart->tx_timer);
uart_port_lock_irqsave(port, &flags);
@@ -1033,6 +1175,8 @@ static void cdns_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_RTS)
val |= CDNS_UART_MODEMCR_RTS;
+ if (cdns_uart_data->gpiod_rts)
+ gpiod_set_value(cdns_uart_data->gpiod_rts, !(mctrl & TIOCM_RTS));
if (mctrl & TIOCM_DTR)
val |= CDNS_UART_MODEMCR_DTR;
if (mctrl & TIOCM_LOOP)
@@ -1456,6 +1600,39 @@ MODULE_DEVICE_TABLE(of, cdns_uart_of_match);
static int instances;
/**
+ * cdns_rs485_config - Called when an application calls TIOCSRS485 ioctl.
+ * @port: Pointer to the uart_port structure
+ * @termios: Pointer to the ktermios structure
+ * @rs485: Pointer to the serial_rs485 structure
+ *
+ * Return: 0
+ */
+static int cdns_rs485_config(struct uart_port *port, struct ktermios *termios,
+ struct serial_rs485 *rs485)
+{
+ u32 val;
+ struct cdns_uart *cdns_uart = port->private_data;
+
+ if (rs485->flags & SER_RS485_ENABLED) {
+ dev_dbg(port->dev, "Setting UART to RS485\n");
+ /* Make sure auto RTS is disabled */
+ val = readl(port->membase + CDNS_UART_MODEMCR);
+ val &= ~CDNS_UART_MODEMCR_FCM;
+ writel(val, port->membase + CDNS_UART_MODEMCR);
+
+ /* Timer setup */
+ hrtimer_init(&cdns_uart->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cdns_uart->tx_timer.function = &cdns_rs485_tx_callback;
+
+	/* Disable the transmitter and switch to the Rx setup */
+ cdns_uart_stop_tx(port);
+ } else {
+ hrtimer_cancel(&cdns_uart->tx_timer);
+ }
+ return 0;
+}
+
+/**
* cdns_uart_probe - Platform driver probe
* @pdev: Pointer to the platform device structure
*
@@ -1597,9 +1774,23 @@ static int cdns_uart_probe(struct platform_device *pdev)
port->private_data = cdns_uart_data;
port->read_status_mask = CDNS_UART_IXR_TXEMPTY | CDNS_UART_IXR_RXTRIG |
CDNS_UART_IXR_OVERRUN | CDNS_UART_IXR_TOUT;
+ port->rs485_config = cdns_rs485_config;
+ port->rs485_supported = cdns_rs485_supported;
cdns_uart_data->port = port;
platform_set_drvdata(pdev, port);
+ rc = uart_get_rs485_mode(port);
+ if (rc)
+ goto err_out_clk_notifier;
+
+ cdns_uart_data->gpiod_rts = devm_gpiod_get_optional(&pdev->dev, "rts",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(cdns_uart_data->gpiod_rts)) {
+ rc = PTR_ERR(cdns_uart_data->gpiod_rts);
+ dev_err(port->dev, "xuartps: devm_gpiod_get_optional failed\n");
+ goto err_out_clk_notifier;
+ }
+
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, UART_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
@@ -1618,6 +1809,8 @@ static int cdns_uart_probe(struct platform_device *pdev)
console_port = port;
}
#endif
+ if (cdns_uart_data->port->rs485.flags & SER_RS485_ENABLED)
+ cdns_rs485_rx_setup(cdns_uart_data);
rc = uart_add_one_port(&cdns_uart_uart_driver, port);
if (rc) {
@@ -1646,6 +1839,7 @@ err_out_pm_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
pm_runtime_dont_use_autosuspend(&pdev->dev);
+err_out_clk_notifier:
#ifdef CONFIG_COMMON_CLK
clk_notifier_unregister(cdns_uart_data->uartclk,
&cdns_uart_data->clk_rate_change_nb);
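With rs485_config and rs485_supported wired up above, userspace can switch the port into RS485 mode through the standard TIOCSRS485 ioctl. A minimal userspace sketch; the device path is only an example:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

static int enable_rs485(const char *dev)	/* e.g. "/dev/ttyPS1" */
{
	struct serial_rs485 rs485 = {
		.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND,
		.delay_rts_before_send = 1,	/* milliseconds */
		.delay_rts_after_send = 1,	/* milliseconds */
	};
	int fd = open(dev, O_RDWR | O_NOCTTY);

	if (fd < 0)
		return -1;
	return ioctl(fd, TIOCSRS485, &rs485);
}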
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f8883afbeeba..79f0ff94ce00 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -7,6 +7,7 @@
#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/tty.h>
+#include <linux/tty_buffer.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
index b3dfe9d5717e..2c8ce8b592ed 100644
--- a/drivers/tty/vt/Makefile
+++ b/drivers/tty/vt/Makefile
@@ -5,9 +5,9 @@
FONTMAPFILE = cp437.uni
obj-$(CONFIG_VT) += vt_ioctl.o vc_screen.o \
- selection.o keyboard.o
+ selection.o keyboard.o \
+ vt.o defkeymap.o
obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
-obj-$(CONFIG_HW_CONSOLE) += vt.o defkeymap.o
# Files generated that shall be removed upon make clean
clean-files := consolemap_deftbl.c defkeymap.c
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 8967c3a0d916..564341f1a74f 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -7,7 +7,7 @@
* 'int set_selection_kernel(struct tiocl_selection *, struct tty_struct *)'
* 'void clear_selection(void)'
* 'int paste_selection(struct tty_struct *)'
- * 'int sel_loadlut(char __user *)'
+ * 'int sel_loadlut(u32 __user *)'
*
* Now that /dev/vcs exists, most of this can disappear again.
*/
@@ -73,10 +73,12 @@ sel_pos(int n, bool unicode)
}
/**
- * clear_selection - remove current selection
+ * clear_selection - remove current selection
*
- * Remove the current selection highlight, if any from the console
- * holding the selection. The caller must hold the console lock.
+ * Remove the current selection highlight, if any, from the console holding
+ * the selection.
+ *
+ * Locking: The caller must hold the console lock.
*/
void clear_selection(void)
{
@@ -88,7 +90,7 @@ void clear_selection(void)
}
EXPORT_SYMBOL_GPL(clear_selection);
-bool vc_is_sel(struct vc_data *vc)
+bool vc_is_sel(const struct vc_data *vc)
{
return vc == vc_sel.cons;
}
@@ -110,18 +112,25 @@ static inline int inword(const u32 c)
}
/**
- * sel_loadlut() - load the LUT table
- * @p: user table
+ * sel_loadlut() - load the LUT table
+ * @lut: user table
+ *
+ * Load the LUT table from user space. Make a temporary copy so a partial
+ * update doesn't make a mess.
*
- * Load the LUT table from user space. The caller must hold the console
- * lock. Make a temporary copy so a partial update doesn't make a mess.
+ * Locking: The console lock is acquired.
*/
-int sel_loadlut(char __user *p)
+int sel_loadlut(u32 __user *lut)
{
u32 tmplut[ARRAY_SIZE(inwordLut)];
- if (copy_from_user(tmplut, (u32 __user *)(p+4), sizeof(inwordLut)))
+
+ if (copy_from_user(tmplut, lut, sizeof(inwordLut)))
return -EFAULT;
+
+ console_lock();
memcpy(inwordLut, tmplut, sizeof(inwordLut));
+ console_unlock();
+
return 0;
}
@@ -166,14 +175,14 @@ static int store_utf8(u32 c, char *p)
}
/**
- * set_selection_user - set the current selection.
- * @sel: user selection info
- * @tty: the console tty
+ * set_selection_user - set the current selection.
+ * @sel: user selection info
+ * @tty: the console tty
*
- * Invoked by the ioctl handle for the vt layer.
+ * Invoked by the ioctl handler for the vt layer.
*
- * The entire selection process is managed under the console_lock. It's
- * a lot under the lock but its hardly a performance path
+ * Locking: The entire selection process is managed under the console_lock.
+ * It's a lot under the lock but it's hardly a performance path.
*/
int set_selection_user(const struct tiocl_selection __user *sel,
struct tty_struct *tty)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 38a765eadbe2..9b5b98dfc8b4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -145,7 +145,7 @@ static void gotoxy(struct vc_data *vc, int new_x, int new_y);
static void save_cur(struct vc_data *vc);
static void reset_terminal(struct vc_data *vc, int do_clear);
static void con_flush_chars(struct tty_struct *tty);
-static int set_vesa_blanking(char __user *p);
+static int set_vesa_blanking(u8 __user *mode);
static void set_cursor(struct vc_data *vc);
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
@@ -175,7 +175,7 @@ int do_poke_blanked_console;
int console_blanked;
EXPORT_SYMBOL(console_blanked);
-static int vesa_blank_mode; /* 0:none 1:suspendV 2:suspendH 3:powerdown */
+static enum vesa_blank_mode vesa_blank_mode;
static int vesa_off_interval;
static int blankinterval;
core_param(consoleblank, blankinterval, int, 0444);
@@ -286,18 +286,20 @@ static inline bool con_should_update(const struct vc_data *vc)
return con_is_visible(vc) && !console_blanked;
}
-static inline unsigned short *screenpos(const struct vc_data *vc, int offset,
- bool viewed)
+static inline u16 *screenpos(const struct vc_data *vc, unsigned int offset,
+ bool viewed)
{
- unsigned short *p;
-
- if (!viewed)
- p = (unsigned short *)(vc->vc_origin + offset);
- else if (!vc->vc_sw->con_screen_pos)
- p = (unsigned short *)(vc->vc_visible_origin + offset);
+ unsigned long origin = viewed ? vc->vc_visible_origin : vc->vc_origin;
+
+ return (u16 *)(origin + offset);
+}
+
+static void con_putc(struct vc_data *vc, u16 ca, unsigned int y, unsigned int x)
+{
+ if (vc->vc_sw->con_putc)
+ vc->vc_sw->con_putc(vc, ca, y, x);
else
- p = vc->vc_sw->con_screen_pos(vc, offset);
- return p;
+ vc->vc_sw->con_putcs(vc, &ca, 1, y, x);
}
/* Called from the keyboard irq path.. */
@@ -591,18 +593,12 @@ static void con_scroll(struct vc_data *vc, unsigned int top,
static void do_update_region(struct vc_data *vc, unsigned long start, int count)
{
unsigned int xx, yy, offset;
- u16 *p;
+ u16 *p = (u16 *)start;
+
+ offset = (start - vc->vc_origin) / 2;
+ xx = offset % vc->vc_cols;
+ yy = offset / vc->vc_cols;
- p = (u16 *) start;
- if (!vc->vc_sw->con_getxy) {
- offset = (start - vc->vc_origin) / 2;
- xx = offset % vc->vc_cols;
- yy = offset / vc->vc_cols;
- } else {
- int nxx, nyy;
- start = vc->vc_sw->con_getxy(vc, start, &nxx, &nyy);
- xx = nxx; yy = nyy;
- }
for(;;) {
u16 attrib = scr_readw(p) & 0xff00;
int startx = xx;
@@ -625,10 +621,6 @@ static void do_update_region(struct vc_data *vc, unsigned long start, int count)
break;
xx = 0;
yy++;
- if (vc->vc_sw->con_getxy) {
- p = (u16 *)start;
- start = vc->vc_sw->con_getxy(vc, start, NULL, NULL);
- }
}
}
@@ -703,7 +695,7 @@ static void update_attr(struct vc_data *vc)
/* Note: inverting the screen twice should revert to the original state */
void invert_screen(struct vc_data *vc, int offset, int count, bool viewed)
{
- unsigned short *p;
+ u16 *p;
WARN_CONSOLE_UNLOCKED();
@@ -762,7 +754,7 @@ void complement_pos(struct vc_data *vc, int offset)
old_offset < vc->vc_screenbuf_size) {
scr_writew(old, screenpos(vc, old_offset, true));
if (con_should_update(vc))
- vc->vc_sw->con_putc(vc, old, oldy, oldx);
+ con_putc(vc, old, oldy, oldx);
notify_update(vc);
}
@@ -771,15 +763,14 @@ void complement_pos(struct vc_data *vc, int offset)
if (offset != -1 && offset >= 0 &&
offset < vc->vc_screenbuf_size) {
unsigned short new;
- unsigned short *p;
- p = screenpos(vc, offset, true);
+ u16 *p = screenpos(vc, offset, true);
old = scr_readw(p);
new = old ^ vc->vc_complement_mask;
scr_writew(new, p);
if (con_should_update(vc)) {
oldx = (offset >> 1) % vc->vc_cols;
oldy = (offset >> 1) / vc->vc_cols;
- vc->vc_sw->con_putc(vc, new, oldy, oldx);
+ con_putc(vc, new, oldy, oldx);
}
notify_update(vc);
}
@@ -833,7 +824,7 @@ static void add_softcursor(struct vc_data *vc)
i ^= CUR_FG;
scr_writew(i, (u16 *)vc->vc_pos);
if (con_should_update(vc))
- vc->vc_sw->con_putc(vc, i, vc->state.y, vc->state.x);
+ con_putc(vc, i, vc->state.y, vc->state.x);
}
static void hide_softcursor(struct vc_data *vc)
@@ -841,8 +832,8 @@ static void hide_softcursor(struct vc_data *vc)
if (softcursor_original != -1) {
scr_writew(softcursor_original, (u16 *)vc->vc_pos);
if (con_should_update(vc))
- vc->vc_sw->con_putc(vc, softcursor_original,
- vc->state.y, vc->state.x);
+ con_putc(vc, softcursor_original, vc->state.y,
+ vc->state.x);
softcursor_original = -1;
}
}
@@ -852,7 +843,7 @@ static void hide_cursor(struct vc_data *vc)
if (vc_is_sel(vc))
clear_selection();
- vc->vc_sw->con_cursor(vc, CM_ERASE);
+ vc->vc_sw->con_cursor(vc, false);
hide_softcursor(vc);
}
@@ -865,7 +856,7 @@ static void set_cursor(struct vc_data *vc)
clear_selection();
add_softcursor(vc);
if (CUR_SIZE(vc->vc_cursor_type) != CUR_NONE)
- vc->vc_sw->con_cursor(vc, CM_DRAW);
+ vc->vc_sw->con_cursor(vc, true);
} else
hide_cursor(vc);
}
@@ -897,21 +888,18 @@ static void flush_scrollback(struct vc_data *vc)
WARN_CONSOLE_UNLOCKED();
set_origin(vc);
- if (vc->vc_sw->con_flush_scrollback) {
- vc->vc_sw->con_flush_scrollback(vc);
- } else if (con_is_visible(vc)) {
- /*
- * When no con_flush_scrollback method is provided then the
- * legacy way for flushing the scrollback buffer is to use
- * a side effect of the con_switch method. We do it only on
- * the foreground console as background consoles have no
- * scrollback buffers in that case and we obviously don't
- * want to switch to them.
- */
- hide_cursor(vc);
- vc->vc_sw->con_switch(vc);
- set_cursor(vc);
- }
+ if (!con_is_visible(vc))
+ return;
+
+ /*
+ * The legacy way for flushing the scrollback buffer is to use a side
+ * effect of the con_switch method. We do it only on the foreground
+ * console as background consoles have no scrollback buffers in that
+ * case and we obviously don't want to switch to them.
+ */
+ hide_cursor(vc);
+ vc->vc_sw->con_switch(vc);
+ set_cursor(vc);
}
/*
@@ -962,7 +950,7 @@ void redraw_screen(struct vc_data *vc, int is_switch)
}
if (redraw) {
- int update;
+ bool update;
int old_was_color = vc->vc_can_do_color;
set_origin(vc);
@@ -999,7 +987,7 @@ int vc_cons_allocated(unsigned int i)
return (i < MAX_NR_CONSOLES && vc_cons[i].d);
}
-static void visual_init(struct vc_data *vc, int num, int init)
+static void visual_init(struct vc_data *vc, int num, bool init)
{
/* ++Geert: vc->vc_sw->con_init determines console size */
if (vc->vc_sw)
@@ -1083,7 +1071,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
vc->port.ops = &vc_port_ops;
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
- visual_init(vc, currcons, 1);
+ visual_init(vc, currcons, true);
if (!*vc->uni_pagedict_loc)
con_set_default_unimap(vc);
@@ -1115,51 +1103,44 @@ err_free:
}
static inline int resize_screen(struct vc_data *vc, int width, int height,
- int user)
+ bool from_user)
{
/* Resizes the resolution of the display adapater */
int err = 0;
if (vc->vc_sw->con_resize)
- err = vc->vc_sw->con_resize(vc, width, height, user);
+ err = vc->vc_sw->con_resize(vc, width, height, from_user);
return err;
}
/**
- * vc_do_resize - resizing method for the tty
- * @tty: tty being resized
- * @vc: virtual console private data
- * @cols: columns
- * @lines: lines
+ * vc_do_resize - resizing method for the tty
+ * @tty: tty being resized
+ * @vc: virtual console private data
+ * @cols: columns
+ * @lines: lines
+ * @from_user: invoked by a user?
*
- * Resize a virtual console, clipping according to the actual constraints.
- * If the caller passes a tty structure then update the termios winsize
- * information and perform any necessary signal handling.
+ * Resize a virtual console, clipping according to the actual constraints. If
+ * the caller passes a tty structure then update the termios winsize
+ * information and perform any necessary signal handling.
*
- * Caller must hold the console semaphore. Takes the termios rwsem and
- * ctrl.lock of the tty IFF a tty is passed.
+ * Locking: Caller must hold the console semaphore. Takes the termios rwsem and
+ * ctrl.lock of the tty IFF a tty is passed.
*/
-
static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
- unsigned int cols, unsigned int lines)
+ unsigned int cols, unsigned int lines, bool from_user)
{
unsigned long old_origin, new_origin, new_scr_end, rlth, rrem, err = 0;
unsigned long end;
unsigned int old_rows, old_row_size, first_copied_row;
unsigned int new_cols, new_rows, new_row_size, new_screen_size;
- unsigned int user;
unsigned short *oldscreen, *newscreen;
u32 **new_uniscr = NULL;
WARN_CONSOLE_UNLOCKED();
- if (!vc)
- return -ENXIO;
-
- user = vc->vc_resize_user;
- vc->vc_resize_user = 0;
-
if (cols > VC_MAXCOL || lines > VC_MAXROW)
return -EINVAL;
@@ -1185,7 +1166,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
* to deal with possible errors from the code below, we call
* the resize_screen here as well.
*/
- return resize_screen(vc, new_cols, new_rows, user);
+ return resize_screen(vc, new_cols, new_rows, from_user);
}
if (new_screen_size > KMALLOC_MAX_SIZE || !new_screen_size)
@@ -1208,7 +1189,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
old_rows = vc->vc_rows;
old_row_size = vc->vc_size_row;
- err = resize_screen(vc, new_cols, new_rows, user);
+ err = resize_screen(vc, new_cols, new_rows, from_user);
if (err) {
kfree(newscreen);
vc_uniscr_free(new_uniscr);
@@ -1295,34 +1276,35 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
}
/**
- * vc_resize - resize a VT
- * @vc: virtual console
- * @cols: columns
- * @rows: rows
+ * __vc_resize - resize a VT
+ * @vc: virtual console
+ * @cols: columns
+ * @rows: rows
+ * @from_user: invoked by a user?
+ *
+ * Resize a virtual console as seen from the console end of things. We use the
+ * common vc_do_resize() method to update the structures.
*
- * Resize a virtual console as seen from the console end of things. We
- * use the common vc_do_resize methods to update the structures. The
- * caller must hold the console sem to protect console internals and
- * vc->port.tty
+ * Locking: The caller must hold the console sem to protect console internals
+ * and @vc->port.tty.
*/
-
-int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
+int __vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows,
+ bool from_user)
{
- return vc_do_resize(vc->port.tty, vc, cols, rows);
+ return vc_do_resize(vc->port.tty, vc, cols, rows, from_user);
}
-EXPORT_SYMBOL(vc_resize);
+EXPORT_SYMBOL(__vc_resize);
/**
- * vt_resize - resize a VT
- * @tty: tty to resize
- * @ws: winsize attributes
+ * vt_resize - resize a VT
+ * @tty: tty to resize
+ * @ws: winsize attributes
*
- * Resize a virtual terminal. This is called by the tty layer as we
- * register our own handler for resizing. The mutual helper does all
- * the actual work.
+ * Resize a virtual terminal. This is called by the tty layer as we register
+ * our own handler for resizing. The mutual helper does all the actual work.
*
- * Takes the console sem and the called methods then take the tty
- * termios_rwsem and the tty ctrl.lock in that order.
+ * Locking: Takes the console sem and the called methods then take the tty
+ * termios_rwsem and the tty ctrl.lock in that order.
*/
static int vt_resize(struct tty_struct *tty, struct winsize *ws)
{
@@ -1330,7 +1312,7 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
int ret;
console_lock();
- ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row);
+ ret = vc_do_resize(tty, vc, ws->ws_col, ws->ws_row, false);
console_unlock();
return ret;
}
@@ -1503,36 +1485,43 @@ static inline void del(struct vc_data *vc)
/* ignored */
}
-static void csi_J(struct vc_data *vc, int vpar)
+enum CSI_J {
+ CSI_J_CURSOR_TO_END = 0,
+ CSI_J_START_TO_CURSOR = 1,
+ CSI_J_VISIBLE = 2,
+ CSI_J_FULL = 3,
+};
+
+static void csi_J(struct vc_data *vc, enum CSI_J vpar)
{
+ unsigned short *start;
unsigned int count;
- unsigned short * start;
switch (vpar) {
- case 0: /* erase from cursor to end of display */
- vc_uniscr_clear_line(vc, vc->state.x,
- vc->vc_cols - vc->state.x);
- vc_uniscr_clear_lines(vc, vc->state.y + 1,
- vc->vc_rows - vc->state.y - 1);
- count = (vc->vc_scr_end - vc->vc_pos) >> 1;
- start = (unsigned short *)vc->vc_pos;
- break;
- case 1: /* erase from start to cursor */
- vc_uniscr_clear_line(vc, 0, vc->state.x + 1);
- vc_uniscr_clear_lines(vc, 0, vc->state.y);
- count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
- start = (unsigned short *)vc->vc_origin;
- break;
- case 3: /* include scrollback */
- flush_scrollback(vc);
- fallthrough;
- case 2: /* erase whole display */
- vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
- count = vc->vc_cols * vc->vc_rows;
- start = (unsigned short *)vc->vc_origin;
- break;
- default:
- return;
+ case CSI_J_CURSOR_TO_END:
+ vc_uniscr_clear_line(vc, vc->state.x,
+ vc->vc_cols - vc->state.x);
+ vc_uniscr_clear_lines(vc, vc->state.y + 1,
+ vc->vc_rows - vc->state.y - 1);
+ count = (vc->vc_scr_end - vc->vc_pos) >> 1;
+ start = (unsigned short *)vc->vc_pos;
+ break;
+ case CSI_J_START_TO_CURSOR:
+ vc_uniscr_clear_line(vc, 0, vc->state.x + 1);
+ vc_uniscr_clear_lines(vc, 0, vc->state.y);
+ count = ((vc->vc_pos - vc->vc_origin) >> 1) + 1;
+ start = (unsigned short *)vc->vc_origin;
+ break;
+ case CSI_J_FULL:
+ flush_scrollback(vc);
+ fallthrough;
+ case CSI_J_VISIBLE:
+ vc_uniscr_clear_lines(vc, 0, vc->vc_rows);
+ count = vc->vc_cols * vc->vc_rows;
+ start = (unsigned short *)vc->vc_origin;
+ break;
+ default:
+ return;
}
scr_memsetw(start, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
@@ -1540,27 +1529,33 @@ static void csi_J(struct vc_data *vc, int vpar)
vc->vc_need_wrap = 0;
}
-static void csi_K(struct vc_data *vc, int vpar)
+enum {
+ CSI_K_CURSOR_TO_LINEEND = 0,
+ CSI_K_LINESTART_TO_CURSOR = 1,
+ CSI_K_LINE = 2,
+};
+
+static void csi_K(struct vc_data *vc)
{
unsigned int count;
unsigned short *start = (unsigned short *)vc->vc_pos;
int offset;
- switch (vpar) {
- case 0: /* erase from cursor to end of line */
- offset = 0;
- count = vc->vc_cols - vc->state.x;
- break;
- case 1: /* erase from start of line to cursor */
- offset = -vc->state.x;
- count = vc->state.x + 1;
- break;
- case 2: /* erase whole line */
- offset = -vc->state.x;
- count = vc->vc_cols;
- break;
- default:
- return;
+ switch (vc->vc_par[0]) {
+ case CSI_K_CURSOR_TO_LINEEND:
+ offset = 0;
+ count = vc->vc_cols - vc->state.x;
+ break;
+ case CSI_K_LINESTART_TO_CURSOR:
+ offset = -vc->state.x;
+ count = vc->state.x + 1;
+ break;
+ case CSI_K_LINE:
+ offset = -vc->state.x;
+ count = vc->vc_cols;
+ break;
+ default:
+ return;
}
vc_uniscr_clear_line(vc, vc->state.x + offset, count);
scr_memsetw(start + offset, vc->vc_video_erase_char, 2 * count);
@@ -1569,20 +1564,15 @@ static void csi_K(struct vc_data *vc, int vpar)
do_update_region(vc, (unsigned long)(start + offset), count);
}
-/* erase the following vpar positions */
-static void csi_X(struct vc_data *vc, unsigned int vpar)
+/* erase the following count positions */
+static void csi_X(struct vc_data *vc)
{ /* not vt100? */
- unsigned int count;
-
- if (!vpar)
- vpar++;
-
- count = min(vpar, vc->vc_cols - vc->state.x);
+ unsigned int count = clamp(vc->vc_par[0], 1, vc->vc_cols - vc->state.x);
vc_uniscr_clear_line(vc, vc->state.x, count);
scr_memsetw((unsigned short *)vc->vc_pos, vc->vc_video_erase_char, 2 * count);
if (con_should_update(vc))
- vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, 1, count);
+ vc->vc_sw->con_clear(vc, vc->state.y, vc->state.x, count);
vc->vc_need_wrap = 0;
}
@@ -1598,7 +1588,7 @@ static void default_attr(struct vc_data *vc)
struct rgb { u8 r; u8 g; u8 b; };
-static void rgb_from_256(int i, struct rgb *c)
+static void rgb_from_256(unsigned int i, struct rgb *c)
{
if (i < 8) { /* Standard colours. */
c->r = i&1 ? 0xaa : 0x00;
@@ -1609,9 +1599,12 @@ static void rgb_from_256(int i, struct rgb *c)
c->g = i&2 ? 0xff : 0x55;
c->b = i&4 ? 0xff : 0x55;
} else if (i < 232) { /* 6x6x6 colour cube. */
- c->r = (i - 16) / 36 * 85 / 2;
- c->g = (i - 16) / 6 % 6 * 85 / 2;
- c->b = (i - 16) % 6 * 85 / 2;
+ i -= 16;
+ c->b = i % 6 * 255 / 6;
+ i /= 6;
+ c->g = i % 6 * 255 / 6;
+ i /= 6;
+ c->r = i * 255 / 6;
} else /* Grayscale ramp. */
c->r = c->g = c->b = i * 10 - 2312;
}
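The rewritten 6x6x6 branch above decomposes the palette index digit by digit instead of recomputing (i - 16) for every channel; the 255 / 6 scale is the same 42.5 per-step factor as the old 85 / 2, so the emitted colours do not change. The arithmetic for one example index:

/* Palette index 202: i = 202 - 16 = 186
 *   b = 186 % 6 * 255 / 6 =   0;  i /= 6 -> 31
 *   g =  31 % 6 * 255 / 6 =  42;  i /= 6 ->  5
 *   r =   5     * 255 / 6 = 212
 * so the cell is rendered as rgb(212, 42, 0).
 */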
@@ -1681,6 +1674,39 @@ static int vc_t416_color(struct vc_data *vc, int i,
return i;
}
+enum {
+ CSI_m_DEFAULT = 0,
+ CSI_m_BOLD = 1,
+ CSI_m_HALF_BRIGHT = 2,
+ CSI_m_ITALIC = 3,
+ CSI_m_UNDERLINE = 4,
+ CSI_m_BLINK = 5,
+ CSI_m_REVERSE = 7,
+ CSI_m_PRI_FONT = 10,
+ CSI_m_ALT_FONT1 = 11,
+ CSI_m_ALT_FONT2 = 12,
+ CSI_m_DOUBLE_UNDERLINE = 21,
+ CSI_m_NORMAL_INTENSITY = 22,
+ CSI_m_NO_ITALIC = 23,
+ CSI_m_NO_UNDERLINE = 24,
+ CSI_m_NO_BLINK = 25,
+ CSI_m_NO_REVERSE = 27,
+ CSI_m_FG_COLOR_BEG = 30,
+ CSI_m_FG_COLOR_END = 37,
+ CSI_m_FG_COLOR = 38,
+ CSI_m_DEFAULT_FG_COLOR = 39,
+ CSI_m_BG_COLOR_BEG = 40,
+ CSI_m_BG_COLOR_END = 47,
+ CSI_m_BG_COLOR = 48,
+ CSI_m_DEFAULT_BG_COLOR = 49,
+ CSI_m_BRIGHT_FG_COLOR_BEG = 90,
+ CSI_m_BRIGHT_FG_COLOR_END = 97,
+ CSI_m_BRIGHT_FG_COLOR_OFF = CSI_m_BRIGHT_FG_COLOR_BEG - CSI_m_FG_COLOR_BEG,
+ CSI_m_BRIGHT_BG_COLOR_BEG = 100,
+ CSI_m_BRIGHT_BG_COLOR_END = 107,
+ CSI_m_BRIGHT_BG_COLOR_OFF = CSI_m_BRIGHT_BG_COLOR_BEG - CSI_m_BG_COLOR_BEG,
+};
+
/* console_lock is held */
static void csi_m(struct vc_data *vc)
{
@@ -1688,33 +1714,33 @@ static void csi_m(struct vc_data *vc)
for (i = 0; i <= vc->vc_npar; i++)
switch (vc->vc_par[i]) {
- case 0: /* all attributes off */
+ case CSI_m_DEFAULT: /* all attributes off */
default_attr(vc);
break;
- case 1:
+ case CSI_m_BOLD:
vc->state.intensity = VCI_BOLD;
break;
- case 2:
+ case CSI_m_HALF_BRIGHT:
vc->state.intensity = VCI_HALF_BRIGHT;
break;
- case 3:
+ case CSI_m_ITALIC:
vc->state.italic = true;
break;
- case 21:
+ case CSI_m_DOUBLE_UNDERLINE:
/*
* No console drivers support double underline, so
* convert it to a single underline.
*/
- case 4:
+ case CSI_m_UNDERLINE:
vc->state.underline = true;
break;
- case 5:
+ case CSI_m_BLINK:
vc->state.blink = true;
break;
- case 7:
+ case CSI_m_REVERSE:
vc->state.reverse = true;
break;
- case 10: /* ANSI X3.64-1979 (SCO-ish?)
+ case CSI_m_PRI_FONT: /* ANSI X3.64-1979 (SCO-ish?)
* Select primary font, don't display control chars if
* defined, don't set bit 8 on output.
*/
@@ -1722,7 +1748,7 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 0;
vc->vc_toggle_meta = 0;
break;
- case 11: /* ANSI X3.64-1979 (SCO-ish?)
+ case CSI_m_ALT_FONT1: /* ANSI X3.64-1979 (SCO-ish?)
* Select first alternate font, lets chars < 32 be
* displayed as ROM chars.
*/
@@ -1730,7 +1756,7 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 0;
break;
- case 12: /* ANSI X3.64-1979 (SCO-ish?)
+ case CSI_m_ALT_FONT2: /* ANSI X3.64-1979 (SCO-ish?)
* Select second alternate font, toggle high bit
* before displaying as ROM char.
*/
@@ -1738,47 +1764,51 @@ static void csi_m(struct vc_data *vc)
vc->vc_disp_ctrl = 1;
vc->vc_toggle_meta = 1;
break;
- case 22:
+ case CSI_m_NORMAL_INTENSITY:
vc->state.intensity = VCI_NORMAL;
break;
- case 23:
+ case CSI_m_NO_ITALIC:
vc->state.italic = false;
break;
- case 24:
+ case CSI_m_NO_UNDERLINE:
vc->state.underline = false;
break;
- case 25:
+ case CSI_m_NO_BLINK:
vc->state.blink = false;
break;
- case 27:
+ case CSI_m_NO_REVERSE:
vc->state.reverse = false;
break;
- case 38:
+ case CSI_m_FG_COLOR:
i = vc_t416_color(vc, i, rgb_foreground);
break;
- case 48:
+ case CSI_m_BG_COLOR:
i = vc_t416_color(vc, i, rgb_background);
break;
- case 39:
+ case CSI_m_DEFAULT_FG_COLOR:
vc->state.color = (vc->vc_def_color & 0x0f) |
(vc->state.color & 0xf0);
break;
- case 49:
+ case CSI_m_DEFAULT_BG_COLOR:
vc->state.color = (vc->vc_def_color & 0xf0) |
(vc->state.color & 0x0f);
break;
- default:
- if (vc->vc_par[i] >= 90 && vc->vc_par[i] <= 107) {
- if (vc->vc_par[i] < 100)
- vc->state.intensity = VCI_BOLD;
- vc->vc_par[i] -= 60;
- }
- if (vc->vc_par[i] >= 30 && vc->vc_par[i] <= 37)
- vc->state.color = color_table[vc->vc_par[i] - 30]
- | (vc->state.color & 0xf0);
- else if (vc->vc_par[i] >= 40 && vc->vc_par[i] <= 47)
- vc->state.color = (color_table[vc->vc_par[i] - 40] << 4)
- | (vc->state.color & 0x0f);
+ case CSI_m_BRIGHT_FG_COLOR_BEG ... CSI_m_BRIGHT_FG_COLOR_END:
+ vc->state.intensity = VCI_BOLD;
+ vc->vc_par[i] -= CSI_m_BRIGHT_FG_COLOR_OFF;
+ fallthrough;
+ case CSI_m_FG_COLOR_BEG ... CSI_m_FG_COLOR_END:
+ vc->vc_par[i] -= CSI_m_FG_COLOR_BEG;
+ vc->state.color = color_table[vc->vc_par[i]] |
+ (vc->state.color & 0xf0);
+ break;
+ case CSI_m_BRIGHT_BG_COLOR_BEG ... CSI_m_BRIGHT_BG_COLOR_END:
+ vc->vc_par[i] -= CSI_m_BRIGHT_BG_COLOR_OFF;
+ fallthrough;
+ case CSI_m_BG_COLOR_BEG ... CSI_m_BG_COLOR_END:
+ vc->vc_par[i] -= CSI_m_BG_COLOR_BEG;
+ vc->state.color = (color_table[vc->vc_par[i]] << 4) |
+ (vc->state.color & 0x0f);
break;
}
update_attr(vc);
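
The new case ranges fold the old handling of SGR 90..107 (subtract 60, set bold for the bright foreground range) into the named *_OFF constants. As an illustration (a userspace sketch, not console code), a bright foreground parameter such as 95 decomposes into bold plus the ordinary 30..37 palette index like this:

#include <stdio.h>

/* Mirror the bright-colour handling in csi_m(): SGR 90..97 selects a bright
 * foreground, modelled as bold + the matching 30..37 colour. */
int main(void)
{
	const unsigned int bright_fg_off = 90 - 30;	/* CSI_m_BRIGHT_FG_COLOR_OFF */

	for (unsigned int par = 90; par <= 97; par++) {
		unsigned int base = par - bright_fg_off;	/* 30..37 */
		printf("SGR %u -> bold + palette index %u (as if SGR %u)\n",
		       par, base - 30, base);
	}
	return 0;
}

Running it shows SGR 90..97 mapping to palette indices 0..7 with bold set, which is exactly what the CSI_m_BRIGHT_FG_COLOR_BEG range case above does.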
@@ -1832,133 +1862,175 @@ int mouse_reporting(void)
return vc_cons[fg_console].d->vc_report_mouse;
}
+enum {
+ CSI_DEC_hl_CURSOR_KEYS = 1, /* CKM: cursor keys send ^[Ox/^[[x */
+ CSI_DEC_hl_132_COLUMNS = 3, /* COLM: 80/132 mode switch */
+ CSI_DEC_hl_REVERSE_VIDEO = 5, /* SCNM */
+ CSI_DEC_hl_ORIGIN_MODE = 6, /* OM: origin relative/absolute */
+ CSI_DEC_hl_AUTOWRAP = 7, /* AWM */
+ CSI_DEC_hl_AUTOREPEAT = 8, /* ARM */
+ CSI_DEC_hl_MOUSE_X10 = 9,
+ CSI_DEC_hl_SHOW_CURSOR = 25, /* TCEM */
+ CSI_DEC_hl_MOUSE_VT200 = 1000,
+};
+
/* console_lock is held */
-static void set_mode(struct vc_data *vc, int on_off)
+static void csi_DEC_hl(struct vc_data *vc, bool on_off)
{
- int i;
+ unsigned int i;
for (i = 0; i <= vc->vc_npar; i++)
- if (vc->vc_priv == EPdec) {
- switch(vc->vc_par[i]) { /* DEC private modes set/reset */
- case 1: /* Cursor keys send ^[Ox/^[[x */
- if (on_off)
- set_kbd(vc, decckm);
- else
- clr_kbd(vc, decckm);
- break;
- case 3: /* 80/132 mode switch unimplemented */
+ switch (vc->vc_par[i]) {
+ case CSI_DEC_hl_CURSOR_KEYS:
+ if (on_off)
+ set_kbd(vc, decckm);
+ else
+ clr_kbd(vc, decckm);
+ break;
+ case CSI_DEC_hl_132_COLUMNS: /* unimplemented */
#if 0
- vc_resize(deccolm ? 132 : 80, vc->vc_rows);
- /* this alone does not suffice; some user mode
- utility has to change the hardware regs */
+ vc_resize(deccolm ? 132 : 80, vc->vc_rows);
+ /* this alone does not suffice; some user mode
+ utility has to change the hardware regs */
#endif
- break;
- case 5: /* Inverted screen on/off */
- if (vc->vc_decscnm != on_off) {
- vc->vc_decscnm = on_off;
- invert_screen(vc, 0,
- vc->vc_screenbuf_size,
- false);
- update_attr(vc);
- }
- break;
- case 6: /* Origin relative/absolute */
- vc->vc_decom = on_off;
- gotoxay(vc, 0, 0);
- break;
- case 7: /* Autowrap on/off */
- vc->vc_decawm = on_off;
- break;
- case 8: /* Autorepeat on/off */
- if (on_off)
- set_kbd(vc, decarm);
- else
- clr_kbd(vc, decarm);
- break;
- case 9:
- vc->vc_report_mouse = on_off ? 1 : 0;
- break;
- case 25: /* Cursor on/off */
- vc->vc_deccm = on_off;
- break;
- case 1000:
- vc->vc_report_mouse = on_off ? 2 : 0;
- break;
- }
- } else {
- switch(vc->vc_par[i]) { /* ANSI modes set/reset */
- case 3: /* Monitor (display ctrls) */
- vc->vc_disp_ctrl = on_off;
- break;
- case 4: /* Insert Mode on/off */
- vc->vc_decim = on_off;
- break;
- case 20: /* Lf, Enter == CrLf/Lf */
- if (on_off)
- set_kbd(vc, lnm);
- else
- clr_kbd(vc, lnm);
- break;
+ break;
+ case CSI_DEC_hl_REVERSE_VIDEO:
+ if (vc->vc_decscnm != on_off) {
+ vc->vc_decscnm = on_off;
+ invert_screen(vc, 0, vc->vc_screenbuf_size,
+ false);
+ update_attr(vc);
}
+ break;
+ case CSI_DEC_hl_ORIGIN_MODE:
+ vc->vc_decom = on_off;
+ gotoxay(vc, 0, 0);
+ break;
+ case CSI_DEC_hl_AUTOWRAP:
+ vc->vc_decawm = on_off;
+ break;
+ case CSI_DEC_hl_AUTOREPEAT:
+ if (on_off)
+ set_kbd(vc, decarm);
+ else
+ clr_kbd(vc, decarm);
+ break;
+ case CSI_DEC_hl_MOUSE_X10:
+ vc->vc_report_mouse = on_off ? 1 : 0;
+ break;
+ case CSI_DEC_hl_SHOW_CURSOR:
+ vc->vc_deccm = on_off;
+ break;
+ case CSI_DEC_hl_MOUSE_VT200:
+ vc->vc_report_mouse = on_off ? 2 : 0;
+ break;
}
}
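
The parameters handled here correspond to the usual "ESC [ ? Ps h" / "ESC [ ? Ps l" DEC private sequences. A tiny userspace demo (illustrative only; it simply emits the escape sequences that csi_DEC_hl() parses) toggles the cursor with parameter 25, the CSI_DEC_hl_SHOW_CURSOR case:

#include <stdio.h>
#include <unistd.h>

/* Hide the text cursor via DECRST 25, wait a moment, then show it again
 * via DECSET 25 -- the path that sets vc_deccm above. */
int main(void)
{
	printf("\033[?25l");	/* ESC [ ? 25 l : cursor off */
	fflush(stdout);
	sleep(2);
	printf("\033[?25h");	/* ESC [ ? 25 h : cursor on */
	fflush(stdout);
	return 0;
}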
+enum {
+ CSI_hl_DISPLAY_CTRL = 3, /* handle ansi control chars */
+ CSI_hl_INSERT = 4, /* IRM: insert/replace */
+ CSI_hl_AUTO_NL = 20, /* LNM: Enter == CrLf/Lf */
+};
+
/* console_lock is held */
-static void setterm_command(struct vc_data *vc)
+static void csi_hl(struct vc_data *vc, bool on_off)
+{
+ unsigned int i;
+
+ for (i = 0; i <= vc->vc_npar; i++)
+ switch (vc->vc_par[i]) { /* ANSI modes set/reset */
+ case CSI_hl_DISPLAY_CTRL:
+ vc->vc_disp_ctrl = on_off;
+ break;
+ case CSI_hl_INSERT:
+ vc->vc_decim = on_off;
+ break;
+ case CSI_hl_AUTO_NL:
+ if (on_off)
+ set_kbd(vc, lnm);
+ else
+ clr_kbd(vc, lnm);
+ break;
+ }
+}
+
+enum CSI_right_square_bracket {
+ CSI_RSB_COLOR_FOR_UNDERLINE = 1,
+ CSI_RSB_COLOR_FOR_HALF_BRIGHT = 2,
+ CSI_RSB_MAKE_CUR_COLOR_DEFAULT = 8,
+ CSI_RSB_BLANKING_INTERVAL = 9,
+ CSI_RSB_BELL_FREQUENCY = 10,
+ CSI_RSB_BELL_DURATION = 11,
+ CSI_RSB_BRING_CONSOLE_TO_FRONT = 12,
+ CSI_RSB_UNBLANK = 13,
+ CSI_RSB_VESA_OFF_INTERVAL = 14,
+ CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT = 15,
+ CSI_RSB_CURSOR_BLINK_INTERVAL = 16,
+};
+
+/*
+ * csi_RSB - csi+] (Right Square Bracket) handler
+ *
+ * These are Linux console private sequences.
+ *
+ * console_lock is held
+ */
+static void csi_RSB(struct vc_data *vc)
{
switch (vc->vc_par[0]) {
- case 1: /* set color for underline mode */
+ case CSI_RSB_COLOR_FOR_UNDERLINE:
if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
vc->vc_ulcolor = color_table[vc->vc_par[1]];
if (vc->state.underline)
update_attr(vc);
}
break;
- case 2: /* set color for half intensity mode */
+ case CSI_RSB_COLOR_FOR_HALF_BRIGHT:
if (vc->vc_can_do_color && vc->vc_par[1] < 16) {
vc->vc_halfcolor = color_table[vc->vc_par[1]];
if (vc->state.intensity == VCI_HALF_BRIGHT)
update_attr(vc);
}
break;
- case 8: /* store colors as defaults */
+ case CSI_RSB_MAKE_CUR_COLOR_DEFAULT:
vc->vc_def_color = vc->vc_attr;
if (vc->vc_hi_font_mask == 0x100)
vc->vc_def_color >>= 1;
default_attr(vc);
update_attr(vc);
break;
- case 9: /* set blanking interval */
+ case CSI_RSB_BLANKING_INTERVAL:
blankinterval = min(vc->vc_par[1], 60U) * 60;
poke_blanked_console();
break;
- case 10: /* set bell frequency in Hz */
+ case CSI_RSB_BELL_FREQUENCY:
if (vc->vc_npar >= 1)
vc->vc_bell_pitch = vc->vc_par[1];
else
vc->vc_bell_pitch = DEFAULT_BELL_PITCH;
break;
- case 11: /* set bell duration in msec */
+ case CSI_RSB_BELL_DURATION:
if (vc->vc_npar >= 1)
vc->vc_bell_duration = (vc->vc_par[1] < 2000) ?
msecs_to_jiffies(vc->vc_par[1]) : 0;
else
vc->vc_bell_duration = DEFAULT_BELL_DURATION;
break;
- case 12: /* bring specified console to the front */
+ case CSI_RSB_BRING_CONSOLE_TO_FRONT:
if (vc->vc_par[1] >= 1 && vc_cons_allocated(vc->vc_par[1] - 1))
set_console(vc->vc_par[1] - 1);
break;
- case 13: /* unblank the screen */
+ case CSI_RSB_UNBLANK:
poke_blanked_console();
break;
- case 14: /* set vesa powerdown interval */
+ case CSI_RSB_VESA_OFF_INTERVAL:
vesa_off_interval = min(vc->vc_par[1], 60U) * 60 * HZ;
break;
- case 15: /* activate the previous console */
+ case CSI_RSB_BRING_PREV_CONSOLE_TO_FRONT:
set_console(last_console);
break;
- case 16: /* set cursor blink duration in msec */
+ case CSI_RSB_CURSOR_BLINK_INTERVAL:
if (vc->vc_npar >= 1 && vc->vc_par[1] >= 50 &&
vc->vc_par[1] <= USHRT_MAX)
vc->vc_cur_blink_ms = vc->vc_par[1];
@@ -1971,41 +2043,32 @@ static void setterm_command(struct vc_data *vc)
/* console_lock is held */
static void csi_at(struct vc_data *vc, unsigned int nr)
{
- if (nr > vc->vc_cols - vc->state.x)
- nr = vc->vc_cols - vc->state.x;
- else if (!nr)
- nr = 1;
+ nr = clamp(nr, 1, vc->vc_cols - vc->state.x);
insert_char(vc, nr);
}
/* console_lock is held */
-static void csi_L(struct vc_data *vc, unsigned int nr)
+static void csi_L(struct vc_data *vc)
{
- if (nr > vc->vc_rows - vc->state.y)
- nr = vc->vc_rows - vc->state.y;
- else if (!nr)
- nr = 1;
+ unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_rows - vc->state.y);
+
con_scroll(vc, vc->state.y, vc->vc_bottom, SM_DOWN, nr);
vc->vc_need_wrap = 0;
}
/* console_lock is held */
-static void csi_P(struct vc_data *vc, unsigned int nr)
+static void csi_P(struct vc_data *vc)
{
- if (nr > vc->vc_cols - vc->state.x)
- nr = vc->vc_cols - vc->state.x;
- else if (!nr)
- nr = 1;
+ unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_cols - vc->state.x);
+
delete_char(vc, nr);
}
/* console_lock is held */
-static void csi_M(struct vc_data *vc, unsigned int nr)
+static void csi_M(struct vc_data *vc)
{
- if (nr > vc->vc_rows - vc->state.y)
- nr = vc->vc_rows - vc->state.y;
- else if (!nr)
- nr=1;
+ unsigned int nr = clamp(vc->vc_par[0], 1, vc->vc_rows - vc->state.y);
+
con_scroll(vc, vc->state.y, vc->vc_bottom, SM_UP, nr);
vc->vc_need_wrap = 0;
}
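
csi_at(), csi_L(), csi_P(), csi_M() and csi_X() all replace the old "clip to the remaining columns/rows, and treat 0 as 1" branches with a single clamp(). A standalone check (userspace C with a simplified clamp macro standing in for the kernel's type-checked version) confirms the two forms agree whenever the limit is at least 1, which holds here because the cursor never sits past the last column or row:

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

/* Old clipping logic from csi_at() and friends. */
static unsigned int old_clip(unsigned int nr, unsigned int limit)
{
	if (nr > limit)
		nr = limit;
	else if (!nr)
		nr = 1;
	return nr;
}

int main(void)
{
	for (unsigned int limit = 1; limit <= 200; limit++)
		for (unsigned int nr = 0; nr <= 300; nr++)
			if (old_clip(nr, limit) != clamp(nr, 1u, limit)) {
				printf("mismatch: nr=%u limit=%u\n", nr, limit);
				return 1;
			}
	printf("old clipping and clamp(nr, 1, limit) agree for limit >= 1\n");
	return 0;
}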
@@ -2028,9 +2091,48 @@ static void restore_cur(struct vc_data *vc)
vc->vc_need_wrap = 0;
}
-enum { ESnormal, ESesc, ESsquare, ESgetpars, ESfunckey,
- EShash, ESsetG0, ESsetG1, ESpercent, EScsiignore, ESnonstd,
- ESpalette, ESosc, ESapc, ESpm, ESdcs };
+/**
+ * enum vc_ctl_state - control characters state of a vt
+ *
+ * @ESnormal: initial state, no control characters parsed
+ * @ESesc: ESC parsed
+ * @ESsquare: CSI parsed -- modifiers/parameters/ctrl chars expected
+ * @ESgetpars: CSI parsed -- parameters/ctrl chars expected
+ * @ESfunckey: CSI [ parsed
+ * @EShash: ESC # parsed
+ * @ESsetG0: ESC ( parsed
+ * @ESsetG1: ESC ) parsed
+ * @ESpercent: ESC % parsed
+ * @EScsiignore: CSI [0x20-0x3f] parsed
+ * @ESnonstd: OSC parsed
+ * @ESpalette: OSC P parsed
+ * @ESosc: OSC [0-9] parsed
+ * @ESANSI_first: first state for ignoring ansi control sequences
+ * @ESapc: ESC _ parsed
+ * @ESpm: ESC ^ parsed
+ * @ESdcs: ESC P parsed
+ * @ESANSI_last: last state for ignoring ansi control sequences
+ */
+enum vc_ctl_state {
+ ESnormal,
+ ESesc,
+ ESsquare,
+ ESgetpars,
+ ESfunckey,
+ EShash,
+ ESsetG0,
+ ESsetG1,
+ ESpercent,
+ EScsiignore,
+ ESnonstd,
+ ESpalette,
+ ESosc,
+ ESANSI_first = ESosc,
+ ESapc,
+ ESpm,
+ ESdcs,
+ ESANSI_last = ESdcs,
+};
/* console_lock is held (except via vc_init()) */
static void reset_terminal(struct vc_data *vc, int do_clear)
@@ -2078,10 +2180,10 @@ static void reset_terminal(struct vc_data *vc, int do_clear)
gotoxy(vc, 0, 0);
save_cur(vc);
if (do_clear)
- csi_J(vc, 2);
+ csi_J(vc, CSI_J_VISIBLE);
}
-static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
+static void vc_setGx(struct vc_data *vc, unsigned int which, u8 c)
{
unsigned char *charset = &vc->state.Gx_charset[which];
@@ -2104,36 +2206,54 @@ static void vc_setGx(struct vc_data *vc, unsigned int which, int c)
vc->vc_translate = set_translate(*charset, vc);
}
-/* is this state an ANSI control string? */
-static bool ansi_control_string(unsigned int state)
+static bool ansi_control_string(enum vc_ctl_state state)
{
- if (state == ESosc || state == ESapc || state == ESpm || state == ESdcs)
- return true;
- return false;
+ return state >= ESANSI_first && state <= ESANSI_last;
}
-/* console_lock is held */
-static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
+enum {
+ ASCII_NULL = 0,
+ ASCII_BELL = 7,
+ ASCII_BACKSPACE = 8,
+ ASCII_IGNORE_FIRST = ASCII_BACKSPACE,
+ ASCII_HTAB = 9,
+ ASCII_LINEFEED = 10,
+ ASCII_VTAB = 11,
+ ASCII_FORMFEED = 12,
+ ASCII_CAR_RET = 13,
+ ASCII_IGNORE_LAST = ASCII_CAR_RET,
+ ASCII_SHIFTOUT = 14,
+ ASCII_SHIFTIN = 15,
+ ASCII_CANCEL = 24,
+ ASCII_SUBSTITUTE = 26,
+ ASCII_ESCAPE = 27,
+ ASCII_CSI_IGNORE_FIRST = ' ', /* 0x2x, 0x3a and 0x3c - 0x3f */
+ ASCII_CSI_IGNORE_LAST = '?',
+ ASCII_DEL = 127,
+ ASCII_EXT_CSI = 128 + ASCII_ESCAPE,
+};
+
+/*
+ * Handle ASCII characters in control sequences and change states accordingly.
+ * E.g. ESC sets the state of vc to ESesc.
+ *
+ * Returns: true if @c handled.
+ */
+static bool handle_ascii(struct tty_struct *tty, struct vc_data *vc, u8 c)
{
- /*
- * Control characters can be used in the _middle_
- * of an escape sequence, aside from ANSI control strings.
- */
- if (ansi_control_string(vc->vc_state) && c >= 8 && c <= 13)
- return;
switch (c) {
- case 0:
- return;
- case 7:
+ case ASCII_NULL:
+ return true;
+ case ASCII_BELL:
if (ansi_control_string(vc->vc_state))
vc->vc_state = ESnormal;
else if (vc->vc_bell_duration)
kd_mksound(vc->vc_bell_pitch, vc->vc_bell_duration);
- return;
- case 8:
+ return true;
+ case ASCII_BACKSPACE:
bs(vc);
- return;
- case 9:
+ return true;
+ case ASCII_HTAB:
vc->vc_pos -= (vc->state.x << 1);
vc->state.x = find_next_bit(vc->vc_tab_stop,
@@ -2144,119 +2264,330 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
vc->vc_pos += (vc->state.x << 1);
notify_write(vc, '\t');
- return;
- case 10: case 11: case 12:
+ return true;
+ case ASCII_LINEFEED:
+ case ASCII_VTAB:
+ case ASCII_FORMFEED:
lf(vc);
if (!is_kbd(vc, lnm))
- return;
+ return true;
fallthrough;
- case 13:
+ case ASCII_CAR_RET:
cr(vc);
- return;
- case 14:
+ return true;
+ case ASCII_SHIFTOUT:
vc->state.charset = 1;
vc->vc_translate = set_translate(vc->state.Gx_charset[1], vc);
vc->vc_disp_ctrl = 1;
- return;
- case 15:
+ return true;
+ case ASCII_SHIFTIN:
vc->state.charset = 0;
vc->vc_translate = set_translate(vc->state.Gx_charset[0], vc);
vc->vc_disp_ctrl = 0;
- return;
- case 24: case 26:
+ return true;
+ case ASCII_CANCEL:
+ case ASCII_SUBSTITUTE:
vc->vc_state = ESnormal;
- return;
- case 27:
+ return true;
+ case ASCII_ESCAPE:
vc->vc_state = ESesc;
- return;
- case 127:
+ return true;
+ case ASCII_DEL:
del(vc);
- return;
- case 128+27:
+ return true;
+ case ASCII_EXT_CSI:
vc->vc_state = ESsquare;
- return;
+ return true;
}
- switch(vc->vc_state) {
- case ESesc:
- vc->vc_state = ESnormal;
- switch (c) {
- case '[':
- vc->vc_state = ESsquare;
- return;
- case ']':
- vc->vc_state = ESnonstd;
- return;
- case '_':
- vc->vc_state = ESapc;
- return;
- case '^':
- vc->vc_state = ESpm;
- return;
- case '%':
- vc->vc_state = ESpercent;
- return;
- case 'E':
- cr(vc);
- lf(vc);
- return;
- case 'M':
- ri(vc);
- return;
- case 'D':
- lf(vc);
- return;
- case 'H':
- if (vc->state.x < VC_TABSTOPS_COUNT)
- set_bit(vc->state.x, vc->vc_tab_stop);
- return;
- case 'P':
- vc->vc_state = ESdcs;
- return;
- case 'Z':
+
+ return false;
+}
+
+/*
+ * Handle a character (@c) following an ESC (when @vc is in the ESesc state).
+ * E.g. previous ESC with @c == '[' here yields the ESsquare state (that is:
+ * CSI).
+ */
+static void handle_esc(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ vc->vc_state = ESnormal;
+ switch (c) {
+ case '[':
+ vc->vc_state = ESsquare;
+ break;
+ case ']':
+ vc->vc_state = ESnonstd;
+ break;
+ case '_':
+ vc->vc_state = ESapc;
+ break;
+ case '^':
+ vc->vc_state = ESpm;
+ break;
+ case '%':
+ vc->vc_state = ESpercent;
+ break;
+ case 'E':
+ cr(vc);
+ lf(vc);
+ break;
+ case 'M':
+ ri(vc);
+ break;
+ case 'D':
+ lf(vc);
+ break;
+ case 'H':
+ if (vc->state.x < VC_TABSTOPS_COUNT)
+ set_bit(vc->state.x, vc->vc_tab_stop);
+ break;
+ case 'P':
+ vc->vc_state = ESdcs;
+ break;
+ case 'Z':
+ respond_ID(tty);
+ break;
+ case '7':
+ save_cur(vc);
+ break;
+ case '8':
+ restore_cur(vc);
+ break;
+ case '(':
+ vc->vc_state = ESsetG0;
+ break;
+ case ')':
+ vc->vc_state = ESsetG1;
+ break;
+ case '#':
+ vc->vc_state = EShash;
+ break;
+ case 'c':
+ reset_terminal(vc, 1);
+ break;
+ case '>': /* Numeric keypad */
+ clr_kbd(vc, kbdapplic);
+ break;
+ case '=': /* Appl. keypad */
+ set_kbd(vc, kbdapplic);
+ break;
+ }
+}
+
+/*
+ * Handle special DEC control sequences ("ESC [ ? parameters char"). Parameters
+ * are in @vc->vc_par and the char is in @c here.
+ */
+static void csi_DEC(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ switch (c) {
+ case 'h':
+ csi_DEC_hl(vc, true);
+ break;
+ case 'l':
+ csi_DEC_hl(vc, false);
+ break;
+ case 'c':
+ if (vc->vc_par[0])
+ vc->vc_cursor_type = CUR_MAKE(vc->vc_par[0],
+ vc->vc_par[1],
+ vc->vc_par[2]);
+ else
+ vc->vc_cursor_type = cur_default;
+ break;
+ case 'm':
+ clear_selection();
+ if (vc->vc_par[0])
+ vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
+ else
+ vc->vc_complement_mask = vc->vc_s_complement_mask;
+ break;
+ case 'n':
+ if (vc->vc_par[0] == 5)
+ status_report(tty);
+ else if (vc->vc_par[0] == 6)
+ cursor_report(vc, tty);
+ break;
+ }
+}
+
+/*
+ * Handle Control Sequence Introducer control characters. That is
+ * "ESC [ parameters char". Parameters are in @vc->vc_par and the char is in
+ * @c here.
+ */
+static void csi_ECMA(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ switch (c) {
+ case 'G':
+ case '`':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ gotoxy(vc, vc->vc_par[0], vc->state.y);
+ break;
+ case 'A':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]);
+ break;
+ case 'B':
+ case 'e':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]);
+ break;
+ case 'C':
+ case 'a':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y);
+ break;
+ case 'D':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y);
+ break;
+ case 'E':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, 0, vc->state.y + vc->vc_par[0]);
+ break;
+ case 'F':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ gotoxy(vc, 0, vc->state.y - vc->vc_par[0]);
+ break;
+ case 'd':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ gotoxay(vc, vc->state.x, vc->vc_par[0]);
+ break;
+ case 'H':
+ case 'f':
+ if (vc->vc_par[0])
+ vc->vc_par[0]--;
+ if (vc->vc_par[1])
+ vc->vc_par[1]--;
+ gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
+ break;
+ case 'J':
+ csi_J(vc, vc->vc_par[0]);
+ break;
+ case 'K':
+ csi_K(vc);
+ break;
+ case 'L':
+ csi_L(vc);
+ break;
+ case 'M':
+ csi_M(vc);
+ break;
+ case 'P':
+ csi_P(vc);
+ break;
+ case 'c':
+ if (!vc->vc_par[0])
respond_ID(tty);
- return;
- case '7':
- save_cur(vc);
- return;
- case '8':
- restore_cur(vc);
- return;
- case '(':
- vc->vc_state = ESsetG0;
- return;
- case ')':
- vc->vc_state = ESsetG1;
- return;
- case '#':
- vc->vc_state = EShash;
- return;
- case 'c':
- reset_terminal(vc, 1);
- return;
- case '>': /* Numeric keypad */
- clr_kbd(vc, kbdapplic);
- return;
- case '=': /* Appl. keypad */
- set_kbd(vc, kbdapplic);
- return;
+ break;
+ case 'g':
+ if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT)
+ set_bit(vc->state.x, vc->vc_tab_stop);
+ else if (vc->vc_par[0] == 3)
+ bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT);
+ break;
+ case 'h':
+ csi_hl(vc, true);
+ break;
+ case 'l':
+ csi_hl(vc, false);
+ break;
+ case 'm':
+ csi_m(vc);
+ break;
+ case 'n':
+ if (vc->vc_par[0] == 5)
+ status_report(tty);
+ else if (vc->vc_par[0] == 6)
+ cursor_report(vc, tty);
+ break;
+ case 'q': /* DECLL - but only 3 leds */
+ /* map 0,1,2,3 to 0,1,2,4 */
+ if (vc->vc_par[0] < 4)
+ vt_set_led_state(vc->vc_num,
+ (vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
+ break;
+ case 'r':
+ if (!vc->vc_par[0])
+ vc->vc_par[0]++;
+ if (!vc->vc_par[1])
+ vc->vc_par[1] = vc->vc_rows;
+ /* Minimum allowed region is 2 lines */
+ if (vc->vc_par[0] < vc->vc_par[1] &&
+ vc->vc_par[1] <= vc->vc_rows) {
+ vc->vc_top = vc->vc_par[0] - 1;
+ vc->vc_bottom = vc->vc_par[1];
+ gotoxay(vc, 0, 0);
}
+ break;
+ case 's':
+ save_cur(vc);
+ break;
+ case 'u':
+ restore_cur(vc);
+ break;
+ case 'X':
+ csi_X(vc);
+ break;
+ case '@':
+ csi_at(vc, vc->vc_par[0]);
+ break;
+ case ']':
+ csi_RSB(vc);
+ break;
+ }
+
+}
+
+static void vc_reset_params(struct vc_data *vc)
+{
+ memset(vc->vc_par, 0, sizeof(vc->vc_par));
+ vc->vc_npar = 0;
+}
+
+/* console_lock is held */
+static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, u8 c)
+{
+ /*
+ * Control characters can be used in the _middle_
+ * of an escape sequence, aside from ANSI control strings.
+ */
+ if (ansi_control_string(vc->vc_state) && c >= ASCII_IGNORE_FIRST &&
+ c <= ASCII_IGNORE_LAST)
+ return;
+
+ if (handle_ascii(tty, vc, c))
+ return;
+
+ switch(vc->vc_state) {
+ case ESesc: /* ESC */
+ handle_esc(tty, vc, c);
return;
- case ESnonstd:
- if (c=='P') { /* palette escape sequence */
- for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
- vc->vc_par[vc->vc_npar] = 0;
- vc->vc_npar = 0;
+ case ESnonstd: /* ESC ] aka OSC */
+ switch (c) {
+ case 'P': /* palette escape sequence */
+ vc_reset_params(vc);
vc->vc_state = ESpalette;
return;
- } else if (c=='R') { /* reset palette */
+ case 'R': /* reset palette */
reset_palette(vc);
- vc->vc_state = ESnormal;
- } else if (c>='0' && c<='9')
+ break;
+ case '0' ... '9':
vc->vc_state = ESosc;
- else
- vc->vc_state = ESnormal;
+ return;
+ }
+ vc->vc_state = ESnormal;
return;
- case ESpalette:
+ case ESpalette: /* ESC ] P aka OSC P */
if (isxdigit(c)) {
vc->vc_par[vc->vc_npar++] = hex_to_bin(c);
if (vc->vc_npar == 7) {
@@ -2273,16 +2604,14 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
} else
vc->vc_state = ESnormal;
return;
- case ESsquare:
- for (vc->vc_npar = 0; vc->vc_npar < NPAR; vc->vc_npar++)
- vc->vc_par[vc->vc_npar] = 0;
- vc->vc_npar = 0;
+ case ESsquare: /* ESC [ aka CSI, parameters or modifiers expected */
+ vc_reset_params(vc);
+
vc->vc_state = ESgetpars;
- if (c == '[') { /* Function key */
- vc->vc_state=ESfunckey;
- return;
- }
switch (c) {
+ case '[': /* Function key */
+ vc->vc_state = ESfunckey;
+ return;
case '?':
vc->vc_priv = EPdec;
return;
@@ -2298,182 +2627,44 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
}
vc->vc_priv = EPecma;
fallthrough;
- case ESgetpars:
- if (c == ';' && vc->vc_npar < NPAR - 1) {
- vc->vc_npar++;
- return;
- } else if (c>='0' && c<='9') {
+ case ESgetpars: /* ESC [ aka CSI, parameters expected */
+ switch (c) {
+ case ';':
+ if (vc->vc_npar < NPAR - 1) {
+ vc->vc_npar++;
+ return;
+ }
+ break;
+ case '0' ... '9':
vc->vc_par[vc->vc_npar] *= 10;
vc->vc_par[vc->vc_npar] += c - '0';
return;
}
- if (c >= 0x20 && c <= 0x3f) { /* 0x2x, 0x3a and 0x3c - 0x3f */
+ if (c >= ASCII_CSI_IGNORE_FIRST && c <= ASCII_CSI_IGNORE_LAST) {
vc->vc_state = EScsiignore;
return;
}
+
+ /* parameters done, handle the control char @c */
+
vc->vc_state = ESnormal;
- switch(c) {
- case 'h':
- if (vc->vc_priv <= EPdec)
- set_mode(vc, 1);
- return;
- case 'l':
- if (vc->vc_priv <= EPdec)
- set_mode(vc, 0);
- return;
- case 'c':
- if (vc->vc_priv == EPdec) {
- if (vc->vc_par[0])
- vc->vc_cursor_type =
- CUR_MAKE(vc->vc_par[0],
- vc->vc_par[1],
- vc->vc_par[2]);
- else
- vc->vc_cursor_type = cur_default;
- return;
- }
- break;
- case 'm':
- if (vc->vc_priv == EPdec) {
- clear_selection();
- if (vc->vc_par[0])
- vc->vc_complement_mask = vc->vc_par[0] << 8 | vc->vc_par[1];
- else
- vc->vc_complement_mask = vc->vc_s_complement_mask;
- return;
- }
- break;
- case 'n':
- if (vc->vc_priv == EPecma) {
- if (vc->vc_par[0] == 5)
- status_report(tty);
- else if (vc->vc_par[0] == 6)
- cursor_report(vc, tty);
- }
- return;
- }
- if (vc->vc_priv != EPecma) {
- vc->vc_priv = EPecma;
- return;
- }
- switch(c) {
- case 'G': case '`':
- if (vc->vc_par[0])
- vc->vc_par[0]--;
- gotoxy(vc, vc->vc_par[0], vc->state.y);
- return;
- case 'A':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x, vc->state.y - vc->vc_par[0]);
- return;
- case 'B': case 'e':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x, vc->state.y + vc->vc_par[0]);
- return;
- case 'C': case 'a':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x + vc->vc_par[0], vc->state.y);
- return;
- case 'D':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, vc->state.x - vc->vc_par[0], vc->state.y);
- return;
- case 'E':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, 0, vc->state.y + vc->vc_par[0]);
- return;
- case 'F':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- gotoxy(vc, 0, vc->state.y - vc->vc_par[0]);
- return;
- case 'd':
- if (vc->vc_par[0])
- vc->vc_par[0]--;
- gotoxay(vc, vc->state.x ,vc->vc_par[0]);
- return;
- case 'H': case 'f':
- if (vc->vc_par[0])
- vc->vc_par[0]--;
- if (vc->vc_par[1])
- vc->vc_par[1]--;
- gotoxay(vc, vc->vc_par[1], vc->vc_par[0]);
- return;
- case 'J':
- csi_J(vc, vc->vc_par[0]);
- return;
- case 'K':
- csi_K(vc, vc->vc_par[0]);
- return;
- case 'L':
- csi_L(vc, vc->vc_par[0]);
- return;
- case 'M':
- csi_M(vc, vc->vc_par[0]);
- return;
- case 'P':
- csi_P(vc, vc->vc_par[0]);
- return;
- case 'c':
- if (!vc->vc_par[0])
- respond_ID(tty);
- return;
- case 'g':
- if (!vc->vc_par[0] && vc->state.x < VC_TABSTOPS_COUNT)
- set_bit(vc->state.x, vc->vc_tab_stop);
- else if (vc->vc_par[0] == 3)
- bitmap_zero(vc->vc_tab_stop, VC_TABSTOPS_COUNT);
- return;
- case 'm':
- csi_m(vc);
- return;
- case 'q': /* DECLL - but only 3 leds */
- /* map 0,1,2,3 to 0,1,2,4 */
- if (vc->vc_par[0] < 4)
- vt_set_led_state(vc->vc_num,
- (vc->vc_par[0] < 3) ? vc->vc_par[0] : 4);
- return;
- case 'r':
- if (!vc->vc_par[0])
- vc->vc_par[0]++;
- if (!vc->vc_par[1])
- vc->vc_par[1] = vc->vc_rows;
- /* Minimum allowed region is 2 lines */
- if (vc->vc_par[0] < vc->vc_par[1] &&
- vc->vc_par[1] <= vc->vc_rows) {
- vc->vc_top = vc->vc_par[0] - 1;
- vc->vc_bottom = vc->vc_par[1];
- gotoxay(vc, 0, 0);
- }
- return;
- case 's':
- save_cur(vc);
- return;
- case 'u':
- restore_cur(vc);
- return;
- case 'X':
- csi_X(vc, vc->vc_par[0]);
+
+ switch (vc->vc_priv) {
+ case EPdec:
+ csi_DEC(tty, vc, c);
return;
- case '@':
- csi_at(vc, vc->vc_par[0]);
+ case EPecma:
+ csi_ECMA(tty, vc, c);
return;
- case ']': /* setterm functions */
- setterm_command(vc);
+ default:
return;
}
- return;
case EScsiignore:
- if (c >= 20 && c <= 0x3f)
+ if (c >= ASCII_CSI_IGNORE_FIRST && c <= ASCII_CSI_IGNORE_LAST)
return;
vc->vc_state = ESnormal;
return;
- case ESpercent:
+ case ESpercent: /* ESC % */
vc->vc_state = ESnormal;
switch (c) {
case '@': /* defined in ISO 2022 */
@@ -2485,36 +2676,36 @@ static void do_con_trol(struct tty_struct *tty, struct vc_data *vc, int c)
return;
}
return;
- case ESfunckey:
+ case ESfunckey: /* ESC [ [ aka CSI [ */
vc->vc_state = ESnormal;
return;
- case EShash:
+ case EShash: /* ESC # */
vc->vc_state = ESnormal;
if (c == '8') {
/* DEC screen alignment test. kludge :-) */
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | 'E';
- csi_J(vc, 2);
+ csi_J(vc, CSI_J_VISIBLE);
vc->vc_video_erase_char =
(vc->vc_video_erase_char & 0xff00) | ' ';
do_update_region(vc, vc->vc_origin, vc->vc_screenbuf_size / 2);
}
return;
- case ESsetG0:
+ case ESsetG0: /* ESC ( */
vc_setGx(vc, 0, c);
vc->vc_state = ESnormal;
return;
- case ESsetG1:
+ case ESsetG1: /* ESC ) */
vc_setGx(vc, 1, c);
vc->vc_state = ESnormal;
return;
- case ESapc:
+ case ESapc: /* ESC _ */
return;
- case ESosc:
+ case ESosc: /* ESC ] [0-9] aka OSC [0-9] */
return;
- case ESpm:
+ case ESpm: /* ESC ^ */
return;
- case ESdcs:
+ case ESdcs: /* ESC P */
return;
default:
vc->vc_state = ESnormal;
@@ -2588,33 +2779,39 @@ static inline int vc_translate_ascii(const struct vc_data *vc, int c)
/**
- * vc_sanitize_unicode - Replace invalid Unicode code points with U+FFFD
- * @c: the received character, or U+FFFD for invalid sequences.
+ * vc_sanitize_unicode - Replace invalid Unicode code points with ``U+FFFD``
+ * @c: the received code point
*/
static inline int vc_sanitize_unicode(const int c)
{
- if ((c >= 0xd800 && c <= 0xdfff) || c == 0xfffe || c == 0xffff)
+ if (c >= 0xd800 && c <= 0xdfff)
return 0xfffd;
return c;
}
/**
- * vc_translate_unicode - Combine UTF-8 into Unicode in @vc_utf_char
+ * vc_translate_unicode - Combine UTF-8 into Unicode in &vc_data.vc_utf_char
* @vc: virtual console
- * @c: character to translate
- * @rescan: we return true if we need more (continuation) data
+ * @c: UTF-8 byte to translate
+ * @rescan: set to true iff @c wasn't consumed here and needs to be re-processed
+ *
+ * * &vc_data.vc_utf_char is the being-constructed Unicode code point.
+ * * &vc_data.vc_utf_count is the number of continuation bytes still expected to
+ * arrive.
+ * * &vc_data.vc_npar is the number of continuation bytes arrived so far.
*
- * @vc_utf_char is the being-constructed unicode character.
- * @vc_utf_count is the number of continuation bytes still expected to arrive.
- * @vc_npar is the number of continuation bytes arrived so far.
+ * Return:
+ * * %-1 - Input OK so far, @c consumed, further bytes expected.
+ * * %0xFFFD - Possibility 1: input invalid, @c may have been consumed (see
+ * desc. of @rescan). Possibility 2: input OK, @c consumed,
+ * ``U+FFFD`` is the resulting code point. ``U+FFFD`` is valid,
+ * ``REPLACEMENT CHARACTER``.
+ * * otherwise - Input OK, @c consumed, resulting code point returned.
*/
static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
{
- static const u32 utf8_length_changes[] = {
- 0x0000007f, 0x000007ff, 0x0000ffff,
- 0x001fffff, 0x03ffffff, 0x7fffffff
- };
+ static const u32 utf8_length_changes[] = {0x7f, 0x7ff, 0xffff, 0x10ffff};
/* Continuation byte received */
if ((c & 0xc0) == 0x80) {
@@ -2660,14 +2857,7 @@ static int vc_translate_unicode(struct vc_data *vc, int c, bool *rescan)
} else if ((c & 0xf8) == 0xf0) {
vc->vc_utf_count = 3;
vc->vc_utf_char = (c & 0x07);
- } else if ((c & 0xfc) == 0xf8) {
- vc->vc_utf_count = 4;
- vc->vc_utf_char = (c & 0x03);
- } else if ((c & 0xfe) == 0xfc) {
- vc->vc_utf_count = 5;
- vc->vc_utf_char = (c & 0x01);
} else {
- /* 254 and 255 are invalid */
return 0xfffd;
}
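
With the 5- and 6-byte forms gone, only lead bytes up to 0xf7 can start a valid sequence. The following userspace sketch (independent of the console code, assuming standard UTF-8 rules) classifies lead bytes the same way the rewritten vc_translate_unicode() now does:

#include <stdio.h>

/* Classify a UTF-8 lead byte: 1-, 2-, 3- and 4-byte forms are accepted,
 * anything longer (0xf8..0xff) is invalid and would yield U+FFFD. */
static int utf8_expected_continuations(unsigned char c, unsigned int *initial)
{
	if (c < 0x80) {			/* plain ASCII */
		*initial = c;
		return 0;
	}
	if ((c & 0xe0) == 0xc0) {	/* 2-byte sequence */
		*initial = c & 0x1f;
		return 1;
	}
	if ((c & 0xf0) == 0xe0) {	/* 3-byte sequence */
		*initial = c & 0x0f;
		return 2;
	}
	if ((c & 0xf8) == 0xf0) {	/* 4-byte sequence */
		*initial = c & 0x07;
		return 3;
	}
	return -1;			/* invalid lead byte */
}

int main(void)
{
	static const unsigned char leads[] = { 'A', 0xc3, 0xe2, 0xf0, 0xfc };
	unsigned int bits;

	for (unsigned int i = 0; i < sizeof(leads); i++)
		printf("lead 0x%02x -> %d continuation byte(s)\n", leads[i],
		       utf8_expected_continuations(leads[i], &bits));
	return 0;
}

The 0xfc lead byte, which used to start a 6-byte form, now reports -1, matching the removed branches above.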
@@ -2711,9 +2901,13 @@ static bool vc_is_control(struct vc_data *vc, int tc, int c)
* as cursor movement) and should not be displayed as a glyph unless
* the disp_ctrl mode is explicitly enabled.
*/
- static const u32 CTRL_ACTION = 0x0d00ff81;
+ static const u32 CTRL_ACTION = BIT(ASCII_NULL) |
+ GENMASK(ASCII_SHIFTIN, ASCII_BELL) | BIT(ASCII_CANCEL) |
+ BIT(ASCII_SUBSTITUTE) | BIT(ASCII_ESCAPE);
/* Cannot be overridden by disp_ctrl */
- static const u32 CTRL_ALWAYS = 0x0800f501;
+ static const u32 CTRL_ALWAYS = BIT(ASCII_NULL) | BIT(ASCII_BACKSPACE) |
+ BIT(ASCII_LINEFEED) | BIT(ASCII_SHIFTIN) | BIT(ASCII_SHIFTOUT) |
+ BIT(ASCII_CAR_RET) | BIT(ASCII_FORMFEED) | BIT(ASCII_ESCAPE);
if (vc->vc_state != ESnormal)
return true;
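
The old magic constants 0x0d00ff81 and 0x0800f501 are rebuilt from BIT()/GENMASK() over the new ASCII_* names. A standalone check (userspace C with local BIT/GENMASK definitions mirroring the kernel macros) shows the symbolic form expands to exactly the old values:

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define GENMASK(h, l)	(((~0u) >> (31 - (h))) & ~((1u << (l)) - 1u))

enum {
	ASCII_NULL = 0, ASCII_BELL = 7, ASCII_BACKSPACE = 8, ASCII_LINEFEED = 10,
	ASCII_FORMFEED = 12, ASCII_CAR_RET = 13, ASCII_SHIFTOUT = 14,
	ASCII_SHIFTIN = 15, ASCII_CANCEL = 24, ASCII_SUBSTITUTE = 26,
	ASCII_ESCAPE = 27,
};

int main(void)
{
	unsigned int ctrl_action = BIT(ASCII_NULL) |
		GENMASK(ASCII_SHIFTIN, ASCII_BELL) | BIT(ASCII_CANCEL) |
		BIT(ASCII_SUBSTITUTE) | BIT(ASCII_ESCAPE);
	unsigned int ctrl_always = BIT(ASCII_NULL) | BIT(ASCII_BACKSPACE) |
		BIT(ASCII_LINEFEED) | BIT(ASCII_SHIFTIN) | BIT(ASCII_SHIFTOUT) |
		BIT(ASCII_CAR_RET) | BIT(ASCII_FORMFEED) | BIT(ASCII_ESCAPE);

	printf("CTRL_ACTION = 0x%08x (was 0x0d00ff81)\n", ctrl_action);
	printf("CTRL_ALWAYS = 0x%08x (was 0x0800f501)\n", ctrl_always);
	return 0;
}

It prints 0x0d00ff81 and 0x0800f501, matching the constants being replaced.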
@@ -2730,17 +2924,17 @@ static bool vc_is_control(struct vc_data *vc, int tc, int c)
* useless without them; to display an arbitrary font position use the
* direct-to-font zone in UTF-8 mode.
*/
- if (c < 32) {
+ if (c < BITS_PER_TYPE(CTRL_ALWAYS)) {
if (vc->vc_disp_ctrl)
return CTRL_ALWAYS & BIT(c);
else
return vc->vc_utf || (CTRL_ACTION & BIT(c));
}
- if (c == 127 && !vc->vc_disp_ctrl)
+ if (c == ASCII_DEL && !vc->vc_disp_ctrl)
return true;
- if (c == 128 + 27)
+ if (c == ASCII_EXT_CSI)
return true;
return false;
@@ -2852,7 +3046,7 @@ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
};
int c, tc, n = 0;
unsigned int currcons;
- struct vc_data *vc;
+ struct vc_data *vc = tty->driver_data;
struct vt_notifier_param param;
bool rescan;
@@ -2860,13 +3054,6 @@ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
return count;
console_lock();
- vc = tty->driver_data;
- if (vc == NULL) {
- pr_err("vt: argh, driver_data is NULL !\n");
- console_unlock();
- return 0;
- }
-
currcons = vc->vc_num;
if (!vc_cons_allocated(currcons)) {
/* could this happen? */
@@ -2883,7 +3070,7 @@ static int do_con_write(struct tty_struct *tty, const u8 *buf, int count)
param.vc = vc;
while (!tty->flow.stopped && count) {
- int orig = *buf;
+ u8 orig = *buf;
buf++;
n++;
count--;
@@ -2992,16 +3179,16 @@ struct tty_driver *console_driver;
#ifdef CONFIG_VT_CONSOLE
/**
- * vt_kmsg_redirect() - Sets/gets the kernel message console
- * @new: The new virtual terminal number or -1 if the console should stay
- * unchanged
+ * vt_kmsg_redirect() - sets/gets the kernel message console
+ * @new: the new virtual terminal number or -1 if the console should stay
+ * unchanged
*
* By default, the kernel messages are always printed on the current virtual
* console. However, the user may modify that default with the
- * TIOCL_SETKMSGREDIRECT ioctl call.
+ * %TIOCL_SETKMSGREDIRECT ioctl call.
*
* This function sets the kernel message console to be @new. It returns the old
- * virtual console number. The virtual terminal number 0 (both as parameter and
+ * virtual console number. The virtual terminal number %0 (both as parameter and
* return value) means no redirection (i.e. always printed on the currently
* active console).
*
@@ -3009,8 +3196,8 @@ struct tty_driver *console_driver;
* value is not modified. You may use the macro vt_get_kmsg_redirect() in that
* case to make the code more understandable.
*
- * When the kernel is compiled without CONFIG_VT_CONSOLE, this function ignores
- * the parameter and always returns 0.
+ * When the kernel is compiled without %CONFIG_VT_CONSOLE, this function ignores
+ * the parameter and always returns %0.
*/
int vt_kmsg_redirect(int new)
{
@@ -3065,22 +3252,23 @@ static void vt_console_print(struct console *co, const char *b, unsigned count)
cnt = 0;
while (count--) {
c = *b++;
- if (c == 10 || c == 13 || c == 8 || vc->vc_need_wrap) {
+ if (c == ASCII_LINEFEED || c == ASCII_CAR_RET ||
+ c == ASCII_BACKSPACE || vc->vc_need_wrap) {
if (cnt && con_is_visible(vc))
vc->vc_sw->con_putcs(vc, start, cnt, vc->state.y, start_x);
cnt = 0;
- if (c == 8) { /* backspace */
+ if (c == ASCII_BACKSPACE) {
bs(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
continue;
}
- if (c != 13)
+ if (c != ASCII_CAR_RET)
lf(vc);
cr(vc);
start = (ushort *)vc->vc_pos;
start_x = vc->state.x;
- if (c == 10 || c == 13)
+ if (c == ASCII_LINEFEED || c == ASCII_CAR_RET)
continue;
}
vc_uniscr_putc(vc, c);
@@ -3144,6 +3332,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
{
char type, data;
char __user *p = (char __user *)arg;
+ void __user *param_aligned32 = (u32 __user *)arg + 1;
+ void __user *param = (void __user *)arg + 1;
int lines;
int ret;
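
The two new aliases lean on pointer arithmetic scaling with the pointed-to type: adding 1 to a u32 pointer skips four bytes (the old p + 4 on the char pointer), while adding 1 to the void pointer skips a single byte (the old p + 1; void-pointer arithmetic is a GCC extension the kernel relies on). A small userspace illustration, using char * in place of void * so it builds with any C compiler:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	char buf[8];
	uintptr_t base = (uintptr_t)buf;

	/* Equivalent of "(u32 __user *)arg + 1": advance by sizeof(u32). */
	uintptr_t aligned32 = (uintptr_t)((uint32_t *)buf + 1);
	/* Equivalent of "(void __user *)arg + 1": advance by one byte. */
	uintptr_t bytewise = (uintptr_t)((char *)buf + 1);

	printf("u32* + 1 advances %zu bytes\n", (size_t)(aligned32 - base));
	printf("char* + 1 advances %zu byte\n", (size_t)(bytewise - base));
	return 0;
}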
@@ -3157,8 +3347,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
case TIOCL_SETSEL:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- return set_selection_user((struct tiocl_selection
- __user *)(p+1), tty);
+ return set_selection_user(param, tty);
case TIOCL_PASTESEL:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -3171,10 +3360,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
case TIOCL_SELLOADLUT:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- console_lock();
- ret = sel_loadlut(p);
- console_unlock();
- break;
+ return sel_loadlut(param_aligned32);
case TIOCL_GETSHIFTSTATE:
/*
* Make it possible to react to Shift+Mousebutton. Note that
@@ -3190,10 +3376,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
console_unlock();
return put_user(data, p);
case TIOCL_SETVESABLANK:
- console_lock();
- ret = set_vesa_blanking(p);
- console_unlock();
- break;
+ return set_vesa_blanking(param);
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
return put_user(data, p);
@@ -3214,7 +3397,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
*/
return fg_console;
case TIOCL_SCROLLCONSOLE:
- if (get_user(lines, (s32 __user *)(p+4)))
+ if (get_user(lines, (s32 __user *)param_aligned32))
return -EFAULT;
/*
@@ -3312,16 +3495,13 @@ static void con_start(struct tty_struct *tty)
static void con_flush_chars(struct tty_struct *tty)
{
- struct vc_data *vc;
+ struct vc_data *vc = tty->driver_data;
if (in_interrupt()) /* from flush_to_ldisc */
return;
- /* if we race with con_close(), vt may be null */
console_lock();
- vc = tty->driver_data;
- if (vc)
- set_cursor(vc);
+ set_cursor(vc);
console_unlock();
}
@@ -3471,7 +3651,7 @@ static int __init con_init(void)
vc_cons[currcons].d = vc = kzalloc(sizeof(struct vc_data), GFP_NOWAIT);
INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK);
tty_port_init(&vc->port);
- visual_init(vc, currcons, 1);
+ visual_init(vc, currcons, true);
/* Assuming vc->vc_{cols,rows,screenbuf_size} are sane here. */
vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_NOWAIT);
vc_init(vc, currcons || !vc->vc_sw->con_save_screen);
@@ -3481,7 +3661,7 @@ static int __init con_init(void)
set_origin(vc);
save_screen(vc);
gotoxy(vc, vc->state.x, vc->state.y);
- csi_J(vc, 0);
+ csi_J(vc, CSI_J_CURSOR_TO_END);
update_screen(vc);
pr_info("Console: %s %s %dx%d\n",
vc->vc_can_do_color ? "colour" : "mono",
@@ -3640,7 +3820,7 @@ static int do_bind_con_driver(const struct consw *csw, int first, int last,
old_was_color = vc->vc_can_do_color;
vc->vc_sw->con_deinit(vc);
vc->vc_origin = (unsigned long)vc->vc_screenbuf;
- visual_init(vc, i, 0);
+ visual_init(vc, i, false);
set_origin(vc);
update_attr(vc);
@@ -3930,7 +4110,7 @@ static void vtconsole_deinit_device(struct con_driver *con)
* RETURNS: zero if unbound, nonzero if bound
*
* Drivers can call this and if zero, they should release
- * all resources allocated on con_startup()
+ * all resources allocated on &consw.con_startup()
*/
int con_is_bound(const struct consw *csw)
{
@@ -3970,15 +4150,9 @@ EXPORT_SYMBOL(con_is_visible);
* Called when the console is taken over by the kernel debugger, this
* function needs to save the current console state, then put the console
* into a state suitable for the kernel debugger.
- *
- * RETURNS:
- * Zero on success, nonzero if a failure occurred when trying to prepare
- * the console for the debugger.
*/
-int con_debug_enter(struct vc_data *vc)
+void con_debug_enter(struct vc_data *vc)
{
- int ret = 0;
-
saved_fg_console = fg_console;
saved_last_console = last_console;
saved_want_console = want_console;
@@ -3987,7 +4161,7 @@ int con_debug_enter(struct vc_data *vc)
vc->vc_mode = KD_TEXT;
console_blanked = 0;
if (vc->vc_sw->con_debug_enter)
- ret = vc->vc_sw->con_debug_enter(vc);
+ vc->vc_sw->con_debug_enter(vc);
#ifdef CONFIG_KGDB_KDB
/* Set the initial LINES variable if it is not already set */
if (vc->vc_rows < 999) {
@@ -4017,7 +4191,6 @@ int con_debug_enter(struct vc_data *vc)
}
}
#endif /* CONFIG_KGDB_KDB */
- return ret;
}
EXPORT_SYMBOL_GPL(con_debug_enter);
@@ -4026,15 +4199,10 @@ EXPORT_SYMBOL_GPL(con_debug_enter);
*
* Restore the console state to what it was before the kernel debugger
* was invoked.
- *
- * RETURNS:
- * Zero on success, nonzero if a failure occurred when trying to restore
- * the console.
*/
-int con_debug_leave(void)
+void con_debug_leave(void)
{
struct vc_data *vc;
- int ret = 0;
fg_console = saved_fg_console;
last_console = saved_last_console;
@@ -4044,8 +4212,7 @@ int con_debug_leave(void)
vc = vc_cons[fg_console].d;
if (vc->vc_sw->con_debug_leave)
- ret = vc->vc_sw->con_debug_leave(vc);
- return ret;
+ vc->vc_sw->con_debug_leave(vc);
}
EXPORT_SYMBOL_GPL(con_debug_leave);
@@ -4275,14 +4442,17 @@ postcore_initcall(vtconsole_class_init);
* Screen blanking
*/
-static int set_vesa_blanking(char __user *p)
+static int set_vesa_blanking(u8 __user *mode_user)
{
- unsigned int mode;
+ u8 mode;
- if (get_user(mode, p + 1))
+ if (get_user(mode, mode_user))
return -EFAULT;
- vesa_blank_mode = (mode < 4) ? mode : 0;
+ console_lock();
+ vesa_blank_mode = (mode <= VESA_BLANK_MAX) ? mode : VESA_NO_BLANKING;
+ console_unlock();
+
return 0;
}
@@ -4307,7 +4477,7 @@ void do_blank_screen(int entering_gfx)
if (entering_gfx) {
hide_cursor(vc);
save_screen(vc);
- vc->vc_sw->con_blank(vc, -1, 1);
+ vc->vc_sw->con_blank(vc, VESA_VSYNC_SUSPEND, 1);
console_blanked = fg_console + 1;
blank_state = blank_off;
set_origin(vc);
@@ -4328,7 +4498,8 @@ void do_blank_screen(int entering_gfx)
save_screen(vc);
/* In case we need to reset origin, blanking hook returns 1 */
- i = vc->vc_sw->con_blank(vc, vesa_off_interval ? 1 : (vesa_blank_mode + 1), 0);
+ i = vc->vc_sw->con_blank(vc, vesa_off_interval ? VESA_VSYNC_SUSPEND :
+ (vesa_blank_mode + 1), 0);
console_blanked = fg_console + 1;
if (i)
set_origin(vc);
@@ -4379,7 +4550,7 @@ void do_unblank_screen(int leaving_gfx)
}
console_blanked = 0;
- if (vc->vc_sw->con_blank(vc, 0, leaving_gfx))
+ if (vc->vc_sw->con_blank(vc, VESA_NO_BLANKING, leaving_gfx))
/* Low-level driver cannot restore -> do it ourselves */
update_screen(vc);
if (console_blank_hook)
@@ -4584,7 +4755,7 @@ out:
return rc;
}
-static int con_font_set(struct vc_data *vc, struct console_font_op *op)
+static int con_font_set(struct vc_data *vc, const struct console_font_op *op)
{
struct console_font font;
int rc = -EINVAL;
@@ -4748,43 +4919,3 @@ void vcs_scr_updated(struct vc_data *vc)
{
notify_update(vc);
}
-
-void vc_scrolldelta_helper(struct vc_data *c, int lines,
- unsigned int rolled_over, void *base, unsigned int size)
-{
- unsigned long ubase = (unsigned long)base;
- ptrdiff_t scr_end = (void *)c->vc_scr_end - base;
- ptrdiff_t vorigin = (void *)c->vc_visible_origin - base;
- ptrdiff_t origin = (void *)c->vc_origin - base;
- int margin = c->vc_size_row * 4;
- int from, wrap, from_off, avail;
-
- /* Turn scrollback off */
- if (!lines) {
- c->vc_visible_origin = c->vc_origin;
- return;
- }
-
- /* Do we have already enough to allow jumping from 0 to the end? */
- if (rolled_over > scr_end + margin) {
- from = scr_end;
- wrap = rolled_over + c->vc_size_row;
- } else {
- from = 0;
- wrap = size;
- }
-
- from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
- avail = (origin - from + wrap) % wrap;
-
- /* Only a little piece would be left? Show all incl. the piece! */
- if (avail < 2 * margin)
- margin = 0;
- if (from_off < margin)
- from_off = 0;
- if (from_off > avail - margin)
- from_off = avail;
-
- c->vc_visible_origin = ubase + (from + from_off) % wrap;
-}
-EXPORT_SYMBOL_GPL(vc_scrolldelta_helper);
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 8c685b501404..4b91072f3a4e 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -714,8 +714,7 @@ static int vt_resizex(struct vc_data *vc, struct vt_consize __user *cs)
vcp->vc_scan_lines = v.v_vlin;
if (v.v_clin)
vcp->vc_cell_height = v.v_clin;
- vcp->vc_resize_user = 1;
- ret = vc_resize(vcp, v.v_cols, v.v_rows);
+ ret = __vc_resize(vcp, v.v_cols, v.v_rows, true);
if (ret) {
vcp->vc_scan_lines = save_scan_lines;
vcp->vc_cell_height = save_cell_height;
@@ -923,9 +922,8 @@ int vt_ioctl(struct tty_struct *tty,
vc = vc_cons[i].d;
if (vc) {
- vc->vc_resize_user = 1;
/* FIXME: review v tty lock */
- vc_resize(vc_cons[i].d, cc, ll);
+ __vc_resize(vc_cons[i].d, cc, ll, true);
}
}
console_unlock();
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 2d572f6c8ec8..bb77de6fa067 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -24,6 +24,7 @@
#include <linux/kobject.h>
#include <linux/cdev.h>
#include <linux/uio_driver.h>
+#include <linux/dma-mapping.h>
#define UIO_MAX_DEVICES (1U << MINORBITS)
@@ -759,6 +760,49 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
vma->vm_page_prot);
}
+static int uio_mmap_dma_coherent(struct vm_area_struct *vma)
+{
+ struct uio_device *idev = vma->vm_private_data;
+ struct uio_mem *mem;
+ void *addr;
+ int ret = 0;
+ int mi;
+
+ mi = uio_find_mem_index(vma);
+ if (mi < 0)
+ return -EINVAL;
+
+ mem = idev->info->mem + mi;
+
+ if (mem->addr & ~PAGE_MASK)
+ return -ENODEV;
+ if (mem->dma_addr & ~PAGE_MASK)
+ return -ENODEV;
+ if (!mem->dma_device)
+ return -ENODEV;
+ if (vma->vm_end - vma->vm_start > mem->size)
+ return -EINVAL;
+
+ dev_warn(mem->dma_device,
+ "use of UIO_MEM_DMA_COHERENT is highly discouraged");
+
+ /*
+ * UIO uses offset to index into the maps for a device.
+ * We need to clear vm_pgoff for dma_mmap_coherent.
+ */
+ vma->vm_pgoff = 0;
+
+ addr = (void *)mem->addr;
+ ret = dma_mmap_coherent(mem->dma_device,
+ vma,
+ addr,
+ mem->dma_addr,
+ vma->vm_end - vma->vm_start);
+ vma->vm_pgoff = mi;
+
+ return ret;
+}
+
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct uio_listener *listener = filep->private_data;
@@ -806,6 +850,9 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
case UIO_MEM_VIRTUAL:
ret = uio_mmap_logical(vma);
break;
+ case UIO_MEM_DMA_COHERENT:
+ ret = uio_mmap_dma_coherent(vma);
+ break;
default:
ret = -EINVAL;
}
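
For the new UIO_MEM_DMA_COHERENT type, userspace maps the region the same way as any other UIO map: map N of /dev/uioX is selected by passing an offset of N times the page size to mmap(). A minimal consumer might look like the sketch below; the device path and map index are hypothetical, and the requested size must not exceed the map's actual size.

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical UIO device and map index; adjust for the real device. */
	const char *dev = "/dev/uio0";
	const int map_index = 2;
	const size_t map_size = 4096;

	int fd = open(dev, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* UIO convention: offset = map index * page size selects the map. */
	void *p = mmap(NULL, map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
		       fd, (off_t)map_index * sysconf(_SC_PAGESIZE));
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	printf("mapped %s map %d at %p\n", dev, map_index, p);
	munmap(p, map_size);
	close(fd);
	return 0;
}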
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 5313307c2754..d5f9384df125 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -36,7 +36,6 @@ struct uio_dmem_genirq_platdata {
struct platform_device *pdev;
unsigned int dmem_region_start;
unsigned int num_dmem_regions;
- void *dmem_region_vaddr[MAX_UIO_MAPS];
struct mutex alloc_lock;
unsigned int refcnt;
};
@@ -50,7 +49,6 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int dmem_region = priv->dmem_region_start;
uiomem = &priv->uioinfo->mem[priv->dmem_region_start];
@@ -61,11 +59,8 @@ static int uio_dmem_genirq_open(struct uio_info *info, struct inode *inode)
break;
addr = dma_alloc_coherent(&priv->pdev->dev, uiomem->size,
- (dma_addr_t *)&uiomem->addr, GFP_KERNEL);
- if (!addr) {
- uiomem->addr = DMEM_MAP_ERROR;
- }
- priv->dmem_region_vaddr[dmem_region++] = addr;
+ &uiomem->dma_addr, GFP_KERNEL);
+ uiomem->addr = addr ? (phys_addr_t) addr : DMEM_MAP_ERROR;
++uiomem;
}
priv->refcnt++;
@@ -80,7 +75,6 @@ static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
{
struct uio_dmem_genirq_platdata *priv = info->priv;
struct uio_mem *uiomem;
- int dmem_region = priv->dmem_region_start;
/* Tell the Runtime PM code that the device has become idle */
pm_runtime_put_sync(&priv->pdev->dev);
@@ -93,13 +87,12 @@ static int uio_dmem_genirq_release(struct uio_info *info, struct inode *inode)
while (!priv->refcnt && uiomem < &priv->uioinfo->mem[MAX_UIO_MAPS]) {
if (!uiomem->size)
break;
- if (priv->dmem_region_vaddr[dmem_region]) {
- dma_free_coherent(&priv->pdev->dev, uiomem->size,
- priv->dmem_region_vaddr[dmem_region],
- uiomem->addr);
+ if (uiomem->addr) {
+ dma_free_coherent(uiomem->dma_device, uiomem->size,
+ (void *) uiomem->addr,
+ uiomem->dma_addr);
}
uiomem->addr = DMEM_MAP_ERROR;
- ++dmem_region;
++uiomem;
}
@@ -264,7 +257,8 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
" dynamic and fixed memory regions.\n");
break;
}
- uiomem->memtype = UIO_MEM_PHYS;
+ uiomem->memtype = UIO_MEM_DMA_COHERENT;
+ uiomem->dma_device = &pdev->dev;
uiomem->addr = DMEM_MAP_ERROR;
uiomem->size = pdata->dynamic_region_sizes[i];
++uiomem;
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 77e2dc404885..72b33f7d4c40 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -191,9 +191,11 @@ static int pruss_probe(struct platform_device *pdev)
p->mem[1].size = sram_pool_sz;
p->mem[1].memtype = UIO_MEM_PHYS;
- p->mem[2].addr = gdev->ddr_paddr;
+ p->mem[2].addr = (phys_addr_t) gdev->ddr_vaddr;
+ p->mem[2].dma_addr = gdev->ddr_paddr;
p->mem[2].size = extram_pool_sz;
- p->mem[2].memtype = UIO_MEM_PHYS;
+ p->mem[2].memtype = UIO_MEM_DMA_COHERENT;
+ p->mem[2].dma_device = dev;
p->name = devm_kasprintf(dev, GFP_KERNEL, "pruss_evt%d", cnt);
p->version = DRV_VERSION;
diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
index ee917f1b091c..8b936a2e93a0 100644
--- a/drivers/usb/cdns3/drd.c
+++ b/drivers/usb/cdns3/drd.c
@@ -435,7 +435,7 @@ int cdns_drd_init(struct cdns *cdns)
writel(1, &cdns->otg_v1_regs->simulate);
cdns->version = CDNS3_CONTROLLER_V1;
} else {
- dev_err(cdns->dev, "not supporte DID=0x%08x\n", state);
+ dev_err(cdns->dev, "not supported DID=0x%08x\n", state);
return -EINVAL;
}
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 351ede4b5de2..58e3ca7e4793 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -116,3 +116,30 @@ config USB_AUTOSUSPEND_DELAY
The default value Linux has always had is 2 seconds. Change
this value if you want a different delay and cannot modify
the command line or module parameter.
+
+config USB_DEFAULT_AUTHORIZATION_MODE
+ int "Default authorization mode for USB devices"
+ range 0 2
+ default 1
+ depends on USB
+ help
+ Select the default USB device authorization mode. Can be overridden
+ with usbcore.authorized_default command line or module parameter.
+
+ This option allows you to choose whether USB devices that are
+ connected to the system can be used by default, or if they are
+ locked down.
+
+ With value 0 all connected USB devices with the exception of the root
+ hub require user space authorization before they can be used.
+
+ With value 1 (default) no user space authorization is required to
+ use connected USB devices.
+
+ With value 2 all connected USB devices with the exception of internal
+ USB devices require user space authorization before they can be
+ used. Note that in this mode the differentiation between internal
+ and external USB devices relies on ACPI, and on systems without
+ ACPI selecting value 2 is analogous to selecting value 0.
+
+ If unsure, keep the default value.
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index e01b1913d02b..e02ba15f6e34 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1710,9 +1710,7 @@ int usb_autoresume_device(struct usb_device *udev)
{
int status;
- status = pm_runtime_get_sync(&udev->dev);
- if (status < 0)
- pm_runtime_put_sync(&udev->dev);
+ status = pm_runtime_resume_and_get(&udev->dev);
dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&udev->dev.power.usage_count),
status);
@@ -1818,9 +1816,7 @@ int usb_autopm_get_interface(struct usb_interface *intf)
{
int status;
- status = pm_runtime_get_sync(&intf->dev);
- if (status < 0)
- pm_runtime_put_sync(&intf->dev);
+ status = pm_runtime_resume_and_get(&intf->dev);
dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
__func__, atomic_read(&intf->dev.power.usage_count),
status);
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index a2530811cf7d..4b38b87a1343 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -141,7 +141,7 @@ static void ep_device_release(struct device *dev)
kfree(ep_dev);
}
-struct device_type usb_ep_device_type = {
+const struct device_type usb_ep_device_type = {
.name = "usb_endpoint",
.release = ep_device_release,
};
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index edf74458474a..c0e005670d67 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -357,12 +357,10 @@ static const u8 ss_rh_config_descriptor[] = {
#define USB_AUTHORIZE_ALL 1
#define USB_AUTHORIZE_INTERNAL 2
-static int authorized_default = USB_AUTHORIZE_WIRED;
+static int authorized_default = CONFIG_USB_DEFAULT_AUTHORIZATION_MODE;
module_param(authorized_default, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(authorized_default,
- "Default USB device authorization: 0 is not authorized, 1 is "
- "authorized, 2 is authorized for internal devices, -1 is "
- "authorized (default, same as 1)");
+ "Default USB device authorization: 0 is not authorized, 1 is authorized (default), 2 is authorized for internal devices, -1 is authorized (same as 1)");
/*-------------------------------------------------------------------------*/
/**
@@ -2795,10 +2793,16 @@ int usb_add_hcd(struct usb_hcd *hcd,
struct usb_device *rhdev;
struct usb_hcd *shared_hcd;
- if (!hcd->skip_phy_initialization && usb_hcd_is_primary_hcd(hcd)) {
- hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
- if (IS_ERR(hcd->phy_roothub))
- return PTR_ERR(hcd->phy_roothub);
+ if (!hcd->skip_phy_initialization) {
+ if (usb_hcd_is_primary_hcd(hcd)) {
+ hcd->phy_roothub = usb_phy_roothub_alloc(hcd->self.sysdev);
+ if (IS_ERR(hcd->phy_roothub))
+ return PTR_ERR(hcd->phy_roothub);
+ } else {
+ hcd->phy_roothub = usb_phy_roothub_alloc_usb3_phy(hcd->self.sysdev);
+ if (IS_ERR(hcd->phy_roothub))
+ return PTR_ERR(hcd->phy_roothub);
+ }
retval = usb_phy_roothub_init(hcd->phy_roothub);
if (retval)
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e38a4124f610..3ee8455585b6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -37,6 +37,7 @@
#include <asm/byteorder.h>
#include "hub.h"
+#include "phy.h"
#include "otg_productlist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
@@ -634,6 +635,34 @@ static int hub_ext_port_status(struct usb_hub *hub, int port1, int type,
ret = 0;
}
mutex_unlock(&hub->status_mutex);
+
+ /*
+ * There is no need to lock status_mutex here, because status_mutex
+ * protects hub->status, and the phy driver only checks the port
+ * status without changing the status.
+ */
+ if (!ret) {
+ struct usb_device *hdev = hub->hdev;
+
+ /*
+ * Only roothub will be notified of connection changes,
+ * since the USB PHY only cares about changes at the next
+ * level.
+ */
+ if (is_root_hub(hdev)) {
+ struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
+ bool connect;
+ bool connect_change;
+
+ connect_change = *change & USB_PORT_STAT_C_CONNECTION;
+ connect = *status & USB_PORT_STAT_CONNECTION;
+ if (connect_change && connect)
+ usb_phy_roothub_notify_connect(hcd->phy_roothub, port1 - 1);
+ else if (connect_change)
+ usb_phy_roothub_notify_disconnect(hcd->phy_roothub, port1 - 1);
+ }
+ }
+
return ret;
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 077dfe48d01c..d2b2787be409 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1198,6 +1198,8 @@ EXPORT_SYMBOL_GPL(usb_get_status);
* same status code used to report a true stall.
*
* This call is synchronous, and may not be used in an interrupt context.
+ * If a thread in your driver uses this call, make sure your disconnect()
+ * method can wait for it to complete.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
@@ -1516,7 +1518,8 @@ void usb_enable_interface(struct usb_device *dev,
* This call is synchronous, and may not be used in an interrupt context.
* Also, drivers must not change altsettings while urbs are scheduled for
* endpoints in that interface; all such urbs must first be completed
- * (perhaps forced by unlinking).
+ * (perhaps forced by unlinking). If a thread in your driver uses this call,
+ * make sure your disconnect() method can wait for it to complete.
*
* Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
@@ -1849,7 +1852,7 @@ static int usb_if_uevent(const struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-struct device_type usb_if_device_type = {
+const struct device_type usb_if_device_type = {
.name = "usb_interface",
.release = usb_release_interface,
.uevent = usb_if_uevent,
diff --git a/drivers/usb/core/of.c b/drivers/usb/core/of.c
index db4ccf9ce3d9..f1a499ee482c 100644
--- a/drivers/usb/core/of.c
+++ b/drivers/usb/core/of.c
@@ -8,6 +8,7 @@
*/
#include <linux/of.h>
+#include <linux/of_graph.h>
#include <linux/usb/of.h>
/**
@@ -75,6 +76,76 @@ bool usb_of_has_combined_node(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_of_has_combined_node);
+static bool usb_of_has_devices_or_graph(const struct usb_device *hub)
+{
+ const struct device_node *np = hub->dev.of_node;
+ struct device_node *child;
+
+ if (of_graph_is_present(np))
+ return true;
+
+ for_each_child_of_node(np, child)
+ if (of_property_present(child, "reg"))
+ return true;
+
+ return false;
+}
+
+/**
+ * usb_of_get_connect_type() - get a USB hub's port connect_type
+ * @hub: hub to which @port1 belongs
+ * @port1: one-based index of port
+ *
+ * Get the connect_type of @port1 based on the device node for @hub. If the
+ * port is described in the OF graph, the connect_type is "hotplug". If the
+ * @hub has a child device with a 'reg' property equal to @port1, the
+ * connect_type is "hard-wired". If there isn't an OF graph or child node at
+ * all then the connect_type is "unknown". Otherwise, the port is considered
+ * "unused" because it isn't described at all.
+ *
+ * Return: A connect_type for @port1 based on the device node for @hub.
+ */
+enum usb_port_connect_type usb_of_get_connect_type(struct usb_device *hub, int port1)
+{
+ struct device_node *np, *child, *ep, *remote_np;
+ enum usb_port_connect_type connect_type;
+
+ /* Only set connect_type if binding has ports/hardwired devices. */
+ if (!usb_of_has_devices_or_graph(hub))
+ return USB_PORT_CONNECT_TYPE_UNKNOWN;
+
+	/* The binding describes ports, so default this port to unused. */
+ connect_type = USB_PORT_NOT_USED;
+
+ np = hub->dev.of_node;
+ /*
+ * Hotplug ports are connected to an available remote node, e.g.
+ * usb-a-connector compatible node, in the OF graph.
+ */
+ if (of_graph_is_present(np)) {
+ ep = of_graph_get_endpoint_by_regs(np, port1, -1);
+ if (ep) {
+ remote_np = of_graph_get_remote_port_parent(ep);
+ of_node_put(ep);
+ if (of_device_is_available(remote_np))
+ connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
+ of_node_put(remote_np);
+ }
+ }
+
+ /*
+ * Hard-wired ports are child nodes with a reg property corresponding
+ * to the port number, i.e. a usb device.
+ */
+ child = usb_of_get_device_node(hub, port1);
+ if (of_device_is_available(child))
+ connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
+ of_node_put(child);
+
+ return connect_type;
+}
+EXPORT_SYMBOL_GPL(usb_of_get_connect_type);
+
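For context, here is a minimal sketch of how a caller might act on the helper's return value; the function name and loop are illustrative only, while the enum values come from include/linux/usb.h.

/* Illustrative sketch (not part of the patch): classify each port of a hub
 * with the new helper. Needs <linux/usb.h> and <linux/usb/of.h>.
 */
static void example_classify_ports(struct usb_device *hub, int nports)
{
	int port1;

	for (port1 = 1; port1 <= nports; port1++) {
		switch (usb_of_get_connect_type(hub, port1)) {
		case USB_PORT_CONNECT_TYPE_HOT_PLUG:
			/* port leads to a connector described in the OF graph */
			break;
		case USB_PORT_CONNECT_TYPE_HARD_WIRED:
			/* child node with a 'reg' matching this port */
			break;
		case USB_PORT_NOT_USED:
			/* the binding describes ports, but not this one */
			break;
		default:
			/* no port/graph information in the device tree */
			break;
		}
	}
}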
/**
* usb_of_get_interface_node() - get a USB interface node
* @udev: USB device of interface
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index fb1588e7c282..faa20054ad5a 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -19,6 +19,30 @@ struct usb_phy_roothub {
struct list_head list;
};
+/* Allocate a roothub_entry for the PHY with the given name */
+static int usb_phy_roothub_add_phy_by_name(struct device *dev, const char *name,
+ struct list_head *list)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct phy *phy;
+
+ phy = devm_of_phy_get(dev, dev->of_node, name);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
+
+ roothub_entry = devm_kzalloc(dev, sizeof(*roothub_entry), GFP_KERNEL);
+ if (!roothub_entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&roothub_entry->list);
+
+ roothub_entry->phy = phy;
+
+ list_add_tail(&roothub_entry->list, list);
+
+ return 0;
+}
+
static int usb_phy_roothub_add_phy(struct device *dev, int index,
struct list_head *list)
{
@@ -65,6 +89,9 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
INIT_LIST_HEAD(&phy_roothub->list);
+ if (!usb_phy_roothub_add_phy_by_name(dev, "usb2-phy", &phy_roothub->list))
+ return phy_roothub;
+
for (i = 0; i < num_phys; i++) {
err = usb_phy_roothub_add_phy(dev, i, &phy_roothub->list);
if (err)
@@ -75,6 +102,41 @@ struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc);
+/**
+ * usb_phy_roothub_alloc_usb3_phy - alloc the roothub
+ * @dev: the device of the host controller
+ *
+ * Allocate the USB PHY roothub if the host uses a generic usb3-phy.
+ *
+ * Return: On success, a pointer to the usb_phy_roothub. Otherwise,
+ * %NULL if no usb3 phy is used, or %-ENOMEM if out of memory.
+ */
+struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev)
+{
+ struct usb_phy_roothub *phy_roothub;
+ int num_phys;
+
+ if (!IS_ENABLED(CONFIG_GENERIC_PHY))
+ return NULL;
+
+ num_phys = of_count_phandle_with_args(dev->of_node, "phys",
+ "#phy-cells");
+ if (num_phys <= 0)
+ return NULL;
+
+ phy_roothub = devm_kzalloc(dev, sizeof(*phy_roothub), GFP_KERNEL);
+ if (!phy_roothub)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&phy_roothub->list);
+
+ if (!usb_phy_roothub_add_phy_by_name(dev, "usb3-phy", &phy_roothub->list))
+ return phy_roothub;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_alloc_usb3_phy);
+
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
@@ -172,6 +234,64 @@ int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub)
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_calibrate);
+/**
+ * usb_phy_roothub_notify_connect() - connect notification
+ * @phy_roothub: the roothub's PHYs, if the host uses a generic PHY
+ * @port: the port index for connect
+ *
+ * Use this notification if the PHY needs to track the port connection status.
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_notify_connect(roothub_entry->phy, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_connect);
+
+/**
+ * usb_phy_roothub_notify_disconnect() - disconnect notification
+ * @phy_roothub: the roothub's PHYs, if the host uses a generic PHY
+ * @port: the port index for disconnect
+ *
+ * Use this notification if the PHY needs to track the port connection status.
+ * Returns: %0 if successful, a negative error code otherwise
+ */
+int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port)
+{
+ struct usb_phy_roothub *roothub_entry;
+ struct list_head *head;
+ int err;
+
+ if (!phy_roothub)
+ return 0;
+
+ head = &phy_roothub->list;
+
+ list_for_each_entry(roothub_entry, head, list) {
+ err = phy_notify_disconnect(roothub_entry->phy, port);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_roothub_notify_disconnect);
+
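On the provider side, these notifiers end up in the generic PHY framework's connect/disconnect callbacks (the ops that phy_notify_connect()/phy_notify_disconnect() invoke). A hedged sketch of a PHY driver wiring them up; the driver name and log messages are invented, and the .connect/.disconnect members are assumed from the matching PHY framework change:

/* Sketch only; needs <linux/phy/phy.h>. */
static int example_phy_connect(struct phy *phy, int port)
{
	/* e.g. apply per-port tuning now that a device is attached */
	dev_dbg(&phy->dev, "port %d connected\n", port);
	return 0;
}

static int example_phy_disconnect(struct phy *phy, int port)
{
	dev_dbg(&phy->dev, "port %d disconnected\n", port);
	return 0;
}

static const struct phy_ops example_phy_ops = {
	.connect	= example_phy_connect,
	.disconnect	= example_phy_disconnect,
	.owner		= THIS_MODULE,
};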
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub)
{
struct usb_phy_roothub *roothub_entry;
diff --git a/drivers/usb/core/phy.h b/drivers/usb/core/phy.h
index 20a267cd986b..88b49c0ea6b5 100644
--- a/drivers/usb/core/phy.h
+++ b/drivers/usb/core/phy.h
@@ -12,6 +12,7 @@ struct device;
struct usb_phy_roothub;
struct usb_phy_roothub *usb_phy_roothub_alloc(struct device *dev);
+struct usb_phy_roothub *usb_phy_roothub_alloc_usb3_phy(struct device *dev);
int usb_phy_roothub_init(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
@@ -19,6 +20,8 @@ int usb_phy_roothub_exit(struct usb_phy_roothub *phy_roothub);
int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
enum phy_mode mode);
int usb_phy_roothub_calibrate(struct usb_phy_roothub *phy_roothub);
+int usb_phy_roothub_notify_connect(struct usb_phy_roothub *phy_roothub, int port);
+int usb_phy_roothub_notify_disconnect(struct usb_phy_roothub *phy_roothub, int port);
int usb_phy_roothub_power_on(struct usb_phy_roothub *phy_roothub);
void usb_phy_roothub_power_off(struct usb_phy_roothub *phy_roothub);
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index 4d63496f98b6..5b5e613a11e5 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/pm_qos.h>
#include <linux/component.h>
+#include <linux/usb/of.h>
#include "hub.h"
@@ -429,7 +430,7 @@ static const struct dev_pm_ops usb_port_pm_ops = {
#endif
};
-struct device_type usb_port_device_type = {
+const struct device_type usb_port_device_type = {
.name = "usb_port",
.release = usb_port_device_release,
.pm = &usb_port_pm_ops,
@@ -709,6 +710,7 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
return -ENOMEM;
}
+ port_dev->connect_type = usb_of_get_connect_type(hdev, port1);
hub->ports[port1 - 1] = port_dev;
port_dev->portnum = port1;
set_bit(port1, hub->power_bits);
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5d21718afb05..f98263e21c2a 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -273,9 +273,10 @@ static ssize_t avoid_reset_quirk_store(struct device *dev,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int val, rc;
+ bool val;
+ int rc;
- if (sscanf(buf, "%d", &val) != 1 || val < 0 || val > 1)
+ if (kstrtobool(buf, &val) != 0)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
if (rc < 0)
@@ -322,13 +323,14 @@ static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value, rc;
+ bool value;
+ int rc;
/* Hubs are always enabled for USB_PERSIST */
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB)
return -EPERM;
- if (sscanf(buf, "%d", &value) != 1)
+ if (kstrtobool(buf, &value) != 0)
return -EINVAL;
rc = usb_lock_device_interruptible(udev);
@@ -739,14 +741,14 @@ static ssize_t authorized_store(struct device *dev,
{
ssize_t result;
struct usb_device *usb_dev = to_usb_device(dev);
- unsigned val;
- result = sscanf(buf, "%u\n", &val);
- if (result != 1)
+ bool val;
+
+ if (kstrtobool(buf, &val) != 0)
result = -EINVAL;
- else if (val == 0)
- result = usb_deauthorize_device(usb_dev);
- else
+ else if (val)
result = usb_authorize_device(usb_dev);
+ else
+ result = usb_deauthorize_device(usb_dev);
return result < 0 ? result : size;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
@@ -847,16 +849,10 @@ static const struct attribute_group dev_string_attr_grp = {
.is_visible = dev_string_attrs_are_visible,
};
-const struct attribute_group *usb_device_groups[] = {
- &dev_attr_grp,
- &dev_string_attr_grp,
- NULL
-};
-
/* Binary descriptors */
static ssize_t
-read_descriptors(struct file *filp, struct kobject *kobj,
+descriptors_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
@@ -878,7 +874,7 @@ read_descriptors(struct file *filp, struct kobject *kobj,
srclen = sizeof(struct usb_device_descriptor);
} else {
src = udev->rawdescriptors[cfgno];
- srclen = __le16_to_cpu(udev->config[cfgno].desc.
+ srclen = le16_to_cpu(udev->config[cfgno].desc.
wTotalLength);
}
if (off < srclen) {
@@ -893,11 +889,69 @@ read_descriptors(struct file *filp, struct kobject *kobj,
}
return count - nleft;
}
+static BIN_ATTR_RO(descriptors, 18 + 65535); /* dev descr + max-size raw descriptor */
+
+static ssize_t
+bos_descriptors_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_device *udev = to_usb_device(dev);
+ struct usb_host_bos *bos = udev->bos;
+ struct usb_bos_descriptor *desc;
+ size_t desclen, n = 0;
+
+ if (bos) {
+ desc = bos->desc;
+ desclen = le16_to_cpu(desc->wTotalLength);
+ if (off < desclen) {
+ n = min(count, desclen - (size_t) off);
+ memcpy(buf, (void *) desc + off, n);
+ }
+ }
+ return n;
+}
+static BIN_ATTR_RO(bos_descriptors, 65535); /* max-size BOS */
-static struct bin_attribute dev_bin_attr_descriptors = {
- .attr = {.name = "descriptors", .mode = 0444},
- .read = read_descriptors,
- .size = 18 + 65535, /* dev descr + max-size raw descriptor */
+/* When modifying this list, be sure to modify dev_bin_attrs_are_visible()
+ * accordingly.
+ */
+static struct bin_attribute *dev_bin_attrs[] = {
+ &bin_attr_descriptors,
+ &bin_attr_bos_descriptors,
+ NULL
+};
+
+static umode_t dev_bin_attrs_are_visible(struct kobject *kobj,
+ struct bin_attribute *a, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct usb_device *udev = to_usb_device(dev);
+
+ /*
+ * There's no need to check if the descriptors attribute should
+ * be visible because all devices have a device descriptor. The
+ * bos_descriptors attribute should be visible if and only if
+ * the device has a BOS, so check if it exists here.
+ */
+ if (a == &bin_attr_bos_descriptors) {
+ if (udev->bos == NULL)
+ return 0;
+ }
+ return a->attr.mode;
+}
+
+static const struct attribute_group dev_bin_attr_grp = {
+ .bin_attrs = dev_bin_attrs,
+ .is_bin_visible = dev_bin_attrs_are_visible,
+};
+
+const struct attribute_group *usb_device_groups[] = {
+ &dev_attr_grp,
+ &dev_string_attr_grp,
+ &dev_bin_attr_grp,
+ NULL
};
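For completeness, a small userspace sketch that reads the new bos_descriptors attribute; the sysfs path is an example and depends on the device's topology, and error handling is minimal.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/bus/usb/devices/1-1/bos_descriptors";
	unsigned char buf[65535];
	size_t n;
	FILE *f = fopen(path, "rb");

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	n = fread(buf, 1, sizeof(buf), f);
	fclose(f);
	printf("read %zu bytes of BOS data\n", n);
	return 0;
}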
/*
@@ -1015,10 +1069,6 @@ int usb_create_sysfs_dev_files(struct usb_device *udev)
struct device *dev = &udev->dev;
int retval;
- retval = device_create_bin_file(dev, &dev_bin_attr_descriptors);
- if (retval)
- goto error;
-
retval = add_persist_attributes(dev);
if (retval)
goto error;
@@ -1048,7 +1098,6 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
remove_power_attributes(dev);
remove_persist_attributes(dev);
- device_remove_bin_file(dev, &dev_bin_attr_descriptors);
}
/* Interface Association Descriptor fields */
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index a34b22537d7c..7f8a912d4fe2 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -142,12 +142,19 @@ int usb_acpi_set_power_state(struct usb_device *hdev, int index, bool enable)
}
EXPORT_SYMBOL_GPL(usb_acpi_set_power_state);
-static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
- struct acpi_pld_info *pld)
+/*
+ * Private to usb-acpi; all the core needs to know is that
+ * port_dev->location is non-zero when it has been set by the firmware.
+ */
+#define USB_ACPI_LOCATION_VALID (1 << 31)
+
+static void
+usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
{
enum usb_port_connect_type connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *upc = NULL;
+ struct acpi_pld_info *pld = NULL;
acpi_status status;
/*
@@ -158,6 +165,12 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
* a usb device is directly hard-wired to the port. If no visible and
* no connectable, the port would be not used.
*/
+
+ status = acpi_get_physical_device_location(handle, &pld);
+ if (ACPI_SUCCESS(status) && pld)
+ port_dev->location = USB_ACPI_LOCATION_VALID |
+ pld->group_token << 8 | pld->group_position;
+
status = acpi_evaluate_object(handle, "_UPC", NULL, &buffer);
if (ACPI_FAILURE(status))
goto out;
@@ -166,25 +179,22 @@ static enum usb_port_connect_type usb_acpi_get_connect_type(acpi_handle handle,
if (!upc || (upc->type != ACPI_TYPE_PACKAGE) || upc->package.count != 4)
goto out;
+ /* UPC states port is connectable */
if (upc->package.elements[0].integer.value)
- if (pld->user_visible)
+ if (!pld)
+ ; /* keep connect_type as unknown */
+ else if (pld->user_visible)
connect_type = USB_PORT_CONNECT_TYPE_HOT_PLUG;
else
connect_type = USB_PORT_CONNECT_TYPE_HARD_WIRED;
- else if (!pld->user_visible)
+ else
connect_type = USB_PORT_NOT_USED;
out:
+ port_dev->connect_type = connect_type;
kfree(upc);
- return connect_type;
+ ACPI_FREE(pld);
}
-
-/*
- * Private to usb-acpi, all the core needs to know is that
- * port_dev->location is non-zero when it has been set by the firmware.
- */
-#define USB_ACPI_LOCATION_VALID (1 << 31)
-
static struct acpi_device *
usb_acpi_get_companion_for_port(struct usb_port *port_dev)
{
@@ -222,22 +232,12 @@ static struct acpi_device *
usb_acpi_find_companion_for_port(struct usb_port *port_dev)
{
struct acpi_device *adev;
- struct acpi_pld_info *pld;
- acpi_handle *handle;
- acpi_status status;
adev = usb_acpi_get_companion_for_port(port_dev);
if (!adev)
return NULL;
- handle = adev->handle;
- status = acpi_get_physical_device_location(handle, &pld);
- if (ACPI_SUCCESS(status) && pld) {
- port_dev->location = USB_ACPI_LOCATION_VALID
- | pld->group_token << 8 | pld->group_position;
- port_dev->connect_type = usb_acpi_get_connect_type(handle, pld);
- ACPI_FREE(pld);
- }
+ usb_acpi_get_connect_type(port_dev, adev->handle);
return adev;
}
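As a hedged illustration of the location encoding written above (valid bit, _PLD group token in bits 15:8, group position in the low byte), decode helpers might look like the following; the macro and function names are made up, since only usb-acpi itself interprets this value.

/* Illustration only: mirrors the encoding set in usb_acpi_get_connect_type(). */
#define EXAMPLE_LOCATION_VALID		(1U << 31)
#define EXAMPLE_GROUP_TOKEN(loc)	(((loc) >> 8) & 0xff)
#define EXAMPLE_GROUP_POSITION(loc)	((loc) & 0xff)

static inline bool example_location_valid(u32 location)
{
	return location & EXAMPLE_LOCATION_VALID;
}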
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index dc8d9228a5e7..a0c432b14b20 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -592,7 +592,7 @@ static char *usb_devnode(const struct device *dev,
usb_dev->bus->busnum, usb_dev->devnum);
}
-struct device_type usb_device_type = {
+const struct device_type usb_device_type = {
.name = "usb_device",
.release = usb_release_dev,
.uevent = usb_dev_uevent,
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index bfecb50773b6..b8324ea05b20 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -144,10 +144,10 @@ static inline int usb_disable_usb2_hardware_lpm(struct usb_device *udev)
extern const struct class usbmisc_class;
extern const struct bus_type usb_bus_type;
extern struct mutex usb_port_peer_mutex;
-extern struct device_type usb_device_type;
-extern struct device_type usb_if_device_type;
-extern struct device_type usb_ep_device_type;
-extern struct device_type usb_port_device_type;
+extern const struct device_type usb_device_type;
+extern const struct device_type usb_if_device_type;
+extern const struct device_type usb_ep_device_type;
+extern const struct device_type usb_port_device_type;
extern struct usb_device_driver usb_generic_driver;
static inline int is_usb_device(const struct device *dev)
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 5fc27b20df63..31078f3d41b8 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -131,7 +131,7 @@ config USB_DWC3_QCOM
tristate "Qualcomm Platform"
depends on ARCH_QCOM || COMPILE_TEST
depends on EXTCON || !EXTCON
- depends on (OF || ACPI)
+ depends on OF
default USB_DWC3
help
Some Qualcomm SoCs use DesignWare Core IP for USB2/3
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index e120611a5174..c07edfc954f7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -755,6 +755,7 @@ struct dwc3_ep {
#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
#define DWC3_EP_TXFIFO_RESIZED BIT(12)
#define DWC3_EP_DELAY_STOP BIT(13)
+#define DWC3_EP_RESOURCE_ALLOCATED BIT(14)
/* This last one is specific to EP0 */
#define DWC3_EP0_DIR_IN BIT(31)
@@ -1257,6 +1258,7 @@ struct dwc3 {
#define DWC31_REVISION_170A 0x3137302a
#define DWC31_REVISION_180A 0x3138302a
#define DWC31_REVISION_190A 0x3139302a
+#define DWC31_REVISION_200A 0x3230302a
#define DWC32_REVISION_ANY 0x0
#define DWC32_REVISION_100A 0x3130302a
diff --git a/drivers/usb/dwc3/dwc3-am62.c b/drivers/usb/dwc3/dwc3-am62.c
index 90a587bc29b7..fad151e78fd6 100644
--- a/drivers/usb/dwc3/dwc3-am62.c
+++ b/drivers/usb/dwc3/dwc3-am62.c
@@ -97,9 +97,15 @@
#define USBSS_VBUS_STAT_SESSVALID BIT(2)
#define USBSS_VBUS_STAT_VBUSVALID BIT(0)
-/* Mask for PHY PLL REFCLK */
+/* USB_PHY_CTRL register bits in CTRL_MMR */
+#define PHY_CORE_VOLTAGE_MASK BIT(31)
#define PHY_PLL_REFCLK_MASK GENMASK(3, 0)
+/* USB PHY2 register offsets */
+#define USB_PHY_PLL_REG12 0x130
+#define USB_PHY_PLL_LDO_REF_EN BIT(5)
+#define USB_PHY_PLL_LDO_REF_EN_EN BIT(4)
+
#define DWC3_AM62_AUTOSUSPEND_DELAY 100
struct dwc3_am62 {
@@ -162,6 +168,13 @@ static int phy_syscon_pll_refclk(struct dwc3_am62 *am62)
am62->offset = args.args[0];
+	/* Core voltage: the PHY_CORE_VOLTAGE bit is recommended to always be 0 */
+ ret = regmap_update_bits(am62->syscon, am62->offset, PHY_CORE_VOLTAGE_MASK, 0);
+ if (ret) {
+ dev_err(dev, "failed to set phy core voltage\n");
+ return ret;
+ }
+
ret = regmap_update_bits(am62->syscon, am62->offset, PHY_PLL_REFCLK_MASK, am62->rate_code);
if (ret) {
dev_err(dev, "failed to set phy pll reference clock rate\n");
@@ -176,8 +189,9 @@ static int dwc3_ti_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *node = pdev->dev.of_node;
struct dwc3_am62 *am62;
- int i, ret;
unsigned long rate;
+ void __iomem *phy;
+ int i, ret;
u32 reg;
am62 = devm_kzalloc(dev, sizeof(*am62), GFP_KERNEL);
@@ -219,6 +233,17 @@ static int dwc3_ti_probe(struct platform_device *pdev)
if (ret)
return ret;
+	/* Work around erratum i2409 */
+ phy = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(phy)) {
+ dev_err(dev, "can't map PHY IOMEM resource. Won't apply i2409 fix.\n");
+ phy = NULL;
+ } else {
+ reg = readl(phy + USB_PHY_PLL_REG12);
+ reg |= USB_PHY_PLL_LDO_REF_EN | USB_PHY_PLL_LDO_REF_EN_EN;
+ writel(reg, phy + USB_PHY_PLL_REG12);
+ }
+
/* VBUS divider select */
am62->vbus_divider = device_property_read_bool(dev, "ti,vbus-divider");
reg = dwc3_ti_readl(am62, USBSS_PHY_CONFIG);
@@ -267,21 +292,15 @@ err_pm_disable:
return ret;
}
-static int dwc3_ti_remove_core(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
- return 0;
-}
-
static void dwc3_ti_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dwc3_am62 *am62 = platform_get_drvdata(pdev);
u32 reg;
- device_for_each_child(dev, NULL, dwc3_ti_remove_core);
+ pm_runtime_get_sync(dev);
+ device_init_wakeup(dev, false);
+ of_platform_depopulate(dev);
/* Clear mode valid bit */
reg = dwc3_ti_readl(am62, USBSS_MODE_CONTROL);
@@ -289,7 +308,6 @@ static void dwc3_ti_remove(struct platform_device *pdev)
dwc3_ti_writel(am62, USBSS_MODE_CONTROL, reg);
pm_runtime_put_sync(dev);
- clk_disable_unprepare(am62->usb2_refclk);
pm_runtime_disable(dev);
pm_runtime_set_suspended(dev);
}
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index d1539fc9eabd..be7be00ecb34 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -52,8 +52,7 @@ static int dwc3_of_simple_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "rockchip,rk3399-dwc3"))
simple->need_reset = true;
- simple->resets = of_reset_control_array_get(np, false, true,
- true);
+ simple->resets = of_reset_control_array_get_optional_exclusive(np);
if (IS_ERR(simple->resets)) {
ret = PTR_ERR(simple->resets);
dev_err(dev, "failed to get device resets, err=%d\n", ret);
@@ -173,6 +172,7 @@ static const struct of_device_id of_dwc3_simple_match[] = {
{ .compatible = "sprd,sc9860-dwc3" },
{ .compatible = "allwinner,sun50i-h6-dwc3" },
{ .compatible = "hisilicon,hi3670-dwc3" },
+ { .compatible = "hisilicon,hi3798mv200-dwc3" },
{ .compatible = "intel,keembay-dwc3" },
{ /* Sentinel */ }
};
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index dbd6a5b2b289..f6b2fab49d5e 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -4,7 +4,6 @@
* Inspired by dwc3-of-simple.c
*/
-#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
@@ -53,22 +52,10 @@
#define APPS_USB_AVG_BW 0
#define APPS_USB_PEAK_BW MBps_to_icc(40)
-struct dwc3_acpi_pdata {
- u32 qscratch_base_offset;
- u32 qscratch_base_size;
- u32 dwc3_core_base_size;
- int qusb2_phy_irq_index;
- int dp_hs_phy_irq_index;
- int dm_hs_phy_irq_index;
- int ss_phy_irq_index;
- bool is_urs;
-};
-
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
struct platform_device *dwc3;
- struct platform_device *urs_usb;
struct clk **clks;
int num_clocks;
struct reset_control *resets;
@@ -84,8 +71,6 @@ struct dwc3_qcom {
struct notifier_block vbus_nb;
struct notifier_block host_nb;
- const struct dwc3_acpi_pdata *acpi_pdata;
-
enum usb_dr_mode mode;
bool is_suspended;
bool pm_suspended;
@@ -248,9 +233,6 @@ static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom)
struct device *dev = qcom->dev;
int ret;
- if (has_acpi_companion(dev))
- return 0;
-
qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr");
if (IS_ERR(qcom->icc_path_ddr)) {
return dev_err_probe(dev, PTR_ERR(qcom->icc_path_ddr),
@@ -519,31 +501,13 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
PIPE_UTMI_CLK_DIS);
}
-static int dwc3_qcom_get_irq(struct platform_device *pdev,
- const char *name, int num)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
- struct device_node *np = pdev->dev.of_node;
- int ret;
-
- if (np)
- ret = platform_get_irq_byname_optional(pdev_irq, name);
- else
- ret = platform_get_irq_optional(pdev_irq, num);
-
- return ret;
-}
-
static int dwc3_qcom_setup_irq(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata;
int irq;
int ret;
- irq = dwc3_qcom_get_irq(pdev, "qusb2_phy",
- pdata ? pdata->qusb2_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "qusb2_phy");
if (irq > 0) {
/* Keep wakeup interrupts disabled until suspend */
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
@@ -557,8 +521,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->qusb2_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "dp_hs_phy_irq",
- pdata ? pdata->dp_hs_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "dp_hs_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -571,8 +534,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->dp_hs_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "dm_hs_phy_irq",
- pdata ? pdata->dm_hs_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "dm_hs_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -585,8 +547,7 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev)
qcom->dm_hs_phy_irq = irq;
}
- irq = dwc3_qcom_get_irq(pdev, "ss_phy_irq",
- pdata ? pdata->ss_phy_irq_index : -1);
+ irq = platform_get_irq_byname_optional(pdev, "ss_phy_irq");
if (irq > 0) {
ret = devm_request_threaded_irq(qcom->dev, irq, NULL,
qcom_dwc3_resume_irq,
@@ -649,88 +610,6 @@ static int dwc3_qcom_clk_init(struct dwc3_qcom *qcom, int count)
return 0;
}
-static const struct property_entry dwc3_qcom_acpi_properties[] = {
- PROPERTY_ENTRY_STRING("dr_mode", "host"),
- {}
-};
-
-static const struct software_node dwc3_qcom_swnode = {
- .properties = dwc3_qcom_acpi_properties,
-};
-
-static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
-{
- struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device *dev = &pdev->dev;
- struct resource *res, *child_res = NULL;
- struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
- pdev;
- int irq;
- int ret;
-
- qcom->dwc3 = platform_device_alloc("dwc3", PLATFORM_DEVID_AUTO);
- if (!qcom->dwc3)
- return -ENOMEM;
-
- qcom->dwc3->dev.parent = dev;
- qcom->dwc3->dev.type = dev->type;
- qcom->dwc3->dev.dma_mask = dev->dma_mask;
- qcom->dwc3->dev.dma_parms = dev->dma_parms;
- qcom->dwc3->dev.coherent_dma_mask = dev->coherent_dma_mask;
-
- child_res = kcalloc(2, sizeof(*child_res), GFP_KERNEL);
- if (!child_res) {
- platform_device_put(qcom->dwc3);
- return -ENOMEM;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- ret = -ENODEV;
- goto out;
- }
-
- child_res[0].flags = res->flags;
- child_res[0].start = res->start;
- child_res[0].end = child_res[0].start +
- qcom->acpi_pdata->dwc3_core_base_size;
-
- irq = platform_get_irq(pdev_irq, 0);
- if (irq < 0) {
- ret = irq;
- goto out;
- }
- child_res[1].flags = IORESOURCE_IRQ;
- child_res[1].start = child_res[1].end = irq;
-
- ret = platform_device_add_resources(qcom->dwc3, child_res, 2);
- if (ret) {
- dev_err(&pdev->dev, "failed to add resources\n");
- goto out;
- }
-
- ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to add properties\n");
- goto out;
- }
-
- ret = platform_device_add(qcom->dwc3);
- if (ret) {
- dev_err(&pdev->dev, "failed to add device\n");
- device_remove_software_node(&qcom->dwc3->dev);
- goto out;
- }
- kfree(child_res);
- return 0;
-
-out:
- platform_device_put(qcom->dwc3);
- kfree(child_res);
- return ret;
-}
-
static int dwc3_qcom_of_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
@@ -763,57 +642,12 @@ node_put:
return ret;
}
-static struct platform_device *dwc3_qcom_create_urs_usb_platdev(struct device *dev)
-{
- struct platform_device *urs_usb = NULL;
- struct fwnode_handle *fwh;
- struct acpi_device *adev;
- char name[8];
- int ret;
- int id;
-
- /* Figure out device id */
- ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
- if (!ret)
- return NULL;
-
- /* Find the child using name */
- snprintf(name, sizeof(name), "USB%d", id);
- fwh = fwnode_get_named_child_node(dev->fwnode, name);
- if (!fwh)
- return NULL;
-
- adev = to_acpi_device_node(fwh);
- if (!adev)
- goto err_put_handle;
-
- urs_usb = acpi_create_platform_device(adev, NULL);
- if (IS_ERR_OR_NULL(urs_usb))
- goto err_put_handle;
-
- return urs_usb;
-
-err_put_handle:
- fwnode_handle_put(fwh);
-
- return urs_usb;
-}
-
-static void dwc3_qcom_destroy_urs_usb_platdev(struct platform_device *urs_usb)
-{
- struct fwnode_handle *fwh = urs_usb->dev.fwnode;
-
- platform_device_unregister(urs_usb);
- fwnode_handle_put(fwh);
-}
-
static int dwc3_qcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct dwc3_qcom *qcom;
- struct resource *res, *parent_res = NULL;
- struct resource local_res;
+ struct resource *res;
int ret, i;
bool ignore_pipe_clk;
bool wakeup_source;
@@ -825,14 +659,6 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qcom);
qcom->dev = &pdev->dev;
- if (has_acpi_companion(dev)) {
- qcom->acpi_pdata = acpi_device_get_match_data(dev);
- if (!qcom->acpi_pdata) {
- dev_err(&pdev->dev, "no supporting ACPI device data\n");
- return -EINVAL;
- }
- }
-
qcom->resets = devm_reset_control_array_get_optional_exclusive(dev);
if (IS_ERR(qcom->resets)) {
return dev_err_probe(&pdev->dev, PTR_ERR(qcom->resets),
@@ -861,40 +687,16 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (np) {
- parent_res = res;
- } else {
- memcpy(&local_res, res, sizeof(struct resource));
- parent_res = &local_res;
-
- parent_res->start = res->start +
- qcom->acpi_pdata->qscratch_base_offset;
- parent_res->end = parent_res->start +
- qcom->acpi_pdata->qscratch_base_size;
-
- if (qcom->acpi_pdata->is_urs) {
- qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
- if (IS_ERR_OR_NULL(qcom->urs_usb)) {
- dev_err(dev, "failed to create URS USB platdev\n");
- if (!qcom->urs_usb)
- ret = -ENODEV;
- else
- ret = PTR_ERR(qcom->urs_usb);
- goto clk_disable;
- }
- }
- }
-
- qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
+ qcom->qscratch_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qcom->qscratch_base)) {
ret = PTR_ERR(qcom->qscratch_base);
- goto free_urs;
+ goto clk_disable;
}
ret = dwc3_qcom_setup_irq(pdev);
if (ret) {
dev_err(dev, "failed to setup IRQs, err=%d\n", ret);
- goto free_urs;
+ goto clk_disable;
}
/*
@@ -906,14 +708,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
if (ignore_pipe_clk)
dwc3_qcom_select_utmi_clk(qcom);
- if (np)
- ret = dwc3_qcom_of_register_core(pdev);
- else
- ret = dwc3_qcom_acpi_register_core(pdev);
-
+ ret = dwc3_qcom_of_register_core(pdev);
if (ret) {
dev_err(dev, "failed to register DWC3 Core, err=%d\n", ret);
- goto free_urs;
+ goto clk_disable;
}
ret = dwc3_qcom_interconnect_init(qcom);
@@ -945,16 +743,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
interconnect_exit:
dwc3_qcom_interconnect_exit(qcom);
depopulate:
- if (np) {
- of_platform_depopulate(&pdev->dev);
- } else {
- device_remove_software_node(&qcom->dwc3->dev);
- platform_device_del(qcom->dwc3);
- }
+ of_platform_depopulate(&pdev->dev);
platform_device_put(qcom->dwc3);
-free_urs:
- if (qcom->urs_usb)
- dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
clk_disable:
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
@@ -969,21 +759,12 @@ reset_assert:
static void dwc3_qcom_remove(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
- struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
int i;
- if (np) {
- of_platform_depopulate(&pdev->dev);
- } else {
- device_remove_software_node(&qcom->dwc3->dev);
- platform_device_del(qcom->dwc3);
- }
+ of_platform_depopulate(&pdev->dev);
platform_device_put(qcom->dwc3);
- if (qcom->urs_usb)
- dwc3_qcom_destroy_urs_usb_platdev(qcom->urs_usb);
-
for (i = qcom->num_clocks - 1; i >= 0; i--) {
clk_disable_unprepare(qcom->clks[i]);
clk_put(qcom->clks[i]);
@@ -1053,38 +834,6 @@ static const struct of_device_id dwc3_qcom_of_match[] = {
};
MODULE_DEVICE_TABLE(of, dwc3_qcom_of_match);
-#ifdef CONFIG_ACPI
-static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
- .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
- .qscratch_base_size = SDM845_QSCRATCH_SIZE,
- .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .qusb2_phy_irq_index = 1,
- .dp_hs_phy_irq_index = 4,
- .dm_hs_phy_irq_index = 3,
- .ss_phy_irq_index = 2
-};
-
-static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
- .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
- .qscratch_base_size = SDM845_QSCRATCH_SIZE,
- .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
- .qusb2_phy_irq_index = 1,
- .dp_hs_phy_irq_index = 4,
- .dm_hs_phy_irq_index = 3,
- .ss_phy_irq_index = 2,
- .is_urs = true,
-};
-
-static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
- { "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
- { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
- { "QCOM0497", (unsigned long)&sdm845_acpi_urs_pdata },
- { "QCOM04A6", (unsigned long)&sdm845_acpi_pdata },
- { },
-};
-MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
-#endif
-
static struct platform_driver dwc3_qcom_driver = {
.probe = dwc3_qcom_probe,
.remove_new = dwc3_qcom_remove,
@@ -1092,7 +841,6 @@ static struct platform_driver dwc3_qcom_driver = {
.name = "dwc3-qcom",
.pm = &dwc3_qcom_dev_pm_ops,
.of_match_table = dwc3_qcom_of_match,
- .acpi_match_table = ACPI_PTR(dwc3_qcom_acpi_match),
},
};
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 6ae8a36f21cf..72bb722da2f2 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -646,6 +646,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
+ dwc3_gadget_start_config(dwc, 2);
dwc3_gadget_clear_tx_fifos(dwc);
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 28f49400f3e8..40c52dbc28d3 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -519,77 +519,56 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
static int dwc3_gadget_set_xfer_resource(struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
+ int ret;
+
+ if (dep->flags & DWC3_EP_RESOURCE_ALLOCATED)
+ return 0;
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
- return dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
+ ret = dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETTRANSFRESOURCE,
&params);
+ if (ret)
+ return ret;
+
+ dep->flags |= DWC3_EP_RESOURCE_ALLOCATED;
+ return 0;
}
/**
- * dwc3_gadget_start_config - configure ep resources
- * @dep: endpoint that is being enabled
- *
- * Issue a %DWC3_DEPCMD_DEPSTARTCFG command to @dep. After the command's
- * completion, it will set Transfer Resource for all available endpoints.
- *
- * The assignment of transfer resources cannot perfectly follow the data book
- * due to the fact that the controller driver does not have all knowledge of the
- * configuration in advance. It is given this information piecemeal by the
- * composite gadget framework after every SET_CONFIGURATION and
- * SET_INTERFACE. Trying to follow the databook programming model in this
- * scenario can cause errors. For two reasons:
- *
- * 1) The databook says to do %DWC3_DEPCMD_DEPSTARTCFG for every
- * %USB_REQ_SET_CONFIGURATION and %USB_REQ_SET_INTERFACE (8.1.5). This is
- * incorrect in the scenario of multiple interfaces.
- *
- * 2) The databook does not mention doing more %DWC3_DEPCMD_DEPXFERCFG for new
- * endpoint on alt setting (8.1.6).
- *
- * The following simplified method is used instead:
+ * dwc3_gadget_start_config - reset endpoint resources
+ * @dwc: pointer to the DWC3 context
+ * @resource_index: DEPSTARTCFG.XferRscIdx value (must be 0 or 2)
*
- * All hardware endpoints can be assigned a transfer resource and this setting
- * will stay persistent until either a core reset or hibernation. So whenever we
- * do a %DWC3_DEPCMD_DEPSTARTCFG(0) we can go ahead and do
- * %DWC3_DEPCMD_DEPXFERCFG for every hardware endpoint as well. We are
- * guaranteed that there are as many transfer resources as endpoints.
+ * Set resource_index=0 to reset all endpoints' resource allocation. Do this as
+ * part of the power-on/soft-reset initialization.
*
- * This function is called for each endpoint when it is being enabled but is
- * triggered only when called for EP0-out, which always happens first, and which
- * should only happen in one of the above conditions.
+ * Set resource_index=2 to reset only the non-control endpoints' resources. Do this
+ * when handling a SET_CONFIGURATION request or on hibernation resume.
*/
-static int dwc3_gadget_start_config(struct dwc3_ep *dep)
+int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index)
{
struct dwc3_gadget_ep_cmd_params params;
- struct dwc3 *dwc;
u32 cmd;
int i;
int ret;
- if (dep->number)
- return 0;
+ if (resource_index != 0 && resource_index != 2)
+ return -EINVAL;
memset(&params, 0x00, sizeof(params));
cmd = DWC3_DEPCMD_DEPSTARTCFG;
- dwc = dep->dwc;
+ cmd |= DWC3_DEPCMD_PARAM(resource_index);
- ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+ ret = dwc3_send_gadget_ep_cmd(dwc->eps[0], cmd, &params);
if (ret)
return ret;
- for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
- struct dwc3_ep *dep = dwc->eps[i];
-
- if (!dep)
- continue;
-
- ret = dwc3_gadget_set_xfer_resource(dep);
- if (ret)
- return ret;
- }
+ /* Reset resource allocation flags */
+ for (i = resource_index; i < dwc->num_eps && dwc->eps[i]; i++)
+ dwc->eps[i]->flags &= ~DWC3_EP_RESOURCE_ALLOCATED;
return 0;
}
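To summarize the new programming model in one place, the call pattern introduced by this patch looks roughly like the sketch below (it mirrors the two call sites added elsewhere in this diff; the wrapper function is illustrative, not new code):

static int example_start_flow(struct dwc3 *dwc)
{
	int ret;

	/* controller (re)start: reset every endpoint's transfer resources */
	ret = dwc3_gadget_start_config(dwc, 0);
	if (ret)
		return ret;

	/* later, on SET_CONFIGURATION: keep ep0's resources, reset the rest */
	ret = dwc3_gadget_start_config(dwc, 2);

	/*
	 * Each endpoint then re-issues DEPXFERCFG lazily from
	 * __dwc3_gadget_ep_enable() while DWC3_EP_RESOURCE_ALLOCATED is clear.
	 */
	return ret;
}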
@@ -884,16 +863,18 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep, unsigned int action)
ret = dwc3_gadget_resize_tx_fifos(dep);
if (ret)
return ret;
-
- ret = dwc3_gadget_start_config(dep);
- if (ret)
- return ret;
}
ret = dwc3_gadget_set_ep_config(dep, action);
if (ret)
return ret;
+ if (!(dep->flags & DWC3_EP_RESOURCE_ALLOCATED)) {
+ ret = dwc3_gadget_set_xfer_resource(dep);
+ if (ret)
+ return ret;
+ }
+
if (!(dep->flags & DWC3_EP_ENABLED)) {
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
@@ -1047,7 +1028,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->type = 0;
- mask = DWC3_EP_TXFIFO_RESIZED;
+ mask = DWC3_EP_TXFIFO_RESIZED | DWC3_EP_RESOURCE_ALLOCATED;
/*
* dwc3_remove_requests() can exit early if DWC3 EP delayed stop is
* set. Do not clear DEP flags, so that the end transfer command will
@@ -2913,6 +2894,12 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+ ret = dwc3_gadget_start_config(dwc, 0);
+ if (ret) {
+ dev_err(dwc->dev, "failed to config endpoints\n");
+ return ret;
+ }
+
dep = dwc->eps[0];
dep->flags = 0;
ret = __dwc3_gadget_ep_enable(dep, DWC3_DEPCFG_ACTION_INIT);
@@ -3428,7 +3415,7 @@ static int dwc3_gadget_ep_reclaim_trb_sg(struct dwc3_ep *dep,
struct dwc3_request *req, const struct dwc3_event_depevt *event,
int status)
{
- struct dwc3_trb *trb = &dep->trb_pool[dep->trb_dequeue];
+ struct dwc3_trb *trb;
struct scatterlist *sg = req->sg;
struct scatterlist *s;
unsigned int num_queued = req->num_queued_sgs;
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 55a56cf67d73..d73e735e4081 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -119,6 +119,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
void dwc3_ep0_send_delayed_status(struct dwc3 *dwc);
void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool interrupt);
+int dwc3_gadget_start_config(struct dwc3 *dwc, unsigned int resource_index);
/**
* dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 43230915323c..5a5cb6ce9946 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -11,8 +11,52 @@
#include <linux/of.h>
#include <linux/platform_device.h>
+#include "../host/xhci-port.h"
+#include "../host/xhci-ext-caps.h"
+#include "../host/xhci-caps.h"
#include "core.h"
+#define XHCI_HCSPARAMS1 0x4
+#define XHCI_PORTSC_BASE 0x400
+
+/**
+ * dwc3_power_off_all_roothub_ports - Power off all Root hub ports
+ * @dwc: Pointer to our controller context structure
+ */
+static void dwc3_power_off_all_roothub_ports(struct dwc3 *dwc)
+{
+ void __iomem *xhci_regs;
+ u32 op_regs_base;
+ int port_num;
+ u32 offset;
+ u32 reg;
+ int i;
+
+	/* xHCI regs are not mapped yet; map them temporarily here */
+ if (dwc->xhci_resources[0].start) {
+ xhci_regs = ioremap(dwc->xhci_resources[0].start, DWC3_XHCI_REGS_END);
+ if (!xhci_regs) {
+ dev_err(dwc->dev, "Failed to ioremap xhci_regs\n");
+ return;
+ }
+
+ op_regs_base = HC_LENGTH(readl(xhci_regs));
+ reg = readl(xhci_regs + XHCI_HCSPARAMS1);
+ port_num = HCS_MAX_PORTS(reg);
+
+ for (i = 1; i <= port_num; i++) {
+ offset = op_regs_base + XHCI_PORTSC_BASE + 0x10 * (i - 1);
+ reg = readl(xhci_regs + offset);
+ reg &= ~PORT_POWER;
+ writel(reg, xhci_regs + offset);
+ }
+
+ iounmap(xhci_regs);
+ } else {
+ dev_err(dwc->dev, "xhci base reg invalid\n");
+ }
+}
+
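For clarity, the per-port register offset computed in the loop above is equivalent to this small helper (illustrative only; XHCI_PORTSC_BASE is the 0x400 define added above):

static inline u32 example_portsc_offset(u32 op_regs_base, int port1)
{
	/* PORTSC registers start at op_regs_base + 0x400 with a 0x10 stride */
	return op_regs_base + XHCI_PORTSC_BASE + 0x10 * (port1 - 1);
}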
static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc,
int irq, char *name)
{
@@ -66,6 +110,12 @@ int dwc3_host_init(struct dwc3 *dwc)
int ret, irq;
int prop_idx = 0;
+ /*
+	 * Some platforms need to power off all root hub ports immediately after DWC3 is set to
+	 * host mode, to avoid a VBUS glitch when the xHCI controller gets reset later.
+ */
+ dwc3_power_off_all_roothub_ports(dwc);
+
irq = dwc3_host_get_irq(dwc);
if (irq < 0)
return irq;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b3592bcb0f96..566ff0b1282a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -190,6 +190,7 @@ config USB_F_MASS_STORAGE
tristate
config USB_F_FS
+ select DMA_SHARED_BUFFER
tristate
config USB_F_UAC1
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6bff6cb93789..bffbc1dc651f 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -15,6 +15,9 @@
/* #define VERBOSE_DEBUG */
#include <linux/blkdev.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-resv.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/fs_parser.h>
@@ -43,6 +46,8 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+MODULE_IMPORT_NS(DMA_BUF);
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -124,6 +129,25 @@ struct ffs_ep {
u8 num;
};
+struct ffs_dmabuf_priv {
+ struct list_head entry;
+ struct kref ref;
+ struct ffs_data *ffs;
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+ enum dma_data_direction dir;
+ spinlock_t lock;
+ u64 context;
+ struct usb_request *req; /* P: ffs->eps_lock */
+ struct usb_ep *ep; /* P: ffs->eps_lock */
+};
+
+struct ffs_dma_fence {
+ struct dma_fence base;
+ struct ffs_dmabuf_priv *priv;
+ struct work_struct work;
+};
+
struct ffs_epfile {
/* Protects ep->ep and ep->req. */
struct mutex mutex;
@@ -197,6 +221,11 @@ struct ffs_epfile {
unsigned char isoc; /* P: ffs->eps_lock */
unsigned char _pad;
+
+ /* Protects dmabufs */
+ struct mutex dmabufs_mutex;
+ struct list_head dmabufs; /* P: dmabufs_mutex */
+ atomic_t seqno;
};
struct ffs_buffer {
@@ -934,31 +963,44 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
return ret;
}
-static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+static struct ffs_ep *ffs_epfile_wait_ep(struct file *file)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_request *req;
struct ffs_ep *ep;
- char *data = NULL;
- ssize_t ret, data_len = -EINVAL;
- int halt;
-
- /* Are we still active? */
- if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
- return -ENODEV;
+ int ret;
/* Wait for endpoint to be enabled */
ep = epfile->ep;
if (!ep) {
if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
+ return ERR_PTR(-EAGAIN);
ret = wait_event_interruptible(
epfile->ffs->wait, (ep = epfile->ep));
if (ret)
- return -EINTR;
+ return ERR_PTR(-EINTR);
}
+ return ep;
+}
+
+static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct usb_request *req;
+ struct ffs_ep *ep;
+ char *data = NULL;
+ ssize_t ret, data_len = -EINVAL;
+ int halt;
+
+ /* Are we still active? */
+ if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
+ return -ENODEV;
+
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
/* Do we halt? */
halt = (!io_data->read == !epfile->in);
if (halt && epfile->isoc)
@@ -1258,10 +1300,58 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
return res;
}
+static void ffs_dmabuf_release(struct kref *ref)
+{
+ struct ffs_dmabuf_priv *priv = container_of(ref, struct ffs_dmabuf_priv, ref);
+ struct dma_buf_attachment *attach = priv->attach;
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ pr_vdebug("FFS DMABUF release\n");
+ dma_resv_lock(dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ dma_buf_detach(attach->dmabuf, attach);
+ dma_buf_put(dmabuf);
+ kfree(priv);
+}
+
+static void ffs_dmabuf_get(struct dma_buf_attachment *attach)
+{
+ struct ffs_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_get(&priv->ref);
+}
+
+static void ffs_dmabuf_put(struct dma_buf_attachment *attach)
+{
+ struct ffs_dmabuf_priv *priv = attach->importer_priv;
+
+ kref_put(&priv->ref, ffs_dmabuf_release);
+}
+
static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ struct ffs_dmabuf_priv *priv, *tmp;
+ struct ffs_data *ffs = epfile->ffs;
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ /* Close all attached DMABUFs */
+ list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) {
+ /* Cancel any pending transfer */
+ spin_lock_irq(&ffs->eps_lock);
+ if (priv->ep && priv->req)
+ usb_ep_dequeue(priv->ep, priv->req);
+ spin_unlock_irq(&ffs->eps_lock);
+
+ list_del(&priv->entry);
+ ffs_dmabuf_put(priv->attach);
+ }
+
+ mutex_unlock(&epfile->dmabufs_mutex);
__ffs_epfile_read_buffer_free(epfile);
ffs_data_closed(epfile->ffs);
@@ -1269,6 +1359,356 @@ ffs_epfile_release(struct inode *inode, struct file *file)
return 0;
}
+static void ffs_dmabuf_cleanup(struct work_struct *work)
+{
+ struct ffs_dma_fence *dma_fence =
+ container_of(work, struct ffs_dma_fence, work);
+ struct ffs_dmabuf_priv *priv = dma_fence->priv;
+ struct dma_buf_attachment *attach = priv->attach;
+ struct dma_fence *fence = &dma_fence->base;
+
+ ffs_dmabuf_put(attach);
+ dma_fence_put(fence);
+}
+
+static void ffs_dmabuf_signal_done(struct ffs_dma_fence *dma_fence, int ret)
+{
+ struct ffs_dmabuf_priv *priv = dma_fence->priv;
+ struct dma_fence *fence = &dma_fence->base;
+ bool cookie = dma_fence_begin_signalling();
+
+ dma_fence_get(fence);
+ fence->error = ret;
+ dma_fence_signal(fence);
+ dma_fence_end_signalling(cookie);
+
+ /*
+ * The fence will be unref'd in ffs_dmabuf_cleanup.
+ * It can't be done here, as the unref functions might try to lock
+ * the resv object, which would deadlock.
+ */
+ INIT_WORK(&dma_fence->work, ffs_dmabuf_cleanup);
+ queue_work(priv->ffs->io_completion_wq, &dma_fence->work);
+}
+
+static void ffs_epfile_dmabuf_io_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ pr_vdebug("FFS: DMABUF transfer complete, status=%d\n", req->status);
+ ffs_dmabuf_signal_done(req->context, req->status);
+ usb_ep_free_request(ep, req);
+}
+
+static const char *ffs_dmabuf_get_driver_name(struct dma_fence *fence)
+{
+ return "functionfs";
+}
+
+static const char *ffs_dmabuf_get_timeline_name(struct dma_fence *fence)
+{
+ return "";
+}
+
+static void ffs_dmabuf_fence_release(struct dma_fence *fence)
+{
+ struct ffs_dma_fence *dma_fence =
+ container_of(fence, struct ffs_dma_fence, base);
+
+ kfree(dma_fence);
+}
+
+static const struct dma_fence_ops ffs_dmabuf_fence_ops = {
+ .get_driver_name = ffs_dmabuf_get_driver_name,
+ .get_timeline_name = ffs_dmabuf_get_timeline_name,
+ .release = ffs_dmabuf_fence_release,
+};
+
+static int ffs_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
+{
+ if (!nonblock)
+ return dma_resv_lock_interruptible(dmabuf->resv, NULL);
+
+ if (!dma_resv_trylock(dmabuf->resv))
+ return -EBUSY;
+
+ return 0;
+}
+
+static struct dma_buf_attachment *
+ffs_dmabuf_find_attachment(struct ffs_epfile *epfile, struct dma_buf *dmabuf)
+{
+ struct device *dev = epfile->ffs->gadget->dev.parent;
+ struct dma_buf_attachment *attach = NULL;
+ struct ffs_dmabuf_priv *priv;
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ list_for_each_entry(priv, &epfile->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ attach = priv->attach;
+ break;
+ }
+ }
+
+ if (attach)
+ ffs_dmabuf_get(attach);
+
+ mutex_unlock(&epfile->dmabufs_mutex);
+
+ return attach ?: ERR_PTR(-EPERM);
+}
+
+static int ffs_dmabuf_attach(struct file *file, int fd)
+{
+ bool nonblock = file->f_flags & O_NONBLOCK;
+ struct ffs_epfile *epfile = file->private_data;
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+ struct dma_buf_attachment *attach;
+ struct ffs_dmabuf_priv *priv;
+ enum dma_data_direction dir;
+ struct sg_table *sg_table;
+ struct dma_buf *dmabuf;
+ int err;
+
+ if (!gadget || !gadget->sg_supported)
+ return -EPERM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ attach = dma_buf_attach(dmabuf, gadget->dev.parent);
+ if (IS_ERR(attach)) {
+ err = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ err = -ENOMEM;
+ goto err_dmabuf_detach;
+ }
+
+ dir = epfile->in ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ err = ffs_dma_resv_lock(dmabuf, nonblock);
+ if (err)
+ goto err_free_priv;
+
+ sg_table = dma_buf_map_attachment(attach, dir);
+ dma_resv_unlock(dmabuf->resv);
+
+ if (IS_ERR(sg_table)) {
+ err = PTR_ERR(sg_table);
+ goto err_free_priv;
+ }
+
+ attach->importer_priv = priv;
+
+ priv->sgt = sg_table;
+ priv->dir = dir;
+ priv->ffs = epfile->ffs;
+ priv->attach = attach;
+ spin_lock_init(&priv->lock);
+ kref_init(&priv->ref);
+ priv->context = dma_fence_context_alloc(1);
+
+ mutex_lock(&epfile->dmabufs_mutex);
+ list_add(&priv->entry, &epfile->dmabufs);
+ mutex_unlock(&epfile->dmabufs_mutex);
+
+ return 0;
+
+err_free_priv:
+ kfree(priv);
+err_dmabuf_detach:
+ dma_buf_detach(dmabuf, attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return err;
+}
+
+static int ffs_dmabuf_detach(struct file *file, int fd)
+{
+ struct ffs_epfile *epfile = file->private_data;
+ struct ffs_data *ffs = epfile->ffs;
+ struct device *dev = ffs->gadget->dev.parent;
+ struct ffs_dmabuf_priv *priv, *tmp;
+ struct dma_buf *dmabuf;
+ int ret = -EPERM;
+
+ dmabuf = dma_buf_get(fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ mutex_lock(&epfile->dmabufs_mutex);
+
+ list_for_each_entry_safe(priv, tmp, &epfile->dmabufs, entry) {
+ if (priv->attach->dev == dev
+ && priv->attach->dmabuf == dmabuf) {
+ /* Cancel any pending transfer */
+ spin_lock_irq(&ffs->eps_lock);
+ if (priv->ep && priv->req)
+ usb_ep_dequeue(priv->ep, priv->req);
+ spin_unlock_irq(&ffs->eps_lock);
+
+ list_del(&priv->entry);
+
+ /* Unref the reference from ffs_dmabuf_attach() */
+ ffs_dmabuf_put(priv->attach);
+ ret = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&epfile->dmabufs_mutex);
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
+static int ffs_dmabuf_transfer(struct file *file,
+ const struct usb_ffs_dmabuf_transfer_req *req)
+{
+ bool nonblock = file->f_flags & O_NONBLOCK;
+ struct ffs_epfile *epfile = file->private_data;
+ struct dma_buf_attachment *attach;
+ struct ffs_dmabuf_priv *priv;
+ struct ffs_dma_fence *fence;
+ struct usb_request *usb_req;
+ struct dma_buf *dmabuf;
+ struct ffs_ep *ep;
+ bool cookie;
+ u32 seqno;
+ int ret;
+
+ if (req->flags & ~USB_FFS_DMABUF_TRANSFER_MASK)
+ return -EINVAL;
+
+ dmabuf = dma_buf_get(req->fd);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ if (req->length > dmabuf->size || req->length == 0) {
+ ret = -EINVAL;
+ goto err_dmabuf_put;
+ }
+
+ attach = ffs_dmabuf_find_attachment(epfile, dmabuf);
+ if (IS_ERR(attach)) {
+ ret = PTR_ERR(attach);
+ goto err_dmabuf_put;
+ }
+
+ priv = attach->importer_priv;
+
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep)) {
+ ret = PTR_ERR(ep);
+ goto err_attachment_put;
+ }
+
+ ret = ffs_dma_resv_lock(dmabuf, nonblock);
+ if (ret)
+ goto err_attachment_put;
+
+ /* Make sure we don't have writers */
+ if (!dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_WRITE)) {
+ pr_vdebug("FFS WRITE fence is not signaled\n");
+ ret = -EBUSY;
+ goto err_resv_unlock;
+ }
+
+ /* If we're writing to the DMABUF, make sure we don't have readers */
+ if (epfile->in &&
+ !dma_resv_test_signaled(dmabuf->resv, DMA_RESV_USAGE_READ)) {
+ pr_vdebug("FFS READ fence is not signaled\n");
+ ret = -EBUSY;
+ goto err_resv_unlock;
+ }
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, 1);
+ if (ret)
+ goto err_resv_unlock;
+
+ fence = kmalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence) {
+ ret = -ENOMEM;
+ goto err_resv_unlock;
+ }
+
+ fence->priv = priv;
+
+ spin_lock_irq(&epfile->ffs->eps_lock);
+
+ /* In the meantime, endpoint got disabled or changed. */
+ if (epfile->ep != ep) {
+ ret = -ESHUTDOWN;
+ goto err_fence_put;
+ }
+
+ usb_req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC);
+ if (!usb_req) {
+ ret = -ENOMEM;
+ goto err_fence_put;
+ }
+
+ /*
+ * usb_ep_queue() guarantees that all transfers are processed in the
+ * order they are enqueued, so we can use a simple incrementing
+ * sequence number for the dma_fence.
+ */
+ seqno = atomic_add_return(1, &epfile->seqno);
+
+ dma_fence_init(&fence->base, &ffs_dmabuf_fence_ops,
+ &priv->lock, priv->context, seqno);
+
+ dma_resv_add_fence(dmabuf->resv, &fence->base,
+ dma_resv_usage_rw(epfile->in));
+ dma_resv_unlock(dmabuf->resv);
+
+ /* Now that the dma_fence is in place, queue the transfer. */
+
+ usb_req->length = req->length;
+ usb_req->buf = NULL;
+ usb_req->sg = priv->sgt->sgl;
+ usb_req->num_sgs = sg_nents_for_len(priv->sgt->sgl, req->length);
+ usb_req->sg_was_mapped = true;
+ usb_req->context = fence;
+ usb_req->complete = ffs_epfile_dmabuf_io_complete;
+
+ cookie = dma_fence_begin_signalling();
+ ret = usb_ep_queue(ep->ep, usb_req, GFP_ATOMIC);
+ dma_fence_end_signalling(cookie);
+ if (!ret) {
+ priv->req = usb_req;
+ priv->ep = ep->ep;
+ } else {
+ pr_warn("FFS: Failed to queue DMABUF: %d\n", ret);
+ ffs_dmabuf_signal_done(fence, ret);
+ usb_ep_free_request(ep->ep, usb_req);
+ }
+
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ dma_buf_put(dmabuf);
+
+ return ret;
+
+err_fence_put:
+ spin_unlock_irq(&epfile->ffs->eps_lock);
+ dma_fence_put(&fence->base);
+err_resv_unlock:
+ dma_resv_unlock(dmabuf->resv);
+err_attachment_put:
+ ffs_dmabuf_put(attach);
+err_dmabuf_put:
+ dma_buf_put(dmabuf);
+
+ return ret;
+}
+
static long ffs_epfile_ioctl(struct file *file, unsigned code,
unsigned long value)
{
@@ -1279,18 +1719,49 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
- /* Wait for endpoint to be enabled */
- ep = epfile->ep;
- if (!ep) {
- if (file->f_flags & O_NONBLOCK)
- return -EAGAIN;
+ switch (code) {
+ case FUNCTIONFS_DMABUF_ATTACH:
+ {
+ int fd;
- ret = wait_event_interruptible(
- epfile->ffs->wait, (ep = epfile->ep));
- if (ret)
- return -EINTR;
+ if (copy_from_user(&fd, (void __user *)value, sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_attach(file, fd);
+ }
+ case FUNCTIONFS_DMABUF_DETACH:
+ {
+ int fd;
+
+ if (copy_from_user(&fd, (void __user *)value, sizeof(fd))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_detach(file, fd);
+ }
+ case FUNCTIONFS_DMABUF_TRANSFER:
+ {
+ struct usb_ffs_dmabuf_transfer_req req;
+
+ if (copy_from_user(&req, (void __user *)value, sizeof(req))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ return ffs_dmabuf_transfer(file, &req);
+ }
+ default:
+ break;
}
+ /* Wait for endpoint to be enabled */
+ ep = ffs_epfile_wait_ep(file);
+ if (IS_ERR(ep))
+ return PTR_ERR(ep);
+
spin_lock_irq(&epfile->ffs->eps_lock);
/* In the meantime, endpoint got disabled or changed. */
@@ -1863,6 +2334,8 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
for (i = 1; i <= count; ++i, ++epfile) {
epfile->ffs = ffs;
mutex_init(&epfile->mutex);
+ mutex_init(&epfile->dmabufs_mutex);
+ INIT_LIST_HEAD(&epfile->dmabufs);
if (ffs->user_flags & FUNCTIONFS_VIRTUAL_ADDR)
sprintf(epfile->name, "ep%02x", ffs->eps_addrmap[i]);
else
@@ -3445,6 +3918,25 @@ static inline struct f_fs_opts *to_ffs_opts(struct config_item *item)
func_inst.group);
}
+static ssize_t f_fs_opts_ready_show(struct config_item *item, char *page)
+{
+ struct f_fs_opts *opts = to_ffs_opts(item);
+ int ready;
+
+ ffs_dev_lock();
+ ready = opts->dev->desc_ready;
+ ffs_dev_unlock();
+
+ return sprintf(page, "%d\n", ready);
+}
+
+CONFIGFS_ATTR_RO(f_fs_opts_, ready);
+
+static struct configfs_attribute *ffs_attrs[] = {
+ &f_fs_opts_attr_ready,
+ NULL,
+};
+
static void ffs_attr_release(struct config_item *item)
{
struct f_fs_opts *opts = to_ffs_opts(item);
@@ -3458,6 +3950,7 @@ static struct configfs_item_operations ffs_item_ops = {
static const struct config_item_type ffs_func_type = {
.ct_item_ops = &ffs_item_ops,
+ .ct_attrs = ffs_attrs,
.ct_owner = THIS_MODULE,
};
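
A minimal userspace sketch of how the FUNCTIONFS_DMABUF_* ioctls added above are meant to be driven. Only the 'length' field of struct usb_ffs_dmabuf_transfer_req is visible in this hunk; the 'fd' and 'flags' fields below are assumptions taken from the series' uapi header, and completion is observed through the DMABUF's reservation fences set up in ffs_dmabuf_transfer().

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/usb/functionfs.h>

/* Hypothetical helper, not part of this patch. */
static int queue_dmabuf(int ep_fd, int dmabuf_fd, uint64_t len)
{
	struct usb_ffs_dmabuf_transfer_req req = {
		.fd = dmabuf_fd,	/* assumed field name */
		.flags = 0,		/* assumed field name */
		.length = len,		/* consumed by ffs_dmabuf_transfer() */
	};

	/* The ioctl argument is a pointer; the kernel copies the int fd. */
	if (ioctl(ep_fd, FUNCTIONFS_DMABUF_ATTACH, &dmabuf_fd) < 0)
		return -1;

	if (ioctl(ep_fd, FUNCTIONFS_DMABUF_TRANSFER, &req) < 0) {
		ioctl(ep_fd, FUNCTIONFS_DMABUF_DETACH, &dmabuf_fd);
		return -1;
	}

	return 0;
}

The new configfs "ready" attribute added in the same file can be read as a plain 0/1 integer to learn whether the function's descriptors have been written; the exact configfs path depends on how the gadget was composed.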
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 3c5a6f6ac341..444212c0b5a9 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -718,7 +718,7 @@ static const struct net_device_ops eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static struct device_type gadget_type = {
+static const struct device_type gadget_type = {
.name = "gadget",
};
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index dd3241fc6939..d41f5f31dadd 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -35,6 +35,9 @@ uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
data[1] = UVC_STREAM_EOH | video->fid;
+ if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
+ data[1] |= UVC_STREAM_ERR;
+
if (video->queue.buf_used == 0 && ts.tv_sec) {
/* dwClockFrequency is 48 MHz */
u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
@@ -370,6 +373,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
struct uvc_buffer *last_buf;
+ struct usb_request *to_queue = req;
unsigned long flags;
bool is_bulk = video->max_payload_size;
int ret = 0;
@@ -397,7 +401,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
case -EXDEV:
uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
- queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
+ if (req->length != 0)
+ queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
break;
case -ESHUTDOWN: /* disconnect from host. */
@@ -425,59 +430,59 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
* we're still streaming before queueing the usb_request
* back to req_free
*/
- if (video->is_enabled) {
+ if (!video->is_enabled) {
+ uvc_video_free_request(ureq, ep);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ uvcg_queue_cancel(queue, 0);
+
+ return;
+ }
+
+ /*
+ * Here we check whether any request is available in the ready
+ * list. If it is, queue it to the ep and add the current
+ * usb_request to the req_free list - for video_pump to fill in.
+ * Otherwise, just use the current usb_request to queue a 0
+ * length request to the ep. Since we always add to the req_free
+ * list if we dequeue from the ready list, there will never
+ * be a situation where the req_free list is completely out of
+ * requests and cannot recover.
+ */
+ to_queue->length = 0;
+ if (!list_empty(&video->req_ready)) {
+ to_queue = list_first_entry(&video->req_ready,
+ struct usb_request, list);
+ list_del(&to_queue->list);
+ list_add_tail(&req->list, &video->req_free);
/*
- * Here we check whether any request is available in the ready
- * list. If it is, queue it to the ep and add the current
- * usb_request to the req_free list - for video_pump to fill in.
- * Otherwise, just use the current usb_request to queue a 0
- * length request to the ep. Since we always add to the req_free
- * list if we dequeue from the ready list, there will never
- * be a situation where the req_free list is completely out of
- * requests and cannot recover.
+ * Queue work to the wq as well since it is possible that a
+ * buffer may not have been completely encoded with the set of
+ * in-flight usb requests for which the complete callbacks are
+ * firing.
+ * In that case, if we do not queue work to the worker thread,
+ * the buffer will never be marked as complete - and therefore
+ * not be returned to userspace. As a result,
+ * dequeue -> queue -> dequeue flow of uvc buffers will not
+ * happen.
*/
- struct usb_request *to_queue = req;
-
- to_queue->length = 0;
- if (!list_empty(&video->req_ready)) {
- to_queue = list_first_entry(&video->req_ready,
- struct usb_request, list);
- list_del(&to_queue->list);
- list_add_tail(&req->list, &video->req_free);
- /*
- * Queue work to the wq as well since it is possible that a
- * buffer may not have been completely encoded with the set of
- * in-flight usb requests for whih the complete callbacks are
- * firing.
- * In that case, if we do not queue work to the worker thread,
- * the buffer will never be marked as complete - and therefore
- * not be returned to userpsace. As a result,
- * dequeue -> queue -> dequeue flow of uvc buffers will not
- * happen.
- */
- queue_work(video->async_wq, &video->pump);
- }
+ queue_work(video->async_wq, &video->pump);
+ }
+ /*
+ * Queue to the endpoint. The actual queueing to ep will
+ * only happen on one thread - the async_wq for bulk endpoints
+ * and this thread for isoc endpoints.
+ */
+ ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
+ if (ret < 0) {
/*
- * Queue to the endpoint. The actual queueing to ep will
- * only happen on one thread - the async_wq for bulk endpoints
- * and this thread for isoc endpoints.
+ * Endpoint error, but the stream is still enabled.
+ * Put request back in req_free for it to be cleaned
+ * up later.
*/
- ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
- if (ret < 0) {
- /*
- * Endpoint error, but the stream is still enabled.
- * Put request back in req_free for it to be cleaned
- * up later.
- */
- list_add_tail(&to_queue->list, &video->req_free);
- }
- } else {
- uvc_video_free_request(ureq, ep);
- ret = 0;
+ list_add_tail(&to_queue->list, &video->req_free);
}
+
spin_unlock_irqrestore(&video->req_lock, flags);
- if (ret < 0)
- uvcg_queue_cancel(queue, 0);
}
static int
@@ -594,10 +599,7 @@ static void uvcg_video_pump(struct work_struct *work)
*/
spin_lock_irqsave(&queue->irqlock, flags);
buf = uvcg_queue_head(queue);
-
- if (buf != NULL) {
- video->encode(req, video, buf);
- } else {
+ if (!buf) {
/*
* Either the queue has been disconnected or no video buffer
* available for bulk transfer. Either way, stop processing
@@ -607,6 +609,8 @@ static void uvcg_video_pump(struct work_struct *work)
break;
}
+ video->encode(req, video, buf);
+
spin_unlock_irqrestore(&queue->irqlock, flags);
spin_lock_irqsave(&video->req_lock, flags);
@@ -623,14 +627,7 @@ static void uvcg_video_pump(struct work_struct *work)
uvcg_queue_cancel(queue, 0);
break;
}
-
- /* The request is owned by the endpoint / ready list. */
- req = NULL;
}
-
- if (!req)
- return;
-
spin_lock_irqsave(&video->req_lock, flags);
if (video->is_enabled)
list_add_tail(&req->list, &video->req_free);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index d59f94464b87..9d4150124fdb 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -903,6 +903,11 @@ int usb_gadget_map_request_by_dev(struct device *dev,
if (req->length == 0)
return 0;
+ if (req->sg_was_mapped) {
+ req->num_mapped_sgs = req->num_sgs;
+ return 0;
+ }
+
if (req->num_sgs) {
int mapped;
@@ -948,7 +953,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_map_request);
void usb_gadget_unmap_request_by_dev(struct device *dev,
struct usb_request *req, int is_in)
{
- if (req->length == 0)
+ if (req->length == 0 || req->sg_was_mapped)
return;
if (req->num_mapped_sgs) {
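
The sg_was_mapped handling above lets a function driver that already owns the DMA mapping of its scatterlist (for instance one obtained via dma_buf_map_attachment(), as in the f_fs DMABUF code earlier in this series) opt out of the core's map/unmap. A condensed sketch of that contract, with a hypothetical helper name:

#include <linux/scatterlist.h>
#include <linux/usb/gadget.h>

/* Illustrative only: hand a pre-mapped sg table to the UDC core. */
static void fill_premapped_request(struct usb_request *req,
				   struct sg_table *sgt, size_t len)
{
	req->buf = NULL;
	req->sg = sgt->sgl;
	req->num_sgs = sg_nents_for_len(sgt->sgl, len);
	/* usb_gadget_map/unmap_request() will now leave the sglist alone */
	req->sg_was_mapped = true;
	req->length = len;
}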
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e8042c158f6d..e82d03224f94 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -13,7 +13,7 @@
* code from Dave Liu and Shlomi Gridish.
*/
-#undef VERBOSE
+#define pr_fmt(x) "udc: " x
#include <linux/module.h>
#include <linux/kernel.h>
@@ -183,9 +183,9 @@ __acquires(ep->udc->lock)
usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep));
if (status && (status != -ESHUTDOWN))
- VDBG("complete %s req %p stat %d len %u/%u",
- ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
+ dev_vdbg(&udc->gadget.dev, "complete %s req %p stat %d len %u/%u\n",
+ ep->ep.name, &req->req, status,
+ req->req.actual, req->req.length);
ep->stopped = 1;
@@ -285,7 +285,7 @@ static int dr_controller_setup(struct fsl_udc *udc)
timeout = jiffies + FSL_UDC_RESET_TIMEOUT;
while (fsl_readl(&dr_regs->usbcmd) & USB_CMD_CTRL_RESET) {
if (time_after(jiffies, timeout)) {
- ERR("udc reset timeout!\n");
+ dev_err(&udc->gadget.dev, "udc reset timeout!\n");
return -ETIMEDOUT;
}
cpu_relax();
@@ -308,9 +308,10 @@ static int dr_controller_setup(struct fsl_udc *udc)
tmp &= USB_EP_LIST_ADDRESS_MASK;
fsl_writel(tmp, &dr_regs->endpointlistaddr);
- VDBG("vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x",
- udc->ep_qh, (int)tmp,
- fsl_readl(&dr_regs->endpointlistaddr));
+ dev_vdbg(&udc->gadget.dev,
+ "vir[qh_base] is %p phy[qh_base] is 0x%8x reg is 0x%8x\n",
+ udc->ep_qh, (int)tmp,
+ fsl_readl(&dr_regs->endpointlistaddr));
max_no_of_ep = (0x0000001F & fsl_readl(&dr_regs->dccparams));
for (ep_num = 1; ep_num < max_no_of_ep; ep_num++) {
@@ -498,7 +499,7 @@ static void struct_ep_qh_setup(struct fsl_udc *udc, unsigned char ep_num,
tmp = max_pkt_len << EP_QUEUE_HEAD_MAX_PKT_LEN_POS;
break;
default:
- VDBG("error ep type is %d", ep_type);
+ dev_vdbg(&udc->gadget.dev, "error ep type is %d\n", ep_type);
return;
}
if (zlt)
@@ -611,10 +612,10 @@ static int fsl_ep_enable(struct usb_ep *_ep,
spin_unlock_irqrestore(&udc->lock, flags);
retval = 0;
- VDBG("enabled %s (ep%d%s) maxpacket %d",ep->ep.name,
- ep->ep.desc->bEndpointAddress & 0x0f,
- (desc->bEndpointAddress & USB_DIR_IN)
- ? "in" : "out", max);
+ dev_vdbg(&udc->gadget.dev, "enabled %s (ep%d%s) maxpacket %d\n",
+ ep->ep.name, ep->ep.desc->bEndpointAddress & 0x0f,
+ (desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ max);
en_done:
return retval;
}
@@ -633,7 +634,10 @@ static int fsl_ep_disable(struct usb_ep *_ep)
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || !ep->ep.desc) {
- VDBG("%s not enabled", _ep ? ep->ep.name : NULL);
+ /*
+ * dev_vdbg(&udc->gadget.dev, "%s not enabled\n",
+ * _ep ? ep->ep.name : NULL);
+ */
return -EINVAL;
}
@@ -659,7 +663,7 @@ static int fsl_ep_disable(struct usb_ep *_ep)
ep->stopped = 1;
spin_unlock_irqrestore(&udc->lock, flags);
- VDBG("disabled %s OK", _ep->name);
+ dev_vdbg(&udc->gadget.dev, "disabled %s OK\n", _ep->name);
return 0;
}
@@ -719,8 +723,8 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
u32 temp, bitmask, tmp_stat;
- /* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
- VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
+ /* dev_vdbg(&udc->gadget.dev, "QH addr Register 0x%8x\n", dr_regs->endpointlistaddr);
+ dev_vdbg(&udc->gadget.dev, "ep_qh[%d] addr is 0x%8x\n", i, (u32)&(ep->udc->ep_qh[i])); */
bitmask = ep_is_in(ep)
? (1 << (ep_index(ep) + 16))
@@ -808,7 +812,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
*is_last = 0;
if ((*is_last) == 0)
- VDBG("multi-dtd request!");
+ dev_vdbg(&udc_controller->gadget.dev, "multi-dtd request!\n");
/* Fill in the transfer size; set active bit */
swap_temp = ((*length << DTD_LENGTH_BIT_POS) | DTD_STATUS_ACTIVE);
@@ -820,7 +824,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length,
mb();
- VDBG("length = %d address= 0x%x", *length, (int)*dma);
+ dev_vdbg(&udc_controller->gadget.dev, "length = %d address= 0x%x\n", *length, (int)*dma);
return dtd;
}
@@ -871,11 +875,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
/* catch various bogus parameters */
if (!_req || !req->req.complete || !req->req.buf
|| !list_empty(&req->queue)) {
- VDBG("%s, bad params", __func__);
+ dev_vdbg(&udc->gadget.dev, "%s, bad params\n", __func__);
return -EINVAL;
}
if (unlikely(!_ep || !ep->ep.desc)) {
- VDBG("%s, bad ep", __func__);
+ dev_vdbg(&udc->gadget.dev, "%s, bad ep\n", __func__);
return -EINVAL;
}
if (usb_endpoint_xfer_isoc(ep->ep.desc)) {
@@ -1036,8 +1040,8 @@ static int fsl_ep_set_halt(struct usb_ep *_ep, int value)
udc->ep0_dir = 0;
}
out:
- VDBG(" %s %s halt stat %d", ep->ep.name,
- value ? "set" : "clear", status);
+ dev_vdbg(&udc->gadget.dev, "%s %s halt stat %d\n", ep->ep.name,
+ value ? "set" : "clear", status);
return status;
}
@@ -1105,7 +1109,8 @@ static void fsl_ep_fifo_flush(struct usb_ep *_ep)
/* Wait until flush complete */
while (fsl_readl(&dr_regs->endptflush)) {
if (time_after(jiffies, timeout)) {
- ERR("ep flush timeout\n");
+ dev_err(&udc_controller->gadget.dev,
+ "ep flush timeout\n");
return;
}
cpu_relax();
@@ -1177,7 +1182,7 @@ static int fsl_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct fsl_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
- VDBG("VBUS %s", is_active ? "on" : "off");
+ dev_vdbg(&gadget->dev, "VBUS %s\n", is_active ? "on" : "off");
udc->vbus_active = (is_active != 0);
if (can_pullup(udc))
fsl_writel((fsl_readl(&dr_regs->usbcmd) | USB_CMD_RUN_STOP),
@@ -1543,7 +1548,7 @@ static void ep0_req_complete(struct fsl_udc *udc, struct fsl_ep *ep0,
udc->ep0_state = WAIT_FOR_SETUP;
break;
case WAIT_FOR_SETUP:
- ERR("Unexpected ep0 packets\n");
+ dev_err(&udc->gadget.dev, "Unexpected ep0 packets\n");
break;
default:
ep0stall(udc);
@@ -1612,7 +1617,7 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
errors = hc32_to_cpu(curr_td->size_ioc_sts);
if (errors & DTD_ERROR_MASK) {
if (errors & DTD_STATUS_HALTED) {
- ERR("dTD error %08x QH=%d\n", errors, pipe);
+ dev_err(&udc->gadget.dev, "dTD error %08x QH=%d\n", errors, pipe);
/* Clear the errors and Halt condition */
tmp = hc32_to_cpu(curr_qh->size_ioc_int_sts);
tmp &= ~errors;
@@ -1623,32 +1628,35 @@ static int process_ep_req(struct fsl_udc *udc, int pipe,
break;
}
if (errors & DTD_STATUS_DATA_BUFF_ERR) {
- VDBG("Transfer overflow");
+ dev_vdbg(&udc->gadget.dev, "Transfer overflow\n");
status = -EPROTO;
break;
} else if (errors & DTD_STATUS_TRANSACTION_ERR) {
- VDBG("ISO error");
+ dev_vdbg(&udc->gadget.dev, "ISO error\n");
status = -EILSEQ;
break;
} else
- ERR("Unknown error has occurred (0x%x)!\n",
+ dev_err(&udc->gadget.dev,
+ "Unknown error has occurred (0x%x)!\n",
errors);
} else if (hc32_to_cpu(curr_td->size_ioc_sts)
& DTD_STATUS_ACTIVE) {
- VDBG("Request not complete");
+ dev_vdbg(&udc->gadget.dev, "Request not complete\n");
status = REQ_UNCOMPLETE;
return status;
} else if (remaining_length) {
if (direction) {
- VDBG("Transmit dTD remaining length not zero");
+ dev_vdbg(&udc->gadget.dev,
+ "Transmit dTD remaining length not zero\n");
status = -EPROTO;
break;
} else {
break;
}
} else {
- VDBG("dTD transmitted successful");
+ dev_vdbg(&udc->gadget.dev,
+ "dTD transmitted successful\n");
}
if (j != curr_req->dtd_count - 1)
@@ -1691,7 +1699,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
/* If the ep is configured */
if (!curr_ep->ep.name) {
- WARNING("Invalid EP?");
+ dev_warn(&udc->gadget.dev, "Invalid EP?\n");
continue;
}
@@ -1700,8 +1708,9 @@ static void dtd_complete_irq(struct fsl_udc *udc)
queue) {
status = process_ep_req(udc, i, curr_req);
- VDBG("status of process_ep_req= %d, ep = %d",
- status, ep_num);
+ dev_vdbg(&udc->gadget.dev,
+ "status of process_ep_req= %d, ep = %d\n",
+ status, ep_num);
if (status == REQ_UNCOMPLETE)
break;
/* write back status to req */
@@ -1820,7 +1829,7 @@ static void reset_irq(struct fsl_udc *udc)
while (fsl_readl(&dr_regs->endpointprime)) {
/* Wait until all endptprime bits cleared */
if (time_after(jiffies, timeout)) {
- ERR("Timeout for reset\n");
+ dev_err(&udc->gadget.dev, "Timeout for reset\n");
break;
}
cpu_relax();
@@ -1830,7 +1839,7 @@ static void reset_irq(struct fsl_udc *udc)
fsl_writel(0xffffffff, &dr_regs->endptflush);
if (fsl_readl(&dr_regs->portsc1) & PORTSCX_PORT_RESET) {
- VDBG("Bus reset");
+ dev_vdbg(&udc->gadget.dev, "Bus reset\n");
/* Bus is reseting */
udc->bus_reset = 1;
/* Reset all the queues, include XD, dTD, EP queue
@@ -1838,7 +1847,7 @@ static void reset_irq(struct fsl_udc *udc)
reset_queues(udc, true);
udc->usb_state = USB_STATE_DEFAULT;
} else {
- VDBG("Controller reset");
+ dev_vdbg(&udc->gadget.dev, "Controller reset\n");
/* initialize usb hw reg except for regs for EP, not
* touch usbintr reg */
dr_controller_setup(udc);
@@ -1872,7 +1881,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Clear notification bits */
fsl_writel(irq_src, &dr_regs->usbsts);
- /* VDBG("irq_src [0x%8x]", irq_src); */
+ /* dev_vdbg(&udc->gadget.dev, "irq_src [0x%8x]", irq_src); */
/* Need to resume? */
if (udc->usb_state == USB_STATE_SUSPENDED)
@@ -1881,7 +1890,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* USB Interrupt */
if (irq_src & USB_STS_INT) {
- VDBG("Packet int");
+ dev_vdbg(&udc->gadget.dev, "Packet int\n");
/* Setup package, we only support ep0 as control ep */
if (fsl_readl(&dr_regs->endptsetupstat) & EP_SETUP_STATUS_EP0) {
tripwire_handler(udc, 0,
@@ -1910,7 +1919,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
/* Reset Received */
if (irq_src & USB_STS_RESET) {
- VDBG("reset int");
+ dev_vdbg(&udc->gadget.dev, "reset int\n");
reset_irq(udc);
status = IRQ_HANDLED;
}
@@ -1922,7 +1931,7 @@ static irqreturn_t fsl_udc_irq(int irq, void *_udc)
}
if (irq_src & (USB_STS_ERR | USB_STS_SYS_ERR)) {
- VDBG("Error IRQ %x", irq_src);
+ dev_vdbg(&udc->gadget.dev, "Error IRQ %x\n", irq_src);
}
spin_unlock_irqrestore(&udc->lock, flags);
@@ -1958,7 +1967,7 @@ static int fsl_udc_start(struct usb_gadget *g,
udc_controller->transceiver->otg,
&udc_controller->gadget);
if (retval < 0) {
- ERR("can't bind to transceiver\n");
+ dev_err(&udc_controller->gadget.dev, "can't bind to transceiver\n");
udc_controller->driver = NULL;
return retval;
}
@@ -2243,7 +2252,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->eps = kcalloc(udc->max_ep, sizeof(struct fsl_ep), GFP_KERNEL);
if (!udc->eps) {
- ERR("kmalloc udc endpoint status failed\n");
+ dev_err(&udc->gadget.dev, "kmalloc udc endpoint status failed\n");
goto eps_alloc_failed;
}
@@ -2258,7 +2267,7 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->ep_qh = dma_alloc_coherent(&pdev->dev, size,
&udc->ep_qh_dma, GFP_KERNEL);
if (!udc->ep_qh) {
- ERR("malloc QHs for udc failed\n");
+ dev_err(&udc->gadget.dev, "malloc QHs for udc failed\n");
goto ep_queue_alloc_failed;
}
@@ -2269,14 +2278,14 @@ static int struct_udc_setup(struct fsl_udc *udc,
udc->status_req = container_of(fsl_alloc_request(NULL, GFP_KERNEL),
struct fsl_req, req);
if (!udc->status_req) {
- ERR("kzalloc for udc status request failed\n");
+ dev_err(&udc->gadget.dev, "kzalloc for udc status request failed\n");
goto udc_status_alloc_failed;
}
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
if (!udc->status_req->req.buf) {
- ERR("kzalloc for udc request buffer failed\n");
+ dev_err(&udc->gadget.dev, "kzalloc for udc request buffer failed\n");
goto udc_req_buf_alloc_failed;
}
@@ -2373,7 +2382,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (pdata->operating_mode == FSL_USB2_DR_OTG) {
udc_controller->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(udc_controller->transceiver)) {
- ERR("Can't find OTG driver!\n");
+ dev_err(&udc_controller->gadget.dev, "Can't find OTG driver!\n");
ret = -ENODEV;
goto err_kfree;
}
@@ -2389,7 +2398,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
if (pdata->operating_mode == FSL_USB2_DR_DEVICE) {
if (!request_mem_region(res->start, resource_size(res),
driver_name)) {
- ERR("request mem region for %s failed\n", pdev->name);
+ dev_err(&udc_controller->gadget.dev, "request mem region for %s failed\n", pdev->name);
ret = -EBUSY;
goto err_kfree;
}
@@ -2420,7 +2429,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* Read Device Controller Capability Parameters register */
dccparams = fsl_readl(&dr_regs->dccparams);
if (!(dccparams & DCCPARAMS_DC)) {
- ERR("This SOC doesn't support device role\n");
+ dev_err(&udc_controller->gadget.dev, "This SOC doesn't support device role\n");
ret = -ENODEV;
goto err_exit;
}
@@ -2438,14 +2447,14 @@ static int fsl_udc_probe(struct platform_device *pdev)
ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED,
driver_name, udc_controller);
if (ret != 0) {
- ERR("cannot request irq %d err %d\n",
+ dev_err(&udc_controller->gadget.dev, "cannot request irq %d err %d\n",
udc_controller->irq, ret);
goto err_exit;
}
/* Initialize the udc structure including QH member and other member */
if (struct_udc_setup(udc_controller, pdev)) {
- ERR("Can't initialize udc data structure\n");
+ dev_err(&udc_controller->gadget.dev, "Can't initialize udc data structure\n");
ret = -ENOMEM;
goto err_free_irq;
}
@@ -2486,7 +2495,7 @@ static int fsl_udc_probe(struct platform_device *pdev)
/* setup the udc->eps[] for non-control endpoints and link
* to gadget.ep_list */
for (i = 1; i < (int)(udc_controller->max_ep / 2); i++) {
- char name[14];
+ char name[16];
sprintf(name, "ep%dout", i);
struct_ep_setup(udc_controller, i * 2, name, 1);
@@ -2666,6 +2675,15 @@ static const struct platform_device_id fsl_udc_devtype[] = {
}
};
MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
+
+static const struct of_device_id fsl_udc_dt_ids[] = {
+ { .compatible = "fsl-usb2-dr" },
+ { .compatible = "fsl-usb2-mph" },
+ { .compatible = "fsl,mpc5121-usb2-dr" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_udc_dt_ids);
+
static struct platform_driver udc_driver = {
.probe = fsl_udc_probe,
.remove_new = fsl_udc_remove,
@@ -2675,6 +2693,7 @@ static struct platform_driver udc_driver = {
.resume = fsl_udc_resume,
.driver = {
.name = driver_name,
+ .of_match_table = fsl_udc_dt_ids,
/* udc suspend/resume called from OTG driver */
.suspend = fsl_udc_otg_suspend,
.resume = fsl_udc_otg_resume,
diff --git a/drivers/usb/gadget/udc/fsl_usb2_udc.h b/drivers/usb/gadget/udc/fsl_usb2_udc.h
index 2efc5a930b48..cc1756f3e89d 100644
--- a/drivers/usb/gadget/udc/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/udc/fsl_usb2_udc.h
@@ -508,53 +508,6 @@ struct fsl_udc {
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_DEBUG "[%s] " fmt "\n", \
- __func__, ## args)
-#else
-#define DBG(fmt, args...) do{}while(0)
-#endif
-
-#if 0
-static void dump_msg(const char *label, const u8 * buf, unsigned int length)
-{
- unsigned int start, num, i;
- char line[52], *p;
-
- if (length >= 512)
- return;
- DBG("%s, length %u:\n", label, length);
- start = 0;
- while (length > 0) {
- num = min(length, 16u);
- p = line;
- for (i = 0; i < num; ++i) {
- if (i == 8)
- *p++ = ' ';
- sprintf(p, " %02x", buf[i]);
- p += 3;
- }
- *p = 0;
- printk(KERN_DEBUG "%6x: %s\n", start, line);
- buf += num;
- start += num;
- length -= num;
- }
-}
-#endif
-
-#ifdef VERBOSE
-#define VDBG DBG
-#else
-#define VDBG(stuff...) do{}while(0)
-#endif
-
-#define ERR(stuff...) pr_err("udc: " stuff)
-#define WARNING(stuff...) pr_warn("udc: " stuff)
-#define INFO(stuff...) pr_info("udc: " stuff)
-
-/*-------------------------------------------------------------------------*/
-
/* ### Add board specific defines here
*/
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 12e76bb62c20..19bbc38f3d35 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2650,7 +2650,7 @@ net2272_plat_probe(struct platform_device *pdev)
goto err_req;
}
- ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
+ ret = net2272_probe_fin(dev, irqflags);
if (ret)
goto err_io;
diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
index 61424cfd2e1c..1a6317e4b2a3 100644
--- a/drivers/usb/gadget/udc/pxa27x_udc.c
+++ b/drivers/usb/gadget/udc/pxa27x_udc.c
@@ -24,7 +24,6 @@
#include <linux/byteorder/generic.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 547af2ed9e5e..ba5a06690507 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -8,7 +8,6 @@
#include <linux/extcon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/phy/phy.h>
#include <linux/module.h>
diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c
index cb85168fd00c..7aa46d426f31 100644
--- a/drivers/usb/gadget/udc/tegra-xudc.c
+++ b/drivers/usb/gadget/udc/tegra-xudc.c
@@ -3491,8 +3491,8 @@ static void tegra_xudc_device_params_init(struct tegra_xudc *xudc)
static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
{
- int err = 0, usb3;
- unsigned int i;
+ int err = 0, usb3_companion_port;
+ unsigned int i, j;
xudc->utmi_phy = devm_kcalloc(xudc->dev, xudc->soc->num_phys,
sizeof(*xudc->utmi_phy), GFP_KERNEL);
@@ -3520,7 +3520,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
if (IS_ERR(xudc->utmi_phy[i])) {
err = PTR_ERR(xudc->utmi_phy[i]);
dev_err_probe(xudc->dev, err,
- "failed to get usb2-%d PHY\n", i);
+ "failed to get PHY for phy-name usb2-%d\n", i);
goto clean_up;
} else if (xudc->utmi_phy[i]) {
/* Get usb-phy, if utmi phy is available */
@@ -3539,19 +3539,30 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc)
}
/* Get USB3 phy */
- usb3 = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
- if (usb3 < 0)
+ usb3_companion_port = tegra_xusb_padctl_get_usb3_companion(xudc->padctl, i);
+ if (usb3_companion_port < 0)
continue;
- snprintf(phy_name, sizeof(phy_name), "usb3-%d", usb3);
- xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
- if (IS_ERR(xudc->usb3_phy[i])) {
- err = PTR_ERR(xudc->usb3_phy[i]);
- dev_err_probe(xudc->dev, err,
- "failed to get usb3-%d PHY\n", usb3);
- goto clean_up;
- } else if (xudc->usb3_phy[i])
- dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3);
+ for (j = 0; j < xudc->soc->num_phys; j++) {
+ snprintf(phy_name, sizeof(phy_name), "usb3-%d", j);
+ xudc->usb3_phy[i] = devm_phy_optional_get(xudc->dev, phy_name);
+ if (IS_ERR(xudc->usb3_phy[i])) {
+ err = PTR_ERR(xudc->usb3_phy[i]);
+ dev_err_probe(xudc->dev, err,
+ "failed to get PHY for phy-name usb3-%d\n", j);
+ goto clean_up;
+ } else if (xudc->usb3_phy[i]) {
+ int usb2_port =
+ tegra_xusb_padctl_get_port_number(xudc->utmi_phy[i]);
+ int usb3_port =
+ tegra_xusb_padctl_get_port_number(xudc->usb3_phy[i]);
+ if (usb3_port == usb3_companion_port) {
+ dev_dbg(xudc->dev, "USB2 port %d is paired with USB3 port %d for device mode port %d\n",
+ usb2_port, usb3_port, i);
+ break;
+ }
+ }
+ }
}
return err;
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c47ab0a491d..ad145a54ca74 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -66,6 +66,15 @@ struct orion_ehci_hcd {
static struct hc_driver __read_mostly ehci_orion_hc_driver;
/*
+ * Legacy DMA mask is 32 bit.
+ * AC5 has the DDR starting at 8GB, hence it requires
+ * a larger (34-bit) DMA mask, in order for DMA allocations
+ * to succeed:
+ */
+static const u64 dma_mask_orion = DMA_BIT_MASK(32);
+static const u64 dma_mask_ac5 = DMA_BIT_MASK(34);
+
+/*
* Implement Orion USB controller specification guidelines
*/
static void orion_usb_phy_v1_setup(struct usb_hcd *hcd)
@@ -211,6 +220,7 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
int irq, err;
enum orion_ehci_phy_ver phy_version;
struct orion_ehci_hcd *priv;
+ u64 *dma_mask_ptr;
if (usb_disabled())
return -ENODEV;
@@ -228,7 +238,8 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
* set. Since shared usb code relies on it, set it here for
* now. Once we have dma capability bindings this can go away.
*/
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ dma_mask_ptr = (u64 *)of_device_get_match_data(&pdev->dev);
+ err = dma_coerce_mask_and_coherent(&pdev->dev, *dma_mask_ptr);
if (err)
goto err;
@@ -332,8 +343,9 @@ static void ehci_orion_drv_remove(struct platform_device *pdev)
}
static const struct of_device_id ehci_orion_dt_ids[] = {
- { .compatible = "marvell,orion-ehci", },
- { .compatible = "marvell,armada-3700-ehci", },
+ { .compatible = "marvell,orion-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,armada-3700-ehci", .data = &dma_mask_orion},
+ { .compatible = "marvell,ac5-ehci", .data = &dma_mask_ac5},
{},
};
MODULE_DEVICE_TABLE(of, ehci_orion_dt_ids);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 357d9aee38a3..3348c25ddb18 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -27,7 +27,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include <linux/platform_data/pxa2xx_udc.h>
#include <linux/platform_device.h>
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 0956495bba57..2b871540bb50 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -585,6 +585,7 @@ done(struct sl811 *sl811, struct sl811h_ep *ep, u8 bank)
finish_request(sl811, ep, urb, urbstat);
}
+#ifdef QUIRK2
static inline u8 checkdone(struct sl811 *sl811)
{
u8 ctl;
@@ -616,6 +617,7 @@ static inline u8 checkdone(struct sl811 *sl811)
#endif
return irqstat;
}
+#endif
static irqreturn_t sl811h_irq(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/host/xhci-caps.h b/drivers/usb/host/xhci-caps.h
new file mode 100644
index 000000000000..9e94cebf4a56
--- /dev/null
+++ b/drivers/usb/host/xhci-caps.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* hc_capbase bitmasks */
+/* bits 7:0 - how long is the Capabilities register */
+#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
+/* bits 31:16 */
+#define HC_VERSION(p) (((p) >> 16) & 0xffff)
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Slots */
+#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
+#define HCS_SLOTS_MASK 0xff
+/* bits 8:18, Max Interrupters */
+#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
+/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
+#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
+
+/* HCSPARAMS2 - hcs_params2 - bitmasks */
+/* bits 0:3, frames or uframes that SW needs to queue transactions
+ * ahead of the HW to meet periodic deadlines */
+#define HCS_IST(p) (((p) >> 0) & 0xf)
+/* bits 4:7, max number of Event Ring segments */
+#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
+/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
+/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
+/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
+#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
+
+/* HCSPARAMS3 - hcs_params3 - bitmasks */
+/* bits 0:7, Max U1 to U0 latency for the roothub ports */
+#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
+/* bits 16:31, Max U2 to U0 latency for the roothub ports */
+#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* true: HC can use 64-bit address pointers */
+#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
+/* true: HC can do bandwidth negotiation */
+#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
+/* true: HC uses 64-byte Device Context structures
+ * FIXME 64-byte context structures aren't supported yet.
+ */
+#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
+/* true: HC has port power switches */
+#define HCC_PPC(p) ((p) & (1 << 3))
+/* true: HC has port indicators */
+#define HCS_INDICATOR(p) ((p) & (1 << 4))
+/* true: HC has Light HC Reset Capability */
+#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
+/* true: HC supports latency tolerance messaging */
+#define HCC_LTC(p) ((p) & (1 << 6))
+/* true: no secondary Stream ID Support */
+#define HCC_NSS(p) ((p) & (1 << 7))
+/* true: HC supports Stopped - Short Packet */
+#define HCC_SPC(p) ((p) & (1 << 9))
+/* true: HC has Contiguous Frame ID Capability */
+#define HCC_CFC(p) ((p) & (1 << 11))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
+#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
+/* Extended Capabilities pointer from PCI base - section 5.3.6 */
+#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
+
+#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved */
+#define DBOFF_MASK (~0x3)
+
+/* run_regs_off bitmask - bits 0:4 reserved */
+#define RTSOFF_MASK (~0x1f)
+
+/* HCCPARAMS2 - hcc_params2 - bitmasks */
+/* true: HC supports U3 entry Capability */
+#define HCC2_U3C(p) ((p) & (1 << 0))
+/* true: HC supports Configure endpoint command Max exit latency too large */
+#define HCC2_CMC(p) ((p) & (1 << 1))
+/* true: HC supports Force Save context Capability */
+#define HCC2_FSC(p) ((p) & (1 << 2))
+/* true: HC supports Compliance Transition Capability */
+#define HCC2_CTC(p) ((p) & (1 << 3))
+/* true: HC support Large ESIT payload Capability > 48k */
+#define HCC2_LEC(p) ((p) & (1 << 4))
+/* true: HC support Configuration Information Capability */
+#define HCC2_CIC(p) ((p) & (1 << 5))
+/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
+#define HCC2_ETC(p) ((p) & (1 << 6))
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index d82935d31126..8a9869ef0db6 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -634,7 +634,8 @@ static int xhci_dbc_start(struct xhci_dbc *dbc)
return ret;
}
- return mod_delayed_work(system_wq, &dbc->event_work, 1);
+ return mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(dbc->poll_interval));
}
static void xhci_dbc_stop(struct xhci_dbc *dbc)
@@ -899,8 +900,10 @@ static void xhci_dbc_handle_events(struct work_struct *work)
enum evtreturn evtr;
struct xhci_dbc *dbc;
unsigned long flags;
+ unsigned int poll_interval;
dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
+ poll_interval = dbc->poll_interval;
spin_lock_irqsave(&dbc->lock, flags);
evtr = xhci_dbc_do_handle_events(dbc);
@@ -916,13 +919,18 @@ static void xhci_dbc_handle_events(struct work_struct *work)
dbc->driver->disconnect(dbc);
break;
case EVT_DONE:
+ /* set fast poll rate if there are pending data transfers */
+ if (!list_empty(&dbc->eps[BULK_OUT].list_pending) ||
+ !list_empty(&dbc->eps[BULK_IN].list_pending))
+ poll_interval = 1;
break;
default:
dev_info(dbc->dev, "stop handling dbc events\n");
return;
}
- mod_delayed_work(system_wq, &dbc->event_work, 1);
+ mod_delayed_work(system_wq, &dbc->event_work,
+ msecs_to_jiffies(poll_interval));
}
static const char * const dbc_state_strings[DS_MAX] = {
@@ -1175,6 +1183,7 @@ xhci_alloc_dbc(struct device *dev, void __iomem *base, const struct dbc_driver *
dbc->idVendor = DBC_VENDOR_ID;
dbc->bcdDevice = DBC_DEVICE_REV;
dbc->bInterfaceProtocol = DBC_PROTOCOL;
+ dbc->poll_interval = DBC_POLL_INTERVAL_DEFAULT;
if (readl(&dbc->regs->control) & DBC_CTRL_DBC_ENABLE)
goto err;
diff --git a/drivers/usb/host/xhci-dbgcap.h b/drivers/usb/host/xhci-dbgcap.h
index e39e3ae1677a..92661b555c2a 100644
--- a/drivers/usb/host/xhci-dbgcap.h
+++ b/drivers/usb/host/xhci-dbgcap.h
@@ -94,6 +94,7 @@ struct dbc_ep {
#define DBC_QUEUE_SIZE 16
#define DBC_WRITE_BUF_SIZE 8192
+#define DBC_POLL_INTERVAL_DEFAULT 64 /* milliseconds */
/*
* Private structure for DbC hardware state:
@@ -140,6 +141,7 @@ struct xhci_dbc {
enum dbc_state state;
struct delayed_work event_work;
+ unsigned int poll_interval; /* ms */
unsigned resume_required:1;
struct dbc_ep eps[2];
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 0980ade2a234..61f083de6e19 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -448,38 +448,6 @@ u32 xhci_port_state_to_neutral(u32 state)
}
EXPORT_SYMBOL_GPL(xhci_port_state_to_neutral);
-/**
- * xhci_find_slot_id_by_port() - Find slot id of a usb device on a roothub port
- * @hcd: pointer to hcd of the roothub
- * @xhci: pointer to xhci structure
- * @port: one-based port number of the port in this roothub.
- *
- * Return: Slot id of the usb device connected to the root port, 0 if not found
- */
-
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port)
-{
- int slot_id;
- int i;
- enum usb_device_speed speed;
-
- slot_id = 0;
- for (i = 0; i < MAX_HC_SLOTS; i++) {
- if (!xhci->devs[i] || !xhci->devs[i]->udev)
- continue;
- speed = xhci->devs[i]->udev->speed;
- if (((speed >= USB_SPEED_SUPER) == (hcd->speed >= HCD_USB3))
- && xhci->devs[i]->fake_port == port) {
- slot_id = i;
- break;
- }
- }
-
- return slot_id;
-}
-EXPORT_SYMBOL_GPL(xhci_find_slot_id_by_port);
-
/*
* Stop device
* It issues stop endpoint command for EP 0 to 30. And wait the last command
@@ -930,7 +898,6 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
struct xhci_bus_state *bus_state;
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
- int slot_id;
u32 wIndex;
hcd = port->rhub->hcd;
@@ -986,13 +953,11 @@ static int xhci_handle_usb2_port_link_resume(struct xhci_port *port,
spin_lock_irqsave(&xhci->lock, *flags);
if (time_left) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
return -ENODEV;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
} else {
int port_status = readl(port->addr);
@@ -1202,7 +1167,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
unsigned long flags;
u32 temp, status;
int retval = 0;
- int slot_id;
struct xhci_bus_state *bus_state;
u16 link_state = 0;
u16 wake_mask = 0;
@@ -1332,15 +1296,13 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
goto error;
}
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_warn(xhci, "slot_id is zero\n");
goto error;
}
/* unlock to execute stop endpoint commands */
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
xhci_set_link_state(xhci, port, XDEV_U3);
@@ -1463,14 +1425,12 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (link_state == USB_SS_PORT_LS_U3) {
int retries = 16;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (slot_id) {
+ if (port->slot_id) {
/* unlock to execute stop endpoint
* commands */
spin_unlock_irqrestore(&xhci->lock,
flags);
- xhci_stop_device(xhci, slot_id, 1);
+ xhci_stop_device(xhci, port->slot_id, 1);
spin_lock_irqsave(&xhci->lock, flags);
}
xhci_set_link_state(xhci, port, USB_SS_PORT_LS_U3);
@@ -1584,13 +1544,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
}
bus_state->port_c_suspend |= 1 << wIndex;
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- portnum1);
- if (!slot_id) {
+ if (!port->slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto error;
}
- xhci_ring_device(xhci, slot_id);
+ xhci_ring_device(xhci, port->slot_id);
break;
case USB_PORT_FEAT_C_SUSPEND:
bus_state->port_c_suspend &= ~(1 << wIndex);
@@ -1821,10 +1779,7 @@ retry:
if (!portsc_buf[port_index])
continue;
if (test_bit(port_index, &bus_state->bus_suspended)) {
- int slot_id;
-
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- port_index + 1);
+ int slot_id = ports[port_index]->slot_id;
if (slot_id) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_stop_device(xhci, slot_id, 1);
@@ -1877,7 +1832,6 @@ int xhci_bus_resume(struct usb_hcd *hcd)
struct xhci_bus_state *bus_state;
unsigned long flags;
int max_ports, port_index;
- int slot_id;
int sret;
u32 next_state;
u32 temp, portsc;
@@ -1970,9 +1924,8 @@ int xhci_bus_resume(struct usb_hcd *hcd)
continue;
}
xhci_test_and_clear_bit(xhci, ports[port_index], PORT_PLC);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, port_index + 1);
- if (slot_id)
- xhci_ring_device(xhci, slot_id);
+ if (ports[port_index]->slot_id)
+ xhci_ring_device(xhci, ports[port_index]->slot_id);
}
(void) readl(&xhci->op_regs->command);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index a7716202a8dd..69dd86669883 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -84,7 +84,7 @@ static void xhci_free_segments_for_ring(struct xhci_hcd *xhci,
struct xhci_segment *seg;
seg = first->next;
- while (seg != first) {
+ while (seg && seg != first) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
seg = next;
@@ -351,17 +351,10 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
flags);
- if (!next) {
- prev = *first;
- while (prev) {
- next = prev->next;
- xhci_segment_free(xhci, prev);
- prev = next;
- }
- return -ENOMEM;
- }
- xhci_link_segments(prev, next, type, chain_links);
+ if (!next)
+ goto free_segments;
+ xhci_link_segments(prev, next, type, chain_links);
prev = next;
num++;
}
@@ -369,6 +362,10 @@ static int xhci_alloc_segments_for_ring(struct xhci_hcd *xhci,
*last = prev;
return 0;
+
+free_segments:
+ xhci_free_segments_for_ring(xhci, *first);
+ return -ENOMEM;
}
/*
@@ -444,19 +441,11 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
if (ret)
return -ENOMEM;
- if (ring->type == TYPE_STREAM)
+ if (ring->type == TYPE_STREAM) {
ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
ring, first, last, flags);
- if (ret) {
- struct xhci_segment *next;
- do {
- next = first->next;
- xhci_segment_free(xhci, first);
- if (first == last)
- break;
- first = next;
- } while (true);
- return ret;
+ if (ret)
+ goto free_segments;
}
xhci_link_rings(xhci, ring, first, last, num_new_segs);
@@ -466,6 +455,10 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->num_segs);
return 0;
+
+free_segments:
+ xhci_free_segments_for_ring(xhci, first);
+ return ret;
}
struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
@@ -789,15 +782,14 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
bool slot_found = false;
/* If the device never made it past the Set Address stage,
- * it may not have the real_port set correctly.
+ * it may not have the root hub port pointer set correctly.
*/
- if (virt_dev->real_port == 0 ||
- virt_dev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad real port.\n");
+ if (!virt_dev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
return;
}
- tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* Multi-TT hubs will have more than one entry */
if (tt_info->slot_id == slot_id) {
@@ -834,7 +826,7 @@ int xhci_alloc_tt_info(struct xhci_hcd *xhci,
goto free_tts;
INIT_LIST_HEAD(&tt_info->tt_list);
list_add(&tt_info->tt_list,
- &xhci->rh_bw[virt_dev->real_port - 1].tts);
+ &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].tts);
tt_info->slot_id = virt_dev->udev->slot_id;
if (tt->multi)
tt_info->ttport = i+1;
@@ -908,6 +900,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
if (dev->udev && dev->udev->slot_id)
dev->udev->slot_id = 0;
+ if (dev->rhub_port && dev->rhub_port->slot_id == slot_id)
+ dev->rhub_port->slot_id = 0;
kfree(xhci->devs[slot_id]);
xhci->devs[slot_id] = NULL;
}
@@ -929,13 +923,12 @@ static void xhci_free_virt_devices_depth_first(struct xhci_hcd *xhci, int slot_i
if (!vdev)
return;
- if (vdev->real_port == 0 ||
- vdev->real_port > HCS_MAX_PORTS(xhci->hcs_params1)) {
- xhci_dbg(xhci, "Bad vdev->real_port.\n");
+ if (!vdev->rhub_port) {
+ xhci_dbg(xhci, "Bad rhub port.\n");
goto out;
}
- tt_list_head = &(xhci->rh_bw[vdev->real_port - 1].tts);
+ tt_list_head = &(xhci->rh_bw[vdev->rhub_port->hw_portnum].tts);
list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
/* is this a hub device that added a tt_info to the tts list */
if (tt_info->slot_id == slot_id) {
@@ -1051,16 +1044,16 @@ void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
* The xHCI roothub may have ports of differing speeds in any order in the port
* status registers.
*
- * The xHCI hardware wants to know the roothub port number that the USB device
+ * The xHCI hardware wants to know the roothub port that the USB device
* is attached to (or the roothub port its ancestor hub is attached to). All we
* know is the index of that port under either the USB 2.0 or the USB 3.0
* roothub, but that doesn't give us the real index into the HW port status
- * registers. Call xhci_find_raw_port_number() to get real index.
+ * registers.
*/
-static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
- struct usb_device *udev)
+static struct xhci_port *xhci_find_rhub_port(struct xhci_hcd *xhci, struct usb_device *udev)
{
struct usb_device *top_dev;
+ struct xhci_hub *rhub;
struct usb_hcd *hcd;
if (udev->speed >= USB_SPEED_SUPER)
@@ -1072,7 +1065,8 @@ static u32 xhci_find_real_port_number(struct xhci_hcd *xhci,
top_dev = top_dev->parent)
/* Found device below root hub */;
- return xhci_find_raw_port_number(hcd, top_dev->portnum);
+ rhub = xhci_get_rhub(hcd);
+ return rhub->ports[top_dev->portnum - 1];
}
/* Setup an xHCI virtual device for a Set Address command */
@@ -1081,9 +1075,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
struct xhci_virt_device *dev;
struct xhci_ep_ctx *ep0_ctx;
struct xhci_slot_ctx *slot_ctx;
- u32 port_num;
u32 max_packets;
- struct usb_device *top_dev;
dev = xhci->devs[udev->slot_id];
/* Slot ID 0 is reserved */
@@ -1124,18 +1116,15 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
return -EINVAL;
}
/* Find the root hub port this device is under */
- port_num = xhci_find_real_port_number(xhci, udev);
- if (!port_num)
+ dev->rhub_port = xhci_find_rhub_port(xhci, udev);
+ if (!dev->rhub_port)
return -EINVAL;
- slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(port_num));
- /* Set the port number in the virtual_device to the faked port number */
- for (top_dev = udev; top_dev->parent && top_dev->parent->parent;
- top_dev = top_dev->parent)
- /* Found device below root hub */;
- dev->fake_port = top_dev->portnum;
- dev->real_port = port_num;
- xhci_dbg(xhci, "Set root hub portnum to %d\n", port_num);
- xhci_dbg(xhci, "Set fake root hub portnum to %d\n", dev->fake_port);
+ /* Slot ID is set to the device directly below the root hub */
+ if (!udev->parent->parent)
+ dev->rhub_port->slot_id = udev->slot_id;
+ slot_ctx->dev_info2 |= cpu_to_le32(ROOT_HUB_PORT(dev->rhub_port->hw_portnum + 1));
+ xhci_dbg(xhci, "Slot ID %d: HW portnum %d, hcd portnum %d\n",
+ udev->slot_id, dev->rhub_port->hw_portnum, dev->rhub_port->hcd_portnum);
/* Find the right bandwidth table that this device will be a part of.
* If this is a full speed device attached directly to a root port (or a
@@ -1144,12 +1133,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
* will never be created for the HS root hub.
*/
if (!udev->tt || !udev->tt->hub->parent) {
- dev->bw_table = &xhci->rh_bw[port_num - 1].bw_table;
+ dev->bw_table = &xhci->rh_bw[dev->rhub_port->hw_portnum].bw_table;
} else {
struct xhci_root_port_bw_info *rh_bw;
struct xhci_tt_bw_info *tt_bw;
- rh_bw = &xhci->rh_bw[port_num - 1];
+ rh_bw = &xhci->rh_bw[dev->rhub_port->hw_portnum];
/* Find the right TT. */
list_for_each_entry(tt_bw, &rh_bw->tts, tt_list) {
if (tt_bw->slot_id != udev->tt->hub->slot_id)
@@ -2533,7 +2522,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
if (xhci_add_interrupter(xhci, ir, 0))
goto fail;
- xhci->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
+ ir->isoc_bei_interval = AVOID_BEI_INTERVAL_MAX;
/*
* XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 61f3f8bbdcea..27eb384a3963 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -122,10 +122,6 @@ static u32 get_bw_boundary(enum usb_device_speed speed)
* each HS root port is treated as a single bandwidth domain,
* but each SS root port is treated as two bandwidth domains, one for IN eps,
* one for OUT eps.
-* @real_port value is defined as follow according to xHCI spec:
-* 1 for SSport0, ..., N+1 for SSportN, N+2 for HSport0, N+3 for HSport1, etc
-* so the bandwidth domain array is organized as follow for simplification:
-* SSport0-OUT, SSport0-IN, ..., SSportX-OUT, SSportX-IN, HSport0, ..., HSportY
*/
static struct mu3h_sch_bw_info *
get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
@@ -136,19 +132,19 @@ get_bw_info(struct xhci_hcd_mtk *mtk, struct usb_device *udev,
int bw_index;
virt_dev = xhci->devs[udev->slot_id];
- if (!virt_dev->real_port) {
- WARN_ONCE(1, "%s invalid real_port\n", dev_name(&udev->dev));
+ if (!virt_dev->rhub_port) {
+ WARN_ONCE(1, "%s invalid rhub port\n", dev_name(&udev->dev));
return NULL;
}
if (udev->speed >= USB_SPEED_SUPER) {
if (usb_endpoint_dir_out(&ep->desc))
- bw_index = (virt_dev->real_port - 1) * 2;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2;
else
- bw_index = (virt_dev->real_port - 1) * 2 + 1;
+ bw_index = (virt_dev->rhub_port->hw_portnum) * 2 + 1;
} else {
/* add one more for each SS port */
- bw_index = virt_dev->real_port + xhci->usb3_rhub.num_ports - 1;
+ bw_index = virt_dev->rhub_port->hw_portnum + xhci->usb3_rhub.num_ports;
}
return &mtk->sch_array[bw_index];
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b534ca9752be..93b697648018 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -307,8 +307,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
- if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ if (pdev->vendor == PCI_VENDOR_ID_AMD) {
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if (pdev->device == 0x43f7)
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+ }
if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
@@ -820,7 +823,6 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_port *port;
struct usb_device *udev;
- unsigned int slot_id;
u32 portsc;
int i;
@@ -843,15 +845,14 @@ static int xhci_pci_poweroff_late(struct usb_hcd *hcd, bool do_wakeup)
if ((portsc & PORT_PLS_MASK) != XDEV_U3)
continue;
- slot_id = xhci_find_slot_id_by_port(port->rhub->hcd, xhci,
- port->hcd_portnum + 1);
- if (!slot_id || !xhci->devs[slot_id]) {
+ if (!port->slot_id || !xhci->devs[port->slot_id]) {
xhci_err(xhci, "No dev for slot_id %d for port %d-%d in U3\n",
- slot_id, port->rhub->hcd->self.busnum, port->hcd_portnum + 1);
+ port->slot_id, port->rhub->hcd->self.busnum,
+ port->hcd_portnum + 1);
continue;
}
- udev = xhci->devs[slot_id]->udev;
+ udev = xhci->devs[port->slot_id]->udev;
/* if wakeup is enabled then don't disable the port */
if (udev->do_remote_wakeup && do_wakeup)
diff --git a/drivers/usb/host/xhci-port.h b/drivers/usb/host/xhci-port.h
new file mode 100644
index 000000000000..f19efb966d18
--- /dev/null
+++ b/drivers/usb/host/xhci-port.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
+/* true: device connected */
+#define PORT_CONNECT (1 << 0)
+/* true: port enabled */
+#define PORT_PE (1 << 1)
+/* bit 2 reserved and zeroed */
+/* true: port has an over-current condition */
+#define PORT_OC (1 << 3)
+/* true: port reset signaling asserted */
+#define PORT_RESET (1 << 4)
+/* Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe set sets the link state.
+ */
+#define PORT_PLS_MASK (0xf << 5)
+#define XDEV_U0 (0x0 << 5)
+#define XDEV_U1 (0x1 << 5)
+#define XDEV_U2 (0x2 << 5)
+#define XDEV_U3 (0x3 << 5)
+#define XDEV_DISABLED (0x4 << 5)
+#define XDEV_RXDETECT (0x5 << 5)
+#define XDEV_INACTIVE (0x6 << 5)
+#define XDEV_POLLING (0x7 << 5)
+#define XDEV_RECOVERY (0x8 << 5)
+#define XDEV_HOT_RESET (0x9 << 5)
+#define XDEV_COMP_MODE (0xa << 5)
+#define XDEV_TEST_MODE (0xb << 5)
+#define XDEV_RESUME (0xf << 5)
+
+/* true: port has power (see HCC_PPC) */
+#define PORT_POWER (1 << 9)
+/* bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - low speed
+ * 3 - high speed
+ * 4 - super speed
+ * 5-15 reserved
+ */
+#define DEV_SPEED_MASK (0xf << 10)
+#define XDEV_FS (0x1 << 10)
+#define XDEV_LS (0x2 << 10)
+#define XDEV_HS (0x3 << 10)
+#define XDEV_SS (0x4 << 10)
+#define XDEV_SSP (0x5 << 10)
+#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
+#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
+#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
+
+/* Bits 20:23 in the Slot Context are the speed for the device */
+#define SLOT_SPEED_FS (XDEV_FS << 10)
+#define SLOT_SPEED_LS (XDEV_LS << 10)
+#define SLOT_SPEED_HS (XDEV_HS << 10)
+#define SLOT_SPEED_SS (XDEV_SS << 10)
+#define SLOT_SPEED_SSP (XDEV_SSP << 10)
+/* Port Indicator Control */
+#define PORT_LED_OFF (0 << 14)
+#define PORT_LED_AMBER (1 << 14)
+#define PORT_LED_GREEN (2 << 14)
+#define PORT_LED_MASK (3 << 14)
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE (1 << 16)
+/* true: connect status change */
+#define PORT_CSC (1 << 17)
+/* true: port enable change */
+#define PORT_PEC (1 << 18)
+/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
+ * into an enabled state, and the device into the default state. A "warm" reset
+ * also resets the link, forcing the device through the link training sequence.
+ * SW can also look at the Port Reset register to see when warm reset is done.
+ */
+#define PORT_WRC (1 << 19)
+/* true: over-current change */
+#define PORT_OCC (1 << 20)
+/* true: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC (1 << 21)
+/* port link status change - set on some port link state transitions:
+ * Transition Reason
+ * ------------------------------------------------------------------------------
+ * - U3 to Resume Wakeup signaling from a device
+ * - Resume to Recovery to U0 USB 3.0 device resume
+ * - Resume to U0 USB 2.0 device resume
+ * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
+ * - U3 to U0 Software resume of USB 2.0 device complete
+ * - U2 to U0 L1 resume of USB 2.1 device complete
+ * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
+ * - U0 to disabled L1 entry error with USB 2.1 device
+ * - Any state to inactive Error on USB 3.0 port
+ */
+#define PORT_PLC (1 << 22)
+/* port configure error change - port failed to configure its link partner */
+#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
+/* Cold Attach Status - xHC can set this bit to report device attached during
+ * Sx state. Warm port reset should be performed to clear this bit and move port
+ * to connected state.
+ */
+#define PORT_CAS (1 << 24)
+/* wake on connect (enable) */
+#define PORT_WKCONN_E (1 << 25)
+/* wake on disconnect (enable) */
+#define PORT_WKDISC_E (1 << 26)
+/* wake on over-current (enable) */
+#define PORT_WKOC_E (1 << 27)
+/* bits 28:29 reserved */
+/* true: device is non-removable - for USB 3.0 roothub emulation */
+#define PORT_DEV_REMOVE (1 << 30)
+/* Initiate a warm port reset - complete when PORT_WRC is '1' */
+#define PORT_WR (1 << 31)
+
+/* We mark duplicate entries with -1 */
+#define DUPLICATE_ENTRY ((u8)(-1))
+
+/* Port Power Management Status and Control - port_power_base bitmasks */
+/* Inactivity timer value for transitions into U1, in microseconds.
+ * Timeout can be up to 127us. 0xFF means an infinite timeout.
+ */
+#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
+#define PORT_U1_TIMEOUT_MASK 0xff
+/* Inactivity timer value for transitions into U2 */
+#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
+#define PORT_U2_TIMEOUT_MASK (0xff << 8)
+/* Bits 24:31 for port testing */
+
+/* USB2 Protocol PORTSPMSC */
+#define PORT_L1S_MASK 7
+#define PORT_L1S_SUCCESS 1
+#define PORT_RWE (1 << 3)
+#define PORT_HIRD(p) (((p) & 0xf) << 4)
+#define PORT_HIRD_MASK (0xf << 4)
+#define PORT_L1DS_MASK (0xff << 8)
+#define PORT_L1DS(p) (((p) & 0xff) << 8)
+#define PORT_HLE (1 << 16)
+#define PORT_TEST_MODE_SHIFT 28
+
+/* USB3 Protocol PORTLI Port Link Information */
+#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
+#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
+
+/* USB2 Protocol PORTHLPMC */
+#define PORT_HIRDM(p) ((p) & 3)
+#define PORT_L1_TIMEOUT(p) (((p) & 0xff) << 2)
+#define PORT_BESLD(p) (((p) & 0xf) << 10)
+
+/* use 512 microseconds as USB2 LPM L1 default timeout. */
+#define XHCI_L1_TIMEOUT 512
+
+/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
+ * Safe to use with mixed HIRD and BESL systems (host and device) and is used
+ * by other operating systems.
+ *
+ * XHCI 1.0 errata 8/14/12 Table 13 notes:
+ * "Software should choose xHC BESL/BESLD field values that do not violate a
+ * device's resume latency requirements,
+ * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
+ * or not program values < '4' if BLC = '0' and a BESL device is attached."
+ */
+#define XHCI_DEFAULT_BESL 4
+
+/*
+ * The USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
+ * to complete link training. Usually link training completes much faster, so
+ * check the status 10 times with a 36ms sleep in places where we need to wait
+ * for polling to complete.
+ */
+#define XHCI_PORT_POLLING_LFPS_TIME 36
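For orientation, here is a minimal sketch (not part of the patch) of how the
PORTSC bitmasks above are typically combined when decoding a raw register
value. decode_portsc() is a hypothetical helper; portsc is assumed to hold a
value read from a port status register with readl(), and PORT_CONNECT comes
from the same set of PORTSC definitions.

    /* Hypothetical helper, for illustration only. */
    static void decode_portsc(u32 portsc)
    {
            u32 pls = portsc & PORT_PLS_MASK;       /* link state, e.g. XDEV_U0, XDEV_RESUME */
            u32 speed = DEV_PORT_SPEED(portsc);     /* 1=FS, 2=LS, 3=HS, 4=SS, 5=SSP */

            if (!(portsc & PORT_CONNECT))
                    return;                         /* nothing attached */

            if (DEV_SUPERSPEED_ANY(portsc))
                    pr_debug("USB3 device, speed %u, link state %#x\n", speed, pls);
            else
                    pr_debug("USB2 device, speed %u, link state %#x\n", speed, pls);

            /*
             * The change bits (PORT_CSC, PORT_PEC, ...) are RW1C: writing the
             * bits in PORT_CHANGE_MASK back to the register acknowledges them.
             */
    }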
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 4f64b814d4aa..52278afea94b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -113,6 +113,12 @@ static bool last_td_in_urb(struct xhci_td *td)
return urb_priv->num_tds_done == urb_priv->num_tds;
}
+static bool unhandled_event_trb(struct xhci_ring *ring)
+{
+ return ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
+ ring->cycle_state);
+}
+
static void inc_td_cnt(struct urb *urb)
{
struct urb_priv *urb_priv = urb->hcpriv;
@@ -1154,6 +1160,15 @@ static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
break;
ep->ep_state &= ~EP_STOP_CMD_PENDING;
return;
+ case EP_STATE_STOPPED:
+ /*
+ * NEC uPD720200 sometimes sets this state and fails with
+ * Context Error while continuing to process TRBs.
+ * Be conservative and trust EP_CTX_STATE on other chips.
+ */
+ if (!(xhci->quirks & XHCI_NEC_HOST))
+ break;
+ fallthrough;
case EP_STATE_RUNNING:
/* Race, HW handled stop ep cmd before ep was running */
xhci_dbg(xhci, "Stop ep completion ctx error, ep is running\n");
@@ -1870,7 +1885,6 @@ static void handle_port_status(struct xhci_hcd *xhci,
u32 port_id;
u32 portsc, cmd_reg;
int max_ports;
- int slot_id;
unsigned int hcd_portnum;
struct xhci_bus_state *bus_state;
bool bogus_port_status = false;
@@ -1922,9 +1936,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
if (hcd->speed >= HCD_USB3 &&
(portsc & PORT_PLS_MASK) == XDEV_INACTIVE) {
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci->devs[slot_id]->flags |= VDEV_PORT_ERROR;
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci->devs[port->slot_id]->flags |= VDEV_PORT_ERROR;
}
if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_RESUME) {
@@ -1982,9 +1995,8 @@ static void handle_port_status(struct xhci_hcd *xhci,
* so the roothub behavior is consistent with external
* USB 3.0 hub behavior.
*/
- slot_id = xhci_find_slot_id_by_port(hcd, xhci, hcd_portnum + 1);
- if (slot_id && xhci->devs[slot_id])
- xhci_ring_device(xhci, slot_id);
+ if (port->slot_id && xhci->devs[port->slot_id])
+ xhci_ring_device(xhci, port->slot_id);
if (bus_state->port_remote_wakeup & (1 << hcd_portnum)) {
xhci_test_and_clear_bit(xhci, port, PORT_PLC);
usb_wakeup_notification(hcd->self.root_hub,
@@ -2816,7 +2828,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td_num--;
/* Is this a TRB in the currently executing TD? */
- ep_seg = trb_in_td(xhci, ep_ring->deq_seg, ep_ring->dequeue,
+ ep_seg = trb_in_td(xhci, td->start_seg, td->first_trb,
td->last_trb, ep_trb_dma, false);
/*
@@ -2884,9 +2896,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
"part of current TD ep_index %d "
"comp_code %u\n", ep_index,
trb_comp_code);
- trb_in_td(xhci, ep_ring->deq_seg,
- ep_ring->dequeue, td->last_trb,
- ep_trb_dma, true);
+ trb_in_td(xhci, td->start_seg, td->first_trb,
+ td->last_trb, ep_trb_dma, true);
return -ESHUTDOWN;
}
}
@@ -2962,32 +2973,18 @@ err_out:
}
/*
- * This function handles all OS-owned events on the event ring. It may drop
+ * This function handles one OS-owned event on the event ring. It may drop
* xhci->lock between event processing (e.g. to pass up port status changes).
- * Returns >0 for "possibly more events to process" (caller should call again),
- * otherwise 0 if done. In future, <0 returns should indicate error code.
*/
-static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+static int xhci_handle_event_trb(struct xhci_hcd *xhci, struct xhci_interrupter *ir,
+ union xhci_trb *event)
{
- union xhci_trb *event;
u32 trb_type;
- /* Event ring hasn't been allocated yet. */
- if (!ir || !ir->event_ring || !ir->event_ring->dequeue) {
- xhci_err(xhci, "ERROR interrupter not ready\n");
- return -ENOMEM;
- }
-
- event = ir->event_ring->dequeue;
- /* Does the HC or OS own the TRB? */
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE) !=
- ir->event_ring->cycle_state)
- return 0;
-
trace_xhci_handle_event(ir->event_ring, &event->generic);
/*
- * Barrier between reading the TRB_CYCLE (valid) flag above and any
+ * Barrier between reading the TRB_CYCLE (valid) flag before, and any
* speculative reads of the event's flags/data below.
*/
rmb();
@@ -3017,18 +3014,11 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
* to make sure a watchdog timer didn't mark the host as non-responsive.
*/
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "xHCI host dying, returning from "
- "event handler.\n");
- return 0;
+ xhci_dbg(xhci, "xHCI host dying, returning from event handler.\n");
+ return -ENODEV;
}
- /* Update SW event ring dequeue pointer */
- inc_deq(xhci, ir->event_ring);
-
- /* Are there more items on the event ring? Caller will call us again to
- * check.
- */
- return 1;
+ return 0;
}
/*
@@ -3038,30 +3028,26 @@ static int xhci_handle_event(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
*/
static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
struct xhci_interrupter *ir,
- union xhci_trb *event_ring_deq,
bool clear_ehb)
{
u64 temp_64;
dma_addr_t deq;
temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- /* If necessary, update the HW's version of the event ring deq ptr. */
- if (event_ring_deq != ir->event_ring->dequeue) {
- deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
- ir->event_ring->dequeue);
- if (deq == 0)
- xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
- /*
- * Per 4.9.4, Software writes to the ERDP register shall
- * always advance the Event Ring Dequeue Pointer value.
- */
- if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK))
- return;
+ deq = xhci_trb_virt_to_dma(ir->event_ring->deq_seg,
+ ir->event_ring->dequeue);
+ if (deq == 0)
+ xhci_warn(xhci, "WARN something wrong with SW event ring dequeue ptr\n");
+ /*
+ * Per 4.9.4, Software writes to the ERDP register shall always advance
+ * the Event Ring Dequeue Pointer value.
+ */
+ if ((temp_64 & ERST_PTR_MASK) == (deq & ERST_PTR_MASK) && !clear_ehb)
+ return;
- /* Update HC event ring dequeue pointer */
- temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
- temp_64 |= deq & ERST_PTR_MASK;
- }
+ /* Update HC event ring dequeue pointer */
+ temp_64 = ir->event_ring->deq_seg->num & ERST_DESI_MASK;
+ temp_64 |= deq & ERST_PTR_MASK;
/* Clear the event handler busy flag (RW1C) */
if (clear_ehb)
@@ -3069,6 +3055,76 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
xhci_write_64(xhci, temp_64, &ir->ir_set->erst_dequeue);
}
+/* Clear the interrupt pending bit for a specific interrupter. */
+static void xhci_clear_interrupt_pending(struct xhci_hcd *xhci,
+ struct xhci_interrupter *ir)
+{
+ if (!ir->ip_autoclear) {
+ u32 irq_pending;
+
+ irq_pending = readl(&ir->ir_set->irq_pending);
+ irq_pending |= IMAN_IP;
+ writel(irq_pending, &ir->ir_set->irq_pending);
+ }
+}
+
+/*
+ * Handle all OS-owned events on an interrupter event ring. It may drop
+ * and reaquire xhci->lock between event processing.
+ */
+static int xhci_handle_events(struct xhci_hcd *xhci, struct xhci_interrupter *ir)
+{
+ int event_loop = 0;
+ int err;
+ u64 temp;
+
+ xhci_clear_interrupt_pending(xhci, ir);
+
+ /* Event ring hasn't been allocated yet. */
+ if (!ir->event_ring || !ir->event_ring->dequeue) {
+ xhci_err(xhci, "ERROR interrupter event ring not ready\n");
+ return -ENOMEM;
+ }
+
+ if (xhci->xhc_state & XHCI_STATE_DYING ||
+ xhci->xhc_state & XHCI_STATE_HALTED) {
+ xhci_dbg(xhci, "xHCI dying, ignoring interrupt. Shouldn't IRQs be disabled?\n");
+
+ /* Clear the event handler busy flag (RW1C) */
+ temp = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp | ERST_EHB, &ir->ir_set->erst_dequeue);
+ return -ENODEV;
+ }
+
+ /* Process all OS owned event TRBs on this event ring */
+ while (unhandled_event_trb(ir->event_ring)) {
+ err = xhci_handle_event_trb(xhci, ir, ir->event_ring->dequeue);
+
+ /*
+		 * If half a segment of events has been handled in one go,
+		 * update ERDP and force isoc TRBs to interrupt more often.
+ */
+ if (event_loop++ > TRBS_PER_SEGMENT / 2) {
+ xhci_update_erst_dequeue(xhci, ir, false);
+
+ if (ir->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
+ ir->isoc_bei_interval = ir->isoc_bei_interval / 2;
+
+ event_loop = 0;
+ }
+
+ /* Update SW event ring dequeue pointer */
+ inc_deq(xhci, ir->event_ring);
+
+ if (err)
+ break;
+ }
+
+ xhci_update_erst_dequeue(xhci, ir, true);
+
+ return 0;
+}
+
/*
* xHCI spec says we can get an interrupt, and if the HC has an error condition,
* we might get bad data out of the event ring. Section 4.10.2.7 has a list of
@@ -3077,12 +3133,8 @@ static void xhci_update_erst_dequeue(struct xhci_hcd *xhci,
irqreturn_t xhci_irq(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- union xhci_trb *event_ring_deq;
- struct xhci_interrupter *ir;
irqreturn_t ret = IRQ_NONE;
- u64 temp_64;
u32 status;
- int event_loop = 0;
spin_lock(&xhci->lock);
/* Check if the xHC generated the interrupt, or the irq is shared */
@@ -3115,50 +3167,10 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
*/
status |= STS_EINT;
writel(status, &xhci->op_regs->status);
-
- /* This is the handler of the primary interrupter */
- ir = xhci->interrupters[0];
- if (!hcd->msi_enabled) {
- u32 irq_pending;
- irq_pending = readl(&ir->ir_set->irq_pending);
- irq_pending |= IMAN_IP;
- writel(irq_pending, &ir->ir_set->irq_pending);
- }
-
- if (xhci->xhc_state & XHCI_STATE_DYING ||
- xhci->xhc_state & XHCI_STATE_HALTED) {
- xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
- "Shouldn't IRQs be disabled?\n");
- /* Clear the event handler busy flag (RW1C);
- * the event ring should be empty.
- */
- temp_64 = xhci_read_64(xhci, &ir->ir_set->erst_dequeue);
- xhci_write_64(xhci, temp_64 | ERST_EHB,
- &ir->ir_set->erst_dequeue);
- ret = IRQ_HANDLED;
- goto out;
- }
-
- event_ring_deq = ir->event_ring->dequeue;
- /* FIXME this should be a delayed service routine
- * that clears the EHB.
- */
- while (xhci_handle_event(xhci, ir) > 0) {
- if (event_loop++ < TRBS_PER_SEGMENT / 2)
- continue;
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, false);
- event_ring_deq = ir->event_ring->dequeue;
-
- /* ring is half-full, force isoc trbs to interrupt more often */
- if (xhci->isoc_bei_interval > AVOID_BEI_INTERVAL_MIN)
- xhci->isoc_bei_interval = xhci->isoc_bei_interval / 2;
-
- event_loop = 0;
- }
-
- xhci_update_erst_dequeue(xhci, ir, event_ring_deq, true);
ret = IRQ_HANDLED;
+ /* This is the handler of the primary interrupter */
+ xhci_handle_events(xhci, xhci->interrupters[0]);
out:
spin_unlock(&xhci->lock);
@@ -4019,7 +4031,8 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci,
}
/* Check if we should generate event interrupt for a TD in an isoc URB */
-static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
+static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i,
+ struct xhci_interrupter *ir)
{
if (xhci->hci_version < 0x100)
return false;
@@ -4030,8 +4043,8 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
* If AVOID_BEI is set the host handles full event rings poorly,
* generate an event at least every 8th TD to clear the event ring
*/
- if (i && xhci->quirks & XHCI_AVOID_BEI)
- return !!(i % xhci->isoc_bei_interval);
+ if (i && ir->isoc_bei_interval && xhci->quirks & XHCI_AVOID_BEI)
+ return !!(i % ir->isoc_bei_interval);
return true;
}
@@ -4040,6 +4053,7 @@ static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i)
static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
+ struct xhci_interrupter *ir;
struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct xhci_td *td;
@@ -4057,6 +4071,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
xep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
+ ir = xhci->interrupters[0];
num_tds = urb->number_of_packets;
if (num_tds < 1) {
@@ -4144,7 +4159,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
td->last_trb = ep_ring->enqueue;
td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
- if (trb_block_event_intr(xhci, num_tds, i))
+ if (trb_block_event_intr(xhci, num_tds, i, ir))
field |= TRB_BEI;
}
/* Calculate TRB length */
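The split between xhci_handle_event_trb() and xhci_handle_events() above keeps
the usual xHCI producer/consumer convention: an event TRB belongs to software
while its cycle bit matches the ring's software cycle state, and the xHC flips
the bit when it writes a new event. A rough sketch of that consumer loop, with
handle_one_event() as a made-up placeholder for the per-TRB work:

    /* Illustrative fragment mirroring xhci_handle_events(), not driver code. */
    while ((le32_to_cpu(ring->dequeue->event_cmd.flags) & TRB_CYCLE) ==
           ring->cycle_state) {
            handle_one_event(ring->dequeue);        /* placeholder */
            inc_deq(xhci, ring);    /* advances dequeue, toggles cycle_state on wrap */
    }
    /* Finally write the new dequeue pointer to ERDP and clear the EHB bit. */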
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index ac47b1c0544a..1740000d54c2 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -172,8 +172,8 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__field(void *, vdev)
__field(unsigned long long, out_ctx)
__field(unsigned long long, in_ctx)
- __field(u8, fake_port)
- __field(u8, real_port)
+ __field(int, hcd_portnum)
+ __field(int, hw_portnum)
__field(u16, current_mel)
),
@@ -181,13 +181,13 @@ DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
__entry->vdev = vdev;
__entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
__entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
- __entry->fake_port = (u8) vdev->fake_port;
- __entry->real_port = (u8) vdev->real_port;
+ __entry->hcd_portnum = (int) vdev->rhub_port->hcd_portnum;
+ __entry->hw_portnum = (int) vdev->rhub_port->hw_portnum;
__entry->current_mel = (u16) vdev->current_mel;
),
- TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+ TP_printk("vdev %p ctx %llx | %llx hcd_portnum %d hw_portnum %d current_mel %d",
__entry->vdev, __entry->in_ctx, __entry->out_ctx,
- __entry->fake_port, __entry->real_port, __entry->current_mel
+ __entry->hcd_portnum, __entry->hw_portnum, __entry->current_mel
)
);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c057c42c36f4..8579603edaff 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -346,6 +346,23 @@ static int xhci_disable_interrupter(struct xhci_interrupter *ir)
return 0;
}
+/* interrupt moderation interval imod_interval in nanoseconds */
+static int xhci_set_interrupter_moderation(struct xhci_interrupter *ir,
+ u32 imod_interval)
+{
+ u32 imod;
+
+ if (!ir || !ir->ir_set || imod_interval > U16_MAX * 250)
+ return -EINVAL;
+
+ imod = readl(&ir->ir_set->irq_control);
+ imod &= ~ER_IRQ_INTERVAL_MASK;
+ imod |= (imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
+ writel(imod, &ir->ir_set->irq_control);
+
+ return 0;
+}
+
static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
@@ -528,7 +545,6 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
*/
int xhci_run(struct usb_hcd *hcd)
{
- u32 temp;
u64 temp_64;
int ret;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
@@ -538,6 +554,9 @@ int xhci_run(struct usb_hcd *hcd)
*/
hcd->uses_new_polling = 1;
+ if (hcd->msi_enabled)
+ ir->ip_autoclear = true;
+
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
@@ -548,12 +567,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Set the interrupt modulation register");
- temp = readl(&ir->ir_set->irq_control);
- temp &= ~ER_IRQ_INTERVAL_MASK;
- temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK;
- writel(temp, &ir->ir_set->irq_control);
+ xhci_set_interrupter_moderation(ir, xhci->imod_interval);
if (xhci->quirks & XHCI_NEC_HOST) {
struct xhci_command *command;
@@ -780,19 +794,7 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
seg = seg->next;
} while (seg != ring->deq_seg);
- /* Reset the software enqueue and dequeue pointers */
- ring->deq_seg = ring->first_seg;
- ring->dequeue = ring->first_seg->trbs;
- ring->enq_seg = ring->deq_seg;
- ring->enqueue = ring->dequeue;
-
- ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
- /*
- * Ring is now zeroed, so the HW should look for change of ownership
- * when the cycle bit is set to 1.
- */
- ring->cycle_state = 1;
-
+ xhci_initialize_ring_info(ring, 1);
/*
* Reset the hardware dequeue pointer.
* Yes, this will need to be re-written after resume, but we're paranoid
@@ -1217,6 +1219,8 @@ static int xhci_map_temp_buffer(struct usb_hcd *hcd, struct urb *urb)
temp = kzalloc_node(buf_len, GFP_ATOMIC,
dev_to_node(hcd->self.sysdev));
+ if (!temp)
+ return -ENOMEM;
if (usb_urb_dir_out(urb))
sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
@@ -2259,7 +2263,7 @@ static int xhci_check_tt_bw_table(struct xhci_hcd *xhci,
struct xhci_tt_bw_info *tt_info;
/* Find the bandwidth table for the root port this TT is attached to. */
- bw_table = &xhci->rh_bw[virt_dev->real_port - 1].bw_table;
+ bw_table = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum].bw_table;
tt_info = virt_dev->tt_info;
/* If this TT already had active endpoints, the bandwidth for this TT
* has already been added. Removing all periodic endpoints (and thus
@@ -2377,7 +2381,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
if (virt_dev->tt_info) {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
xhci_warn(xhci, "Not enough bandwidth on HS bus for "
"newly activated TT.\n");
@@ -2390,7 +2394,7 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
} else {
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"Recalculating BW for rootport %u",
- virt_dev->real_port);
+ virt_dev->rhub_port->hw_portnum + 1);
}
/* Add in how much bandwidth will be used for interval zero, or the
@@ -2487,14 +2491,12 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
bw_used += overhead + packet_size;
if (!virt_dev->tt_info && virt_dev->udev->speed == USB_SPEED_HIGH) {
- unsigned int port_index = virt_dev->real_port - 1;
-
/* OK, we're manipulating a HS device attached to a
* root port bandwidth domain. Include the number of active TTs
* in the bandwidth used.
*/
bw_used += TT_HS_OVERHEAD *
- xhci->rh_bw[port_index].num_active_tts;
+ xhci->rh_bw[virt_dev->rhub_port->hw_portnum].num_active_tts;
}
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -2681,7 +2683,7 @@ void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
if (!virt_dev->tt_info)
return;
- rh_bw_info = &xhci->rh_bw[virt_dev->real_port - 1];
+ rh_bw_info = &xhci->rh_bw[virt_dev->rhub_port->hw_portnum];
if (old_active_eps == 0 &&
virt_dev->tt_info->active_eps != 0) {
rh_bw_info->num_active_tts += 1;
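The moderation interval handled by xhci_set_interrupter_moderation() is
programmed in 250 ns units into a 16-bit register field, which is where both
the division by 250 and the U16_MAX * 250 upper bound come from. A small
conversion sketch (illustrative; the 40000 ns value is just an example):

    /* ns -> IMOD Interval field units of 250 ns each, for illustration only */
    static inline u32 imod_ns_to_field(u32 ns)
    {
            return (ns / 250) & ER_IRQ_INTERVAL_MASK;       /* 40000 ns -> 160 */
    }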
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 6f82d404883f..6f4bf98a6282 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -22,6 +22,9 @@
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
+#include "xhci-port.h"
+#include "xhci-caps.h"
+
/* max buffer size for trace and debug messages */
#define XHCI_MSG_MAX 500
@@ -62,90 +65,6 @@ struct xhci_cap_regs {
/* Reserved up to (CAPLENGTH - 0x1C) */
};
-/* hc_capbase bitmasks */
-/* bits 7:0 - how long is the Capabilities register */
-#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
-/* bits 31:16 */
-#define HC_VERSION(p) (((p) >> 16) & 0xffff)
-
-/* HCSPARAMS1 - hcs_params1 - bitmasks */
-/* bits 0:7, Max Device Slots */
-#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
-#define HCS_SLOTS_MASK 0xff
-/* bits 8:18, Max Interrupters */
-#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
-/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
-#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
-
-/* HCSPARAMS2 - hcs_params2 - bitmasks */
-/* bits 0:3, frames or uframes that SW needs to queue transactions
- * ahead of the HW to meet periodic deadlines */
-#define HCS_IST(p) (((p) >> 0) & 0xf)
-/* bits 4:7, max number of Event Ring segments */
-#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
-/* bits 21:25 Hi 5 bits of Scratchpad buffers SW must allocate for the HW */
-/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
-/* bits 27:31 Lo 5 bits of Scratchpad buffers SW must allocate for the HW */
-#define HCS_MAX_SCRATCHPAD(p) ((((p) >> 16) & 0x3e0) | (((p) >> 27) & 0x1f))
-
-/* HCSPARAMS3 - hcs_params3 - bitmasks */
-/* bits 0:7, Max U1 to U0 latency for the roothub ports */
-#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
-/* bits 16:31, Max U2 to U0 latency for the roothub ports */
-#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
-
-/* HCCPARAMS - hcc_params - bitmasks */
-/* true: HC can use 64-bit address pointers */
-#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
-/* true: HC can do bandwidth negotiation */
-#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
-/* true: HC uses 64-byte Device Context structures
- * FIXME 64-byte context structures aren't supported yet.
- */
-#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
-/* true: HC has port power switches */
-#define HCC_PPC(p) ((p) & (1 << 3))
-/* true: HC has port indicators */
-#define HCS_INDICATOR(p) ((p) & (1 << 4))
-/* true: HC has Light HC Reset Capability */
-#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
-/* true: HC supports latency tolerance messaging */
-#define HCC_LTC(p) ((p) & (1 << 6))
-/* true: no secondary Stream ID Support */
-#define HCC_NSS(p) ((p) & (1 << 7))
-/* true: HC supports Stopped - Short Packet */
-#define HCC_SPC(p) ((p) & (1 << 9))
-/* true: HC has Contiguous Frame ID Capability */
-#define HCC_CFC(p) ((p) & (1 << 11))
-/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
-#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
-/* Extended Capabilities pointer from PCI base - section 5.3.6 */
-#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
-
-#define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
-
-/* db_off bitmask - bits 0:1 reserved */
-#define DBOFF_MASK (~0x3)
-
-/* run_regs_off bitmask - bits 0:4 reserved */
-#define RTSOFF_MASK (~0x1f)
-
-/* HCCPARAMS2 - hcc_params2 - bitmasks */
-/* true: HC supports U3 entry Capability */
-#define HCC2_U3C(p) ((p) & (1 << 0))
-/* true: HC supports Configure endpoint command Max exit latency too large */
-#define HCC2_CMC(p) ((p) & (1 << 1))
-/* true: HC supports Force Save context Capability */
-#define HCC2_FSC(p) ((p) & (1 << 2))
-/* true: HC supports Compliance Transition Capability */
-#define HCC2_CTC(p) ((p) & (1 << 3))
-/* true: HC support Large ESIT payload Capability > 48k */
-#define HCC2_LEC(p) ((p) & (1 << 4))
-/* true: HC support Configuration Information Capability */
-#define HCC2_CIC(p) ((p) & (1 << 5))
-/* true: HC support Extended TBC Capability, Isoc burst count > 65535 */
-#define HCC2_ETC(p) ((p) & (1 << 6))
-
/* Number of registers per port */
#define NUM_PORT_REGS 4
@@ -291,181 +210,6 @@ struct xhci_op_regs {
#define CONFIG_CIE (1 << 9)
/* bits 10:31 - reserved and should be preserved */
-/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
-/* true: device connected */
-#define PORT_CONNECT (1 << 0)
-/* true: port enabled */
-#define PORT_PE (1 << 1)
-/* bit 2 reserved and zeroed */
-/* true: port has an over-current condition */
-#define PORT_OC (1 << 3)
-/* true: port reset signaling asserted */
-#define PORT_RESET (1 << 4)
-/* Port Link State - bits 5:8
- * A read gives the current link PM state of the port,
- * a write with Link State Write Strobe set sets the link state.
- */
-#define PORT_PLS_MASK (0xf << 5)
-#define XDEV_U0 (0x0 << 5)
-#define XDEV_U1 (0x1 << 5)
-#define XDEV_U2 (0x2 << 5)
-#define XDEV_U3 (0x3 << 5)
-#define XDEV_DISABLED (0x4 << 5)
-#define XDEV_RXDETECT (0x5 << 5)
-#define XDEV_INACTIVE (0x6 << 5)
-#define XDEV_POLLING (0x7 << 5)
-#define XDEV_RECOVERY (0x8 << 5)
-#define XDEV_HOT_RESET (0x9 << 5)
-#define XDEV_COMP_MODE (0xa << 5)
-#define XDEV_TEST_MODE (0xb << 5)
-#define XDEV_RESUME (0xf << 5)
-
-/* true: port has power (see HCC_PPC) */
-#define PORT_POWER (1 << 9)
-/* bits 10:13 indicate device speed:
- * 0 - undefined speed - port hasn't be initialized by a reset yet
- * 1 - full speed
- * 2 - low speed
- * 3 - high speed
- * 4 - super speed
- * 5-15 reserved
- */
-#define DEV_SPEED_MASK (0xf << 10)
-#define XDEV_FS (0x1 << 10)
-#define XDEV_LS (0x2 << 10)
-#define XDEV_HS (0x3 << 10)
-#define XDEV_SS (0x4 << 10)
-#define XDEV_SSP (0x5 << 10)
-#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
-#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
-#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
-#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
-#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
-#define DEV_SUPERSPEEDPLUS(p) (((p) & DEV_SPEED_MASK) == XDEV_SSP)
-#define DEV_SUPERSPEED_ANY(p) (((p) & DEV_SPEED_MASK) >= XDEV_SS)
-#define DEV_PORT_SPEED(p) (((p) >> 10) & 0x0f)
-
-/* Bits 20:23 in the Slot Context are the speed for the device */
-#define SLOT_SPEED_FS (XDEV_FS << 10)
-#define SLOT_SPEED_LS (XDEV_LS << 10)
-#define SLOT_SPEED_HS (XDEV_HS << 10)
-#define SLOT_SPEED_SS (XDEV_SS << 10)
-#define SLOT_SPEED_SSP (XDEV_SSP << 10)
-/* Port Indicator Control */
-#define PORT_LED_OFF (0 << 14)
-#define PORT_LED_AMBER (1 << 14)
-#define PORT_LED_GREEN (2 << 14)
-#define PORT_LED_MASK (3 << 14)
-/* Port Link State Write Strobe - set this when changing link state */
-#define PORT_LINK_STROBE (1 << 16)
-/* true: connect status change */
-#define PORT_CSC (1 << 17)
-/* true: port enable change */
-#define PORT_PEC (1 << 18)
-/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
- * into an enabled state, and the device into the default state. A "warm" reset
- * also resets the link, forcing the device through the link training sequence.
- * SW can also look at the Port Reset register to see when warm reset is done.
- */
-#define PORT_WRC (1 << 19)
-/* true: over-current change */
-#define PORT_OCC (1 << 20)
-/* true: reset change - 1 to 0 transition of PORT_RESET */
-#define PORT_RC (1 << 21)
-/* port link status change - set on some port link state transitions:
- * Transition Reason
- * ------------------------------------------------------------------------------
- * - U3 to Resume Wakeup signaling from a device
- * - Resume to Recovery to U0 USB 3.0 device resume
- * - Resume to U0 USB 2.0 device resume
- * - U3 to Recovery to U0 Software resume of USB 3.0 device complete
- * - U3 to U0 Software resume of USB 2.0 device complete
- * - U2 to U0 L1 resume of USB 2.1 device complete
- * - U0 to U0 (???) L1 entry rejection by USB 2.1 device
- * - U0 to disabled L1 entry error with USB 2.1 device
- * - Any state to inactive Error on USB 3.0 port
- */
-#define PORT_PLC (1 << 22)
-/* port configure error change - port failed to configure its link partner */
-#define PORT_CEC (1 << 23)
-#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
- PORT_RC | PORT_PLC | PORT_CEC)
-
-
-/* Cold Attach Status - xHC can set this bit to report device attached during
- * Sx state. Warm port reset should be perfomed to clear this bit and move port
- * to connected state.
- */
-#define PORT_CAS (1 << 24)
-/* wake on connect (enable) */
-#define PORT_WKCONN_E (1 << 25)
-/* wake on disconnect (enable) */
-#define PORT_WKDISC_E (1 << 26)
-/* wake on over-current (enable) */
-#define PORT_WKOC_E (1 << 27)
-/* bits 28:29 reserved */
-/* true: device is non-removable - for USB 3.0 roothub emulation */
-#define PORT_DEV_REMOVE (1 << 30)
-/* Initiate a warm port reset - complete when PORT_WRC is '1' */
-#define PORT_WR (1 << 31)
-
-/* We mark duplicate entries with -1 */
-#define DUPLICATE_ENTRY ((u8)(-1))
-
-/* Port Power Management Status and Control - port_power_base bitmasks */
-/* Inactivity timer value for transitions into U1, in microseconds.
- * Timeout can be up to 127us. 0xFF means an infinite timeout.
- */
-#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
-#define PORT_U1_TIMEOUT_MASK 0xff
-/* Inactivity timer value for transitions into U2 */
-#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
-#define PORT_U2_TIMEOUT_MASK (0xff << 8)
-/* Bits 24:31 for port testing */
-
-/* USB2 Protocol PORTSPMSC */
-#define PORT_L1S_MASK 7
-#define PORT_L1S_SUCCESS 1
-#define PORT_RWE (1 << 3)
-#define PORT_HIRD(p) (((p) & 0xf) << 4)
-#define PORT_HIRD_MASK (0xf << 4)
-#define PORT_L1DS_MASK (0xff << 8)
-#define PORT_L1DS(p) (((p) & 0xff) << 8)
-#define PORT_HLE (1 << 16)
-#define PORT_TEST_MODE_SHIFT 28
-
-/* USB3 Protocol PORTLI Port Link Information */
-#define PORT_RX_LANES(p) (((p) >> 16) & 0xf)
-#define PORT_TX_LANES(p) (((p) >> 20) & 0xf)
-
-/* USB2 Protocol PORTHLPMC */
-#define PORT_HIRDM(p)((p) & 3)
-#define PORT_L1_TIMEOUT(p)(((p) & 0xff) << 2)
-#define PORT_BESLD(p)(((p) & 0xf) << 10)
-
-/* use 512 microseconds as USB2 LPM L1 default timeout. */
-#define XHCI_L1_TIMEOUT 512
-
-/* Set default HIRD/BESL value to 4 (350/400us) for USB2 L1 LPM resume latency.
- * Safe to use with mixed HIRD and BESL systems (host and device) and is used
- * by other operating systems.
- *
- * XHCI 1.0 errata 8/14/12 Table 13 notes:
- * "Software should choose xHC BESL/BESLD field values that do not violate a
- * device's resume latency requirements,
- * e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
- * or not program values < '4' if BLC = '0' and a BESL device is attached.
- */
-#define XHCI_DEFAULT_BESL 4
-
-/*
- * USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
- * to complete link training. usually link trainig completes much faster
- * so check status 10 times with 36ms sleep in places we need to wait for
- * polling to complete.
- */
-#define XHCI_PORT_POLLING_LFPS_TIME 36
-
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
@@ -995,8 +739,7 @@ struct xhci_virt_device {
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
struct xhci_virt_ep eps[EP_CTX_PER_DEV];
- u8 fake_port;
- u8 real_port;
+ struct xhci_port *rhub_port;
struct xhci_interval_bw_table *bw_table;
struct xhci_tt_bw_info *tt_info;
/*
@@ -1688,6 +1431,8 @@ struct xhci_interrupter {
struct xhci_erst erst;
struct xhci_intr_reg __iomem *ir_set;
unsigned int intr_num;
+ bool ip_autoclear;
+ u32 isoc_bei_interval;
/* For interrupter registers save and restore over suspend/resume */
u32 s3_irq_pending;
u32 s3_irq_control;
@@ -1717,6 +1462,8 @@ struct xhci_port {
unsigned int lpm_incapable:1;
unsigned long resume_timestamp;
bool rexit_active;
+ /* Slot ID is the index of the device directly connected to the port */
+ int slot_id;
struct completion rexit_done;
struct completion u3exit_done;
};
@@ -1760,7 +1507,6 @@ struct xhci_hcd {
u8 isoc_threshold;
/* imod_interval in ns (I * 250ns) */
u32 imod_interval;
- u32 isoc_bei_interval;
int event_ring_max;
/* 4KB min, 128MB max */
int page_size;
@@ -2200,8 +1946,6 @@ unsigned long xhci_get_resuming_ports(struct usb_hcd *hcd);
#endif /* CONFIG_PM */
u32 xhci_port_state_to_neutral(u32 state);
-int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
- u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);
/* xHCI contexts */
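With struct xhci_port now carrying slot_id, and struct xhci_virt_device
pointing at its root-hub port, the old xhci_find_slot_id_by_port() scan
becomes a plain field access. A hypothetical helper, only to illustrate the
new relationship (not part of the patch):

    static inline struct xhci_virt_device *
    xhci_port_to_vdev(struct xhci_hcd *xhci, struct xhci_port *port)
    {
            /* slot_id is 0 when no device is attached to this root-hub port */
            return port->slot_id ? xhci->devs[port->slot_id] : NULL;
    }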
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 67f098579fb4..7b7e1554ea20 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -631,7 +631,6 @@ static int mdc800_device_open (struct inode* inode, struct file *file)
mdc800->camera_busy=0;
mdc800->camera_request_ready=0;
- retval=0;
mdc800->irq_urb->dev = mdc800->dev;
retval = usb_submit_urb (mdc800->irq_urb, GFP_KERNEL);
if (retval) {
diff --git a/drivers/usb/misc/onboard_usb_hub.c b/drivers/usb/misc/onboard_usb_hub.c
index 0dd2b032c90b..c6101ed2d9d4 100644
--- a/drivers/usb/misc/onboard_usb_hub.c
+++ b/drivers/usb/misc/onboard_usb_hub.c
@@ -260,7 +260,7 @@ static int onboard_hub_probe(struct platform_device *pdev)
if (!hub)
return -ENOMEM;
- hub->pdata = device_get_match_data(&pdev->dev);
+ hub->pdata = device_get_match_data(dev);
if (!hub->pdata)
return -EINVAL;
@@ -454,6 +454,8 @@ static const struct usb_device_id onboard_hub_id_table[] = {
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5411) }, /* RTS5411 USB 2.1 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x0414) }, /* RTS5414 USB 3.2 */
{ USB_DEVICE(VENDOR_ID_REALTEK, 0x5414) }, /* RTS5414 USB 2.1 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8025) }, /* TI USB8020B 3.0 */
+ { USB_DEVICE(VENDOR_ID_TI, 0x8027) }, /* TI USB8020B 2.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8140) }, /* TI USB8041 3.0 */
{ USB_DEVICE(VENDOR_ID_TI, 0x8142) }, /* TI USB8041 2.0 */
{ USB_DEVICE(VENDOR_ID_VIA, 0x0817) }, /* VIA VL817 3.1 */
diff --git a/drivers/usb/misc/onboard_usb_hub.h b/drivers/usb/misc/onboard_usb_hub.h
index f360d5cf8d8a..b4b15d45f84d 100644
--- a/drivers/usb/misc/onboard_usb_hub.h
+++ b/drivers/usb/misc/onboard_usb_hub.h
@@ -26,6 +26,11 @@ static const struct onboard_hub_pdata realtek_rts5411_data = {
.num_supplies = 1,
};
+static const struct onboard_hub_pdata ti_tusb8020b_data = {
+ .reset_us = 3000,
+ .num_supplies = 1,
+};
+
static const struct onboard_hub_pdata ti_tusb8041_data = {
.reset_us = 3000,
.num_supplies = 1,
@@ -62,6 +67,8 @@ static const struct of_device_id onboard_hub_match[] = {
{ .compatible = "usb424,2517", .data = &microchip_usb424_data, },
{ .compatible = "usb424,2744", .data = &microchip_usb5744_data, },
{ .compatible = "usb424,5744", .data = &microchip_usb5744_data, },
+ { .compatible = "usb451,8025", .data = &ti_tusb8020b_data, },
+ { .compatible = "usb451,8027", .data = &ti_tusb8020b_data, },
{ .compatible = "usb451,8140", .data = &ti_tusb8041_data, },
{ .compatible = "usb451,8142", .data = &ti_tusb8041_data, },
{ .compatible = "usb4b4,6504", .data = &cypress_hx3_data, },
diff --git a/drivers/usb/mtu3/mtu3_host.c b/drivers/usb/mtu3/mtu3_host.c
index 9f2be22af844..7c657ea2dabd 100644
--- a/drivers/usb/mtu3/mtu3_host.c
+++ b/drivers/usb/mtu3/mtu3_host.c
@@ -34,6 +34,18 @@
#define WC0_SSUSB0_CDEN BIT(6)
#define WC0_IS_SPM_EN BIT(1)
+/* mt8195 */
+#define PERI_WK_CTRL0_8195 0x04
+#define WC0_IS_P_95 BIT(30) /* polarity */
+#define WC0_IS_C_95(x) ((u32)(((x) & 0x7) << 27))
+#define WC0_IS_EN_P3_95 BIT(26)
+#define WC0_IS_EN_P2_95 BIT(25)
+
+#define PERI_WK_CTRL1_8195 0x20
+#define WC1_IS_C_95(x) ((u32)(((x) & 0xf) << 28))
+#define WC1_IS_P_95 BIT(12)
+#define WC1_IS_EN_P0_95 BIT(6)
+
/* mt2712 etc */
#define PERI_SSUSB_SPM_CTRL 0x0
#define SSC_IP_SLEEP_EN BIT(4)
@@ -44,6 +56,9 @@ enum ssusb_uwk_vers {
SSUSB_UWK_V2,
SSUSB_UWK_V1_1 = 101, /* specific revision 1.01 */
SSUSB_UWK_V1_2, /* specific revision 1.02 */
+ SSUSB_UWK_V1_3, /* mt8195 IP0 */
+ SSUSB_UWK_V1_5 = 105, /* mt8195 IP2 */
+ SSUSB_UWK_V1_6, /* mt8195 IP3 */
};
/*
@@ -70,6 +85,21 @@ static void ssusb_wakeup_ip_sleep_set(struct ssusb_mtk *ssusb, bool enable)
msk = WC0_SSUSB0_CDEN | WC0_IS_SPM_EN;
val = enable ? msk : 0;
break;
+ case SSUSB_UWK_V1_3:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL1_8195;
+ msk = WC1_IS_EN_P0_95 | WC1_IS_C_95(0xf) | WC1_IS_P_95;
+ val = enable ? (WC1_IS_EN_P0_95 | WC1_IS_C_95(0x1)) : 0;
+ break;
+ case SSUSB_UWK_V1_5:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL0_8195;
+ msk = WC0_IS_EN_P2_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
+ val = enable ? (WC0_IS_EN_P2_95 | WC0_IS_C_95(0x1)) : 0;
+ break;
+ case SSUSB_UWK_V1_6:
+ reg = ssusb->uwk_reg_base + PERI_WK_CTRL0_8195;
+ msk = WC0_IS_EN_P3_95 | WC0_IS_C_95(0x7) | WC0_IS_P_95;
+ val = enable ? (WC0_IS_EN_P3_95 | WC0_IS_C_95(0x1)) : 0;
+ break;
case SSUSB_UWK_V2:
reg = ssusb->uwk_reg_base + PERI_SSUSB_SPM_CTRL;
msk = SSC_IP_SLEEP_EN | SSC_SPM_INT_EN;
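Each of the new mt8195 cases only computes a (reg, msk, val) triple; the write
that follows in this function is a masked read-modify-write through the wakeup
syscon regmap. Conceptually (a sketch, with the regmap handle name uwk assumed
for illustration):

    unsigned int tmp;

    regmap_read(uwk, reg, &tmp);            /* uwk: wakeup syscon regmap (assumed name) */
    tmp = (tmp & ~msk) | (val & msk);       /* clear the field, then set the new value */
    regmap_write(uwk, reg, tmp);
    /* equivalent to a single regmap_update_bits(uwk, reg, msk, val) call */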
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 051c6da7cf6d..55df0ee413d8 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1744,7 +1744,6 @@ static inline void musb_g_init_endpoints(struct musb *musb)
{
u8 epnum;
struct musb_hw_ep *hw_ep;
- unsigned count = 0;
/* initialize endpoint list just once */
INIT_LIST_HEAD(&(musb->g.ep_list));
@@ -1754,17 +1753,14 @@ static inline void musb_g_init_endpoints(struct musb *musb)
epnum++, hw_ep++) {
if (hw_ep->is_shared_fifo /* || !epnum */) {
init_peripheral_ep(musb, &hw_ep->ep_in, epnum, 0);
- count++;
} else {
if (hw_ep->max_packet_sz_tx) {
init_peripheral_ep(musb, &hw_ep->ep_in,
epnum, 1);
- count++;
}
if (hw_ep->max_packet_sz_rx) {
init_peripheral_ep(musb, &hw_ep->ep_out,
epnum, 0);
- count++;
}
}
}
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 9ab50f26db60..8f735a86cd19 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -74,33 +74,26 @@ static void nop_reset(struct usb_phy_generic *nop)
}
/* interface to regulator framework */
-static void nop_set_vbus_draw(struct usb_phy_generic *nop, unsigned mA)
+static int nop_set_vbus(struct usb_otg *otg, bool enable)
{
- struct regulator *vbus_draw = nop->vbus_draw;
- int enabled;
- int ret;
+ int ret = 0;
+ struct usb_phy_generic *nop = dev_get_drvdata(otg->usb_phy->dev);
- if (!vbus_draw)
- return;
+ if (!nop->vbus_draw)
+ return 0;
- enabled = nop->vbus_draw_enabled;
- if (mA) {
- regulator_set_current_limit(vbus_draw, 0, 1000 * mA);
- if (!enabled) {
- ret = regulator_enable(vbus_draw);
- if (ret < 0)
- return;
- nop->vbus_draw_enabled = 1;
- }
- } else {
- if (enabled) {
- ret = regulator_disable(vbus_draw);
- if (ret < 0)
- return;
- nop->vbus_draw_enabled = 0;
- }
+ if (enable && !nop->vbus_draw_enabled) {
+ ret = regulator_enable(nop->vbus_draw);
+ if (ret)
+ nop->vbus_draw_enabled = false;
+ else
+ nop->vbus_draw_enabled = true;
+
+ } else if (!enable && nop->vbus_draw_enabled) {
+ ret = regulator_disable(nop->vbus_draw);
+ nop->vbus_draw_enabled = false;
}
- nop->mA = mA;
+ return ret;
}
@@ -120,14 +113,9 @@ static irqreturn_t nop_gpio_vbus_thread(int irq, void *data)
otg->state = OTG_STATE_B_PERIPHERAL;
nop->phy.last_event = status;
- /* drawing a "unit load" is *always* OK, except for OTG */
- nop_set_vbus_draw(nop, 100);
-
atomic_notifier_call_chain(&nop->phy.notifier, status,
otg->gadget);
} else {
- nop_set_vbus_draw(nop, 0);
-
status = USB_EVENT_NONE;
otg->state = OTG_STATE_B_IDLE;
nop->phy.last_event = status;
@@ -274,6 +262,13 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
"could not get vbus regulator\n");
+ nop->vbus_draw = devm_regulator_get_exclusive(dev, "vbus");
+ if (PTR_ERR(nop->vbus_draw) == -ENODEV)
+ nop->vbus_draw = NULL;
+ if (IS_ERR(nop->vbus_draw))
+ return dev_err_probe(dev, PTR_ERR(nop->vbus_draw),
+ "could not get vbus regulator\n");
+
nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
@@ -284,6 +279,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
nop->phy.otg->usb_phy = &nop->phy;
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
+ nop->phy.otg->set_vbus = nop_set_vbus;
return 0;
}
@@ -341,6 +337,9 @@ static void usb_phy_generic_remove(struct platform_device *pdev)
struct usb_phy_generic *nop = platform_get_drvdata(pdev);
usb_remove_phy(&nop->phy);
+
+ if (nop->vbus_draw && nop->vbus_draw_enabled)
+ regulator_disable(nop->vbus_draw);
}
static const struct of_device_id nop_xceiv_dt_ids[] = {
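Because VBUS is now switched through the otg set_vbus hook rather than the
notifier path, a controller driver that owns this PHY would toggle VBUS along
these lines (a hedged sketch of a caller, which is not part of this patch):

    struct usb_phy *phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);

    if (!IS_ERR(phy) && phy->otg)
            otg_set_vbus(phy->otg, true);   /* ends up in nop_set_vbus() */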
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index 4b468bde19cf..06e0fb23566c 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -699,7 +699,7 @@ out:
}
EXPORT_SYMBOL_GPL(usb_add_phy);
-static struct device_type usb_phy_dev_type = {
+static const struct device_type usb_phy_dev_type = {
.name = "usb_phy",
.uevent = usb_phy_uevent,
};
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index 70165dd86b5d..d7aa913ceb8a 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -7,6 +7,7 @@
* Hans de Goede <hdegoede@redhat.com>
*/
+#include <linux/component.h>
#include <linux/usb/role.h>
#include <linux/property.h>
#include <linux/device.h>
@@ -36,6 +37,32 @@ struct usb_role_switch {
#define to_role_switch(d) container_of(d, struct usb_role_switch, dev)
+static int connector_bind(struct device *dev, struct device *connector, void *data)
+{
+ int ret;
+
+ ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector");
+ if (ret)
+ return ret;
+
+ ret = sysfs_create_link(&connector->kobj, &dev->kobj, "usb-role-switch");
+ if (ret)
+ sysfs_remove_link(&dev->kobj, "connector");
+
+ return ret;
+}
+
+static void connector_unbind(struct device *dev, struct device *connector, void *data)
+{
+ sysfs_remove_link(&connector->kobj, "usb-role-switch");
+ sysfs_remove_link(&dev->kobj, "connector");
+}
+
+static const struct component_ops connector_ops = {
+ .bind = connector_bind,
+ .unbind = connector_unbind,
+};
+
/**
* usb_role_switch_set_role - Set USB role for a switch
* @sw: USB role switch
@@ -361,6 +388,12 @@ usb_role_switch_register(struct device *parent,
return ERR_PTR(ret);
}
+ if (dev_fwnode(&sw->dev)) {
+ ret = component_add(&sw->dev, &connector_ops);
+ if (ret)
+ dev_warn(&sw->dev, "failed to add component\n");
+ }
+
sw->registered = true;
/* TODO: Symlinks for the host port and the device controller. */
@@ -377,10 +410,12 @@ EXPORT_SYMBOL_GPL(usb_role_switch_register);
*/
void usb_role_switch_unregister(struct usb_role_switch *sw)
{
- if (!IS_ERR_OR_NULL(sw)) {
- sw->registered = false;
- device_unregister(&sw->dev);
- }
+ if (IS_ERR_OR_NULL(sw))
+ return;
+ sw->registered = false;
+ if (dev_fwnode(&sw->dev))
+ component_del(&sw->dev, &connector_ops);
+ device_unregister(&sw->dev);
}
EXPORT_SYMBOL_GPL(usb_role_switch_unregister);
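A role-switch provider keeps registering exactly as before; the "connector"
and "usb-role-switch" symlinks now appear automatically once a connector
device binds against the component added above. A minimal, hedged registration
sketch (my_set_role and priv are placeholders, not part of this patch):

    static int my_set_role(struct usb_role_switch *sw, enum usb_role role)
    {
            /* program the mux or controller for the new role */
            return 0;
    }

    struct usb_role_switch_desc desc = {
            .fwnode = dev_fwnode(dev),      /* used to match the connector */
            .set = my_set_role,
            .driver_data = priv,
    };
    struct usb_role_switch *sw = usb_role_switch_register(dev, &desc);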
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 923e0ed85444..21fd26609252 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -56,6 +56,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
+ { USB_DEVICE(0x04BF, 0x1301) }, /* TDK Corporation NC0110013M - Network Controller */
+ { USB_DEVICE(0x04BF, 0x1303) }, /* TDK Corporation MM0110113M - i3 Micro Module */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
{ USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
@@ -144,6 +146,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
+ { USB_DEVICE(0x10C4, 0x863C) }, /* MGP Instruments PDS100 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
{ USB_DEVICE(0x10C4, 0x87ED) }, /* IMST USB-Stick for Smart Meter */
@@ -177,6 +180,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */
{ USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */
{ USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */
+ { USB_DEVICE(0x11CA, 0x0212) }, /* Verifone USB to Printer (UART, CP2102) */
{ USB_DEVICE(0x12B8, 0xEC60) }, /* Link G4 ECU */
{ USB_DEVICE(0x12B8, 0xEC62) }, /* Link G4+ ECU */
{ USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 13a56783830d..76a04ab41100 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1077,6 +1077,8 @@ static const struct usb_device_id id_table_combined[] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_FALCONIA_JTAG_UNBUF_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ /* GMC devices */
+ { USB_DEVICE(GMC_VID, GMC_Z216C_PID) },
{ } /* Terminating entry */
};
@@ -2610,7 +2612,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
struct device *ddev = &port->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct ktermios *termios = &tty->termios;
- unsigned int cflag = termios->c_cflag;
+ unsigned int cflag;
u16 value, index;
int ret;
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 21a2b5a25fc0..5ee60ba2a73c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1606,3 +1606,9 @@
#define UBLOX_VID 0x1546
#define UBLOX_C099F9P_ZED_PID 0x0502
#define UBLOX_C099F9P_ODIN_PID 0x0503
+
+/*
+ * GMC devices
+ */
+#define GMC_VID 0x1cd7
+#define GMC_Z216C_PID 0x0217 /* GMC Z216C Adapter IR-USB */
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 93b17e0e05a3..0a783985197c 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -921,7 +921,6 @@ static void usa28_indat_callback(struct urb *urb)
port = urb->context;
p_priv = usb_get_serial_port_data(port);
- data = urb->transfer_buffer;
if (urb != p_priv->in_urbs[p_priv->in_flip])
return;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2ae124c49d44..55a65d941ccb 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -613,6 +613,11 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
+/* MeiG Smart Technology products */
+#define MEIGSMART_VENDOR_ID 0x2dee
+/* MeiG Smart SLM320 based on UNISOC UIS8910 */
+#define MEIGSMART_PRODUCT_SLM320 0x4d41
+
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -2282,6 +2287,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 6365cfe5402c..fa07f6ff9ecc 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -409,7 +409,6 @@ static void oti6858_set_termios(struct tty_struct *tty,
cflag = tty->termios.c_cflag;
spin_lock_irqsave(&priv->lock, flags);
- divisor = priv->pending_setup.divisor;
frame_fmt = priv->pending_setup.frame_fmt;
control = priv->pending_setup.control;
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 2b098b55c4cb..c3ce51c2dabd 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -534,7 +534,6 @@ static void pdump(struct us_data *us, void *ibuffer, int length)
}
line[offset] = 0;
usb_stor_dbg(us, "%s\n", line);
- offset = 0;
}
#endif
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 15dc25801cdc..0aa079405d23 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -196,7 +196,7 @@ static int sddr55_read_data(struct us_data *us,
unsigned char *buffer;
unsigned int pba;
- unsigned long address;
+ unsigned int address;
unsigned short pages;
unsigned int len, offset;
@@ -316,7 +316,7 @@ static int sddr55_write_data(struct us_data *us,
unsigned int pba;
unsigned int new_pba;
- unsigned long address;
+ unsigned int address;
unsigned short pages;
int i;
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index f8ea3054be54..038dc51f429d 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -50,13 +50,17 @@ enum {
enum dp_state {
DP_STATE_IDLE,
DP_STATE_ENTER,
+ DP_STATE_ENTER_PRIME,
DP_STATE_UPDATE,
DP_STATE_CONFIGURE,
+ DP_STATE_CONFIGURE_PRIME,
DP_STATE_EXIT,
+ DP_STATE_EXIT_PRIME,
};
struct dp_altmode {
struct typec_displayport_data data;
+ struct typec_displayport_data data_prime;
enum dp_state state;
bool hpd;
@@ -67,6 +71,7 @@ struct dp_altmode {
struct typec_altmode *alt;
const struct typec_altmode *port;
struct fwnode_handle *connector_fwnode;
+ struct typec_altmode *plug_prime;
};
static int dp_altmode_notify(struct dp_altmode *dp)
@@ -99,12 +104,18 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
conf |= DP_CONF_UFP_U_AS_DFP_D;
pin_assign = DP_CAP_UFP_D_PIN_ASSIGN(dp->alt->vdo) &
DP_CAP_DFP_D_PIN_ASSIGN(dp->port->vdo);
+ /* Account for active cable capabilities */
+ if (dp->plug_prime)
+ pin_assign &= DP_CAP_DFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
break;
case DP_STATUS_CON_UFP_D:
case DP_STATUS_CON_BOTH: /* NOTE: First acting as DP source */
conf |= DP_CONF_UFP_U_AS_UFP_D;
pin_assign = DP_CAP_PIN_ASSIGN_UFP_D(dp->alt->vdo) &
DP_CAP_PIN_ASSIGN_DFP_D(dp->port->vdo);
+ /* Account for active cable capabilities */
+ if (dp->plug_prime)
+ pin_assign &= DP_CAP_UFP_D_PIN_ASSIGN(dp->plug_prime->vdo);
break;
default:
break;
@@ -130,6 +141,8 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
}
dp->data.conf = conf;
+ if (dp->plug_prime)
+ dp->data_prime.conf = conf;
return 0;
}
@@ -143,13 +156,16 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
if (configured && (dp->data.status & DP_STATUS_SWITCH_TO_USB)) {
dp->data.conf = 0;
- dp->state = DP_STATE_CONFIGURE;
+ dp->data_prime.conf = 0;
+ dp->state = dp->plug_prime ? DP_STATE_CONFIGURE_PRIME :
+ DP_STATE_CONFIGURE;
} else if (dp->data.status & DP_STATUS_EXIT_DP_MODE) {
dp->state = DP_STATE_EXIT;
} else if (!(con & DP_CONF_CURRENTLY(dp->data.conf))) {
ret = dp_altmode_configure(dp, con);
if (!ret) {
- dp->state = DP_STATE_CONFIGURE;
+ dp->state = dp->plug_prime ? DP_STATE_CONFIGURE_PRIME :
+ DP_STATE_CONFIGURE;
if (dp->hpd != hpd) {
dp->hpd = hpd;
dp->pending_hpd = true;
@@ -209,6 +225,19 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
return ret;
}
+static int dp_altmode_configure_vdm_cable(struct dp_altmode *dp, u32 conf)
+{
+ int svdm_version = typec_altmode_get_cable_svdm_version(dp->plug_prime);
+ u32 header;
+
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = DP_HEADER(dp, svdm_version, DP_CMD_CONFIGURE);
+
+ return typec_cable_altmode_vdm(dp->plug_prime, TYPEC_PLUG_SOP_P, header, &conf, 2);
+}
+
static void dp_altmode_work(struct work_struct *work)
{
struct dp_altmode *dp = container_of(work, struct dp_altmode, work);
@@ -225,6 +254,19 @@ static void dp_altmode_work(struct work_struct *work)
if (ret && ret != -EBUSY)
dev_err(&dp->alt->dev, "failed to enter mode\n");
break;
+ case DP_STATE_ENTER_PRIME:
+ ret = typec_cable_altmode_enter(dp->alt, TYPEC_PLUG_SOP_P, NULL);
+ /*
+ * If we fail to enter Alt Mode on SOP', then we should drop the
+ * plug from the driver and attempt to run the driver without
+ * it.
+ */
+ if (ret && ret != -EBUSY) {
+ dev_err(&dp->alt->dev, "plug failed to enter mode\n");
+ dp->state = DP_STATE_ENTER;
+ goto disable_prime;
+ }
+ break;
case DP_STATE_UPDATE:
svdm_version = typec_altmode_get_svdm_version(dp->alt);
if (svdm_version < 0)
@@ -243,10 +285,24 @@ static void dp_altmode_work(struct work_struct *work)
dev_err(&dp->alt->dev,
"unable to send Configure command (%d)\n", ret);
break;
+ case DP_STATE_CONFIGURE_PRIME:
+ ret = dp_altmode_configure_vdm_cable(dp, dp->data_prime.conf);
+ if (ret) {
+ dev_err(&dp->plug_prime->dev,
+ "unable to send Configure command (%d)\n",
+ ret);
+ dp->state = DP_STATE_CONFIGURE;
+ goto disable_prime;
+ }
+ break;
case DP_STATE_EXIT:
if (typec_altmode_exit(dp->alt))
dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
break;
+ case DP_STATE_EXIT_PRIME:
+ if (typec_cable_altmode_exit(dp->plug_prime, TYPEC_PLUG_SOP_P))
+ dev_err(&dp->plug_prime->dev, "Exit Mode Failed!\n");
+ break;
default:
break;
}
@@ -254,6 +310,13 @@ static void dp_altmode_work(struct work_struct *work)
dp->state = DP_STATE_IDLE;
mutex_unlock(&dp->lock);
+ return;
+
+disable_prime:
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ schedule_work(&dp->work);
+ mutex_unlock(&dp->lock);
}
static void dp_altmode_attention(struct typec_altmode *alt, const u32 vdo)
@@ -314,6 +377,8 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
dp->hpd = false;
sysfs_notify(&dp->alt->dev.kobj, "displayport", "hpd");
}
+ if (dp->plug_prime)
+ dp->state = DP_STATE_EXIT_PRIME;
break;
case DP_CMD_STATUS_UPDATE:
dp->data.status = *vdo;
@@ -348,10 +413,84 @@ err_unlock:
return ret;
}
+static int dp_cable_altmode_vdm(struct typec_altmode *alt, enum typec_plug_index sop,
+ const u32 hdr, const u32 *vdo, int count)
+{
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int cmd_type = PD_VDO_CMDT(hdr);
+ int cmd = PD_VDO_CMD(hdr);
+ int ret = 0;
+
+ mutex_lock(&dp->lock);
+
+ if (dp->state != DP_STATE_IDLE) {
+ ret = -EBUSY;
+ goto err_unlock;
+ }
+
+ switch (cmd_type) {
+ case CMDT_RSP_ACK:
+ switch (cmd) {
+ case CMD_ENTER_MODE:
+ typec_altmode_update_active(dp->plug_prime, true);
+ dp->state = DP_STATE_ENTER;
+ break;
+ case CMD_EXIT_MODE:
+ dp->data_prime.status = 0;
+ dp->data_prime.conf = 0;
+ typec_altmode_update_active(dp->plug_prime, false);
+ break;
+ case DP_CMD_CONFIGURE:
+ dp->state = DP_STATE_CONFIGURE;
+ break;
+ default:
+ break;
+ }
+ break;
+ case CMDT_RSP_NAK:
+ switch (cmd) {
+ case DP_CMD_CONFIGURE:
+ dp->data_prime.conf = 0;
+ /* Attempt to configure on SOP, drop plug */
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ dp->state = DP_STATE_CONFIGURE;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (dp->state != DP_STATE_IDLE)
+ schedule_work(&dp->work);
+
+err_unlock:
+ mutex_unlock(&dp->lock);
+ return ret;
+}
+
static int dp_altmode_activate(struct typec_altmode *alt, int activate)
{
- return activate ? typec_altmode_enter(alt, NULL) :
- typec_altmode_exit(alt);
+ struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
+ int ret;
+
+ if (activate) {
+ if (dp->plug_prime) {
+ ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
+ if (ret < 0) {
+ typec_altmode_put_plug(dp->plug_prime);
+ dp->plug_prime = NULL;
+ } else {
+ return ret;
+ }
+ }
+ return typec_altmode_enter(alt, NULL);
+ } else {
+ return typec_altmode_exit(alt);
+ }
}
static const struct typec_altmode_ops dp_altmode_ops = {
@@ -360,6 +499,10 @@ static const struct typec_altmode_ops dp_altmode_ops = {
.activate = dp_altmode_activate,
};
+static const struct typec_cable_ops dp_cable_ops = {
+ .vdm = dp_cable_altmode_vdm,
+};
+
static const char * const configurations[] = {
[DP_CONF_USB] = "USB",
[DP_CONF_DFP_D] = "source",
@@ -501,6 +644,7 @@ pin_assignment_store(struct device *dev, struct device_attribute *attr,
/* Only send Configure command if a configuration has been set */
if (dp->alt->active && DP_CONF_CURRENTLY(dp->data.conf)) {
+ /* TODO: send manual Configure over SOP' */
ret = dp_altmode_configure_vdm(dp, conf);
if (ret)
goto out_unlock;
@@ -579,6 +723,7 @@ static const struct attribute_group *displayport_groups[] = {
int dp_altmode_probe(struct typec_altmode *alt)
{
const struct typec_altmode *port = typec_altmode_get_partner(alt);
+ struct typec_altmode *plug = typec_altmode_get_plug(alt, TYPEC_PLUG_SOP_P);
struct fwnode_handle *fwnode;
struct dp_altmode *dp;
@@ -603,6 +748,13 @@ int dp_altmode_probe(struct typec_altmode *alt)
alt->desc = "DisplayPort";
alt->ops = &dp_altmode_ops;
+ if (plug) {
+ plug->desc = "DisplayPort";
+ plug->cable_ops = &dp_cable_ops;
+ }
+
+ dp->plug_prime = plug;
+
fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
if (fwnode_property_present(fwnode, "displayport"))
dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
@@ -612,8 +764,10 @@ int dp_altmode_probe(struct typec_altmode *alt)
dp->connector_fwnode = NULL;
typec_altmode_set_drvdata(alt, dp);
+ if (plug)
+ typec_altmode_set_drvdata(plug, dp);
- dp->state = DP_STATE_ENTER;
+ dp->state = plug ? DP_STATE_ENTER_PRIME : DP_STATE_ENTER;
schedule_work(&dp->work);
return 0;
@@ -625,6 +779,7 @@ void dp_altmode_remove(struct typec_altmode *alt)
struct dp_altmode *dp = typec_altmode_get_drvdata(alt);
cancel_work_sync(&dp->work);
+ typec_altmode_put_plug(dp->plug_prime);
if (dp->connector_fwnode) {
drm_connector_oob_hotplug_event(dp->connector_fwnode,
diff --git a/drivers/usb/typec/bus.c b/drivers/usb/typec/bus.c
index e95ec7e382bb..6ea103e1abae 100644
--- a/drivers/usb/typec/bus.c
+++ b/drivers/usb/typec/bus.c
@@ -245,6 +245,108 @@ typec_altmode_get_partner(struct typec_altmode *adev)
EXPORT_SYMBOL_GPL(typec_altmode_get_partner);
/* -------------------------------------------------------------------------- */
+/* API for cable alternate modes */
+
+/**
+ * typec_cable_altmode_enter - Enter Mode
+ * @adev: The alternate mode
+ * @sop: Cable plug target for Enter Mode command
+ * @vdo: VDO for the Enter Mode command
+ *
+ * Alternate mode drivers use this function to enter mode on the cable plug.
+ * If the alternate mode does not require VDO, @vdo must be NULL.
+ */
+int typec_cable_altmode_enter(struct typec_altmode *adev, enum typec_plug_index sop, u32 *vdo)
+{
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!adev || adev->active)
+ return 0;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
+
+ if (!pdev->active)
+ return -EPERM;
+
+ if (!pdev->cable_ops || !pdev->cable_ops->enter)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->enter(pdev, sop, vdo);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_enter);
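A minimal usage sketch, not part of the patch: an alternate mode driver can try the cable plug first and fall back to SOP-only operation, mirroring what dp_altmode_activate() does above. All names below are illustrative, and the declarations are assumed to come from <linux/usb/typec.h> and <linux/usb/typec_altmode.h>.

	static int example_enter_with_cable(struct typec_altmode *alt,
					    struct typec_altmode **plug_prime)
	{
		int ret;

		if (*plug_prime) {
			/* This mode needs no VDO, so pass NULL */
			ret = typec_cable_altmode_enter(alt, TYPEC_PLUG_SOP_P, NULL);
			if (ret < 0) {
				/* Cable plug unusable: drop it, use SOP only */
				typec_altmode_put_plug(*plug_prime);
				*plug_prime = NULL;
			} else {
				return ret;
			}
		}

		return typec_altmode_enter(alt, NULL);
	}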
+
+/**
+ * typec_cable_altmode_exit - Exit Mode
+ * @adev: The alternate mode
+ * @sop: Cable plug target for Exit Mode command
+ *
+ * The alternate mode drivers use this function to exit mode on the cable plug.
+ */
+int typec_cable_altmode_exit(struct typec_altmode *adev, enum typec_plug_index sop)
+{
+ struct altmode *partner = to_altmode(adev)->partner;
+ struct typec_altmode *pdev;
+
+ if (!adev || !adev->active)
+ return 0;
+
+ if (!partner)
+ return -ENODEV;
+
+ pdev = &partner->adev;
+
+ if (!pdev->cable_ops || !pdev->cable_ops->exit)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->exit(pdev, sop);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_exit);
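The matching teardown, again only a sketch with assumed names: exit the mode on SOP' and release the plug reference; exiting on SOP stays on the existing typec_altmode_exit() path.

	static void example_exit_cable_mode(struct typec_altmode **plug_prime)
	{
		if (!*plug_prime)
			return;

		if (typec_cable_altmode_exit(*plug_prime, TYPEC_PLUG_SOP_P))
			dev_err(&(*plug_prime)->dev, "SOP' Exit Mode failed\n");

		typec_altmode_put_plug(*plug_prime);
		*plug_prime = NULL;
	}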
+
+/**
+ * typec_cable_altmode_vdm - Send Vendor Defined Messages (VDM) between the cable plug and port.
+ * @adev: Alternate mode handle
+ * @sop: Cable plug target for VDM
+ * @header: VDM Header
+ * @vdo: Array of Vendor Defined Data Objects
+ * @count: Number of Data Objects
+ *
+ * The alternate mode drivers use this function for SVID-specific communication
+ * with the cable plugs. The port drivers use it to deliver the Structured VDMs
+ * received from the cable plugs to the alternate mode drivers.
+ */
+int typec_cable_altmode_vdm(struct typec_altmode *adev, enum typec_plug_index sop,
+ const u32 header, const u32 *vdo, int count)
+{
+ struct altmode *altmode;
+ struct typec_altmode *pdev;
+
+ if (!adev)
+ return 0;
+
+ altmode = to_altmode(adev);
+
+ if (is_typec_plug(adev->dev.parent)) {
+ if (!altmode->partner)
+ return -ENODEV;
+ pdev = &altmode->partner->adev;
+ } else {
+ if (!altmode->plug[sop])
+ return -ENODEV;
+ pdev = &altmode->plug[sop]->adev;
+ }
+
+ if (!pdev->cable_ops || !pdev->cable_ops->vdm)
+ return -EOPNOTSUPP;
+
+ return pdev->cable_ops->vdm(pdev, sop, header, vdo, count);
+}
+EXPORT_SYMBOL_GPL(typec_cable_altmode_vdm);
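For the delivery direction, a hedged sketch of the port-driver side (function and parameter names assumed): a Structured VDM received on SOP' is handed to the alternate mode driver through this helper, passing the port's own registered altmode so the dispatch above resolves to the plug's cable_ops.

	static void example_rx_sop_prime_vdm(struct typec_altmode *port_adev,
					     const u32 *payload, int cnt)
	{
		/*
		 * payload[0] is the VDM header, the remaining words are the
		 * VDOs; the count convention is assumed to match what the
		 * driver already uses for typec_altmode_vdm() on SOP.
		 */
		typec_cable_altmode_vdm(port_adev, TYPEC_PLUG_SOP_P,
					payload[0], &payload[1], cnt);
	}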
+
+/* -------------------------------------------------------------------------- */
/* API for the alternate mode drivers */
/**
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 015aa9253353..389c7f0b8d93 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -21,7 +21,7 @@
static DEFINE_IDA(typec_index_ida);
-struct class typec_class = {
+const struct class typec_class = {
.name = "typec",
};
@@ -2132,6 +2132,46 @@ int typec_get_negotiated_svdm_version(struct typec_port *port)
EXPORT_SYMBOL_GPL(typec_get_negotiated_svdm_version);
/**
+ * typec_get_cable_svdm_version - Get cable negotiated SVDM Version
+ * @port: USB Type-C Port.
+ *
+ * Get the negotiated SVDM Version for the cable. The Version is set to the port
+ * default value based on the PD Revision during cable registration, and updated
+ * after a successful Discover Identity if the negotiated value is less than the
+ * default.
+ *
+ * Returns the negotiated usb_pd_svdm_ver if a cable has been registered, otherwise -ENODEV.
+ */
+int typec_get_cable_svdm_version(struct typec_port *port)
+{
+ enum usb_pd_svdm_ver svdm_version;
+ struct device *cable_dev;
+
+ cable_dev = device_find_child(&port->dev, NULL, cable_match);
+ if (!cable_dev)
+ return -ENODEV;
+
+ svdm_version = to_typec_cable(cable_dev)->svdm_version;
+ put_device(cable_dev);
+
+ return svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_get_cable_svdm_version);
+
+/**
+ * typec_cable_set_svdm_version - Set negotiated Structured VDM (SVDM) Version
+ * @cable: USB Type-C Active Cable that supports SVDM
+ * @svdm_version: Negotiated SVDM Version
+ *
+ * This routine is used to save the negotiated SVDM Version.
+ */
+void typec_cable_set_svdm_version(struct typec_cable *cable, enum usb_pd_svdm_ver svdm_version)
+{
+ cable->svdm_version = svdm_version;
+}
+EXPORT_SYMBOL_GPL(typec_cable_set_svdm_version);
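A sketch tying the two helpers together (names assumed): the port driver records the revision negotiated during cable Discover Identity with typec_cable_set_svdm_version(), and an alternate mode driver queries it when building an SOP' VDM header, here using the VDO() macro from <linux/usb/pd_vdo.h> and the DisplayPort command defines from <linux/usb/typec_dp.h>.

	static int example_build_sop_prime_header(struct typec_port *port, u32 *header)
	{
		int ver = typec_get_cable_svdm_version(port);

		if (ver < 0)
			return ver;	/* no cable registered */

		*header = VDO(USB_TYPEC_DP_SID, 1, ver, DP_CMD_STATUS_UPDATE);
		return 0;
	}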
+
+/**
* typec_get_drvdata - Return private driver data pointer
* @port: USB Type-C port
*/
@@ -2281,6 +2321,25 @@ void typec_port_register_altmodes(struct typec_port *port,
EXPORT_SYMBOL_GPL(typec_port_register_altmodes);
/**
+ * typec_port_register_cable_ops - Register typec_cable_ops to port altmodes
+ * @altmodes: USB Type-C Port's altmode vector
+ * @max_altmodes: The maximum number of alt modes supported by the port
+ * @ops: Cable alternate mode operations
+ */
+void typec_port_register_cable_ops(struct typec_altmode **altmodes, int max_altmodes,
+ const struct typec_cable_ops *ops)
+{
+ int i;
+
+ for (i = 0; i < max_altmodes; i++) {
+ if (!altmodes[i])
+ return;
+ altmodes[i]->cable_ops = ops;
+ }
+}
+EXPORT_SYMBOL_GPL(typec_port_register_cable_ops);
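On the port-driver side, a minimal registration sketch (callback and array names assumed, not from this patch): register the port altmodes as before, then attach one shared set of cable ops so SOP' commands issued by alternate mode drivers reach the port driver's transmit path.

	static int example_cable_enter(struct typec_altmode *alt,
				       enum typec_plug_index sop, u32 *vdo)
	{
		/* A real driver would queue an Enter Mode VDM on SOP' here */
		return 0;
	}

	static const struct typec_cable_ops example_cable_ops = {
		.enter = example_cable_enter,
		/* .exit and .vdm omitted for brevity */
	};

	static void example_register(struct typec_port *port, void *drvdata,
				     const struct typec_altmode_ops *altmode_ops,
				     struct typec_altmode **port_altmodes, int n)
	{
		typec_port_register_altmodes(port, altmode_ops, drvdata,
					     port_altmodes, n);
		typec_port_register_cable_ops(port_altmodes, n, &example_cable_ops);
	}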
+
+/**
* typec_register_port - Register a USB Type-C Port
* @parent: Parent device
* @cap: Description of the port
diff --git a/drivers/usb/typec/class.h b/drivers/usb/typec/class.h
index c36761ba3f59..7485cdb9dd20 100644
--- a/drivers/usb/typec/class.h
+++ b/drivers/usb/typec/class.h
@@ -23,6 +23,7 @@ struct typec_cable {
struct usb_pd_identity *identity;
unsigned int active:1;
u16 pd_revision; /* 0300H = "3.0" */
+ enum usb_pd_svdm_ver svdm_version;
};
struct typec_partner {
@@ -92,9 +93,9 @@ extern const struct device_type typec_port_dev_type;
#define is_typec_plug(dev) ((dev)->type == &typec_plug_dev_type)
#define is_typec_port(dev) ((dev)->type == &typec_port_dev_type)
-extern struct class typec_mux_class;
-extern struct class retimer_class;
-extern struct class typec_class;
+extern const struct class typec_mux_class;
+extern const struct class retimer_class;
+extern const struct class typec_class;
#if defined(CONFIG_ACPI)
int typec_link_ports(struct typec_port *connector);
diff --git a/drivers/usb/typec/mux.c b/drivers/usb/typec/mux.c
index 80dd91938d96..49926d6e72c7 100644
--- a/drivers/usb/typec/mux.c
+++ b/drivers/usb/typec/mux.c
@@ -469,6 +469,6 @@ void *typec_mux_get_drvdata(struct typec_mux_dev *mux_dev)
}
EXPORT_SYMBOL_GPL(typec_mux_get_drvdata);
-struct class typec_mux_class = {
+const struct class typec_mux_class = {
.name = "typec_mux",
};
diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig
index d2cb5e733e57..399c7b0983df 100644
--- a/drivers/usb/typec/mux/Kconfig
+++ b/drivers/usb/typec/mux/Kconfig
@@ -36,6 +36,16 @@ config TYPEC_MUX_INTEL_PMC
control the USB role switch and also the multiplexer/demultiplexer
switches used with USB Type-C Alternate Modes.
+config TYPEC_MUX_IT5205
+ tristate "ITE IT5205 Type-C USB Alt Mode Passive MUX driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Driver for the ITE IT5205 Type-C USB Alternate Mode Passive MUX
+ which provides support for muxing DisplayPort and sideband signals
+ on a common USB Type-C connector.
+ If compiled as a module, the module will be named it5205.
+
config TYPEC_MUX_NB7VPQ904M
tristate "On Semiconductor NB7VPQ904M Type-C redriver driver"
depends on I2C
diff --git a/drivers/usb/typec/mux/Makefile b/drivers/usb/typec/mux/Makefile
index 57dc9ac6f8dc..bb96f30267af 100644
--- a/drivers/usb/typec/mux/Makefile
+++ b/drivers/usb/typec/mux/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_TYPEC_MUX_FSA4480) += fsa4480.o
obj-$(CONFIG_TYPEC_MUX_GPIO_SBU) += gpio-sbu-mux.o
obj-$(CONFIG_TYPEC_MUX_PI3USB30532) += pi3usb30532.o
obj-$(CONFIG_TYPEC_MUX_INTEL_PMC) += intel_pmc_mux.o
+obj-$(CONFIG_TYPEC_MUX_IT5205) += it5205.o
obj-$(CONFIG_TYPEC_MUX_NB7VPQ904M) += nb7vpq904m.o
obj-$(CONFIG_TYPEC_MUX_PTN36502) += ptn36502.o
obj-$(CONFIG_TYPEC_MUX_WCD939X_USBSS) += wcd939x-usbss.o
diff --git a/drivers/usb/typec/mux/it5205.c b/drivers/usb/typec/mux/it5205.c
new file mode 100644
index 000000000000..5535932e42cd
--- /dev/null
+++ b/drivers/usb/typec/mux/it5205.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ITE IT5205 Type-C USB alternate mode passive mux
+ *
+ * Copyright (c) 2020 MediaTek Inc.
+ * Copyright (c) 2024 Collabora Ltd.
+ * AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/tcpm.h>
+#include <linux/usb/typec.h>
+#include <linux/usb/typec_dp.h>
+#include <linux/usb/typec_mux.h>
+
+#define IT5205_REG_CHIP_ID(x) (0x4 + (x))
+#define IT5205FN_CHIP_ID 0x35323035 /* "5205" */
+
+/* MUX power down register */
+#define IT5205_REG_MUXPDR 0x10
+#define IT5205_MUX_POWER_DOWN BIT(0)
+
+/* MUX control register */
+#define IT5205_REG_MUXCR 0x11
+#define IT5205_POLARITY_INVERTED BIT(4)
+#define IT5205_DP_USB_CTRL_MASK GENMASK(3, 0)
+#define IT5205_DP 0x0f
+#define IT5205_DP_USB 0x03
+#define IT5205_USB 0x07
+
+/* Vref Select Register */
+#define IT5205_REG_VSR 0x10
+#define IT5205_VREF_SELECT_MASK GENMASK(5, 4)
+#define IT5205_VREF_SELECT_3_3V 0x00
+#define IT5205_VREF_SELECT_OFF 0x20
+
+/* CSBU Over Voltage Protection Register */
+#define IT5205_REG_CSBUOVPSR 0x1e
+#define IT5205_OVP_SELECT_MASK GENMASK(5, 4)
+#define IT5205_OVP_3_90V 0x00
+#define IT5205_OVP_3_68V 0x10
+#define IT5205_OVP_3_62V 0x20
+#define IT5205_OVP_3_57V 0x30
+
+/* CSBU Switch Register */
+#define IT5205_REG_CSBUSR 0x22
+#define IT5205_CSBUSR_SWITCH BIT(0)
+
+/* Interrupt Switch Register */
+#define IT5205_REG_ISR 0x25
+#define IT5205_ISR_CSBU_MASK BIT(4)
+#define IT5205_ISR_CSBU_OVP BIT(0)
+
+struct it5205 {
+ struct i2c_client *client;
+ struct regmap *regmap;
+ struct typec_switch_dev *sw;
+ struct typec_mux_dev *mux;
+};
+
+static int it5205_switch_set(struct typec_switch_dev *sw, enum typec_orientation orientation)
+{
+ struct it5205 *it = typec_switch_get_drvdata(sw);
+
+ switch (orientation) {
+ case TYPEC_ORIENTATION_NORMAL:
+ regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_POLARITY_INVERTED, 0);
+ break;
+ case TYPEC_ORIENTATION_REVERSE:
+ regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_POLARITY_INVERTED, IT5205_POLARITY_INVERTED);
+ break;
+ case TYPEC_ORIENTATION_NONE:
+ fallthrough;
+ default:
+ regmap_write(it->regmap, IT5205_REG_MUXCR, 0);
+ break;
+ }
+
+ return 0;
+}
+
+static int it5205_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state)
+{
+ struct it5205 *it = typec_mux_get_drvdata(mux);
+ u8 val;
+
+ if (state->mode >= TYPEC_STATE_MODAL &&
+ state->alt->svid != USB_TYPEC_DP_SID)
+ return -EINVAL;
+
+ switch (state->mode) {
+ case TYPEC_STATE_USB:
+ val = IT5205_USB;
+ break;
+ case TYPEC_DP_STATE_C:
+ fallthrough;
+ case TYPEC_DP_STATE_E:
+ val = IT5205_DP;
+ break;
+ case TYPEC_DP_STATE_D:
+ val = IT5205_DP_USB;
+ break;
+ case TYPEC_STATE_SAFE:
+ fallthrough;
+ default:
+ val = 0;
+ break;
+ }
+
+ return regmap_update_bits(it->regmap, IT5205_REG_MUXCR,
+ IT5205_DP_USB_CTRL_MASK, val);
+}
+
+static irqreturn_t it5205_irq_handler(int irq, void *data)
+{
+ struct it5205 *it = data;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(it->regmap, IT5205_REG_ISR, &val);
+ if (ret)
+ return IRQ_NONE;
+
+ if (val & IT5205_ISR_CSBU_OVP) {
+ dev_warn(&it->client->dev, "Overvoltage detected!\n");
+
+ /* Reset CSBU */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, 0);
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, IT5205_CSBUSR_SWITCH);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void it5205_enable_ovp(struct it5205 *it)
+{
+ /* Select Vref 3.3v */
+ regmap_update_bits(it->regmap, IT5205_REG_VSR,
+ IT5205_VREF_SELECT_MASK, IT5205_VREF_SELECT_3_3V);
+
+ /* Trigger OVP at 3.68V */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUOVPSR,
+ IT5205_OVP_SELECT_MASK, IT5205_OVP_3_68V);
+
+ /* Unmask OVP interrupt */
+ regmap_update_bits(it->regmap, IT5205_REG_ISR,
+ IT5205_ISR_CSBU_MASK, 0);
+
+ /* Enable CSBU Interrupt */
+ regmap_update_bits(it->regmap, IT5205_REG_CSBUSR,
+ IT5205_CSBUSR_SWITCH, IT5205_CSBUSR_SWITCH);
+}
+
+static const struct regmap_config it5205_regmap = {
+ .max_register = 0x2f,
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int it5205_probe(struct i2c_client *client)
+{
+ struct typec_switch_desc sw_desc = { };
+ struct typec_mux_desc mux_desc = { };
+ struct device *dev = &client->dev;
+ struct it5205 *it;
+ u32 val, chipid = 0;
+ int i, ret;
+
+ it = devm_kzalloc(dev, sizeof(*it), GFP_KERNEL);
+ if (!it)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(dev, "vcc");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get regulator\n");
+
+ it->client = client;
+
+ it->regmap = devm_regmap_init_i2c(client, &it5205_regmap);
+ if (IS_ERR(it->regmap))
+ return dev_err_probe(dev, PTR_ERR(it->regmap),
+ "Failed to init regmap\n");
+
+ /* The IT5205 needs a long time to power up after enabling its regulator */
+ msleep(50);
+
+ /* Unset poweroff bit */
+ ret = regmap_write(it->regmap, IT5205_REG_MUXPDR, 0);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to set power on\n");
+
+ /* Read the 32 bits ChipID */
+ for (i = 3; i >= 0; i--) {
+ ret = regmap_read(it->regmap, IT5205_REG_CHIP_ID(i), &val);
+ if (ret)
+ return ret;
+
+ chipid |= val << (i * 8);
+ }
+
+ if (chipid != IT5205FN_CHIP_ID)
+ return dev_err_probe(dev, -EINVAL,
+ "Unknown ChipID 0x%x\n", chipid);
+
+ /* Initialize as USB mode with default (non-inverted) polarity */
+ ret = regmap_write(it->regmap, IT5205_REG_MUXCR, IT5205_USB);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot set mode to USB\n");
+
+ sw_desc.drvdata = it;
+ sw_desc.fwnode = dev_fwnode(dev);
+ sw_desc.set = it5205_switch_set;
+
+ it->sw = typec_switch_register(dev, &sw_desc);
+ if (IS_ERR(it->sw))
+ return dev_err_probe(dev, PTR_ERR(it->sw),
+ "failed to register typec switch\n");
+
+ mux_desc.drvdata = it;
+ mux_desc.fwnode = dev_fwnode(dev);
+ mux_desc.set = it5205_mux_set;
+
+ it->mux = typec_mux_register(dev, &mux_desc);
+ if (IS_ERR(it->mux)) {
+ typec_switch_unregister(it->sw);
+ return dev_err_probe(dev, PTR_ERR(it->mux),
+ "failed to register typec mux\n");
+ }
+
+ i2c_set_clientdata(client, it);
+
+ if (of_property_read_bool(dev->of_node, "ite,ovp-enable") && client->irq) {
+ it5205_enable_ovp(it);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ it5205_irq_handler,
+ IRQF_ONESHOT, dev_name(dev), it);
+ if (ret) {
+ typec_mux_unregister(it->mux);
+ typec_switch_unregister(it->sw);
+ return dev_err_probe(dev, ret, "Failed to request irq\n");
+ }
+ }
+
+ return 0;
+}
+
+static void it5205_remove(struct i2c_client *client)
+{
+ struct it5205 *it = i2c_get_clientdata(client);
+
+ typec_mux_unregister(it->mux);
+ typec_switch_unregister(it->sw);
+}
+
+static const struct i2c_device_id it5205_table[] = {
+ { "it5205" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(i2c, it5205_table);
+
+static const struct of_device_id it5205_of_table[] = {
+ { .compatible = "ite,it5205" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, it5205_of_table);
+
+static struct i2c_driver it5205_driver = {
+ .driver = {
+ .name = "it5205",
+ .of_match_table = it5205_of_table,
+ },
+ .probe = it5205_probe,
+ .remove = it5205_remove,
+ .id_table = it5205_table,
+};
+module_i2c_driver(it5205_driver);
+
+MODULE_AUTHOR("Tianping Fang <tianping.fang@mediatek.com>");
+MODULE_AUTHOR("AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>");
+MODULE_DESCRIPTION("ITE IT5205 alternate mode passive MUX driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/typec/pd.c b/drivers/usb/typec/pd.c
index b9cca2be76fc..d78c04a421bc 100644
--- a/drivers/usb/typec/pd.c
+++ b/drivers/usb/typec/pd.c
@@ -157,7 +157,7 @@ static const struct attribute_group source_fixed_supply_group = {
};
__ATTRIBUTE_GROUPS(source_fixed_supply);
-static struct device_type source_fixed_supply_type = {
+static const struct device_type source_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_fixed_supply_groups,
@@ -182,7 +182,7 @@ static const struct attribute_group sink_fixed_supply_group = {
};
__ATTRIBUTE_GROUPS(sink_fixed_supply);
-static struct device_type sink_fixed_supply_type = {
+static const struct device_type sink_fixed_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_fixed_supply_groups,
@@ -213,7 +213,7 @@ static struct attribute *source_variable_supply_attrs[] = {
};
ATTRIBUTE_GROUPS(source_variable_supply);
-static struct device_type source_variable_supply_type = {
+static const struct device_type source_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_variable_supply_groups,
@@ -227,7 +227,7 @@ static struct attribute *sink_variable_supply_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_variable_supply);
-static struct device_type sink_variable_supply_type = {
+static const struct device_type sink_variable_supply_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_variable_supply_groups,
@@ -258,7 +258,7 @@ static struct attribute *source_battery_attrs[] = {
};
ATTRIBUTE_GROUPS(source_battery);
-static struct device_type source_battery_type = {
+static const struct device_type source_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_battery_groups,
@@ -272,7 +272,7 @@ static struct attribute *sink_battery_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_battery);
-static struct device_type sink_battery_type = {
+static const struct device_type sink_battery_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_battery_groups,
@@ -339,7 +339,7 @@ static struct attribute *source_pps_attrs[] = {
};
ATTRIBUTE_GROUPS(source_pps);
-static struct device_type source_pps_type = {
+static const struct device_type source_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = source_pps_groups,
@@ -353,7 +353,7 @@ static struct attribute *sink_pps_attrs[] = {
};
ATTRIBUTE_GROUPS(sink_pps);
-static struct device_type sink_pps_type = {
+static const struct device_type sink_pps_type = {
.name = "pdo",
.release = pdo_release,
.groups = sink_pps_groups,
@@ -371,30 +371,30 @@ static const char * const apdo_supply_name[] = {
[APDO_TYPE_PPS] = "programmable_supply",
};
-static struct device_type *source_type[] = {
+static const struct device_type *source_type[] = {
[PDO_TYPE_FIXED] = &source_fixed_supply_type,
[PDO_TYPE_BATT] = &source_battery_type,
[PDO_TYPE_VAR] = &source_variable_supply_type,
};
-static struct device_type *source_apdo_type[] = {
+static const struct device_type *source_apdo_type[] = {
[APDO_TYPE_PPS] = &source_pps_type,
};
-static struct device_type *sink_type[] = {
+static const struct device_type *sink_type[] = {
[PDO_TYPE_FIXED] = &sink_fixed_supply_type,
[PDO_TYPE_BATT] = &sink_battery_type,
[PDO_TYPE_VAR] = &sink_variable_supply_type,
};
-static struct device_type *sink_apdo_type[] = {
+static const struct device_type *sink_apdo_type[] = {
[APDO_TYPE_PPS] = &sink_pps_type,
};
/* REVISIT: Export when EPR_*_Capabilities need to be supported. */
static int add_pdo(struct usb_power_delivery_capabilities *cap, u32 pdo, int position)
{
- struct device_type *type;
+ const struct device_type *type;
const char *name;
struct pdo *p;
int ret;
@@ -460,7 +460,7 @@ static void pd_capabilities_release(struct device *dev)
kfree(to_usb_power_delivery_capabilities(dev));
}
-static struct device_type pd_capabilities_type = {
+static const struct device_type pd_capabilities_type = {
.name = "capabilities",
.release = pd_capabilities_release,
};
@@ -575,7 +575,7 @@ static void pd_release(struct device *dev)
kfree(pd);
}
-static struct device_type pd_type = {
+static const struct device_type pd_type = {
.name = "usb_power_delivery",
.release = pd_release,
.groups = pd_groups,
diff --git a/drivers/usb/typec/retimer.c b/drivers/usb/typec/retimer.c
index 4a7d1b5c4d86..b519fcf358ca 100644
--- a/drivers/usb/typec/retimer.c
+++ b/drivers/usb/typec/retimer.c
@@ -155,6 +155,6 @@ void *typec_retimer_get_drvdata(struct typec_retimer *retimer)
}
EXPORT_SYMBOL_GPL(typec_retimer_get_drvdata);
-struct class retimer_class = {
+const struct class retimer_class = {
.name = "retimer",
};
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index bc21006e979c..ef18a448b740 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1467,7 +1467,7 @@ static int fusb302_pd_read_message(struct fusb302_chip *chip,
if ((!len) && (pd_header_type_le(msg->header) == PD_CTRL_GOOD_CRC))
tcpm_pd_transmit_complete(chip->tcpm_port, TCPC_TX_SUCCESS);
else
- tcpm_pd_receive(chip->tcpm_port, msg);
+ tcpm_pd_receive(chip->tcpm_port, msg, TCPC_TX_SOP);
return ret;
}
diff --git a/drivers/usb/typec/tcpm/qcom/Makefile b/drivers/usb/typec/tcpm/qcom/Makefile
index dc1e8832e197..cc23042b9487 100644
--- a/drivers/usb/typec/tcpm/qcom/Makefile
+++ b/drivers/usb/typec/tcpm/qcom/Makefile
@@ -3,4 +3,5 @@
obj-$(CONFIG_TYPEC_QCOM_PMIC) += qcom_pmic_tcpm.o
qcom_pmic_tcpm-y += qcom_pmic_typec.o \
qcom_pmic_typec_port.o \
- qcom_pmic_typec_pdphy.o
+ qcom_pmic_typec_pdphy.o \
+ qcom_pmic_typec_pdphy_stub.o
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
index 1a2b4bddaa97..e48412cdcb0f 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.c
@@ -20,130 +20,15 @@
#include <drm/bridge/aux-bridge.h>
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"
#include "qcom_pmic_typec_port.h"
struct pmic_typec_resources {
- struct pmic_typec_pdphy_resources *pdphy_res;
- struct pmic_typec_port_resources *port_res;
+ const struct pmic_typec_pdphy_resources *pdphy_res;
+ const struct pmic_typec_port_resources *port_res;
};
-struct pmic_typec {
- struct device *dev;
- struct tcpm_port *tcpm_port;
- struct tcpc_dev tcpc;
- struct pmic_typec_pdphy *pmic_typec_pdphy;
- struct pmic_typec_port *pmic_typec_port;
- bool vbus_enabled;
- struct mutex lock; /* VBUS state serialization */
-};
-
-#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
-
-static int qcom_pmic_typec_get_vbus(struct tcpc_dev *tcpc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
- int ret;
-
- mutex_lock(&tcpm->lock);
- ret = tcpm->vbus_enabled || qcom_pmic_typec_port_get_vbus(tcpm->pmic_typec_port);
- mutex_unlock(&tcpm->lock);
-
- return ret;
-}
-
-static int qcom_pmic_typec_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
- int ret = 0;
-
- mutex_lock(&tcpm->lock);
- if (tcpm->vbus_enabled == on)
- goto done;
-
- ret = qcom_pmic_typec_port_set_vbus(tcpm->pmic_typec_port, on);
- if (ret)
- goto done;
-
- tcpm->vbus_enabled = on;
- tcpm_vbus_change(tcpm->tcpm_port);
-
-done:
- dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
- mutex_unlock(&tcpm->lock);
-
- return ret;
-}
-
-static int qcom_pmic_typec_set_vconn(struct tcpc_dev *tcpc, bool on)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_set_vconn(tcpm->pmic_typec_port, on);
-}
-
-static int qcom_pmic_typec_get_cc(struct tcpc_dev *tcpc,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_get_cc(tcpm->pmic_typec_port, cc1, cc2);
-}
-
-static int qcom_pmic_typec_set_cc(struct tcpc_dev *tcpc,
- enum typec_cc_status cc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_set_cc(tcpm->pmic_typec_port, cc);
-}
-
-static int qcom_pmic_typec_set_polarity(struct tcpc_dev *tcpc,
- enum typec_cc_polarity pol)
-{
- /* Polarity is set separately by phy-qcom-qmp.c */
- return 0;
-}
-
-static int qcom_pmic_typec_start_toggling(struct tcpc_dev *tcpc,
- enum typec_port_type port_type,
- enum typec_cc_status cc)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_port_start_toggling(tcpm->pmic_typec_port,
- port_type, cc);
-}
-
-static int qcom_pmic_typec_set_roles(struct tcpc_dev *tcpc, bool attached,
- enum typec_role power_role,
- enum typec_data_role data_role)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_set_roles(tcpm->pmic_typec_pdphy,
- data_role, power_role);
-}
-
-static int qcom_pmic_typec_set_pd_rx(struct tcpc_dev *tcpc, bool on)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_set_pd_rx(tcpm->pmic_typec_pdphy, on);
-}
-
-static int qcom_pmic_typec_pd_transmit(struct tcpc_dev *tcpc,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev)
-{
- struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
-
- return qcom_pmic_typec_pdphy_pd_transmit(tcpm->pmic_typec_pdphy, type,
- msg, negotiated_rev);
-}
-
static int qcom_pmic_typec_init(struct tcpc_dev *tcpc)
{
return 0;
@@ -157,7 +42,7 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
const struct pmic_typec_resources *res;
struct regmap *regmap;
struct device *bridge_dev;
- u32 base[2];
+ u32 base;
int ret;
res = of_device_get_match_data(dev);
@@ -170,16 +55,6 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
tcpm->dev = dev;
tcpm->tcpc.init = qcom_pmic_typec_init;
- tcpm->tcpc.get_vbus = qcom_pmic_typec_get_vbus;
- tcpm->tcpc.set_vbus = qcom_pmic_typec_set_vbus;
- tcpm->tcpc.set_cc = qcom_pmic_typec_set_cc;
- tcpm->tcpc.get_cc = qcom_pmic_typec_get_cc;
- tcpm->tcpc.set_polarity = qcom_pmic_typec_set_polarity;
- tcpm->tcpc.set_vconn = qcom_pmic_typec_set_vconn;
- tcpm->tcpc.start_toggling = qcom_pmic_typec_start_toggling;
- tcpm->tcpc.set_pd_rx = qcom_pmic_typec_set_pd_rx;
- tcpm->tcpc.set_roles = qcom_pmic_typec_set_roles;
- tcpm->tcpc.pd_transmit = qcom_pmic_typec_pd_transmit;
regmap = dev_get_regmap(dev->parent, NULL);
if (!regmap) {
@@ -187,29 +62,30 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
return -ENODEV;
}
- ret = of_property_read_u32_array(np, "reg", base, 2);
+ ret = of_property_read_u32_index(np, "reg", 0, &base);
if (ret)
return ret;
- tcpm->pmic_typec_port = qcom_pmic_typec_port_alloc(dev);
- if (IS_ERR(tcpm->pmic_typec_port))
- return PTR_ERR(tcpm->pmic_typec_port);
-
- tcpm->pmic_typec_pdphy = qcom_pmic_typec_pdphy_alloc(dev);
- if (IS_ERR(tcpm->pmic_typec_pdphy))
- return PTR_ERR(tcpm->pmic_typec_pdphy);
-
- ret = qcom_pmic_typec_port_probe(pdev, tcpm->pmic_typec_port,
- res->port_res, regmap, base[0]);
+ ret = qcom_pmic_typec_port_probe(pdev, tcpm,
+ res->port_res, regmap, base);
if (ret)
return ret;
- ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm->pmic_typec_pdphy,
- res->pdphy_res, regmap, base[1]);
- if (ret)
- return ret;
+ if (res->pdphy_res) {
+ ret = of_property_read_u32_index(np, "reg", 1, &base);
+ if (ret)
+ return ret;
+
+ ret = qcom_pmic_typec_pdphy_probe(pdev, tcpm,
+ res->pdphy_res, regmap, base);
+ if (ret)
+ return ret;
+ } else {
+ ret = qcom_pmic_typec_pdphy_stub_probe(pdev, tcpm);
+ if (ret)
+ return ret;
+ }
- mutex_init(&tcpm->lock);
platform_set_drvdata(pdev, tcpm);
tcpm->tcpc.fwnode = device_get_named_child_node(tcpm->dev, "connector");
@@ -226,13 +102,11 @@ static int qcom_pmic_typec_probe(struct platform_device *pdev)
goto fwnode_remove;
}
- ret = qcom_pmic_typec_port_start(tcpm->pmic_typec_port,
- tcpm->tcpm_port);
+ ret = tcpm->port_start(tcpm, tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
- ret = qcom_pmic_typec_pdphy_start(tcpm->pmic_typec_pdphy,
- tcpm->tcpm_port);
+ ret = tcpm->pdphy_start(tcpm, tcpm->tcpm_port);
if (ret)
goto fwnode_remove;
@@ -248,91 +122,25 @@ static void qcom_pmic_typec_remove(struct platform_device *pdev)
{
struct pmic_typec *tcpm = platform_get_drvdata(pdev);
- qcom_pmic_typec_pdphy_stop(tcpm->pmic_typec_pdphy);
- qcom_pmic_typec_port_stop(tcpm->pmic_typec_port);
+ tcpm->pdphy_stop(tcpm);
+ tcpm->port_stop(tcpm);
tcpm_unregister_port(tcpm->tcpm_port);
fwnode_remove_software_node(tcpm->tcpc.fwnode);
}
-static struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
- .irq_params = {
- {
- .virq = PMIC_PDPHY_SIG_TX_IRQ,
- .irq_name = "sig-tx",
- },
- {
- .virq = PMIC_PDPHY_SIG_RX_IRQ,
- .irq_name = "sig-rx",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_IRQ,
- .irq_name = "msg-tx",
- },
- {
- .virq = PMIC_PDPHY_MSG_RX_IRQ,
- .irq_name = "msg-rx",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
- .irq_name = "msg-tx-failed",
- },
- {
- .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
- .irq_name = "msg-tx-discarded",
- },
- {
- .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
- .irq_name = "msg-rx-discarded",
- },
- },
- .nr_irqs = 7,
-};
-
-static struct pmic_typec_port_resources pm8150b_port_res = {
- .irq_params = {
- {
- .irq_name = "vpd-detect",
- .virq = PMIC_TYPEC_VPD_IRQ,
- },
-
- {
- .irq_name = "cc-state-change",
- .virq = PMIC_TYPEC_CC_STATE_IRQ,
- },
- {
- .irq_name = "vconn-oc",
- .virq = PMIC_TYPEC_VCONN_OC_IRQ,
- },
-
- {
- .irq_name = "vbus-change",
- .virq = PMIC_TYPEC_VBUS_IRQ,
- },
-
- {
- .irq_name = "attach-detach",
- .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
- },
- {
- .irq_name = "legacy-cable-detect",
- .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
- },
-
- {
- .irq_name = "try-snk-src-detect",
- .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
- },
- },
- .nr_irqs = 7,
+static const struct pmic_typec_resources pm8150b_typec_res = {
+ .pdphy_res = &pm8150b_pdphy_res,
+ .port_res = &pm8150b_port_res,
};
-static struct pmic_typec_resources pm8150b_typec_res = {
- .pdphy_res = &pm8150b_pdphy_res,
+static const struct pmic_typec_resources pmi632_typec_res = {
+ /* PD PHY not present */
.port_res = &pm8150b_port_res,
};
static const struct of_device_id qcom_pmic_typec_table[] = {
{ .compatible = "qcom,pm8150b-typec", .data = &pm8150b_typec_res },
+ { .compatible = "qcom,pmi632-typec", .data = &pmi632_typec_res },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_pmic_typec_table);
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h
new file mode 100644
index 000000000000..3c75820c9187
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023, Linaro Ltd. All rights reserved.
+ */
+
+#ifndef __QCOM_PMIC_TYPEC_H__
+#define __QCOM_PMIC_TYPEC_H__
+
+struct pmic_typec {
+ struct device *dev;
+ struct tcpm_port *tcpm_port;
+ struct tcpc_dev tcpc;
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
+ struct pmic_typec_port *pmic_typec_port;
+
+ int (*pdphy_start)(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port);
+ void (*pdphy_stop)(struct pmic_typec *tcpm);
+
+ int (*port_start)(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port);
+ void (*port_stop)(struct pmic_typec *tcpm);
+};
+
+#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)
+
+#endif
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
index 52c81378e36e..6560f4fc98d5 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.c
@@ -14,8 +14,74 @@
#include <linux/slab.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_pdphy.h"
+/* PD PHY register offsets and bit fields */
+#define USB_PDPHY_MSG_CONFIG_REG 0x40
+#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
+#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
+#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
+
+#define USB_PDPHY_EN_CONTROL_REG 0x46
+#define CONTROL_ENABLE BIT(0)
+
+#define USB_PDPHY_RX_STATUS_REG 0x4A
+#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
+
+#define USB_PDPHY_FRAME_FILTER_REG 0x4C
+#define FRAME_FILTER_EN_HARD_RESET BIT(5)
+#define FRAME_FILTER_EN_SOP BIT(0)
+
+#define USB_PDPHY_TX_SIZE_REG 0x42
+#define TX_SIZE_MASK 0xF
+
+#define USB_PDPHY_TX_CONTROL_REG 0x44
+#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
+#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
+#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
+#define TX_CONTROL_SEND_SIGNAL BIT(1)
+#define TX_CONTROL_SEND_MSG BIT(0)
+
+#define USB_PDPHY_RX_SIZE_REG 0x48
+
+#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
+#define RX_BUFFER_TOKEN BIT(0)
+
+#define USB_PDPHY_BIST_MODE_REG 0x4E
+#define BIST_MODE_MASK 0xF
+#define BIST_ENABLE BIT(7)
+#define PD_MSG_BIST 0x3
+#define PD_BIST_TEST_DATA_MODE 0x8
+
+#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
+#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
+
+#define USB_PDPHY_RX_BUFFER_REG 0x80
+
+/* VDD regulator */
+#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
+#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
+#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
+
+/* Message Spec Rev field */
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+
+/* timers */
+#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
+#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
+
+/* Interrupt numbers */
+#define PMIC_PDPHY_SIG_TX_IRQ 0x0
+#define PMIC_PDPHY_SIG_RX_IRQ 0x1
+#define PMIC_PDPHY_MSG_TX_IRQ 0x2
+#define PMIC_PDPHY_MSG_RX_IRQ 0x3
+#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
+#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
+#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
+#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
+
struct pmic_typec_pdphy_irq_data {
int virq;
int irq;
@@ -231,11 +297,13 @@ done:
return ret;
}
-int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev)
+static int qcom_pmic_typec_pdphy_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
int ret;
@@ -299,7 +367,7 @@ done:
if (!ret) {
dev_vdbg(dev, "pd_receive: handing %d bytes to tcpm\n", size);
- tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg);
+ tcpm_pd_receive(pmic_typec_pdphy->tcpm_port, &msg, TCPC_TX_SOP);
}
}
@@ -336,8 +404,10 @@ static irqreturn_t qcom_pmic_typec_pdphy_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on)
+static int qcom_pmic_typec_pdphy_set_pd_rx(struct tcpc_dev *tcpc, bool on)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
unsigned long flags;
int ret;
@@ -353,9 +423,12 @@ int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, b
return ret;
}
-int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
- bool data_role_host, bool power_role_src)
+static int qcom_pmic_typec_pdphy_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
struct device *dev = pmic_typec_pdphy->dev;
unsigned long flags;
int ret;
@@ -366,12 +439,13 @@ int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
pmic_typec_pdphy->base + USB_PDPHY_MSG_CONFIG_REG,
MSG_CONFIG_PORT_DATA_ROLE |
MSG_CONFIG_PORT_POWER_ROLE,
- data_role_host << 3 | power_role_src << 2);
+ (data_role == TYPEC_HOST ? MSG_CONFIG_PORT_DATA_ROLE : 0) |
+ (power_role == TYPEC_SOURCE ? MSG_CONFIG_PORT_POWER_ROLE : 0));
spin_unlock_irqrestore(&pmic_typec_pdphy->lock, flags);
dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
- data_role_host, power_role_src);
+ data_role, power_role);
return ret;
}
@@ -435,9 +509,10 @@ done:
return ret;
}
-int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct tcpm_port *tcpm_port)
+static int qcom_pmic_typec_pdphy_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
int i;
int ret;
@@ -457,8 +532,9 @@ int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
return 0;
}
-void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
+static void qcom_pmic_typec_pdphy_stop(struct pmic_typec *tcpm)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy = tcpm->pmic_typec_pdphy;
int i;
for (i = 0; i < pmic_typec_pdphy->nr_irqs; i++)
@@ -469,21 +545,21 @@ void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy)
regulator_disable(pmic_typec_pdphy->vdd_pdphy);
}
-struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev)
-{
- return devm_kzalloc(dev, sizeof(struct pmic_typec_pdphy), GFP_KERNEL);
-}
-
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
- struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct pmic_typec_pdphy_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base)
{
+ struct pmic_typec_pdphy *pmic_typec_pdphy;
struct device *dev = &pdev->dev;
struct pmic_typec_pdphy_irq_data *irq_data;
int i, ret, irq;
+ pmic_typec_pdphy = devm_kzalloc(dev, sizeof(*pmic_typec_pdphy), GFP_KERNEL);
+ if (!pmic_typec_pdphy)
+ return -ENOMEM;
+
if (!res->nr_irqs || res->nr_irqs > PMIC_PDPHY_MAX_IRQS)
return -EINVAL;
@@ -522,5 +598,48 @@ int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
return ret;
}
+ tcpm->pmic_typec_pdphy = pmic_typec_pdphy;
+
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_pd_transmit;
+
+ tcpm->pdphy_start = qcom_pmic_typec_pdphy_start;
+ tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stop;
+
return 0;
}
+
+const struct pmic_typec_pdphy_resources pm8150b_pdphy_res = {
+ .irq_params = {
+ {
+ .virq = PMIC_PDPHY_SIG_TX_IRQ,
+ .irq_name = "sig-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_SIG_RX_IRQ,
+ .irq_name = "sig-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_IRQ,
+ .irq_name = "msg-tx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_IRQ,
+ .irq_name = "msg-rx",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_FAIL_IRQ,
+ .irq_name = "msg-tx-failed",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_TX_DISCARD_IRQ,
+ .irq_name = "msg-tx-discarded",
+ },
+ {
+ .virq = PMIC_PDPHY_MSG_RX_DISCARD_IRQ,
+ .irq_name = "msg-rx-discarded",
+ },
+ },
+ .nr_irqs = 7,
+};
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
index e67954e31b14..04dee20293cf 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy.h
@@ -8,74 +8,6 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/usb/tcpm.h>
-
-#define USB_PDPHY_MAX_DATA_OBJ_LEN 28
-#define USB_PDPHY_MSG_HDR_LEN 2
-
-/* PD PHY register offsets and bit fields */
-#define USB_PDPHY_MSG_CONFIG_REG 0x40
-#define MSG_CONFIG_PORT_DATA_ROLE BIT(3)
-#define MSG_CONFIG_PORT_POWER_ROLE BIT(2)
-#define MSG_CONFIG_SPEC_REV_MASK (BIT(1) | BIT(0))
-
-#define USB_PDPHY_EN_CONTROL_REG 0x46
-#define CONTROL_ENABLE BIT(0)
-
-#define USB_PDPHY_RX_STATUS_REG 0x4A
-#define RX_FRAME_TYPE (BIT(0) | BIT(1) | BIT(2))
-
-#define USB_PDPHY_FRAME_FILTER_REG 0x4C
-#define FRAME_FILTER_EN_HARD_RESET BIT(5)
-#define FRAME_FILTER_EN_SOP BIT(0)
-
-#define USB_PDPHY_TX_SIZE_REG 0x42
-#define TX_SIZE_MASK 0xF
-
-#define USB_PDPHY_TX_CONTROL_REG 0x44
-#define TX_CONTROL_RETRY_COUNT(n) (((n) & 0x3) << 5)
-#define TX_CONTROL_FRAME_TYPE(n) (((n) & 0x7) << 2)
-#define TX_CONTROL_FRAME_TYPE_CABLE_RESET (0x1 << 2)
-#define TX_CONTROL_SEND_SIGNAL BIT(1)
-#define TX_CONTROL_SEND_MSG BIT(0)
-
-#define USB_PDPHY_RX_SIZE_REG 0x48
-
-#define USB_PDPHY_RX_ACKNOWLEDGE_REG 0x4B
-#define RX_BUFFER_TOKEN BIT(0)
-
-#define USB_PDPHY_BIST_MODE_REG 0x4E
-#define BIST_MODE_MASK 0xF
-#define BIST_ENABLE BIT(7)
-#define PD_MSG_BIST 0x3
-#define PD_BIST_TEST_DATA_MODE 0x8
-
-#define USB_PDPHY_TX_BUFFER_HDR_REG 0x60
-#define USB_PDPHY_TX_BUFFER_DATA_REG 0x62
-
-#define USB_PDPHY_RX_BUFFER_REG 0x80
-
-/* VDD regulator */
-#define VDD_PDPHY_VOL_MIN 2800000 /* uV */
-#define VDD_PDPHY_VOL_MAX 3300000 /* uV */
-#define VDD_PDPHY_HPM_LOAD 3000 /* uA */
-
-/* Message Spec Rev field */
-#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
-
-/* timers */
-#define RECEIVER_RESPONSE_TIME 15 /* tReceiverResponse */
-#define HARD_RESET_COMPLETE_TIME 5 /* tHardResetComplete */
-
-/* Interrupt numbers */
-#define PMIC_PDPHY_SIG_TX_IRQ 0x0
-#define PMIC_PDPHY_SIG_RX_IRQ 0x1
-#define PMIC_PDPHY_MSG_TX_IRQ 0x2
-#define PMIC_PDPHY_MSG_RX_IRQ 0x3
-#define PMIC_PDPHY_MSG_TX_FAIL_IRQ 0x4
-#define PMIC_PDPHY_MSG_TX_DISCARD_IRQ 0x5
-#define PMIC_PDPHY_MSG_RX_DISCARD_IRQ 0x6
-#define PMIC_PDPHY_FR_SWAP_IRQ 0x7
/* Resources */
#define PMIC_PDPHY_MAX_IRQS 0x08
@@ -87,33 +19,19 @@ struct pmic_typec_pdphy_irq_params {
struct pmic_typec_pdphy_resources {
unsigned int nr_irqs;
- struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
+ const struct pmic_typec_pdphy_irq_params irq_params[PMIC_PDPHY_MAX_IRQS];
};
/* API */
struct pmic_typec_pdphy;
-struct pmic_typec_pdphy *qcom_pmic_typec_pdphy_alloc(struct device *dev);
-
+extern const struct pmic_typec_pdphy_resources pm8150b_pdphy_res;
int qcom_pmic_typec_pdphy_probe(struct platform_device *pdev,
- struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct pmic_typec_pdphy_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_pdphy_resources *res,
struct regmap *regmap,
u32 base);
-
-int qcom_pmic_typec_pdphy_start(struct pmic_typec_pdphy *pmic_typec_pdphy,
- struct tcpm_port *tcpm_port);
-
-void qcom_pmic_typec_pdphy_stop(struct pmic_typec_pdphy *pmic_typec_pdphy);
-
-int qcom_pmic_typec_pdphy_set_roles(struct pmic_typec_pdphy *pmic_typec_pdphy,
- bool power_role_src, bool data_role_host);
-
-int qcom_pmic_typec_pdphy_set_pd_rx(struct pmic_typec_pdphy *pmic_typec_pdphy, bool on);
-
-int qcom_pmic_typec_pdphy_pd_transmit(struct pmic_typec_pdphy *pmic_typec_pdphy,
- enum tcpm_transmit_type type,
- const struct pd_message *msg,
- unsigned int negotiated_rev);
+int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev,
+ struct pmic_typec *tcpm);
#endif /* __QCOM_PMIC_TYPEC_PDPHY_H__ */
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
new file mode 100644
index 000000000000..df79059cda67
--- /dev/null
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_pdphy_stub.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2024, Linaro Ltd. All rights reserved.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/usb/pd.h>
+#include <linux/usb/tcpm.h>
+#include "qcom_pmic_typec.h"
+#include "qcom_pmic_typec_pdphy.h"
+
+static int qcom_pmic_typec_pdphy_stub_pd_transmit(struct tcpc_dev *tcpc,
+ enum tcpm_transmit_type type,
+ const struct pd_message *msg,
+ unsigned int negotiated_rev)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "pdphy_transmit: type=%d\n", type);
+
+ tcpm_pd_transmit_complete(tcpm->tcpm_port,
+ TCPC_TX_SUCCESS);
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_set_pd_rx(struct tcpc_dev *tcpc, bool on)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "set_pd_rx: %s\n", on ? "on" : "off");
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_set_roles(struct tcpc_dev *tcpc, bool attached,
+ enum typec_role power_role,
+ enum typec_data_role data_role)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct device *dev = tcpm->dev;
+
+ dev_dbg(dev, "pdphy_set_roles: data_role_host=%d power_role_src=%d\n",
+ data_role, power_role);
+
+ return 0;
+}
+
+static int qcom_pmic_typec_pdphy_stub_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
+{
+ return 0;
+}
+
+static void qcom_pmic_typec_pdphy_stub_stop(struct pmic_typec *tcpm)
+{
+}
+
+int qcom_pmic_typec_pdphy_stub_probe(struct platform_device *pdev,
+ struct pmic_typec *tcpm)
+{
+ tcpm->tcpc.set_pd_rx = qcom_pmic_typec_pdphy_stub_set_pd_rx;
+ tcpm->tcpc.set_roles = qcom_pmic_typec_pdphy_stub_set_roles;
+ tcpm->tcpc.pd_transmit = qcom_pmic_typec_pdphy_stub_pd_transmit;
+
+ tcpm->pdphy_start = qcom_pmic_typec_pdphy_stub_start;
+ tcpm->pdphy_stop = qcom_pmic_typec_pdphy_stub_stop;
+
+ return 0;
+}
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
index a8f3f4d3a450..a747baa29784 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.c
@@ -16,8 +16,147 @@
#include <linux/usb/tcpm.h>
#include <linux/usb/typec_mux.h>
#include <linux/workqueue.h>
+
+#include "qcom_pmic_typec.h"
#include "qcom_pmic_typec_port.h"
+#define TYPEC_SNK_STATUS_REG 0x06
+#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
+#define SNK_DAM_MASK GENMASK(6, 4)
+#define SNK_DAM_500MA BIT(6)
+#define SNK_DAM_1500MA BIT(5)
+#define SNK_DAM_3000MA BIT(4)
+#define SNK_RP_STD BIT(3)
+#define SNK_RP_1P5 BIT(2)
+#define SNK_RP_3P0 BIT(1)
+#define SNK_RP_SHORT BIT(0)
+
+#define TYPEC_SRC_STATUS_REG 0x08
+#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
+#define SRC_HIGH_BATT BIT(5)
+#define SRC_DEBUG_ACCESS BIT(4)
+#define SRC_RD_OPEN BIT(3)
+#define SRC_RD_RA_VCONN BIT(2)
+#define SRC_RA_OPEN BIT(1)
+#define AUDIO_ACCESS_RA_RA BIT(0)
+
+#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
+#define TYPEC_ATTACH_DETACH_STATE BIT(5)
+
+#define TYPEC_SM_STATUS_REG 0x0A
+#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
+#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
+#define TYPEC_SM_USBIN_LT_LV BIT(7)
+
+#define TYPEC_MISC_STATUS_REG 0x0B
+#define TYPEC_WATER_DETECTION_STATUS BIT(7)
+#define SNK_SRC_MODE BIT(6)
+#define TYPEC_VBUS_DETECT BIT(5)
+#define TYPEC_VBUS_ERROR_STATUS BIT(4)
+#define TYPEC_DEBOUNCE_DONE BIT(3)
+#define CC_ORIENTATION BIT(1)
+#define CC_ATTACHED BIT(0)
+
+#define LEGACY_CABLE_STATUS_REG 0x0D
+#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
+#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
+
+#define TYPEC_U_USB_STATUS_REG 0x0F
+#define U_USB_GROUND_NOVBUS BIT(6)
+#define U_USB_GROUND BIT(4)
+#define U_USB_FMB1 BIT(3)
+#define U_USB_FLOAT1 BIT(2)
+#define U_USB_FMB2 BIT(1)
+#define U_USB_FLOAT2 BIT(0)
+
+#define TYPEC_MODE_CFG_REG 0x44
+#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
+#define EN_TRY_SNK BIT(4)
+#define EN_TRY_SRC BIT(3)
+#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
+#define EN_SRC_ONLY BIT(2)
+#define EN_SNK_ONLY BIT(1)
+#define TYPEC_DISABLE_CMD BIT(0)
+
+#define TYPEC_VCONN_CONTROL_REG 0x46
+#define VCONN_EN_ORIENTATION BIT(2)
+#define VCONN_EN_VALUE BIT(1)
+#define VCONN_EN_SRC BIT(0)
+
+#define TYPEC_CCOUT_CONTROL_REG 0x48
+#define TYPEC_CCOUT_BUFFER_EN BIT(2)
+#define TYPEC_CCOUT_VALUE BIT(1)
+#define TYPEC_CCOUT_SRC BIT(0)
+
+#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
+#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
+
+#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
+#define EN_SRC_CRUDE_SENSOR BIT(1)
+#define EN_SNK_CRUDE_SENSOR BIT(0)
+
+#define TYPEC_EXIT_STATE_CFG_REG 0x50
+#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
+#define SEL_SRC_UPPER_REF BIT(2)
+#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
+#define EXIT_SNK_BASED_ON_CC BIT(0)
+
+#define TYPEC_CURRSRC_CFG_REG 0x52
+#define TYPEC_SRC_RP_SEL_330UA BIT(1)
+#define TYPEC_SRC_RP_SEL_180UA BIT(0)
+#define TYPEC_SRC_RP_SEL_80UA 0
+#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
+
+#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
+#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
+#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
+#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
+#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
+#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
+#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
+#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
+#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
+
+#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
+#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
+#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
+#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
+#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
+#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
+#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
+#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
+
+#define TYPEC_DEBOUNCE_OPTION_REG 0x62
+#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
+
+#define TYPE_C_SBU_CFG_REG 0x6A
+#define SEL_SBU1_ISRC_VAL 0x04
+#define SEL_SBU2_ISRC_VAL 0x01
+
+#define TYPEC_U_USB_CFG_REG 0x70
+#define EN_MICRO_USB_FACTORY_MODE BIT(1)
+#define EN_MICRO_USB_MODE BIT(0)
+
+#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
+
+#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
+#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
+#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
+#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
+
+#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
+#define MICRO_USB_MODE_ONLY BIT(0)
+
+/* Interrupt numbers */
+#define PMIC_TYPEC_OR_RID_IRQ 0x0
+#define PMIC_TYPEC_VPD_IRQ 0x1
+#define PMIC_TYPEC_CC_STATE_IRQ 0x2
+#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
+#define PMIC_TYPEC_VBUS_IRQ 0x4
+#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
+#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
+#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
+
struct pmic_typec_port_irq_data {
int virq;
int irq;
@@ -33,6 +172,8 @@ struct pmic_typec_port {
struct pmic_typec_port_irq_data *irq_data;
struct regulator *vdd_vbus;
+ bool vbus_enabled;
+ struct mutex vbus_lock; /* VBUS state serialization */
int cc;
bool debouncing_cc;
@@ -131,7 +272,7 @@ done:
return IRQ_HANDLED;
}
-int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
+static int qcom_pmic_typec_port_vbus_detect(struct pmic_typec_port *pmic_typec_port)
{
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
@@ -148,7 +289,7 @@ int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port)
return !!(misc & TYPEC_VBUS_DETECT);
}
-int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on)
+static int qcom_pmic_typec_port_vbus_toggle(struct pmic_typec_port *pmic_typec_port, bool on)
{
u32 sm_stat;
u32 val;
@@ -179,10 +320,49 @@ int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool
return 0;
}
-int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2)
+static int qcom_pmic_typec_port_get_vbus(struct tcpc_dev *tcpc)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
+ int ret;
+
+ mutex_lock(&pmic_typec_port->vbus_lock);
+ ret = pmic_typec_port->vbus_enabled || qcom_pmic_typec_port_vbus_detect(pmic_typec_port);
+ mutex_unlock(&pmic_typec_port->vbus_lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_port_set_vbus(struct tcpc_dev *tcpc, bool on, bool sink)
+{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
+ int ret = 0;
+
+ mutex_lock(&pmic_typec_port->vbus_lock);
+ if (pmic_typec_port->vbus_enabled == on)
+ goto done;
+
+ ret = qcom_pmic_typec_port_vbus_toggle(pmic_typec_port, on);
+ if (ret)
+ goto done;
+
+ pmic_typec_port->vbus_enabled = on;
+ tcpm_vbus_change(tcpm->tcpm_port);
+
+done:
+ dev_dbg(tcpm->dev, "set_vbus set: %d result %d\n", on, ret);
+ mutex_unlock(&pmic_typec_port->vbus_lock);
+
+ return ret;
+}
+
+static int qcom_pmic_typec_port_get_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status *cc1,
+ enum typec_cc_status *cc2)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int misc, val;
bool attached;
@@ -275,9 +455,11 @@ static void qcom_pmic_set_cc_debounce(struct pmic_typec_port *pmic_typec_port)
msecs_to_jiffies(2));
}
-int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status cc)
+static int qcom_pmic_typec_port_set_cc(struct tcpc_dev *tcpc,
+ enum typec_cc_status cc)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int mode, currsrc;
unsigned int misc;
@@ -341,8 +523,17 @@ done:
return ret;
}
-int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on)
+static int qcom_pmic_typec_port_set_polarity(struct tcpc_dev *tcpc,
+ enum typec_cc_polarity pol)
+{
+ /* Polarity is set separately by phy-qcom-qmp.c */
+ return 0;
+}
+
+static int qcom_pmic_typec_port_set_vconn(struct tcpc_dev *tcpc, bool on)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int orientation, misc, mask, value;
unsigned long flags;
@@ -377,10 +568,12 @@ done:
return ret;
}
-int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
- enum typec_port_type port_type,
- enum typec_cc_status cc)
+static int qcom_pmic_typec_port_start_toggling(struct tcpc_dev *tcpc,
+ enum typec_port_type port_type,
+ enum typec_cc_status cc)
{
+ struct pmic_typec *tcpm = tcpc_to_tcpm(tcpc);
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
struct device *dev = pmic_typec_port->dev;
unsigned int misc;
u8 mode = 0;
@@ -441,9 +634,10 @@ done:
(TYPEC_STATE_MACHINE_CHANGE_INT_EN | TYPEC_VBUS_ERROR_INT_EN | \
TYPEC_DEBOUNCE_DONE_INT_EN)
-int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
- struct tcpm_port *tcpm_port)
+static int qcom_pmic_typec_port_start(struct pmic_typec *tcpm,
+ struct tcpm_port *tcpm_port)
{
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
int i;
int mask;
int ret;
@@ -491,29 +685,30 @@ done:
return ret;
}
-void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port)
+static void qcom_pmic_typec_port_stop(struct pmic_typec *tcpm)
{
+ struct pmic_typec_port *pmic_typec_port = tcpm->pmic_typec_port;
int i;
for (i = 0; i < pmic_typec_port->nr_irqs; i++)
disable_irq(pmic_typec_port->irq_data[i].irq);
}
-struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev)
-{
- return devm_kzalloc(dev, sizeof(struct pmic_typec_port), GFP_KERNEL);
-}
-
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
- struct pmic_typec_port *pmic_typec_port,
- struct pmic_typec_port_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_port_resources *res,
struct regmap *regmap,
u32 base)
{
struct device *dev = &pdev->dev;
struct pmic_typec_port_irq_data *irq_data;
+ struct pmic_typec_port *pmic_typec_port;
int i, ret, irq;
+ pmic_typec_port = devm_kzalloc(dev, sizeof(*pmic_typec_port), GFP_KERNEL);
+ if (!pmic_typec_port)
+ return -ENOMEM;
+
if (!res->nr_irqs || res->nr_irqs > PMIC_TYPEC_MAX_IRQS)
return -EINVAL;
@@ -522,6 +717,8 @@ int qcom_pmic_typec_port_probe(struct platform_device *pdev,
if (!irq_data)
return -ENOMEM;
+ mutex_init(&pmic_typec_port->vbus_lock);
+
pmic_typec_port->vdd_vbus = devm_regulator_get(dev, "vdd-vbus");
if (IS_ERR(pmic_typec_port->vdd_vbus))
return PTR_ERR(pmic_typec_port->vdd_vbus);
@@ -556,5 +753,56 @@ int qcom_pmic_typec_port_probe(struct platform_device *pdev,
return ret;
}
+ tcpm->pmic_typec_port = pmic_typec_port;
+
+ tcpm->tcpc.get_vbus = qcom_pmic_typec_port_get_vbus;
+ tcpm->tcpc.set_vbus = qcom_pmic_typec_port_set_vbus;
+ tcpm->tcpc.set_cc = qcom_pmic_typec_port_set_cc;
+ tcpm->tcpc.get_cc = qcom_pmic_typec_port_get_cc;
+ tcpm->tcpc.set_polarity = qcom_pmic_typec_port_set_polarity;
+ tcpm->tcpc.set_vconn = qcom_pmic_typec_port_set_vconn;
+ tcpm->tcpc.start_toggling = qcom_pmic_typec_port_start_toggling;
+
+ tcpm->port_start = qcom_pmic_typec_port_start;
+ tcpm->port_stop = qcom_pmic_typec_port_stop;
+
return 0;
}
+
+const struct pmic_typec_port_resources pm8150b_port_res = {
+ .irq_params = {
+ {
+ .irq_name = "vpd-detect",
+ .virq = PMIC_TYPEC_VPD_IRQ,
+ },
+
+ {
+ .irq_name = "cc-state-change",
+ .virq = PMIC_TYPEC_CC_STATE_IRQ,
+ },
+ {
+ .irq_name = "vconn-oc",
+ .virq = PMIC_TYPEC_VCONN_OC_IRQ,
+ },
+
+ {
+ .irq_name = "vbus-change",
+ .virq = PMIC_TYPEC_VBUS_IRQ,
+ },
+
+ {
+ .irq_name = "attach-detach",
+ .virq = PMIC_TYPEC_ATTACH_DETACH_IRQ,
+ },
+ {
+ .irq_name = "legacy-cable-detect",
+ .virq = PMIC_TYPEC_LEGACY_CABLE_IRQ,
+ },
+
+ {
+ .irq_name = "try-snk-src-detect",
+ .virq = PMIC_TYPEC_TRY_SNK_SRC_IRQ,
+ },
+ },
+ .nr_irqs = 7,
+};
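As a point of reference for the refactor above, here is a minimal sketch of how a core driver could consume the new interface: qcom_pmic_typec_port_probe() now allocates the port state itself, fills the tcpc_dev callbacks in the shared struct pmic_typec, and installs port_start/port_stop. Only qcom_pmic_typec_port_probe() and pm8150b_port_res come from this patch; the probe function name, the register base, the header names, and the fwnode lookup are assumptions for illustration.

#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/usb/tcpm.h>

#include "qcom_pmic_typec.h"      /* assumed to define struct pmic_typec */
#include "qcom_pmic_typec_port.h"

static int example_pmic_typec_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pmic_typec *tcpm;
	struct regmap *regmap;
	int ret;

	tcpm = devm_kzalloc(dev, sizeof(*tcpm), GFP_KERNEL);
	if (!tcpm)
		return -ENOMEM;

	regmap = dev_get_regmap(dev->parent, NULL);
	if (!regmap)
		return -ENODEV;

	/* 0x1500 is an assumed TYPEC register base, for illustration only */
	ret = qcom_pmic_typec_port_probe(pdev, tcpm, &pm8150b_port_res,
					 regmap, 0x1500);
	if (ret)
		return ret;

	/* The port sub-module has filled tcpm->tcpc; hand it to TCPM */
	tcpm->tcpc.fwnode = device_get_named_child_node(dev, "connector");
	tcpm->tcpm_port = tcpm_register_port(dev, &tcpm->tcpc);
	if (IS_ERR(tcpm->tcpm_port))
		return PTR_ERR(tcpm->tcpm_port);

	/* Unmask the port interrupts via the callback installed by the probe */
	return tcpm->port_start(tcpm, tcpm->tcpm_port);
}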
diff --git a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
index d4d358c680b6..2ca83a46cf3b 100644
--- a/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
+++ b/drivers/usb/typec/tcpm/qcom/qcom_pmic_typec_port.h
@@ -3,149 +3,12 @@
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2023, Linaro Ltd. All rights reserved.
*/
-#ifndef __QCOM_PMIC_TYPEC_H__
-#define __QCOM_PMIC_TYPEC_H__
+#ifndef __QCOM_PMIC_TYPEC_PORT_H__
+#define __QCOM_PMIC_TYPEC_PORT_H__
#include <linux/platform_device.h>
#include <linux/usb/tcpm.h>
-#define TYPEC_SNK_STATUS_REG 0x06
-#define DETECTED_SNK_TYPE_MASK GENMASK(6, 0)
-#define SNK_DAM_MASK GENMASK(6, 4)
-#define SNK_DAM_500MA BIT(6)
-#define SNK_DAM_1500MA BIT(5)
-#define SNK_DAM_3000MA BIT(4)
-#define SNK_RP_STD BIT(3)
-#define SNK_RP_1P5 BIT(2)
-#define SNK_RP_3P0 BIT(1)
-#define SNK_RP_SHORT BIT(0)
-
-#define TYPEC_SRC_STATUS_REG 0x08
-#define DETECTED_SRC_TYPE_MASK GENMASK(4, 0)
-#define SRC_HIGH_BATT BIT(5)
-#define SRC_DEBUG_ACCESS BIT(4)
-#define SRC_RD_OPEN BIT(3)
-#define SRC_RD_RA_VCONN BIT(2)
-#define SRC_RA_OPEN BIT(1)
-#define AUDIO_ACCESS_RA_RA BIT(0)
-
-#define TYPEC_STATE_MACHINE_STATUS_REG 0x09
-#define TYPEC_ATTACH_DETACH_STATE BIT(5)
-
-#define TYPEC_SM_STATUS_REG 0x0A
-#define TYPEC_SM_VBUS_VSAFE5V BIT(5)
-#define TYPEC_SM_VBUS_VSAFE0V BIT(6)
-#define TYPEC_SM_USBIN_LT_LV BIT(7)
-
-#define TYPEC_MISC_STATUS_REG 0x0B
-#define TYPEC_WATER_DETECTION_STATUS BIT(7)
-#define SNK_SRC_MODE BIT(6)
-#define TYPEC_VBUS_DETECT BIT(5)
-#define TYPEC_VBUS_ERROR_STATUS BIT(4)
-#define TYPEC_DEBOUNCE_DONE BIT(3)
-#define CC_ORIENTATION BIT(1)
-#define CC_ATTACHED BIT(0)
-
-#define LEGACY_CABLE_STATUS_REG 0x0D
-#define TYPEC_LEGACY_CABLE_STATUS BIT(1)
-#define TYPEC_NONCOMP_LEGACY_CABLE_STATUS BIT(0)
-
-#define TYPEC_U_USB_STATUS_REG 0x0F
-#define U_USB_GROUND_NOVBUS BIT(6)
-#define U_USB_GROUND BIT(4)
-#define U_USB_FMB1 BIT(3)
-#define U_USB_FLOAT1 BIT(2)
-#define U_USB_FMB2 BIT(1)
-#define U_USB_FLOAT2 BIT(0)
-
-#define TYPEC_MODE_CFG_REG 0x44
-#define TYPEC_TRY_MODE_MASK GENMASK(4, 3)
-#define EN_TRY_SNK BIT(4)
-#define EN_TRY_SRC BIT(3)
-#define TYPEC_POWER_ROLE_CMD_MASK GENMASK(2, 0)
-#define EN_SRC_ONLY BIT(2)
-#define EN_SNK_ONLY BIT(1)
-#define TYPEC_DISABLE_CMD BIT(0)
-
-#define TYPEC_VCONN_CONTROL_REG 0x46
-#define VCONN_EN_ORIENTATION BIT(2)
-#define VCONN_EN_VALUE BIT(1)
-#define VCONN_EN_SRC BIT(0)
-
-#define TYPEC_CCOUT_CONTROL_REG 0x48
-#define TYPEC_CCOUT_BUFFER_EN BIT(2)
-#define TYPEC_CCOUT_VALUE BIT(1)
-#define TYPEC_CCOUT_SRC BIT(0)
-
-#define DEBUG_ACCESS_SRC_CFG_REG 0x4C
-#define EN_UNORIENTED_DEBUG_ACCESS_SRC BIT(0)
-
-#define TYPE_C_CRUDE_SENSOR_CFG_REG 0x4e
-#define EN_SRC_CRUDE_SENSOR BIT(1)
-#define EN_SNK_CRUDE_SENSOR BIT(0)
-
-#define TYPEC_EXIT_STATE_CFG_REG 0x50
-#define BYPASS_VSAFE0V_DURING_ROLE_SWAP BIT(3)
-#define SEL_SRC_UPPER_REF BIT(2)
-#define USE_TPD_FOR_EXITING_ATTACHSRC BIT(1)
-#define EXIT_SNK_BASED_ON_CC BIT(0)
-
-#define TYPEC_CURRSRC_CFG_REG 0x52
-#define TYPEC_SRC_RP_SEL_330UA BIT(1)
-#define TYPEC_SRC_RP_SEL_180UA BIT(0)
-#define TYPEC_SRC_RP_SEL_80UA 0
-#define TYPEC_SRC_RP_SEL_MASK GENMASK(1, 0)
-
-#define TYPEC_INTERRUPT_EN_CFG_1_REG 0x5E
-#define TYPEC_LEGACY_CABLE_INT_EN BIT(7)
-#define TYPEC_NONCOMPLIANT_LEGACY_CABLE_INT_EN BIT(6)
-#define TYPEC_TRYSOURCE_DETECT_INT_EN BIT(5)
-#define TYPEC_TRYSINK_DETECT_INT_EN BIT(4)
-#define TYPEC_CCOUT_DETACH_INT_EN BIT(3)
-#define TYPEC_CCOUT_ATTACH_INT_EN BIT(2)
-#define TYPEC_VBUS_DEASSERT_INT_EN BIT(1)
-#define TYPEC_VBUS_ASSERT_INT_EN BIT(0)
-
-#define TYPEC_INTERRUPT_EN_CFG_2_REG 0x60
-#define TYPEC_SRC_BATT_HPWR_INT_EN BIT(6)
-#define MICRO_USB_STATE_CHANGE_INT_EN BIT(5)
-#define TYPEC_STATE_MACHINE_CHANGE_INT_EN BIT(4)
-#define TYPEC_DEBUG_ACCESS_DETECT_INT_EN BIT(3)
-#define TYPEC_WATER_DETECTION_INT_EN BIT(2)
-#define TYPEC_VBUS_ERROR_INT_EN BIT(1)
-#define TYPEC_DEBOUNCE_DONE_INT_EN BIT(0)
-
-#define TYPEC_DEBOUNCE_OPTION_REG 0x62
-#define REDUCE_TCCDEBOUNCE_TO_2MS BIT(2)
-
-#define TYPE_C_SBU_CFG_REG 0x6A
-#define SEL_SBU1_ISRC_VAL 0x04
-#define SEL_SBU2_ISRC_VAL 0x01
-
-#define TYPEC_U_USB_CFG_REG 0x70
-#define EN_MICRO_USB_FACTORY_MODE BIT(1)
-#define EN_MICRO_USB_MODE BIT(0)
-
-#define TYPEC_PMI632_U_USB_WATER_PROTECTION_CFG_REG 0x72
-
-#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG 0x73
-#define EN_MICRO_USB_WATER_PROTECTION BIT(4)
-#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
-#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
-
-#define TYPEC_PMI632_MICRO_USB_MODE_REG 0x73
-#define MICRO_USB_MODE_ONLY BIT(0)
-
-/* Interrupt numbers */
-#define PMIC_TYPEC_OR_RID_IRQ 0x0
-#define PMIC_TYPEC_VPD_IRQ 0x1
-#define PMIC_TYPEC_CC_STATE_IRQ 0x2
-#define PMIC_TYPEC_VCONN_OC_IRQ 0x3
-#define PMIC_TYPEC_VBUS_IRQ 0x4
-#define PMIC_TYPEC_ATTACH_DETACH_IRQ 0x5
-#define PMIC_TYPEC_LEGACY_CABLE_IRQ 0x6
-#define PMIC_TYPEC_TRY_SNK_SRC_IRQ 0x7
-
/* Resources */
#define PMIC_TYPEC_MAX_IRQS 0x08
@@ -156,40 +19,17 @@ struct pmic_typec_port_irq_params {
struct pmic_typec_port_resources {
unsigned int nr_irqs;
- struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
+ const struct pmic_typec_port_irq_params irq_params[PMIC_TYPEC_MAX_IRQS];
};
/* API */
-struct pmic_typec;
-struct pmic_typec_port *qcom_pmic_typec_port_alloc(struct device *dev);
+extern const struct pmic_typec_port_resources pm8150b_port_res;
int qcom_pmic_typec_port_probe(struct platform_device *pdev,
- struct pmic_typec_port *pmic_typec_port,
- struct pmic_typec_port_resources *res,
+ struct pmic_typec *tcpm,
+ const struct pmic_typec_port_resources *res,
struct regmap *regmap,
u32 base);
-int qcom_pmic_typec_port_start(struct pmic_typec_port *pmic_typec_port,
- struct tcpm_port *tcpm_port);
-
-void qcom_pmic_typec_port_stop(struct pmic_typec_port *pmic_typec_port);
-
-int qcom_pmic_typec_port_get_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status *cc1,
- enum typec_cc_status *cc2);
-
-int qcom_pmic_typec_port_set_cc(struct pmic_typec_port *pmic_typec_port,
- enum typec_cc_status cc);
-
-int qcom_pmic_typec_port_get_vbus(struct pmic_typec_port *pmic_typec_port);
-
-int qcom_pmic_typec_port_set_vconn(struct pmic_typec_port *pmic_typec_port, bool on);
-
-int qcom_pmic_typec_port_start_toggling(struct pmic_typec_port *pmic_typec_port,
- enum typec_port_type port_type,
- enum typec_cc_status cc);
-
-int qcom_pmic_typec_port_set_vbus(struct pmic_typec_port *pmic_typec_port, bool on);
-
-#endif /* __QCOM_PMIC_TYPE_C_PORT_H__ */
+#endif /* __QCOM_PMIC_TYPEC_PORT_H__ */
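The probe prototype above takes a struct pmic_typec that this excerpt never defines. Inferred from how the port code uses it (tcpc_to_tcpm(), tcpm->dev, tcpm->tcpm_port, tcpm->pmic_typec_port, tcpm->tcpc and tcpm->port_start/port_stop), the shared container presumably looks roughly as follows; field order and any further sub-module members are guesses:

/* Sketch of the assumed container layout (lives in qcom_pmic_typec.h) */
struct pmic_typec {
	struct device		*dev;
	struct tcpm_port	*tcpm_port;
	struct tcpc_dev		tcpc;

	struct pmic_typec_port	*pmic_typec_port;
	/* other sub-module state (e.g. the PD PHY) omitted */

	int  (*port_start)(struct pmic_typec *tcpm, struct tcpm_port *tcpm_port);
	void (*port_stop)(struct pmic_typec *tcpm);
};

/* Matches the tcpc_to_tcpm() helper used by the tcpc_dev callbacks */
#define tcpc_to_tcpm(_tcpc_) container_of(_tcpc_, struct pmic_typec, tcpc)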
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index 0ee3e6e29bb1..c962014bba4e 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -445,8 +445,11 @@ static int tcpci_set_pd_rx(struct tcpc_dev *tcpc, bool enable)
unsigned int reg = 0;
int ret;
- if (enable)
+ if (enable) {
reg = TCPC_RX_DETECT_SOP | TCPC_RX_DETECT_HARD_RESET;
+ if (tcpci->data->cable_comm_capable)
+ reg |= TCPC_RX_DETECT_SOP1;
+ }
ret = regmap_write(tcpci->regmap, TCPC_RX_DETECT, reg);
if (ret < 0)
return ret;
@@ -584,6 +587,23 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, enum tcpm_transmit_type type
return 0;
}
+static bool tcpci_cable_comm_capable(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ return tcpci->data->cable_comm_capable;
+}
+
+static bool tcpci_attempt_vconn_swap_discovery(struct tcpc_dev *tcpc)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ if (tcpci->data->attempt_vconn_swap_discovery)
+ return tcpci->data->attempt_vconn_swap_discovery(tcpci, tcpci->data);
+
+ return false;
+}
+
static int tcpci_init(struct tcpc_dev *tcpc)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -712,7 +732,7 @@ irqreturn_t tcpci_irq(struct tcpci *tcpci)
/* Read complete, clear RX status alert bit */
tcpci_write16(tcpci, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
- tcpm_pd_receive(tcpci->port, &msg);
+ tcpm_pd_receive(tcpci->port, &msg, TCPC_TX_SOP);
}
if (tcpci->data->vbus_vsafe0v && (status & TCPC_ALERT_EXTENDED_STATUS)) {
@@ -793,6 +813,8 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.enable_frs = tcpci_enable_frs;
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
+ tcpci->tcpc.cable_comm_capable = tcpci_cable_comm_capable;
+ tcpci->tcpc.attempt_vconn_swap_discovery = tcpci_attempt_vconn_swap_discovery;
if (tcpci->data->check_contaminant)
tcpci->tcpc.check_contaminant = tcpci_check_contaminant;
@@ -889,6 +911,7 @@ MODULE_DEVICE_TABLE(i2c, tcpci_id);
#ifdef CONFIG_OF
static const struct of_device_id tcpci_of_match[] = {
{ .compatible = "nxp,ptn5110", },
+ { .compatible = "tcpci", },
{},
};
MODULE_DEVICE_TABLE(of, tcpci_of_match);
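The tcpci.c hunks above dereference two struct tcpci_data members whose declarations live in include/linux/usb/tcpci.h and are not part of this excerpt. A sketch of the assumed additions, with the pre-existing members elided:

struct tcpci_data {
	/* existing members (regmap, flags, vbus and contaminant hooks) elided */

	/* Set by the glue driver when the TCPC can transmit/receive SOP' */
	unsigned char cable_comm_capable:1;

	/*
	 * Optional veto hook consulted before TCPM performs a Vconn swap
	 * purely to run cable discovery, e.g. after a Vconn OC fault.
	 */
	bool (*attempt_vconn_swap_discovery)(struct tcpci *tcpci,
					     struct tcpci_data *data);
};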
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.h b/drivers/usb/typec/tcpm/tcpci_maxim.h
index 2c1c4d161b0d..78ff3b73ee7e 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.h
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.h
@@ -62,6 +62,7 @@ struct max_tcpci_chip {
struct i2c_client *client;
struct tcpm_port *port;
enum contamiant_state contaminant_state;
+ bool veto_vconn_swap;
};
static inline int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val)
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim_core.c b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
index 7fb966fd639b..eec3bcec119c 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim_core.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim_core.c
@@ -128,6 +128,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
u8 count, frame_type, rx_buf[TCPC_RECEIVE_BUFFER_LEN];
int ret, payload_index;
u8 *rx_buf_ptr;
+ enum tcpm_transmit_type rx_type;
/*
* READABLE_BYTE_COUNT: Indicates the number of bytes in the RX_BUF_BYTE_x registers
@@ -143,10 +144,23 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
count = rx_buf[TCPC_RECEIVE_BUFFER_COUNT_OFFSET];
frame_type = rx_buf[TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET];
- if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
+ switch (frame_type) {
+ case TCPC_RX_BUF_FRAME_TYPE_SOP1:
+ rx_type = TCPC_TX_SOP_PRIME;
+ break;
+ case TCPC_RX_BUF_FRAME_TYPE_SOP:
+ rx_type = TCPC_TX_SOP;
+ break;
+ default:
+ rx_type = TCPC_TX_SOP;
+ break;
+ }
+
+ if (count == 0 || (frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP &&
+ frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP1)) {
max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
dev_err(chip->dev, "%s\n", count == 0 ? "error: count is 0" :
- "error frame_type is not SOP");
+ "error frame_type is not SOP/SOP'");
return;
}
@@ -183,7 +197,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
if (ret < 0)
return;
- tcpm_pd_receive(chip->port, &msg);
+ tcpm_pd_receive(chip->port, &msg, rx_type);
}
static int max_tcpci_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata, bool source, bool sink)
@@ -309,8 +323,10 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
if (ret < 0)
return ret;
- if (reg_status & TCPC_FAULT_STATUS_VCONN_OC)
+ if (reg_status & TCPC_FAULT_STATUS_VCONN_OC) {
+ chip->veto_vconn_swap = true;
tcpm_port_error_recovery(chip->port);
+ }
}
if (status & TCPC_ALERT_EXTND) {
@@ -444,6 +460,18 @@ static void max_tcpci_check_contaminant(struct tcpci *tcpci, struct tcpci_data *
tcpm_port_clean(chip->port);
}
+static bool max_tcpci_attempt_vconn_swap_discovery(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata);
+
+ if (chip->veto_vconn_swap) {
+ chip->veto_vconn_swap = false;
+ return false;
+ }
+
+ return true;
+}
+
static int max_tcpci_probe(struct i2c_client *client)
{
int ret;
@@ -478,6 +506,8 @@ static int max_tcpci_probe(struct i2c_client *client)
chip->data.vbus_vsafe0v = true;
chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
chip->data.check_contaminant = max_tcpci_check_contaminant;
+ chip->data.cable_comm_capable = true;
+ chip->data.attempt_vconn_swap_discovery = max_tcpci_attempt_vconn_swap_discovery;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
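The tcpm.c hunks below similarly rely on include/linux/usb/tcpm.h changes that are outside this excerpt: new tcpc_dev callbacks and an extra SOP* argument to tcpm_pd_receive(). The following prototypes are an inferred sketch, not the authoritative header:

struct tcpc_dev {
	/* existing callbacks elided */
	bool (*cable_comm_capable)(struct tcpc_dev *dev);
	bool (*attempt_vconn_swap_discovery)(struct tcpc_dev *dev);
	int  (*set_orientation)(struct tcpc_dev *dev,
				enum typec_orientation orientation);
};

/* The receive path now tags each message with the SOP* it arrived on */
void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
		     enum tcpm_transmit_type rx_sop_type);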
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 096597231027..ae2b6c94482d 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -108,6 +108,7 @@
S(VCONN_SWAP_WAIT_FOR_VCONN), \
S(VCONN_SWAP_TURN_ON_VCONN), \
S(VCONN_SWAP_TURN_OFF_VCONN), \
+ S(VCONN_SWAP_SEND_SOFT_RESET), \
\
S(FR_SWAP_SEND), \
S(FR_SWAP_SEND_TIMEOUT), \
@@ -145,7 +146,9 @@
S(PORT_RESET_WAIT_OFF), \
\
S(AMS_START), \
- S(CHUNK_NOT_SUPP)
+ S(CHUNK_NOT_SUPP), \
+ \
+ S(SRC_VDM_IDENTITY_REQUEST)
#define FOREACH_AMS(S) \
S(NONE_AMS), \
@@ -327,6 +330,12 @@ struct tcpm_port {
struct typec_partner_desc partner_desc;
struct typec_partner *partner;
+ struct usb_pd_identity cable_ident;
+ struct typec_cable_desc cable_desc;
+ struct typec_cable *cable;
+ struct typec_plug_desc plug_prime_desc;
+ struct typec_plug *plug_prime;
+
enum typec_cc_status cc_req;
enum typec_cc_status src_rp; /* work only if pd_supported == false */
@@ -468,7 +477,9 @@ struct tcpm_port {
/* Alternate mode data */
struct pd_mode_data mode_data;
+ struct pd_mode_data mode_data_prime;
struct typec_altmode *partner_altmode[ALTMODE_DISCOVERY_MAX];
+ struct typec_altmode *plug_prime_altmode[ALTMODE_DISCOVERY_MAX];
struct typec_altmode *port_altmode[ALTMODE_DISCOVERY_MAX];
/* Deadline in jiffies to exit src_try_wait state */
@@ -505,6 +516,41 @@ struct tcpm_port {
* transitions.
*/
bool potential_contaminant;
+
+ /* SOP* Related Fields */
+ /*
+ * Flag to determine if SOP' Discover Identity is available. The flag
+ * is set if Discover Identity on SOP' does not immediately follow
+ * Discover Identity on SOP.
+ */
+ bool send_discover_prime;
+ /*
+ * tx_sop_type determines which SOP* a message is being sent on.
+ * For messages that are queued and not sent immediately such as in
+ * tcpm_queue_message or messages that send after state changes,
+ * the tx_sop_type is set accordingly.
+ */
+ enum tcpm_transmit_type tx_sop_type;
+ /*
+ * Prior to discovering the port partner's Specification Revision, the
+ * Vconn source and cable plug will use the lower of their two revisions.
+ *
+ * When the port partner's Specification Revision is discovered, the following
+ * rules are put in place.
+ * 1. If the cable revision (1) is lower than the revision negotiated
+ * between the port and partner (2), the port and partner will communicate
+ * on revision (2), but the port and cable will communicate on revision (1).
+ * 2. If the cable revision (1) is higher than the revision negotiated
+ * between the port and partner (2), the port and partner will communicate
+ * on revision (2), and the port and cable will communicate on revision (2)
+ * as well.
+ */
+ unsigned int negotiated_rev_prime;
+ /*
+ * Each SOP* type must maintain their own tx and rx message IDs
+ */
+ unsigned int message_id_prime;
+ unsigned int rx_msgid_prime;
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -518,6 +564,7 @@ struct pd_rx_event {
struct kthread_work work;
struct tcpm_port *port;
struct pd_message msg;
+ enum tcpm_transmit_type rx_sop_type;
};
static const char * const pd_rev[] = {
@@ -893,19 +940,30 @@ static void tcpm_ams_finish(struct tcpm_port *port)
}
static int tcpm_pd_transmit(struct tcpm_port *port,
- enum tcpm_transmit_type type,
+ enum tcpm_transmit_type tx_sop_type,
const struct pd_message *msg)
{
unsigned long timeout;
int ret;
+ unsigned int negotiated_rev;
+
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ negotiated_rev = port->negotiated_rev_prime;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ negotiated_rev = port->negotiated_rev;
+ break;
+ }
if (msg)
tcpm_log(port, "PD TX, header: %#x", le16_to_cpu(msg->header));
else
- tcpm_log(port, "PD TX, type: %#x", type);
+ tcpm_log(port, "PD TX, type: %#x", tx_sop_type);
reinit_completion(&port->tx_complete);
- ret = port->tcpc->pd_transmit(port->tcpc, type, msg, port->negotiated_rev);
+ ret = port->tcpc->pd_transmit(port->tcpc, tx_sop_type, msg, negotiated_rev);
if (ret < 0)
return ret;
@@ -918,7 +976,17 @@ static int tcpm_pd_transmit(struct tcpm_port *port,
switch (port->tx_status) {
case TCPC_TX_SUCCESS:
- port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ port->message_id_prime = (port->message_id_prime + 1) &
+ PD_HEADER_ID_MASK;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ port->message_id = (port->message_id + 1) &
+ PD_HEADER_ID_MASK;
+ break;
+ }
/*
* USB PD rev 2.0, 8.3.2.2.1:
* USB PD rev 3.0, 8.3.2.1.3:
@@ -1099,6 +1167,12 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
if (ret < 0)
return ret;
+ if (port->tcpc->set_orientation) {
+ ret = port->tcpc->set_orientation(port->tcpc, orientation);
+ if (ret < 0)
+ return ret;
+ }
+
port->pwr_role = role;
port->data_role = data;
typec_set_data_role(port->typec_port, data);
@@ -1456,7 +1530,7 @@ static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
* VDM/VDO handling functions
*/
static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt)
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
u32 vdo_hdr = port->vdo_data[0];
@@ -1464,7 +1538,10 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
/* If is sending discover_identity, handle received message first */
if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMD(vdo_hdr) == CMD_DISCOVER_IDENT) {
- port->send_discover = true;
+ if (tx_sop_type == TCPC_TX_SOP_PRIME)
+ port->send_discover_prime = true;
+ else
+ port->send_discover = true;
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
} else {
/* Make sure we are not still processing a previous VDM packet */
@@ -1479,14 +1556,16 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header,
port->vdm_state = VDM_STATE_READY;
port->vdm_sm_running = true;
+ port->tx_sop_type = tx_sop_type;
+
mod_vdm_delayed_work(port, 0);
}
static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header,
- const u32 *data, int cnt)
+ const u32 *data, int cnt, enum tcpm_transmit_type tx_sop_type)
{
mutex_lock(&port->lock);
- tcpm_queue_vdm(port, header, data, cnt);
+ tcpm_queue_vdm(port, header, data, cnt, tx_sop_type);
mutex_unlock(&port->lock);
}
@@ -1508,9 +1587,68 @@ static void svdm_consume_identity(struct tcpm_port *port, const u32 *p, int cnt)
PD_PRODUCT_PID(product), product & 0xffff);
}
-static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt)
+static void svdm_consume_identity_sop_prime(struct tcpm_port *port, const u32 *p, int cnt)
{
- struct pd_mode_data *pmdata = &port->mode_data;
+ u32 idh = p[VDO_INDEX_IDH];
+ u32 product = p[VDO_INDEX_PRODUCT];
+ int svdm_version;
+
+ /*
+ * Attempt to consume identity only if cable currently is not set
+ */
+ if (!IS_ERR_OR_NULL(port->cable))
+ goto register_plug;
+
+ /* Reset cable identity */
+ memset(&port->cable_ident, 0, sizeof(port->cable_ident));
+
+ /* Fill out id header, cert, product, cable VDO 1 */
+ port->cable_ident.id_header = idh;
+ port->cable_ident.cert_stat = p[VDO_INDEX_CSTAT];
+ port->cable_ident.product = product;
+ port->cable_ident.vdo[0] = p[VDO_INDEX_CABLE_1];
+
+ /* Fill out cable desc, infer svdm_version from pd revision */
+ port->cable_desc.type = (enum typec_plug_type) (VDO_TYPEC_CABLE_TYPE(p[VDO_INDEX_CABLE_1]) +
+ USB_PLUG_TYPE_A);
+ port->cable_desc.active = PD_IDH_PTYPE(idh) == IDH_PTYPE_ACABLE ? 1 : 0;
+ /* Log PD Revision and additional cable VDO from negotiated revision */
+ switch (port->negotiated_rev_prime) {
+ case PD_REV30:
+ port->cable_desc.pd_revision = 0x0300;
+ if (port->cable_desc.active)
+ port->cable_ident.vdo[1] = p[VDO_INDEX_CABLE_2];
+ break;
+ case PD_REV20:
+ port->cable_desc.pd_revision = 0x0200;
+ break;
+ default:
+ port->cable_desc.pd_revision = 0x0200;
+ break;
+ }
+ port->cable_desc.identity = &port->cable_ident;
+ /* Register Cable, set identity and svdm_version */
+ port->cable = typec_register_cable(port->typec_port, &port->cable_desc);
+ if (IS_ERR_OR_NULL(port->cable))
+ return;
+ typec_cable_set_identity(port->cable);
+ /* Get SVDM version */
+ svdm_version = PD_VDO_SVDM_VER(p[VDO_INDEX_HDR]);
+ typec_cable_set_svdm_version(port->cable, svdm_version);
+
+register_plug:
+ if (IS_ERR_OR_NULL(port->plug_prime)) {
+ port->plug_prime_desc.index = TYPEC_PLUG_SOP_P;
+ port->plug_prime = typec_register_plug(port->cable,
+ &port->plug_prime_desc);
+ }
+}
+
+static bool svdm_consume_svids(struct tcpm_port *port, const u32 *p, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
+{
+ struct pd_mode_data *pmdata = rx_sop_type == TCPC_TX_SOP_PRIME ?
+ &port->mode_data_prime : &port->mode_data;
int i;
for (i = 1; i < cnt; i++) {
@@ -1556,14 +1694,29 @@ abort:
return false;
}
-static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt)
+static void svdm_consume_modes(struct tcpm_port *port, const u32 *p, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
{
struct pd_mode_data *pmdata = &port->mode_data;
struct typec_altmode_desc *paltmode;
int i;
- if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
- /* Already logged in svdm_consume_svids() */
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ pmdata = &port->mode_data_prime;
+ if (pmdata->altmodes >= ARRAY_SIZE(port->plug_prime_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+ break;
+ case TCPC_TX_SOP:
+ pmdata = &port->mode_data;
+ if (pmdata->altmodes >= ARRAY_SIZE(port->partner_altmode)) {
+ /* Already logged in svdm_consume_svids() */
+ return;
+ }
+ break;
+ default:
return;
}
@@ -1601,20 +1754,129 @@ static void tcpm_register_partner_altmodes(struct tcpm_port *port)
}
}
+static void tcpm_register_plug_altmodes(struct tcpm_port *port)
+{
+ struct pd_mode_data *modep = &port->mode_data_prime;
+ struct typec_altmode *altmode;
+ int i;
+
+ typec_plug_set_num_altmodes(port->plug_prime, modep->altmodes);
+
+ for (i = 0; i < modep->altmodes; i++) {
+ altmode = typec_plug_register_altmode(port->plug_prime,
+ &modep->altmode_desc[i]);
+ if (IS_ERR(altmode)) {
+ tcpm_log(port, "Failed to register plug SVID 0x%04x",
+ modep->altmode_desc[i].svid);
+ altmode = NULL;
+ }
+ port->plug_prime_altmode[i] = altmode;
+ }
+}
+
#define supports_modal(port) PD_IDH_MODAL_SUPP((port)->partner_ident.id_header)
+#define supports_modal_cable(port) PD_IDH_MODAL_SUPP((port)->cable_ident.id_header)
+#define supports_host(port) PD_IDH_HOST_SUPP((port)->partner_ident.id_header)
+
+/*
+ * Helper to determine whether the port is capable of SOP' communication at the
+ * current point in time.
+ */
+static bool tcpm_can_communicate_sop_prime(struct tcpm_port *port)
+{
+ /* Check to see if tcpc supports SOP' communication */
+ if (!port->tcpc->cable_comm_capable || !port->tcpc->cable_comm_capable(port->tcpc))
+ return false;
+ /*
+ * Power Delivery 2.0 Section 6.3.11
+ * Before communicating with a Cable Plug a Port Should ensure that it
+ * is the Vconn Source and that the Cable Plugs are powered by
+ * performing a Vconn swap if necessary. Since it cannot be guaranteed
+ * that the present Vconn Source is supplying Vconn, the only means to
+ * ensure that the Cable Plugs are powered is for a Port wishing to
+ * communicate with a Cable Plug is to become the Vconn Source.
+ *
+ * Power Delivery 3.0 Section 6.3.11
+ * Before communicating with a Cable Plug a Port Shall ensure that it
+ * is the Vconn source.
+ */
+ if (port->vconn_role != TYPEC_SOURCE)
+ return false;
+ /*
+ * Power Delivery 2.0 Section 2.4.4
+ * When no Contract or an Implicit Contract is in place the Source can
+ * communicate with a Cable Plug using SOP' packets in order to discover
+ * its characteristics.
+ *
+ * Power Delivery 3.0 Section 2.4.4
+ * When no Contract or an Implicit Contract is in place only the Source
+ * port that is supplying Vconn is allowed to send packets to a Cable
+ * Plug and is allowed to respond to packets from the Cable Plug.
+ */
+ if (!port->explicit_contract)
+ return port->pwr_role == TYPEC_SOURCE;
+ if (port->negotiated_rev == PD_REV30)
+ return true;
+ /*
+ * Power Delivery 2.0 Section 2.4.4
+ *
+ * When an Explicit Contract is in place the DFP (either the Source or
+ * the Sink) can communicate with the Cable Plug(s) using SOP’/SOP”
+ * Packets (see Figure 2-3).
+ */
+ if (port->negotiated_rev == PD_REV20)
+ return port->data_role == TYPEC_HOST;
+ return false;
+}
+
+static bool tcpm_attempt_vconn_swap_discovery(struct tcpm_port *port)
+{
+ if (!port->tcpc->attempt_vconn_swap_discovery)
+ return false;
+
+ /* Port is already source, no need to perform swap */
+ if (port->vconn_role == TYPEC_SOURCE)
+ return false;
+
+ /*
+ * Partner needs to support Alternate Modes with modal support. If
+ * partner is also capable of being a USB Host, it could be a device
+ * that supports Alternate Modes as the DFP.
+ */
+ if (!supports_modal(port) || supports_host(port))
+ return false;
+
+ if ((port->negotiated_rev == PD_REV20 && port->data_role == TYPEC_HOST) ||
+ port->negotiated_rev == PD_REV30)
+ return port->tcpc->attempt_vconn_swap_discovery(port->tcpc);
+
+ return false;
+}
+
+static bool tcpm_cable_vdm_supported(struct tcpm_port *port)
+{
+ return !IS_ERR_OR_NULL(port->cable) &&
+ typec_cable_is_active(port->cable) &&
+ supports_modal_cable(port) &&
+ tcpm_can_communicate_sop_prime(port);
+}
static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
const u32 *p, int cnt, u32 *response,
- enum adev_actions *adev_action)
+ enum adev_actions *adev_action,
+ enum tcpm_transmit_type rx_sop_type,
+ enum tcpm_transmit_type *response_tx_sop_type)
{
struct typec_port *typec = port->typec_port;
- struct typec_altmode *pdev;
- struct pd_mode_data *modep;
+ struct typec_altmode *pdev, *pdev_prime;
+ struct pd_mode_data *modep, *modep_prime;
int svdm_version;
int rlen = 0;
int cmd_type;
int cmd;
int i;
+ int ret;
cmd_type = PD_VDO_CMDT(p[0]);
cmd = PD_VDO_CMD(p[0]);
@@ -1622,17 +1884,54 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
tcpm_log(port, "Rx VDM cmd 0x%x type %d cmd %d len %d",
p[0], cmd_type, cmd, cnt);
- modep = &port->mode_data;
-
- pdev = typec_match_altmode(port->partner_altmode, ALTMODE_DISCOVERY_MAX,
- PD_VDO_VID(p[0]), PD_VDO_OPOS(p[0]));
-
- svdm_version = typec_get_negotiated_svdm_version(typec);
- if (svdm_version < 0)
- return 0;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ modep_prime = &port->mode_data_prime;
+ pdev_prime = typec_match_altmode(port->plug_prime_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_cable_svdm_version(typec);
+ /*
+ * Update SVDM version if cable was discovered before port partner.
+ */
+ if (!IS_ERR_OR_NULL(port->cable) &&
+ PD_VDO_SVDM_VER(p[0]) < svdm_version)
+ typec_cable_set_svdm_version(port->cable, svdm_version);
+ break;
+ case TCPC_TX_SOP:
+ modep = &port->mode_data;
+ pdev = typec_match_altmode(port->partner_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+ break;
+ default:
+ modep = &port->mode_data;
+ pdev = typec_match_altmode(port->partner_altmode,
+ ALTMODE_DISCOVERY_MAX,
+ PD_VDO_VID(p[0]),
+ PD_VDO_OPOS(p[0]));
+ svdm_version = typec_get_negotiated_svdm_version(typec);
+ if (svdm_version < 0)
+ return 0;
+ break;
+ }
switch (cmd_type) {
case CMDT_INIT:
+ /*
+ * Only the port or port partner is allowed to initialize SVDM
+ * commands over SOP'. In case the port partner initializes a
+ * sequence when it is not allowed to send SOP' messages, drop
+ * the message should the TCPM port try to process it.
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME)
+ return 0;
+
switch (cmd) {
case CMD_DISCOVER_IDENT:
if (PD_VDO_VID(p[0]) != USB_SID_PD)
@@ -1699,55 +1998,186 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
(VDO_SVDM_VERS(typec_get_negotiated_svdm_version(typec)));
break;
case CMDT_RSP_ACK:
- /* silently drop message if we are not connected */
- if (IS_ERR_OR_NULL(port->partner))
+ /*
+ * Silently drop message if we are not connected, but can process
+ * if SOP' Discover Identity prior to explicit contract.
+ */
+ if (IS_ERR_OR_NULL(port->partner) &&
+ !(rx_sop_type == TCPC_TX_SOP_PRIME && cmd == CMD_DISCOVER_IDENT))
break;
tcpm_ams_finish(port);
switch (cmd) {
+ /*
+ * SVDM Command Flow for SOP and SOP':
+ * SOP Discover Identity
+ * SOP' Discover Identity
+ * SOP Discover SVIDs
+ * Discover Modes
+ * (Active Cables)
+ * SOP' Discover SVIDs
+ * Discover Modes
+ *
+ * Perform Discover SOP' if the port can communicate with cable
+ * plug.
+ */
case CMD_DISCOVER_IDENT:
- if (PD_VDO_SVDM_VER(p[0]) < svdm_version)
- typec_partner_set_svdm_version(port->partner,
- PD_VDO_SVDM_VER(p[0]));
- /* 6.4.4.3.1 */
- svdm_consume_identity(port, p, cnt);
- response[0] = VDO(USB_SID_PD, 1, typec_get_negotiated_svdm_version(typec),
- CMD_DISCOVER_SVID);
- rlen = 1;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP:
+ if (PD_VDO_SVDM_VER(p[0]) < svdm_version) {
+ typec_partner_set_svdm_version(port->partner,
+ PD_VDO_SVDM_VER(p[0]));
+ /* If cable is discovered before partner, downgrade svdm */
+ if (!IS_ERR_OR_NULL(port->cable) &&
+ (typec_get_cable_svdm_version(port->typec_port) >
+ svdm_version))
+ typec_cable_set_svdm_version(port->cable,
+ svdm_version);
+ }
+ /* 6.4.4.3.1 */
+ svdm_consume_identity(port, p, cnt);
+ /* Attempt Vconn swap, delay SOP' discovery if necessary */
+ if (tcpm_attempt_vconn_swap_discovery(port)) {
+ port->send_discover_prime = true;
+ port->upcoming_state = VCONN_SWAP_SEND;
+ ret = tcpm_ams_start(port, VCONN_SWAP);
+ if (!ret)
+ return 0;
+ /* Cannot perform Vconn swap */
+ port->upcoming_state = INVALID_STATE;
+ port->send_discover_prime = false;
+ }
+
+ /*
+ * Attempt Discover Identity on SOP' if the
+ * cable was not discovered previously, and use
+ * the SVDM version of the partner to probe.
+ */
+ if (IS_ERR_OR_NULL(port->cable) &&
+ tcpm_can_communicate_sop_prime(port)) {
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ port->send_discover_prime = true;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_IDENT);
+ rlen = 1;
+ } else {
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ }
+ break;
+ case TCPC_TX_SOP_PRIME:
+ /*
+ * svdm_consume_identity_sop_prime will determine
+ * the svdm_version for the cable moving forward.
+ */
+ svdm_consume_identity_sop_prime(port, p, cnt);
+
+ /*
+ * If received in SRC_VDM_IDENTITY_REQUEST, continue
+ * to SRC_SEND_CAPABILITIES
+ */
+ if (port->state == SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ return 0;
+ }
+
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ break;
+ default:
+ return 0;
+ }
break;
case CMD_DISCOVER_SVID:
+ *response_tx_sop_type = rx_sop_type;
/* 6.4.4.3.2 */
- if (svdm_consume_svids(port, p, cnt)) {
+ if (svdm_consume_svids(port, p, cnt, rx_sop_type)) {
response[0] = VDO(USB_SID_PD, 1, svdm_version, CMD_DISCOVER_SVID);
rlen = 1;
- } else if (modep->nsvids && supports_modal(port)) {
- response[0] = VDO(modep->svids[0], 1, svdm_version,
- CMD_DISCOVER_MODES);
- rlen = 1;
+ } else {
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (modep->nsvids && supports_modal(port)) {
+ response[0] = VDO(modep->svids[0], 1, svdm_version,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (modep_prime->nsvids) {
+ response[0] = VDO(modep_prime->svids[0], 1,
+ svdm_version, CMD_DISCOVER_MODES);
+ rlen = 1;
+ }
+ }
}
break;
case CMD_DISCOVER_MODES:
- /* 6.4.4.3.3 */
- svdm_consume_modes(port, p, cnt);
- modep->svid_index++;
- if (modep->svid_index < modep->nsvids) {
- u16 svid = modep->svids[modep->svid_index];
- response[0] = VDO(svid, 1, svdm_version, CMD_DISCOVER_MODES);
- rlen = 1;
- } else {
- tcpm_register_partner_altmodes(port);
+ if (rx_sop_type == TCPC_TX_SOP) {
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, p, cnt, rx_sop_type);
+ modep->svid_index++;
+ if (modep->svid_index < modep->nsvids) {
+ u16 svid = modep->svids[modep->svid_index];
+ *response_tx_sop_type = TCPC_TX_SOP;
+ response[0] = VDO(svid, 1, svdm_version,
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else if (tcpm_cable_vdm_supported(port)) {
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_cable_svdm_version(typec),
+ CMD_DISCOVER_SVID);
+ rlen = 1;
+ } else {
+ tcpm_register_partner_altmodes(port);
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ /* 6.4.4.3.3 */
+ svdm_consume_modes(port, p, cnt, rx_sop_type);
+ modep_prime->svid_index++;
+ if (modep_prime->svid_index < modep_prime->nsvids) {
+ u16 svid = modep_prime->svids[modep_prime->svid_index];
+ *response_tx_sop_type = TCPC_TX_SOP_PRIME;
+ response[0] = VDO(svid, 1,
+ typec_get_cable_svdm_version(typec),
+ CMD_DISCOVER_MODES);
+ rlen = 1;
+ } else {
+ tcpm_register_plug_altmodes(port);
+ tcpm_register_partner_altmodes(port);
+ }
}
break;
case CMD_ENTER_MODE:
- if (adev && pdev)
- *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ *response_tx_sop_type = rx_sop_type;
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, true);
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ }
+ } else if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (adev && pdev_prime) {
+ typec_altmode_update_active(pdev_prime, true);
+ *adev_action = ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL;
+ }
+ }
return 0;
case CMD_EXIT_MODE:
- if (adev && pdev) {
- /* Back to USB Operation */
- *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
- return 0;
+ *response_tx_sop_type = rx_sop_type;
+ if (rx_sop_type == TCPC_TX_SOP) {
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, false);
+ /* Back to USB Operation */
+ *adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
+ return 0;
+ }
}
break;
case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
@@ -1800,13 +2230,15 @@ static void tcpm_pd_handle_msg(struct tcpm_port *port,
enum tcpm_ams ams);
static void tcpm_handle_vdm_request(struct tcpm_port *port,
- const __le32 *payload, int cnt)
+ const __le32 *payload, int cnt,
+ enum tcpm_transmit_type rx_sop_type)
{
enum adev_actions adev_action = ADEV_NONE;
struct typec_altmode *adev;
u32 p[PD_MAX_PAYLOAD];
u32 response[8] = { };
int i, rlen = 0;
+ enum tcpm_transmit_type response_tx_sop_type = TCPC_TX_SOP;
for (i = 0; i < cnt; i++)
p[i] = le32_to_cpu(payload[i]);
@@ -1841,7 +2273,8 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
* - We will send NAK and the flag will be cleared in the state machine.
*/
port->vdm_sm_running = true;
- rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+ rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action,
+ rx_sop_type, &response_tx_sop_type);
} else {
if (port->negotiated_rev >= PD_REV30)
tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
@@ -1877,19 +2310,37 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
typec_altmode_vdm(adev, p[0], &p[1], cnt);
break;
case ADEV_QUEUE_VDM:
- typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ if (response_tx_sop_type == TCPC_TX_SOP_PRIME)
+ typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P, p[0], &p[1], cnt);
+ else
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
break;
case ADEV_QUEUE_VDM_SEND_EXIT_MODE_ON_FAIL:
- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- int svdm_version = typec_get_negotiated_svdm_version(
- port->typec_port);
- if (svdm_version < 0)
- break;
-
- response[0] = VDO(adev->svid, 1, svdm_version,
- CMD_EXIT_MODE);
- response[0] |= VDO_OPOS(adev->mode);
- rlen = 1;
+ if (response_tx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (typec_cable_altmode_vdm(adev, TYPEC_PLUG_SOP_P,
+ p[0], &p[1], cnt)) {
+ int svdm_version = typec_get_cable_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
+ } else {
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ int svdm_version = typec_get_negotiated_svdm_version(
+ port->typec_port);
+ if (svdm_version < 0)
+ break;
+
+ response[0] = VDO(adev->svid, 1, svdm_version,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ rlen = 1;
+ }
}
break;
case ADEV_ATTENTION:
@@ -1909,19 +2360,38 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
mutex_lock(&port->lock);
if (rlen > 0)
- tcpm_queue_vdm(port, response[0], &response[1], rlen - 1);
+ tcpm_queue_vdm(port, response[0], &response[1], rlen - 1, response_tx_sop_type);
else
port->vdm_sm_running = false;
}
static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
- const u32 *data, int count)
+ const u32 *data, int count, enum tcpm_transmit_type tx_sop_type)
{
- int svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ int svdm_version;
u32 header;
- if (svdm_version < 0)
- return;
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ /*
+ * If the port partner is discovered, then the port partner's
+ * SVDM Version will be returned
+ */
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ svdm_version = SVDM_VER_MAX;
+ break;
+ case TCPC_TX_SOP:
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return;
+ break;
+ default:
+ svdm_version = typec_get_negotiated_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return;
+ break;
+ }
if (WARN_ON(count > VDO_MAX_SIZE - 1))
count = VDO_MAX_SIZE - 1;
@@ -1930,7 +2400,7 @@ static void tcpm_send_vdm(struct tcpm_port *port, u32 vid, int cmd,
header = VDO(vid, ((vid & USB_SID_PD) == USB_SID_PD) ?
1 : (PD_VDO_CMD(cmd) <= CMD_ATTENTION),
svdm_version, cmd);
- tcpm_queue_vdm(port, header, data, count);
+ tcpm_queue_vdm(port, header, data, count, tx_sop_type);
}
static unsigned int vdm_ready_timeout(u32 vdm_hdr)
@@ -1964,6 +2434,7 @@ static void vdm_run_state_machine(struct tcpm_port *port)
struct pd_message msg;
int i, res = 0;
u32 vdo_hdr = port->vdo_data[0];
+ u32 response[8] = { };
switch (port->vdm_state) {
case VDM_STATE_READY:
@@ -1977,7 +2448,8 @@ static void vdm_run_state_machine(struct tcpm_port *port)
* if there's traffic or we're not in PDO ready state don't send
* a VDM.
*/
- if (port->state != SRC_READY && port->state != SNK_READY) {
+ if (port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) {
port->vdm_sm_running = false;
break;
}
@@ -1988,7 +2460,17 @@ static void vdm_run_state_machine(struct tcpm_port *port)
case CMD_DISCOVER_IDENT:
res = tcpm_ams_start(port, DISCOVER_IDENTITY);
if (res == 0) {
- port->send_discover = false;
+ switch (port->tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ port->send_discover_prime = false;
+ break;
+ case TCPC_TX_SOP:
+ port->send_discover = false;
+ break;
+ default:
+ port->send_discover = false;
+ break;
+ }
} else if (res == -EAGAIN) {
port->vdo_data[0] = 0;
mod_send_discover_delayed_work(port,
@@ -2044,12 +2526,21 @@ static void vdm_run_state_machine(struct tcpm_port *port)
break;
case VDM_STATE_ERR_SEND:
/*
+ * When sending Discover Identity to SOP' before establishing an
+ * explicit contract, do not retry. Instead, weave sending
+ * Source_Capabilities over SOP and Discover Identity over SOP'.
+ */
+ if (port->state == SRC_VDM_IDENTITY_REQUEST) {
+ tcpm_ams_finish(port);
+ port->vdm_state = VDM_STATE_DONE;
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ /*
* A partner which does not support USB PD will not reply,
* so this is not a fatal error. At the same time, some
* devices may not return GoodCRC under some circumstances,
* so we need to retry.
*/
- if (port->vdm_retries < 3) {
+ } else if (port->vdm_retries < 3) {
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
@@ -2057,19 +2548,59 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_ams_finish(port);
} else {
tcpm_ams_finish(port);
+ if (port->tx_sop_type == TCPC_TX_SOP)
+ break;
+ /* Handle SOP' Transmission Errors */
+ switch (PD_VDO_CMD(vdo_hdr)) {
+ /*
+ * If Discover Identity fails on SOP', then resume
+ * discovery process on SOP only.
+ */
+ case CMD_DISCOVER_IDENT:
+ port->vdo_data[0] = 0;
+ response[0] = VDO(USB_SID_PD, 1,
+ typec_get_negotiated_svdm_version(
+ port->typec_port),
+ CMD_DISCOVER_SVID);
+ tcpm_queue_vdm(port, response[0], &response[1],
+ 0, TCPC_TX_SOP);
+ break;
+ /*
+ * If Discover SVIDs or Discover Modes fail, then
+ * proceed with Alt Mode discovery process on SOP.
+ */
+ case CMD_DISCOVER_SVID:
+ tcpm_register_partner_altmodes(port);
+ break;
+ case CMD_DISCOVER_MODES:
+ tcpm_register_partner_altmodes(port);
+ break;
+ default:
+ break;
+ }
}
break;
case VDM_STATE_SEND_MESSAGE:
/* Prepare and send VDM */
memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
- port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, port->vdo_count);
+ if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ 0, /* Cable Plug Indicator for DFP/UFP */
+ 0, /* Reserved */
+ port->negotiated_rev_prime,
+ port->message_id_prime,
+ port->vdo_count);
+ } else {
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ port->vdo_count);
+ }
for (i = 0; i < port->vdo_count; i++)
msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
- res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ res = tcpm_pd_transmit(port, port->tx_sop_type, &msg);
if (res < 0) {
port->vdm_state = VDM_STATE_ERR_SEND;
} else {
@@ -2244,7 +2775,7 @@ static int tcpm_altmode_enter(struct typec_altmode *altmode, u32 *vdo)
header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0);
+ tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP);
return 0;
}
@@ -2261,7 +2792,7 @@ static int tcpm_altmode_exit(struct typec_altmode *altmode)
header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
header |= VDO_OPOS(altmode->mode);
- tcpm_queue_vdm_unlocked(port, header, NULL, 0);
+ tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP);
return 0;
}
@@ -2270,7 +2801,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode,
{
struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
- tcpm_queue_vdm_unlocked(port, header, data, count - 1);
+ tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP);
return 0;
}
@@ -2281,6 +2812,58 @@ static const struct typec_altmode_ops tcpm_altmode_ops = {
.vdm = tcpm_altmode_vdm,
};
+
+static int tcpm_cable_altmode_enter(struct typec_altmode *altmode, enum typec_plug_index sop,
+ u32 *vdo)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
+ u32 header;
+
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, vdo ? 2 : 1, svdm_version, CMD_ENTER_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm_unlocked(port, header, vdo, vdo ? 1 : 0, TCPC_TX_SOP_PRIME);
+ return 0;
+}
+
+static int tcpm_cable_altmode_exit(struct typec_altmode *altmode, enum typec_plug_index sop)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+ int svdm_version;
+ u32 header;
+
+ svdm_version = typec_get_cable_svdm_version(port->typec_port);
+ if (svdm_version < 0)
+ return svdm_version;
+
+ header = VDO(altmode->svid, 1, svdm_version, CMD_EXIT_MODE);
+ header |= VDO_OPOS(altmode->mode);
+
+ tcpm_queue_vdm_unlocked(port, header, NULL, 0, TCPC_TX_SOP_PRIME);
+ return 0;
+}
+
+static int tcpm_cable_altmode_vdm(struct typec_altmode *altmode, enum typec_plug_index sop,
+ u32 header, const u32 *data, int count)
+{
+ struct tcpm_port *port = typec_altmode_get_drvdata(altmode);
+
+ tcpm_queue_vdm_unlocked(port, header, data, count - 1, TCPC_TX_SOP_PRIME);
+
+ return 0;
+}
+
+static const struct typec_cable_ops tcpm_cable_ops = {
+ .enter = tcpm_cable_altmode_enter,
+ .exit = tcpm_cable_altmode_exit,
+ .vdm = tcpm_cable_altmode_vdm,
+};
+
/*
* PD (data, control) command handling functions
*/
@@ -2293,7 +2876,8 @@ static inline enum tcpm_state ready_state(struct tcpm_port *port)
}
static int tcpm_pd_send_control(struct tcpm_port *port,
- enum pd_ctrl_msg_type type);
+ enum pd_ctrl_msg_type type,
+ enum tcpm_transmit_type tx_sop_type);
static void tcpm_handle_alert(struct tcpm_port *port, const __le32 *payload,
int cnt)
@@ -2455,7 +3039,8 @@ static int tcpm_register_sink_caps(struct tcpm_port *port)
}
static void tcpm_pd_data_request(struct tcpm_port *port,
- const struct pd_message *msg)
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
enum pd_data_msg_type type = pd_header_type_le(msg->header);
unsigned int cnt = pd_header_cnt_le(msg->header);
@@ -2496,8 +3081,11 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
break;
}
- if (rev < PD_MAX_REV)
+ if (rev < PD_MAX_REV) {
port->negotiated_rev = rev;
+ if (port->negotiated_rev_prime > port->negotiated_rev)
+ port->negotiated_rev_prime = port->negotiated_rev;
+ }
if (port->pwr_role == TYPEC_SOURCE) {
if (port->ams == GET_SOURCE_CAPABILITIES)
@@ -2548,8 +3136,11 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
break;
}
- if (rev < PD_MAX_REV)
+ if (rev < PD_MAX_REV) {
port->negotiated_rev = rev;
+ if (port->negotiated_rev_prime > port->negotiated_rev)
+ port->negotiated_rev_prime = port->negotiated_rev;
+ }
if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
tcpm_pd_handle_msg(port,
@@ -2605,7 +3196,7 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
- tcpm_handle_vdm_request(port, msg->payload, cnt);
+ tcpm_handle_vdm_request(port, msg->payload, cnt, rx_sop_type);
break;
case PD_DATA_BIST:
port->bist_request = le32_to_cpu(msg->payload[0]);
@@ -2647,10 +3238,12 @@ static void tcpm_pps_complete(struct tcpm_port *port, int result)
}
static void tcpm_pd_ctrl_request(struct tcpm_port *port,
- const struct pd_message *msg)
+ const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
enum pd_ctrl_msg_type type = pd_header_type_le(msg->header);
enum tcpm_state next_state;
+ unsigned int rev = pd_header_rev_le(msg->header);
/*
* Stop VDM state machine if interrupted by other Messages while NOT_SUPP is allowed in
@@ -2815,6 +3408,16 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case SOFT_RESET_SEND:
if (port->ams == SOFT_RESET_AMS)
tcpm_ams_finish(port);
+ /*
+ * SOP' Soft Reset is done after Vconn Swap,
+ * which returns to ready state
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME) {
+ if (rev < port->negotiated_rev_prime)
+ port->negotiated_rev_prime = rev;
+ tcpm_set_state(port, ready_state(port), 0);
+ break;
+ }
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
tcpm_ams_start(port, POWER_NEGOTIATION);
@@ -2981,6 +3584,7 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
const struct pd_message *msg = &event->msg;
unsigned int cnt = pd_header_cnt_le(msg->header);
struct tcpm_port *port = event->port;
+ enum tcpm_transmit_type rx_sop_type = event->rx_sop_type;
mutex_lock(&port->lock);
@@ -2992,6 +3596,14 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
unsigned int msgid = pd_header_msgid_le(msg->header);
/*
+ * Drop SOP' messages if cannot receive via
+ * tcpm_can_communicate_sop_prime
+ */
+ if (rx_sop_type == TCPC_TX_SOP_PRIME &&
+ !tcpm_can_communicate_sop_prime(port))
+ goto done;
+
+ /*
* USB PD standard, 6.6.1.2:
* "... if MessageID value in a received Message is the
* same as the stored value, the receiver shall return a
@@ -3000,16 +3612,26 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
* Message). Note: this shall not apply to the Soft_Reset
* Message which always has a MessageID value of zero."
*/
- if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
- goto done;
- port->rx_msgid = msgid;
+ switch (rx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ if (msgid == port->rx_msgid_prime)
+ goto done;
+ port->rx_msgid_prime = msgid;
+ break;
+ case TCPC_TX_SOP:
+ default:
+ if (msgid == port->rx_msgid && type != PD_CTRL_SOFT_RESET)
+ goto done;
+ port->rx_msgid = msgid;
+ break;
+ }
/*
* If both ends believe to be DFP/host, we have a data role
* mismatch.
*/
if (!!(le16_to_cpu(msg->header) & PD_HEADER_DATA_ROLE) ==
- (port->data_role == TYPEC_HOST)) {
+ (port->data_role == TYPEC_HOST) && rx_sop_type == TCPC_TX_SOP) {
tcpm_log(port,
"Data role mismatch, initiating error recovery");
tcpm_set_state(port, ERROR_RECOVERY, 0);
@@ -3017,9 +3639,9 @@ static void tcpm_pd_rx_handler(struct kthread_work *work)
if (le16_to_cpu(msg->header) & PD_HEADER_EXT_HDR)
tcpm_pd_ext_msg_request(port, msg);
else if (cnt)
- tcpm_pd_data_request(port, msg);
+ tcpm_pd_data_request(port, msg, rx_sop_type);
else
- tcpm_pd_ctrl_request(port, msg);
+ tcpm_pd_ctrl_request(port, msg, rx_sop_type);
}
}
@@ -3028,7 +3650,8 @@ done:
kfree(event);
}
-void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
+void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg,
+ enum tcpm_transmit_type rx_sop_type)
{
struct pd_rx_event *event;
@@ -3038,23 +3661,47 @@ void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg)
kthread_init_work(&event->work, tcpm_pd_rx_handler);
event->port = port;
+ event->rx_sop_type = rx_sop_type;
memcpy(&event->msg, msg, sizeof(*msg));
kthread_queue_work(port->wq, &event->work);
}
EXPORT_SYMBOL_GPL(tcpm_pd_receive);
static int tcpm_pd_send_control(struct tcpm_port *port,
- enum pd_ctrl_msg_type type)
+ enum pd_ctrl_msg_type type,
+ enum tcpm_transmit_type tx_sop_type)
{
struct pd_message msg;
memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(type, port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, 0);
+ switch (tx_sop_type) {
+ case TCPC_TX_SOP_PRIME:
+ msg.header = PD_HEADER_LE(type,
+ 0, /* Cable Plug Indicator for DFP/UFP */
+ 0, /* Reserved */
+ port->negotiated_rev,
+ port->message_id_prime,
+ 0);
+ break;
+ case TCPC_TX_SOP:
+ msg.header = PD_HEADER_LE(type,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 0);
+ break;
+ default:
+ msg.header = PD_HEADER_LE(type,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id,
+ 0);
+ break;
+ }
- return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ return tcpm_pd_transmit(port, tx_sop_type, &msg);
}
/*
@@ -3073,13 +3720,13 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
switch (queued_message) {
case PD_MSG_CTRL_WAIT:
- tcpm_pd_send_control(port, PD_CTRL_WAIT);
+ tcpm_pd_send_control(port, PD_CTRL_WAIT, TCPC_TX_SOP);
break;
case PD_MSG_CTRL_REJECT:
- tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
break;
case PD_MSG_CTRL_NOT_SUPP:
- tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
break;
case PD_MSG_DATA_SINK_CAP:
ret = tcpm_pd_send_sink_caps(port);
@@ -3649,6 +4296,7 @@ static int tcpm_src_attach(struct tcpm_port *port)
port->attached = true;
port->send_discover = true;
+ port->send_discover_prime = false;
return 0;
@@ -3665,6 +4313,15 @@ out_disable_mux:
static void tcpm_typec_disconnect(struct tcpm_port *port)
{
+ /*
+ * Unregister plug/cable outside of port->connected because cable can
+ * be discovered before SRC_READY/SNK_READY states where port->connected
+ * is set.
+ */
+ typec_unregister_plug(port->plug_prime);
+ typec_unregister_cable(port->cable);
+ port->plug_prime = NULL;
+ port->cable = NULL;
if (port->connected) {
typec_partner_set_usb_power_delivery(port->partner, NULL);
typec_unregister_partner(port->partner);
@@ -3676,14 +4333,20 @@ static void tcpm_typec_disconnect(struct tcpm_port *port)
static void tcpm_unregister_altmodes(struct tcpm_port *port)
{
struct pd_mode_data *modep = &port->mode_data;
+ struct pd_mode_data *modep_prime = &port->mode_data_prime;
int i;
for (i = 0; i < modep->altmodes; i++) {
typec_unregister_altmode(port->partner_altmode[i]);
port->partner_altmode[i] = NULL;
}
+ for (i = 0; i < modep_prime->altmodes; i++) {
+ typec_unregister_altmode(port->plug_prime_altmode[i]);
+ port->plug_prime_altmode[i] = NULL;
+ }
memset(modep, 0, sizeof(*modep));
+ memset(modep_prime, 0, sizeof(*modep_prime));
}
static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
@@ -3712,6 +4375,7 @@ static void tcpm_reset_port(struct tcpm_port *port)
* we can check tcpm_pd_rx_handler() if we had seen it before.
*/
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->tcpc->set_pd_rx(port->tcpc, false);
tcpm_init_vbus(port); /* also disables charging */
@@ -3783,6 +4447,7 @@ static int tcpm_snk_attach(struct tcpm_port *port)
port->attached = true;
port->send_discover = true;
+ port->send_discover_prime = false;
return 0;
}
@@ -4023,8 +4688,11 @@ static void run_state_machine(struct tcpm_port *port)
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->caps_count = 0;
port->negotiated_rev = PD_MAX_REV;
+ port->negotiated_rev_prime = PD_MAX_REV;
port->message_id = 0;
+ port->message_id_prime = 0;
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->explicit_contract = false;
/* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
if (port->ams == POWER_ROLE_SWAP ||
@@ -4045,8 +4713,12 @@ static void run_state_machine(struct tcpm_port *port)
}
ret = tcpm_pd_send_source_caps(port);
if (ret < 0) {
- tcpm_set_state(port, SRC_SEND_CAPABILITIES,
- PD_T_SEND_SOURCE_CAP);
+ if (tcpm_can_communicate_sop_prime(port) &&
+ IS_ERR_OR_NULL(port->cable))
+ tcpm_set_state(port, SRC_VDM_IDENTITY_REQUEST, 0);
+ else
+ tcpm_set_state(port, SRC_SEND_CAPABILITIES,
+ PD_T_SEND_SOURCE_CAP);
} else {
/*
* Per standard, we should clear the reset counter here.
@@ -4087,7 +4759,7 @@ static void run_state_machine(struct tcpm_port *port)
case SRC_NEGOTIATE_CAPABILITIES:
ret = tcpm_pd_check_request(port);
if (ret < 0) {
- tcpm_pd_send_control(port, PD_CTRL_REJECT);
+ tcpm_pd_send_control(port, PD_CTRL_REJECT, TCPC_TX_SOP);
if (!port->explicit_contract) {
tcpm_set_state(port,
SRC_WAIT_NEW_CAPABILITIES, 0);
@@ -4095,7 +4767,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SRC_READY, 0);
}
} else {
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_set_partner_usb_comm_capable(port,
!!(port->sink_request & RDO_USB_COMM));
tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
@@ -4104,7 +4776,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case SRC_TRANSITION_SUPPLY:
/* XXX: regulator_set_voltage(vbus, ...) */
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
port->explicit_contract = true;
typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
port->pwr_opmode = TYPEC_PWR_MODE_PD;
@@ -4141,14 +4813,23 @@ static void run_state_machine(struct tcpm_port *port)
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
- * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
- * port->explicit_contract to decide whether to send the command.
+ *
+ * Discover Identity on SOP' should be done prior to the ready state,
+ * but if it is performed after a Vconn Swap that follows Discover
+ * Identity on SOP, the discovery process can be run here as well.
*/
if (port->explicit_contract) {
- tcpm_set_initial_svdm_version(port);
+ if (port->send_discover_prime) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ } else {
+ port->tx_sop_type = TCPC_TX_SOP;
+ tcpm_set_initial_svdm_version(port);
+ }
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
+ port->send_discover_prime = false;
}
/*
@@ -4264,8 +4945,11 @@ static void run_state_machine(struct tcpm_port *port)
typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->negotiated_rev = PD_MAX_REV;
+ port->negotiated_rev_prime = PD_MAX_REV;
port->message_id = 0;
+ port->message_id_prime = 0;
port->rx_msgid = -1;
+ port->rx_msgid_prime = -1;
port->explicit_contract = false;
if (port->ams == POWER_ROLE_SWAP ||
@@ -4437,14 +5121,23 @@ static void run_state_machine(struct tcpm_port *port)
* 6.4.4.3.1 Discover Identity
* "The Discover Identity Command Shall only be sent to SOP when there is an
* Explicit Contract."
- * For now, this driver only supports SOP for DISCOVER_IDENTITY, thus using
- * port->explicit_contract.
+ *
+ * Discover Identity on SOP' should be done prior to the ready state,
+ * but if it is performed after a Vconn Swap that follows Discover
+ * Identity on SOP, the discovery process can be run here as well.
*/
if (port->explicit_contract) {
- tcpm_set_initial_svdm_version(port);
+ if (port->send_discover_prime) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ } else {
+ port->tx_sop_type = TCPC_TX_SOP;
+ tcpm_set_initial_svdm_version(port);
+ }
mod_send_discover_delayed_work(port, 0);
} else {
port->send_discover = false;
+ port->send_discover_prime = false;
}
power_supply_changed(port->psy);
@@ -4485,6 +5178,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_unregister_altmodes(port);
port->nr_sink_caps = 0;
port->send_discover = true;
+ port->send_discover_prime = false;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF,
PD_T_PS_HARD_RESET);
@@ -4586,7 +5280,7 @@ static void run_state_machine(struct tcpm_port *port)
/* remove existing capabilities */
usb_power_delivery_unregister_capabilities(port->partner_source_caps);
port->partner_source_caps = NULL;
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_ams_finish(port);
if (port->pwr_role == TYPEC_SOURCE) {
port->upcoming_state = SRC_SEND_CAPABILITIES;
@@ -4603,35 +5297,53 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_ams_start(port, SOFT_RESET_AMS);
break;
case SOFT_RESET_SEND:
- port->message_id = 0;
- port->rx_msgid = -1;
- /* remove existing capabilities */
- usb_power_delivery_unregister_capabilities(port->partner_source_caps);
- port->partner_source_caps = NULL;
- if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET))
- tcpm_set_state_cond(port, hard_reset_state(port), 0);
- else
- tcpm_set_state_cond(port, hard_reset_state(port),
- PD_T_SENDER_RESPONSE);
+ /*
+ * Power Delivery 3.0 Section 6.3.13
+ *
+ * A Soft_Reset Message Shall be targeted at a specific entity
+ * depending on the type of SOP* packet used.
+ */
+ if (port->tx_sop_type == TCPC_TX_SOP_PRIME) {
+ port->message_id_prime = 0;
+ port->rx_msgid_prime = -1;
+ tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP_PRIME);
+ tcpm_set_state_cond(port, ready_state(port), PD_T_SENDER_RESPONSE);
+ } else {
+ port->message_id = 0;
+ port->rx_msgid = -1;
+ /* remove existing capabilities */
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
+ if (tcpm_pd_send_control(port, PD_CTRL_SOFT_RESET, TCPC_TX_SOP))
+ tcpm_set_state_cond(port, hard_reset_state(port), 0);
+ else
+ tcpm_set_state_cond(port, hard_reset_state(port),
+ PD_T_SENDER_RESPONSE);
+ }
break;
/* DR_Swap states */
case DR_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_DR_SWAP);
- if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+ tcpm_pd_send_control(port, PD_CTRL_DR_SWAP, TCPC_TX_SOP);
+ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
port->send_discover = true;
+ port->send_discover_prime = false;
+ }
tcpm_set_state_cond(port, DR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
case DR_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
- if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20)
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
+ if (port->data_role == TYPEC_DEVICE || port->negotiated_rev > PD_REV20) {
port->send_discover = true;
+ port->send_discover_prime = false;
+ }
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
port->send_discover = false;
+ port->send_discover_prime = false;
tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -4648,7 +5360,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case FR_SWAP_SEND:
- if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4668,7 +5380,7 @@ static void run_state_machine(struct tcpm_port *port)
break;
case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
tcpm_set_pwr_role(port, TYPEC_SOURCE);
- if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4678,11 +5390,11 @@ static void run_state_machine(struct tcpm_port *port)
/* PR_Swap states */
case PR_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_set_state(port, PR_SWAP_START, 0);
break;
case PR_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_PR_SWAP);
+ tcpm_pd_send_control(port, PD_CTRL_PR_SWAP, TCPC_TX_SOP);
tcpm_set_state_cond(port, PR_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4724,7 +5436,7 @@ static void run_state_machine(struct tcpm_port *port)
* supply is turned off"
*/
tcpm_set_pwr_role(port, TYPEC_SINK);
- if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) {
+ if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP)) {
tcpm_set_state(port, ERROR_RECOVERY, 0);
break;
}
@@ -4771,17 +5483,17 @@ static void run_state_machine(struct tcpm_port *port)
* Source."
*/
tcpm_set_pwr_role(port, TYPEC_SOURCE);
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START);
break;
case VCONN_SWAP_ACCEPT:
- tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT, TCPC_TX_SOP);
tcpm_ams_finish(port);
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
case VCONN_SWAP_SEND:
- tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP);
+ tcpm_pd_send_control(port, PD_CTRL_VCONN_SWAP, TCPC_TX_SOP);
tcpm_set_state(port, VCONN_SWAP_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4800,14 +5512,34 @@ static void run_state_machine(struct tcpm_port *port)
PD_T_VCONN_SOURCE_ON);
break;
case VCONN_SWAP_TURN_ON_VCONN:
- tcpm_set_vconn(port, true);
- tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
- tcpm_set_state(port, ready_state(port), 0);
+ ret = tcpm_set_vconn(port, true);
+ tcpm_pd_send_control(port, PD_CTRL_PS_RDY, TCPC_TX_SOP);
+ /*
+ * USB PD 3.0 Section 6.4.4.3.1
+ *
+ * Note that a Cable Plug or VPD will not be ready for PD
+ * Communication until tVCONNStable after VCONN has been applied
+ */
+ if (!ret)
+ tcpm_set_state(port, VCONN_SWAP_SEND_SOFT_RESET,
+ PD_T_VCONN_STABLE);
+ else
+ tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
tcpm_set_state(port, ready_state(port), 0);
break;
+ case VCONN_SWAP_SEND_SOFT_RESET:
+ tcpm_swap_complete(port, port->swap_status);
+ if (tcpm_can_communicate_sop_prime(port)) {
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ port->upcoming_state = SOFT_RESET_SEND;
+ tcpm_ams_start(port, SOFT_RESET_AMS);
+ } else {
+ tcpm_set_state(port, ready_state(port), 0);
+ }
+ break;
case DR_SWAP_CANCEL:
case PR_SWAP_CANCEL:
@@ -4843,7 +5575,7 @@ static void run_state_machine(struct tcpm_port *port)
}
break;
case GET_STATUS_SEND:
- tcpm_pd_send_control(port, PD_CTRL_GET_STATUS);
+ tcpm_pd_send_control(port, PD_CTRL_GET_STATUS, TCPC_TX_SOP);
tcpm_set_state(port, GET_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4851,7 +5583,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_PPS_STATUS_SEND:
- tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS);
+ tcpm_pd_send_control(port, PD_CTRL_GET_PPS_STATUS, TCPC_TX_SOP);
tcpm_set_state(port, GET_PPS_STATUS_SEND_TIMEOUT,
PD_T_SENDER_RESPONSE);
break;
@@ -4859,7 +5591,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, ready_state(port), 0);
break;
case GET_SINK_CAP:
- tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP);
+ tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP, TCPC_TX_SOP);
tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE);
break;
case GET_SINK_CAP_TIMEOUT:
@@ -4902,9 +5634,18 @@ static void run_state_machine(struct tcpm_port *port)
/* Chunk state */
case CHUNK_NOT_SUPP:
- tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP, TCPC_TX_SOP);
tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
break;
+
+ /* Cable states */
+ case SRC_VDM_IDENTITY_REQUEST:
+ port->send_discover_prime = true;
+ port->tx_sop_type = TCPC_TX_SOP_PRIME;
+ mod_send_discover_delayed_work(port, 0);
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ break;
+
default:
WARN(1, "Unexpected port state %d\n", port->state);
break;
@@ -5596,7 +6337,8 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
goto unlock;
/* Send when the state machine is idle */
- if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover)
+ if (port->state != SNK_READY || port->vdm_sm_running || port->send_discover ||
+ port->send_discover_prime)
goto resched;
port->upcoming_state = GET_SINK_CAP;
@@ -5619,21 +6361,23 @@ static void tcpm_send_discover_work(struct kthread_work *work)
mutex_lock(&port->lock);
/* No need to send DISCOVER_IDENTITY anymore */
- if (!port->send_discover)
+ if (!port->send_discover && !port->send_discover_prime)
goto unlock;
if (port->data_role == TYPEC_DEVICE && port->negotiated_rev < PD_REV30) {
port->send_discover = false;
+ port->send_discover_prime = false;
goto unlock;
}
/* Retry if the port is not idle */
- if ((port->state != SRC_READY && port->state != SNK_READY) || port->vdm_sm_running) {
+ if ((port->state != SRC_READY && port->state != SNK_READY &&
+ port->state != SRC_VDM_IDENTITY_REQUEST) || port->vdm_sm_running) {
mod_send_discover_delayed_work(port, SEND_DISCOVER_RETRY_MS);
goto unlock;
}
- tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0);
+ tcpm_send_vdm(port, USB_SID_PD, CMD_DISCOVER_IDENT, NULL, 0, port->tx_sop_type);
unlock:
mutex_unlock(&port->lock);
@@ -6860,6 +7604,8 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
typec_port_register_altmodes(port->typec_port,
&tcpm_altmode_ops, port,
port->port_altmode, ALTMODE_DISCOVERY_MAX);
+ typec_port_register_cable_ops(port->port_altmode, ARRAY_SIZE(port->port_altmode),
+ &tcpm_cable_ops);
port->registered = true;
mutex_lock(&port->lock);
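The tcpm changes above give SOP' traffic its own bookkeeping (message_id_prime, rx_msgid_prime, negotiated_rev_prime) next to the existing SOP fields. A minimal sketch of resetting the cable-facing counters in one place, using only fields introduced by this patch (the helper name itself is hypothetical, not part of the patch):

	static void tcpm_reset_sop_prime_counters(struct tcpm_port *port)
	{
		port->message_id_prime = 0;	/* next MessageID sent toward the cable */
		port->rx_msgid_prime = -1;	/* no SOP' message received yet */
		port->negotiated_rev_prime = PD_MAX_REV;
	}

This mirrors what SOFT_RESET_SEND and the SRC/SNK startup states do inline for the SOP' link.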
diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
index 87d4abde0ea2..cf719307b3f6 100644
--- a/drivers/usb/typec/tcpm/wcove.c
+++ b/drivers/usb/typec/tcpm/wcove.c
@@ -535,7 +535,7 @@ static irqreturn_t wcove_typec_irq(int irq, void *data)
goto err;
}
- tcpm_pd_receive(wcove->tcpm, &msg);
+ tcpm_pd_receive(wcove->tcpm, &msg, TCPC_TX_SOP);
ret = regmap_read(wcove->regmap, USBC_RXSTATUS,
&status);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 14f5a7bfae2e..cf52cb34d285 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -36,6 +36,19 @@
*/
#define UCSI_SWAP_TIMEOUT_MS 5000
+static int ucsi_read_message_in(struct ucsi *ucsi, void *buf,
+ size_t buf_size)
+{
+ /*
+ * Below UCSI 2.0, MESSAGE_IN was limited to 16 bytes. Truncate the
+ * reads here.
+ */
+ if (ucsi->version <= UCSI_VERSION_1_2)
+ buf_size = clamp(buf_size, 0, 16);
+
+ return ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, buf, buf_size);
+}
+
static int ucsi_acknowledge_command(struct ucsi *ucsi)
{
u64 ctrl;
@@ -72,7 +85,7 @@ static int ucsi_read_error(struct ucsi *ucsi)
if (ret < 0)
return ret;
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, &error, sizeof(error));
+ ret = ucsi_read_message_in(ucsi, &error, sizeof(error));
if (ret)
return ret;
@@ -170,7 +183,7 @@ int ucsi_send_command(struct ucsi *ucsi, u64 command,
length = ret;
if (data) {
- ret = ucsi->ops->read(ucsi, UCSI_MESSAGE_IN, data, size);
+ ret = ucsi_read_message_in(ucsi, data, size);
if (ret)
goto out;
}
@@ -386,6 +399,27 @@ static int ucsi_register_altmode(struct ucsi_connector *con,
con->partner_altmode[i] = alt;
break;
+ case UCSI_RECIPIENT_SOP_P:
+ i = ucsi_next_altmode(con->plug_altmode);
+ if (i < 0) {
+ ret = i;
+ goto err;
+ }
+
+ ret = ucsi_altmode_next_mode(con->plug_altmode, desc->svid);
+ if (ret < 0)
+ return ret;
+
+ desc->mode = ret;
+
+ alt = typec_plug_register_altmode(con->plug, desc);
+ if (IS_ERR(alt)) {
+ ret = PTR_ERR(alt);
+ goto err;
+ }
+
+ con->plug_altmode[i] = alt;
+ break;
default:
return -EINVAL;
}
@@ -553,6 +587,9 @@ static void ucsi_unregister_altmodes(struct ucsi_connector *con, u8 recipient)
case UCSI_RECIPIENT_SOP:
adev = con->partner_altmode;
break;
+ case UCSI_RECIPIENT_SOP_P:
+ adev = con->plug_altmode;
+ break;
default:
return;
}
@@ -633,6 +670,108 @@ static int ucsi_get_src_pdos(struct ucsi_connector *con)
return ret;
}
+static int ucsi_read_identity(struct ucsi_connector *con, u8 recipient,
+ u8 offset, u8 bytes, void *resp)
+{
+ struct ucsi *ucsi = con->ucsi;
+ u64 command;
+ int ret;
+
+ command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) |
+ UCSI_CONNECTOR_NUMBER(con->num);
+ command |= UCSI_GET_PD_MESSAGE_RECIPIENT(recipient);
+ command |= UCSI_GET_PD_MESSAGE_OFFSET(offset);
+ command |= UCSI_GET_PD_MESSAGE_BYTES(bytes);
+ command |= UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);
+
+ ret = ucsi_send_command(ucsi, command, resp, bytes);
+ if (ret < 0)
+ dev_err(ucsi->dev, "UCSI_GET_PD_MESSAGE failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int ucsi_get_identity(struct ucsi_connector *con, u8 recipient,
+ struct usb_pd_identity *id)
+{
+ struct ucsi *ucsi = con->ucsi;
+ struct ucsi_pd_message_disc_id resp = {};
+ int ret;
+
+ if (ucsi->version < UCSI_VERSION_2_0) {
+ /*
+ * Before UCSI v2.0, MESSAGE_IN is 16 bytes which cannot fit
+ * the 28 byte identity response including the VDM header.
+ * First request the VDM header, ID Header VDO, Cert Stat VDO
+ * and Product VDO.
+ */
+ ret = ucsi_read_identity(con, recipient, 0, 0x10, &resp);
+ if (ret < 0)
+ return ret;
+
+ /* Then request Product Type VDO1 through Product Type VDO3. */
+ ret = ucsi_read_identity(con, recipient, 0x10, 0xc,
+ &resp.vdo[0]);
+ if (ret < 0)
+ return ret;
+
+ } else {
+ /*
+ * In UCSI v2.0 and after, MESSAGE_IN is large enough to request the
+ * full Discover Identity response at once.
+ */
+ ret = ucsi_read_identity(con, recipient, 0x0, 0x1c, &resp);
+ if (ret < 0)
+ return ret;
+ }
+
+ id->id_header = resp.id_header;
+ id->cert_stat = resp.cert_stat;
+ id->product = resp.product;
+ id->vdo[0] = resp.vdo[0];
+ id->vdo[1] = resp.vdo[1];
+ id->vdo[2] = resp.vdo[2];
+ return 0;
+}
+
+static int ucsi_get_partner_identity(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP,
+ &con->partner_identity);
+ if (ret < 0)
+ return ret;
+
+ ret = typec_partner_set_identity(con->partner);
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "Failed to set partner identity (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int ucsi_get_cable_identity(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_get_identity(con, UCSI_RECIPIENT_SOP_P,
+ &con->cable_identity);
+ if (ret < 0)
+ return ret;
+
+ ret = typec_cable_set_identity(con->cable);
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "Failed to set cable identity (%d)\n",
+ ret);
+ }
+
+ return ret;
+}
+
static int ucsi_check_altmodes(struct ucsi_connector *con)
{
int ret, num_partner_am;
@@ -721,6 +860,82 @@ static void ucsi_unregister_partner_pdos(struct ucsi_connector *con)
con->partner_pd = NULL;
}
+static int ucsi_register_plug(struct ucsi_connector *con)
+{
+ struct typec_plug *plug;
+ struct typec_plug_desc desc = {.index = TYPEC_PLUG_SOP_P};
+
+ plug = typec_register_plug(con->cable, &desc);
+ if (IS_ERR(plug)) {
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register plug (%ld)\n", con->num,
+ PTR_ERR(plug));
+ return PTR_ERR(plug);
+ }
+
+ con->plug = plug;
+ return 0;
+}
+
+static void ucsi_unregister_plug(struct ucsi_connector *con)
+{
+ if (!con->plug)
+ return;
+
+ ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ typec_unregister_plug(con->plug);
+ con->plug = NULL;
+}
+
+static int ucsi_register_cable(struct ucsi_connector *con)
+{
+ struct typec_cable *cable;
+ struct typec_cable_desc desc = {};
+
+ switch (UCSI_CABLE_PROP_FLAG_PLUG_TYPE(con->cable_prop.flags)) {
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_A:
+ desc.type = USB_PLUG_TYPE_A;
+ break;
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_B:
+ desc.type = USB_PLUG_TYPE_B;
+ break;
+ case UCSI_CABLE_PROPERTY_PLUG_TYPE_C:
+ desc.type = USB_PLUG_TYPE_C;
+ break;
+ default:
+ desc.type = USB_PLUG_NONE;
+ break;
+ }
+
+ desc.identity = &con->cable_identity;
+ desc.active = !!(UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE &
+ con->cable_prop.flags);
+ desc.pd_revision = UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(
+ con->cable_prop.flags);
+
+ cable = typec_register_cable(con->port, &desc);
+ if (IS_ERR(cable)) {
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register cable (%ld)\n", con->num,
+ PTR_ERR(cable));
+ return PTR_ERR(cable);
+ }
+
+ con->cable = cable;
+ return 0;
+}
+
+static void ucsi_unregister_cable(struct ucsi_connector *con)
+{
+ if (!con->cable)
+ return;
+
+ ucsi_unregister_plug(con);
+ typec_unregister_cable(con->cable);
+ memset(&con->cable_identity, 0, sizeof(con->cable_identity));
+ con->cable = NULL;
+}
+
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
{
switch (UCSI_CONSTAT_PWR_OPMODE(con->status.flags)) {
@@ -768,7 +983,9 @@ static int ucsi_register_partner(struct ucsi_connector *con)
break;
}
+ desc.identity = &con->partner_identity;
desc.usb_pd = pwr_opmode == UCSI_CONSTAT_PWR_OPMODE_PD;
+ desc.pd_revision = UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags);
partner = typec_register_partner(con->port, &desc);
if (IS_ERR(partner)) {
@@ -793,7 +1010,9 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
typec_partner_set_usb_power_delivery(con->partner, NULL);
ucsi_unregister_partner_pdos(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_SOP);
+ ucsi_unregister_cable(con);
typec_unregister_partner(con->partner);
+ memset(&con->partner_identity, 0, sizeof(con->partner_identity));
con->partner = NULL;
}
@@ -843,6 +1062,27 @@ static void ucsi_partner_change(struct ucsi_connector *con)
con->num, u_role);
}
+static int ucsi_check_connector_capability(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (!con->partner || con->ucsi->version < UCSI_VERSION_2_0)
+ return 0;
+
+ command = UCSI_GET_CONNECTOR_CAPABILITY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cap, sizeof(con->cap));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CONNECTOR_CAPABILITY failed (%d)\n", ret);
+ return ret;
+ }
+
+ typec_partner_set_pd_revision(con->partner,
+ UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(con->cap.flags));
+
+ return ret;
+}
+
static int ucsi_check_connection(struct ucsi_connector *con)
{
u8 prev_flags = con->status.flags;
@@ -872,6 +1112,42 @@ static int ucsi_check_connection(struct ucsi_connector *con)
return 0;
}
+static int ucsi_check_cable(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ if (con->cable)
+ return 0;
+
+ command = UCSI_GET_CABLE_PROPERTY | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->cable_prop,
+ sizeof(con->cable_prop));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CABLE_PROPERTY failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ ret = ucsi_register_cable(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_get_cable_identity(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_register_plug(con);
+ if (ret < 0)
+ return ret;
+
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP_P);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void ucsi_handle_connector_change(struct work_struct *work)
{
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
@@ -912,6 +1188,9 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_connector_capability, 1, HZ);
+ ucsi_partner_task(con, ucsi_get_partner_identity, 1, HZ);
+ ucsi_partner_task(con, ucsi_check_cable, 1, HZ);
if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
UCSI_CONSTAT_PWR_OPMODE_PD)
@@ -1310,6 +1589,8 @@ static int ucsi_register_port(struct ucsi *ucsi, struct ucsi_connector *con)
ucsi_register_partner(con);
ucsi_pwr_opmode_change(con);
ucsi_port_psy_changed(con);
+ ucsi_get_partner_identity(con);
+ ucsi_check_cable(con);
}
/* Only notify USB controller if partner supports USB data */
@@ -1558,6 +1839,15 @@ int ucsi_register(struct ucsi *ucsi)
if (!ucsi->version)
return -ENODEV;
+ /*
+ * Version format is JJ.M.N (JJ = Major version, M = Minor version,
+ * N = sub-minor version).
+ */
+ dev_dbg(ucsi->dev, "Registered UCSI interface with version %x.%x.%x",
+ UCSI_BCD_GET_MAJOR(ucsi->version),
+ UCSI_BCD_GET_MINOR(ucsi->version),
+ UCSI_BCD_GET_SUBMINOR(ucsi->version));
+
queue_delayed_work(system_long_wq, &ucsi->work, 0);
ucsi_debugfs_register(ucsi);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 6478016d5cb8..32daf5f58650 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -10,6 +10,7 @@
#include <linux/usb/typec.h>
#include <linux/usb/pd.h>
#include <linux/usb/role.h>
+#include <asm/unaligned.h>
/* -------------------------------------------------------------------------- */
@@ -23,6 +24,23 @@ struct dentry;
#define UCSI_CONTROL 8
#define UCSI_MESSAGE_IN 16
#define UCSI_MESSAGE_OUT 32
+#define UCSIv2_MESSAGE_OUT 272
+
+/* UCSI versions */
+#define UCSI_VERSION_1_2 0x0120
+#define UCSI_VERSION_2_0 0x0200
+#define UCSI_VERSION_2_1 0x0210
+#define UCSI_VERSION_3_0 0x0300
+
+#define UCSI_BCD_GET_MAJOR(_v_) (((_v_) >> 8) & 0xFF)
+#define UCSI_BCD_GET_MINOR(_v_) (((_v_) >> 4) & 0x0F)
+#define UCSI_BCD_GET_SUBMINOR(_v_) ((_v_) & 0x0F)
+
+/*
+ * Per USB PD 3.2, Section 6.2.1.1.5, the spec revision is represented by 2 bits:
+ * 0b00 = 1.0, 0b01 = 2.0, 0b10 = 3.0, 0b11 = Reserved, Shall NOT be used.
+ */
+#define UCSI_SPEC_REVISION_TO_BCD(_v_) (((_v_) + 1) << 8)
/* Command Status and Connector Change Indication (CCI) bits */
#define UCSI_CCI_CONNECTOR(_c_) (((_c_) & GENMASK(7, 1)) >> 1)
@@ -88,6 +106,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_GET_CABLE_PROPERTY 0x11
#define UCSI_GET_CONNECTOR_STATUS 0x12
#define UCSI_GET_ERROR_STATUS 0x13
+#define UCSI_GET_PD_MESSAGE 0x15
#define UCSI_CONNECTOR_NUMBER(_num_) ((u64)(_num_) << 16)
#define UCSI_COMMAND(_cmd_) ((_cmd_) & 0xff)
@@ -141,6 +160,18 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num);
#define UCSI_MAX_PDOS (4)
#define UCSI_GET_PDOS_SRC_PDOS ((u64)1 << 34)
+/* GET_PD_MESSAGE command bits */
+#define UCSI_GET_PD_MESSAGE_RECIPIENT(_r_) ((u64)(_r_) << 23)
+#define UCSI_GET_PD_MESSAGE_OFFSET(_r_) ((u64)(_r_) << 26)
+#define UCSI_GET_PD_MESSAGE_BYTES(_r_) ((u64)(_r_) << 34)
+#define UCSI_GET_PD_MESSAGE_TYPE(_r_) ((u64)(_r_) << 42)
+#define UCSI_GET_PD_MESSAGE_TYPE_SNK_CAP_EXT 0
+#define UCSI_GET_PD_MESSAGE_TYPE_SRC_CAP_EXT 1
+#define UCSI_GET_PD_MESSAGE_TYPE_BAT_CAP 2
+#define UCSI_GET_PD_MESSAGE_TYPE_BAT_STAT 3
+#define UCSI_GET_PD_MESSAGE_TYPE_IDENTITY 4
+#define UCSI_GET_PD_MESSAGE_TYPE_REVISION 5
+
/* -------------------------------------------------------------------------- */
/* Error information returned by PPM in response to GET_ERROR_STATUS command. */
@@ -203,9 +234,29 @@ struct ucsi_connector_capability {
#define UCSI_CONCAP_OPMODE_USB2 BIT(5)
#define UCSI_CONCAP_OPMODE_USB3 BIT(6)
#define UCSI_CONCAP_OPMODE_ALT_MODE BIT(7)
- u8 flags;
+ u32 flags;
#define UCSI_CONCAP_FLAG_PROVIDER BIT(0)
#define UCSI_CONCAP_FLAG_CONSUMER BIT(1)
+#define UCSI_CONCAP_FLAG_SWAP_TO_DFP BIT(2)
+#define UCSI_CONCAP_FLAG_SWAP_TO_UFP BIT(3)
+#define UCSI_CONCAP_FLAG_SWAP_TO_SRC BIT(4)
+#define UCSI_CONCAP_FLAG_SWAP_TO_SINK BIT(5)
+#define UCSI_CONCAP_FLAG_EX_OP_MODE(_f_) \
+ (((_f_) & GENMASK(13, 6)) >> 6)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN2 BIT(0)
+#define UCSI_CONCAP_EX_OP_MODE_EPR_SRC BIT(1)
+#define UCSI_CONCAP_EX_OP_MODE_EPR_SINK BIT(2)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN3 BIT(3)
+#define UCSI_CONCAP_EX_OP_MODE_USB4_GEN4 BIT(4)
+#define UCSI_CONCAP_FLAG_MISC_CAPS(_f_) \
+ (((_f_) & GENMASK(17, 14)) >> 14)
+#define UCSI_CONCAP_MISC_CAP_FW_UPDATE BIT(0)
+#define UCSI_CONCAP_MISC_CAP_SECURITY BIT(1)
+#define UCSI_CONCAP_FLAG_REV_CURR_PROT_SUPPORT BIT(18)
+#define UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV(_f_) \
+ (((_f_) & GENMASK(20, 19)) >> 19)
+#define UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV_AS_BCD(_f_) \
+ UCSI_SPEC_REVISION_TO_BCD(UCSI_CONCAP_FLAG_PARTNER_PD_MAJOR_REV(_f_))
} __packed;
struct ucsi_altmode {
@@ -221,12 +272,15 @@ struct ucsi_cable_property {
#define UCSI_CABLE_PROP_FLAG_VBUS_IN_CABLE BIT(0)
#define UCSI_CABLE_PROP_FLAG_ACTIVE_CABLE BIT(1)
#define UCSI_CABLE_PROP_FLAG_DIRECTIONALITY BIT(2)
-#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) ((_f_) & GENMASK(3, 0))
+#define UCSI_CABLE_PROP_FLAG_PLUG_TYPE(_f_) (((_f_) & GENMASK(4, 3)) >> 3)
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_A 0
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_B 1
#define UCSI_CABLE_PROPERTY_PLUG_TYPE_C 2
#define UCSI_CABLE_PROPERTY_PLUG_OTHER 3
-#define UCSI_CABLE_PROP_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_MODE_SUPPORT BIT(5)
+#define UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV(_f_) (((_f_) & GENMASK(7, 6)) >> 6)
+#define UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV_AS_BCD(_f_) \
+ UCSI_SPEC_REVISION_TO_BCD(UCSI_CABLE_PROP_FLAG_PD_MAJOR_REV(_f_))
u8 latency;
} __packed;
@@ -265,15 +319,48 @@ struct ucsi_connector_status {
#define UCSI_CONSTAT_PARTNER_TYPE_DEBUG 5
#define UCSI_CONSTAT_PARTNER_TYPE_AUDIO 6
u32 request_data_obj;
- u8 pwr_status;
-#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_) & GENMASK(2, 0))
+
+ u8 pwr_status[3];
+#define UCSI_CONSTAT_BC_STATUS(_p_) ((_p_[0]) & GENMASK(1, 0))
#define UCSI_CONSTAT_BC_NOT_CHARGING 0
#define UCSI_CONSTAT_BC_NOMINAL_CHARGING 1
#define UCSI_CONSTAT_BC_SLOW_CHARGING 2
#define UCSI_CONSTAT_BC_TRICKLE_CHARGING 3
-#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_) & GENMASK(6, 3)) >> 3)
+#define UCSI_CONSTAT_PROVIDER_CAP_LIMIT(_p_) (((_p_[0]) & GENMASK(5, 2)) >> 2)
#define UCSI_CONSTAT_CAP_PWR_LOWERED 0
#define UCSI_CONSTAT_CAP_PWR_BUDGET_LIMIT 1
+#define UCSI_CONSTAT_PROVIDER_PD_VERSION_OPER_MODE(_p_) \
+ ((get_unaligned_le32(_p_) & GENMASK(21, 6)) >> 6)
+#define UCSI_CONSTAT_ORIENTATION(_p_) (((_p_[2]) & GENMASK(6, 6)) >> 6)
+#define UCSI_CONSTAT_ORIENTATION_DIRECT 0
+#define UCSI_CONSTAT_ORIENTATION_FLIPPED 1
+#define UCSI_CONSTAT_SINK_PATH_STATUS(_p_) (((_p_[2]) & GENMASK(7, 7)) >> 7)
+#define UCSI_CONSTAT_SINK_PATH_DISABLED 0
+#define UCSI_CONSTAT_SINK_PATH_ENABLED 1
+ u8 pwr_readings[9];
+#define UCSI_CONSTAT_REV_CURR_PROT_STATUS(_p_) ((_p_[0]) & 0x1)
+#define UCSI_CONSTAT_PWR_READING_VALID(_p_) (((_p_[0]) & GENMASK(1, 1)) >> 1)
+#define UCSI_CONSTAT_CURRENT_SCALE(_p_) (((_p_[0]) & GENMASK(4, 2)) >> 2)
+#define UCSI_CONSTAT_PEAK_CURRENT(_p_) \
+ ((get_unaligned_le32(_p_) & GENMASK(20, 5)) >> 5)
+#define UCSI_CONSTAT_AVG_CURRENT(_p_) \
+ ((get_unaligned_le32(&(_p_)[2]) & GENMASK(20, 5)) >> 5)
+#define UCSI_CONSTAT_VOLTAGE_SCALE(_p_) \
+ ((get_unaligned_le16(&(_p_)[4]) & GENMASK(8, 5)) >> 5)
+#define UCSI_CONSTAT_VOLTAGE_READING(_p_) \
+ ((get_unaligned_le32(&(_p_)[5]) & GENMASK(16, 1)) >> 1)
+} __packed;
+
+/*
+ * Data structure filled by PPM in response to GET_PD_MESSAGE command with the
+ * Response Message Type set to Discover Identity Response.
+ */
+struct ucsi_pd_message_disc_id {
+ u32 vdm_header;
+ u32 id_header;
+ u32 cert_stat;
+ u32 product;
+ u32 vdo[3];
} __packed;
/* -------------------------------------------------------------------------- */
@@ -341,14 +428,18 @@ struct ucsi_connector {
struct typec_port *port;
struct typec_partner *partner;
+ struct typec_cable *cable;
+ struct typec_plug *plug;
struct typec_altmode *port_altmode[UCSI_MAX_ALTMODES];
struct typec_altmode *partner_altmode[UCSI_MAX_ALTMODES];
+ struct typec_altmode *plug_altmode[UCSI_MAX_ALTMODES];
struct typec_capability typec_cap;
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
+ struct ucsi_cable_property cable_prop;
struct power_supply *psy;
struct power_supply_desc psy_desc;
u32 rdo;
@@ -364,6 +455,10 @@ struct ucsi_connector {
struct usb_power_delivery_capabilities *partner_sink_caps;
struct usb_role_switch *usb_role_sw;
+
+ /* USB PD identity */
+ struct usb_pd_identity partner_identity;
+ struct usb_pd_identity cable_identity;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
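The GET_PD_MESSAGE field macros above compose into a single 64-bit command word. A rough sketch of the word ucsi_read_identity() builds for the first 16-byte identity read against the cable plug, using only macros defined or referenced in this patch:

	u64 command = UCSI_COMMAND(UCSI_GET_PD_MESSAGE) |
		      UCSI_CONNECTOR_NUMBER(con->num) |
		      UCSI_GET_PD_MESSAGE_RECIPIENT(UCSI_RECIPIENT_SOP_P) |
		      UCSI_GET_PD_MESSAGE_OFFSET(0) |
		      UCSI_GET_PD_MESSAGE_BYTES(0x10) |
		      UCSI_GET_PD_MESSAGE_TYPE(UCSI_GET_PD_MESSAGE_TYPE_IDENTITY);

On pre-2.0 PPMs a second read with offset 0x10 and length 0xc fetches the remaining Product Type VDOs, completing the 0x1c-byte response.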
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 449c125f6f87..dda7c7c94e08 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -192,6 +192,12 @@ struct ucsi_ccg_altmode {
bool checked;
} __packed;
+#define CCGX_MESSAGE_IN_MAX 4
+struct op_region {
+ __le32 cci;
+ __le32 message_in[CCGX_MESSAGE_IN_MAX];
+};
+
struct ucsi_ccg {
struct device *dev;
struct ucsi *ucsi;
@@ -222,6 +228,13 @@ struct ucsi_ccg {
bool has_multiple_dp;
struct ucsi_ccg_altmode orig[UCSI_MAX_ALTMODES];
struct ucsi_ccg_altmode updated[UCSI_MAX_ALTMODES];
+
+ /*
+ * This spinlock protects op_data, which holds the CCI and MESSAGE_IN
+ * values that are updated in the ISR.
+ */
+ spinlock_t op_lock;
+ struct op_region op_data;
};
static int ccg_read(struct ucsi_ccg *uc, u16 rab, u8 *data, u32 len)
@@ -305,12 +318,42 @@ static int ccg_write(struct ucsi_ccg *uc, u16 rab, const u8 *data, u32 len)
return 0;
}
+static int ccg_op_region_update(struct ucsi_ccg *uc, u32 cci)
+{
+ u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_MESSAGE_IN);
+ struct op_region *data = &uc->op_data;
+ unsigned char *buf;
+ size_t size = sizeof(data->message_in);
+
+ buf = kzalloc(size, GFP_ATOMIC);
+ if (!buf)
+ return -ENOMEM;
+ if (UCSI_CCI_LENGTH(cci)) {
+ int ret = ccg_read(uc, reg, (void *)buf, size);
+
+ if (ret) {
+ kfree(buf);
+ return ret;
+ }
+ }
+
+ spin_lock(&uc->op_lock);
+ data->cci = cpu_to_le32(cci);
+ if (UCSI_CCI_LENGTH(cci))
+ memcpy(&data->message_in, buf, size);
+ spin_unlock(&uc->op_lock);
+ kfree(buf);
+ return 0;
+}
+
static int ucsi_ccg_init(struct ucsi_ccg *uc)
{
unsigned int count = 10;
u8 data;
int status;
+ spin_lock_init(&uc->op_lock);
+
data = CCGX_RAB_UCSI_CONTROL_STOP;
status = ccg_write(uc, CCGX_RAB_UCSI_CONTROL, &data, sizeof(data));
if (status < 0)
@@ -520,9 +563,20 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
struct ucsi_capability *cap;
struct ucsi_altmode *alt;
- int ret;
+ int ret = 0;
+
+ if (offset == UCSI_CCI) {
+ spin_lock(&uc->op_lock);
+ memcpy(val, &(uc->op_data).cci, val_len);
+ spin_unlock(&uc->op_lock);
+ } else if (offset == UCSI_MESSAGE_IN) {
+ spin_lock(&uc->op_lock);
+ memcpy(val, &(uc->op_data).message_in, val_len);
+ spin_unlock(&uc->op_lock);
+ } else {
+ ret = ccg_read(uc, reg, val, val_len);
+ }
- ret = ccg_read(uc, reg, val, val_len);
if (ret)
return ret;
@@ -559,9 +613,18 @@ static int ucsi_ccg_read(struct ucsi *ucsi, unsigned int offset,
static int ucsi_ccg_async_write(struct ucsi *ucsi, unsigned int offset,
const void *val, size_t val_len)
{
+ struct ucsi_ccg *uc = ucsi_get_drvdata(ucsi);
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(offset);
- return ccg_write(ucsi_get_drvdata(ucsi), reg, val, val_len);
+ /*
+ * The UCSI core may read CCI immediately after async_write; clear the
+ * cached CCI so the caller does not see stale data before the ISR
+ * delivers the new CCI.
+ */
+ spin_lock(&uc->op_lock);
+ uc->op_data.cci = 0;
+ spin_unlock(&uc->op_lock);
+
+ return ccg_write(uc, reg, val, val_len);
}
static int ucsi_ccg_sync_write(struct ucsi *ucsi, unsigned int offset,
@@ -615,13 +678,18 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
u16 reg = CCGX_RAB_UCSI_DATA_BLOCK(UCSI_CCI);
struct ucsi_ccg *uc = data;
u8 intr_reg;
- u32 cci;
- int ret;
+ u32 cci = 0;
+ int ret = 0;
ret = ccg_read(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
if (ret)
return ret;
+ if (!intr_reg)
+ return IRQ_HANDLED;
+ else if (!(intr_reg & UCSI_READ_INT))
+ goto err_clear_irq;
+
ret = ccg_read(uc, reg, (void *)&cci, sizeof(cci));
if (ret)
goto err_clear_irq;
@@ -629,13 +697,21 @@ static irqreturn_t ccg_irq_handler(int irq, void *data)
if (UCSI_CCI_CONNECTOR(cci))
ucsi_connector_change(uc->ucsi, UCSI_CCI_CONNECTOR(cci));
- if (test_bit(DEV_CMD_PENDING, &uc->flags) &&
- cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
- complete(&uc->complete);
+ /*
+ * As per CCGx UCSI interface guide, copy CCI and MESSAGE_IN
+ * to the OpRegion before clearing the UCSI interrupt.
+ */
+ ret = ccg_op_region_update(uc, cci);
+ if (ret)
+ goto err_clear_irq;
err_clear_irq:
ccg_write(uc, CCGX_RAB_INTR_REG, &intr_reg, sizeof(intr_reg));
+ if (!ret && test_bit(DEV_CMD_PENDING, &uc->flags) &&
+ cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
+ complete(&uc->complete);
+
return IRQ_HANDLED;
}
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index faccc942b381..932e7bf69447 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -298,6 +298,7 @@ static void pmic_glink_ucsi_destroy(void *data)
}
static const struct of_device_id pmic_glink_ucsi_of_quirks[] = {
+ { .compatible = "qcom,qcm6490-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8180x-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sc8280xp-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
{ .compatible = "qcom,sm8350-pmic-glink", .data = (void *)UCSI_NO_PARTNER_PDOS, },
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
index cce3d1837104..ad7f3447fe90 100644
--- a/drivers/vdpa/alibaba/eni_vdpa.c
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -254,6 +254,13 @@ static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
return vp_legacy_get_queue_size(ldev, 0);
}
+static u16 eni_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_queue_size(ldev, qid);
+}
+
static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
struct vdpa_vq_state *state)
{
@@ -416,6 +423,7 @@ static const struct vdpa_config_ops eni_vdpa_ops = {
.reset = eni_vdpa_reset,
.get_vq_num_max = eni_vdpa_get_vq_num_max,
.get_vq_num_min = eni_vdpa_get_vq_num_min,
+ .get_vq_size = eni_vdpa_get_vq_size,
.get_vq_state = eni_vdpa_get_vq_state,
.set_vq_state = eni_vdpa_set_vq_state,
.set_vq_cb = eni_vdpa_set_vq_cb,
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
index 060f837a4f9f..472daa588a9d 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.c
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -69,20 +69,19 @@ static int ifcvf_read_config_range(struct pci_dev *dev,
return 0;
}
-static u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
{
u16 queue_size;
+ if (qid >= hw->nr_vring)
+ return 0;
+
vp_iowrite16(qid, &hw->common_cfg->queue_select);
queue_size = vp_ioread16(&hw->common_cfg->queue_size);
return queue_size;
}
-/* This function returns the max allowed safe size for
- * all virtqueues. It is the minimal size that can be
- * suppprted by all virtqueues.
- */
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
{
u16 queue_size, max_size, qid;
@@ -94,7 +93,7 @@ u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
if (!queue_size)
continue;
- max_size = min(queue_size, max_size);
+ max_size = max(queue_size, max_size);
}
return max_size;
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
index b57849c643f6..0f347717021a 100644
--- a/drivers/vdpa/ifcvf/ifcvf_base.h
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -28,6 +28,7 @@
#define IFCVF_PCI_MAX_RESOURCE 6
#define IFCVF_LM_BAR 4
+#define IFCVF_MIN_VQ_SIZE 64
#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
#define IFCVF_DBG(pdev, fmt, ...) dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__)
@@ -131,4 +132,5 @@ void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready);
void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features);
u64 ifcvf_get_driver_features(struct ifcvf_hw *hw);
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw);
+u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid);
#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index e98fa8100f3c..80d0a0460885 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -456,6 +456,11 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
return ifcvf_get_max_vq_size(vf);
}
+static u16 ifcvf_vdpa_get_vq_num_min(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_MIN_VQ_SIZE;
+}
+
static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
struct vdpa_vq_state *state)
{
@@ -597,6 +602,14 @@ static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev,
return -EINVAL;
}
+static u16 ifcvf_vdpa_get_vq_size(struct vdpa_device *vdpa_dev,
+ u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_vq_size(vf, qid);
+}
+
static struct vdpa_notification_area ifcvf_get_vq_notification(struct vdpa_device *vdpa_dev,
u16 idx)
{
@@ -624,6 +637,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_status = ifcvf_vdpa_set_status,
.reset = ifcvf_vdpa_reset,
.get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
+ .get_vq_num_min = ifcvf_vdpa_get_vq_num_min,
.get_vq_state = ifcvf_vdpa_get_vq_state,
.set_vq_state = ifcvf_vdpa_set_vq_state,
.set_vq_cb = ifcvf_vdpa_set_vq_cb,
@@ -632,6 +646,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = {
.set_vq_num = ifcvf_vdpa_set_vq_num,
.set_vq_address = ifcvf_vdpa_set_vq_address,
.get_vq_irq = ifcvf_vdpa_get_vq_irq,
+ .get_vq_size = ifcvf_vdpa_get_vq_size,
.kick_vq = ifcvf_vdpa_kick_vq,
.get_generation = ifcvf_vdpa_get_generation,
.get_device_id = ifcvf_vdpa_get_device_id,
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index 778821bab7d9..ecfc16151d61 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -151,8 +151,6 @@ static void teardown_driver(struct mlx5_vdpa_net *ndev);
static bool mlx5_vdpa_debug;
-#define MLX5_CVQ_MAX_ENT 16
-
#define MLX5_LOG_VIO_FLAG(_feature) \
do { \
if (features & BIT_ULL(_feature)) \
@@ -2276,9 +2274,16 @@ static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
- if (!is_index_valid(mvdev, idx) || is_ctrl_vq_idx(mvdev, idx))
+ if (!is_index_valid(mvdev, idx))
return;
+ if (is_ctrl_vq_idx(mvdev, idx)) {
+ struct mlx5_control_vq *cvq = &mvdev->cvq;
+
+ cvq->vring.vring.num = num;
+ return;
+ }
+
mvq = &ndev->vqs[idx];
mvq->num_ent = num;
}
@@ -2963,7 +2968,7 @@ static int setup_cvq_vring(struct mlx5_vdpa_dev *mvdev)
u16 idx = cvq->vring.last_avail_idx;
err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features,
- MLX5_CVQ_MAX_ENT, false,
+ cvq->vring.vring.num, false,
(struct vring_desc *)(uintptr_t)cvq->desc_addr,
(struct vring_avail *)(uintptr_t)cvq->driver_addr,
(struct vring_used *)(uintptr_t)cvq->device_addr);
diff --git a/drivers/vdpa/pds/aux_drv.c b/drivers/vdpa/pds/aux_drv.c
index 186e9ee22eb1..f57330cf9024 100644
--- a/drivers/vdpa/pds/aux_drv.c
+++ b/drivers/vdpa/pds/aux_drv.c
@@ -93,8 +93,8 @@ static void pds_vdpa_remove(struct auxiliary_device *aux_dev)
struct device *dev = &aux_dev->dev;
vdpa_mgmtdev_unregister(&vdpa_aux->vdpa_mdev);
+ pds_vdpa_release_irqs(vdpa_aux->pdsv);
vp_modern_remove(&vdpa_aux->vd_mdev);
- pci_free_irq_vectors(vdpa_aux->padev->vf_pdev);
pds_vdpa_debugfs_del_vdpadev(vdpa_aux);
kfree(vdpa_aux);
diff --git a/drivers/vdpa/pds/vdpa_dev.c b/drivers/vdpa/pds/vdpa_dev.c
index 25c0fe5ec3d5..301d95e08596 100644
--- a/drivers/vdpa/pds/vdpa_dev.c
+++ b/drivers/vdpa/pds/vdpa_dev.c
@@ -426,12 +426,18 @@ err_release:
return err;
}
-static void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
+void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv)
{
- struct pci_dev *pdev = pdsv->vdpa_aux->padev->vf_pdev;
- struct pds_vdpa_aux *vdpa_aux = pdsv->vdpa_aux;
+ struct pds_vdpa_aux *vdpa_aux;
+ struct pci_dev *pdev;
int qid;
+ if (!pdsv)
+ return;
+
+ pdev = pdsv->vdpa_aux->padev->vf_pdev;
+ vdpa_aux = pdsv->vdpa_aux;
+
if (!vdpa_aux->nintrs)
return;
@@ -612,6 +618,7 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
struct device *dma_dev;
struct pci_dev *pdev;
struct device *dev;
+ u8 status;
int err;
int i;
@@ -638,6 +645,13 @@ static int pds_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
dma_dev = &pdev->dev;
pdsv->vdpa_dev.dma_dev = dma_dev;
+ status = pds_vdpa_get_status(&pdsv->vdpa_dev);
+ if (status == 0xff) {
+ dev_err(dev, "Broken PCI - status %#x\n", status);
+ err = -ENXIO;
+ goto err_unmap;
+ }
+
pdsv->supported_features = mgmt->supported_features;
if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
diff --git a/drivers/vdpa/pds/vdpa_dev.h b/drivers/vdpa/pds/vdpa_dev.h
index d984ba24a7da..84bdb45871ff 100644
--- a/drivers/vdpa/pds/vdpa_dev.h
+++ b/drivers/vdpa/pds/vdpa_dev.h
@@ -46,5 +46,6 @@ struct pds_vdpa_device {
#define PDS_VDPA_PACKED_INVERT_IDX 0x8000
+void pds_vdpa_release_irqs(struct pds_vdpa_device *pdsv);
int pds_vdpa_get_mgmt_info(struct pds_vdpa_aux *vdpa_aux);
#endif /* _VDPA_DEV_H_ */
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index d0695680b282..b246067e074b 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -115,7 +115,7 @@ static const struct attribute_group vdpa_dev_group = {
};
__ATTRIBUTE_GROUPS(vdpa_dev);
-static struct bus_type vdpa_bus = {
+static const struct bus_type vdpa_bus = {
.name = "vdpa",
.dev_groups = vdpa_dev_groups,
.match = vdpa_dev_match,
@@ -945,6 +945,215 @@ static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *ms
}
static int
+vdpa_dev_blk_capacity_config_fill(struct sk_buff *msg,
+ const struct virtio_blk_config *config)
+{
+ u64 val_u64;
+
+ val_u64 = __virtio64_to_cpu(true, config->capacity);
+
+ return nla_put_u64_64bit(msg, VDPA_ATTR_DEV_BLK_CFG_CAPACITY,
+ val_u64, VDPA_ATTR_PAD);
+}
+
+static int
+vdpa_dev_blk_seg_size_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_SIZE_MAX)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->size_max);
+
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_SIZE, val_u32);
+}
+
+/* Fill the block size */
+static int
+vdpa_dev_blk_block_size_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_BLK_SIZE)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->blk_size);
+
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_BLK_SIZE, val_u32);
+}
+
+static int
+vdpa_dev_blk_seg_max_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_SEG_MAX)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->seg_max);
+
+ return nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_SEG_MAX, val_u32);
+}
+
+static int vdpa_dev_blk_mq_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u16 val_u16;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_MQ)) == 0)
+ return 0;
+
+ val_u16 = __virtio16_to_cpu(true, config->num_queues);
+
+ return nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_NUM_QUEUES, val_u16);
+}
+
+static int vdpa_dev_blk_topology_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u16 min_io_size;
+ u32 opt_io_size;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_TOPOLOGY)) == 0)
+ return 0;
+
+ min_io_size = __virtio16_to_cpu(true, config->min_io_size);
+ opt_io_size = __virtio32_to_cpu(true, config->opt_io_size);
+
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_PHY_BLK_EXP,
+ config->physical_block_exp))
+ return -EMSGSIZE;
+
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_ALIGN_OFFSET,
+ config->alignment_offset))
+ return -EMSGSIZE;
+
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_BLK_CFG_MIN_IO_SIZE, min_io_size))
+ return -EMSGSIZE;
+
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_OPT_IO_SIZE, opt_io_size))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_discard_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_DISCARD)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_discard_sectors);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEC, val_u32))
+ return -EMSGSIZE;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_discard_seg);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_DISCARD_SEG, val_u32))
+ return -EMSGSIZE;
+
+ val_u32 = __virtio32_to_cpu(true, config->discard_sector_alignment);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_DISCARD_SEC_ALIGN, val_u32))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
+vdpa_dev_blk_write_zeroes_config_fill(struct sk_buff *msg, u64 features,
+ const struct virtio_blk_config *config)
+{
+ u32 val_u32;
+
+ if ((features & BIT_ULL(VIRTIO_BLK_F_WRITE_ZEROES)) == 0)
+ return 0;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_sectors);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEC, val_u32))
+ return -EMSGSIZE;
+
+ val_u32 = __virtio32_to_cpu(true, config->max_write_zeroes_seg);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_BLK_CFG_MAX_WRITE_ZEROES_SEG, val_u32))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_ro_config_fill(struct sk_buff *msg, u64 features)
+{
+ u8 ro;
+
+ ro = ((features & BIT_ULL(VIRTIO_BLK_F_RO)) == 0) ? 0 : 1;
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_READ_ONLY, ro))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_flush_config_fill(struct sk_buff *msg, u64 features)
+{
+ u8 flush;
+
+ flush = ((features & BIT_ULL(VIRTIO_BLK_F_FLUSH)) == 0) ? 0 : 1;
+ if (nla_put_u8(msg, VDPA_ATTR_DEV_BLK_CFG_FLUSH, flush))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int vdpa_dev_blk_config_fill(struct vdpa_device *vdev,
+ struct sk_buff *msg)
+{
+ struct virtio_blk_config config = {};
+ u64 features_device;
+
+ vdev->config->get_config(vdev, 0, &config, sizeof(config));
+
+ features_device = vdev->config->get_device_features(vdev);
+
+ if (nla_put_u64_64bit(msg, VDPA_ATTR_DEV_FEATURES, features_device,
+ VDPA_ATTR_PAD))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_capacity_config_fill(msg, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_seg_size_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_block_size_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_seg_max_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_mq_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_topology_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_discard_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_write_zeroes_config_fill(msg, features_device, &config))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_ro_config_fill(msg, features_device))
+ return -EMSGSIZE;
+
+ if (vdpa_dev_blk_flush_config_fill(msg, features_device))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
+static int
vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
int flags, struct netlink_ext_ack *extack)
{
@@ -988,6 +1197,9 @@ vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid,
case VIRTIO_ID_NET:
err = vdpa_dev_net_config_fill(vdev, msg);
break;
+ case VIRTIO_ID_BLOCK:
+ err = vdpa_dev_blk_config_fill(vdev, msg);
+ break;
default:
err = -EOPNOTSUPP;
break;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index be2925d0d283..8ffea8430f95 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -160,7 +160,7 @@ static void vdpasim_do_reset(struct vdpasim *vdpasim, u32 flags)
}
}
- vdpasim->running = true;
+ vdpasim->running = false;
spin_unlock(&vdpasim->iommu_lock);
vdpasim->features = 0;
@@ -311,6 +311,17 @@ static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
vq->num = num;
}
+static u16 vdpasim_get_vq_size(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ if (vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)
+ return vq->num;
+ else
+ return VDPASIM_QUEUE_MAX;
+}
+
static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
@@ -483,6 +494,7 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
mutex_lock(&vdpasim->mutex);
vdpasim->status = status;
+ vdpasim->running = (status & VIRTIO_CONFIG_S_DRIVER_OK) != 0;
mutex_unlock(&vdpasim->mutex);
}
@@ -774,6 +786,7 @@ static const struct vdpa_config_ops vdpasim_config_ops = {
.get_driver_features = vdpasim_get_driver_features,
.set_config_cb = vdpasim_set_config_cb,
.get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_vq_size = vdpasim_get_vq_size,
.get_device_id = vdpasim_get_device_id,
.get_vendor_id = vdpasim_get_vendor_id,
.get_status = vdpasim_get_status,
diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 5e4a77b9bae6..791d38d6284c 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -373,6 +373,26 @@ static void vduse_domain_free_iova(struct iova_domain *iovad,
free_iova_fast(iovad, iova >> shift, iova_len);
}
+void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ read_lock(&domain->bounce_lock);
+ if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_TO_DEVICE);
+ read_unlock(&domain->bounce_lock);
+}
+
+void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ read_lock(&domain->bounce_lock);
+ if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
+ read_unlock(&domain->bounce_lock);
+}
+
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
@@ -393,7 +413,8 @@ dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
goto err_unlock;
- if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);
read_unlock(&domain->bounce_lock);
@@ -411,9 +432,9 @@ void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
enum dma_data_direction dir, unsigned long attrs)
{
struct iova_domain *iovad = &domain->stream_iovad;
-
read_lock(&domain->bounce_lock);
- if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+ if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
+ (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index 173e979b84a9..f92f22a7267d 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -44,6 +44,14 @@ int vduse_domain_set_map(struct vduse_iova_domain *domain,
void vduse_domain_clear_map(struct vduse_iova_domain *domain,
struct vhost_iotlb *iotlb);
+void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir);
+
+void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir);
+
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
struct page *page, unsigned long offset,
size_t size, enum dma_data_direction dir,
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 1d24da79c399..73c89701fc9d 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -541,6 +541,17 @@ static void vduse_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
vq->num = num;
}
+static u16 vduse_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vduse_dev *dev = vdpa_to_vduse(vdpa);
+ struct vduse_virtqueue *vq = dev->vqs[idx];
+
+ if (vq->num)
+ return vq->num;
+ else
+ return vq->num_max;
+}
+
static void vduse_vdpa_set_vq_ready(struct vdpa_device *vdpa,
u16 idx, bool ready)
{
@@ -773,6 +784,7 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.kick_vq = vduse_vdpa_kick_vq,
.set_vq_cb = vduse_vdpa_set_vq_cb,
.set_vq_num = vduse_vdpa_set_vq_num,
+ .get_vq_size = vduse_vdpa_get_vq_size,
.set_vq_ready = vduse_vdpa_set_vq_ready,
.get_vq_ready = vduse_vdpa_get_vq_ready,
.set_vq_state = vduse_vdpa_set_vq_state,
@@ -798,6 +810,26 @@ static const struct vdpa_config_ops vduse_vdpa_config_ops = {
.free = vduse_vdpa_free,
};
+static void vduse_dev_sync_single_for_device(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_sync_single_for_device(domain, dma_addr, size, dir);
+}
+
+static void vduse_dev_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct vduse_dev *vdev = dev_to_vduse(dev);
+ struct vduse_iova_domain *domain = vdev->domain;
+
+ vduse_domain_sync_single_for_cpu(domain, dma_addr, size, dir);
+}
+
static dma_addr_t vduse_dev_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
@@ -858,6 +890,8 @@ static size_t vduse_dev_max_mapping_size(struct device *dev)
}
static const struct dma_map_ops vduse_dev_dma_ops = {
+ .sync_single_for_device = vduse_dev_sync_single_for_device,
+ .sync_single_for_cpu = vduse_dev_sync_single_for_cpu,
.map_page = vduse_dev_map_page,
.unmap_page = vduse_dev_unmap_page,
.alloc = vduse_dev_alloc_coherent,
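With .sync_single_for_device/.sync_single_for_cpu wired up and map/unmap honouring DMA_ATTR_SKIP_CPU_SYNC, a consumer of the VDUSE DMA ops can defer the bounce until it is actually needed. A generic DMA-API sketch (standard kernel calls, not code from this patch):

	dma_addr_t addr;

	addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
	/* ... fill the buffer ... */
	dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);
	/* device consumes the data, then: */
	dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE,
			     DMA_ATTR_SKIP_CPU_SYNC);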
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 281287fae89f..df5f4a3bccb5 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -328,6 +328,13 @@ static void vp_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
vp_modern_set_queue_size(mdev, qid, num);
}
+static u16 vp_vdpa_get_vq_size(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_modern_device *mdev = vdpa_to_mdev(vdpa);
+
+ return vp_modern_get_queue_size(mdev, qid);
+}
+
static int vp_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
u64 desc_area, u64 driver_area,
u64 device_area)
@@ -449,6 +456,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.set_vq_ready = vp_vdpa_set_vq_ready,
.get_vq_ready = vp_vdpa_get_vq_ready,
.set_vq_num = vp_vdpa_set_vq_num,
+ .get_vq_size = vp_vdpa_get_vq_size,
.set_vq_address = vp_vdpa_set_vq_address,
.kick_vq = vp_vdpa_kick_vq,
.get_generation = vp_vdpa_get_generation,
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4b2fcb228a0a..c64ded183f8d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -697,6 +697,9 @@ static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
hdr = buf;
gso = &hdr->gso;
+ if (!sock_hlen)
+ memset(buf, 0, pad);
+
if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
vhost16_to_cpu(vq, gso->csum_start) +
vhost16_to_cpu(vq, gso->csum_offset) + 2 >
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index bc4a51e4638b..ba52d128aeb7 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -595,6 +595,9 @@ static long vhost_vdpa_suspend(struct vhost_vdpa *v)
const struct vdpa_config_ops *ops = vdpa->config;
int ret;
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
if (!ops->suspend)
return -EOPNOTSUPP;
@@ -615,6 +618,9 @@ static long vhost_vdpa_resume(struct vhost_vdpa *v)
const struct vdpa_config_ops *ops = vdpa->config;
int ret;
+ if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
+ return 0;
+
if (!ops->resume)
return -EOPNOTSUPP;
@@ -681,6 +687,14 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
if (!ops->set_group_asid)
return -EOPNOTSUPP;
return ops->set_group_asid(vdpa, idx, s.num);
+ case VHOST_VDPA_GET_VRING_SIZE:
+ if (!ops->get_vq_size)
+ return -EOPNOTSUPP;
+ s.index = idx;
+ s.num = ops->get_vq_size(vdpa, idx);
+ if (copy_to_user(argp, &s, sizeof(s)))
+ return -EFAULT;
+ return 0;
case VHOST_GET_VRING_BASE:
r = ops->get_vq_state(v->vdpa, idx, &vq_state);
if (r)
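
On the uapi side the new request reuses struct vhost_vring_state: userspace fills in .index and the queue size comes back in .num. A hypothetical query from userspace, assuming the VHOST_VDPA_GET_VRING_SIZE ioctl number added to <linux/vhost.h> by the same series and a /dev/vhost-vdpa-0 node:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vhost.h>

int main(void)
{
	struct vhost_vring_state state = { .index = 0 };
	int fd = open("/dev/vhost-vdpa-0", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, VHOST_VDPA_GET_VRING_SIZE, &state) == 0)
		printf("queue %u holds %u descriptors\n", state.index, state.num);

	close(fd);
	return 0;
}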
diff --git a/drivers/video/console/dummycon.c b/drivers/video/console/dummycon.c
index 14af5d9e13b0..139049368fdc 100644
--- a/drivers/video/console/dummycon.c
+++ b/drivers/video/console/dummycon.c
@@ -50,7 +50,8 @@ void dummycon_unregister_output_notifier(struct notifier_block *nb)
raw_notifier_chain_unregister(&dummycon_output_nh, nb);
}
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
+static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y,
+ unsigned int x)
{
WARN_CONSOLE_UNLOCKED();
@@ -58,10 +59,10 @@ static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos)
raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
}
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos)
+static void dummycon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos)
{
- int i;
+ unsigned int i;
if (!dummycon_putc_called) {
/* Ignore erases */
@@ -78,18 +79,21 @@ static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
raw_notifier_call_chain(&dummycon_output_nh, 0, NULL);
}
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch)
{
/* Redraw, so that we get putc(s) for output done while blanked */
- return 1;
+ return true;
}
#else
-static void dummycon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void dummycon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos) { }
-static int dummycon_blank(struct vc_data *vc, int blank, int mode_switch)
+static void dummycon_putc(struct vc_data *vc, u16 c, unsigned int y,
+ unsigned int x) { }
+static void dummycon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos) { }
+static bool dummycon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch)
{
- return 0;
+ return false;
}
#endif
@@ -98,7 +102,7 @@ static const char *dummycon_startup(void)
return "dummy device";
}
-static void dummycon_init(struct vc_data *vc, int init)
+static void dummycon_init(struct vc_data *vc, bool init)
{
vc->vc_can_do_color = 1;
if (init) {
@@ -109,9 +113,9 @@ static void dummycon_init(struct vc_data *vc, int init)
}
static void dummycon_deinit(struct vc_data *vc) { }
-static void dummycon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width) { }
-static void dummycon_cursor(struct vc_data *vc, int mode) { }
+static void dummycon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width) { }
+static void dummycon_cursor(struct vc_data *vc, bool enable) { }
static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
unsigned int bottom, enum con_scroll dir,
@@ -120,9 +124,9 @@ static bool dummycon_scroll(struct vc_data *vc, unsigned int top,
return false;
}
-static int dummycon_switch(struct vc_data *vc)
+static bool dummycon_switch(struct vc_data *vc)
{
- return 0;
+ return false;
}
/*
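
dummycon is the smallest illustration of the consw conversion that runs through the rest of this series: coordinates become unsigned int, the cursor callback takes a plain enable flag instead of the old CM_* constants, con_clear shrinks to a single row, and con_switch/con_blank report "needs redraw" as bool. A stub driver written against the prototypes exactly as they appear in these hunks; the names are invented, and the struct consw layout and enum vesa_blank_mode are assumed to be what <linux/console.h> provides once the series is applied:

#include <linux/console.h>
#include <linux/module.h>
#include <linux/types.h>

static const char *stubcon_startup(void) { return "stub device"; }
static void stubcon_init(struct vc_data *vc, bool init) { }
static void stubcon_deinit(struct vc_data *vc) { }

/* con_clear now always clears one row: width cells starting at (sx, sy) */
static void stubcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
			  unsigned int width) { }

static void stubcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
			  unsigned int ypos, unsigned int xpos) { }

/* true draws the cursor, false erases it */
static void stubcon_cursor(struct vc_data *vc, bool enable) { }

static bool stubcon_scroll(struct vc_data *vc, unsigned int top,
			   unsigned int bottom, enum con_scroll dir,
			   unsigned int lines) { return false; }

/* return true when the console core has to redraw the screen itself */
static bool stubcon_switch(struct vc_data *vc) { return true; }
static bool stubcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
			  bool mode_switch) { return false; }

static const struct consw stub_con = {
	.owner		= THIS_MODULE,
	.con_startup	= stubcon_startup,
	.con_init	= stubcon_init,
	.con_deinit	= stubcon_deinit,
	.con_clear	= stubcon_clear,
	.con_putcs	= stubcon_putcs,
	.con_cursor	= stubcon_cursor,
	.con_scroll	= stubcon_scroll,
	.con_switch	= stubcon_switch,
	.con_blank	= stubcon_blank,
};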
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c
index ef29b321967f..c0e1f4554a44 100644
--- a/drivers/video/console/mdacon.c
+++ b/drivers/video/console/mdacon.c
@@ -352,7 +352,7 @@ static const char *mdacon_startup(void)
return "MDA-2";
}
-static void mdacon_init(struct vc_data *c, int init)
+static void mdacon_init(struct vc_data *c, bool init)
{
c->vc_complement_mask = 0x0800; /* reverse video */
c->vc_display_fg = &mda_display_fg;
@@ -427,13 +427,8 @@ static inline u16 *mda_addr(unsigned int x, unsigned int y)
return mda_vram_base + y * mda_num_columns + x;
}
-static void mdacon_putc(struct vc_data *c, int ch, int y, int x)
-{
- scr_writew(mda_convert_attr(ch), mda_addr(x, y));
-}
-
-static void mdacon_putcs(struct vc_data *c, const unsigned short *s,
- int count, int y, int x)
+static void mdacon_putcs(struct vc_data *c, const u16 *s, unsigned int count,
+ unsigned int y, unsigned int x)
{
u16 *dest = mda_addr(x, y);
@@ -442,29 +437,22 @@ static void mdacon_putcs(struct vc_data *c, const unsigned short *s,
}
}
-static void mdacon_clear(struct vc_data *c, int y, int x,
- int height, int width)
+static void mdacon_clear(struct vc_data *c, unsigned int y, unsigned int x,
+ unsigned int width)
{
u16 *dest = mda_addr(x, y);
u16 eattr = mda_convert_attr(c->vc_video_erase_char);
- if (width <= 0 || height <= 0)
- return;
-
- if (x==0 && width==mda_num_columns) {
- scr_memsetw(dest, eattr, height*width*2);
- } else {
- for (; height > 0; height--, dest+=mda_num_columns)
- scr_memsetw(dest, eattr, width*2);
- }
+ scr_memsetw(dest, eattr, width * 2);
}
-
-static int mdacon_switch(struct vc_data *c)
+
+static bool mdacon_switch(struct vc_data *c)
{
- return 1; /* redrawing needed */
+ return true; /* redrawing needed */
}
-static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
+static bool mdacon_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
if (mda_type == TYPE_MDA) {
if (blank)
@@ -472,20 +460,20 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch)
mda_convert_attr(c->vc_video_erase_char),
c->vc_screenbuf_size);
/* Tell console.c that it has to restore the screen itself */
- return 1;
+ return true;
} else {
if (blank)
outb_p(0x00, mda_mode_port); /* disable video */
else
outb_p(MDA_MODE_VIDEO_EN | MDA_MODE_BLINK_EN,
mda_mode_port);
- return 0;
+ return false;
}
}
-static void mdacon_cursor(struct vc_data *c, int mode)
+static void mdacon_cursor(struct vc_data *c, bool enable)
{
- if (mode == CM_ERASE) {
+ if (!enable) {
mda_set_cursor(mda_vram_len - 1);
return;
}
@@ -544,7 +532,6 @@ static const struct consw mda_con = {
.con_init = mdacon_init,
.con_deinit = mdacon_deinit,
.con_clear = mdacon_clear,
- .con_putc = mdacon_putc,
.con_putcs = mdacon_putcs,
.con_cursor = mdacon_cursor,
.con_scroll = mdacon_scroll,
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index e8e4f82cd4a1..a51cfc1d560e 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -324,7 +324,7 @@ out_unmap:
return NULL;
}
-static void newport_init(struct vc_data *vc, int init)
+static void newport_init(struct vc_data *vc, bool init)
{
int cols, rows;
@@ -346,12 +346,12 @@ static void newport_deinit(struct vc_data *c)
}
}
-static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
- int width)
+static void newport_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width)
{
int xend = ((sx + width) << 3) - 1;
int ystart = ((sy << 4) + topscan) & 0x3ff;
- int yend = (((sy + height) << 4) + topscan - 1) & 0x3ff;
+ int yend = (((sy + 1) << 4) + topscan - 1) & 0x3ff;
if (logo_active)
return;
@@ -367,8 +367,8 @@ static void newport_clear(struct vc_data *vc, int sy, int sx, int height,
}
}
-static void newport_putc(struct vc_data *vc, int charattr, int ypos,
- int xpos)
+static void newport_putc(struct vc_data *vc, u16 charattr, unsigned int ypos,
+ unsigned int xpos)
{
unsigned char *p;
@@ -396,12 +396,13 @@ static void newport_putc(struct vc_data *vc, int charattr, int ypos,
RENDER(npregs, p);
}
-static void newport_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos)
+static void newport_putcs(struct vc_data *vc, const u16 *s,
+ unsigned int count, unsigned int ypos,
+ unsigned int xpos)
{
- int i;
- int charattr;
unsigned char *p;
+ unsigned int i;
+ u16 charattr;
charattr = (scr_readw(s) >> 8) & 0xff;
@@ -437,32 +438,28 @@ static void newport_putcs(struct vc_data *vc, const unsigned short *s,
}
}
-static void newport_cursor(struct vc_data *vc, int mode)
+static void newport_cursor(struct vc_data *vc, bool enable)
{
unsigned short treg;
int xcurs, ycurs;
- switch (mode) {
- case CM_ERASE:
- treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
- newport_vc2_set(npregs, VC2_IREG_CONTROL,
- (treg & ~(VC2_CTRL_ECDISP)));
- break;
+ treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
- case CM_MOVE:
- case CM_DRAW:
- treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
+ if (!enable) {
newport_vc2_set(npregs, VC2_IREG_CONTROL,
- (treg | VC2_CTRL_ECDISP));
- xcurs = (vc->vc_pos - vc->vc_visible_origin) / 2;
- ycurs = ((xcurs / vc->vc_cols) << 4) + 31;
- xcurs = ((xcurs % vc->vc_cols) << 3) + xcurs_correction;
- newport_vc2_set(npregs, VC2_IREG_CURSX, xcurs);
- newport_vc2_set(npregs, VC2_IREG_CURSY, ycurs);
+ (treg & ~(VC2_CTRL_ECDISP)));
+ return;
}
+
+ newport_vc2_set(npregs, VC2_IREG_CONTROL, (treg | VC2_CTRL_ECDISP));
+ xcurs = (vc->vc_pos - vc->vc_visible_origin) / 2;
+ ycurs = ((xcurs / vc->vc_cols) << 4) + 31;
+ xcurs = ((xcurs % vc->vc_cols) << 3) + xcurs_correction;
+ newport_vc2_set(npregs, VC2_IREG_CURSX, xcurs);
+ newport_vc2_set(npregs, VC2_IREG_CURSY, ycurs);
}
-static int newport_switch(struct vc_data *vc)
+static bool newport_switch(struct vc_data *vc)
{
static int logo_drawn = 0;
@@ -476,14 +473,15 @@ static int newport_switch(struct vc_data *vc)
}
}
- return 1;
+ return true;
}
-static int newport_blank(struct vc_data *c, int blank, int mode_switch)
+static bool newport_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
unsigned short treg;
- if (blank == 0) {
+ if (blank == VESA_NO_BLANKING) {
/* unblank console */
treg = newport_vc2_get(npregs, VC2_IREG_CONTROL);
newport_vc2_set(npregs, VC2_IREG_CONTROL,
@@ -494,10 +492,12 @@ static int newport_blank(struct vc_data *c, int blank, int mode_switch)
newport_vc2_set(npregs, VC2_IREG_CONTROL,
(treg & ~(VC2_CTRL_EDISP)));
}
- return 1;
+
+ return true;
}
-static int newport_set_font(int unit, struct console_font *op, unsigned int vpitch)
+static int newport_set_font(int unit, const struct console_font *op,
+ unsigned int vpitch)
{
int w = op->width;
int h = op->height;
@@ -564,12 +564,13 @@ static int newport_set_def_font(int unit, struct console_font *op)
return 0;
}
-static int newport_font_default(struct vc_data *vc, struct console_font *op, char *name)
+static int newport_font_default(struct vc_data *vc, struct console_font *op,
+ const char *name)
{
return newport_set_def_font(vc->vc_num, op);
}
-static int newport_font_set(struct vc_data *vc, struct console_font *font,
+static int newport_font_set(struct vc_data *vc, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
return newport_set_font(vc->vc_num, font, vpitch);
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c
index 992a4fa431aa..4c7b4959a1aa 100644
--- a/drivers/video/console/sticon.c
+++ b/drivers/video/console/sticon.c
@@ -71,19 +71,8 @@ static const char *sticon_startup(void)
return "STI console";
}
-static void sticon_putc(struct vc_data *conp, int c, int ypos, int xpos)
-{
- if (vga_is_gfx || console_blanked)
- return;
-
- if (conp->vc_mode != KD_TEXT)
- return;
-
- sti_putc(sticon_sti, c, ypos, xpos, font_data[conp->vc_num]);
-}
-
-static void sticon_putcs(struct vc_data *conp, const unsigned short *s,
- int count, int ypos, int xpos)
+static void sticon_putcs(struct vc_data *conp, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos)
{
if (vga_is_gfx || console_blanked)
return;
@@ -97,7 +86,7 @@ static void sticon_putcs(struct vc_data *conp, const unsigned short *s,
}
}
-static void sticon_cursor(struct vc_data *conp, int mode)
+static void sticon_cursor(struct vc_data *conp, bool enable)
{
unsigned short car1;
@@ -106,23 +95,20 @@ static void sticon_cursor(struct vc_data *conp, int mode)
return;
car1 = conp->vc_screenbuf[conp->state.x + conp->state.y * conp->vc_cols];
- switch (mode) {
- case CM_ERASE:
+ if (!enable) {
sti_putc(sticon_sti, car1, conp->state.y, conp->state.x,
font_data[conp->vc_num]);
- break;
- case CM_MOVE:
- case CM_DRAW:
- switch (CUR_SIZE(conp->vc_cursor_type)) {
- case CUR_UNDERLINE:
- case CUR_LOWER_THIRD:
- case CUR_LOWER_HALF:
- case CUR_TWO_THIRDS:
- case CUR_BLOCK:
- sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11),
- conp->state.y, conp->state.x, font_data[conp->vc_num]);
- break;
- }
+ return;
+ }
+
+ switch (CUR_SIZE(conp->vc_cursor_type)) {
+ case CUR_UNDERLINE:
+ case CUR_LOWER_THIRD:
+ case CUR_LOWER_HALF:
+ case CUR_TWO_THIRDS:
+ case CUR_BLOCK:
+ sti_putc(sticon_sti, (car1 & 255) + (0 << 8) + (7 << 11),
+ conp->state.y, conp->state.x, font_data[conp->vc_num]);
break;
}
}
@@ -135,7 +121,7 @@ static bool sticon_scroll(struct vc_data *conp, unsigned int t,
if (vga_is_gfx)
return false;
- sticon_cursor(conp, CM_ERASE);
+ sticon_cursor(conp, false);
switch (dir) {
case SM_UP:
@@ -167,7 +153,7 @@ static void sticon_set_def_font(int unit)
}
}
-static int sticon_set_font(struct vc_data *vc, struct console_font *op,
+static int sticon_set_font(struct vc_data *vc, const struct console_font *op,
unsigned int vpitch)
{
struct sti_struct *sti = sticon_sti;
@@ -260,20 +246,21 @@ static int sticon_set_font(struct vc_data *vc, struct console_font *op,
return 0;
}
-static int sticon_font_default(struct vc_data *vc, struct console_font *op, char *name)
+static int sticon_font_default(struct vc_data *vc, struct console_font *op,
+ const char *name)
{
sticon_set_def_font(vc->vc_num);
return 0;
}
-static int sticon_font_set(struct vc_data *vc, struct console_font *font,
+static int sticon_font_set(struct vc_data *vc, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
return sticon_set_font(vc, font, vpitch);
}
-static void sticon_init(struct vc_data *c, int init)
+static void sticon_init(struct vc_data *c, bool init)
{
struct sti_struct *sti = sticon_sti;
int vc_cols, vc_rows;
@@ -300,33 +287,32 @@ static void sticon_deinit(struct vc_data *c)
sticon_set_def_font(i);
}
-static void sticon_clear(struct vc_data *conp, int sy, int sx, int height,
- int width)
+static void sticon_clear(struct vc_data *conp, unsigned int sy, unsigned int sx,
+ unsigned int width)
{
- if (!height || !width)
- return;
-
- sti_clear(sticon_sti, sy, sx, height, width,
+ sti_clear(sticon_sti, sy, sx, 1, width,
conp->vc_video_erase_char, font_data[conp->vc_num]);
}
-static int sticon_switch(struct vc_data *conp)
+static bool sticon_switch(struct vc_data *conp)
{
- return 1; /* needs refreshing */
+ return true; /* needs refreshing */
}
-static int sticon_blank(struct vc_data *c, int blank, int mode_switch)
+static bool sticon_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
- if (blank == 0) {
+ if (blank == VESA_NO_BLANKING) {
if (mode_switch)
vga_is_gfx = 0;
- return 1;
+ return true;
}
sti_clear(sticon_sti, 0, 0, c->vc_rows, c->vc_cols, BLANK,
font_data[c->vc_num]);
if (mode_switch)
vga_is_gfx = 1;
- return 1;
+
+ return true;
}
static u8 sticon_build_attr(struct vc_data *conp, u8 color,
@@ -365,7 +351,6 @@ static const struct consw sti_con = {
.con_init = sticon_init,
.con_deinit = sticon_deinit,
.con_clear = sticon_clear,
- .con_putc = sticon_putc,
.con_putcs = sticon_putcs,
.con_cursor = sticon_cursor,
.con_scroll = sticon_scroll,
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 8ef1579fa57f..7597f04b0dc7 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -65,7 +65,7 @@ static struct vgastate vgastate;
* Interface used by the world
*/
-static int vgacon_set_origin(struct vc_data *c);
+static bool vgacon_set_origin(struct vc_data *c);
static struct uni_pagedict *vgacon_uni_pagedir;
static int vgacon_refcount;
@@ -81,7 +81,7 @@ static unsigned int vga_video_num_lines; /* Number of text lines */
static bool vga_can_do_color; /* Do we support colors? */
static unsigned int vga_default_font_height __read_mostly; /* Height of default screen font */
static unsigned char vga_video_type __read_mostly; /* Card type */
-static int vga_vesa_blanked;
+static enum vesa_blank_mode vga_vesa_blanked;
static bool vga_palette_blanked;
static bool vga_is_gfx;
static bool vga_512_chars;
@@ -138,8 +138,40 @@ static inline void vga_set_mem_top(struct vc_data *c)
static void vgacon_scrolldelta(struct vc_data *c, int lines)
{
- vc_scrolldelta_helper(c, lines, vga_rolled_over, (void *)vga_vram_base,
- vga_vram_size);
+ unsigned long scr_end = c->vc_scr_end - vga_vram_base;
+ unsigned long vorigin = c->vc_visible_origin - vga_vram_base;
+ unsigned long origin = c->vc_origin - vga_vram_base;
+ int margin = c->vc_size_row * 4;
+ int from, wrap, from_off, avail;
+
+ /* Turn scrollback off */
+ if (!lines) {
+ c->vc_visible_origin = c->vc_origin;
+ return;
+ }
+
+ /* Do we have already enough to allow jumping from 0 to the end? */
+ if (vga_rolled_over > scr_end + margin) {
+ from = scr_end;
+ wrap = vga_rolled_over + c->vc_size_row;
+ } else {
+ from = 0;
+ wrap = vga_vram_size;
+ }
+
+ from_off = (vorigin - from + wrap) % wrap + lines * c->vc_size_row;
+ avail = (origin - from + wrap) % wrap;
+
+ /* Only a little piece would be left? Show all incl. the piece! */
+ if (avail < 2 * margin)
+ margin = 0;
+ if (from_off < margin)
+ from_off = 0;
+ if (from_off > avail - margin)
+ from_off = avail;
+
+ c->vc_visible_origin = vga_vram_base + (from + from_off) % wrap;
+
vga_set_mem_top(c);
}
@@ -335,7 +367,7 @@ static const char *vgacon_startup(void)
return display_desc;
}
-static void vgacon_init(struct vc_data *c, int init)
+static void vgacon_init(struct vc_data *c, bool init)
{
struct uni_pagedict *p;
@@ -352,7 +384,7 @@ static void vgacon_init(struct vc_data *c, int init)
c->vc_scan_lines = vga_scan_lines;
c->vc_font.height = c->vc_cell_height = vga_video_font_height;
- /* set dimensions manually if init != 0 since vc_resize() will fail */
+ /* set dimensions manually if init is true since vc_resize() will fail */
if (init) {
c->vc_cols = vga_video_num_columns;
c->vc_rows = vga_video_num_lines;
@@ -471,7 +503,7 @@ static void vgacon_set_cursor_size(int from, int to)
raw_spin_unlock_irqrestore(&vga_lock, flags);
}
-static void vgacon_cursor(struct vc_data *c, int mode)
+static void vgacon_cursor(struct vc_data *c, bool enable)
{
unsigned int c_height;
@@ -482,47 +514,41 @@ static void vgacon_cursor(struct vc_data *c, int mode)
c_height = c->vc_cell_height;
- switch (mode) {
- case CM_ERASE:
- write_vga(14, (c->vc_pos - vga_vram_base) / 2);
+ write_vga(14, (c->vc_pos - vga_vram_base) / 2);
+
+ if (!enable) {
if (vga_video_type >= VIDEO_TYPE_VGAC)
vgacon_set_cursor_size(31, 30);
else
vgacon_set_cursor_size(31, 31);
- break;
+ return;
+ }
- case CM_MOVE:
- case CM_DRAW:
- write_vga(14, (c->vc_pos - vga_vram_base) / 2);
- switch (CUR_SIZE(c->vc_cursor_type)) {
- case CUR_UNDERLINE:
- vgacon_set_cursor_size(c_height -
- (c_height < 10 ? 2 : 3),
- c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_TWO_THIRDS:
- vgacon_set_cursor_size(c_height / 3, c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_LOWER_THIRD:
- vgacon_set_cursor_size(c_height * 2 / 3, c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_LOWER_HALF:
- vgacon_set_cursor_size(c_height / 2, c_height -
- (c_height < 10 ? 1 : 2));
- break;
- case CUR_NONE:
- if (vga_video_type >= VIDEO_TYPE_VGAC)
- vgacon_set_cursor_size(31, 30);
- else
- vgacon_set_cursor_size(31, 31);
- break;
- default:
- vgacon_set_cursor_size(1, c_height);
- break;
- }
+ switch (CUR_SIZE(c->vc_cursor_type)) {
+ case CUR_UNDERLINE:
+ vgacon_set_cursor_size(c_height - (c_height < 10 ? 2 : 3),
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_TWO_THIRDS:
+ vgacon_set_cursor_size(c_height / 3,
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_LOWER_THIRD:
+ vgacon_set_cursor_size(c_height * 2 / 3,
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_LOWER_HALF:
+ vgacon_set_cursor_size(c_height / 2,
+ c_height - (c_height < 10 ? 1 : 2));
+ break;
+ case CUR_NONE:
+ if (vga_video_type >= VIDEO_TYPE_VGAC)
+ vgacon_set_cursor_size(31, 30);
+ else
+ vgacon_set_cursor_size(31, 31);
+ break;
+ default:
+ vgacon_set_cursor_size(1, c_height);
break;
}
}
@@ -588,7 +614,7 @@ static void vgacon_doresize(struct vc_data *c,
raw_spin_unlock_irqrestore(&vga_lock, flags);
}
-static int vgacon_switch(struct vc_data *c)
+static bool vgacon_switch(struct vc_data *c)
{
int x = c->vc_cols * VGA_FONTWIDTH;
int y = c->vc_rows * c->vc_cell_height;
@@ -617,7 +643,7 @@ static int vgacon_switch(struct vc_data *c)
vgacon_doresize(c, c->vc_cols, c->vc_rows);
}
- return 0; /* Redrawing not needed */
+ return false; /* Redrawing not needed */
}
static void vga_set_palette(struct vc_data *vc, const unsigned char *table)
@@ -657,7 +683,7 @@ static struct {
unsigned char ClockingMode; /* Seq-Controller:01h */
} vga_state;
-static void vga_vesa_blank(struct vgastate *state, int mode)
+static void vga_vesa_blank(struct vgastate *state, enum vesa_blank_mode mode)
{
/* save original values of VGA controller registers */
if (!vga_vesa_blanked) {
@@ -771,13 +797,14 @@ static void vga_pal_blank(struct vgastate *state)
}
}
-static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
+static bool vgacon_blank(struct vc_data *c, enum vesa_blank_mode blank,
+ bool mode_switch)
{
switch (blank) {
- case 0: /* Unblank */
+ case VESA_NO_BLANKING: /* Unblank */
if (vga_vesa_blanked) {
vga_vesa_unblank(&vgastate);
- vga_vesa_blanked = 0;
+ vga_vesa_blanked = VESA_NO_BLANKING;
}
if (vga_palette_blanked) {
vga_set_palette(c, color_table);
@@ -787,8 +814,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
vga_is_gfx = false;
/* Tell console.c that it has to restore the screen itself */
return 1;
- case 1: /* Normal blanking */
- case -1: /* Obsolete */
+ case VESA_VSYNC_SUSPEND: /* Normal blanking */
if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
vga_pal_blank(&vgastate);
vga_palette_blanked = true;
@@ -1004,7 +1030,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
/* void size to cause regs to be rewritten */
cursor_size_lastfrom = 0;
cursor_size_lastto = 0;
- c->vc_sw->con_cursor(c, CM_DRAW);
+ c->vc_sw->con_cursor(c, true);
}
c->vc_font.height = c->vc_cell_height = fontheight;
vc_resize(c, 0, rows); /* Adjust console size */
@@ -1013,7 +1039,7 @@ static int vgacon_adjust_height(struct vc_data *vc, unsigned fontheight)
return 0;
}
-static int vgacon_font_set(struct vc_data *c, struct console_font *font,
+static int vgacon_font_set(struct vc_data *c, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
unsigned charcount = font->charcount;
@@ -1049,12 +1075,12 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font, unsigne
}
static int vgacon_resize(struct vc_data *c, unsigned int width,
- unsigned int height, unsigned int user)
+ unsigned int height, bool from_user)
{
if ((width << 1) * height > vga_vram_size)
return -EINVAL;
- if (user) {
+ if (from_user) {
/*
* Ho ho! Someone (svgatextmode, eh?) may have reprogrammed
* the video mode! Set the new defaults then and go away.
@@ -1074,15 +1100,15 @@ static int vgacon_resize(struct vc_data *c, unsigned int width,
return 0;
}
-static int vgacon_set_origin(struct vc_data *c)
+static bool vgacon_set_origin(struct vc_data *c)
{
if (vga_is_gfx || /* We don't play origin tricks in graphic modes */
(console_blanked && !vga_palette_blanked)) /* Nor we write to blanked screens */
- return 0;
+ return false;
c->vc_origin = c->vc_visible_origin = vga_vram_base;
vga_set_mem_top(c);
vga_rolled_over = 0;
- return 1;
+ return true;
}
static void vgacon_save_screen(struct vc_data *c)
@@ -1159,11 +1185,10 @@ static bool vgacon_scroll(struct vc_data *c, unsigned int t, unsigned int b,
* The console `switch' structure for the VGA based console
*/
-static void vgacon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width) { }
-static void vgacon_putc(struct vc_data *vc, int c, int ypos, int xpos) { }
-static void vgacon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos) { }
+static void vgacon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width) { }
+static void vgacon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos) { }
const struct consw vga_con = {
.owner = THIS_MODULE,
@@ -1171,7 +1196,6 @@ const struct consw vga_con = {
.con_init = vgacon_init,
.con_deinit = vgacon_deinit,
.con_clear = vgacon_clear,
- .con_putc = vgacon_putc,
.con_putcs = vgacon_putcs,
.con_cursor = vgacon_cursor,
.con_scroll = vgacon_scroll,
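
The scrolldelta hunk above open-codes what vc_scrolldelta_helper() used to do. The offset arithmetic is easier to follow as a pure function; here is a standalone model over plain integers that mirrors the clamping in the hunk, using byte offsets relative to the start of VGA memory and negative lines meaning "scroll back":

#include <stdio.h>

static long visible_origin(long origin, long vorigin, long scr_end,
			   long rolled_over, long vram_size,
			   long size_row, int lines)
{
	long margin = size_row * 4;
	long from, wrap, from_off, avail;

	if (!lines)				/* scrollback off: snap to the live screen */
		return origin;

	if (rolled_over > scr_end + margin) {	/* history has wrapped past the screen */
		from = scr_end;
		wrap = rolled_over + size_row;
	} else {
		from = 0;
		wrap = vram_size;
	}

	from_off = (vorigin - from + wrap) % wrap + lines * size_row;
	avail = (origin - from + wrap) % wrap;

	if (avail < 2 * margin)			/* only a little piece left: show it all */
		margin = 0;
	if (from_off < margin)
		from_off = 0;
	if (from_off > avail - margin)
		from_off = avail;

	return (from + from_off) % wrap;
}

int main(void)
{
	/* 80x25 text screen (160-byte rows), currently live, scroll back 8 rows */
	printf("%ld\n", visible_origin(4000, 4000, 8000, 12000, 65536, 160, -8));
	/* prints 2720, i.e. 4000 - 8 * 160 */
	return 0;
}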
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index a61b8260b8f3..e3179e987cdb 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -1523,7 +1523,7 @@ config FB_FSL_DIU
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
depends on FB && HAVE_CLK && HAS_IOMEM
- depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
+ depends on SUPERH || COMPILE_TEST
depends on FB_DEVICE
select FB_BACKLIGHT
select FB_DEFERRED_IO
diff --git a/drivers/video/fbdev/arkfb.c b/drivers/video/fbdev/arkfb.c
index dca9c0325b3f..082501feceb9 100644
--- a/drivers/video/fbdev/arkfb.c
+++ b/drivers/video/fbdev/arkfb.c
@@ -622,8 +622,13 @@ static int arkfb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
@@ -635,8 +640,10 @@ static int arkfb_set_par(struct fb_info *info)
info->tileops = &arkfb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c
index 8587c9da0670..3ff1b2a8659e 100644
--- a/drivers/video/fbdev/core/bitblit.c
+++ b/drivers/video/fbdev/core/bitblit.c
@@ -233,7 +233,7 @@ static void bit_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void bit_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -348,16 +348,7 @@ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
mask[i++] = msk;
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 46823c2e2ba1..fcabc668e9fb 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -351,7 +351,7 @@ static void fb_flashcursor(struct work_struct *work)
struct fb_info *info;
struct vc_data *vc = NULL;
int c;
- int mode;
+ bool enable;
int ret;
/* FIXME: we should sort out the unbind locking instead */
@@ -375,9 +375,8 @@ static void fb_flashcursor(struct work_struct *work)
}
c = scr_readw((u16 *) vc->vc_pos);
- mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
- CM_ERASE : CM_DRAW;
- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ enable = ops->cursor_flash && !ops->cursor_state.enable;
+ ops->cursor(vc, info, enable, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
console_unlock();
@@ -920,7 +919,7 @@ static void display_to_var(struct fb_var_screeninfo *var,
static const char *fbcon_startup(void)
{
- const char *display_desc = "frame buffer device";
+ static const char display_desc[] = "frame buffer device";
struct fbcon_display *p = &fb_display[fg_console];
struct vc_data *vc = vc_cons[fg_console].d;
const struct font_desc *font = NULL;
@@ -987,7 +986,7 @@ static const char *fbcon_startup(void)
return display_desc;
}
-static void fbcon_init(struct vc_data *vc, int init)
+static void fbcon_init(struct vc_data *vc, bool init)
{
struct fb_info *info;
struct fbcon_ops *ops;
@@ -1234,8 +1233,8 @@ finished:
* restriction is simplicity & efficiency at the moment.
*/
-static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
- int width)
+static void __fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int height, unsigned int width)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -1272,8 +1271,14 @@ static void fbcon_clear(struct vc_data *vc, int sy, int sx, int height,
ops->clear(vc, info, real_y(p, sy), sx, height, width);
}
-static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
- int count, int ypos, int xpos)
+static void fbcon_clear(struct vc_data *vc, unsigned int sy, unsigned int sx,
+ unsigned int width)
+{
+ __fbcon_clear(vc, sy, sx, 1, width);
+}
+
+static void fbcon_putcs(struct vc_data *vc, const u16 *s, unsigned int count,
+ unsigned int ypos, unsigned int xpos)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_display *p = &fb_display[vc->vc_num];
@@ -1285,14 +1290,6 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s,
get_color(vc, info, scr_readw(s), 0));
}
-static void fbcon_putc(struct vc_data *vc, int c, int ypos, int xpos)
-{
- unsigned short chr;
-
- scr_writew(c, &chr);
- fbcon_putcs(vc, &chr, 1, ypos, xpos);
-}
-
static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
@@ -1302,7 +1299,7 @@ static void fbcon_clear_margins(struct vc_data *vc, int bottom_only)
ops->clear_margins(vc, info, margin_color, bottom_only);
}
-static void fbcon_cursor(struct vc_data *vc, int mode)
+static void fbcon_cursor(struct vc_data *vc, bool enable)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -1318,12 +1315,12 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
else
fbcon_add_cursor_work(info);
- ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+ ops->cursor_flash = enable;
if (!ops->cursor)
return;
- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ ops->cursor(vc, info, enable, get_color(vc, info, c, 1),
get_color(vc, info, c, 0));
}
@@ -1743,7 +1740,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
if (fbcon_is_inactive(vc, info))
return true;
- fbcon_cursor(vc, CM_ERASE);
+ fbcon_cursor(vc, false);
/*
* ++Geert: Only use ywrap/ypan if the console is in text mode
@@ -1759,7 +1756,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, t, b - t - count,
count);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
(b - count)),
@@ -1782,7 +1779,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_up;
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_REDRAW:
@@ -1800,7 +1797,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
vc->vc_rows - b, b);
} else
fbcon_redraw_move(vc, p, t + count, b - t - count, t);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_MOVE:
@@ -1823,14 +1820,14 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_up;
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_REDRAW:
redraw_up:
fbcon_redraw(vc, t, b - t - count,
count * vc->vc_cols);
- fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
(b - count)),
@@ -1847,7 +1844,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
case SCROLL_MOVE:
fbcon_redraw_blit(vc, info, p, b - 1, b - t - count,
-count);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
t),
@@ -1870,7 +1867,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_down;
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_MOVE:
@@ -1892,7 +1889,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
b - t - count, vc->vc_cols);
else
goto redraw_down;
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_REDRAW:
@@ -1909,14 +1906,14 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
fbcon_redraw_move(vc, p, count, t, 0);
} else
fbcon_redraw_move(vc, p, t, b - t - count, t + count);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_REDRAW:
redraw_down:
fbcon_redraw(vc, b - 1, b - t - count,
-count * vc->vc_cols);
- fbcon_clear(vc, t, 0, count, vc->vc_cols);
+ __fbcon_clear(vc, t, 0, count, vc->vc_cols);
scr_memsetw((unsigned short *) (vc->vc_origin +
vc->vc_size_row *
t),
@@ -1995,7 +1992,7 @@ static void updatescrollmode(struct fbcon_display *p,
#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
static int fbcon_resize(struct vc_data *vc, unsigned int width,
- unsigned int height, unsigned int user)
+ unsigned int height, bool from_user)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2058,7 +2055,7 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
return 0;
}
-static int fbcon_switch(struct vc_data *vc)
+static bool fbcon_switch(struct vc_data *vc)
{
struct fb_info *info, *old_info = NULL;
struct fbcon_ops *ops;
@@ -2180,9 +2177,9 @@ static int fbcon_switch(struct vc_data *vc)
vc->vc_origin + vc->vc_size_row * vc->vc_top,
vc->vc_size_row * (vc->vc_bottom -
vc->vc_top) / 2);
- return 0;
+ return false;
}
- return 1;
+ return true;
}
static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
@@ -2195,12 +2192,13 @@ static void fbcon_generic_blank(struct vc_data *vc, struct fb_info *info,
oldc = vc->vc_video_erase_char;
vc->vc_video_erase_char &= charmask;
- fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols);
+ __fbcon_clear(vc, 0, 0, vc->vc_rows, vc->vc_cols);
vc->vc_video_erase_char = oldc;
}
}
-static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
+static bool fbcon_blank(struct vc_data *vc, enum vesa_blank_mode blank,
+ bool mode_switch)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2222,7 +2220,7 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
if (!fbcon_is_inactive(vc, info)) {
if (ops->blank_state != blank) {
ops->blank_state = blank;
- fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
+ fbcon_cursor(vc, !blank);
ops->cursor_flash = (!blank);
if (fb_blank(info, blank))
@@ -2239,10 +2237,10 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
else
fbcon_add_cursor_work(info);
- return 0;
+ return false;
}
-static int fbcon_debug_enter(struct vc_data *vc)
+static void fbcon_debug_enter(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2252,10 +2250,9 @@ static int fbcon_debug_enter(struct vc_data *vc)
if (info->fbops->fb_debug_enter)
info->fbops->fb_debug_enter(info);
fbcon_set_palette(vc, color_table);
- return 0;
}
-static int fbcon_debug_leave(struct vc_data *vc)
+static void fbcon_debug_leave(struct vc_data *vc)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
struct fbcon_ops *ops = info->fbcon_par;
@@ -2263,7 +2260,6 @@ static int fbcon_debug_leave(struct vc_data *vc)
ops->graphics = ops->save_graphics;
if (info->fbops->fb_debug_leave)
info->fbops->fb_debug_leave(info);
- return 0;
}
static int fbcon_get_font(struct vc_data *vc, struct console_font *font, unsigned int vpitch)
@@ -2461,7 +2457,7 @@ err_out:
* but lets not assume that, since charcount of 512 is small for unicode support.
*/
-static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
+static int fbcon_set_font(struct vc_data *vc, const struct console_font *font,
unsigned int vpitch, unsigned int flags)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
@@ -2483,12 +2479,12 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
h > FBCON_SWAP(info->var.rotate, info->var.yres, info->var.xres))
return -EINVAL;
- if (font->width > 32 || font->height > 32)
+ if (font->width > FB_MAX_BLIT_WIDTH || font->height > FB_MAX_BLIT_HEIGHT)
return -EINVAL;
/* Make sure drawing engine can handle the font */
- if (!(info->pixmap.blit_x & BIT(font->width - 1)) ||
- !(info->pixmap.blit_y & BIT(font->height - 1)))
+ if (!test_bit(font->width - 1, info->pixmap.blit_x) ||
+ !test_bit(font->height - 1, info->pixmap.blit_y))
return -EINVAL;
/* Make sure driver can handle the font length */
@@ -2534,7 +2530,8 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
return fbcon_do_set_font(vc, font->width, font->height, charcount, new_data, 1);
}
-static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font, char *name)
+static int fbcon_set_def_font(struct vc_data *vc, struct console_font *font,
+ const char *name)
{
struct fb_info *info = fbcon_info_from_console(vc->vc_num);
const struct font_desc *f;
@@ -2593,35 +2590,6 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table)
fb_set_cmap(&palette_cmap, info);
}
-static u16 *fbcon_screen_pos(const struct vc_data *vc, int offset)
-{
- return (u16 *) (vc->vc_origin + offset);
-}
-
-static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
- int *px, int *py)
-{
- unsigned long ret;
- int x, y;
-
- if (pos >= vc->vc_origin && pos < vc->vc_scr_end) {
- unsigned long offset = (pos - vc->vc_origin) / 2;
-
- x = offset % vc->vc_cols;
- y = offset / vc->vc_cols;
- ret = pos + (vc->vc_cols - x) * 2;
- } else {
- /* Should not happen */
- x = y = 0;
- ret = vc->vc_origin;
- }
- if (px)
- *px = x;
- if (py)
- *py = y;
- return ret;
-}
-
/* As we might be inside of softback, we may work with non-contiguous buffer,
that's why we have to use a separate routine. */
static void fbcon_invert_region(struct vc_data *vc, u16 * p, int cnt)
@@ -2650,7 +2618,7 @@ void fbcon_suspended(struct fb_info *info)
vc = vc_cons[ops->currcon].d;
/* Clear cursor, restore saved data */
- fbcon_cursor(vc, CM_ERASE);
+ fbcon_cursor(vc, false);
}
void fbcon_resumed(struct fb_info *info)
@@ -3082,8 +3050,8 @@ void fbcon_get_requirement(struct fb_info *info,
vc = vc_cons[i].d;
if (vc && vc->vc_mode == KD_TEXT &&
info->node == con2fb_map[i]) {
- caps->x |= 1 << (vc->vc_font.width - 1);
- caps->y |= 1 << (vc->vc_font.height - 1);
+ set_bit(vc->vc_font.width - 1, caps->x);
+ set_bit(vc->vc_font.height - 1, caps->y);
charcnt = vc->vc_font.charcount;
if (caps->len < charcnt)
caps->len = charcnt;
@@ -3094,8 +3062,10 @@ void fbcon_get_requirement(struct fb_info *info,
if (vc && vc->vc_mode == KD_TEXT &&
info->node == con2fb_map[fg_console]) {
- caps->x = 1 << (vc->vc_font.width - 1);
- caps->y = 1 << (vc->vc_font.height - 1);
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(vc->vc_font.width - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(vc->vc_font.height - 1, caps->y);
caps->len = vc->vc_font.charcount;
}
}
@@ -3152,7 +3122,6 @@ static const struct consw fb_con = {
.con_init = fbcon_init,
.con_deinit = fbcon_deinit,
.con_clear = fbcon_clear,
- .con_putc = fbcon_putc,
.con_putcs = fbcon_putcs,
.con_cursor = fbcon_cursor,
.con_scroll = fbcon_scroll,
@@ -3163,8 +3132,6 @@ static const struct consw fb_con = {
.con_font_default = fbcon_set_def_font,
.con_set_palette = fbcon_set_palette,
.con_invert_region = fbcon_invert_region,
- .con_screen_pos = fbcon_screen_pos,
- .con_getxy = fbcon_getxy,
.con_resize = fbcon_resize,
.con_debug_enter = fbcon_debug_enter,
.con_debug_leave = fbcon_debug_leave,
diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
index 0eaf54a21151..df70ea5ec5b3 100644
--- a/drivers/video/fbdev/core/fbcon.h
+++ b/drivers/video/fbdev/core/fbcon.h
@@ -61,8 +61,8 @@ struct fbcon_ops {
int fg, int bg);
void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
int color, int bottom_only);
- void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
- int fg, int bg);
+ void (*cursor)(struct vc_data *vc, struct fb_info *info,
+ bool enable, int fg, int bg);
int (*update_start)(struct fb_info *info);
int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c
index 2789ace79634..f9b794ff7d39 100644
--- a/drivers/video/fbdev/core/fbcon_ccw.c
+++ b/drivers/video/fbdev/core/fbcon_ccw.c
@@ -218,7 +218,7 @@ static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void ccw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -349,16 +349,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
kfree(tmp);
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c
index 86a254c1b2b7..903f6fc174e1 100644
--- a/drivers/video/fbdev/core/fbcon_cw.c
+++ b/drivers/video/fbdev/core/fbcon_cw.c
@@ -201,7 +201,7 @@ static void cw_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void cw_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -332,16 +332,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
kfree(tmp);
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c
index 23bc045769d0..594331936fd3 100644
--- a/drivers/video/fbdev/core/fbcon_ud.c
+++ b/drivers/video/fbdev/core/fbcon_ud.c
@@ -248,7 +248,7 @@ static void ud_clear_margins(struct vc_data *vc, struct fb_info *info,
}
}
-static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void ud_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_cursor cursor;
@@ -372,16 +372,7 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
mask[i++] = ~msk;
}
- switch (mode) {
- case CM_ERASE:
- ops->cursor_state.enable = 0;
- break;
- case CM_DRAW:
- case CM_MOVE:
- default:
- ops->cursor_state.enable = (use_sw) ? 0 : 1;
- break;
- }
+ ops->cursor_state.enable = enable && !use_sw;
cursor.image.data = src;
cursor.image.fg_color = ops->cursor_state.image.fg_color;
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 48287366e0d4..4c4ad0a86a50 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -212,8 +212,8 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
fbcon_get_requirement(info, &caps);
info->fbops->fb_get_caps(info, &fbcaps, var);
- if (((fbcaps.x ^ caps.x) & caps.x) ||
- ((fbcaps.y ^ caps.y) & caps.y) ||
+ if (!bitmap_subset(caps.x, fbcaps.x, FB_MAX_BLIT_WIDTH) ||
+ !bitmap_subset(caps.y, fbcaps.y, FB_MAX_BLIT_HEIGHT) ||
(fbcaps.len < caps.len))
err = -EINVAL;
@@ -420,11 +420,11 @@ static int do_register_framebuffer(struct fb_info *fb_info)
}
fb_info->pixmap.offset = 0;
- if (!fb_info->pixmap.blit_x)
- fb_info->pixmap.blit_x = ~(u32)0;
+ if (bitmap_empty(fb_info->pixmap.blit_x, FB_MAX_BLIT_WIDTH))
+ bitmap_fill(fb_info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
- if (!fb_info->pixmap.blit_y)
- fb_info->pixmap.blit_y = ~(u32)0;
+ if (bitmap_empty(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT))
+ bitmap_fill(fb_info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
if (!fb_info->modelist.prev || !fb_info->modelist.next)
INIT_LIST_HEAD(&fb_info->modelist);
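
The fbcon and fbmem hunks above replace the old 32-bit blit_x/blit_y masks with bitmaps, so blit capabilities are no longer capped at 32-pixel fonts and the capability match becomes a subset test rather than mask arithmetic. A self-contained model of the new per-font check, with hand-rolled helpers standing in for the kernel's set_bit()/test_bit() and limits chosen here only for illustration (the real bounds are FB_MAX_BLIT_WIDTH and FB_MAX_BLIT_HEIGHT):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BLIT_WIDTH	64	/* illustrative stand-ins */
#define MAX_BLIT_HEIGHT	128
#define WORDS(n)	(((n) + 63) / 64)

static void bm_set(unsigned long long *bm, unsigned int bit)
{
	bm[bit / 64] |= 1ULL << (bit % 64);
}

static bool bm_test(const unsigned long long *bm, unsigned int bit)
{
	return bm[bit / 64] & (1ULL << (bit % 64));
}

int main(void)
{
	unsigned long long blit_x[WORDS(MAX_BLIT_WIDTH)] = { 0 };
	unsigned long long blit_y[WORDS(MAX_BLIT_HEIGHT)] = { 0 };
	unsigned int w = 8, h = 32;

	/* hardware that only blits 8-pixel-wide, 16-pixel-high cells */
	bm_set(blit_x, 8 - 1);
	bm_set(blit_y, 16 - 1);

	/* the test fbcon_set_font() now performs for each font */
	if (!bm_test(blit_x, w - 1) || !bm_test(blit_y, h - 1))
		printf("%ux%u font rejected\n", w, h);

	return 0;
}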
diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c
index 79e5bfbdd34c..0a26399dbc89 100644
--- a/drivers/video/fbdev/core/fbmon.c
+++ b/drivers/video/fbdev/core/fbmon.c
@@ -1311,7 +1311,7 @@ int fb_get_mode(int flags, u32 val, struct fb_var_screeninfo *var, struct fb_inf
int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode)
{
- unsigned int htotal, vtotal;
+ unsigned int htotal, vtotal, total;
fbmode->xres = vm->hactive;
fbmode->left_margin = vm->hback_porch;
@@ -1344,8 +1344,9 @@ int fb_videomode_from_videomode(const struct videomode *vm,
vtotal = vm->vactive + vm->vfront_porch + vm->vback_porch +
vm->vsync_len;
/* prevent division by zero */
- if (htotal && vtotal) {
- fbmode->refresh = vm->pixelclock / (htotal * vtotal);
+ total = htotal * vtotal;
+ if (total) {
+ fbmode->refresh = vm->pixelclock / total;
/* a mode must have htotal and vtotal != 0 or it is invalid */
} else {
fbmode->refresh = 0;
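
Testing the product instead of the two factors also covers the case where htotal * vtotal wraps around to zero in 32-bit arithmetic even though both factors are non-zero, which the old `if (htotal && vtotal)` guard let through to the division. A two-line demonstration with values chosen purely for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int htotal = 65536, vtotal = 65536;	/* both non-zero */
	unsigned int total = htotal * vtotal;		/* wraps to 0 in 32-bit unsigned math */

	printf("total = %u\n", total);			/* prints 0, so pixelclock / total would divide by zero */
	return 0;
}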
diff --git a/drivers/video/fbdev/core/svgalib.c b/drivers/video/fbdev/core/svgalib.c
index 2cba158888ea..821b89a0a645 100644
--- a/drivers/video/fbdev/core/svgalib.c
+++ b/drivers/video/fbdev/core/svgalib.c
@@ -354,12 +354,19 @@ void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps,
{
if (var->bits_per_pixel == 0) {
/* can only support 256 8x16 bitmap */
- caps->x = 1 << (8 - 1);
- caps->y = 1 << (16 - 1);
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ bitmap_zero(caps->y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, caps->y);
caps->len = 256;
} else {
- caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0;
- caps->y = ~(u32)0;
+ if (var->bits_per_pixel == 4) {
+ bitmap_zero(caps->x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, caps->x);
+ } else {
+ bitmap_fill(caps->x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(caps->y, FB_MAX_BLIT_HEIGHT);
caps->len = ~(u32)0;
}
}
diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c
index 2768eff247ba..eff7ec4da167 100644
--- a/drivers/video/fbdev/core/tileblit.c
+++ b/drivers/video/fbdev/core/tileblit.c
@@ -79,7 +79,7 @@ static void tile_clear_margins(struct vc_data *vc, struct fb_info *info,
return;
}
-static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
+static void tile_cursor(struct vc_data *vc, struct fb_info *info, bool enable,
int fg, int bg)
{
struct fb_tilecursor cursor;
@@ -87,7 +87,7 @@ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
cursor.sx = vc->state.x;
cursor.sy = vc->state.y;
- cursor.mode = (mode == CM_ERASE || use_sw) ? 0 : 1;
+ cursor.mode = enable && !use_sw;
cursor.fg = fg;
cursor.bg = bg;
diff --git a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
index 7c402e9fd7a9..baec312d7b33 100644
--- a/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
@@ -32,15 +32,6 @@
#define CARMINE_MEM_SIZE 0x8000000
#define DRV_NAME "mb862xxfb"
-#if defined(CONFIG_SOCRATES)
-static struct mb862xx_gc_mode socrates_gc_mode = {
- /* Mode for Prime View PM070WL4 TFT LCD Panel */
- { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
- /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
- 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
-};
-#endif
-
/* Helpers */
static inline int h_total(struct fb_var_screeninfo *var)
{
@@ -666,6 +657,15 @@ static int mb862xx_gdc_init(struct mb862xxfb_par *par)
return 0;
}
+#if defined(CONFIG_SOCRATES)
+static struct mb862xx_gc_mode socrates_gc_mode = {
+ /* Mode for Prime View PM070WL4 TFT LCD Panel */
+ { "800x480", 45, 800, 480, 40000, 86, 42, 33, 10, 128, 2, 0, 0, 0 },
+ /* 16 bits/pixel, 16MB, 133MHz, SDRAM memory mode value */
+ 16, 0x1000000, GC_CCF_COT_133, 0x4157ba63
+};
+#endif
+
static int of_platform_mb862xx_probe(struct platform_device *ofdev)
{
struct device_node *np = ofdev->dev.of_node;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 477789cff8e0..d487941853e6 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -225,17 +225,12 @@ static ssize_t tpo_td043_gamma_show(struct device *dev,
{
struct panel_drv_data *ddata = dev_get_drvdata(dev);
ssize_t len = 0;
- int ret;
int i;
- for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
- ddata->gamma[i]);
- if (ret < 0)
- return ret;
- len += ret;
- }
- buf[len - 1] = '\n';
+ for (i = 0; i < ARRAY_SIZE(ddata->gamma); i++)
+ len += sysfs_emit_at(buf, len, "%u ", ddata->gamma[i]);
+ if (len)
+ buf[len - 1] = '\n';
return len;
}
diff --git a/drivers/video/fbdev/s3fb.c b/drivers/video/fbdev/s3fb.c
index 07722a5ea8ef..ff84106ecf1c 100644
--- a/drivers/video/fbdev/s3fb.c
+++ b/drivers/video/fbdev/s3fb.c
@@ -617,8 +617,13 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
screen_size = info->var.yres_virtual * info->fix.line_length;
@@ -630,8 +635,10 @@ static int s3fb_set_par(struct fb_info *info)
info->tileops = fasttext ? &s3fb_fast_tile_ops : &s3fb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
screen_size = (info->var.xres_virtual * info->var.yres_virtual) / 64;
diff --git a/drivers/video/fbdev/tgafb.c b/drivers/video/fbdev/tgafb.c
index ca43774f3156..dccfc38cfbd5 100644
--- a/drivers/video/fbdev/tgafb.c
+++ b/drivers/video/fbdev/tgafb.c
@@ -380,7 +380,7 @@ tgafb_set_par(struct fb_info *info)
BT463_LOAD_ADDR(par, 0x0000);
TGA_WRITE_REG(par, BT463_PALETTE << 2, TGA_RAMDAC_SETUP_REG);
-#ifdef CONFIG_HW_CONSOLE
+#ifdef CONFIG_VT
for (i = 0; i < 16; i++) {
int j = color_table[i];
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index e1f421e91b4f..73f00c079a94 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1546,7 +1546,7 @@ static ssize_t uvesafb_show_vbe_ver(struct device *dev,
struct fb_info *info = dev_get_drvdata(dev);
struct uvesafb_par *par = info->par;
- return snprintf(buf, PAGE_SIZE, "%.4x\n", par->vbe_ib.vbe_version);
+ return sysfs_emit(buf, "%.4x\n", par->vbe_ib.vbe_version);
}
static DEVICE_ATTR(vbe_version, S_IRUGO, uvesafb_show_vbe_ver, NULL);
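
The omapfb and uvesafb conversions above both move to the sysfs helpers: sysfs_emit() for a single value, sysfs_emit_at() when a show() callback appends in a loop, so the PAGE_SIZE bounds checking no longer has to be open-coded. A minimal sketch of such an attribute; the attribute name and the values[] array are invented for the example:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t values_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	static const unsigned int values[] = { 1, 2, 3 };	/* placeholder data */
	ssize_t len = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(values); i++)
		len += sysfs_emit_at(buf, len, "%u ", values[i]);
	if (len)
		buf[len - 1] = '\n';	/* turn the trailing space into a newline */

	return len;
}
static DEVICE_ATTR_RO(values);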
diff --git a/drivers/video/fbdev/vga16fb.c b/drivers/video/fbdev/vga16fb.c
index b485e9198201..a87bafbb119c 100644
--- a/drivers/video/fbdev/vga16fb.c
+++ b/drivers/video/fbdev/vga16fb.c
@@ -1353,7 +1353,11 @@ static int vga16fb_probe(struct platform_device *dev)
info->var = vga16fb_defined;
info->fix = vga16fb_fix;
/* supports rectangles with widths of multiples of 8 */
- info->pixmap.blit_x = 1 << 7 | 1 << 15 | 1 << 23 | 1 << 31;
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ set_bit(16 - 1, info->pixmap.blit_x);
+ set_bit(24 - 1, info->pixmap.blit_x);
+ set_bit(32 - 1, info->pixmap.blit_x);
info->flags = FBINFO_HWACCEL_YPAN;
i = (info->var.bits_per_pixel == 8) ? 256 : 16;
diff --git a/drivers/video/fbdev/via/accel.c b/drivers/video/fbdev/via/accel.c
index 0a1bc7a4d785..1e04026f0809 100644
--- a/drivers/video/fbdev/via/accel.c
+++ b/drivers/video/fbdev/via/accel.c
@@ -115,7 +115,7 @@ static int hw_bitblt_1(void __iomem *engine, u8 op, u32 width, u32 height,
if (op != VIA_BITBLT_FILL) {
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_1: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
@@ -260,7 +260,7 @@ static int hw_bitblt_2(void __iomem *engine, u8 op, u32 width, u32 height,
writel(tmp, engine + 0x18);
tmp = src_mem ? 0 : src_addr;
- if (dst_addr & 0xE0000007) {
+ if (tmp & 0xE0000007) {
printk(KERN_WARNING "hw_bitblt_2: Unsupported source "
"address %X\n", tmp);
return -EINVAL;
diff --git a/drivers/video/fbdev/vt8623fb.c b/drivers/video/fbdev/vt8623fb.c
index f8d022cb61e8..df984f3a7ff6 100644
--- a/drivers/video/fbdev/vt8623fb.c
+++ b/drivers/video/fbdev/vt8623fb.c
@@ -390,8 +390,13 @@ static int vt8623fb_set_par(struct fb_info *info)
info->tileops = NULL;
/* in 4bpp supports 8p wide tiles only, any tiles otherwise */
- info->pixmap.blit_x = (bpp == 4) ? (1 << (8 - 1)) : (~(u32)0);
- info->pixmap.blit_y = ~(u32)0;
+ if (bpp == 4) {
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ } else {
+ bitmap_fill(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ }
+ bitmap_fill(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
offset_value = (info->var.xres_virtual * bpp) / 64;
fetch_value = ((info->var.xres * bpp) / 128) + 4;
@@ -408,8 +413,10 @@ static int vt8623fb_set_par(struct fb_info *info)
info->tileops = &vt8623fb_tile_ops;
/* supports 8x16 tiles only */
- info->pixmap.blit_x = 1 << (8 - 1);
- info->pixmap.blit_y = 1 << (16 - 1);
+ bitmap_zero(info->pixmap.blit_x, FB_MAX_BLIT_WIDTH);
+ set_bit(8 - 1, info->pixmap.blit_x);
+ bitmap_zero(info->pixmap.blit_y, FB_MAX_BLIT_HEIGHT);
+ set_bit(16 - 1, info->pixmap.blit_y);
offset_value = info->var.xres_virtual / 16;
fetch_value = (info->var.xres / 8) + 8;
diff --git a/drivers/video/sticore.c b/drivers/video/sticore.c
index 7115b325817f..88a1758616e0 100644
--- a/drivers/video/sticore.c
+++ b/drivers/video/sticore.c
@@ -529,7 +529,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
if (fbfont_name && strlen(fbfont_name))
fbfont = find_font(fbfont_name);
if (!fbfont)
- fbfont = get_default_font(1024,768, ~(u32)0, ~(u32)0);
+ fbfont = get_default_font(1024, 768, NULL, NULL);
if (!fbfont)
return NULL;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index f4080692b351..f173587893cb 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -353,7 +353,7 @@ static void virtio_dev_remove(struct device *_d)
of_node_put(dev->dev.of_node);
}
-static struct bus_type virtio_bus = {
+static const struct bus_type virtio_bus = {
.name = "virtio",
.match = virtio_dev_match,
.dev_groups = virtio_dev_groups,
@@ -510,8 +510,10 @@ int virtio_device_freeze(struct virtio_device *dev)
if (drv && drv->freeze) {
ret = drv->freeze(dev);
- if (ret)
+ if (ret) {
+ virtio_config_enable(dev);
return ret;
+ }
}
if (dev->config->destroy_avq)
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 49299b1f9ec7..6f7e5010a673 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1340,7 +1340,7 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
sizeof(struct vring_packed_desc));
vq->packed.vring.desc[head].id = cpu_to_le16(id);
- if (vq->do_unmap) {
+ if (vq->use_dma_api) {
vq->packed.desc_extra[id].addr = addr;
vq->packed.desc_extra[id].len = total_sg *
sizeof(struct vring_packed_desc);
@@ -1481,7 +1481,7 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
desc[i].len = cpu_to_le32(sg->length);
desc[i].id = cpu_to_le16(id);
- if (unlikely(vq->do_unmap)) {
+ if (unlikely(vq->use_dma_api)) {
vq->packed.desc_extra[curr].addr = addr;
vq->packed.desc_extra[curr].len = sg->length;
vq->packed.desc_extra[curr].flags =
@@ -1615,7 +1615,7 @@ static void detach_buf_packed(struct vring_virtqueue *vq,
vq->free_head = id;
vq->vq.num_free += state->num;
- if (unlikely(vq->do_unmap)) {
+ if (unlikely(vq->use_dma_api)) {
curr = id;
for (i = 0; i < state->num; i++) {
vring_unmap_extra_packed(vq,
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 8d63e5923d24..e803db0da307 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -183,8 +183,11 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
+ if (ops->get_vq_size)
+ max_num = ops->get_vq_size(vdpa, index);
+ else
+ max_num = ops->get_vq_num_max(vdpa);
- max_num = ops->get_vq_num_max(vdpa);
if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 513c0b114337..e6049a75b35b 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -78,5 +78,15 @@ config W1_MASTER_SGI
This support is also available as a module. If so, the module
will be called sgi_w1.
+config W1_MASTER_UART
+ tristate "UART 1-wire driver"
+ depends on SERIAL_DEV_BUS
+ help
+ Say Y here if you want to communicate with your 1-wire devices using
+ the UART interface.
+
+ This support is also available as a module. If so, the module
+ will be called w1-uart.
+
endmenu
diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile
index 6c5a21f9b88c..227f80987e69 100644
--- a/drivers/w1/masters/Makefile
+++ b/drivers/w1/masters/Makefile
@@ -12,3 +12,4 @@ obj-$(CONFIG_W1_MASTER_MXC) += mxc_w1.o
obj-$(CONFIG_W1_MASTER_GPIO) += w1-gpio.o
obj-$(CONFIG_HDQ_MASTER_OMAP) += omap_hdq.o
obj-$(CONFIG_W1_MASTER_SGI) += sgi_w1.o
+obj-$(CONFIG_W1_MASTER_UART) += w1-uart.o
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index 090cbbf9e1e2..ba1d0866d1c4 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -151,15 +151,13 @@ out_disable_clk:
/*
* disassociate the w1 device from the driver
*/
-static int mxc_w1_remove(struct platform_device *pdev)
+static void mxc_w1_remove(struct platform_device *pdev)
{
struct mxc_w1_device *mdev = platform_get_drvdata(pdev);
w1_remove_master_device(&mdev->bus_master);
clk_disable_unprepare(mdev->clk);
-
- return 0;
}
static const struct of_device_id mxc_w1_dt_ids[] = {
@@ -174,7 +172,7 @@ static struct platform_driver mxc_w1_driver = {
.of_match_table = mxc_w1_dt_ids,
},
.probe = mxc_w1_probe,
- .remove = mxc_w1_remove,
+ .remove_new = mxc_w1_remove,
};
module_platform_driver(mxc_w1_driver);
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index 6a39b71eb718..d1cb5190445a 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -647,7 +647,7 @@ err_w1:
return ret;
}
-static int omap_hdq_remove(struct platform_device *pdev)
+static void omap_hdq_remove(struct platform_device *pdev)
{
int active;
@@ -661,8 +661,6 @@ static int omap_hdq_remove(struct platform_device *pdev)
if (active >= 0)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
- return 0;
}
static const struct of_device_id omap_hdq_dt_ids[] = {
@@ -674,7 +672,7 @@ MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);
static struct platform_driver omap_hdq_driver = {
.probe = omap_hdq_probe,
- .remove = omap_hdq_remove,
+ .remove_new = omap_hdq_remove,
.driver = {
.name = "omap_hdq",
.of_match_table = omap_hdq_dt_ids,
diff --git a/drivers/w1/masters/sgi_w1.c b/drivers/w1/masters/sgi_w1.c
index d7fbc3c146e1..7bb7876aa70e 100644
--- a/drivers/w1/masters/sgi_w1.c
+++ b/drivers/w1/masters/sgi_w1.c
@@ -105,13 +105,11 @@ static int sgi_w1_probe(struct platform_device *pdev)
/*
* disassociate the w1 device from the driver
*/
-static int sgi_w1_remove(struct platform_device *pdev)
+static void sgi_w1_remove(struct platform_device *pdev)
{
struct sgi_w1_device *sdev = platform_get_drvdata(pdev);
w1_remove_master_device(&sdev->bus_master);
-
- return 0;
}
static struct platform_driver sgi_w1_driver = {
@@ -119,7 +117,7 @@ static struct platform_driver sgi_w1_driver = {
.name = "sgi_w1",
},
.probe = sgi_w1_probe,
- .remove = sgi_w1_remove,
+ .remove_new = sgi_w1_remove,
};
module_platform_driver(sgi_w1_driver);
diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c
index 05c67038ed20..34128e6bbbfa 100644
--- a/drivers/w1/masters/w1-gpio.c
+++ b/drivers/w1/masters/w1-gpio.c
@@ -141,7 +141,7 @@ static int w1_gpio_probe(struct platform_device *pdev)
return 0;
}
-static int w1_gpio_remove(struct platform_device *pdev)
+static void w1_gpio_remove(struct platform_device *pdev)
{
struct w1_bus_master *master = platform_get_drvdata(pdev);
struct w1_gpio_ddata *ddata = master->data;
@@ -150,8 +150,6 @@ static int w1_gpio_remove(struct platform_device *pdev)
gpiod_set_value(ddata->pullup_gpiod, 0);
w1_remove_master_device(master);
-
- return 0;
}
static struct platform_driver w1_gpio_driver = {
@@ -160,7 +158,7 @@ static struct platform_driver w1_gpio_driver = {
.of_match_table = of_match_ptr(w1_gpio_dt_ids),
},
.probe = w1_gpio_probe,
- .remove = w1_gpio_remove,
+ .remove_new = w1_gpio_remove,
};
module_platform_driver(w1_gpio_driver);
diff --git a/drivers/w1/masters/w1-uart.c b/drivers/w1/masters/w1-uart.c
new file mode 100644
index 000000000000..a31782e56ba7
--- /dev/null
+++ b/drivers/w1/masters/w1-uart.c
@@ -0,0 +1,415 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * w1-uart - UART 1-Wire bus driver
+ *
+ * Uses the UART interface (via Serial Device Bus) to create the 1-Wire
+ * timing patterns. Implements the following 1-Wire master interface:
+ *
+ * - reset_bus: requests baud-rate 9600
+ *
+ * - touch_bit: requests baud-rate 115200
+ *
+ * Author: Christoph Winklhofer <cj.winklhofer@gmail.com>
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/serdev.h>
+#include <linux/w1.h>
+
+/* UART packet contains start and stop bit */
+#define W1_UART_BITS_PER_PACKET (BITS_PER_BYTE + 2)
+
+/* Timeout to wait for completion of serdev-receive */
+#define W1_UART_TIMEOUT msecs_to_jiffies(500)
+
+/**
+ * struct w1_uart_config - configuration for 1-Wire operation
+ * @baudrate: baud-rate returned from serdev
+ * @delay_us: delay to complete a 1-Wire cycle (in us)
+ * @tx_byte: byte to generate 1-Wire timing pattern
+ */
+struct w1_uart_config {
+ unsigned int baudrate;
+ unsigned int delay_us;
+ u8 tx_byte;
+};
+
+/**
+ * struct w1_uart_device - 1-Wire UART device structure
+ * @serdev: serial device
+ * @bus: w1-bus master
+ * @cfg_reset: config for 1-Wire reset
+ * @cfg_touch_0: config for 1-Wire write-0 cycle
+ * @cfg_touch_1: config for 1-Wire write-1 and read cycle
+ * @rx_byte_received: completion for serdev receive
+ * @rx_mutex: mutex to protect rx_err and rx_byte
+ * @rx_err: indicates an error in serdev-receive
+ * @rx_byte: result byte from serdev-receive
+ */
+struct w1_uart_device {
+ struct serdev_device *serdev;
+ struct w1_bus_master bus;
+
+ struct w1_uart_config cfg_reset;
+ struct w1_uart_config cfg_touch_0;
+ struct w1_uart_config cfg_touch_1;
+
+ struct completion rx_byte_received;
+ /*
+ * protect rx_err and rx_byte from concurrent access in
+ * w1-callbacks and serdev-receive.
+ */
+ struct mutex rx_mutex;
+ int rx_err;
+ u8 rx_byte;
+};
+
+/**
+ * struct w1_uart_limits - limits for 1-Wire operations
+ * @baudrate: requested baud-rate to create the 1-Wire timing pattern
+ * @bit_min_us: minimum time for a bit (in us)
+ * @bit_max_us: maximum time for a bit (in us)
+ * @sample_us: timespan to sample 1-Wire response
+ * @cycle_us: duration of the 1-Wire cycle
+ */
+struct w1_uart_limits {
+ unsigned int baudrate;
+ unsigned int bit_min_us;
+ unsigned int bit_max_us;
+ unsigned int sample_us;
+ unsigned int cycle_us;
+};
+
+static inline unsigned int baud_to_bit_ns(unsigned int baud)
+{
+ return NSEC_PER_SEC / baud;
+}
+
+static inline unsigned int to_ns(unsigned int us)
+{
+ return us * NSEC_PER_USEC;
+}
+
+/*
+ * Set baud-rate, delay and tx-byte to create a 1-Wire pulse and adapt
+ * the tx-byte according to the actual baud-rate.
+ *
+ * Reject when:
+ * - the time for a bit is outside the min/max range
+ * - a 1-Wire response is not detectable for the sent byte
+ */
+static int w1_uart_set_config(struct serdev_device *serdev,
+ const struct w1_uart_limits *limits,
+ struct w1_uart_config *w1cfg)
+{
+ unsigned int packet_ns;
+ unsigned int bits_low;
+ unsigned int bit_ns;
+ unsigned int low_ns;
+
+ w1cfg->baudrate = serdev_device_set_baudrate(serdev, limits->baudrate);
+ if (w1cfg->baudrate == 0)
+ return -EINVAL;
+
+ /* Compute in nanoseconds for accuracy */
+ bit_ns = baud_to_bit_ns(w1cfg->baudrate);
+ bits_low = to_ns(limits->bit_min_us) / bit_ns;
+ /* start bit is always low */
+ low_ns = bit_ns * (bits_low + 1);
+
+ if (low_ns < to_ns(limits->bit_min_us))
+ return -EINVAL;
+
+ if (low_ns > to_ns(limits->bit_max_us))
+ return -EINVAL;
+
+ /* 1-Wire response detectable for sent byte */
+ if (limits->sample_us > 0 &&
+ bit_ns * BITS_PER_BYTE < low_ns + to_ns(limits->sample_us))
+ return -EINVAL;
+
+ /* delay: 1-Wire cycle takes longer than the UART packet */
+ packet_ns = bit_ns * W1_UART_BITS_PER_PACKET;
+ w1cfg->delay_us = 0;
+ if (to_ns(limits->cycle_us) > packet_ns)
+ w1cfg->delay_us =
+ (to_ns(limits->cycle_us) - packet_ns) / NSEC_PER_USEC;
+
+ /* byte to create 1-Wire pulse */
+ w1cfg->tx_byte = 0xff << bits_low;
+
+ return 0;
+}
+
+/*
+ * Configuration for reset and presence detect
+ * - bit_min_us is 480us, add margin and use 485us
+ * - limits for sample time 60us-75us, use 65us
+ */
+static int w1_uart_set_config_reset(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device_node *np = serdev->dev.of_node;
+
+ struct w1_uart_limits limits = { .baudrate = 9600,
+ .bit_min_us = 485,
+ .bit_max_us = 640,
+ .sample_us = 65,
+ .cycle_us = 960 };
+
+ of_property_read_u32(np, "reset-bps", &limits.baudrate);
+
+ return w1_uart_set_config(serdev, &limits, &w1dev->cfg_reset);
+}
+
+/*
+ * Configuration for write-0 cycle (touch bit 0)
+ * - bit_min_us is 60us, add margin and use 65us
+ * - no sampling required, sample_us = 0
+ */
+static int w1_uart_set_config_touch_0(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device_node *np = serdev->dev.of_node;
+
+ struct w1_uart_limits limits = { .baudrate = 115200,
+ .bit_min_us = 65,
+ .bit_max_us = 120,
+ .sample_us = 0,
+ .cycle_us = 70 };
+
+ of_property_read_u32(np, "write-0-bps", &limits.baudrate);
+
+ return w1_uart_set_config(serdev, &limits, &w1dev->cfg_touch_0);
+}
+
+/*
+ * Configuration for write-1 and read cycle (touch bit 1)
+ * - bit_min_us is 5us, add margin and use 6us
+ * - limits for sample time 5us-15us, use 15us
+ */
+static int w1_uart_set_config_touch_1(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device_node *np = serdev->dev.of_node;
+
+ struct w1_uart_limits limits = { .baudrate = 115200,
+ .bit_min_us = 6,
+ .bit_max_us = 15,
+ .sample_us = 15,
+ .cycle_us = 70 };
+
+ of_property_read_u32(np, "write-1-bps", &limits.baudrate);
+
+ return w1_uart_set_config(serdev, &limits, &w1dev->cfg_touch_1);
+}
+
+/*
+ * Configure and open the serial device
+ */
+static int w1_uart_serdev_open(struct w1_uart_device *w1dev)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ struct device *dev = &serdev->dev;
+ int ret;
+
+ ret = devm_serdev_device_open(dev, serdev);
+ if (ret < 0)
+ return ret;
+
+ ret = serdev_device_set_parity(serdev, SERDEV_PARITY_NONE);
+ if (ret < 0) {
+ dev_err(dev, "set parity failed\n");
+ return ret;
+ }
+
+ ret = w1_uart_set_config_reset(w1dev);
+ if (ret < 0) {
+ dev_err(dev, "config for reset failed\n");
+ return ret;
+ }
+
+ ret = w1_uart_set_config_touch_0(w1dev);
+ if (ret < 0) {
+ dev_err(dev, "config for touch-0 failed\n");
+ return ret;
+ }
+
+ ret = w1_uart_set_config_touch_1(w1dev);
+ if (ret < 0) {
+ dev_err(dev, "config for touch-1 failed\n");
+ return ret;
+ }
+
+ serdev_device_set_flow_control(serdev, false);
+
+ return 0;
+}
+
+/*
+ * Send one byte (tx_byte) and read one byte (rx_byte) via serdev.
+ */
+static int w1_uart_serdev_tx_rx(struct w1_uart_device *w1dev,
+ const struct w1_uart_config *w1cfg, u8 *rx_byte)
+{
+ struct serdev_device *serdev = w1dev->serdev;
+ int ret;
+
+ serdev_device_write_flush(serdev);
+ serdev_device_set_baudrate(serdev, w1cfg->baudrate);
+
+ /* write and immediately read one byte */
+ reinit_completion(&w1dev->rx_byte_received);
+ ret = serdev_device_write_buf(serdev, &w1cfg->tx_byte, 1);
+ if (ret != 1)
+ return -EIO;
+ ret = wait_for_completion_interruptible_timeout(
+ &w1dev->rx_byte_received, W1_UART_TIMEOUT);
+ if (ret <= 0)
+ return -EIO;
+
+ /* locking could fail when serdev is unexpectedly receiving. */
+ if (!mutex_trylock(&w1dev->rx_mutex))
+ return -EIO;
+
+ ret = w1dev->rx_err;
+ if (ret == 0)
+ *rx_byte = w1dev->rx_byte;
+
+ mutex_unlock(&w1dev->rx_mutex);
+
+ if (w1cfg->delay_us > 0)
+ fsleep(w1cfg->delay_us);
+
+ return ret;
+}
+
+static size_t w1_uart_serdev_receive_buf(struct serdev_device *serdev,
+ const u8 *buf, size_t count)
+{
+ struct w1_uart_device *w1dev = serdev_device_get_drvdata(serdev);
+
+ mutex_lock(&w1dev->rx_mutex);
+
+ /* a single byte was sent and exactly one byte is expected back */
+ if (count == 1) {
+ w1dev->rx_byte = buf[0];
+ w1dev->rx_err = 0;
+ } else {
+ w1dev->rx_err = -EIO;
+ }
+
+ mutex_unlock(&w1dev->rx_mutex);
+ complete(&w1dev->rx_byte_received);
+
+ return count;
+}
+
+static const struct serdev_device_ops w1_uart_serdev_ops = {
+ .receive_buf = w1_uart_serdev_receive_buf,
+ .write_wakeup = serdev_device_write_wakeup,
+};
+
+/*
+ * 1-Wire reset and presence detect: a present slave will manipulate
+ * the received byte by pulling the 1-Wire bus low.
+ */
+static u8 w1_uart_reset_bus(void *data)
+{
+ struct w1_uart_device *w1dev = data;
+ const struct w1_uart_config *w1cfg = &w1dev->cfg_reset;
+ int ret;
+ u8 val;
+
+ ret = w1_uart_serdev_tx_rx(w1dev, w1cfg, &val);
+ if (ret < 0)
+ return -1;
+
+ /* Device present (0) or no device (1) */
+ return val != w1cfg->tx_byte ? 0 : 1;
+}
+
+/*
+ * 1-Wire read and write cycle: only a read of 0 manipulates the
+ * received byte; all other cycles leave the line untouched.
+ */
+static u8 w1_uart_touch_bit(void *data, u8 bit)
+{
+ struct w1_uart_device *w1dev = data;
+ const struct w1_uart_config *w1cfg = bit ? &w1dev->cfg_touch_1 :
+ &w1dev->cfg_touch_0;
+ int ret;
+ u8 val;
+
+ ret = w1_uart_serdev_tx_rx(w1dev, w1cfg, &val);
+
+ /* return inactive bus state on error */
+ if (ret < 0)
+ return 1;
+
+ return val == w1cfg->tx_byte ? 1 : 0;
+}
+
+static int w1_uart_probe(struct serdev_device *serdev)
+{
+ struct device *dev = &serdev->dev;
+ struct w1_uart_device *w1dev;
+ int ret;
+
+ w1dev = devm_kzalloc(dev, sizeof(*w1dev), GFP_KERNEL);
+ if (!w1dev)
+ return -ENOMEM;
+ w1dev->bus.data = w1dev;
+ w1dev->bus.reset_bus = w1_uart_reset_bus;
+ w1dev->bus.touch_bit = w1_uart_touch_bit;
+ w1dev->serdev = serdev;
+
+ init_completion(&w1dev->rx_byte_received);
+ mutex_init(&w1dev->rx_mutex);
+
+ ret = w1_uart_serdev_open(w1dev);
+ if (ret < 0)
+ return ret;
+ serdev_device_set_drvdata(serdev, w1dev);
+ serdev_device_set_client_ops(serdev, &w1_uart_serdev_ops);
+
+ return w1_add_master_device(&w1dev->bus);
+}
+
+static void w1_uart_remove(struct serdev_device *serdev)
+{
+ struct w1_uart_device *w1dev = serdev_device_get_drvdata(serdev);
+
+ /*
+ * Waits until the w1-uart callbacks have finished. The serdev is
+ * closed and its device data released automatically by devres,
+ * which also waits until serdev-receive has finished.
+ */
+ w1_remove_master_device(&w1dev->bus);
+}
+
+static const struct of_device_id w1_uart_of_match[] = {
+ { .compatible = "w1-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, w1_uart_of_match);
+
+static struct serdev_device_driver w1_uart_driver = {
+ .driver = {
+ .name = "w1-uart",
+ .of_match_table = w1_uart_of_match,
+ },
+ .probe = w1_uart_probe,
+ .remove = w1_uart_remove,
+};
+
+module_serdev_device_driver(w1_uart_driver);
+
+MODULE_DESCRIPTION("UART w1 bus driver");
+MODULE_AUTHOR("Christoph Winklhofer <cj.winklhofer@gmail.com>");
+MODULE_LICENSE("GPL");
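To make the timing rules in w1_uart_set_config() concrete, the short userspace sketch below (illustrative only, not part of the patch) reproduces the computation for the three default configurations, assuming the UART grants exactly the requested baud-rates: the reset slot sends 0xf0 at 9600 baud, the write-0 slot sends 0x80 and the write-1/read slot sends 0xff at 115200 baud, and the echoed byte tells the master whether a slave pulled the bus low.

/* Standalone sketch (not kernel code): mirrors the w1_uart_set_config()
 * math for the default reset/touch-0/touch-1 limits above, assuming the
 * UART grants exactly the requested baud-rate. */
#include <stdio.h>

#define NSEC_PER_SEC	1000000000u
#define NSEC_PER_USEC	1000u
#define BITS_PER_BYTE	8u
#define BITS_PER_PACKET	(BITS_PER_BYTE + 2)	/* start + 8 data + stop */

static void show(const char *name, unsigned int baud,
		 unsigned int bit_min_us, unsigned int cycle_us)
{
	unsigned int bit_ns = NSEC_PER_SEC / baud;
	unsigned int bits_low = bit_min_us * NSEC_PER_USEC / bit_ns;
	unsigned int low_ns = bit_ns * (bits_low + 1);	/* start bit is low too */
	unsigned int packet_ns = bit_ns * BITS_PER_PACKET;
	unsigned int delay_us = 0;
	unsigned char tx_byte = 0xff << bits_low;

	if (cycle_us * NSEC_PER_USEC > packet_ns)
		delay_us = (cycle_us * NSEC_PER_USEC - packet_ns) / NSEC_PER_USEC;

	printf("%-8s tx_byte=0x%02x low=%uus delay=%uus\n",
	       name, tx_byte, low_ns / NSEC_PER_USEC, delay_us);
}

int main(void)
{
	show("reset",   9600,   485, 960);	/* -> 0xf0, ~520us low pulse */
	show("touch-0", 115200, 65,  70);	/* -> 0x80, ~69us low pulse  */
	show("touch-1", 115200, 6,   70);	/* -> 0xff, one bit-time low */

	/* The echoed byte decides the result: equal to tx_byte means the
	 * bus stayed high (no presence pulse / bit reads as 1), anything
	 * else means a slave pulled the bus low (presence / bit is 0). */
	return 0;
}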
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 5353cbd75126..afb1cc4606c5 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -167,7 +167,7 @@ static struct w1_family w1_default_family = {
static int w1_uevent(const struct device *dev, struct kobj_uevent_env *env);
-static struct bus_type w1_bus_type = {
+static const struct bus_type w1_bus_type = {
.name = "w1",
.uevent = w1_uevent,
};
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 976c6cdf9ee6..aaf2514fcfa4 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -672,7 +672,6 @@ EXPORT_SYMBOL(xen_free_ballooned_pages);
static void __init balloon_add_regions(void)
{
-#if defined(CONFIG_XEN_PV)
unsigned long start_pfn, pages;
unsigned long pfn, extra_pfn_end;
unsigned int i;
@@ -696,7 +695,6 @@ static void __init balloon_add_regions(void)
balloon_stats.total_pages += extra_pfn_end - start_pfn;
}
-#endif
}
static int __init balloon_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 2faa4bf78c7a..81effbd53dc5 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -1190,7 +1190,7 @@ int xen_pirq_from_irq(unsigned irq)
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
- struct xenbus_device *dev)
+ struct xenbus_device *dev, bool shared)
{
int ret = -ENOMEM;
struct irq_info *info;
@@ -1224,7 +1224,8 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
*/
bind_evtchn_to_cpu(info, 0, false);
} else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
- info->refcnt++;
+ if (shared && !WARN_ON(info->refcnt < 0))
+ info->refcnt++;
}
ret = info->irq;
@@ -1237,13 +1238,13 @@ out:
int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL, false);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
{
- return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL);
+ return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip, NULL, false);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
@@ -1295,7 +1296,8 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
evtchn_port_t remote_port,
- struct irq_chip *chip)
+ struct irq_chip *chip,
+ bool shared)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -1307,14 +1309,14 @@ static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
&bind_interdomain);
return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
- chip, dev);
+ chip, dev, shared);
}
int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
evtchn_port_t remote_port)
{
return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
- &xen_lateeoi_chip);
+ &xen_lateeoi_chip, false);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
@@ -1430,7 +1432,8 @@ static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
{
int irq, retval;
- irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
+ irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL,
+ irqflags & IRQF_SHARED);
if (irq < 0)
return irq;
retval = request_irq(irq, handler, irqflags, devname, dev_id);
@@ -1471,7 +1474,8 @@ static int bind_interdomain_evtchn_to_irqhandler_chip(
{
int irq, retval;
- irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
+ irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip,
+ irqflags & IRQF_SHARED);
if (irq < 0)
return irq;
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 59717628ca42..f6a2216c2c87 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -85,6 +85,7 @@ struct user_evtchn {
struct per_user_data *user;
evtchn_port_t port;
bool enabled;
+ bool unbinding;
};
static void evtchn_free_ring(evtchn_port_t *ring)
@@ -164,6 +165,10 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
struct per_user_data *u = evtchn->user;
unsigned int prod, cons;
+ /* Handler might be called when tearing down the IRQ. */
+ if (evtchn->unbinding)
+ return IRQ_HANDLED;
+
WARN(!evtchn->enabled,
"Interrupt for port %u, but apparently not enabled; per-user %p\n",
evtchn->port, u);
@@ -421,6 +426,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
BUG_ON(irq < 0);
+ evtchn->unbinding = true;
unbind_from_irqhandler(irq, evtchn);
del_evtchn(u, evtchn);
diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c
index 6a9fe02c6bfc..2ee750a03c2f 100644
--- a/drivers/xen/grant-dma-iommu.c
+++ b/drivers/xen/grant-dma-iommu.c
@@ -51,14 +51,12 @@ static int grant_dma_iommu_probe(struct platform_device *pdev)
return 0;
}
-static int grant_dma_iommu_remove(struct platform_device *pdev)
+static void grant_dma_iommu_remove(struct platform_device *pdev)
{
struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev);
platform_set_drvdata(pdev, NULL);
iommu_device_unregister(&mmu->iommu);
-
- return 0;
}
static struct platform_driver grant_dma_iommu_driver = {
@@ -67,7 +65,7 @@ static struct platform_driver grant_dma_iommu_driver = {
.of_match_table = grant_dma_iommu_of_match,
},
.probe = grant_dma_iommu_probe,
- .remove = grant_dma_iommu_remove,
+ .remove_new = grant_dma_iommu_remove,
};
static int __init grant_dma_iommu_init(void)