Diffstat (limited to 'drivers')
-rw-r--r--drivers/accel/amdxdna/amdxdna_mailbox.c1
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.c5
-rw-r--r--drivers/accel/ivpu/ivpu_drv.c8
-rw-r--r--drivers/accel/ivpu/ivpu_pm.c84
-rw-r--r--drivers/acpi/arm64/gtdt.c12
-rw-r--r--drivers/acpi/platform_profile.c102
-rw-r--r--drivers/acpi/prmt.c4
-rw-r--r--drivers/acpi/property.c10
-rw-r--r--drivers/acpi/resource.c6
-rw-r--r--drivers/android/binder_internal.h1
-rw-r--r--drivers/android/binderfs.c1
-rw-r--r--drivers/ata/ahci.h8
-rw-r--r--drivers/ata/libahci.c1
-rw-r--r--drivers/ata/libahci_platform.c2
-rw-r--r--drivers/ata/libata-core.c4
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/core.c1
-rw-r--r--drivers/base/faux.c232
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/power/main.c21
-rw-r--r--drivers/base/regmap/regmap-irq.c2
-rw-r--r--drivers/block/sunvdc.c4
-rw-r--r--drivers/block/ublk_drv.c7
-rw-r--r--drivers/bluetooth/btintel_pcie.c5
-rw-r--r--drivers/bluetooth/btusb.c7
-rw-r--r--drivers/bus/mhi/host/pci_generic.c5
-rw-r--r--drivers/bus/moxtet.c2
-rw-r--r--drivers/bus/simple-pm-bus.c22
-rw-r--r--drivers/cdx/cdx.c6
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/sonypi.c11
-rw-r--r--drivers/char/virtio_console.c13
-rw-r--r--drivers/clocksource/jcore-pit.c15
-rw-r--r--drivers/counter/microchip-tcb-capture.c160
-rw-r--r--drivers/counter/ti-eqep.c32
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/amd-pstate.c20
-rw-r--r--drivers/cpufreq/cpufreq.c3
-rw-r--r--drivers/crypto/ccp/sp-dev.c14
-rw-r--r--drivers/dma/qcom/bam_dma.c24
-rw-r--r--drivers/dma/tegra210-adma.c22
-rw-r--r--drivers/edac/qcom_edac.c4
-rw-r--r--drivers/firmware/Kconfig2
-rw-r--r--drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c4
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c24
-rw-r--r--drivers/firmware/efi/cper-arm.c2
-rw-r--r--drivers/firmware/efi/cper-x86.c2
-rw-r--r--drivers/firmware/efi/efi.c6
-rw-r--r--drivers/firmware/efi/libstub/randomalloc.c3
-rw-r--r--drivers/firmware/efi/libstub/relocate.c3
-rw-r--r--drivers/firmware/efi/mokvar-table.c57
-rw-r--r--drivers/firmware/imx/Kconfig1
-rw-r--r--drivers/firmware/iscsi_ibft.c5
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-aggregator.c20
-rw-r--r--drivers/gpio/gpio-bcm-kona.c71
-rw-r--r--drivers/gpio/gpio-pca953x.c19
-rw-r--r--drivers/gpio/gpio-rcar.c31
-rw-r--r--drivers/gpio/gpio-sim.c13
-rw-r--r--drivers/gpio/gpio-stmpe.c15
-rw-r--r--drivers/gpio/gpio-vf610.c4
-rw-r--r--drivers/gpio/gpiolib-acpi.c14
-rw-r--r--drivers/gpio/gpiolib.c110
-rw-r--r--drivers/gpio/gpiolib.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_queue.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c86
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c14
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile14
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/Makefile22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c3
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c25
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c26
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c12
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c4
-rw-r--r--drivers/gpu/drm/ast/ast_dp.c2
-rw-r--r--drivers/gpu/drm/display/drm_dp_cec.c14
-rw-r--r--drivers/gpu/drm/display/drm_dp_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fbdev_dma.c217
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/Kconfig2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c15
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c40
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/imagination/Makefile2
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_meta.c6
-rw-r--r--drivers/gpu/drm/imagination/pvr_fw_trace.c4
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.c18
-rw-r--r--drivers/gpu/drm/imagination/pvr_queue.h4
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.c134
-rw-r--r--drivers/gpu/drm/imagination/pvr_vm.h3
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c11
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c5
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c53
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h11
-rw-r--r--drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml11
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c2
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c8
-rw-r--r--drivers/gpu/drm/panthor/panthor_drv.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h1
-rw-r--r--drivers/gpu/drm/radeon/rs400.c18
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h4
-rw-r--r--drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c200
-rw-r--r--drivers/gpu/drm/tiny/bochs.c5
-rw-r--r--drivers/gpu/drm/vkms/vkms_composer.c2
-rw-r--r--drivers/gpu/drm/xe/display/xe_plane_initial.c10
-rw-r--r--drivers/gpu/drm/xe/regs/xe_engine_regs.h1
-rw-r--r--drivers/gpu/drm/xe/regs/xe_oa_regs.h6
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.c40
-rw-r--r--drivers/gpu/drm/xe/xe_devcoredump.h2
-rw-r--r--drivers/gpu/drm/xe/xe_drm_client.c2
-rw-r--r--drivers/gpu/drm/xe/xe_gt.c8
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.c14
-rw-r--r--drivers/gpu/drm/xe/xe_gt_sriov_pf.h6
-rw-r--r--drivers/gpu/drm/xe/xe_guc_ct.c7
-rw-r--r--drivers/gpu/drm/xe/xe_guc_log.c5
-rw-r--r--drivers/gpu/drm/xe/xe_guc_submit.c2
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.c188
-rw-r--r--drivers/gpu/drm/xe/xe_hmm.h7
-rw-r--r--drivers/gpu/drm/xe/xe_irq.c14
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c26
-rw-r--r--drivers/gpu/drm/xe/xe_pt.c96
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.c3
-rw-r--r--drivers/gpu/drm/xe/xe_pt_walk.h4
-rw-r--r--drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c54
-rw-r--r--drivers/gpu/drm/xe/xe_vm.c140
-rw-r--r--drivers/gpu/drm/xe/xe_vm.h10
-rw-r--r--drivers/gpu/drm/xe/xe_vm_types.h8
-rw-r--r--drivers/gpu/host1x/dev.c2
-rw-r--r--drivers/gpu/host1x/intr.c2
-rw-r--r--drivers/greybus/gb-beagleplay.c4
-rw-r--r--drivers/hid/Kconfig15
-rw-r--r--drivers/hid/amd-sfh-hid/Kconfig1
-rw-r--r--drivers/hid/hid-apple.c13
-rw-r--r--drivers/hid/hid-appleir.c2
-rw-r--r--drivers/hid/hid-corsair-void.c86
-rw-r--r--drivers/hid/hid-debug.c2
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-lenovo.c7
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-nintendo.c14
-rw-r--r--drivers/hid/hid-quirks.c1
-rw-r--r--drivers/hid/hid-steam.c48
-rw-r--r--drivers/hid/hid-thrustmaster.c2
-rw-r--r--drivers/hid/hid-topre.c7
-rw-r--r--drivers/hid/hid-winwing.c2
-rw-r--r--drivers/hid/i2c-hid/Kconfig2
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-core.c2
-rw-r--r--drivers/hid/intel-ish-hid/Kconfig1
-rw-r--r--drivers/hid/intel-ish-hid/ipc/hw-ish.h2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/ipc.c15
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c7
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid-client.c2
-rw-r--r--drivers/hid/intel-ish-hid/ishtp-hid.c4
-rw-r--r--drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h2
-rw-r--r--drivers/hid/intel-thc-hid/Kconfig1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c2
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c2
-rw-r--r--drivers/hid/surface-hid/Kconfig2
-rw-r--r--drivers/hid/usbhid/Kconfig3
-rw-r--r--drivers/hwmon/ad7314.c10
-rw-r--r--drivers/hwmon/ntc_thermistor.c66
-rw-r--r--drivers/hwmon/peci/dimmtemp.c10
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/xgene-hwmon.c2
-rw-r--r--drivers/hwtracing/intel_th/msu.c13
-rw-r--r--drivers/hwtracing/intel_th/pci.c15
-rw-r--r--drivers/i2c/busses/i2c-amd-asf-plat.c1
-rw-r--r--drivers/i2c/busses/i2c-ls2x.c16
-rw-r--r--drivers/i2c/busses/i2c-npcm7xx.c7
-rw-r--r--drivers/i2c/i2c-core-base.c128
-rw-r--r--drivers/idle/intel_idle.c4
-rw-r--r--drivers/iio/accel/adxl345.h1
-rw-r--r--drivers/iio/accel/adxl345_core.c78
-rw-r--r--drivers/iio/accel/adxl367.c194
-rw-r--r--drivers/iio/accel/adxl372.c7
-rw-r--r--drivers/iio/accel/adxl380.c7
-rw-r--r--drivers/iio/accel/bma180.c7
-rw-r--r--drivers/iio/accel/bma400_core.c2
-rw-r--r--drivers/iio/accel/bmi088-accel-core.c9
-rw-r--r--drivers/iio/accel/fxls8962af-core.c21
-rw-r--r--drivers/iio/accel/kionix-kx022a.c78
-rw-r--r--drivers/iio/accel/mc3230.c95
-rw-r--r--drivers/iio/accel/mma8452.c86
-rw-r--r--drivers/iio/accel/msa311.c34
-rw-r--r--drivers/iio/adc/Kconfig56
-rw-r--r--drivers/iio/adc/Makefile4
-rw-r--r--drivers/iio/adc/ad4000.c60
-rw-r--r--drivers/iio/adc/ad4030.c1230
-rw-r--r--drivers/iio/adc/ad4130.c139
-rw-r--r--drivers/iio/adc/ad4695.c1094
-rw-r--r--drivers/iio/adc/ad4851.c1315
-rw-r--r--drivers/iio/adc/ad7091r-base.c1
-rw-r--r--drivers/iio/adc/ad7124.c343
-rw-r--r--drivers/iio/adc/ad7173.c707
-rw-r--r--drivers/iio/adc/ad7191.c554
-rw-r--r--drivers/iio/adc/ad7192.c126
-rw-r--r--drivers/iio/adc/ad7266.c7
-rw-r--r--drivers/iio/adc/ad7298.c7
-rw-r--r--drivers/iio/adc/ad7380.c917
-rw-r--r--drivers/iio/adc/ad7476.c7
-rw-r--r--drivers/iio/adc/ad7606.c176
-rw-r--r--drivers/iio/adc/ad7606.h103
-rw-r--r--drivers/iio/adc/ad7606_bus_iface.h16
-rw-r--r--drivers/iio/adc/ad7606_par.c52
-rw-r--r--drivers/iio/adc/ad7606_spi.c137
-rw-r--r--drivers/iio/adc/ad7625.c13
-rw-r--r--drivers/iio/adc/ad7768-1.c47
-rw-r--r--drivers/iio/adc/ad7779.c101
-rw-r--r--drivers/iio/adc/ad7791.c31
-rw-r--r--drivers/iio/adc/ad7793.c80
-rw-r--r--drivers/iio/adc/ad7887.c7
-rw-r--r--drivers/iio/adc/ad7923.c7
-rw-r--r--drivers/iio/adc/ad7944.c314
-rw-r--r--drivers/iio/adc/ad799x.c14
-rw-r--r--drivers/iio/adc/ad9467.c23
-rw-r--r--drivers/iio/adc/ad_sigma_delta.c24
-rw-r--r--drivers/iio/adc/adi-axi-adc.c305
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c122
-rw-r--r--drivers/iio/adc/dln2-adc.c7
-rw-r--r--drivers/iio/adc/max1027.c37
-rw-r--r--drivers/iio/adc/max11410.c72
-rw-r--r--drivers/iio/adc/max1363.c165
-rw-r--r--drivers/iio/adc/max34408.c1
-rw-r--r--drivers/iio/adc/pac1921.c3
-rw-r--r--drivers/iio/adc/rockchip_saradc.c42
-rw-r--r--drivers/iio/adc/rtq6056.c46
-rw-r--r--drivers/iio/adc/stm32-adc-core.c6
-rw-r--r--drivers/iio/adc/stm32-adc.c7
-rw-r--r--drivers/iio/adc/stm32-dfsdm-adc.c76
-rw-r--r--drivers/iio/adc/ti-adc084s021.c9
-rw-r--r--drivers/iio/adc/ti-adc108s102.c7
-rw-r--r--drivers/iio/adc/ti-adc161s626.c14
-rw-r--r--drivers/iio/adc/ti-ads1119.c17
-rw-r--r--drivers/iio/adc/ti-ads124s08.c2
-rw-r--r--drivers/iio/adc/ti-ads1298.c7
-rw-r--r--drivers/iio/adc/ti-ads131e08.c14
-rw-r--r--drivers/iio/adc/ti-ads7138.c749
-rw-r--r--drivers/iio/adc/ti-ads7924.c7
-rw-r--r--drivers/iio/adc/ti-tlc4541.c7
-rw-r--r--drivers/iio/addac/ad74413r.c14
-rw-r--r--drivers/iio/amplifiers/hmc425a.c3
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dma.c4
-rw-r--r--drivers/iio/buffer/industrialio-buffer-dmaengine.c144
-rw-r--r--drivers/iio/chemical/ens160_core.c32
-rw-r--r--drivers/iio/chemical/scd30_core.c70
-rw-r--r--drivers/iio/common/cros_ec_sensors/Makefile3
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c9
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c32
-rw-r--r--drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h56
-rw-r--r--drivers/iio/dac/Kconfig3
-rw-r--r--drivers/iio/dac/ad3552r-common.c50
-rw-r--r--drivers/iio/dac/ad3552r-hs.c333
-rw-r--r--drivers/iio/dac/ad3552r-hs.h8
-rw-r--r--drivers/iio/dac/ad3552r.c42
-rw-r--r--drivers/iio/dac/ad3552r.h9
-rw-r--r--drivers/iio/dac/ad5791.c181
-rw-r--r--drivers/iio/dac/ad8460.c18
-rw-r--r--drivers/iio/dac/adi-axi-dac.c35
-rw-r--r--drivers/iio/dummy/iio_simple_dummy.c119
-rw-r--r--drivers/iio/filter/admv8818.c14
-rw-r--r--drivers/iio/frequency/adf4371.c45
-rw-r--r--drivers/iio/gyro/bmg160_i2c.c1
-rw-r--r--drivers/iio/gyro/bmg160_spi.c10
-rw-r--r--drivers/iio/humidity/dht11.c3
-rw-r--r--drivers/iio/imu/Kconfig13
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c35
-rw-r--r--drivers/iio/imu/adis16550.c1147
-rw-r--r--drivers/iio/imu/bmi270/bmi270.h17
-rw-r--r--drivers/iio/imu/bmi270/bmi270_core.c374
-rw-r--r--drivers/iio/imu/bmi323/bmi323_core.c44
-rw-r--r--drivers/iio/industrialio-backend.c64
-rw-r--r--drivers/iio/industrialio-core.c9
-rw-r--r--drivers/iio/industrialio-event.c2
-rw-r--r--drivers/iio/industrialio-gts-helper.c283
-rw-r--r--drivers/iio/light/Kconfig22
-rw-r--r--drivers/iio/light/Makefile2
-rw-r--r--drivers/iio/light/adux1020.c1
-rw-r--r--drivers/iio/light/al3000a.c209
-rw-r--r--drivers/iio/light/apds9160.c1594
-rw-r--r--drivers/iio/light/apds9306.c4
-rw-r--r--drivers/iio/light/bh1745.c18
-rw-r--r--drivers/iio/light/cm32181.c2
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/light/veml6030.c608
-rw-r--r--drivers/iio/light/veml6075.c8
-rw-r--r--drivers/iio/magnetometer/Kconfig11
-rw-r--r--drivers/iio/magnetometer/Makefile2
-rw-r--r--drivers/iio/magnetometer/af8133j.c1
-rw-r--r--drivers/iio/magnetometer/si7210.c446
-rw-r--r--drivers/iio/pressure/zpa2326_i2c.c1
-rw-r--r--drivers/iio/pressure/zpa2326_spi.c1
-rw-r--r--drivers/iio/proximity/hx9023s.c3
-rw-r--r--drivers/iio/proximity/irsd200.c3
-rw-r--r--drivers/iio/proximity/sx9310.c19
-rw-r--r--drivers/iio/proximity/sx9324.c19
-rw-r--r--drivers/iio/proximity/sx9360.c19
-rw-r--r--drivers/iio/resolver/ad2s1210.c17
-rw-r--r--drivers/iio/temperature/tmp006.c33
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c4
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c2
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c22
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h8
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c64
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.h2
-rw-r--r--drivers/infiniband/hw/mana/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/ah.c3
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c8
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c16
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c1
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c10
-rw-r--r--drivers/infiniband/hw/mlx5/qp.h1
-rw-r--r--drivers/infiniband/hw/mlx5/umr.c83
-rw-r--r--drivers/iommu/amd/amd_iommu_types.h1
-rw-r--r--drivers/iommu/amd/init.c38
-rw-r--r--drivers/iommu/amd/iommu.c4
-rw-r--r--drivers/iommu/exynos-iommu.c6
-rw-r--r--drivers/iommu/intel/dmar.c1
-rw-r--r--drivers/iommu/intel/iommu.c10
-rw-r--r--drivers/iommu/intel/prq.c4
-rw-r--r--drivers/iommu/io-pgfault.c1
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/irqchip/Kconfig1
-rw-r--r--drivers/irqchip/irq-apple-aic.c3
-rw-r--r--drivers/irqchip/irq-gic-v3.c53
-rw-r--r--drivers/irqchip/irq-jcore-aic.c2
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c3
-rw-r--r--drivers/irqchip/irq-partition-percpu.c2
-rw-r--r--drivers/irqchip/irq-riscv-imsic-early.c2
-rw-r--r--drivers/irqchip/irq-thead-c900-aclint-sswi.c2
-rw-r--r--drivers/irqchip/qcom-pdc.c67
-rw-r--r--drivers/md/dm-integrity.c25
-rw-r--r--drivers/md/dm-vdo/dedupe.c1
-rw-r--r--drivers/md/md-linear.c4
-rw-r--r--drivers/md/raid0.c4
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/mfd/syscon.c29
-rw-r--r--drivers/misc/cardreader/rtsx_usb.c15
-rw-r--r--drivers/misc/eeprom/digsy_mtc_eeprom.c2
-rw-r--r--drivers/misc/eeprom/ee1004.c4
-rw-r--r--drivers/misc/mei/bus.c52
-rw-r--r--drivers/misc/mei/client.c22
-rw-r--r--drivers/misc/mei/client.h2
-rw-r--r--drivers/misc/mei/hw-me-regs.h2
-rw-r--r--drivers/misc/mei/hw-txe.c45
-rw-r--r--drivers/misc/mei/hw-txe.h2
-rw-r--r--drivers/misc/mei/pci-me.c2
-rw-r--r--drivers/misc/mei/vsc-tp.c2
-rw-r--r--drivers/misc/ntsync.c7
-rw-r--r--drivers/mmc/host/mtk-sd.c31
-rw-r--r--drivers/mmc/host/sdhci_am654.c30
-rw-r--r--drivers/mtd/nand/raw/cadence-nand-controller.c44
-rw-r--r--drivers/mtd/nand/raw/qcom_nandc.c24
-rw-r--r--drivers/mtd/spi-nor/sst.c2
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c5
-rw-r--r--drivers/net/can/ctucanfd/ctucanfd_base.c10
-rw-r--r--drivers/net/can/rockchip/rockchip_canfd-core.c2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_devlink.c6
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/dsa/realtek/Kconfig6
-rw-r--r--drivers/net/dsa/realtek/Makefile3
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb-leds.c177
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.c258
-rw-r--r--drivers/net/dsa/realtek/rtl8366rb.h107
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c58
-rw-r--r--drivers/net/ethernet/cadence/macb.h2
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c12
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c197
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c103
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc4_pf.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c7
-rw-r--r--drivers/net/ethernet/google/gve/gve.h10
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c6
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c4
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c14
-rw-r--r--drivers/net/ethernet/intel/ice/devlink/devlink.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sriov.c5
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c150
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_vf_lib_private.h1
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_lib.c5
-rw-r--r--drivers/net/ethernet/intel/idpf/idpf_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c2
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/cmsg.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c40
-rw-r--r--drivers/net/ethernet/ti/Kconfig1
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c50
-rw-r--r--drivers/net/ethernet/ti/icssg/icss_iep.c21
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c1
-rw-r--r--drivers/net/geneve.c16
-rw-r--r--drivers/net/gtp.c5
-rw-r--r--drivers/net/ipa/data/ipa_data-v4.7.c18
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c21
-rw-r--r--drivers/net/loopback.c14
-rw-r--r--drivers/net/mctp/mctp-i3c.c3
-rw-r--r--drivers/net/netdevsim/ethtool.c2
-rw-r--r--drivers/net/phy/phylink.c15
-rw-r--r--drivers/net/phy/qcom/qca807x.c2
-rw-r--r--drivers/net/ppp/ppp_generic.c28
-rw-r--r--drivers/net/pse-pd/pd692x0.c2
-rw-r--r--drivers/net/pse-pd/pse_core.c4
-rw-r--r--drivers/net/team/team_core.c4
-rw-r--r--drivers/net/tun.c14
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/vmxnet3/vmxnet3_xdp.c14
-rw-r--r--drivers/net/vxlan/vxlan_core.c7
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c61
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c86
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c77
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c20
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c2
-rw-r--r--drivers/nvme/host/apple.c55
-rw-r--r--drivers/nvme/host/core.c10
-rw-r--r--drivers/nvme/host/fc.c68
-rw-r--r--drivers/nvme/host/ioctl.c15
-rw-r--r--drivers/nvme/host/pci.c35
-rw-r--r--drivers/nvme/host/sysfs.c2
-rw-r--r--drivers/nvme/host/tcp.c85
-rw-r--r--drivers/nvme/target/admin-cmd.c1
-rw-r--r--drivers/nvme/target/core.c40
-rw-r--r--drivers/nvme/target/fabrics-cmd.c2
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c2
-rw-r--r--drivers/nvme/target/nvmet.h17
-rw-r--r--drivers/nvme/target/pci-epf.c39
-rw-r--r--drivers/nvme/target/rdma.c33
-rw-r--r--drivers/nvme/target/tcp.c15
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/of_private.h4
-rw-r--r--drivers/of/of_reserved_mem.c4
-rw-r--r--drivers/of/of_test.c119
-rw-r--r--drivers/pci/pcie/aspm.c3
-rw-r--r--drivers/pci/probe.c5
-rw-r--r--drivers/pci/quirks.c3
-rw-r--r--drivers/pci/tph.c2
-rw-r--r--drivers/phy/freescale/phy-fsl-samsung-hdmi.c13
-rw-r--r--drivers/phy/rockchip/Kconfig1
-rw-r--r--drivers/phy/rockchip/phy-rockchip-naneng-combphy.c5
-rw-r--r--drivers/phy/samsung/phy-exynos5-usbdrd.c25
-rw-r--r--drivers/phy/st/phy-stm32-combophy.c38
-rw-r--r--drivers/phy/tegra/xusb-tegra186.c11
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c15
-rw-r--r--drivers/pinctrl/pinconf-generic.c8
-rw-r--r--drivers/pinctrl/pinctrl-cy8c95x0.c42
-rw-r--r--drivers/platform/cznic/Kconfig1
-rw-r--r--drivers/platform/x86/amd/pmf/core.c2
-rw-r--r--drivers/platform/x86/amd/pmf/pmf.h5
-rw-r--r--drivers/platform/x86/amd/pmf/sps.c11
-rw-r--r--drivers/platform/x86/amd/pmf/tee-if.c52
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel/hid.c7
-rw-r--r--drivers/platform/x86/intel/ifs/ifs.h9
-rw-r--r--drivers/platform/x86/intel/int3472/discrete.c85
-rw-r--r--drivers/platform/x86/intel/pmc/core.c4
-rw-r--r--drivers/platform/x86/intel/vsec.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c62
-rw-r--r--drivers/power/supply/axp20x_battery.c31
-rw-r--r--drivers/power/supply/da9150-fg.c4
-rw-r--r--drivers/power/supply/power_supply_core.c8
-rw-r--r--drivers/powercap/powercap_sys.c3
-rw-r--r--drivers/pps/generators/Kconfig16
-rw-r--r--drivers/pps/generators/Makefile1
-rw-r--r--drivers/pps/generators/pps_gen-dummy.c2
-rw-r--r--drivers/pps/generators/pps_gen.c14
-rw-r--r--drivers/pps/generators/pps_gen_tio.c272
-rw-r--r--drivers/pps/generators/sysfs.c6
-rw-r--r--drivers/ptp/ptp_vmclock.c47
-rw-r--r--drivers/rapidio/devices/rio_mport_cdev.c3
-rw-r--r--drivers/rapidio/rio-scan.c5
-rw-r--r--drivers/regulator/core.c61
-rw-r--r--drivers/s390/cio/chp.c3
-rw-r--r--drivers/s390/net/ism_drv.c14
-rw-r--r--drivers/s390/net/qeth_core_main.c8
-rw-r--r--drivers/scsi/qla1280.c2
-rw-r--r--drivers/scsi/scsi_lib.c23
-rw-r--r--drivers/scsi/scsi_lib_test.c7
-rw-r--r--drivers/scsi/scsi_scan.c2
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/slimbus/messaging.c5
-rw-r--r--drivers/soc/loongson/loongson2_guts.c5
-rw-r--r--drivers/soc/qcom/smp2p.c2
-rw-r--r--drivers/spi/Kconfig15
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/atmel-quadspi.c4
-rw-r--r--drivers/spi/spi-offload-trigger-pwm.c162
-rw-r--r--drivers/spi/spi-offload.c465
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-sn-f-ospi.c3
-rw-r--r--drivers/spi/spi.c10
-rw-r--r--drivers/staging/iio/accel/Kconfig12
-rw-r--r--drivers/staging/iio/accel/Makefile1
-rw-r--r--drivers/staging/iio/accel/adis16240.c443
-rw-r--r--drivers/staging/iio/frequency/ad9832.c37
-rw-r--r--drivers/staging/iio/frequency/ad9834.c22
-rw-r--r--drivers/target/target_core_stat.c4
-rw-r--r--drivers/tee/optee/supp.c35
-rw-r--r--drivers/thermal/cpufreq_cooling.c2
-rw-r--r--drivers/thermal/gov_power_allocator.c32
-rw-r--r--drivers/thermal/thermal_of.c50
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/8250/8250.h2
-rw-r--r--drivers/tty/serial/8250/8250_dma.c16
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_platform.c9
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c10
-rw-r--r--drivers/tty/serial/8250/8250_port.c9
-rw-r--r--drivers/tty/serial/sc16is7xx.c2
-rw-r--r--drivers/tty/serial/serial_port.c12
-rw-r--r--drivers/ufs/core/ufs_bsg.c6
-rw-r--r--drivers/ufs/core/ufshcd.c106
-rw-r--r--drivers/ufs/host/ufshcd-pci.c2
-rw-r--r--drivers/ufs/host/ufshcd-pltfrm.c28
-rw-r--r--drivers/usb/atm/cxacru.c13
-rw-r--r--drivers/usb/class/cdc-acm.c28
-rw-r--r--drivers/usb/core/hub.c47
-rw-r--r--drivers/usb/core/quirks.c10
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc3/core.c85
-rw-r--r--drivers/usb/dwc3/core.h3
-rw-r--r--drivers/usb/dwc3/drd.c4
-rw-r--r--drivers/usb/dwc3/gadget.c44
-rw-r--r--drivers/usb/gadget/composite.c17
-rw-r--r--drivers/usb/gadget/function/f_midi.c19
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/gadget/udc/core.c2
-rw-r--r--drivers/usb/gadget/udc/renesas_usb3.c2
-rw-r--r--drivers/usb/host/pci-quirks.c9
-rw-r--r--drivers/usb/host/xhci-hub.c8
-rw-r--r--drivers/usb/host/xhci-mem.c3
-rw-r--r--drivers/usb/host/xhci-pci.c17
-rw-r--r--drivers/usb/host/xhci.c6
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/phy/phy-generic.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c2
-rw-r--r--drivers/usb/roles/class.c5
-rw-r--r--drivers/usb/serial/option.c49
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c11
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c3
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c25
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h2
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c21
-rw-r--r--drivers/usb/typec/ucsi/ucsi_ccg.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_glink.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_stm32g0.c1
-rw-r--r--drivers/usb/typec/ucsi/ucsi_yoga_c630.c1
-rw-r--r--drivers/vhost/vhost.c2
-rw-r--r--drivers/virt/acrn/hsm.c6
-rw-r--r--drivers/virt/coco/sev-guest/sev-guest.c58
-rw-r--r--drivers/virt/vboxguest/Kconfig3
-rw-r--r--drivers/xen/swiotlb-xen.c22
640 files changed, 20466 insertions, 5960 deletions
diff --git a/drivers/accel/amdxdna/amdxdna_mailbox.c b/drivers/accel/amdxdna/amdxdna_mailbox.c
index 814b16bb1953..e5301fac1397 100644
--- a/drivers/accel/amdxdna/amdxdna_mailbox.c
+++ b/drivers/accel/amdxdna/amdxdna_mailbox.c
@@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
+#include <linux/slab.h>
#include <linux/xarray.h>
#define CREATE_TRACE_POINTS
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index 97d4a032171f..f5b8497cf5ad 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -21,6 +21,11 @@
#define AMDXDNA_AUTOSUSPEND_DELAY 5000 /* milliseconds */
+MODULE_FIRMWARE("amdnpu/1502_00/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_10/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_11/npu.sbin");
+MODULE_FIRMWARE("amdnpu/17f0_20/npu.sbin");
+
/*
* Bind the driver base on (vendor_id, device_id) pair and later use the
* (device_id, rev_id) pair as a key to select the devices. The devices with
diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c
index 1e8ffbe25eee..38cf1c342c72 100644
--- a/drivers/accel/ivpu/ivpu_drv.c
+++ b/drivers/accel/ivpu/ivpu_drv.c
@@ -397,15 +397,19 @@ int ivpu_boot(struct ivpu_device *vdev)
if (ivpu_fw_is_cold_boot(vdev)) {
ret = ivpu_pm_dct_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
ret = ivpu_hw_sched_init(vdev);
if (ret)
- goto err_diagnose_failure;
+ goto err_disable_ipc;
}
return 0;
+err_disable_ipc:
+ ivpu_ipc_disable(vdev);
+ ivpu_hw_irq_disable(vdev);
+ disable_irq(vdev->irq);
err_diagnose_failure:
ivpu_hw_diagnose_failure(vdev);
ivpu_mmu_evtq_dump(vdev);
diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c
index 87d7411ae059..5060c5dd40d1 100644
--- a/drivers/accel/ivpu/ivpu_pm.c
+++ b/drivers/accel/ivpu/ivpu_pm.c
@@ -115,41 +115,57 @@ err_power_down:
return ret;
}
-static void ivpu_pm_recovery_work(struct work_struct *work)
+static void ivpu_pm_reset_begin(struct ivpu_device *vdev)
{
- struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
- struct ivpu_device *vdev = pm->vdev;
- char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
- int ret;
-
- ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
-
- ret = pm_runtime_resume_and_get(vdev->drm.dev);
- if (ret)
- ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
-
- ivpu_jsm_state_dump(vdev);
- ivpu_dev_coredump(vdev);
+ pm_runtime_disable(vdev->drm.dev);
atomic_inc(&vdev->pm->reset_counter);
atomic_set(&vdev->pm->reset_pending, 1);
down_write(&vdev->pm->reset_lock);
+}
+
+static void ivpu_pm_reset_complete(struct ivpu_device *vdev)
+{
+ int ret;
- ivpu_suspend(vdev);
ivpu_pm_prepare_cold_boot(vdev);
ivpu_jobs_abort_all(vdev);
ivpu_ms_cleanup_all(vdev);
ret = ivpu_resume(vdev);
- if (ret)
+ if (ret) {
ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ } else {
+ pm_runtime_set_active(vdev->drm.dev);
+ }
up_write(&vdev->pm->reset_lock);
atomic_set(&vdev->pm->reset_pending, 0);
- kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ pm_runtime_enable(vdev->drm.dev);
+}
+
+static void ivpu_pm_recovery_work(struct work_struct *work)
+{
+ struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, recovery_work);
+ struct ivpu_device *vdev = pm->vdev;
+ char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
+
+ ivpu_err(vdev, "Recovering the NPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_jsm_state_dump(vdev);
+ ivpu_dev_coredump(vdev);
+ ivpu_suspend(vdev);
+ }
+
+ ivpu_pm_reset_complete(vdev);
+
+ kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
}
void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
@@ -309,7 +325,10 @@ int ivpu_rpm_get(struct ivpu_device *vdev)
int ret;
ret = pm_runtime_resume_and_get(vdev->drm.dev);
- drm_WARN_ON(&vdev->drm, ret < 0);
+ if (ret < 0) {
+ ivpu_err(vdev, "Failed to resume NPU: %d\n", ret);
+ pm_runtime_set_suspended(vdev->drm.dev);
+ }
return ret;
}
@@ -325,16 +344,13 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
struct ivpu_device *vdev = pci_get_drvdata(pdev);
ivpu_dbg(vdev, PM, "Pre-reset..\n");
- atomic_inc(&vdev->pm->reset_counter);
- atomic_set(&vdev->pm->reset_pending, 1);
- pm_runtime_get_sync(vdev->drm.dev);
- down_write(&vdev->pm->reset_lock);
- ivpu_prepare_for_reset(vdev);
- ivpu_hw_reset(vdev);
- ivpu_pm_prepare_cold_boot(vdev);
- ivpu_jobs_abort_all(vdev);
- ivpu_ms_cleanup_all(vdev);
+ ivpu_pm_reset_begin(vdev);
+
+ if (!pm_runtime_status_suspended(vdev->drm.dev)) {
+ ivpu_prepare_for_reset(vdev);
+ ivpu_hw_reset(vdev);
+ }
ivpu_dbg(vdev, PM, "Pre-reset done.\n");
}
@@ -342,18 +358,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
{
struct ivpu_device *vdev = pci_get_drvdata(pdev);
- int ret;
ivpu_dbg(vdev, PM, "Post-reset..\n");
- ret = ivpu_resume(vdev);
- if (ret)
- ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
- up_write(&vdev->pm->reset_lock);
- atomic_set(&vdev->pm->reset_pending, 0);
- ivpu_dbg(vdev, PM, "Post-reset done.\n");
- pm_runtime_mark_last_busy(vdev->drm.dev);
- pm_runtime_put_autosuspend(vdev->drm.dev);
+ ivpu_pm_reset_complete(vdev);
+
+ ivpu_dbg(vdev, PM, "Post-reset done.\n");
}
void ivpu_pm_init(struct ivpu_device *vdev)
diff --git a/drivers/acpi/arm64/gtdt.c b/drivers/acpi/arm64/gtdt.c
index 3561553eff8b..70f8290b659d 100644
--- a/drivers/acpi/arm64/gtdt.c
+++ b/drivers/acpi/arm64/gtdt.c
@@ -163,7 +163,7 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
{
void *platform_timer;
struct acpi_table_gtdt *gtdt;
- int cnt = 0;
+ u32 cnt = 0;
gtdt = container_of(table, struct acpi_table_gtdt, header);
acpi_gtdt_desc.gtdt = gtdt;
@@ -188,13 +188,17 @@ int __init acpi_gtdt_init(struct acpi_table_header *table,
cnt++;
if (cnt != gtdt->platform_timer_count) {
+ cnt = min(cnt, gtdt->platform_timer_count);
+ pr_err(FW_BUG "limiting Platform Timer count to %d\n", cnt);
+ }
+
+ if (!cnt) {
acpi_gtdt_desc.platform_timer = NULL;
- pr_err(FW_BUG "invalid timer data.\n");
- return -EINVAL;
+ return 0;
}
if (platform_timer_count)
- *platform_timer_count = gtdt->platform_timer_count;
+ *platform_timer_count = cnt;
return 0;
}
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index fc92e43d0fe9..ef9444482db1 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -21,9 +21,15 @@ struct platform_profile_handler {
struct device dev;
int minor;
unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ unsigned long hidden_choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
const struct platform_profile_ops *ops;
};
+struct aggregate_choices_data {
+ unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ int count;
+};
+
static const char * const profile_names[] = {
[PLATFORM_PROFILE_LOW_POWER] = "low-power",
[PLATFORM_PROFILE_COOL] = "cool",
@@ -73,7 +79,7 @@ static int _store_class_profile(struct device *dev, void *data)
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
- if (!test_bit(*bit, handler->choices))
+ if (!test_bit(*bit, handler->choices) && !test_bit(*bit, handler->hidden_choices))
return -EOPNOTSUPP;
return handler->ops->profile_set(dev, *bit);
@@ -239,21 +245,44 @@ static const struct class platform_profile_class = {
/**
* _aggregate_choices - Aggregate the available profile choices
* @dev: The device
- * @data: The available profile choices
+ * @arg: struct aggregate_choices_data
*
* Return: 0 on success, -errno on failure
*/
-static int _aggregate_choices(struct device *dev, void *data)
+static int _aggregate_choices(struct device *dev, void *arg)
{
+ unsigned long tmp[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data *data = arg;
struct platform_profile_handler *handler;
- unsigned long *aggregate = data;
lockdep_assert_held(&profile_lock);
handler = to_pprof_handler(dev);
- if (test_bit(PLATFORM_PROFILE_LAST, aggregate))
- bitmap_copy(aggregate, handler->choices, PLATFORM_PROFILE_LAST);
+ bitmap_or(tmp, handler->choices, handler->hidden_choices, PLATFORM_PROFILE_LAST);
+ if (test_bit(PLATFORM_PROFILE_LAST, data->aggregate))
+ bitmap_copy(data->aggregate, tmp, PLATFORM_PROFILE_LAST);
else
- bitmap_and(aggregate, handler->choices, aggregate, PLATFORM_PROFILE_LAST);
+ bitmap_and(data->aggregate, tmp, data->aggregate, PLATFORM_PROFILE_LAST);
+ data->count++;
+
+ return 0;
+}
+
+/**
+ * _remove_hidden_choices - Remove hidden choices from aggregate data
+ * @dev: The device
+ * @arg: struct aggregate_choices_data
+ *
+ * Return: 0 on success, -errno on failure
+ */
+static int _remove_hidden_choices(struct device *dev, void *arg)
+{
+ struct aggregate_choices_data *data = arg;
+ struct platform_profile_handler *handler;
+
+ lockdep_assert_held(&profile_lock);
+ handler = to_pprof_handler(dev);
+ bitmap_andnot(data->aggregate, handler->choices,
+ handler->hidden_choices, PLATFORM_PROFILE_LAST);
return 0;
}
@@ -270,22 +299,31 @@ static ssize_t platform_profile_choices_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- unsigned long aggregate[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
int err;
- set_bit(PLATFORM_PROFILE_LAST, aggregate);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
- aggregate, _aggregate_choices);
+ &data, _aggregate_choices);
if (err)
return err;
+ if (data.count == 1) {
+ err = class_for_each_device(&platform_profile_class, NULL,
+ &data, _remove_hidden_choices);
+ if (err)
+ return err;
+ }
}
/* no profile handler registered any more */
- if (bitmap_empty(aggregate, PLATFORM_PROFILE_LAST))
+ if (bitmap_empty(data.aggregate, PLATFORM_PROFILE_LAST))
return -EINVAL;
- return _commmon_choices_show(aggregate, buf);
+ return _commmon_choices_show(data.aggregate, buf);
}
/**
@@ -373,7 +411,10 @@ static ssize_t platform_profile_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
int ret;
int i;
@@ -381,13 +422,13 @@ static ssize_t platform_profile_store(struct device *dev,
i = sysfs_match_string(profile_names, buf);
if (i < 0 || i == PLATFORM_PROFILE_CUSTOM)
return -EINVAL;
- set_bit(PLATFORM_PROFILE_LAST, choices);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
ret = class_for_each_device(&platform_profile_class, NULL,
- choices, _aggregate_choices);
+ &data, _aggregate_choices);
if (ret)
return ret;
- if (!test_bit(i, choices))
+ if (!test_bit(i, data.aggregate))
return -EOPNOTSUPP;
ret = class_for_each_device(&platform_profile_class, NULL, &i,
@@ -417,8 +458,14 @@ static int profile_class_registered(struct device *dev, const void *data)
static umode_t profile_class_is_visible(struct kobject *kobj, struct attribute *attr, int idx)
{
- if (!class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered))
+ struct device *dev;
+
+ dev = class_find_device(&platform_profile_class, NULL, NULL, profile_class_registered);
+ if (!dev)
return 0;
+
+ put_device(dev);
+
return attr->mode;
}
@@ -447,12 +494,15 @@ EXPORT_SYMBOL_GPL(platform_profile_notify);
*/
int platform_profile_cycle(void)
{
+ struct aggregate_choices_data data = {
+ .aggregate = { [0 ... BITS_TO_LONGS(PLATFORM_PROFILE_LAST) - 1] = ~0UL },
+ .count = 0,
+ };
enum platform_profile_option next = PLATFORM_PROFILE_LAST;
enum platform_profile_option profile = PLATFORM_PROFILE_LAST;
- unsigned long choices[BITS_TO_LONGS(PLATFORM_PROFILE_LAST)];
int err;
- set_bit(PLATFORM_PROFILE_LAST, choices);
+ set_bit(PLATFORM_PROFILE_LAST, data.aggregate);
scoped_cond_guard(mutex_intr, return -ERESTARTSYS, &profile_lock) {
err = class_for_each_device(&platform_profile_class, NULL,
&profile, _aggregate_profiles);
@@ -464,14 +514,14 @@ int platform_profile_cycle(void)
return -EINVAL;
err = class_for_each_device(&platform_profile_class, NULL,
- choices, _aggregate_choices);
+ &data, _aggregate_choices);
if (err)
return err;
/* never iterate into a custom if all drivers supported it */
- clear_bit(PLATFORM_PROFILE_CUSTOM, choices);
+ clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
- next = find_next_bit_wrap(choices,
+ next = find_next_bit_wrap(data.aggregate,
PLATFORM_PROFILE_LAST,
profile + 1);
@@ -526,6 +576,14 @@ struct device *platform_profile_register(struct device *dev, const char *name,
return ERR_PTR(-EINVAL);
}
+ if (ops->hidden_choices) {
+ err = ops->hidden_choices(drvdata, pprof->hidden_choices);
+ if (err) {
+ dev_err(dev, "platform_profile hidden_choices failed\n");
+ return ERR_PTR(err);
+ }
+ }
+
guard(mutex)(&profile_lock);
/* create class interface for individual handler */
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 747f83f7114d..e549914a636c 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -287,9 +287,7 @@ static acpi_status acpi_platformrt_space_handler(u32 function,
if (!handler || !module)
goto invalid_guid;
- if (!handler->handler_addr ||
- !handler->static_data_buffer_addr ||
- !handler->acpi_param_buffer_addr) {
+ if (!handler->handler_addr) {
buffer->prm_status = PRM_HANDLER_ERROR;
return AE_OK;
}
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 98d93ed58315..436019d96027 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1187,8 +1187,6 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
}
break;
}
- if (nval == 0)
- return -EINVAL;
if (obj->type == ACPI_TYPE_BUFFER) {
if (proptype != DEV_PROP_U8)
@@ -1212,9 +1210,11 @@ static int acpi_data_prop_read(const struct acpi_device_data *data,
ret = acpi_copy_property_array_uint(items, (u64 *)val, nval);
break;
case DEV_PROP_STRING:
- ret = acpi_copy_property_array_string(
- items, (char **)val,
- min_t(u32, nval, obj->package.count));
+ nval = min_t(u32, nval, obj->package.count);
+ if (nval == 0)
+ return -ENODATA;
+
+ ret = acpi_copy_property_array_string(items, (char **)val, nval);
break;
default:
ret = -EINVAL;
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 90aaec923889..b4cd14e7fa76 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -564,6 +564,12 @@ static const struct dmi_system_id irq1_edge_low_force_override[] = {
},
},
{
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Eluktronics Inc."),
+ DMI_MATCH(DMI_BOARD_NAME, "MECH-17"),
+ },
+ },
+ {
/* TongFang GM6XGxX/TUXEDO Stellaris 16 Gen5 AMD */
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index e4eb8357989c..6a66c9769c6c 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -3,7 +3,6 @@
#ifndef _LINUX_BINDER_INTERNAL_H
#define _LINUX_BINDER_INTERNAL_H
-#include <linux/export.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index bc6bae76ccaf..94c6446604fc 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -274,6 +274,7 @@ static void binderfs_evict_inode(struct inode *inode)
mutex_unlock(&binderfs_minors_mutex);
if (refcount_dec_and_test(&device->ref)) {
+ hlist_del_init(&device->hlist);
kfree(device->context.name);
kfree(device);
}
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 8e895ae45c86..c842e2de6ef9 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -386,8 +386,12 @@ struct ahci_host_priv {
static inline bool ahci_ignore_port(struct ahci_host_priv *hpriv,
unsigned int portid)
{
- return portid >= hpriv->nports ||
- !(hpriv->mask_port_map & (1 << portid));
+ if (portid >= hpriv->nports)
+ return true;
+ /* mask_port_map not set means that all ports are available */
+ if (!hpriv->mask_port_map)
+ return false;
+ return !(hpriv->mask_port_map & (1 << portid));
}
extern int ahci_ignore_sss;
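Illustration only (not part of the commit): a small userspace model of the fixed ahci_ignore_port() logic, using hypothetical port maps, to show that an unset mask_port_map now means "no masking" rather than "ignore every port".

#include <assert.h>
#include <stdbool.h>

/* hypothetical stand-in for the relevant ahci_host_priv fields */
struct host { unsigned int nports; unsigned long mask_port_map; };

static bool ignore_port(const struct host *h, unsigned int portid)
{
	if (portid >= h->nports)
		return true;
	/* mask_port_map not set means that all ports are available */
	if (!h->mask_port_map)
		return false;
	return !(h->mask_port_map & (1UL << portid));
}

int main(void)
{
	struct host h = { .nports = 4, .mask_port_map = 0 };

	assert(!ignore_port(&h, 2));	/* empty mask: nothing is masked */
	assert(ignore_port(&h, 5));	/* port id beyond nports         */

	h.mask_port_map = (1UL << 0) | (1UL << 2);
	assert(ignore_port(&h, 1));	/* not in the mask -> ignored    */
	assert(!ignore_port(&h, 2));	/* in the mask -> used           */
	return 0;
}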
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index fdfa7b266218..e7ace4b10f15 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -541,6 +541,7 @@ void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
hpriv->saved_port_map = port_map;
}
+ /* mask_port_map not set means that all ports are available */
if (hpriv->mask_port_map) {
dev_warn(dev, "masking port_map 0x%lx -> 0x%lx\n",
port_map,
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 53b2c7719dc5..91d44302eac9 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -651,8 +651,6 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev,
* If no sub-node was found, keep this for device tree
* compatibility
*/
- hpriv->mask_port_map |= BIT(0);
-
rc = ahci_platform_get_phy(hpriv, 0, dev, dev->of_node);
if (rc)
goto err_out;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 63ec2f218431..c085dd81ebe7 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4143,10 +4143,6 @@ static const struct ata_dev_quirks_entry __ata_dev_quirks[] = {
{ "Samsung SSD 860*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
- { "Samsung SSD 870 QVO*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
- ATA_QUIRK_ZERO_AFTER_TRIM |
- ATA_QUIRK_NO_NCQ_ON_ATI |
- ATA_QUIRK_NOLPM },
{ "Samsung SSD 870*", NULL, ATA_QUIRK_NO_NCQ_TRIM |
ATA_QUIRK_ZERO_AFTER_TRIM |
ATA_QUIRK_NO_NCQ_ON_ATI },
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7fb21768ca36..8074a10183dc 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
cpu.o firmware.o init.o map.o devres.o \
attribute_container.o transport_class.o \
topology.o container.o property.o cacheinfo.o \
- swnode.o
+ swnode.o faux.o
obj-$(CONFIG_AUXILIARY_BUS) += auxiliary.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 8cf04a557bdb..0042e4774b0c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -137,6 +137,7 @@ int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
int platform_bus_init(void);
+int faux_bus_init(void);
void cpu_dev_init(void);
void container_dev_init(void);
#ifdef CONFIG_AUXILIARY_BUS
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 5a1f05198114..2fde698430df 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2079,6 +2079,7 @@ static bool __fw_devlink_relax_cycles(struct fwnode_handle *con_handle,
out:
sup_handle->flags &= ~FWNODE_FLAG_VISITED;
put_device(sup_dev);
+ put_device(con_dev);
put_device(par_dev);
return ret;
}
diff --git a/drivers/base/faux.c b/drivers/base/faux.c
new file mode 100644
index 000000000000..531e9d789ee0
--- /dev/null
+++ b/drivers/base/faux.c
@@ -0,0 +1,232 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2025 The Linux Foundation
+ *
+ * A "simple" faux bus that allows devices to be created and added
+ * automatically to it. This is to be used whenever you need to create a
+ * device that is not associated with any "real" system resources, and do
+ * not want to have to deal with a bus/driver binding logic. It is
+ * intended to be very simple, with only a create and a destroy function
+ * available.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/container_of.h>
+#include <linux/device/faux.h>
+#include "base.h"
+
+/*
+ * Internal wrapper structure so we can hold a pointer to the
+ * faux_device_ops for this device.
+ */
+struct faux_object {
+ struct faux_device faux_dev;
+ const struct faux_device_ops *faux_ops;
+};
+#define to_faux_object(dev) container_of_const(dev, struct faux_object, faux_dev.dev)
+
+static struct device faux_bus_root = {
+ .init_name = "faux",
+};
+
+static int faux_match(struct device *dev, const struct device_driver *drv)
+{
+ /* Match always succeeds, we only have one driver */
+ return 1;
+}
+
+static int faux_probe(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+ int ret = 0;
+
+ if (faux_ops && faux_ops->probe)
+ ret = faux_ops->probe(faux_dev);
+
+ return ret;
+}
+
+static void faux_remove(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+ struct faux_device *faux_dev = &faux_obj->faux_dev;
+ const struct faux_device_ops *faux_ops = faux_obj->faux_ops;
+
+ if (faux_ops && faux_ops->remove)
+ faux_ops->remove(faux_dev);
+}
+
+static const struct bus_type faux_bus_type = {
+ .name = "faux",
+ .match = faux_match,
+ .probe = faux_probe,
+ .remove = faux_remove,
+};
+
+static struct device_driver faux_driver = {
+ .name = "faux_driver",
+ .bus = &faux_bus_type,
+ .probe_type = PROBE_FORCE_SYNCHRONOUS,
+};
+
+static void faux_device_release(struct device *dev)
+{
+ struct faux_object *faux_obj = to_faux_object(dev);
+
+ kfree(faux_obj);
+}
+
+/**
+ * faux_device_create_with_groups - Create and register with the driver
+ * core a faux device and populate the device with an initial
+ * set of sysfs attributes.
+ * @name: The name of the device we are adding, must be unique for
+ * all faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ * @groups: The set of sysfs attributes that will be created for this
+ * device when it is registered with the driver core.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device that
+ * for the caller to do something with at the proper time given the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create_with_groups(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops,
+ const struct attribute_group **groups)
+{
+ struct faux_object *faux_obj;
+ struct faux_device *faux_dev;
+ struct device *dev;
+ int ret;
+
+ faux_obj = kzalloc(sizeof(*faux_obj), GFP_KERNEL);
+ if (!faux_obj)
+ return NULL;
+
+ /* Save off the callbacks so we can use them in the future */
+ faux_obj->faux_ops = faux_ops;
+
+ /* Initialize the device portion and register it with the driver core */
+ faux_dev = &faux_obj->faux_dev;
+ dev = &faux_dev->dev;
+
+ device_initialize(dev);
+ dev->release = faux_device_release;
+ if (parent)
+ dev->parent = parent;
+ else
+ dev->parent = &faux_bus_root;
+ dev->bus = &faux_bus_type;
+ dev->groups = groups;
+ dev_set_name(dev, "%s", name);
+
+ ret = device_add(dev);
+ if (ret) {
+ pr_err("%s: device_add for faux device '%s' failed with %d\n",
+ __func__, name, ret);
+ put_device(dev);
+ return NULL;
+ }
+
+ return faux_dev;
+}
+EXPORT_SYMBOL_GPL(faux_device_create_with_groups);
+
+/**
+ * faux_device_create - create and register with the driver core a faux device
+ * @name: The name of the device we are adding, must be unique for all
+ * faux devices.
+ * @parent: Pointer to a potential parent struct device. If set to
+ * NULL, the device will be created in the "root" of the faux
+ * device tree in sysfs.
+ * @faux_ops: struct faux_device_ops that the new device will call back
+ * into, can be NULL.
+ *
+ * Create a new faux device and register it in the driver core properly.
+ * If present, callbacks in @faux_ops will be called with the device that
+ * for the caller to do something with at the proper time given the
+ * device's lifecycle.
+ *
+ * Note, when this function is called, the functions specified in struct
+ * faux_ops can be called before the function returns, so be prepared for
+ * everything to be properly initialized before that point in time.
+ *
+ * Return:
+ * * NULL if an error happened with creating the device
+ * * pointer to a valid struct faux_device that is registered with sysfs
+ */
+struct faux_device *faux_device_create(const char *name,
+ struct device *parent,
+ const struct faux_device_ops *faux_ops)
+{
+ return faux_device_create_with_groups(name, parent, faux_ops, NULL);
+}
+EXPORT_SYMBOL_GPL(faux_device_create);
+
+/**
+ * faux_device_destroy - destroy a faux device
+ * @faux_dev: faux device to destroy
+ *
+ * Unregisters and cleans up a device that was created with a call to
+ * faux_device_create()
+ */
+void faux_device_destroy(struct faux_device *faux_dev)
+{
+ struct device *dev = &faux_dev->dev;
+
+ if (!faux_dev)
+ return;
+
+ device_del(dev);
+
+ /* The final put_device() will clean up the memory we allocated for this device. */
+ put_device(dev);
+}
+EXPORT_SYMBOL_GPL(faux_device_destroy);
+
+int __init faux_bus_init(void)
+{
+ int ret;
+
+ ret = device_register(&faux_bus_root);
+ if (ret) {
+ put_device(&faux_bus_root);
+ return ret;
+ }
+
+ ret = bus_register(&faux_bus_type);
+ if (ret)
+ goto error_bus;
+
+ ret = driver_register(&faux_driver);
+ if (ret)
+ goto error_driver;
+
+ return ret;
+
+error_driver:
+ bus_unregister(&faux_bus_type);
+
+error_bus:
+ device_unregister(&faux_bus_root);
+ return ret;
+}
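A hedged usage sketch of the new faux API above (not part of the commit): a minimal consumer module written against the kernel-doc and the callback signatures visible in faux_probe()/faux_remove(). The module name, device name, and callbacks are hypothetical; the final linux/device/faux.h may differ in detail.

/* Hypothetical consumer of the new faux bus; illustration only. */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/device/faux.h>

static struct faux_device *demo_fdev;

static int demo_probe(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "demo faux device bound\n");
	return 0;
}

static void demo_remove(struct faux_device *fdev)
{
	dev_info(&fdev->dev, "demo faux device going away\n");
}

static const struct faux_device_ops demo_ops = {
	.probe  = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)
{
	/* NULL parent: the device appears under the "faux" root in sysfs */
	demo_fdev = faux_device_create("faux-demo", NULL, &demo_ops);
	if (!demo_fdev)
		return -ENODEV;
	return 0;
}

static void __exit demo_exit(void)
{
	faux_device_destroy(demo_fdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");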
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c4954835128c..9d2b06d65dfc 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -32,6 +32,7 @@ void __init driver_init(void)
/* These are also core pieces, but must come after the
* core core pieces.
*/
+ faux_bus_init();
of_core_init();
platform_bus_init();
auxiliary_bus_init();
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index d497d448e4b2..40e1d8d8a589 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1191,24 +1191,18 @@ static pm_message_t resume_event(pm_message_t sleep_state)
return PMSG_ON;
}
-static void dpm_superior_set_must_resume(struct device *dev, bool set_active)
+static void dpm_superior_set_must_resume(struct device *dev)
{
struct device_link *link;
int idx;
- if (dev->parent) {
+ if (dev->parent)
dev->parent->power.must_resume = true;
- if (set_active)
- dev->parent->power.set_active = true;
- }
idx = device_links_read_lock();
- list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node) {
+ list_for_each_entry_rcu_locked(link, &dev->links.suppliers, c_node)
link->supplier->power.must_resume = true;
- if (set_active)
- link->supplier->power.set_active = true;
- }
device_links_read_unlock(idx);
}
@@ -1287,9 +1281,12 @@ Skip:
dev->power.must_resume = true;
if (dev->power.must_resume) {
- dev->power.set_active = dev->power.set_active ||
- dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND);
- dpm_superior_set_must_resume(dev, dev->power.set_active);
+ if (dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) {
+ dev->power.set_active = true;
+ if (dev->parent && !dev->parent->power.ignore_children)
+ dev->parent->power.set_active = true;
+ }
+ dpm_superior_set_must_resume(dev);
}
Complete:
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 0bcd81389a29..978613407ea3 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -906,6 +906,7 @@ err_alloc:
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_buf);
kfree(d->status_reg_buf);
if (d->config_buf) {
@@ -981,6 +982,7 @@ void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
kfree(d->wake_buf);
kfree(d->mask_buf_def);
kfree(d->mask_buf);
+ kfree(d->main_status_buf);
kfree(d->status_reg_buf);
kfree(d->status_buf);
if (d->config_buf) {
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 33b3bc99d532..282f81616a78 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -1127,8 +1127,8 @@ static void vdc_queue_drain(struct vdc_port *port)
spin_lock_irq(&port->vio.lock);
port->drain = 0;
- blk_mq_unquiesce_queue(q, memflags);
- blk_mq_unfreeze_queue(q);
+ blk_mq_unquiesce_queue(q);
+ blk_mq_unfreeze_queue(q, memflags);
}
static void vdc_ldc_reset_timer_work(struct work_struct *work)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 529085181f35..ca9a67b5b537 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -2715,9 +2715,12 @@ static int ublk_ctrl_set_params(struct ublk_device *ub,
if (ph.len > sizeof(struct ublk_params))
ph.len = sizeof(struct ublk_params);
- /* parameters can only be changed when device isn't live */
mutex_lock(&ub->mutex);
- if (ub->dev_info.state == UBLK_S_DEV_LIVE) {
+ if (test_bit(UB_STATE_USED, &ub->state)) {
+ /*
+ * Parameters can only be changed when device hasn't
+ * been started yet
+ */
ret = -EACCES;
} else if (copy_from_user(&ub->params, argp, ph.len)) {
ret = -EFAULT;
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 2b79952f3628..091ffe3e1495 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1320,6 +1320,10 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
if (opcode == 0xfc01)
btintel_pcie_inject_cmd_complete(hdev, opcode);
}
+ /* Firmware raises alive interrupt on HCI_OP_RESET */
+ if (opcode == HCI_OP_RESET)
+ data->gp0_received = false;
+
hdev->stat.cmd_tx++;
break;
case HCI_ACLDATA_PKT:
@@ -1357,7 +1361,6 @@ static int btintel_pcie_send_frame(struct hci_dev *hdev,
opcode, btintel_pcie_alivectxt_state2str(old_ctxt),
btintel_pcie_alivectxt_state2str(data->alive_intr_ctxt));
if (opcode == HCI_OP_RESET) {
- data->gp0_received = false;
ret = wait_event_timeout(data->gp0_wait_q,
data->gp0_received,
msecs_to_jiffies(BTINTEL_DEFAULT_INTR_TIMEOUT_MS));
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 90966dfbd278..2a8d91963c63 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -2102,7 +2102,8 @@ static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -2576,7 +2577,8 @@ static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
return submit_or_queue_tx_urb(hdev, urb);
case HCI_SCODATA_PKT:
- if (hci_conn_num(hdev, SCO_LINK) < 1)
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ hci_conn_num(hdev, SCO_LINK) < 1)
return -ENODEV;
urb = alloc_isoc_urb(hdev, skb);
@@ -3642,6 +3644,7 @@ static ssize_t force_poll_sync_write(struct file *file,
}
static const struct file_operations force_poll_sync_fops = {
+ .owner = THIS_MODULE,
.open = simple_open,
.read = force_poll_sync_read,
.write = force_poll_sync_write,
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index efe43592e170..474f1359c997 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -1129,8 +1129,9 @@ static void mhi_pci_recovery_work(struct work_struct *work)
err_unprepare:
mhi_unprepare_after_power_down(mhi_cntrl);
err_try_reset:
- if (pci_reset_function(pdev))
- dev_err(&pdev->dev, "Recovery failed\n");
+ err = pci_try_reset_function(pdev);
+ if (err)
+ dev_err(&pdev->dev, "Recovery failed: %d\n", err);
}
static void health_check(struct timer_list *t)
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 6276551d7968..1e57ebfb7622 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -657,7 +657,7 @@ static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
id = moxtet->modules[pos->idx];
- seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ seq_printf(p, "moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
pos->bit);
}
diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c
index 5dea31769f9a..d8e029e7e53f 100644
--- a/drivers/bus/simple-pm-bus.c
+++ b/drivers/bus/simple-pm-bus.c
@@ -109,9 +109,29 @@ static int simple_pm_bus_runtime_resume(struct device *dev)
return 0;
}
+static int simple_pm_bus_suspend(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int simple_pm_bus_resume(struct device *dev)
+{
+ struct simple_pm_bus *bus = dev_get_drvdata(dev);
+
+ if (!bus)
+ return 0;
+
+ return pm_runtime_force_resume(dev);
+}
+
static const struct dev_pm_ops simple_pm_bus_pm_ops = {
RUNTIME_PM_OPS(simple_pm_bus_runtime_suspend, simple_pm_bus_runtime_resume, NULL)
- NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(simple_pm_bus_suspend, simple_pm_bus_resume)
};
#define ONLY_BUS ((void *) 1) /* Match if the device is only a bus. */
diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c
index c573ed2ee71a..7811aa734053 100644
--- a/drivers/cdx/cdx.c
+++ b/drivers/cdx/cdx.c
@@ -473,8 +473,12 @@ static ssize_t driver_override_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cdx_device *cdx_dev = to_cdx_device(dev);
+ ssize_t len;
- return sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_lock(dev);
+ len = sysfs_emit(buf, "%s\n", cdx_dev->driver_override);
+ device_unlock(dev);
+ return len;
}
static DEVICE_ATTR_RW(driver_override);
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 2cf595d2e10b..f7dd455dd0dd 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -264,8 +264,8 @@ int misc_register(struct miscdevice *misc)
device_create_with_groups(&misc_class, misc->parent, dev,
misc, misc->groups, "%s", misc->name);
if (IS_ERR(misc->this_device)) {
+ misc_minor_free(misc->minor);
if (is_dynamic) {
- misc_minor_free(misc->minor);
misc->minor = MISC_DYNAMIC_MINOR;
}
err = PTR_ERR(misc->this_device);
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index f887569fd3d0..677bb5ac950a 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -37,6 +37,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/gfp.h>
+#include <linux/string_choices.h>
#include <linux/uaccess.h>
#include <asm/io.h>
@@ -1268,12 +1269,12 @@ static void sonypi_display_info(void)
"compat = %s, mask = 0x%08lx, useinput = %s, acpi = %s\n",
sonypi_device.model,
verbose,
- fnkeyinit ? "on" : "off",
- camera ? "on" : "off",
- compat ? "on" : "off",
+ str_on_off(fnkeyinit),
+ str_on_off(camera),
+ str_on_off(compat),
mask,
- useinput ? "on" : "off",
- SONYPI_ACPI_ACTIVE ? "on" : "off");
+ str_on_off(useinput),
+ str_on_off(SONYPI_ACPI_ACTIVE));
printk(KERN_INFO "sonypi: enabled at irq=%d, port1=0x%x, port2=0x%x\n",
sonypi_device.irq,
sonypi_device.ioport1, sonypi_device.ioport2);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 24442485e73e..5f04951d0dd4 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -26,6 +26,7 @@
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/string_choices.h>
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
@@ -923,14 +924,14 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
pipe_lock(pipe);
ret = 0;
- if (pipe_empty(pipe->head, pipe->tail))
+ if (pipe_is_empty(pipe))
goto error_out;
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
goto error_out;
- occupancy = pipe_occupancy(pipe->head, pipe->tail);
+ occupancy = pipe_buf_usage(pipe);
buf = alloc_buf(port->portdev->vdev, 0, occupancy);
if (!buf) {
@@ -1269,8 +1270,7 @@ static int port_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, "bytes_sent: %lu\n", port->stats.bytes_sent);
seq_printf(s, "bytes_received: %lu\n", port->stats.bytes_received);
seq_printf(s, "bytes_discarded: %lu\n", port->stats.bytes_discarded);
- seq_printf(s, "is_console: %s\n",
- is_console_port(port) ? "yes" : "no");
+ seq_printf(s, "is_console: %s\n", str_yes_no(is_console_port(port)));
seq_printf(s, "console_vtermno: %u\n", port->cons.vtermno);
return 0;
@@ -1321,7 +1321,6 @@ static void send_sigio_to_port(struct port *port)
static int add_port(struct ports_device *portdev, u32 id)
{
- char debugfs_name[16];
struct port *port;
dev_t devt;
int err;
@@ -1424,9 +1423,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* Finally, create the debugfs file that we can use to
* inspect a port's state at any time
*/
- snprintf(debugfs_name, sizeof(debugfs_name), "vport%up%u",
- port->portdev->vdev->index, id);
- port->debugfs_file = debugfs_create_file(debugfs_name, 0444,
+ port->debugfs_file = debugfs_create_file(dev_name(port->dev), 0444,
pdrvdata.debugfs_dir,
port, &port_debugfs_fops);
return 0;
diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c
index a3fe98cd3838..82815428f8f9 100644
--- a/drivers/clocksource/jcore-pit.c
+++ b/drivers/clocksource/jcore-pit.c
@@ -114,6 +114,18 @@ static int jcore_pit_local_init(unsigned cpu)
pit->periodic_delta = DIV_ROUND_CLOSEST(NSEC_PER_SEC, HZ * buspd);
clockevents_config_and_register(&pit->ced, freq, 1, ULONG_MAX);
+ enable_percpu_irq(pit->ced.irq, IRQ_TYPE_NONE);
+
+ return 0;
+}
+
+static int jcore_pit_local_teardown(unsigned cpu)
+{
+ struct jcore_pit *pit = this_cpu_ptr(jcore_pit_percpu);
+
+ pr_info("Local J-Core PIT teardown on cpu %u\n", cpu);
+
+ disable_percpu_irq(pit->ced.irq);
return 0;
}
@@ -168,6 +180,7 @@ static int __init jcore_pit_init(struct device_node *node)
return -ENOMEM;
}
+ irq_set_percpu_devid(pit_irq);
err = request_percpu_irq(pit_irq, jcore_timer_interrupt,
"jcore_pit", jcore_pit_percpu);
if (err) {
@@ -237,7 +250,7 @@ static int __init jcore_pit_init(struct device_node *node)
cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING,
"clockevents/jcore:starting",
- jcore_pit_local_init, NULL);
+ jcore_pit_local_init, jcore_pit_local_teardown);
return 0;
}
diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c
index 2f096a5b973d..00a463b044b5 100644
--- a/drivers/counter/microchip-tcb-capture.c
+++ b/drivers/counter/microchip-tcb-capture.c
@@ -6,18 +6,24 @@
*/
#include <linux/clk.h>
#include <linux/counter.h>
+#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <uapi/linux/counter/microchip-tcb-capture.h>
#include <soc/at91/atmel_tcb.h>
#define ATMEL_TC_CMR_MASK (ATMEL_TC_LDRA_RISING | ATMEL_TC_LDRB_FALLING | \
ATMEL_TC_ETRGEDG_RISING | ATMEL_TC_LDBDIS | \
ATMEL_TC_LDBSTOP)
+#define ATMEL_TC_DEF_IRQS (ATMEL_TC_ETRGS | ATMEL_TC_COVFS | \
+ ATMEL_TC_LDRAS | ATMEL_TC_LDRBS | ATMEL_TC_CPCS)
+
#define ATMEL_TC_QDEN BIT(8)
#define ATMEL_TC_POSEN BIT(9)
@@ -247,6 +253,90 @@ static int mchp_tc_count_read(struct counter_device *counter,
return 0;
}
+static int mchp_tc_count_cap_read(struct counter_device *counter,
+ struct counter_count *count, size_t idx, u64 *val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 cnt;
+ int ret;
+
+ switch (idx) {
+ case COUNTER_MCHP_EXCAP_RA:
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RA), &cnt);
+ break;
+ case COUNTER_MCHP_EXCAP_RB:
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RB), &cnt);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ *val = cnt;
+
+ return 0;
+}
+
+static int mchp_tc_count_cap_write(struct counter_device *counter,
+ struct counter_count *count, size_t idx, u64 val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ int ret;
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ switch (idx) {
+ case COUNTER_MCHP_EXCAP_RA:
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RA), val);
+ break;
+ case COUNTER_MCHP_EXCAP_RB:
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RB), val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int mchp_tc_count_compare_read(struct counter_device *counter, struct counter_count *count,
+ u64 *val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 cnt;
+ int ret;
+
+ ret = regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], RC), &cnt);
+ if (ret < 0)
+ return ret;
+
+ *val = cnt;
+
+ return 0;
+}
+
+static int mchp_tc_count_compare_write(struct counter_device *counter, struct counter_count *count,
+ u64 val)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+
+ if (val > U32_MAX)
+ return -ERANGE;
+
+ return regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], RC), val);
+}
+
+static DEFINE_COUNTER_ARRAY_CAPTURE(mchp_tc_cnt_cap_array, 2);
+
+static struct counter_comp mchp_tc_count_ext[] = {
+ COUNTER_COMP_ARRAY_CAPTURE(mchp_tc_count_cap_read, mchp_tc_count_cap_write,
+ mchp_tc_cnt_cap_array),
+ COUNTER_COMP_COMPARE(mchp_tc_count_compare_read, mchp_tc_count_compare_write),
+};
+
static struct counter_count mchp_tc_counts[] = {
{
.id = 0,
@@ -255,6 +345,8 @@ static struct counter_count mchp_tc_counts[] = {
.num_functions = ARRAY_SIZE(mchp_tc_count_functions),
.synapses = mchp_tc_count_synapses,
.num_synapses = ARRAY_SIZE(mchp_tc_count_synapses),
+ .ext = mchp_tc_count_ext,
+ .num_ext = ARRAY_SIZE(mchp_tc_count_ext),
},
};
@@ -294,6 +386,65 @@ static const struct of_device_id atmel_tc_of_match[] = {
{ /* sentinel */ }
};
+static irqreturn_t mchp_tc_isr(int irq, void *dev_id)
+{
+ struct counter_device *const counter = dev_id;
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ u32 sr, mask;
+
+ regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);
+ regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], IMR), &mask);
+
+ sr &= mask;
+ if (!(sr & ATMEL_TC_ALL_IRQ))
+ return IRQ_NONE;
+
+ if (sr & ATMEL_TC_ETRGS)
+ counter_push_event(counter, COUNTER_EVENT_CHANGE_OF_STATE,
+ COUNTER_MCHP_EVCHN_CV);
+ if (sr & ATMEL_TC_LDRAS)
+ counter_push_event(counter, COUNTER_EVENT_CAPTURE,
+ COUNTER_MCHP_EVCHN_RA);
+ if (sr & ATMEL_TC_LDRBS)
+ counter_push_event(counter, COUNTER_EVENT_CAPTURE,
+ COUNTER_MCHP_EVCHN_RB);
+ if (sr & ATMEL_TC_CPCS)
+ counter_push_event(counter, COUNTER_EVENT_THRESHOLD,
+ COUNTER_MCHP_EVCHN_RC);
+ if (sr & ATMEL_TC_COVFS)
+ counter_push_event(counter, COUNTER_EVENT_OVERFLOW,
+ COUNTER_MCHP_EVCHN_CV);
+
+ return IRQ_HANDLED;
+}
+
+static void mchp_tc_irq_remove(void *ptr)
+{
+ struct mchp_tc_data *priv = ptr;
+
+ regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], IDR), ATMEL_TC_DEF_IRQS);
+}
+
+static int mchp_tc_irq_enable(struct counter_device *const counter, int irq)
+{
+ struct mchp_tc_data *const priv = counter_priv(counter);
+ int ret = devm_request_irq(counter->parent, irq, mchp_tc_isr, 0,
+ dev_name(counter->parent), counter);
+
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(priv->regmap, ATMEL_TC_REG(priv->channel[0], IER), ATMEL_TC_DEF_IRQS);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_add_action_or_reset(counter->parent, mchp_tc_irq_remove, priv);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
static void mchp_tc_clk_remove(void *ptr)
{
clk_disable_unprepare((struct clk *)ptr);
@@ -378,6 +529,15 @@ static int mchp_tc_probe(struct platform_device *pdev)
counter->num_signals = ARRAY_SIZE(mchp_tc_count_signals);
counter->signals = mchp_tc_count_signals;
+ i = of_irq_get(np->parent, 0);
+ if (i == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ if (i > 0) {
+ ret = mchp_tc_irq_enable(counter, i);
+ if (ret < 0)
+ return dev_err_probe(&pdev->dev, ret, "Failed to set up IRQ");
+ }
+
ret = devm_counter_add(&pdev->dev, counter);
if (ret < 0)
return dev_err_probe(&pdev->dev, ret, "Failed to add counter\n");
diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c
index bc586eff0dae..d21c157e531a 100644
--- a/drivers/counter/ti-eqep.c
+++ b/drivers/counter/ti-eqep.c
@@ -107,6 +107,15 @@
#define QCLR_PCE BIT(1)
#define QCLR_INT BIT(0)
+#define QEPSTS_UPEVNT BIT(7)
+#define QEPSTS_FDF BIT(6)
+#define QEPSTS_QDF BIT(5)
+#define QEPSTS_QDLF BIT(4)
+#define QEPSTS_COEF BIT(3)
+#define QEPSTS_CDEF BIT(2)
+#define QEPSTS_FIMF BIT(1)
+#define QEPSTS_PCEF BIT(0)
+
/* EQEP Inputs */
enum {
TI_EQEP_SIGNAL_QEPA, /* QEPA/XCLK */
@@ -286,6 +295,9 @@ static int ti_eqep_events_configure(struct counter_device *counter)
case COUNTER_EVENT_UNDERFLOW:
qeint |= QEINT_PCU;
break;
+ case COUNTER_EVENT_DIRECTION_CHANGE:
+ qeint |= QEINT_QDC;
+ break;
}
}
@@ -298,6 +310,7 @@ static int ti_eqep_watch_validate(struct counter_device *counter,
switch (watch->event) {
case COUNTER_EVENT_OVERFLOW:
case COUNTER_EVENT_UNDERFLOW:
+ case COUNTER_EVENT_DIRECTION_CHANGE:
if (watch->channel != 0)
return -EINVAL;
@@ -368,11 +381,27 @@ static int ti_eqep_position_enable_write(struct counter_device *counter,
return 0;
}
+static int ti_eqep_direction_read(struct counter_device *counter,
+ struct counter_count *count,
+ enum counter_count_direction *direction)
+{
+ struct ti_eqep_cnt *priv = counter_priv(counter);
+ u32 qepsts;
+
+ regmap_read(priv->regmap16, QEPSTS, &qepsts);
+
+ *direction = (qepsts & QEPSTS_QDF) ? COUNTER_COUNT_DIRECTION_FORWARD
+ : COUNTER_COUNT_DIRECTION_BACKWARD;
+
+ return 0;
+}
+
static struct counter_comp ti_eqep_position_ext[] = {
COUNTER_COMP_CEILING(ti_eqep_position_ceiling_read,
ti_eqep_position_ceiling_write),
COUNTER_COMP_ENABLE(ti_eqep_position_enable_read,
ti_eqep_position_enable_write),
+ COUNTER_COMP_DIRECTION(ti_eqep_direction_read),
};
static struct counter_signal ti_eqep_signals[] = {
@@ -439,6 +468,9 @@ static irqreturn_t ti_eqep_irq_handler(int irq, void *dev_id)
if (qflg & QFLG_PCU)
counter_push_event(counter, COUNTER_EVENT_UNDERFLOW, 0);
+ if (qflg & QFLG_QDC)
+ counter_push_event(counter, COUNTER_EVENT_DIRECTION_CHANGE, 0);
+
regmap_write(priv->regmap16, QCLR, qflg);
return IRQ_HANDLED;
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 0ee5c691fb36..9e46960f6a86 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -17,7 +17,8 @@ config ARM_ALLWINNER_SUN50I_CPUFREQ_NVMEM
config ARM_AIROHA_SOC_CPUFREQ
tristate "Airoha EN7581 SoC CPUFreq support"
- depends on (ARCH_AIROHA && OF) || COMPILE_TEST
+ depends on ARCH_AIROHA || COMPILE_TEST
+ depends on OF
select PM_OPP
default ARCH_AIROHA
help
diff --git a/drivers/cpufreq/amd-pstate.c b/drivers/cpufreq/amd-pstate.c
index dd9b8d6993d6..313550fa62d4 100644
--- a/drivers/cpufreq/amd-pstate.c
+++ b/drivers/cpufreq/amd-pstate.c
@@ -699,7 +699,7 @@ static void amd_pstate_adjust_perf(unsigned int cpu,
if (min_perf < lowest_nonlinear_perf)
min_perf = lowest_nonlinear_perf;
- max_perf = cap_perf;
+ max_perf = cpudata->max_limit_perf;
if (max_perf < min_perf)
max_perf = min_perf;
@@ -747,7 +747,6 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
guard(mutex)(&amd_pstate_driver_lock);
ret = amd_pstate_cpu_boost_update(policy, state);
- policy->boost_enabled = !ret ? state : false;
refresh_frequency_limits(policy);
return ret;
@@ -822,25 +821,28 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
static void amd_pstate_update_limits(unsigned int cpu)
{
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy *policy = NULL;
struct amd_cpudata *cpudata;
u32 prev_high = 0, cur_high = 0;
int ret;
bool highest_perf_changed = false;
+ if (!amd_pstate_prefcore)
+ return;
+
+ policy = cpufreq_cpu_get(cpu);
if (!policy)
return;
cpudata = policy->driver_data;
- if (!amd_pstate_prefcore)
- return;
-
guard(mutex)(&amd_pstate_driver_lock);
ret = amd_get_highest_perf(cpu, &cur_high);
- if (ret)
- goto free_cpufreq_put;
+ if (ret) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
prev_high = READ_ONCE(cpudata->prefcore_ranking);
highest_perf_changed = (prev_high != cur_high);
@@ -850,8 +852,6 @@ static void amd_pstate_update_limits(unsigned int cpu)
if (cur_high < CPPC_MAX_PERF)
sched_set_itmt_core_prio((int)cur_high, cpu);
}
-
-free_cpufreq_put:
cpufreq_cpu_put(policy);
if (!highest_perf_changed)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index e0048856ecee..30ffbddc7ece 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1571,7 +1571,8 @@ static int cpufreq_online(unsigned int cpu)
policy->cdev = of_cpufreq_cooling_register(policy);
/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
- if (policy->boost_enabled != cpufreq_boost_enabled()) {
+ if (cpufreq_driver->set_boost &&
+ policy->boost_enabled != cpufreq_boost_enabled()) {
policy->boost_enabled = cpufreq_boost_enabled();
ret = cpufreq_driver->set_boost(policy, policy->boost_enabled);
if (ret) {
diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
index 7eb3e4668286..3467f6db4f50 100644
--- a/drivers/crypto/ccp/sp-dev.c
+++ b/drivers/crypto/ccp/sp-dev.c
@@ -19,6 +19,7 @@
#include <linux/types.h>
#include <linux/ccp.h>
+#include "sev-dev.h"
#include "ccp-dev.h"
#include "sp-dev.h"
@@ -253,8 +254,12 @@ unlock:
static int __init sp_mod_init(void)
{
#ifdef CONFIG_X86
+ static bool initialized;
int ret;
+ if (initialized)
+ return 0;
+
ret = sp_pci_init();
if (ret)
return ret;
@@ -263,6 +268,8 @@ static int __init sp_mod_init(void)
psp_pci_init();
#endif
+ initialized = true;
+
return 0;
#endif
@@ -279,6 +286,13 @@ static int __init sp_mod_init(void)
return -ENODEV;
}
+#if IS_BUILTIN(CONFIG_KVM_AMD) && IS_ENABLED(CONFIG_KVM_AMD_SEV)
+int __init sev_module_init(void)
+{
+ return sp_mod_init();
+}
+#endif
+
static void __exit sp_mod_exit(void)
{
#ifdef CONFIG_X86
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index c14557efd577..bbc3276992bb 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -59,9 +59,6 @@ struct bam_desc_hw {
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
-#define BAM_NDP_REVISION_START 0x20
-#define BAM_NDP_REVISION_END 0x27
-
struct bam_async_desc {
struct virt_dma_desc vd;
@@ -401,7 +398,6 @@ struct bam_device {
/* dma start transaction tasklet */
struct tasklet_struct task;
- u32 bam_revision;
};
/**
@@ -445,10 +441,8 @@ static void bam_reset(struct bam_device *bdev)
writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
/* set descriptor threshold, start with 4 bytes */
- if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
- BAM_NDP_REVISION_END))
- writel_relaxed(DEFAULT_CNT_THRSHLD,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+ writel_relaxed(DEFAULT_CNT_THRSHLD,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
/* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
@@ -1006,10 +1000,9 @@ static void bam_apply_new_config(struct bam_chan *bchan,
maxburst = bchan->slave.src_maxburst;
else
maxburst = bchan->slave.dst_maxburst;
- if (in_range(bdev->bam_revision, BAM_NDP_REVISION_START,
- BAM_NDP_REVISION_END))
- writel_relaxed(maxburst,
- bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
+
+ writel_relaxed(maxburst,
+ bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
}
bchan->reconfigure = 0;
@@ -1199,11 +1192,10 @@ static int bam_init(struct bam_device *bdev)
u32 val;
/* read revision and configuration information */
- val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
- if (!bdev->num_ees)
+ if (!bdev->num_ees) {
+ val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
-
- bdev->bam_revision = val & REVISION_MASK;
+ }
/* check that configured EE is within range */
if (bdev->ee >= bdev->num_ees)
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index 6896da8ac7ef..ce80ac4b1a1b 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -83,7 +83,9 @@ struct tegra_adma;
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
+ * @max_page: Maximum ADMA Channel Page.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
+ * @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
@@ -99,6 +101,7 @@ struct tegra_adma_chip_data {
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
+ unsigned int max_page;
bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@@ -854,6 +857,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
+ .max_page = 0,
.has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@@ -871,6 +875,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
+ .max_page = 4,
.has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
@@ -887,7 +892,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
const struct tegra_adma_chip_data *cdata;
struct tegra_adma *tdma;
struct resource *res_page, *res_base;
- int ret, i, page_no;
+ int ret, i;
cdata = of_device_get_match_data(&pdev->dev);
if (!cdata) {
@@ -914,9 +919,20 @@ static int tegra_adma_probe(struct platform_device *pdev)
res_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "global");
if (res_base) {
- page_no = (res_page->start - res_base->start) / cdata->ch_base_offset;
- if (page_no <= 0)
+ resource_size_t page_offset, page_no;
+ unsigned int ch_base_offset;
+
+ if (res_page->start < res_base->start)
+ return -EINVAL;
+ page_offset = res_page->start - res_base->start;
+ ch_base_offset = cdata->ch_base_offset;
+ if (!ch_base_offset)
return -EINVAL;
+
+ page_no = div_u64(page_offset, ch_base_offset);
+ if (!page_no || page_no > INT_MAX)
+ return -EINVAL;
+
tdma->ch_page_no = page_no - 1;
tdma->base_addr = devm_ioremap_resource(&pdev->dev, res_base);
if (IS_ERR(tdma->base_addr))
diff --git a/drivers/edac/qcom_edac.c b/drivers/edac/qcom_edac.c
index 04c42c83a2ba..f3da9385ca0d 100644
--- a/drivers/edac/qcom_edac.c
+++ b/drivers/edac/qcom_edac.c
@@ -95,7 +95,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
* Configure interrupt enable registers such that Tag, Data RAM related
* interrupts are propagated to interrupt controller for servicing
*/
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
TRP0_INTERRUPT_ENABLE,
TRP0_INTERRUPT_ENABLE);
if (ret)
@@ -113,7 +113,7 @@ static int qcom_llcc_core_setup(struct llcc_drv_data *drv, struct regmap *llcc_b
if (ret)
return ret;
- ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_2_enable,
+ ret = regmap_update_bits(llcc_bcast_regmap, drv->edac_reg_offset->cmn_interrupt_0_enable,
DRP0_INTERRUPT_ENABLE,
DRP0_INTERRUPT_ENABLE);
if (ret)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 71d8b26c4103..9f35f69e0f9e 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -106,7 +106,7 @@ config ISCSI_IBFT
select ISCSI_BOOT_SYSFS
select ISCSI_IBFT_FIND if X86
depends on ACPI && SCSI && SCSI_LOWLEVEL
- default n
+ default n
help
This option enables support for detection and exposing of iSCSI
Boot Firmware Table (iBFT) via sysfs to userspace. If you wish to
diff --git a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
index 83b69fc4fba5..a8915d3b4df5 100644
--- a/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
+++ b/drivers/firmware/arm_scmi/vendors/imx/imx-sm-misc.c
@@ -254,8 +254,8 @@ static int scmi_imx_misc_ctrl_set(const struct scmi_protocol_handle *ph,
if (num > max_num)
return -EINVAL;
- ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET, sizeof(*in),
- 0, &t);
+ ret = ph->xops->xfer_get_init(ph, SCMI_IMX_MISC_CTRL_SET,
+ sizeof(*in) + num * sizeof(__le32), 0, &t);
if (ret)
return ret;
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index 5365e9a43000..42433c19eb30 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1609,8 +1609,8 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
goto out_fw;
}
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(region->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write %d bytes at %d in %s: %d\n",
@@ -1625,12 +1625,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
regions++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0) {
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
- goto out_fw;
- }
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, regions, pos - firmware->size);
@@ -1638,7 +1632,6 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
cs_dsp_debugfs_save_wmfwname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2326,8 +2319,8 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
file, blocks, le32_to_cpu(blk->len),
reg);
- ret = regmap_raw_write_async(regmap, reg, buf->buf,
- le32_to_cpu(blk->len));
+ ret = regmap_raw_write(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
if (ret != 0) {
cs_dsp_err(dsp,
"%s.%d: Failed to write to %x in %s: %d\n",
@@ -2339,10 +2332,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
blocks++;
}
- ret = regmap_async_complete(regmap);
- if (ret != 0)
- cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
-
if (pos > firmware->size)
cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
file, blocks, pos - firmware->size);
@@ -2350,7 +2339,6 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
cs_dsp_debugfs_save_binname(dsp, file);
out_fw:
- regmap_async_complete(regmap);
cs_dsp_buf_free(&buf_list);
if (ret == -EOVERFLOW)
@@ -2561,8 +2549,8 @@ static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
{
int ret;
- ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
- ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
if (ret != 0)
return ret;
diff --git a/drivers/firmware/efi/cper-arm.c b/drivers/firmware/efi/cper-arm.c
index fa9c1c3bf168..f0a63d09d3c4 100644
--- a/drivers/firmware/efi/cper-arm.c
+++ b/drivers/firmware/efi/cper-arm.c
@@ -311,7 +311,7 @@ void cper_print_proc_arm(const char *pfx,
ctx_info = (struct cper_arm_ctx_info *)err_info;
max_ctx_type = ARRAY_SIZE(arm_reg_ctx_strs) - 1;
for (i = 0; i < proc->context_info_num; i++) {
- int size = sizeof(*ctx_info) + ctx_info->size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->size, 16);
printk("%sContext info structure %d:\n", pfx, i);
if (len < size) {
diff --git a/drivers/firmware/efi/cper-x86.c b/drivers/firmware/efi/cper-x86.c
index 438ed9eff6d0..3949d7b5e808 100644
--- a/drivers/firmware/efi/cper-x86.c
+++ b/drivers/firmware/efi/cper-x86.c
@@ -325,7 +325,7 @@ void cper_print_proc_ia(const char *pfx, const struct cper_sec_proc_ia *proc)
ctx_info = (struct cper_ia_proc_ctx *)err_info;
for (i = 0; i < VALID_PROC_CXT_INFO_NUM(proc->validation_bits); i++) {
- int size = sizeof(*ctx_info) + ctx_info->reg_arr_size;
+ int size = ALIGN(sizeof(*ctx_info) + ctx_info->reg_arr_size, 16);
int groupsize = 4;
printk("%sContext Information Structure %d:\n", pfx, i);
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 8296bf985d1d..7309394b8fc9 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -934,13 +934,15 @@ char * __init efi_md_typeattr_format(char *buf, size_t size,
EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
- EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
+ EFI_MEMORY_MORE_RELIABLE | EFI_MEMORY_HOT_PLUGGABLE |
+ EFI_MEMORY_RUNTIME))
snprintf(pos, size, "|attr=0x%016llx]",
(unsigned long long)attr);
else
snprintf(pos, size,
- "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
+ "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
+ attr & EFI_MEMORY_HOT_PLUGGABLE ? "HP" : "",
attr & EFI_MEMORY_MORE_RELIABLE ? "MR" : "",
attr & EFI_MEMORY_CPU_CRYPTO ? "CC" : "",
attr & EFI_MEMORY_SP ? "SP" : "",
diff --git a/drivers/firmware/efi/libstub/randomalloc.c b/drivers/firmware/efi/libstub/randomalloc.c
index e5872e38d9a4..5a732018be36 100644
--- a/drivers/firmware/efi/libstub/randomalloc.c
+++ b/drivers/firmware/efi/libstub/randomalloc.c
@@ -25,6 +25,9 @@ static unsigned long get_entry_num_slots(efi_memory_desc_t *md,
if (md->type != EFI_CONVENTIONAL_MEMORY)
return 0;
+ if (md->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ return 0;
+
if (efi_soft_reserve_enabled() &&
(md->attribute & EFI_MEMORY_SP))
return 0;
diff --git a/drivers/firmware/efi/libstub/relocate.c b/drivers/firmware/efi/libstub/relocate.c
index 99b45d1cd624..d4264bfb6dc1 100644
--- a/drivers/firmware/efi/libstub/relocate.c
+++ b/drivers/firmware/efi/libstub/relocate.c
@@ -53,6 +53,9 @@ efi_status_t efi_low_alloc_above(unsigned long size, unsigned long align,
if (desc->type != EFI_CONVENTIONAL_MEMORY)
continue;
+ if (desc->attribute & EFI_MEMORY_HOT_PLUGGABLE)
+ continue;
+
if (efi_soft_reserve_enabled() &&
(desc->attribute & EFI_MEMORY_SP))
continue;
diff --git a/drivers/firmware/efi/mokvar-table.c b/drivers/firmware/efi/mokvar-table.c
index 5ed0602c2f75..208db29613c6 100644
--- a/drivers/firmware/efi/mokvar-table.c
+++ b/drivers/firmware/efi/mokvar-table.c
@@ -99,14 +99,13 @@ static struct kobject *mokvar_kobj;
*/
void __init efi_mokvar_table_init(void)
{
+ struct efi_mokvar_table_entry __aligned(1) *mokvar_entry, *next_entry;
efi_memory_desc_t md;
void *va = NULL;
unsigned long cur_offset = 0;
unsigned long offset_limit;
- unsigned long map_size = 0;
unsigned long map_size_needed = 0;
unsigned long size;
- struct efi_mokvar_table_entry *mokvar_entry;
int err;
if (!efi_enabled(EFI_MEMMAP))
@@ -134,48 +133,46 @@ void __init efi_mokvar_table_init(void)
*/
err = -EINVAL;
while (cur_offset + sizeof(*mokvar_entry) <= offset_limit) {
- mokvar_entry = va + cur_offset;
- map_size_needed = cur_offset + sizeof(*mokvar_entry);
- if (map_size_needed > map_size) {
- if (va)
- early_memunmap(va, map_size);
- /*
- * Map a little more than the fixed size entry
- * header, anticipating some data. It's safe to
- * do so as long as we stay within current memory
- * descriptor.
- */
- map_size = min(map_size_needed + 2*EFI_PAGE_SIZE,
- offset_limit);
- va = early_memremap(efi.mokvar_table, map_size);
- if (!va) {
- pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%lu.\n",
- efi.mokvar_table, map_size);
- return;
- }
- mokvar_entry = va + cur_offset;
+ if (va)
+ early_memunmap(va, sizeof(*mokvar_entry));
+ va = early_memremap(efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ if (!va) {
+ pr_err("Failed to map EFI MOKvar config table pa=0x%lx, size=%zu.\n",
+ efi.mokvar_table + cur_offset, sizeof(*mokvar_entry));
+ return;
}
-
+ mokvar_entry = va;
+next:
/* Check for last sentinel entry */
if (mokvar_entry->name[0] == '\0') {
if (mokvar_entry->data_size != 0)
break;
err = 0;
+ map_size_needed = cur_offset + sizeof(*mokvar_entry);
break;
}
- /* Sanity check that the name is null terminated */
- size = strnlen(mokvar_entry->name,
- sizeof(mokvar_entry->name));
- if (size >= sizeof(mokvar_entry->name))
- break;
+ /* Enforce that the name is NUL terminated */
+ mokvar_entry->name[sizeof(mokvar_entry->name) - 1] = '\0';
/* Advance to the next entry */
- cur_offset = map_size_needed + mokvar_entry->data_size;
+ size = sizeof(*mokvar_entry) + mokvar_entry->data_size;
+ cur_offset += size;
+
+ /*
+ * Don't bother remapping if the current entry header and the
+ * next one end on the same page.
+ */
+ next_entry = (void *)((unsigned long)mokvar_entry + size);
+ if (((((unsigned long)(mokvar_entry + 1) - 1) ^
+ ((unsigned long)(next_entry + 1) - 1)) & PAGE_MASK) == 0) {
+ mokvar_entry = next_entry;
+ goto next;
+ }
}
if (va)
- early_memunmap(va, map_size);
+ early_memunmap(va, sizeof(*mokvar_entry));
if (err) {
pr_err("EFI MOKvar config table is not valid\n");
return;
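[Editor's note: the per-entry remapping above leans on a compact page-identity test: the last byte of the current entry header and the last byte of the next one lie in the same page exactly when XOR-ing their addresses leaves no bits above the page offset. A standalone sketch of that predicate, assuming a 4 KiB page; the helper name and constants are illustrative, not kernel API.]

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096UL
#define SKETCH_PAGE_MASK (~(SKETCH_PAGE_SIZE - 1))

/* True when both addresses fall within the same 4 KiB page. */
static bool same_page(uintptr_t a, uintptr_t b)
{
	return ((a ^ b) & SKETCH_PAGE_MASK) == 0;
}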
diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig
index 907cd149c40a..c964f4924359 100644
--- a/drivers/firmware/imx/Kconfig
+++ b/drivers/firmware/imx/Kconfig
@@ -25,6 +25,7 @@ config IMX_SCU
config IMX_SCMI_MISC_DRV
tristate "IMX SCMI MISC Protocol driver"
+ depends on ARCH_MXC || COMPILE_TEST
default y if ARCH_MXC
help
The System Controller Management Interface firmware (SCMI FW) is
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6e9788324fea..371f24569b3b 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -310,7 +310,10 @@ static ssize_t ibft_attr_show_nic(void *data, int type, char *buf)
str += sprintf_ipaddr(str, nic->ip_addr);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
+ if (nic->subnet_mask_prefix > 32)
+ val = cpu_to_be32(~0);
+ else
+ val = cpu_to_be32(~((1 << (32-nic->subnet_mask_prefix))-1));
str += sprintf(str, "%pI4", &val);
break;
case ISCSI_BOOT_ETH_PREFIX_LEN:
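[Editor's note: the guarded expression above is the usual prefix-length-to-netmask conversion; a /24 prefix yields 0xffffff00 (255.255.255.0), and zero or out-of-range prefixes must be special-cased to avoid an undefined 32-bit shift. A standalone host-order sketch; prefix_to_netmask is a made-up helper, not part of the driver.]

#include <stdint.h>

/* Host-order IPv4 netmask for a given prefix length. */
static uint32_t prefix_to_netmask(unsigned int prefix)
{
	if (prefix == 0)
		return 0;
	if (prefix >= 32)
		return 0xffffffffu;
	return ~((1u << (32 - prefix)) - 1);
}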
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index add5ad29a673..98b4d1633b25 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -338,6 +338,7 @@ config GPIO_GRANITERAPIDS
config GPIO_GRGPIO
tristate "Aeroflex Gaisler GRGPIO support"
+ depends on OF || COMPILE_TEST
select GPIO_GENERIC
select IRQ_DOMAIN
help
diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c
index 65f41cc3eafc..d668ddb2e81d 100644
--- a/drivers/gpio/gpio-aggregator.c
+++ b/drivers/gpio/gpio-aggregator.c
@@ -119,10 +119,15 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
struct platform_device *pdev;
int res, id;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
/* kernfs guarantees string termination, so count + 1 is safe */
aggr = kzalloc(sizeof(*aggr) + count + 1, GFP_KERNEL);
- if (!aggr)
- return -ENOMEM;
+ if (!aggr) {
+ res = -ENOMEM;
+ goto put_module;
+ }
memcpy(aggr->args, buf, count + 1);
@@ -161,6 +166,7 @@ static ssize_t new_device_store(struct device_driver *driver, const char *buf,
}
aggr->pdev = pdev;
+ module_put(THIS_MODULE);
return count;
remove_table:
@@ -175,6 +181,8 @@ free_table:
kfree(aggr->lookups);
free_ga:
kfree(aggr);
+put_module:
+ module_put(THIS_MODULE);
return res;
}
@@ -203,13 +211,19 @@ static ssize_t delete_device_store(struct device_driver *driver,
if (error)
return error;
+ if (!try_module_get(THIS_MODULE))
+ return -ENOENT;
+
mutex_lock(&gpio_aggregator_lock);
aggr = idr_remove(&gpio_aggregator_idr, id);
mutex_unlock(&gpio_aggregator_lock);
- if (!aggr)
+ if (!aggr) {
+ module_put(THIS_MODULE);
return -ENOENT;
+ }
gpio_aggregator_free(aggr);
+ module_put(THIS_MODULE);
return count;
}
static DRIVER_ATTR_WO(delete_device);
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 5321ef98f442..64908f1a5e7f 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -69,6 +69,22 @@ struct bcm_kona_gpio {
struct bcm_kona_gpio_bank {
int id;
int irq;
+ /*
+ * Used to keep track of lock/unlock operations for each GPIO in the
+ * bank.
+ *
+ * All GPIOs are locked by default (see bcm_kona_gpio_reset), and the
+ * unlock count for all GPIOs is 0 by default. Each unlock increments
+ * the counter, and each lock decrements the counter.
+ *
+ * The lock function only locks the GPIO once its unlock counter is
+ * down to 0. This is necessary because the GPIO is unlocked in two
+ * places in this driver: once for requested GPIOs, and once for
+ * requested IRQs. Since it is possible for a GPIO to be requested
+ * as both a GPIO and an IRQ, we need to ensure that we don't lock it
+ * too early.
+ */
+ u8 gpio_unlock_count[GPIO_PER_BANK];
/* Used in the interrupt handler */
struct bcm_kona_gpio *kona_gpio;
};
@@ -86,14 +102,24 @@ static void bcm_kona_gpio_lock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ dev_err(kona_gpio->gpio_chip.parent,
+ "Unbalanced locks for GPIO %u\n", gpio);
+ return;
+ }
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val |= BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ if (--bank->gpio_unlock_count[bit] == 0) {
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val |= BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ }
}
static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
@@ -102,14 +128,20 @@ static void bcm_kona_gpio_unlock_gpio(struct bcm_kona_gpio *kona_gpio,
u32 val;
unsigned long flags;
int bank_id = GPIO_BANK(gpio);
+ int bit = GPIO_BIT(gpio);
+ struct bcm_kona_gpio_bank *bank = &kona_gpio->banks[bank_id];
- raw_spin_lock_irqsave(&kona_gpio->lock, flags);
+ if (bank->gpio_unlock_count[bit] == 0) {
+ raw_spin_lock_irqsave(&kona_gpio->lock, flags);
- val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
- val &= ~BIT(gpio);
- bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
+ val = readl(kona_gpio->reg_base + GPIO_PWD_STATUS(bank_id));
+ val &= ~BIT(bit);
+ bcm_kona_gpio_write_lock_regs(kona_gpio->reg_base, bank_id, val);
- raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ raw_spin_unlock_irqrestore(&kona_gpio->lock, flags);
+ }
+
+ ++bank->gpio_unlock_count[bit];
}
static int bcm_kona_gpio_get_dir(struct gpio_chip *chip, unsigned gpio)
@@ -360,6 +392,7 @@ static void bcm_kona_gpio_irq_mask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
+
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MASK(bank_id));
@@ -382,6 +415,7 @@ static void bcm_kona_gpio_irq_unmask(struct irq_data *d)
kona_gpio = irq_data_get_irq_chip_data(d);
reg_base = kona_gpio->reg_base;
+
raw_spin_lock_irqsave(&kona_gpio->lock, flags);
val = readl(reg_base + GPIO_INT_MSKCLR(bank_id));
@@ -477,15 +511,26 @@ static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
static int bcm_kona_gpio_irq_reqres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
- return gpiochip_reqres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ /*
+ * We need to unlock the GPIO before any other operations are performed
+ * on the relevant GPIO configuration registers
+ */
+ bcm_kona_gpio_unlock_gpio(kona_gpio, gpio);
+
+ return gpiochip_reqres_irq(&kona_gpio->gpio_chip, gpio);
}
static void bcm_kona_gpio_irq_relres(struct irq_data *d)
{
struct bcm_kona_gpio *kona_gpio = irq_data_get_irq_chip_data(d);
+ unsigned int gpio = d->hwirq;
+
+ /* Once we no longer use it, lock the GPIO again */
+ bcm_kona_gpio_lock_gpio(kona_gpio, gpio);
- gpiochip_relres_irq(&kona_gpio->gpio_chip, d->hwirq);
+ gpiochip_relres_irq(&kona_gpio->gpio_chip, gpio);
}
static struct irq_chip bcm_gpio_irq_chip = {
@@ -614,7 +659,7 @@ static int bcm_kona_gpio_probe(struct platform_device *pdev)
bank->irq = platform_get_irq(pdev, i);
bank->kona_gpio = kona_gpio;
if (bank->irq < 0) {
- dev_err(dev, "Couldn't get IRQ for bank %d", i);
+ dev_err(dev, "Couldn't get IRQ for bank %d\n", i);
ret = -ENOENT;
goto err_irq_domain;
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index be4c9981ebc4..d63c1030e6ac 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -841,25 +841,6 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, unsigned long *pendin
DECLARE_BITMAP(trigger, MAX_LINE);
int ret;
- if (chip->driver_data & PCA_PCAL) {
- /* Read the current interrupt status from the device */
- ret = pca953x_read_regs(chip, PCAL953X_INT_STAT, trigger);
- if (ret)
- return false;
-
- /* Check latched inputs and clear interrupt status */
- ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
- if (ret)
- return false;
-
- /* Apply filter for rising/falling edge selection */
- bitmap_replace(new_stat, chip->irq_trig_fall, chip->irq_trig_raise, cur_stat, gc->ngpio);
-
- bitmap_and(pending, new_stat, trigger, gc->ngpio);
-
- return !bitmap_empty(pending, gc->ngpio);
- }
-
ret = pca953x_read_regs(chip, chip->regs->input, cur_stat);
if (ret)
return false;
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2ecee3269a0c..a7a1cdf7ac66 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -40,7 +40,7 @@ struct gpio_rcar_info {
struct gpio_rcar_priv {
void __iomem *base;
- spinlock_t lock;
+ raw_spinlock_t lock;
struct device *dev;
struct gpio_chip gpio_chip;
unsigned int irq_parent;
@@ -123,7 +123,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
* "Setting Level-Sensitive Interrupt Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive or negative logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge);
@@ -142,7 +142,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
if (!level_trigger)
gpio_rcar_write(p, INTCLR, BIT(hwirq));
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
@@ -246,7 +246,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
* "Setting General Input Mode"
*/
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
/* Configure positive logic in POSNEG */
gpio_rcar_modify_bit(p, POSNEG, gpio, false);
@@ -261,7 +261,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_request(struct gpio_chip *chip, unsigned offset)
@@ -347,7 +347,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
return 0;
}
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
outputs = gpio_rcar_read(p, INOUTSEL);
m = outputs & bankmask;
if (m)
@@ -356,7 +356,7 @@ static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
m = ~outputs & bankmask;
if (m)
val |= gpio_rcar_read(p, INDT) & m;
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
bits[0] = val;
return 0;
@@ -367,9 +367,9 @@ static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
struct gpio_rcar_priv *p = gpiochip_get_data(chip);
unsigned long flags;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
gpio_rcar_modify_bit(p, OUTDT, offset, value);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
@@ -386,12 +386,12 @@ static void gpio_rcar_set_multiple(struct gpio_chip *chip, unsigned long *mask,
if (!bankmask)
return;
- spin_lock_irqsave(&p->lock, flags);
+ raw_spin_lock_irqsave(&p->lock, flags);
val = gpio_rcar_read(p, OUTDT);
val &= ~bankmask;
val |= (bankmask & bits[0]);
gpio_rcar_write(p, OUTDT, val);
- spin_unlock_irqrestore(&p->lock, flags);
+ raw_spin_unlock_irqrestore(&p->lock, flags);
}
static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
@@ -468,7 +468,12 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
- *npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
+ if (ret) {
+ *npins = RCAR_MAX_GPIO_PER_BANK;
+ } else {
+ *npins = args.args[2];
+ of_node_put(args.np);
+ }
if (*npins == 0 || *npins > RCAR_MAX_GPIO_PER_BANK) {
dev_warn(p->dev, "Invalid number of gpio lines %u, using %u\n",
@@ -505,7 +510,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
return -ENOMEM;
p->dev = dev;
- spin_lock_init(&p->lock);
+ raw_spin_lock_init(&p->lock);
/* Get device configuration from DT node */
ret = gpio_rcar_parse_dt(p, &npins);
diff --git a/drivers/gpio/gpio-sim.c b/drivers/gpio/gpio-sim.c
index a086087ada17..b6c230fab840 100644
--- a/drivers/gpio/gpio-sim.c
+++ b/drivers/gpio/gpio-sim.c
@@ -1028,20 +1028,23 @@ gpio_sim_device_lockup_configfs(struct gpio_sim_device *dev, bool lock)
struct configfs_subsystem *subsys = dev->group.cg_subsys;
struct gpio_sim_bank *bank;
struct gpio_sim_line *line;
+ struct config_item *item;
/*
- * The device only needs to depend on leaf line entries. This is
+ * The device only needs to depend on leaf entries. This is
* sufficient to lock up all the configfs entries that the
* instantiated, alive device depends on.
*/
list_for_each_entry(bank, &dev->bank_list, siblings) {
list_for_each_entry(line, &bank->line_list, siblings) {
+ item = line->hog ? &line->hog->item
+ : &line->group.cg_item;
+
if (lock)
- WARN_ON(configfs_depend_item_unlocked(
- subsys, &line->group.cg_item));
+ WARN_ON(configfs_depend_item_unlocked(subsys,
+ item));
else
- configfs_undepend_item_unlocked(
- &line->group.cg_item);
+ configfs_undepend_item_unlocked(item);
}
}
}
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 75a3633ceddb..222279a9d82b 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -191,7 +191,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
[REG_IE][CSB] = STMPE_IDX_IEGPIOR_CSB,
[REG_IE][MSB] = STMPE_IDX_IEGPIOR_MSB,
};
- int i, j;
+ int ret, i, j;
/*
* STMPE1600: to be able to get IRQ from pins,
@@ -199,8 +199,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
* GPSR or GPCR registers
*/
if (stmpe->partnum == STMPE1600) {
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
- stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_LSB: %d\n", ret);
+ goto err;
+ }
+ ret = stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
+ if (ret < 0) {
+ dev_err(stmpe->dev, "Failed to read GPMR_CSB: %d\n", ret);
+ goto err;
+ }
}
for (i = 0; i < CACHE_NR_REGS; i++) {
@@ -222,6 +230,7 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
}
}
+err:
mutex_unlock(&stmpe_gpio->irq_lock);
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index c4f34a347cb6..c36a9dbccd4d 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -36,6 +36,7 @@ struct vf610_gpio_port {
struct clk *clk_port;
struct clk *clk_gpio;
int irq;
+ spinlock_t lock; /* protect gpio direction registers */
};
#define GPIO_PDOR 0x00
@@ -124,6 +125,7 @@ static int vf610_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio)
u32 val;
if (port->sdata->have_paddr) {
+ guard(spinlock_irqsave)(&port->lock);
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val &= ~mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
@@ -142,6 +144,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio
vf610_gpio_set(chip, gpio, value);
if (port->sdata->have_paddr) {
+ guard(spinlock_irqsave)(&port->lock);
val = vf610_gpio_readl(port->gpio_base + GPIO_PDDR);
val |= mask;
vf610_gpio_writel(val, port->gpio_base + GPIO_PDDR);
@@ -297,6 +300,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
return -ENOMEM;
port->sdata = device_get_match_data(dev);
+ spin_lock_init(&port->lock);
dual_base = port->sdata->have_dual_base;
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 1f9fe50bba00..f7746c57ba76 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -1689,6 +1689,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
.ignore_wake = "PNP0C50:00@8",
},
},
+ {
+ /*
+ * Spurious wakeups from GPIO 11
+ * Found in BIOS 1.04
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/3954
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Acer Nitro V 14"),
+ },
+ .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+ .ignore_interrupt = "AMDI0030:00@11",
+ },
+ },
{} /* Terminating entry */
};
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 679ed764cb14..8741600af7ef 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -904,13 +904,13 @@ int gpiochip_get_ngpios(struct gpio_chip *gc, struct device *dev)
}
if (gc->ngpio == 0) {
- chip_err(gc, "tried to insert a GPIO chip with zero lines\n");
+ dev_err(dev, "tried to insert a GPIO chip with zero lines\n");
return -EINVAL;
}
if (gc->ngpio > FASTPATH_NGPIO)
- chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n",
- gc->ngpio, FASTPATH_NGPIO);
+ dev_warn(dev, "line cnt %u is greater than fast path cnt %u\n",
+ gc->ngpio, FASTPATH_NGPIO);
return 0;
}
@@ -1057,8 +1057,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
desc->gdev = gdev;
if (gc->get_direction && gpiochip_line_is_valid(gc, desc_index)) {
- assign_bit(FLAG_IS_OUT,
- &desc->flags, !gc->get_direction(gc, desc_index));
+ ret = gc->get_direction(gc, desc_index);
+ if (ret < 0)
+ /*
+ * FIXME: Bail-out here once all GPIO drivers
+ * are updated to not return errors in
+ * situations that can be considered normal
+ * operation.
+ */
+ dev_warn(&gdev->dev,
+ "%s: get_direction failed: %d\n",
+ __func__, ret);
+
+ assign_bit(FLAG_IS_OUT, &desc->flags, !ret);
} else {
assign_bit(FLAG_IS_OUT,
&desc->flags, !gc->direction_input);
@@ -2701,7 +2712,7 @@ EXPORT_SYMBOL_GPL(gpiod_direction_input);
int gpiod_direction_input_nonotify(struct gpio_desc *desc)
{
- int ret = 0;
+ int ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2728,13 +2739,18 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
if (guard.gc->direction_input) {
ret = guard.gc->direction_input(guard.gc,
gpio_chip_hwgpio(desc));
- } else if (guard.gc->get_direction &&
- (guard.gc->get_direction(guard.gc,
- gpio_chip_hwgpio(desc)) != 1)) {
- gpiod_warn(desc,
- "%s: missing direction_input() operation and line is output\n",
- __func__);
- return -EIO;
+ } else if (guard.gc->get_direction) {
+ dir = guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (dir < 0)
+ return dir;
+
+ if (dir != GPIO_LINE_DIRECTION_IN) {
+ gpiod_warn(desc,
+ "%s: missing direction_input() operation and line is output\n",
+ __func__);
+ return -EIO;
+ }
}
if (ret == 0) {
clear_bit(FLAG_IS_OUT, &desc->flags);
@@ -2748,7 +2764,7 @@ int gpiod_direction_input_nonotify(struct gpio_desc *desc)
static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
{
- int val = !!value, ret = 0;
+ int val = !!value, ret = 0, dir;
CLASS(gpio_chip_guard, guard)(desc);
if (!guard.gc)
@@ -2771,12 +2787,18 @@ static int gpiod_direction_output_raw_commit(struct gpio_desc *desc, int value)
gpio_chip_hwgpio(desc), val);
} else {
/* Check that we are in output mode if we can */
- if (guard.gc->get_direction &&
- guard.gc->get_direction(guard.gc, gpio_chip_hwgpio(desc))) {
- gpiod_warn(desc,
- "%s: missing direction_output() operation\n",
- __func__);
- return -EIO;
+ if (guard.gc->get_direction) {
+ dir = guard.gc->get_direction(guard.gc,
+ gpio_chip_hwgpio(desc));
+ if (dir < 0)
+ return dir;
+
+ if (dir != GPIO_LINE_DIRECTION_OUT) {
+ gpiod_warn(desc,
+ "%s: missing direction_output() operation\n",
+ __func__);
+ return -EIO;
+ }
}
/*
* If we can't actively set the direction, we are some
@@ -3129,6 +3151,8 @@ static int gpiod_get_raw_value_commit(const struct gpio_desc *desc)
static int gpio_chip_get_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->get_multiple)
return gc->get_multiple(gc, mask, bits);
if (gc->get) {
@@ -3159,6 +3183,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int ret, i = 0;
/*
@@ -3170,10 +3195,15 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
- ret = gpio_chip_get_multiple(array_info->chip,
- array_info->get_mask,
+ ret = gpio_chip_get_multiple(gc, array_info->get_mask,
value_bitmap);
if (ret)
return ret;
@@ -3454,6 +3484,8 @@ static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value)
static void gpio_chip_set_multiple(struct gpio_chip *gc,
unsigned long *mask, unsigned long *bits)
{
+ lockdep_assert_held(&gc->gpiodev->srcu);
+
if (gc->set_multiple) {
gc->set_multiple(gc, mask, bits);
} else {
@@ -3471,6 +3503,7 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
struct gpio_array *array_info,
unsigned long *value_bitmap)
{
+ struct gpio_chip *gc;
int i = 0;
/*
@@ -3482,14 +3515,19 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep,
array_size <= array_info->size &&
(void *)array_info == desc_array + array_info->size) {
if (!can_sleep)
- WARN_ON(array_info->chip->can_sleep);
+ WARN_ON(array_info->gdev->can_sleep);
+
+ guard(srcu)(&array_info->gdev->srcu);
+ gc = srcu_dereference(array_info->gdev->chip,
+ &array_info->gdev->srcu);
+ if (!gc)
+ return -ENODEV;
if (!raw && !bitmap_empty(array_info->invert_mask, array_size))
bitmap_xor(value_bitmap, value_bitmap,
array_info->invert_mask, array_size);
- gpio_chip_set_multiple(array_info->chip, array_info->set_mask,
- value_bitmap);
+ gpio_chip_set_multiple(gc, array_info->set_mask, value_bitmap);
i = find_first_zero_bit(array_info->set_mask, array_size);
if (i == array_size)
@@ -4751,9 +4789,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
{
struct gpio_desc *desc;
struct gpio_descs *descs;
+ struct gpio_device *gdev;
struct gpio_array *array_info = NULL;
- struct gpio_chip *gc;
int count, bitmap_size;
+ unsigned long dflags;
size_t descs_size;
count = gpiod_count(dev, con_id);
@@ -4774,7 +4813,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
descs->desc[descs->ndescs] = desc;
- gc = gpiod_to_chip(desc);
+ gdev = gpiod_to_gpio_device(desc);
/*
* If pin hardware number of array member 0 is also 0, select
* its chip as a candidate for fast bitmap processing path.
@@ -4782,8 +4821,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) {
struct gpio_descs *array;
- bitmap_size = BITS_TO_LONGS(gc->ngpio > count ?
- gc->ngpio : count);
+ bitmap_size = BITS_TO_LONGS(gdev->ngpio > count ?
+ gdev->ngpio : count);
array = krealloc(descs, descs_size +
struct_size(array_info, invert_mask, 3 * bitmap_size),
@@ -4803,7 +4842,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->desc = descs->desc;
array_info->size = count;
- array_info->chip = gc;
+ array_info->gdev = gdev;
bitmap_set(array_info->get_mask, descs->ndescs,
count - descs->ndescs);
bitmap_set(array_info->set_mask, descs->ndescs,
@@ -4816,7 +4855,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
continue;
/* Unmark array members which don't belong to the 'fast' chip */
- if (array_info->chip != gc) {
+ if (array_info->gdev != gdev) {
__clear_bit(descs->ndescs, array_info->get_mask);
__clear_bit(descs->ndescs, array_info->set_mask);
}
@@ -4839,9 +4878,10 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
array_info->set_mask);
}
} else {
+ dflags = READ_ONCE(desc->flags);
/* Exclude open drain or open source from fast output */
- if (gpiochip_line_is_open_drain(gc, descs->ndescs) ||
- gpiochip_line_is_open_source(gc, descs->ndescs))
+ if (test_bit(FLAG_OPEN_DRAIN, &dflags) ||
+ test_bit(FLAG_OPEN_SOURCE, &dflags))
__clear_bit(descs->ndescs,
array_info->set_mask);
/* Identify 'fast' pins which require invertion */
@@ -4853,7 +4893,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev,
if (array_info)
dev_dbg(dev,
"GPIO array info: chip=%s, size=%d, get_mask=%lx, set_mask=%lx, invert_mask=%lx\n",
- array_info->chip->label, array_info->size,
+ array_info->gdev->label, array_info->size,
*array_info->get_mask, *array_info->set_mask,
*array_info->invert_mask);
return descs;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index 83690f72f7e5..147156ec502b 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -114,7 +114,7 @@ extern const char *const gpio_suffixes[];
*
* @desc: Array of pointers to the GPIO descriptors
* @size: Number of elements in desc
- * @chip: Parent GPIO chip
+ * @gdev: Parent GPIO device
* @get_mask: Get mask used in fastpath
* @set_mask: Set mask used in fastpath
* @invert_mask: Invert mask used in fastpath
@@ -126,7 +126,7 @@ extern const char *const gpio_suffixes[];
struct gpio_array {
struct gpio_desc **desc;
unsigned int size;
- struct gpio_chip *chip;
+ struct gpio_device *gdev;
unsigned long *get_mask;
unsigned long *set_mask;
unsigned long invert_mask[];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d100bb7a137c..018dfccd771b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1638,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
if (amdgpu_sriov_vf(adev))
return 0;
+ /* resizing on Dell G5 SE platforms causes problems with runtime pm */
+ if ((amdgpu_runtime_pm != 0) &&
+ adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+ adev->pdev->device == 0x731f &&
+ adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+ return 0;
+
/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
DRM_WARN("System can't access extended configuration space, please check!!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 817116e53d44..95a05b03f799 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -119,9 +119,11 @@
* - 3.57.0 - Compute tunneling on GFX10+
* - 3.58.0 - Add GFX12 DCC support
* - 3.59.0 - Cleared VRAM
+ * - 3.60.0 - Add AMDGPU_TILING_GFX12_DCC_WRITE_COMPRESS_DISABLE (Vulkan requirement)
+ * - 3.61.0 - Contains fix for RV/PCO compute queues
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 59
+#define KMS_DRIVER_MINOR 61
#define KMS_DRIVER_PATCHLEVEL 0
/*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 784b03abb3a4..c1f35ded684e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1638,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
}
mutex_lock(&adev->enforce_isolation_mutex);
-
for (i = 0; i < num_partitions; i++) {
- if (adev->enforce_isolation[i] && !partition_values[i]) {
+ if (adev->enforce_isolation[i] && !partition_values[i])
/* Going from enabled to disabled */
amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, false);
- } else if (!adev->enforce_isolation[i] && partition_values[i]) {
+ else if (!adev->enforce_isolation[i] && partition_values[i])
/* Going from disabled to enabled */
amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
- amdgpu_mes_set_enforce_isolation(adev, i, true);
- }
adev->enforce_isolation[i] = partition_values[i];
}
-
mutex_unlock(&adev->enforce_isolation_mutex);
+ amdgpu_mes_update_enforce_isolation(adev);
+
return count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 32b27a1658e7..709c11cbeabd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
}
/* Fix me -- node_id is used to identify the correct MES instances in the future */
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+ uint32_t node_id, bool enable)
{
struct mes_misc_op_input op_input = {0};
int r;
@@ -1703,6 +1704,23 @@ error:
return r;
}
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+ int i, r = 0;
+
+ if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+ mutex_lock(&adev->enforce_isolation_mutex);
+ for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+ if (adev->enforce_isolation[i])
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+ else
+ r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+ }
+ mutex_unlock(&adev->enforce_isolation_mutex);
+ }
+ return r;
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index 2df2444ee892..e98ea7ede1ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)
bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);
#endif /* __AMDGPU_MES_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index babe94ade247..e5fc80ed06ea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -3815,9 +3815,10 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name)
if (err == -ENODEV) {
dev_warn(adev->dev, "cap microcode does not exist, skip\n");
err = 0;
- goto out;
+ } else {
+ dev_err(adev->dev, "fail to initialize cap microcode\n");
}
- dev_err(adev->dev, "fail to initialize cap microcode\n");
+ goto out;
}
info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ff286940ab43..262bd010a283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -309,7 +309,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
mutex_lock(&adev->mman.gtt_window_lock);
while (src_mm.remaining) {
uint64_t from, to, cur_size, tiling_flags;
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_compress_disable;
struct dma_fence *next;
/* Never copy more than 256MiB at once to avoid a timeout */
@@ -340,9 +340,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
max_com = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
num_type = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
data_format = AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
+ write_compress_disable =
+ AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_WRITE_COMPRESS_DISABLE);
copy_flags |= (AMDGPU_COPY_FLAGS_SET(MAX_COMPRESSED, max_com) |
AMDGPU_COPY_FLAGS_SET(NUMBER_TYPE, num_type) |
- AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format));
+ AMDGPU_COPY_FLAGS_SET(DATA_FORMAT, data_format) |
+ AMDGPU_COPY_FLAGS_SET(WRITE_COMPRESS_DISABLE,
+ write_compress_disable));
}
r = amdgpu_copy_buffer(ring, from, to, cur_size, resv,
@@ -2277,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
struct amdgpu_res_cursor cursor;
u64 addr;
- int r;
+ int r = 0;
if (!adev->mman.buffer_funcs_enabled)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 461fb8090ae0..208b7d1d8a27 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -119,6 +119,8 @@ struct amdgpu_copy_mem {
#define AMDGPU_COPY_FLAGS_NUMBER_TYPE_MASK 0x07
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_SHIFT 8
#define AMDGPU_COPY_FLAGS_DATA_FORMAT_MASK 0x3f
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_SHIFT 14
+#define AMDGPU_COPY_FLAGS_WRITE_COMPRESS_DISABLE_MASK 0x1
#define AMDGPU_COPY_FLAGS_SET(field, value) \
(((__u32)(value) & AMDGPU_COPY_FLAGS_##field##_MASK) << AMDGPU_COPY_FLAGS_##field##_SHIFT)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fa572b40989e..0dce4421418c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -7437,6 +7437,38 @@ static void gfx_v9_0_ring_emit_cleaner_shader(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, 0); /* RESERVED field, programmed to zero */
}
+static void gfx_v9_0_ring_begin_use_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ip_block *gfx_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+
+ amdgpu_gfx_enforce_isolation_ring_begin_use(ring);
+
+ /* Raven and PCO APUs seem to have stability issues
+ * with compute and gfxoff and gfx pg. Disable gfx pg during
+ * submission and allow again afterwards.
+ */
+ if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
+ gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_UNGATE);
+}
+
+static void gfx_v9_0_ring_end_use_compute(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_ip_block *gfx_block =
+ amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
+
+ /* Raven and PCO APUs seem to have stability issues
+ * with compute and gfxoff and gfx pg. Disable gfx pg during
+ * submission and allow again afterwards.
+ */
+ if (gfx_block && amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 1, 0))
+ gfx_v9_0_set_powergating_state(gfx_block, AMD_PG_STATE_GATE);
+
+ amdgpu_gfx_enforce_isolation_ring_end_use(ring);
+}
+
static const struct amd_ip_funcs gfx_v9_0_ip_funcs = {
.name = "gfx_v9_0",
.early_init = gfx_v9_0_early_init,
@@ -7613,8 +7645,8 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.emit_wave_limit = gfx_v9_0_emit_wave_limit,
.reset = gfx_v9_0_reset_kcq,
.emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader,
- .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use,
- .end_use = amdgpu_gfx_enforce_isolation_ring_end_use,
+ .begin_use = gfx_v9_0_ring_begin_use_compute,
+ .end_use = gfx_v9_0_ring_end_use_compute,
};
static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 65f389eb65e5..f9a4d08eef92 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1633,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 901e924e69ad..0fd0fa6ed518 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1743,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
goto failure;
}
+ r = amdgpu_mes_update_enforce_isolation(adev);
+ if (r)
+ goto failure;
+
out:
/*
* Disable KIQ ring usage from the driver once MES is enabled.
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
index 9c17df2cf37b..7e10e94624e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c
@@ -1741,11 +1741,12 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
uint32_t byte_count,
uint32_t copy_flags)
{
- uint32_t num_type, data_format, max_com;
+ uint32_t num_type, data_format, max_com, write_cm;
max_com = AMDGPU_COPY_FLAGS_GET(copy_flags, MAX_COMPRESSED);
data_format = AMDGPU_COPY_FLAGS_GET(copy_flags, DATA_FORMAT);
num_type = AMDGPU_COPY_FLAGS_GET(copy_flags, NUMBER_TYPE);
+ write_cm = AMDGPU_COPY_FLAGS_GET(copy_flags, WRITE_COMPRESS_DISABLE) ? 2 : 1;
ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
@@ -1762,7 +1763,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
if ((copy_flags & (AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)))
ib->ptr[ib->length_dw++] = SDMA_DCC_DATA_FORMAT(data_format) | SDMA_DCC_NUM_TYPE(num_type) |
((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
- ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
+ ((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(write_cm) : 0) |
SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
else
ib->ptr[ib->length_dw++] = 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 984f0e705078..651660958e5b 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -4121,7 +4121,8 @@ static const uint32_t cwsr_trap_gfx12_hex[] = {
0x0000ffff, 0x8bfe7e7e,
0x8bea6a6a, 0xb97af804,
0xbe804ec2, 0xbf94fffe,
- 0xbe804a6c, 0xbfb10000,
+ 0xbe804a6c, 0xbe804ec2,
+ 0xbf94fffe, 0xbfb10000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
index 1740e98c6719..7b9d36e5fa43 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx12.asm
@@ -1049,6 +1049,10 @@ L_SKIP_BARRIER_RESTORE:
s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
L_END_PGM:
+ // Make sure that no wave of the workgroup can exit the trap handler
+ // before the workgroup barrier state is saved.
+ s_barrier_signal -2
+ s_barrier_wait -2
s_endpgm_saved
end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 2eff37aaf827..1695dd78ede8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 68dbc0399c87..3c0ae28c5923 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
index 2b72d5b4949b..565858b9044d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |=
ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
- m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index ff417d5361c4..3014925d95ff 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+ m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m = get_mqd(mqd);
- m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+ m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index bcddd989c7f3..bd36a75309e1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -300,7 +300,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
return 0;
free_gang_ctx_bo:
- amdgpu_amdkfd_free_gtt_mem(dev->adev, (*q)->gang_ctx_bo);
+ amdgpu_amdkfd_free_gtt_mem(dev->adev, &(*q)->gang_ctx_bo);
cleanup:
uninit_queue(*q);
*q = NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
index ecccd7adbab4..24396a2c77bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -266,8 +266,8 @@ int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_prope
/* EOP buffer is not required for all ASICs */
if (properties->eop_ring_buffer_address) {
if (properties->eop_ring_buffer_size != topo_dev->node_props.eop_buffer_size) {
- pr_debug("queue eop bo size 0x%lx not equal to node eop buf size 0x%x\n",
- properties->eop_buf_bo->tbo.base.size,
+ pr_debug("queue eop bo size 0x%x not equal to node eop buf size 0x%x\n",
+ properties->eop_ring_buffer_size,
topo_dev->node_props.eop_buffer_size);
err = -EINVAL;
goto out_err_unreserve;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index ac3fd81fecef..9d9645a2d18e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1618,75 +1618,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
return false;
}
-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+struct amdgpu_dm_quirks {
+ bool aux_hpd_discon;
+ bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+ .aux_hpd_discon = false,
+ .support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.support_edp0_on_dp1 = true;
+ return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+ quirk_entries.aux_hpd_discon = true;
+ return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
},
},
{
+ .callback = aux_hpd_discon_callback,
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
},
},
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+ },
+ },
+ {
+ .callback = edp0_on_dp1_callback,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+ },
+ },
{}
/* TODO: refactor this from a fixed table to a dynamic option */
};
-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
{
- const struct dmi_system_id *dmi_id;
+ int dmi_id;
+ struct drm_device *dev = dm->ddev;
dm->aux_hpd_discon_quirk = false;
+ init_data->flags.support_edp0_on_dp1 = false;
+
+ dmi_id = dmi_check_system(dmi_quirk_table);
+
+ if (!dmi_id)
+ return;
- dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
- if (dmi_id) {
+ if (quirk_entries.aux_hpd_discon) {
dm->aux_hpd_discon_quirk = true;
- DRM_INFO("aux_hpd_discon_quirk attached\n");
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
+ }
+ if (quirk_entries.support_edp0_on_dp1) {
+ init_data->flags.support_edp0_on_dp1 = true;
+ drm_info(dev, "aux_hpd_discon_quirk attached\n");
}
}
@@ -1994,7 +2049,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
init_data.num_virtual_links = 1;
- retrieve_dmi_info(&adev->dm);
+ retrieve_dmi_info(&adev->dm, &init_data);
if (adev->dm.bb_from_dmub)
init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -7240,8 +7295,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
const struct drm_edid *drm_edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -7286,14 +7347,21 @@ static int get_modes(struct drm_connector *connector)
static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
+ struct dc_link *dc_link = aconnector->dc_link;
struct dc_sink_init_data init_params = {
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
const struct drm_edid *drm_edid;
const struct edid *edid;
+ struct i2c_adapter *ddc;
+
+ if (dc_link && dc_link->aux_mode)
+ ddc = &aconnector->dm_dp_aux.aux.ddc;
+ else
+ ddc = &aconnector->i2c->base;
- drm_edid = drm_edid_read(connector);
+ drm_edid = drm_edid_read_ddc(connector, ddc);
drm_edid_connector_update(connector, drm_edid);
if (!drm_edid) {
DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 3390f0d8420a..c4a7fd453e5f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+ }
}
/**
@@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
struct drm_device *dev = adev_to_drm(adev);
struct drm_connector *connector;
struct drm_connector_list_iter iter;
+ int i;
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
@@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
}
}
drm_connector_list_iter_end(&iter);
+
+ /* Update reference counts for HPDs */
+ for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+ if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+ drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+ }
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 45858bf1523d..e140b7a04d72 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
return false;
- return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+ /* Temporarily disable PSR-SU to avoid glitches */
+ return false;
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index cecaadf741ad..f84e795e35f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2133,7 +2133,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
dc_enable_stereo(dc, context, dc_streams, context->stream_count);
- if (context->stream_count > get_seamless_boot_stream_count(context) ||
+ if (get_seamless_boot_stream_count(context) == 0 ||
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
hwss_wait_for_no_pipes_pending(dc, context);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 520a34a42827..a45037cb4cc0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1455,7 +1455,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
/* Invalid input */
- if (!plane_state->dst_rect.width ||
+ if (!plane_state ||
+ !plane_state->dst_rect.width ||
!plane_state->dst_rect.height ||
!plane_state->src_rect.width ||
!plane_state->src_rect.height) {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 5bb8b78bf250..bf636b28e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -63,8 +63,7 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
bool should_use_dmub_lock(struct dc_link *link)
{
- if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1 ||
- link->psr_settings.psr_version == DC_PSR_VERSION_1)
+ if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1)
return true;
if (link->replay_settings.replay_feature_enabled)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 46f9c05de16e..e1d500633dfa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -29,11 +29,15 @@ dml_ccflags := $(CC_FLAGS_FPU)
dml_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-frame_warn_flag := -Wframe-larger-than=3072
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ frame_warn_limit := 3072
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
index 91c4f3b4bd5f..21fd466dba26 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile
@@ -28,15 +28,19 @@ dml2_ccflags := $(CC_FLAGS_FPU)
dml2_rcflags := $(CC_FLAGS_NO_FPU)
ifneq ($(CONFIG_FRAME_WARN),0)
-ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
-ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
-frame_warn_flag := -Wframe-larger-than=4096
-else
-frame_warn_flag := -Wframe-larger-than=3072
-endif
-else
-frame_warn_flag := -Wframe-larger-than=2048
-endif
+ ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+ ifeq ($(CONFIG_CC_IS_CLANG)$(CONFIG_COMPILE_TEST),yy)
+ frame_warn_limit := 4096
+ else
+ frame_warn_limit := 3072
+ endif
+ else
+ frame_warn_limit := 2048
+ endif
+
+ ifeq ($(call test-lt, $(CONFIG_FRAME_WARN), $(frame_warn_limit)),y)
+ frame_warn_flag := -Wframe-larger-than=$(frame_warn_limit)
+ endif
endif
subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc/dml2
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index b9c6b45f6872..0c8ec30ea672 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -1017,7 +1017,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_streams++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx);
adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]);
populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]);
@@ -1042,7 +1042,7 @@ bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_s
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_planes++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml21_surface_config_from_plane_state(in_dc, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location].surface, context->stream_status[stream_index].plane_states[plane_index]);
populate_dml21_plane_config_from_plane_state(dml_ctx, &dml_dispcfg->plane_descriptors[disp_cfg_plane_location], context->stream_status[stream_index].plane_states[plane_index], context, stream_index);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index b416320873e1..b8a34abaf519 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -786,7 +786,7 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ if (location < MAX_HPO_DP2_ENCODERS && dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
@@ -1343,7 +1343,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
- ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
@@ -1383,7 +1383,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (disp_cfg_plane_location < 0)
disp_cfg_plane_location = dml_dispcfg->num_surfaces++;
- ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
+ ASSERT(disp_cfg_plane_location >= 0 && disp_cfg_plane_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
index fe741100c0f8..d347bb06577a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn30/dcn30_hubbub.c
@@ -129,7 +129,8 @@ bool hubbub3_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
index 7fb5523f9722..b98505b240a7 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn31/dcn31_hubbub.c
@@ -750,7 +750,8 @@ static bool hubbub31_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
return wm_pending;
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
index 5264dc26cce1..32a6be543105 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn32/dcn32_hubbub.c
@@ -786,7 +786,8 @@ static bool hubbub32_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 0x1FF);*/
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
index 5eb3da8d5206..dce7269959ce 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn35/dcn35_hubbub.c
@@ -326,7 +326,8 @@ static bool hubbub35_program_watermarks(
DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, 0xA);/*hw delta*/
REG_UPDATE(DCHUBBUB_ARB_HOSTVM_CNTL, DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
- hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ if (safe_to_lower || hubbub->ctx->dc->debug.disable_stutter)
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
hubbub32_force_usr_retraining_allow(hubbub, hubbub->ctx->dc->debug.force_usr_allow);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
index be0ac613675a..0da70b50e86d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn30/dcn30_hubp.c
@@ -500,6 +500,8 @@ void hubp3_init(struct hubp *hubp)
//hubp[i].HUBPREQ_DEBUG.HUBPREQ_DEBUG[26] = 1;
REG_WRITE(HUBPREQ_DEBUG, 1 << 26);
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
+
hubp_reset(hubp);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
index edd37898d550..f3a21c623f44 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn32/dcn32_hubp.c
@@ -168,6 +168,8 @@ void hubp32_init(struct hubp *hubp)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
REG_WRITE(HUBPREQ_DEBUG_DB, 1 << 8);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_TTU_DISABLE, 0);
}
static struct hubp_funcs dcn32_hubp_funcs = {
.hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 623cde76debf..b907ad1acedd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -236,7 +236,8 @@ void dcn35_init_hw(struct dc *dc)
}
hws->funcs.init_pipes(dc, dc->current_state);
- if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control &&
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter)
dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 6a9e26905edf..7a22aef6e59c 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -78,7 +78,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
int ret = 0;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON;
- bool is_vcn = (block_type == AMD_IP_BLOCK_TYPE_UVD || block_type == AMD_IP_BLOCK_TYPE_VCN);
+ bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN;
if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state &&
(!is_vcn || adev->vcn.num_vcn_inst == 1)) {
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index 67a8e22b1126..e237ea1185a7 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
return 0;
}
static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
/* asic init will reset to the boot state */
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+ return ret;
}
static bool kv_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
index e861355ebd75..c7518b13e787 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
int temp, size = sizeof(temp);
- if (!adev->pm.dpm_enabled)
- return;
+ mutex_lock(&adev->pm.mutex);
+ if (!adev->pm.dpm_enabled) {
+ mutex_unlock(&adev->pm.mutex);
+ return;
+ }
if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
AMDGPU_PP_SENSOR_GPU_TEMP,
(void *)&temp,
@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
adev->pm.dpm.state = dpm_state;
amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index a87dcf0974bc..d6dfe2599ebe 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7786,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
if (!amdgpu_dpm)
return 0;
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
@@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
else
adev->pm.dpm_enabled = true;
amdgpu_legacy_dpm_compute_clocks(adev);
+ mutex_unlock(&adev->pm.mutex);
return ret;
}
@@ -7810,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+ cancel_work_sync(&adev->pm.dpm.thermal.work);
+
if (adev->pm.dpm_enabled) {
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm_enabled = false;
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+ mutex_unlock(&adev->pm.mutex);
}
+
return 0;
}
static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
{
- int ret;
+ int ret = 0;
struct amdgpu_device *adev = ip_block->adev;
- if (adev->pm.dpm_enabled) {
+ if (!amdgpu_dpm)
+ return 0;
+
+ if (!adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
+ mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
- if (ret)
+ if (ret) {
adev->pm.dpm_enabled = false;
- else
+ } else {
adev->pm.dpm_enabled = true;
- if (adev->pm.dpm_enabled)
amdgpu_legacy_dpm_compute_clocks(adev);
+ }
+ mutex_unlock(&adev->pm.mutex);
}
- return 0;
+
+ return ret;
}
static bool si_dpm_is_idle(void *handle)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 8ca793c222ff..ed9dac00ebfb 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -612,7 +612,8 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- if (!smu_table->hardcode_pptable) {
+ if (!smu_table->hardcode_pptable || smu_table->power_play_table_size < size) {
+ kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
index 9b2f4fe1578b..ddb6444406d2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c
@@ -1895,16 +1895,6 @@ static int smu_v14_0_allow_ih_interrupt(struct smu_context *smu)
NULL);
}
-static int smu_v14_0_process_pending_interrupt(struct smu_context *smu)
-{
- int ret = 0;
-
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
- ret = smu_v14_0_allow_ih_interrupt(smu);
-
- return ret;
-}
-
int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
{
int ret = 0;
@@ -1916,7 +1906,7 @@ int smu_v14_0_enable_thermal_alert(struct smu_context *smu)
if (ret)
return ret;
- return smu_v14_0_process_pending_interrupt(smu);
+ return smu_v14_0_allow_ih_interrupt(smu);
}
int smu_v14_0_disable_thermal_alert(struct smu_context *smu)
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
index ebccb74306a7..f30b3d5eeca5 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
@@ -160,6 +160,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms,
formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
kwb_conn->wb_layer->layer_type,
&n_formats);
+ if (!formats) {
+ kfree(kwb_conn);
+ return -ENOMEM;
+ }
err = drm_writeback_connector_init(&kms->base, wb_conn,
&komeda_wb_connector_funcs,
diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c
index 0e282b7b167c..b9eb67e3fa90 100644
--- a/drivers/gpu/drm/ast/ast_dp.c
+++ b/drivers/gpu/drm/ast/ast_dp.c
@@ -195,7 +195,7 @@ static bool __ast_dp_wait_enable(struct ast_device *ast, bool enabled)
if (enabled)
vgacrdf_test |= AST_IO_VGACRDF_DP_VIDEO_ENABLE;
- for (i = 0; i < 200; ++i) {
+ for (i = 0; i < 1000; ++i) {
if (i)
mdelay(1);
vgacrdf = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xdf,
diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c
index 007ceb281d00..56a4965e518c 100644
--- a/drivers/gpu/drm/display/drm_dp_cec.c
+++ b/drivers/gpu/drm/display/drm_dp_cec.c
@@ -311,16 +311,6 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
if (!aux->transfer)
return;
-#ifndef CONFIG_MEDIA_CEC_RC
- /*
- * CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
- * cec_allocate_adapter() if CONFIG_MEDIA_CEC_RC is undefined.
- *
- * Do this here as well to ensure the tests against cec_caps are
- * correct.
- */
- cec_caps &= ~CEC_CAP_RC;
-#endif
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -337,7 +327,9 @@ void drm_dp_cec_attach(struct drm_dp_aux *aux, u16 source_physical_address)
num_las = CEC_MAX_LOG_ADDRS;
if (aux->cec.adap) {
- if (aux->cec.adap->capabilities == cec_caps &&
+ /* Check if the adapter properties have changed */
+ if ((aux->cec.adap->capabilities & CEC_CAP_MONITOR_ALL) ==
+ (cec_caps & CEC_CAP_MONITOR_ALL) &&
aux->cec.adap->available_log_addrs == num_las) {
/* Unchanged, so just set the phys addr */
cec_s_phys_addr(aux->cec.adap, source_physical_address, false);
diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c
index da3c8521a7fa..61c7c2c588c6 100644
--- a/drivers/gpu/drm/display/drm_dp_helper.c
+++ b/drivers/gpu/drm/display/drm_dp_helper.c
@@ -2544,7 +2544,7 @@ u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
u8 bpp_increment_dpcd = dsc_dpcd[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT];
- switch (bpp_increment_dpcd) {
+ switch (bpp_increment_dpcd & DP_DSC_BITS_PER_PIXEL_MASK) {
case DP_DSC_BITS_PER_PIXEL_1_16:
return 16;
case DP_DSC_BITS_PER_PIXEL_1_8:
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b14b581c059d..02a516e77192 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: MIT
#include <linux/fb.h>
+#include <linux/vmalloc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_dma.h>
@@ -70,37 +71,102 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma_shadowed,
drm_fb_helper_damage_range,
drm_fb_helper_damage_area);
-static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+static void drm_fbdev_dma_shadowed_fb_destroy(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+ void *shadow = info->screen_buffer;
+
+ if (!fb_helper->dev)
+ return;
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+ if (info->fbdefio)
+ fb_deferred_io_cleanup(info);
+ drm_fb_helper_fini(fb_helper);
+ vfree(shadow);
- return fb_deferred_io_mmap(info, vma);
+ drm_client_buffer_vunmap(fb_helper->buffer);
+ drm_client_framebuffer_delete(fb_helper->buffer);
+ drm_client_release(&fb_helper->client);
+ drm_fb_helper_unprepare(fb_helper);
+ kfree(fb_helper);
}
-static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+static const struct fb_ops drm_fbdev_dma_shadowed_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
- __FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
+ FB_DEFAULT_DEFERRED_OPS(drm_fbdev_dma_shadowed),
DRM_FB_HELPER_DEFAULT_OPS,
- __FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
- .fb_destroy = drm_fbdev_dma_fb_destroy,
+ .fb_destroy = drm_fbdev_dma_shadowed_fb_destroy,
};
/*
* struct drm_fb_helper
*/
+static void drm_fbdev_dma_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct iosys_map *dst)
+{
+ struct drm_framebuffer *fb = fb_helper->fb;
+ size_t offset = clip->y1 * fb->pitches[0];
+ size_t len = clip->x2 - clip->x1;
+ unsigned int y;
+ void *src;
+
+ switch (drm_format_info_bpp(fb->format, 0)) {
+ case 1:
+ offset += clip->x1 / 8;
+ len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
+ break;
+ case 2:
+ offset += clip->x1 / 4;
+ len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
+ break;
+ case 4:
+ offset += clip->x1 / 2;
+ len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
+ break;
+ default:
+ offset += clip->x1 * fb->format->cpp[0];
+ len *= fb->format->cpp[0];
+ break;
+ }
+
+ src = fb_helper->info->screen_buffer + offset;
+ iosys_map_incr(dst, offset); /* go to first pixel within clip rect */
+
+ for (y = clip->y1; y < clip->y2; y++) {
+ iosys_map_memcpy_to(dst, 0, src, len);
+ iosys_map_incr(dst, fb->pitches[0]);
+ src += fb->pitches[0];
+ }
+}
+
+static int drm_fbdev_dma_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct iosys_map dst;
+
+ /*
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ dst = buffer->map;
+ drm_fbdev_dma_damage_blit_real(fb_helper, clip, &dst);
+
+ mutex_unlock(&fb_helper->lock);
+
+ return 0;
+}
static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
struct drm_clip_rect *clip)
{
@@ -112,6 +178,10 @@ static int drm_fbdev_dma_helper_fb_dirty(struct drm_fb_helper *helper,
return 0;
if (helper->fb->funcs->dirty) {
+ ret = drm_fbdev_dma_damage_blit(helper, clip);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ return ret;
+
ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
return ret;
@@ -128,14 +198,80 @@ static const struct drm_fb_helper_funcs drm_fbdev_dma_helper_funcs = {
* struct drm_fb_helper
*/
+static int drm_fbdev_dma_driver_fbdev_probe_tail(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_device *dev = fb_helper->dev;
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct drm_gem_dma_object *dma_obj = to_drm_gem_dma_obj(buffer->gem);
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct fb_info *info = fb_helper->info;
+ struct iosys_map map = buffer->map;
+
+ info->fbops = &drm_fbdev_dma_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ if (dma_obj->map_noncoherent)
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_size = sizes->surface_height * fb->pitches[0];
+ info->screen_buffer = map.vaddr;
+ if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
+ if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
+ info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
+ }
+ info->fix.smem_len = info->screen_size;
+
+ return 0;
+}
+
+static int drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct fb_info *info = fb_helper->info;
+ size_t screen_size = buffer->gem->size;
+ void *screen_buffer;
+ int ret;
+
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus create
+ * a shadow buffer in system memory.
+ */
+ screen_buffer = vzalloc(screen_size);
+ if (!screen_buffer)
+ return -ENOMEM;
+
+ info->fbops = &drm_fbdev_dma_shadowed_fb_ops;
+
+ /* screen */
+ info->flags |= FBINFO_VIRTFB; /* system memory */
+ info->flags |= FBINFO_READS_FAST; /* signal caching */
+ info->screen_buffer = screen_buffer;
+ info->fix.smem_len = screen_size;
+
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_vfree;
+
+ return 0;
+
+err_vfree:
+ vfree(screen_buffer);
+ return ret;
+}
+
int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
- bool use_deferred_io = false;
struct drm_client_buffer *buffer;
- struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
struct fb_info *info;
u32 format;
@@ -152,19 +288,9 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
sizes->surface_height, format);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- dma_obj = to_drm_gem_dma_obj(buffer->gem);
fb = buffer->fb;
- /*
- * Deferred I/O requires struct page for framebuffer memory,
- * which is not guaranteed for all DMA ranges. We thus only
- * install deferred I/O if we have a framebuffer that requires
- * it.
- */
- if (fb->funcs->dirty)
- use_deferred_io = true;
-
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -185,45 +311,12 @@ int drm_fbdev_dma_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- if (use_deferred_io)
- info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ if (fb->funcs->dirty)
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail_shadowed(fb_helper, sizes);
else
- info->fbops = &drm_fbdev_dma_fb_ops;
-
- /* screen */
- info->flags |= FBINFO_VIRTFB; /* system memory */
- if (dma_obj->map_noncoherent)
- info->flags |= FBINFO_READS_FAST; /* signal caching */
- info->screen_size = sizes->surface_height * fb->pitches[0];
- info->screen_buffer = map.vaddr;
- if (!(info->flags & FBINFO_HIDE_SMEM_START)) {
- if (!drm_WARN_ON(dev, is_vmalloc_addr(info->screen_buffer)))
- info->fix.smem_start = page_to_phys(virt_to_page(info->screen_buffer));
- }
- info->fix.smem_len = info->screen_size;
-
- /*
- * Only set up deferred I/O if the screen buffer supports
- * it. If this disagrees with the previous test for ->dirty,
- * mmap on the /dev/fb file might not work correctly.
- */
- if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
- unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
-
- if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
- use_deferred_io = false;
- }
-
- /* deferred I/O */
- if (use_deferred_io) {
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
-
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
- }
+ ret = drm_fbdev_dma_driver_fbdev_probe_tail(fb_helper, sizes);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
return 0;
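
The damage-blit helper added above amounts to a per-scanline copy from the vmalloc'ed shadow buffer into the DMA-backed framebuffer, limited to the dirty clip rectangle. The following is a minimal standalone sketch of that copy loop, assuming a plain 32-bit-per-pixel layout and ordinary pointers; the real code also handles sub-byte formats and writes through struct iosys_map, and the buffer size and clip values below are made up for illustration.

/*
 * Illustrative only: copy one clip rectangle, row by row, from a shadow
 * buffer into the destination framebuffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct clip { unsigned int x1, y1, x2, y2; };	/* x2/y2 are exclusive */

static void damage_blit(uint32_t *dst, const uint32_t *shadow,
			unsigned int pitch_px, const struct clip *c)
{
	size_t offset = (size_t)c->y1 * pitch_px + c->x1;	/* first pixel in clip */
	size_t len = (size_t)(c->x2 - c->x1) * sizeof(uint32_t);
	unsigned int y;

	for (y = c->y1; y < c->y2; y++) {
		memcpy(dst + offset, shadow + offset, len);
		offset += pitch_px;	/* advance one scanline */
	}
}

int main(void)
{
	enum { W = 8, H = 4 };
	uint32_t shadow[W * H], fb[W * H] = { 0 };
	struct clip c = { .x1 = 2, .y1 = 1, .x2 = 6, .y2 = 3 };
	unsigned int i;

	for (i = 0; i < W * H; i++)
		shadow[i] = i;

	damage_blit(fb, shadow, W, &c);
	printf("fb[1 * W + 2] = %u, fb[0] = %u\n", fb[1 * W + 2], fb[0]);	/* 10, 0 */
	return 0;
}
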
diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
index 93b8d32e3be1..98d77d74999d 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig
+++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig
@@ -4,6 +4,8 @@ config DRM_HISI_HIBMC
depends on DRM && PCI
depends on MMU
select DRM_CLIENT_SELECTION
+ select DRM_DISPLAY_HELPER
+ select DRM_DISPLAY_DP_HELPER
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index c977b74f82f0..82bf6c654de2 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -809,8 +809,8 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* select data lane width */
tmp = intel_de_read(display,
TRANS_DDI_FUNC_CTL(display, dsi_trans));
- tmp &= ~DDI_PORT_WIDTH_MASK;
- tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
+ tmp &= ~TRANS_DDI_PORT_WIDTH_MASK;
+ tmp |= TRANS_DDI_PORT_WIDTH(intel_dsi->lane_count);
/* select input pipe */
tmp &= ~TRANS_DDI_EDP_INPUT_MASK;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index fc1e517e074a..7e6ce905bdaf 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -41,8 +41,9 @@ static u32 scale(u32 source_val,
{
u64 target_val;
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
+ if (WARN_ON(source_min >= source_max) ||
+ WARN_ON(target_min > target_max))
+ return target_min;
/* defensive */
source_val = clamp(source_val, source_min, source_max);
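
The tightened guard in scale() above exists because the helper divides by the source span when it remaps a value from [source_min, source_max] onto [target_min, target_max], so a degenerate source range (source_min >= source_max) now bails out to target_min instead of dividing by zero. A rough userspace sketch of that shape follows; scale_range() and its rounding are illustrative assumptions, not the driver's exact implementation.

#include <stdint.h>
#include <stdio.h>

/* Linear remap with defensive clamping, returning tmin for bad ranges. */
static uint32_t scale_range(uint32_t v, uint32_t smin, uint32_t smax,
			    uint32_t tmin, uint32_t tmax)
{
	uint64_t num;

	if (smin >= smax || tmin > tmax)	/* would divide by zero or invert */
		return tmin;

	if (v < smin)
		v = smin;
	else if (v > smax)
		v = smax;

	num = (uint64_t)(v - smin) * (tmax - tmin);
	return tmin + (uint32_t)(num / (smax - smin));
}

int main(void)
{
	printf("%u\n", scale_range(50, 0, 100, 0, 255));	/* 127 */
	printf("%u\n", scale_range(7, 5, 5, 0, 255));		/* degenerate range -> 0 */
	return 0;
}
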
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index acb986bc1f33..ff2cf3daa7a2 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -658,7 +658,6 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
u32 ctl;
if (DISPLAY_VER(dev_priv) >= 11)
@@ -678,8 +677,7 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
TRANS_DDI_PORT_SYNC_MASTER_SELECT_MASK);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state) ||
- (!is_mst && intel_dp_is_uhbr(crtc_state))) {
+ if (!intel_dp_mst_is_master_trans(crtc_state)) {
ctl &= ~(TGL_TRANS_DDI_PORT_MASK |
TRANS_DDI_MODE_SELECT_MASK);
}
@@ -868,7 +866,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
encoder->base.base.id, encoder->base.name);
if (!mst_pipe_mask && dp128b132b_pipe_mask) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If we don't have 8b/10b MST, but have more than one
@@ -880,7 +878,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
* we don't expect MST to have been enabled at that point, and
* can assume it's SST.
*/
- if (hweight8(dp128b132b_pipe_mask) > 1 || intel_dp->is_mst)
+ if (hweight8(dp128b132b_pipe_mask) > 1 ||
+ intel_dp_mst_encoder_active_links(dig_port))
mst_pipe_mask = dp128b132b_pipe_mask;
}
@@ -3134,7 +3133,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D3);
if (DISPLAY_VER(dev_priv) >= 12) {
- if (is_mst) {
+ if (is_mst || intel_dp_is_uhbr(old_crtc_state)) {
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
intel_de_rmw(dev_priv,
@@ -3487,7 +3486,7 @@ static void intel_ddi_enable_hdmi(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, XELPDP_PORT_BUF_CTL1(dev_priv, port),
XELPDP_PORT_WIDTH_MASK | XELPDP_PORT_REVERSAL, port_buf);
- buf_ctl |= DDI_PORT_WIDTH(lane_count);
+ buf_ctl |= DDI_PORT_WIDTH(crtc_state->lane_count);
if (DISPLAY_VER(dev_priv) >= 20)
buf_ctl |= XE2LPD_DDI_BUF_D2D_LINK_ENABLE;
@@ -4153,13 +4152,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_DP_MST) {
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
} else if (ddi_mode == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B && HAS_DP20(display)) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
/*
* If this is true, we know we're being called from mst stream
* encoder's ->get_config().
*/
- if (intel_dp->is_mst)
+ if (intel_dp_mst_encoder_active_links(dig_port))
intel_ddi_read_func_ctl_dp_mst(encoder, pipe_config, ddi_func_ctl);
else
intel_ddi_read_func_ctl_dp_sst(encoder, pipe_config, ddi_func_ctl);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 4271da219b41..41128469f12a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -6628,12 +6628,30 @@ static int intel_async_flip_check_hw(struct intel_atomic_state *state, struct in
static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
+ const struct intel_plane_state *plane_state;
struct intel_crtc_state *crtc_state;
+ struct intel_plane *plane;
struct intel_crtc *crtc;
u8 affected_pipes = 0;
u8 modeset_pipes = 0;
int i;
+ /*
+ * Any plane which is in use by the joiner needs its crtc.
+ * Pull those in first as this will not have happened yet
+ * if the plane remains disabled according to uapi.
+ */
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ crtc = to_intel_crtc(plane_state->hw.crtc);
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ /* Now pull in all joined crtcs */
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
affected_pipes |= crtc_state->joiner_pipes;
if (intel_crtc_needs_modeset(crtc_state))
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index f1f3b1bb1e89..aa77ddcee42c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1791,7 +1791,7 @@ int intel_dp_dsc_max_src_input_bpc(struct intel_display *display)
if (DISPLAY_VER(display) == 11)
return 10;
- return 0;
+ return intel_dp_dsc_min_src_input_bpc();
}
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
@@ -2072,11 +2072,10 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp)
+ for (i = ARRAY_SIZE(valid_dsc_bpp) - 1; i >= 0; i--) {
+ if (valid_dsc_bpp[i] < dsc_min_bpp ||
+ valid_dsc_bpp[i] > dsc_max_bpp)
continue;
- if (valid_dsc_bpp[i] > dsc_max_bpp)
- break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
@@ -2829,7 +2828,6 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_ADAPTIVE_SYNC);
- /* Currently only DP_AS_SDP_AVT_FIXED_VTOTAL mode supported */
as_sdp->sdp_type = DP_SDP_ADAPTIVE_SYNC;
as_sdp->length = 0x9;
as_sdp->duration_incr_ms = 0;
@@ -2840,7 +2838,7 @@ static void intel_dp_compute_as_sdp(struct intel_dp *intel_dp,
as_sdp->target_rr = drm_mode_vrefresh(adjusted_mode);
as_sdp->target_rr_divider = true;
} else {
- as_sdp->mode = DP_AS_SDP_AVT_FIXED_VTOTAL;
+ as_sdp->mode = DP_AS_SDP_AVT_DYNAMIC_VTOTAL;
as_sdp->vtotal = adjusted_mode->vtotal;
as_sdp->target_rr = 0;
}
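
On the reversed valid_dsc_bpp loop earlier in this file's hunks: walking the table from its highest entry downwards and skipping anything outside [dsc_min_bpp, dsc_max_bpp] means larger compressed bpp values are tried first. The sketch below shows only that filtering and ordering; the table contents are made up, and the real loop goes on to call dsc_compute_link_config() for each surviving candidate.

#include <stdio.h>

int main(void)
{
	/* Made-up table of candidate compressed bpp values, lowest first. */
	static const int valid_bpp[] = { 6, 8, 10, 12, 15 };
	const int min_bpp = 8, max_bpp = 13;
	int chosen = -1;
	int i;

	for (i = (int)(sizeof(valid_bpp) / sizeof(valid_bpp[0])) - 1; i >= 0; i--) {
		if (valid_bpp[i] < min_bpp || valid_bpp[i] > max_bpp)
			continue;		/* outside the allowed range */
		chosen = valid_bpp[i];		/* first in-range entry from the top */
		break;
	}

	printf("chosen bpp: %d\n", chosen);	/* 12 */
	return 0;
}
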
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 8b1977cfec50..6696a32cdd3e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -1563,7 +1563,7 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) {
lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n");
- return false;
+ goto out;
}
if (intel_dp_128b132b_lane_eq(intel_dp, crtc_state) &&
@@ -1575,6 +1575,19 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp,
passed ? "passed" : "failed",
crtc_state->port_clock, crtc_state->lane_count);
+out:
+ /*
+ * Ensure the training pattern is set to TPS2 even in case of a failure,
+ * just as it is at the end of a passing link training and as the
+ * transcoder expects. Leaving TPS1 set (and
+ * disabling the link train mode in DP_TP_CTL later from TPS1 directly)
+ * would result in a stuck transcoder HW state and flip-done timeouts
+ * later in the modeset sequence.
+ */
+ if (!passed)
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_2);
+
return passed;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c44fc7dd86c..86d6185fda50 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -341,6 +341,10 @@ int intel_dp_mtp_tu_compute_config(struct intel_dp *intel_dp,
break;
}
+
+ /* Allow using zero step to indicate one try */
+ if (!step)
+ break;
}
if (slots < 0) {
@@ -1863,7 +1867,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
/* create encoders */
mst_stream_encoders_create(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, display->drm,
- &intel_dp->aux, 16, 3, conn_base_id);
+ &intel_dp->aux, 16,
+ INTEL_NUM_PIPES(display), conn_base_id);
if (ret) {
intel_dp->mst_mgr.cbs = NULL;
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 7464b44c8bb3..1bab7c34a794 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -41,7 +41,7 @@ intel_hdcp_adjust_hdcp_line_rekeying(struct intel_encoder *encoder,
u32 rekey_bit = 0;
/* Here we assume HDMI is in TMDS mode of operation */
- if (encoder->type != INTEL_OUTPUT_HDMI)
+ if (!intel_encoder_is_hdmi(encoder))
return;
if (DISPLAY_VER(display) >= 30) {
@@ -2188,6 +2188,19 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
drm_dbg_kms(display->drm,
"HDCP2.2 Downstream topology change\n");
+
+ ret = hdcp2_authenticate_repeater_topology(connector);
+ if (!ret) {
+ intel_hdcp_update_value(connector,
+ DRM_MODE_CONTENT_PROTECTION_ENABLED,
+ true);
+ goto out;
+ }
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Repeater topology auth failed.(%d)\n",
+ connector->base.base.id, connector->base.name,
+ ret);
} else {
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] HDCP2.2 link failed, retrying auth\n",
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index ff9764cac1e7..80e558042d97 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -106,8 +106,6 @@ static const u32 icl_sdr_y_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_sdr_uv_plane_formats[] = {
@@ -134,8 +132,6 @@ static const u32 icl_sdr_uv_plane_formats[] = {
DRM_FORMAT_Y216,
DRM_FORMAT_XYUV8888,
DRM_FORMAT_XVYU2101010,
- DRM_FORMAT_XVYU12_16161616,
- DRM_FORMAT_XVYU16161616,
};
static const u32 icl_hdr_plane_formats[] = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index fe69f2c8527d..ae3343c81a64 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -209,8 +209,6 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
struct address_space *mapping = obj->base.filp->f_mapping;
unsigned int max_segment = i915_sg_segment_size(i915->drm.dev);
struct sg_table *st;
- struct sgt_iter sgt_iter;
- struct page *page;
int ret;
/*
@@ -239,9 +237,7 @@ rebuild_st:
* for PAGE_SIZE chunks instead may be helpful.
*/
if (max_segment > PAGE_SIZE) {
- for_each_sgt_page(page, sgt_iter, st)
- put_page(page);
- sg_free_table(st);
+ shmem_sg_free_table(st, mapping, false, false);
kfree(st);
max_segment = PAGE_SIZE;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 12f1ba7ca9c1..3fce5c000144 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1469,6 +1469,19 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc)
spin_unlock_irqrestore(&guc->timestamp.lock, flags);
}
+static void __update_guc_busyness_running_state(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->timestamp.lock, flags);
+ for_each_engine(engine, gt, id)
+ engine->stats.guc.running = false;
+ spin_unlock_irqrestore(&guc->timestamp.lock, flags);
+}
+
static void __update_guc_busyness_stats(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
@@ -1619,6 +1632,9 @@ void intel_guc_busyness_park(struct intel_gt *gt)
if (!guc_submission_initialized(guc))
return;
+ /* Assume no engines are running and set running state to false */
+ __update_guc_busyness_running_state(guc);
+
/*
* There is a race with suspend flow where the worker runs after suspend
* and causes an unclaimed register access warning. Cancel the worker
@@ -3433,10 +3449,10 @@ static inline int guc_lrc_desc_unpin(struct intel_context *ce)
*/
ret = deregister_context(ce, ce->guc_id.id);
if (ret) {
- spin_lock(&ce->guc_state.lock);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
clr_context_destroyed(ce);
- spin_unlock(&ce->guc_state.lock);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
/*
* As gt-pm is awake at function entry, intel_wakeref_put_async merely decrements
* the wakeref immediately but per function spec usage call this after unlock.
@@ -5519,12 +5535,20 @@ static inline void guc_log_context(struct drm_printer *p,
{
drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
+ if (intel_context_pin_if_active(ce)) {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ intel_context_unpin(ce);
+ } else {
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory not pinned\n",
+ ce->ring->head);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory not pinned\n",
+ ce->ring->tail);
+ }
drm_printf(p, "\t\tContext Pin Count: %u\n",
atomic_read(&ce->pin_count));
drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 765e6c0528fb..786c727aea45 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3633,7 +3633,7 @@ enum skl_power_gate {
#define DDI_BUF_IS_IDLE (1 << 7)
#define DDI_BUF_CTL_TC_PHY_OWNERSHIP REG_BIT(6)
#define DDI_A_4_LANES (1 << 4)
-#define DDI_PORT_WIDTH(width) (((width) - 1) << 1)
+#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)
#define DDI_PORT_WIDTH_MASK (7 << 1)
#define DDI_PORT_WIDTH_SHIFT 1
#define DDI_INIT_DISPLAY_DETECTED (1 << 0)
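
The DDI_PORT_WIDTH() change above special-cases the 3-lane configuration: widths 1, 2 and 4 keep the old (width - 1) encoding, while width 3 now encodes as 4 in the same 3-bit field. The tiny standalone program below mirrors the macro from the hunk (it is not kernel code) and just prints the resulting field value per lane count.

#include <stdio.h>

#define DDI_PORT_WIDTH(width) (((width) == 3 ? 4 : ((width) - 1)) << 1)

int main(void)
{
	int lanes;

	for (lanes = 1; lanes <= 4; lanes++)
		printf("lanes=%d -> field=0x%x\n", lanes, DDI_PORT_WIDTH(lanes));
	/* prints 0x0, 0x2, 0x8, 0x6 */
	return 0;
}
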
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 5c397a2df70e..5d27e1c733c5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -168,7 +168,7 @@ static int igt_ppgtt_alloc(void *arg)
return PTR_ERR(ppgtt);
if (!ppgtt->vm.allocate_va_range)
- goto err_ppgtt_cleanup;
+ goto ppgtt_vm_put;
/*
* While we only allocate the page tables here and so we could
@@ -236,7 +236,7 @@ err_ppgtt_cleanup:
goto retry;
}
i915_gem_ww_ctx_fini(&ww);
-
+ppgtt_vm_put:
i915_vm_put(&ppgtt->vm);
return err;
}
diff --git a/drivers/gpu/drm/imagination/Makefile b/drivers/gpu/drm/imagination/Makefile
index 9bc6a3884c22..3d9d4d40fb80 100644
--- a/drivers/gpu/drm/imagination/Makefile
+++ b/drivers/gpu/drm/imagination/Makefile
@@ -1,8 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only OR MIT
# Copyright (c) 2023 Imagination Technologies Ltd.
-subdir-ccflags-y := -I$(src)
-
powervr-y := \
pvr_ccb.o \
pvr_cccb.o \
diff --git a/drivers/gpu/drm/imagination/pvr_fw_meta.c b/drivers/gpu/drm/imagination/pvr_fw_meta.c
index c39beb70c317..6d13864851fc 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_meta.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_meta.c
@@ -527,8 +527,10 @@ pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
static void
pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj)
{
- pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start,
- fw_obj->fw_mm_node.size);
+ struct pvr_gem_object *pvr_obj = fw_obj->gem;
+
+ pvr_vm_unmap_obj(pvr_dev->kernel_vm_ctx, pvr_obj,
+ fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size);
}
static bool
diff --git a/drivers/gpu/drm/imagination/pvr_fw_trace.c b/drivers/gpu/drm/imagination/pvr_fw_trace.c
index 73707daa4e52..5dbb636d7d4f 100644
--- a/drivers/gpu/drm/imagination/pvr_fw_trace.c
+++ b/drivers/gpu/drm/imagination/pvr_fw_trace.c
@@ -333,8 +333,8 @@ static int fw_trace_seq_show(struct seq_file *s, void *v)
if (sf_id == ROGUE_FW_SF_LAST)
return -EINVAL;
- timestamp = read_fw_trace(trace_seq_data, 1) |
- ((u64)read_fw_trace(trace_seq_data, 2) << 32);
+ timestamp = ((u64)read_fw_trace(trace_seq_data, 1) << 32) |
+ read_fw_trace(trace_seq_data, 2);
timestamp = (timestamp & ~ROGUE_FWT_TIMESTAMP_TIME_CLRMSK) >>
ROGUE_FWT_TIMESTAMP_TIME_SHIFT;
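
The pvr_fw_trace.c fix above is a half-word swap: the first trace word carries the upper 32 bits of the 64-bit timestamp and the second carries the lower 32 bits, so the shift belongs on the first word. A small sketch with made-up word values shows the old and new compositions side by side.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t word1 = 0x00000002;	/* high half of the timestamp */
	uint32_t word2 = 0x00000001;	/* low half of the timestamp */

	uint64_t before = (uint64_t)word1 | ((uint64_t)word2 << 32);
	uint64_t after  = ((uint64_t)word1 << 32) | word2;

	printf("before=0x%016" PRIx64 " after=0x%016" PRIx64 "\n", before, after);
	return 0;
}
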
diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c
index c4f08432882b..43411be930a2 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.c
+++ b/drivers/gpu/drm/imagination/pvr_queue.c
@@ -109,12 +109,20 @@ pvr_queue_fence_get_driver_name(struct dma_fence *f)
return PVR_DRIVER_NAME;
}
+static void pvr_queue_fence_release_work(struct work_struct *w)
+{
+ struct pvr_queue_fence *fence = container_of(w, struct pvr_queue_fence, release_work);
+
+ pvr_context_put(fence->queue->ctx);
+ dma_fence_free(&fence->base);
+}
+
static void pvr_queue_fence_release(struct dma_fence *f)
{
struct pvr_queue_fence *fence = container_of(f, struct pvr_queue_fence, base);
+ struct pvr_device *pvr_dev = fence->queue->ctx->pvr_dev;
- pvr_context_put(fence->queue->ctx);
- dma_fence_free(f);
+ queue_work(pvr_dev->sched_wq, &fence->release_work);
}
static const char *
@@ -268,6 +276,7 @@ pvr_queue_fence_init(struct dma_fence *f,
pvr_context_get(queue->ctx);
fence->queue = queue;
+ INIT_WORK(&fence->release_work, pvr_queue_fence_release_work);
dma_fence_init(&fence->base, fence_ops,
&fence_ctx->lock, fence_ctx->id,
atomic_inc_return(&fence_ctx->seqno));
@@ -304,8 +313,9 @@ pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
static void
pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue)
{
- pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
- &queue->job_fence_ctx);
+ if (!fence->ops)
+ pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops,
+ &queue->job_fence_ctx);
}
/**
diff --git a/drivers/gpu/drm/imagination/pvr_queue.h b/drivers/gpu/drm/imagination/pvr_queue.h
index e06ced69302f..93fe9ac9f58c 100644
--- a/drivers/gpu/drm/imagination/pvr_queue.h
+++ b/drivers/gpu/drm/imagination/pvr_queue.h
@@ -5,6 +5,7 @@
#define PVR_QUEUE_H
#include <drm/gpu_scheduler.h>
+#include <linux/workqueue.h>
#include "pvr_cccb.h"
#include "pvr_device.h"
@@ -63,6 +64,9 @@ struct pvr_queue_fence {
/** @queue: Queue that created this fence. */
struct pvr_queue *queue;
+
+ /** @release_work: Fence release work structure. */
+ struct work_struct release_work;
};
/**
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index 363f885a7098..2896fa7501b1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -293,8 +293,9 @@ err_bind_op_fini:
static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
- struct pvr_vm_context *vm_ctx, u64 device_addr,
- u64 size)
+ struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
int err;
@@ -318,6 +319,7 @@ pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
goto err_bind_op_fini;
}
+ bind_op->pvr_obj = pvr_obj;
bind_op->vm_ctx = vm_ctx;
bind_op->device_addr = device_addr;
bind_op->size = size;
@@ -598,20 +600,6 @@ err_free:
}
/**
- * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
- * @vm_ctx: Target VM context.
- *
- * This function ensures that no mappings are left dangling by unmapping them
- * all in order of ascending device-virtual address.
- */
-void
-pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
-{
- WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
- vm_ctx->gpuvm_mgr.mm_range));
-}
-
-/**
* pvr_vm_context_release() - Teardown a VM context.
* @ref_count: Pointer to reference counter of the VM context.
*
@@ -703,11 +691,7 @@ pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
- /* Unmap operations don't have an object to lock. */
- if (!pvr_obj)
- return 0;
-
- /* Acquire lock on the GEM being mapped. */
+ /* Acquire lock on the GEM object being mapped/unmapped. */
return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}
@@ -772,8 +756,10 @@ err_cleanup:
}
/**
- * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
+ * memory.
* @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
* @device_addr: Virtual device address at the start of the target mapping.
* @size: Size of the target mapping.
*
@@ -784,9 +770,13 @@ err_cleanup:
* * Any error encountered while performing internal operations required to
* destroy the mapping (returned from pvr_vm_gpuva_unmap or
* pvr_vm_gpuva_remap).
+ *
+ * The vm_ctx->lock must be held when calling this function.
*/
-int
-pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+static int
+pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
{
struct pvr_vm_bind_op bind_op = {0};
struct drm_gpuvm_exec vm_exec = {
@@ -799,11 +789,13 @@ pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
},
};
- int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
- size);
+ int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
+ device_addr, size);
if (err)
return err;
+ pvr_gem_object_get(pvr_obj);
+
err = drm_gpuvm_exec_lock(&vm_exec);
if (err)
goto err_cleanup;
@@ -818,6 +810,96 @@ err_cleanup:
return err;
}
+/**
+ * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
+ * memory.
+ * @vm_ctx: Target VM context.
+ * @pvr_obj: Target PowerVR memory object.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size)
+{
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
+ * @vm_ctx: Target VM context.
+ * @device_addr: Virtual device address at the start of the target mapping.
+ * @size: Size of the target mapping.
+ *
+ * Return:
+ * * 0 on success,
+ * * Any error encountered by drm_gpuva_find,
+ * * Any error encountered by pvr_vm_unmap_obj_locked.
+ */
+int
+pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
+{
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+ int err;
+
+ mutex_lock(&vm_ctx->lock);
+
+ va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
+ if (va) {
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+ err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range);
+ } else {
+ err = -ENOENT;
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+
+ return err;
+}
+
+/**
+ * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
+ * @vm_ctx: Target VM context.
+ *
+ * This function ensures that no mappings are left dangling by unmapping them
+ * all in order of ascending device-virtual address.
+ */
+void
+pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
+{
+ mutex_lock(&vm_ctx->lock);
+
+ for (;;) {
+ struct pvr_gem_object *pvr_obj;
+ struct drm_gpuva *va;
+
+ va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
+ vm_ctx->gpuvm_mgr.mm_start,
+ vm_ctx->gpuvm_mgr.mm_range);
+ if (!va)
+ break;
+
+ pvr_obj = gem_to_pvr_gem(va->gem.obj);
+
+ WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
+ va->va.addr, va->va.range));
+ }
+
+ mutex_unlock(&vm_ctx->lock);
+}
+
/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
{
diff --git a/drivers/gpu/drm/imagination/pvr_vm.h b/drivers/gpu/drm/imagination/pvr_vm.h
index 79406243617c..b0528dffa7f1 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.h
+++ b/drivers/gpu/drm/imagination/pvr_vm.h
@@ -38,6 +38,9 @@ struct pvr_vm_context *pvr_vm_create_context(struct pvr_device *pvr_dev,
int pvr_vm_map(struct pvr_vm_context *vm_ctx,
struct pvr_gem_object *pvr_obj, u64 pvr_obj_offset,
u64 device_addr, u64 size);
+int pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx,
+ struct pvr_gem_object *pvr_obj,
+ u64 device_addr, u64 size);
int pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size);
void pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 65d38b25c070..699b0dd34b18 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -813,10 +813,10 @@ static int a6xx_gmu_fw_load(struct a6xx_gmu *gmu)
}
ver = gmu_read(gmu, REG_A6XX_GMU_CORE_FW_VERSION);
- DRM_INFO("Loaded GMU firmware v%u.%u.%u\n",
- FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
- FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
- FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
+ DRM_INFO_ONCE("Loaded GMU firmware v%u.%u.%u\n",
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MAJOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_MINOR__MASK, ver),
+ FIELD_GET(A6XX_GMU_CORE_FW_VERSION_STEP__MASK, ver));
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 421afacb7248..36cc9dbc00b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -297,7 +297,7 @@ static const struct dpu_wb_cfg sm8150_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 641023b102bf..e8eacdb47967 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -304,7 +304,7 @@ static const struct dpu_wb_cfg sc8180x_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
index 621a2140f675..d761ed705bac 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_3_sm6150.h
@@ -116,14 +116,12 @@ static const struct dpu_lm_cfg sm6150_lm[] = {
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_0,
.dspp = DSPP_0,
- .lm_pair = LM_1,
}, {
.name = "lm_1", .id = LM_1,
.base = 0x45000, .len = 0x320,
.features = MIXER_QCM2290_MASK,
.sblk = &sdm845_lm_sblk,
.pingpong = PINGPONG_1,
- .lm_pair = LM_0,
}, {
.name = "lm_2", .id = LM_2,
.base = 0x46000, .len = 0x320,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index d039b96beb97..76f60a2df7a8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -144,7 +144,7 @@ static const struct dpu_wb_cfg sm6125_wb[] = {
{
.name = "wb_2", .id = WB_2,
.base = 0x65000, .len = 0x2c8,
- .features = WB_SDM845_MASK,
+ .features = WB_SM8250_MASK,
.format_list = wb2_formats_rgb,
.num_formats = ARRAY_SIZE(wb2_formats_rgb),
.clk_ctrl = DPU_CLK_CTRL_WB2,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 7191b1a6d41b..e5dcd41a361f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1228,8 +1228,6 @@ static int dpu_crtc_reassign_planes(struct drm_crtc *crtc, struct drm_crtc_state
done:
kfree(states);
return ret;
-
- return 0;
}
static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 5172ab4dea99..48e6e8d74c85 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -2281,6 +2281,9 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
}
+ if (phys_enc->hw_pp && phys_enc->hw_pp->ops.setup_dither)
+ phys_enc->hw_pp->ops.setup_dither(phys_enc->hw_pp, NULL);
+
/* reset the merge 3D HW block */
if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
index 657200401f57..cec6d4e8baec 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.c
@@ -52,6 +52,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
u32 slice_last_group_size;
u32 det_thresh_flatness;
bool is_cmd_mode = !(mode & DSC_MODE_VIDEO);
+ bool input_10_bits = dsc->bits_per_component == 10;
DPU_REG_WRITE(c, DSC_COMMON_MODE, mode);
@@ -68,7 +69,7 @@ static void dpu_hw_dsc_config(struct dpu_hw_dsc *hw_dsc,
data |= (dsc->line_buf_depth << 3);
data |= (dsc->simple_422 << 2);
data |= (dsc->convert_rgb << 1);
- data |= dsc->bits_per_component;
+ data |= input_10_bits;
DPU_REG_WRITE(c, DSC_ENC, data);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index ad19330de61a..562a3f4c5238 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -272,7 +272,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
if (cap & BIT(DPU_MDP_VSYNC_SEL))
ops->setup_vsync_source = dpu_hw_setup_vsync_sel;
- else
+ else if (!(cap & BIT(DPU_MDP_PERIPH_0_REMOVED)))
ops->setup_vsync_source = dpu_hw_setup_wd_timer;
ops->get_safe_status = dpu_hw_get_safe_status;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index 098abc2c0003..af3e541f60c3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1164,7 +1164,6 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
unsigned int num_planes)
{
unsigned int i;
- int ret;
for (i = 0; i < num_planes; i++) {
struct drm_plane_state *plane_state = states[i];
@@ -1173,13 +1172,13 @@ int dpu_assign_plane_resources(struct dpu_global_state *global_state,
!plane_state->visible)
continue;
- ret = dpu_plane_virtual_assign_resources(crtc, global_state,
+ int ret = dpu_plane_virtual_assign_resources(crtc, global_state,
state, plane_state);
if (ret)
- break;
+ return ret;
}
- return ret;
+ return 0;
}
static void dpu_plane_flush_csc(struct dpu_plane *pdpu, struct dpu_sw_pipe *pipe)
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 24dd37f1682b..3898850739ab 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -930,16 +930,17 @@ enum drm_mode_status msm_dp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
- if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
- return MODE_CLOCK_HIGH;
-
msm_dp_display = container_of(dp, struct msm_dp_display_private, msm_dp_display);
link_info = &msm_dp_display->panel->link_info;
- if (drm_mode_is_420_only(&dp->connector->display_info, mode) &&
- msm_dp_display->panel->vsc_sdp_supported)
+ if ((drm_mode_is_420_only(&dp->connector->display_info, mode) &&
+ msm_dp_display->panel->vsc_sdp_supported) ||
+ msm_dp_wide_bus_available(dp))
mode_pclk_khz /= 2;
+ if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
+ return MODE_CLOCK_HIGH;
+
mode_bpp = dp->connector->display_info.bpc * num_components;
if (!mode_bpp)
mode_bpp = default_bpp;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index d3e241ea6941..16b7913d1eef 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -257,7 +257,10 @@ static enum drm_mode_status msm_edp_bridge_mode_valid(struct drm_bridge *bridge,
return -EINVAL;
}
- if (mode->clock > DP_MAX_PIXEL_CLK_KHZ)
+ if (msm_dp_wide_bus_available(dp))
+ mode_pclk_khz /= 2;
+
+ if (mode_pclk_khz > DP_MAX_PIXEL_CLK_KHZ)
return MODE_CLOCK_HIGH;
/*
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 031446c87dae..798168180c1a 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -83,6 +83,9 @@ struct dsi_pll_7nm {
/* protects REG_DSI_7nm_PHY_CMN_CLK_CFG0 register */
spinlock_t postdiv_lock;
+ /* protects REG_DSI_7nm_PHY_CMN_CLK_CFG1 register */
+ spinlock_t pclk_mux_lock;
+
struct pll_7nm_cached_state cached_state;
struct dsi_pll_7nm *slave;
@@ -372,22 +375,41 @@ static void dsi_pll_enable_pll_bias(struct dsi_pll_7nm *pll)
ndelay(250);
}
-static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+static void dsi_pll_cmn_clk_cfg0_write(struct dsi_pll_7nm *pll, u32 val)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pll->postdiv_lock, flags);
+ writel(val, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
+ spin_unlock_irqrestore(&pll->postdiv_lock, flags);
+}
+
+static void dsi_pll_cmn_clk_cfg1_update(struct dsi_pll_7nm *pll, u32 mask,
+ u32 val)
+{
+ unsigned long flags;
u32 data;
+ spin_lock_irqsave(&pll->pclk_mux_lock, flags);
data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data & ~BIT(5), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ data &= ~mask;
+ data |= val & mask;
+
+ writel(data, pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ spin_unlock_irqrestore(&pll->pclk_mux_lock, flags);
+}
+
+static void dsi_pll_disable_global_clk(struct dsi_pll_7nm *pll)
+{
+ dsi_pll_cmn_clk_cfg1_update(pll, DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN, 0);
}
static void dsi_pll_enable_global_clk(struct dsi_pll_7nm *pll)
{
- u32 data;
+ u32 cfg_1 = DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN | DSI_7nm_PHY_CMN_CLK_CFG1_CLK_EN_SEL;
writel(0x04, pll->phy->base + REG_DSI_7nm_PHY_CMN_CTRL_3);
-
- data = readl(pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- writel(data | BIT(5) | BIT(4), pll->phy->base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg1_update(pll, cfg_1, cfg_1);
}
static void dsi_pll_phy_dig_reset(struct dsi_pll_7nm *pll)
@@ -565,7 +587,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
struct pll_7nm_cached_state *cached = &pll_7nm->cached_state;
- void __iomem *phy_base = pll_7nm->phy->base;
u32 val;
int ret;
@@ -574,13 +595,10 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
val |= cached->pll_out_div;
writel(val, pll_7nm->phy->pll_base + REG_DSI_7nm_PHY_PLL_PLL_OUTDIV_RATE);
- writel(cached->bit_clk_div | (cached->pix_clk_div << 4),
- phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG0);
-
- val = readl(phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
- val &= ~0x3;
- val |= cached->pll_mux;
- writel(val, phy_base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg0_write(pll_7nm,
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_3_0(cached->bit_clk_div) |
+ DSI_7nm_PHY_CMN_CLK_CFG0_DIV_CTRL_7_4(cached->pix_clk_div));
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, 0x3, cached->pll_mux);
ret = dsi_pll_7nm_vco_set_rate(phy->vco_hw,
pll_7nm->vco_current_rate,
@@ -599,7 +617,6 @@ static int dsi_7nm_pll_restore_state(struct msm_dsi_phy *phy)
static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
{
struct dsi_pll_7nm *pll_7nm = to_pll_7nm(phy->vco_hw);
- void __iomem *base = phy->base;
u32 data = 0x0; /* internal PLL */
DBG("DSI PLL%d", pll_7nm->phy->id);
@@ -618,7 +635,8 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
}
/* set PLL src */
- writel(data << 2, base + REG_DSI_7nm_PHY_CMN_CLK_CFG1);
+ dsi_pll_cmn_clk_cfg1_update(pll_7nm, DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL__MASK,
+ DSI_7nm_PHY_CMN_CLK_CFG1_BITCLK_SEL(data));
return 0;
}
@@ -733,7 +751,7 @@ static int pll_7nm_register(struct dsi_pll_7nm *pll_7nm, struct clk_hw **provide
pll_by_2_bit,
}), 2, 0, pll_7nm->phy->base +
REG_DSI_7nm_PHY_CMN_CLK_CFG1,
- 0, 1, 0, NULL);
+ 0, 1, 0, &pll_7nm->pclk_mux_lock);
if (IS_ERR(hw)) {
ret = PTR_ERR(hw);
goto fail;
@@ -778,6 +796,7 @@ static int dsi_pll_7nm_init(struct msm_dsi_phy *phy)
pll_7nm_list[phy->id] = pll_7nm;
spin_lock_init(&pll_7nm->postdiv_lock);
+ spin_lock_init(&pll_7nm->pclk_mux_lock);
pll_7nm->phy = phy;
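
The new dsi_pll_cmn_clk_cfg1_update() helper above is a masked read-modify-write: only the bits selected by the mask change, and the whole read/merge/write sequence runs under the new pclk_mux_lock so concurrent CLK_CFG1 updates cannot interleave. The sketch below shows just the mask/merge step with a made-up register value; the locking and MMIO accessors are omitted, so treat it as illustration rather than driver code.

#include <stdint.h>
#include <stdio.h>

/* Replace only the bits selected by @mask, preserving everything else. */
static uint32_t reg_update(uint32_t reg, uint32_t mask, uint32_t val)
{
	reg &= ~mask;
	reg |= val & mask;
	return reg;
}

int main(void)
{
	uint32_t clk_cfg1 = 0xa5;	/* pretend register contents */

	/* set bits 5 and 4 (CLK_EN, CLK_EN_SEL), leave the rest untouched */
	clk_cfg1 = reg_update(clk_cfg1, 0x30, 0x30);
	printf("0x%02x\n", clk_cfg1);	/* 0xb5 */
	return 0;
}
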
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index fee31680a6d5..a65077855201 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -537,15 +537,12 @@ static inline int align_pitch(int width, int bpp)
static inline unsigned long timeout_to_jiffies(const ktime_t *timeout)
{
ktime_t now = ktime_get();
- s64 remaining_jiffies;
- if (ktime_compare(*timeout, now) < 0) {
- remaining_jiffies = 0;
- } else {
- ktime_t rem = ktime_sub(*timeout, now);
- remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
- }
+ if (ktime_compare(*timeout, now) <= 0)
+ return 0;
+ ktime_t rem = ktime_sub(*timeout, now);
+ s64 remaining_jiffies = ktime_divns(rem, NSEC_PER_SEC / HZ);
return clamp(remaining_jiffies, 1LL, (s64)INT_MAX);
}
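
The rewritten timeout_to_jiffies() above returns 0 for an already-expired timeout and clamps everything else to [1, INT_MAX], so a still-pending timeout never yields a zero-tick wait. The userspace sketch below reproduces that behaviour with an assumed HZ of 250; timeout_to_ticks() is a hypothetical stand-in for the kernel helper.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define HZ 250
#define NSEC_PER_SEC 1000000000LL

static long long timeout_to_ticks(int64_t timeout_ns, int64_t now_ns)
{
	long long ticks;

	if (timeout_ns <= now_ns)
		return 0;			/* already expired */

	ticks = (timeout_ns - now_ns) / (NSEC_PER_SEC / HZ);
	if (ticks < 1)
		return 1;			/* round sub-tick remainders up */
	if (ticks > INT_MAX)
		return INT_MAX;
	return ticks;
}

int main(void)
{
	printf("%lld\n", timeout_to_ticks(2 * NSEC_PER_SEC, 0));	/* 500 */
	printf("%lld\n", timeout_to_ticks(1000, 0));			/* 1 */
	printf("%lld\n", timeout_to_ticks(0, 5));			/* 0 */
	return 0;
}
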
diff --git a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
index d54b72f92449..35f7f40e405b 100644
--- a/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
+++ b/drivers/gpu/drm/msm/registers/display/dsi_phy_7nm.xml
@@ -9,8 +9,15 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00004" name="REVISION_ID1"/>
<reg32 offset="0x00008" name="REVISION_ID2"/>
<reg32 offset="0x0000c" name="REVISION_ID3"/>
- <reg32 offset="0x00010" name="CLK_CFG0"/>
- <reg32 offset="0x00014" name="CLK_CFG1"/>
+ <reg32 offset="0x00010" name="CLK_CFG0">
+ <bitfield name="DIV_CTRL_3_0" low="0" high="3" type="uint"/>
+ <bitfield name="DIV_CTRL_7_4" low="4" high="7" type="uint"/>
+ </reg32>
+ <reg32 offset="0x00014" name="CLK_CFG1">
+ <bitfield name="CLK_EN" pos="5" type="boolean"/>
+ <bitfield name="CLK_EN_SEL" pos="4" type="boolean"/>
+ <bitfield name="BITCLK_SEL" low="2" high="3" type="uint"/>
+ </reg32>
<reg32 offset="0x00018" name="GLBL_CTRL"/>
<reg32 offset="0x0001c" name="RBUF_CTRL"/>
<reg32 offset="0x00020" name="VREG_CTRL_0"/>
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index ce840300578d..1050a4617fc1 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -4,6 +4,7 @@ config DRM_NOUVEAU
depends on DRM && PCI && MMU
select IOMMU_API
select FW_LOADER
+ select FW_CACHE if PM_SLEEP
select DRM_CLIENT_SELECTION
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 8d5c9c74cbb9..eac0d1d2dbda 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -775,7 +775,6 @@ nouveau_connector_force(struct drm_connector *connector)
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
connector->name);
- connector->status = connector_status_disconnected;
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index b4da82ddbb6b..8ea98f06d39a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -590,6 +590,7 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
struct mm_struct *mm = svmm->notifier.mm;
+ struct folio *folio;
struct page *page;
unsigned long start = args->p.addr;
unsigned long notifier_seq;
@@ -616,12 +617,16 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = -EINVAL;
goto out;
}
+ folio = page_folio(page);
mutex_lock(&svmm->mutex);
if (!mmu_interval_read_retry(&notifier->notifier,
notifier_seq))
break;
mutex_unlock(&svmm->mutex);
+
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Map the page on the GPU. */
@@ -637,8 +642,8 @@ static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
mutex_unlock(&svmm->mutex);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
out:
mmu_interval_notifier_remove(&notifier->notifier);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
index a6f410ba60bc..d393bc540f86 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/gp10b.c
@@ -75,7 +75,7 @@ gp10b_pmu_acr = {
.bootstrap_multiple_falcons = gp10b_pmu_acr_bootstrap_multiple_falcons,
};
-#if IS_ENABLED(CONFIG_ARCH_TEGRA_210_SOC)
+#if IS_ENABLED(CONFIG_ARCH_TEGRA_186_SOC)
MODULE_FIRMWARE("nvidia/gp10b/pmu/desc.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/image.bin");
MODULE_FIRMWARE("nvidia/gp10b/pmu/sig.bin");
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 45d09e6fa667..7d68a8acfe2e 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -109,13 +109,13 @@ static int jadard_prepare(struct drm_panel *panel)
if (jadard->desc->lp11_to_reset_delay_ms)
msleep(jadard->desc->lp11_to_reset_delay_ms);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(5);
- gpiod_set_value(jadard->reset, 0);
+ gpiod_set_value(jadard->reset, 1);
msleep(10);
- gpiod_set_value(jadard->reset, 1);
+ gpiod_set_value(jadard->reset, 0);
msleep(130);
ret = jadard->desc->init(jadard);
@@ -1130,7 +1130,7 @@ static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
if (IS_ERR(jadard->reset)) {
DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
return PTR_ERR(jadard->reset);
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index d5dcd3d1b33a..08136e790ca0 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -802,6 +802,7 @@ static void panthor_query_group_priorities_info(struct drm_file *file,
{
int prio;
+ memset(arg, 0, sizeof(*arg));
for (prio = PANTHOR_GROUP_PRIORITY_REALTIME; prio >= 0; prio--) {
if (!group_priority_permit(file, prio))
arg->allowed_mask |= BIT(prio);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 05c13102a8cb..d22889fbfa9c 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -359,7 +359,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-static void r300_gpu_init(struct radeon_device *rdev)
+/* rs400_gpu_init also calls this! */
+void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 1e00f6b99f94..8f5e07834fcc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -165,6 +165,7 @@ void r200_set_safe_registers(struct radeon_device *rdev);
*/
extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
+extern void r300_gpu_init(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev, bool hard);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index d6c18fd740ec..13cd0a688a65 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -256,8 +256,22 @@ int rs400_mc_wait_for_idle(struct radeon_device *rdev)
static void rs400_gpu_init(struct radeon_device *rdev)
{
- /* FIXME: is this correct ? */
- r420_pipes_init(rdev);
+ /* Earlier code called r420_pipes_init and then
+ * rs400_mc_wait_for_idle(rdev). The problem is that, at least
+ * on my Mobility Radeon Xpress 200M RC410 card (which ends up
+ * in this code path), the r420 pipe initialization method
+ * leaves num_gb_pipes == 3 while the card seems to have only
+ * one pipe.
+ *
+ * The problems showed up as HyperZ glitches, see:
+ * https://bugs.freedesktop.org/show_bug.cgi?id=110897
+ *
+ * Delegating initialization to the r300 code seems to work
+ * and results in proper pipe numbers. The rs400 cards are
+ * said to be r300-class rather than r400-class cards.
+ */
+ r300_gpu_init(rdev);
+
if (rs400_mc_wait_for_idle(rdev)) {
pr_warn("rs400: Failed to wait MC idle while programming pipes. Bad things might happen. %08x\n",
RREG32(RADEON_MC_STATUS));
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index c75302ca3427..f56e77e7f6d0 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -21,7 +21,7 @@
*
*/
-#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#if !defined(_GPU_SCHED_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
#define _GPU_SCHED_TRACE_H_
#include <linux/stringify.h>
@@ -106,7 +106,7 @@ TRACE_EVENT(drm_sched_job_wait_dep,
__entry->seqno)
);
-#endif
+#endif /* _GPU_SCHED_TRACE_H_ */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
index b976a5e9aef5..23ecc00accb2 100644
--- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
+++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c
@@ -70,10 +70,17 @@ static int light_up_connector(struct kunit *test,
state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);
+retry:
conn_state = drm_atomic_get_connector_state(state, connector);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state);
ret = drm_atomic_set_crtc_for_connector(conn_state, crtc);
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ ret = drm_modeset_backoff(ctx);
+ if (!ret)
+ goto retry;
+ }
KUNIT_EXPECT_EQ(test, ret, 0);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -282,15 +289,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -345,15 +353,16 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -408,18 +417,18 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -474,7 +483,6 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test)
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -520,18 +528,18 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -588,7 +596,6 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test)
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -636,18 +643,18 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
KUNIT_ASSERT_TRUE(test, conn->display_info.is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_NE(test, drm_match_cea_mode(preferred), 1);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -704,7 +711,6 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
- drm = &priv->drm;
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -754,20 +760,20 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test)
10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -828,20 +834,20 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test)
10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -899,6 +905,8 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_dvi_1080p,
@@ -908,14 +916,12 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test)
info = &conn->display_info;
KUNIT_ASSERT_FALSE(test, info->is_hdmi);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -946,21 +952,21 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_max_200mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -993,21 +999,21 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test)
10);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1040,21 +1046,21 @@ static void drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
ARRAY_SIZE(test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz));
KUNIT_ASSERT_GT(test, ret, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1091,15 +1097,16 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test)
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
+
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_ASSERT_EQ(test, ret, 0);
@@ -1147,6 +1154,8 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
@@ -1157,9 +1166,6 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1170,8 +1176,9 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 10, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1216,6 +1223,8 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
@@ -1226,9 +1235,6 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
KUNIT_ASSERT_FALSE(test, preferred->flags & DRM_MODE_FLAG_DBLCLK);
@@ -1242,8 +1248,9 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1290,9 +1297,6 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
mode = drm_kunit_display_mode_from_cea_vic(test, drm, 1);
KUNIT_ASSERT_NOT_NULL(test, mode);
@@ -1306,7 +1310,9 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test)
rate = mode->clock * 1500;
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
crtc = priv->crtc;
ret = light_up_connector(test, drm, crtc, conn, mode, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1340,6 +1346,8 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz,
@@ -1350,9 +1358,6 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1371,8 +1376,9 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test)
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1407,6 +1413,8 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_200mhz,
@@ -1417,9 +1425,6 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1438,8 +1443,9 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_YUV422);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1473,6 +1479,8 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
8);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz,
@@ -1483,9 +1491,6 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1496,8 +1501,9 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
@@ -1533,6 +1539,8 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
12);
KUNIT_ASSERT_NOT_NULL(test, priv);
+ drm = &priv->drm;
+ crtc = priv->crtc;
conn = &priv->connector;
ret = set_connector_edid(test, conn,
test_edid_hdmi_1080p_rgb_max_340mhz,
@@ -1543,9 +1551,6 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
KUNIT_ASSERT_TRUE(test, info->is_hdmi);
KUNIT_ASSERT_GT(test, info->max_tmds_clock, 0);
- ctx = drm_kunit_helper_acquire_ctx_alloc(test);
- KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
-
preferred = find_preferred_mode(conn);
KUNIT_ASSERT_NOT_NULL(test, preferred);
@@ -1556,8 +1561,9 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes
rate = drm_hdmi_compute_mode_clock(preferred, 12, HDMI_COLORSPACE_RGB);
KUNIT_ASSERT_LT(test, rate, info->max_tmds_clock * 1000);
- drm = &priv->drm;
- crtc = priv->crtc;
+ ctx = drm_kunit_helper_acquire_ctx_alloc(test);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);
+
ret = light_up_connector(test, drm, crtc, conn, preferred, ctx);
KUNIT_EXPECT_EQ(test, ret, 0);
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index c67e1f906785..8706763af8fb 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -335,8 +335,6 @@ static void bochs_hw_setmode(struct bochs_device *bochs, struct drm_display_mode
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
- bochs_hw_blank(bochs, false);
-
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_XRES, bochs->xres);
@@ -506,6 +504,9 @@ static int bochs_crtc_helper_atomic_check(struct drm_crtc *crtc,
static void bochs_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
+ struct bochs_device *bochs = to_bochs_device(crtc->dev);
+
+ bochs_hw_blank(bochs, false);
}
static void bochs_crtc_helper_atomic_disable(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index b20ac1705726..fa269d279e25 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -67,7 +67,7 @@ static u16 lerp_u16(u16 a, u16 b, s64 t)
s64 delta = drm_fixp_mul(b_fp - a_fp, t);
- return drm_fixp2int(a_fp + delta);
+ return drm_fixp2int_round(a_fp + delta);
}
static s64 get_lut_index(const struct vkms_color_lut *lut, u16 channel_value)
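
The vkms change swaps drm_fixp2int(), which truncates, for drm_fixp2int_round() when converting the interpolated LUT value back to an integer, removing a systematic downward bias. The difference is easiest to see with plain integers; the sketch below uses a local 32.32 fixed-point representation with stand-in helpers rather than the drm_fixed.h ones:

#include <stdint.h>
#include <stdio.h>

#define FP_SHIFT 32
#define FP_ONE   ((int64_t)1 << FP_SHIFT)

static int64_t int2fp(int64_t x)       { return x << FP_SHIFT; }
static int64_t fp2int_trunc(int64_t x) { return x >> FP_SHIFT; }
static int64_t fp2int_round(int64_t x) { return (x + (FP_ONE >> 1)) >> FP_SHIFT; }

int main(void)
{
	int64_t v = int2fp(12) + FP_ONE / 2;	/* 12.5 in 32.32 fixed point */

	printf("trunc: %lld\n", (long long)fp2int_trunc(v));	/* prints 12 */
	printf("round: %lld\n", (long long)fp2int_round(v));	/* prints 13 */
	return 0;
}
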
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 2eb9633f163a..2a2f250fa495 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -194,8 +194,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
to_intel_plane(crtc->base.primary);
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
struct drm_framebuffer *fb;
struct i915_vma *vma;
@@ -241,14 +239,6 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc,
atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
plane_config->vma = vma;
-
- /*
- * Flip to the newly created mapping ASAP, so we can re-use the
- * first part of GGTT for WOPCM, prevent flickering, and prevent
- * the lookup of sysmem scratch pages.
- */
- plane->check_plane(crtc_state, plane_state);
- plane->async_flip(NULL, plane, crtc_state, plane_state, true);
return;
nofb:
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index d86219dedde2..b732c89816df 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -53,7 +53,6 @@
#define RING_CTL(base) XE_REG((base) + 0x3c)
#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
-#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */
#define RING_START_UDW(base) XE_REG((base) + 0x48)
diff --git a/drivers/gpu/drm/xe/regs/xe_oa_regs.h b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
index a49561e9f3c3..a79ad2da070c 100644
--- a/drivers/gpu/drm/xe/regs/xe_oa_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_oa_regs.h
@@ -51,6 +51,10 @@
/* Common to all OA units */
#define OA_OACONTROL_REPORT_BC_MASK REG_GENMASK(9, 9)
#define OA_OACONTROL_COUNTER_SIZE_MASK REG_GENMASK(8, 8)
+#define OAG_OACONTROL_USED_BITS \
+ (OAG_OACONTROL_OA_PES_DISAG_EN | OAG_OACONTROL_OA_CCS_SELECT_MASK | \
+ OAG_OACONTROL_OA_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE | \
+ OA_OACONTROL_REPORT_BC_MASK | OA_OACONTROL_COUNTER_SIZE_MASK)
#define OAG_OA_DEBUG XE_REG(0xdaf8, XE_REG_OPTION_MASKED)
#define OAG_OA_DEBUG_DISABLE_MMIO_TRG REG_BIT(14)
@@ -78,6 +82,8 @@
#define OAM_CONTEXT_CONTROL_OFFSET (0x1bc)
#define OAM_CONTROL_OFFSET (0x194)
#define OAM_CONTROL_COUNTER_SEL_MASK REG_GENMASK(3, 1)
+#define OAM_OACONTROL_USED_BITS \
+ (OAM_CONTROL_COUNTER_SEL_MASK | OAG_OACONTROL_OA_COUNTER_ENABLE)
#define OAM_DEBUG_OFFSET (0x198)
#define OAM_STATUS_OFFSET (0x19c)
#define OAM_MMIO_TRG_OFFSET (0x1d0)
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c
index 81dc7795c065..39fe485d2085 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.c
+++ b/drivers/gpu/drm/xe/xe_devcoredump.c
@@ -119,11 +119,7 @@ static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
drm_puts(&p, "\n**** GuC CT ****\n");
xe_guc_ct_snapshot_print(ss->guc.ct, &p);
- /*
- * Don't add a new section header here because the mesa debug decoder
- * tool expects the context information to be in the 'GuC CT' section.
- */
- /* drm_puts(&p, "\n**** Contexts ****\n"); */
+ drm_puts(&p, "\n**** Contexts ****\n");
xe_guc_exec_queue_snapshot_print(ss->ge, &p);
drm_puts(&p, "\n**** Job ****\n");
@@ -395,42 +391,34 @@ int xe_devcoredump_init(struct xe_device *xe)
/**
* xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
*
- * The output is split to multiple lines because some print targets, e.g. dmesg
- * cannot handle arbitrarily long lines. Note also that printing to dmesg in
- * piece-meal fashion is not possible, each separate call to drm_puts() has a
- * line-feed automatically added! Therefore, the entire output line must be
- * constructed in a local buffer first, then printed in one atomic output call.
+ * The output is split into multiple calls to drm_puts() because some print
+ * targets, e.g. dmesg, cannot handle arbitrarily long lines. These targets may
+ * add newlines, as is the case with dmesg: each drm_puts() call creates a
+ * separate line.
*
* There is also a scheduler yield call to prevent the 'task has been stuck for
* 120s' kernel hang check feature from firing when printing to a slow target
* such as dmesg over a serial port.
*
- * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
- *
* @p: the printer object to output to
* @prefix: optional prefix to add to output string
+ * @suffix: optional suffix character to append at the end; pass 0 to
+ * disable it, which is useful when dumping a blob to @p across
+ * multiple calls
* @blob: the Binary Large OBject to dump out
* @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
* @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
*/
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size)
{
const u32 *blob32 = (const u32 *)blob;
char buff[ASCII85_BUFSZ], *line_buff;
size_t line_pos = 0;
- /*
- * Splitting blobs across multiple lines is not compatible with the mesa
- * debug decoder tool. Note that even dropping the explicit '\n' below
- * doesn't help because the GuC log is so big some underlying implementation
- * still splits the lines at 512K characters. So just bail completely for
- * the moment.
- */
- return;
-
#define DMESG_MAX_LINE_LEN 800
-#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "\n\0" */
+ /* Always leave space for the suffix char and the \0 */
+#define MIN_SPACE (ASCII85_BUFSZ + 2) /* 85 + "<suffix>\0" */
if (size & 3)
drm_printf(p, "Size not word aligned: %zu", size);
@@ -462,7 +450,6 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
line_pos += strlen(line_buff + line_pos);
if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
drm_puts(p, line_buff);
@@ -474,10 +461,11 @@ void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
}
}
+ if (suffix)
+ line_buff[line_pos++] = suffix;
+
if (line_pos) {
- line_buff[line_pos++] = '\n';
line_buff[line_pos++] = 0;
-
drm_puts(p, line_buff);
}
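
For context, the function being reworked is a line-buffered ASCII85 dumper: encode one u32 at a time, flush the local buffer through drm_puts() before it can overflow, and (after this change) append an optional suffix instead of a hard-coded newline. A simplified sketch of that loop, assuming ascii85_encode() from <linux/ascii85.h>; the scheduler yield, alignment checks and offset handling of the real function are omitted:

#include <linux/ascii85.h>
#include <linux/kernel.h>
#include <drm/drm_print.h>

#define EXAMPLE_MAX_LINE 800

static void example_print_ascii85(struct drm_printer *p, const char *prefix,
				  char suffix, const u32 *data, size_t dwords)
{
	char buff[ASCII85_BUFSZ];
	char line[EXAMPLE_MAX_LINE];
	size_t pos = 0, i;

	line[0] = '\0';
	if (prefix)
		pos += scnprintf(line, sizeof(line), "%s: ", prefix);

	for (i = 0; i < dwords; i++) {
		pos += scnprintf(line + pos, sizeof(line) - pos, "%s",
				 ascii85_encode(data[i], buff));

		/* Flush before the buffer can no longer hold another chunk. */
		if (pos + ASCII85_BUFSZ + 2 >= sizeof(line)) {
			drm_puts(p, line);	/* dmesg adds its own newline */
			pos = 0;
			line[0] = '\0';
		}
	}

	if (suffix && pos < sizeof(line) - 2)
		line[pos++] = suffix;
	line[pos] = '\0';
	if (pos)
		drm_puts(p, line);
}
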
diff --git a/drivers/gpu/drm/xe/xe_devcoredump.h b/drivers/gpu/drm/xe/xe_devcoredump.h
index 6a17e6d60102..5391a80a4d1b 100644
--- a/drivers/gpu/drm/xe/xe_devcoredump.h
+++ b/drivers/gpu/drm/xe/xe_devcoredump.h
@@ -29,7 +29,7 @@ static inline int xe_devcoredump_init(struct xe_device *xe)
}
#endif
-void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
+void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix, char suffix,
const void *blob, size_t offset, size_t size);
#endif
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 63f30b6df70b..2d4874d2b922 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -135,8 +135,8 @@ void xe_drm_client_add_bo(struct xe_drm_client *client,
XE_WARN_ON(bo->client);
XE_WARN_ON(!list_empty(&bo->client_link));
- spin_lock(&client->bos_lock);
bo->client = xe_drm_client_get(client);
+ spin_lock(&client->bos_lock);
list_add_tail(&bo->client_link, &client->bos_list);
spin_unlock(&client->bos_lock);
}
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 26e64530ada2..9f4f27d1ef4a 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -380,9 +380,7 @@ int xe_gt_init_early(struct xe_gt *gt)
if (err)
return err;
- xe_wa_process_gt(gt);
xe_wa_process_oob(gt);
- xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
spin_lock_init(&gt->global_invl_lock);
@@ -474,6 +472,8 @@ static int all_fw_domain_init(struct xe_gt *gt)
}
xe_gt_mcr_set_implicit_defaults(gt);
+ xe_wa_process_gt(gt);
+ xe_tuning_process_gt(gt);
xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
err = xe_gt_clock_init(gt);
@@ -532,8 +532,10 @@ static int all_fw_domain_init(struct xe_gt *gt)
if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt))
xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt);
- if (IS_SRIOV_PF(gt_to_xe(gt)))
+ if (IS_SRIOV_PF(gt_to_xe(gt))) {
+ xe_gt_sriov_pf_init(gt);
xe_gt_sriov_pf_init_hw(gt);
+ }
xe_force_wake_put(gt_to_fw(gt), fw_ref);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index e71fc3d2bda2..6f906c8e8108 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -68,6 +68,19 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+/**
+ * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF.
+ * @gt: the &xe_gt to initialize
+ *
+ * Late one-time initialization of the PF data.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return xe_gt_sriov_pf_migration_init(gt);
+}
+
static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe)
{
return GRAPHICS_VERx100(xe) == 1200;
@@ -90,7 +103,6 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
pf_enable_ggtt_guest_update(gt);
xe_gt_sriov_pf_service_update(gt);
- xe_gt_sriov_pf_migration_init(gt);
}
static u32 pf_get_vf_regs_stride(struct xe_device *xe)
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
index 96fab779a906..f474509411c0 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h
@@ -10,6 +10,7 @@ struct xe_gt;
#ifdef CONFIG_PCI_IOV
int xe_gt_sriov_pf_init_early(struct xe_gt *gt);
+int xe_gt_sriov_pf_init(struct xe_gt *gt);
void xe_gt_sriov_pf_init_hw(struct xe_gt *gt);
void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid);
void xe_gt_sriov_pf_restart(struct xe_gt *gt);
@@ -19,6 +20,11 @@ static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
return 0;
}
+static inline int xe_gt_sriov_pf_init(struct xe_gt *gt)
+{
+ return 0;
+}
+
static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
{
}
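
The header follows the usual pattern for config-gated entry points: callers stay unconditional, and the !CONFIG_PCI_IOV build gets a static inline stub that compiles away to nothing. A generic sketch of that pattern with hypothetical names:

/* example_feature.h -- hypothetical config-gated API */
struct example_dev;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_init(struct example_dev *dev);
#else
static inline int example_feature_init(struct example_dev *dev)
{
	return 0;	/* feature compiled out: succeed without doing anything */
}
#endif
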
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 8b65c5e959cc..72ad576fc18e 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -1723,8 +1723,11 @@ void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
drm_printf(p, "\tg2h outstanding: %d\n",
snapshot->g2h_outstanding);
- if (snapshot->ctb)
- xe_print_blob_ascii85(p, "CTB data", snapshot->ctb, 0, snapshot->ctb_size);
+ if (snapshot->ctb) {
+ drm_printf(p, "[CTB].length: 0x%zx\n", snapshot->ctb_size);
+ xe_print_blob_ascii85(p, "[CTB].data", '\n',
+ snapshot->ctb, 0, snapshot->ctb_size);
+ }
} else {
drm_puts(p, "CT disabled\n");
}
diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c
index df4cfb698cdb..0ca3056d8bd3 100644
--- a/drivers/gpu/drm/xe/xe_guc_log.c
+++ b/drivers/gpu/drm/xe/xe_guc_log.c
@@ -208,11 +208,14 @@ void xe_guc_log_snapshot_print(struct xe_guc_log_snapshot *snapshot, struct drm_
drm_printf(p, "GuC timestamp: 0x%08llX [%llu]\n", snapshot->stamp, snapshot->stamp);
drm_printf(p, "Log level: %u\n", snapshot->level);
+ drm_printf(p, "[LOG].length: 0x%zx\n", snapshot->size);
remain = snapshot->size;
for (i = 0; i < snapshot->num_chunks; i++) {
size_t size = min(GUC_LOG_CHUNK_SIZE, remain);
+ const char *prefix = i ? NULL : "[LOG].data";
+ char suffix = i == snapshot->num_chunks - 1 ? '\n' : 0;
- xe_print_blob_ascii85(p, i ? NULL : "Log data", snapshot->copy[i], 0, size);
+ xe_print_blob_ascii85(p, prefix, suffix, snapshot->copy[i], 0, size);
remain -= size;
}
}
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index 913c74d6e2ae..b6a2dd742ebd 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1248,6 +1248,8 @@ static void __guc_exec_queue_fini_async(struct work_struct *w)
if (xe_exec_queue_is_lr(q))
cancel_work_sync(&ge->lr_tdr);
+ /* Confirm no work left behind accessing device structures */
+ cancel_delayed_work_sync(&ge->sched.base.work_tdr);
release_guc_id(guc, q);
xe_sched_entity_fini(&ge->entity);
xe_sched_fini(&ge->sched);
diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c
index 089834467880..392102515f3d 100644
--- a/drivers/gpu/drm/xe/xe_hmm.c
+++ b/drivers/gpu/drm/xe/xe_hmm.c
@@ -19,11 +19,10 @@ static u64 xe_npages_in_range(unsigned long start, unsigned long end)
return (end - start) >> PAGE_SHIFT;
}
-/*
+/**
* xe_mark_range_accessed() - mark a range is accessed, so core mm
* have such information for memory eviction or write back to
* hard disk
- *
* @range: the range to mark
* @write: if write to this range, we mark pages in this range
* as dirty
@@ -43,15 +42,51 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
}
}
-/*
+static int xe_alloc_sg(struct xe_device *xe, struct sg_table *st,
+ struct hmm_range *range, struct rw_semaphore *notifier_sem)
+{
+ unsigned long i, npages, hmm_pfn;
+ unsigned long num_chunks = 0;
+ int ret;
+
+ /* The HMM docs say this is needed. */
+ ret = down_read_interruptible(notifier_sem);
+ if (ret)
+ return ret;
+
+ if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
+ up_read(notifier_sem);
+ return -EAGAIN;
+ }
+
+ npages = xe_npages_in_range(range->start, range->end);
+ for (i = 0; i < npages;) {
+ unsigned long len;
+
+ hmm_pfn = range->hmm_pfns[i];
+ xe_assert(xe, hmm_pfn & HMM_PFN_VALID);
+
+ len = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+
+ /* If order > 0 the page may extend beyond range->start */
+ len -= (hmm_pfn & ~HMM_PFN_FLAGS) & (len - 1);
+ i += len;
+ num_chunks++;
+ }
+ up_read(notifier_sem);
+
+ return sg_alloc_table(st, num_chunks, GFP_KERNEL);
+}
+
+/**
* xe_build_sg() - build a scatter gather table for all the physical pages/pfn
* in a hmm_range. dma-map pages if necessary. dma-address is save in sg table
* and will be used to program GPU page table later.
- *
* @xe: the xe device who will access the dma-address in sg table
* @range: the hmm range that we build the sg table from. range->hmm_pfns[]
* has the pfn numbers of pages that back up this hmm address range.
* @st: pointer to the sg table.
+ * @notifier_sem: The xe notifier lock.
* @write: whether we write to this range. This decides dma map direction
* for system pages. If write we map it bi-diretional; otherwise
* DMA_TO_DEVICE
@@ -78,43 +113,84 @@ static void xe_mark_range_accessed(struct hmm_range *range, bool write)
* Returns 0 if successful; -ENOMEM if fails to allocate memory
*/
static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
- struct sg_table *st, bool write)
+ struct sg_table *st,
+ struct rw_semaphore *notifier_sem,
+ bool write)
{
+ unsigned long npages = xe_npages_in_range(range->start, range->end);
struct device *dev = xe->drm.dev;
- struct page **pages;
- u64 i, npages;
- int ret;
+ struct scatterlist *sgl;
+ struct page *page;
+ unsigned long i, j;
- npages = xe_npages_in_range(range->start, range->end);
- pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
+ lockdep_assert_held(notifier_sem);
- for (i = 0; i < npages; i++) {
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
- xe_assert(xe, !is_device_private_page(pages[i]));
+ i = 0;
+ for_each_sg(st->sgl, sgl, st->nents, j) {
+ unsigned long hmm_pfn, size;
+
+ hmm_pfn = range->hmm_pfns[i];
+ page = hmm_pfn_to_page(hmm_pfn);
+ xe_assert(xe, !is_device_private_page(page));
+
+ size = 1UL << hmm_pfn_to_map_order(hmm_pfn);
+ size -= page_to_pfn(page) & (size - 1);
+ i += size;
+
+ if (unlikely(j == st->nents - 1)) {
+ if (i > npages)
+ size -= (i - npages);
+ sg_mark_end(sgl);
+ }
+ sg_set_page(sgl, page, size << PAGE_SHIFT, 0);
}
+ xe_assert(xe, i == npages);
- ret = sg_alloc_table_from_pages_segment(st, pages, npages, 0, npages << PAGE_SHIFT,
- xe_sg_segment_size(dev), GFP_KERNEL);
- if (ret)
- goto free_pages;
+ return dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
+}
+
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ lockdep_assert_held_write(&vm->lock);
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+
+ mutex_lock(&userptr->unmap_mutex);
+ xe_assert(vm->xe, !userptr->mapped);
+ userptr->mapped = true;
+ mutex_unlock(&userptr->unmap_mutex);
+}
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+ struct xe_userptr *userptr = &uvma->userptr;
+ struct xe_vma *vma = &uvma->vma;
+ bool write = !xe_vma_read_only(vma);
+ struct xe_vm *vm = xe_vma_vm(vma);
+ struct xe_device *xe = vm->xe;
- ret = dma_map_sgtable(dev, st, write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
- if (ret) {
- sg_free_table(st);
- st = NULL;
+ if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+ !lockdep_is_held_type(&vm->lock, 0) &&
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+ /* Don't unmap in exec critical section. */
+ xe_vm_assert_held(vm);
+ /* Don't unmap while mapping the sg. */
+ lockdep_assert_held(&vm->lock);
}
-free_pages:
- kvfree(pages);
- return ret;
+ mutex_lock(&userptr->unmap_mutex);
+ if (userptr->sg && userptr->mapped)
+ dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+ write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ userptr->mapped = false;
+ mutex_unlock(&userptr->unmap_mutex);
}
-/*
+/**
* xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
- *
* @uvma: the userptr vma which hold the scatter gather table
*
* With function xe_userptr_populate_range, we allocate storage of
@@ -124,16 +200,9 @@ free_pages:
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
{
struct xe_userptr *userptr = &uvma->userptr;
- struct xe_vma *vma = &uvma->vma;
- bool write = !xe_vma_read_only(vma);
- struct xe_vm *vm = xe_vma_vm(vma);
- struct xe_device *xe = vm->xe;
- struct device *dev = xe->drm.dev;
-
- xe_assert(xe, userptr->sg);
- dma_unmap_sgtable(dev, userptr->sg,
- write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+ xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+ xe_hmm_userptr_unmap(uvma);
sg_free_table(userptr->sg);
userptr->sg = NULL;
}
@@ -166,13 +235,20 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
- unsigned long *pfns, flags = HMM_PFN_REQ_FAULT;
+ unsigned long *pfns;
struct xe_userptr *userptr;
struct xe_vma *vma = &uvma->vma;
u64 userptr_start = xe_vma_userptr(vma);
u64 userptr_end = userptr_start + xe_vma_size(vma);
struct xe_vm *vm = xe_vma_vm(vma);
- struct hmm_range hmm_range;
+ struct hmm_range hmm_range = {
+ .pfn_flags_mask = 0, /* ignore pfns */
+ .default_flags = HMM_PFN_REQ_FAULT,
+ .start = userptr_start,
+ .end = userptr_end,
+ .notifier = &uvma->userptr.notifier,
+ .dev_private_owner = vm->xe,
+ };
bool write = !xe_vma_read_only(vma);
unsigned long notifier_seq;
u64 npages;
@@ -199,19 +275,14 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
return -ENOMEM;
if (write)
- flags |= HMM_PFN_REQ_WRITE;
+ hmm_range.default_flags |= HMM_PFN_REQ_WRITE;
if (!mmget_not_zero(userptr->notifier.mm)) {
ret = -EFAULT;
goto free_pfns;
}
- hmm_range.default_flags = flags;
hmm_range.hmm_pfns = pfns;
- hmm_range.notifier = &userptr->notifier;
- hmm_range.start = userptr_start;
- hmm_range.end = userptr_end;
- hmm_range.dev_private_owner = vm->xe;
while (true) {
hmm_range.notifier_seq = mmu_interval_read_begin(&userptr->notifier);
@@ -238,16 +309,37 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
if (ret)
goto free_pfns;
- ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt, write);
+ ret = xe_alloc_sg(vm->xe, &userptr->sgt, &hmm_range, &vm->userptr.notifier_lock);
if (ret)
goto free_pfns;
+ ret = down_read_interruptible(&vm->userptr.notifier_lock);
+ if (ret)
+ goto free_st;
+
+ if (mmu_interval_read_retry(hmm_range.notifier, hmm_range.notifier_seq)) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ ret = xe_build_sg(vm->xe, &hmm_range, &userptr->sgt,
+ &vm->userptr.notifier_lock, write);
+ if (ret)
+ goto out_unlock;
+
xe_mark_range_accessed(&hmm_range, write);
userptr->sg = &userptr->sgt;
+ xe_hmm_userptr_set_mapped(uvma);
userptr->notifier_seq = hmm_range.notifier_seq;
+ up_read(&vm->userptr.notifier_lock);
+ kvfree(pfns);
+ return 0;
+out_unlock:
+ up_read(&vm->userptr.notifier_lock);
+free_st:
+ sg_free_table(&userptr->sgt);
free_pfns:
kvfree(pfns);
return ret;
}
-
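
The reworked populate path follows the documented HMM pattern: snapshot the notifier sequence, fault the range under the mmap lock, then take the driver's notifier lock and re-check the sequence before consuming the PFNs, retrying if the range was invalidated in between. A condensed sketch of that pattern, with example_fault_and_check as an illustrative name and timeout/mm-lifetime handling omitted:

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/rwsem.h>

static int example_fault_and_check(struct mmu_interval_notifier *notifier,
				   struct rw_semaphore *notifier_sem,
				   unsigned long start, unsigned long end,
				   unsigned long *pfns)
{
	struct hmm_range range = {
		.notifier = notifier,
		.start = start,
		.end = end,
		.hmm_pfns = pfns,
		.default_flags = HMM_PFN_REQ_FAULT,
	};
	int ret;

again:
	range.notifier_seq = mmu_interval_read_begin(notifier);

	mmap_read_lock(notifier->mm);
	ret = hmm_range_fault(&range);
	mmap_read_unlock(notifier->mm);
	if (ret == -EBUSY)
		goto again;
	if (ret)
		return ret;

	down_read(notifier_sem);
	if (mmu_interval_read_retry(notifier, range.notifier_seq)) {
		up_read(notifier_sem);
		goto again;	/* range changed under us, refault */
	}

	/* ... build and dma-map the sg table while holding notifier_sem ... */

	up_read(notifier_sem);
	return 0;
}
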
diff --git a/drivers/gpu/drm/xe/xe_hmm.h b/drivers/gpu/drm/xe/xe_hmm.h
index 909dc2bdcd97..0ea98d8e7bbc 100644
--- a/drivers/gpu/drm/xe/xe_hmm.h
+++ b/drivers/gpu/drm/xe/xe_hmm.h
@@ -3,9 +3,16 @@
* Copyright © 2024 Intel Corporation
*/
+#ifndef _XE_HMM_H_
+#define _XE_HMM_H_
+
#include <linux/types.h>
struct xe_userptr_vma;
int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
+
void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
+#endif
diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c
index 32f5a67a917b..08552ee3fb94 100644
--- a/drivers/gpu/drm/xe/xe_irq.c
+++ b/drivers/gpu/drm/xe/xe_irq.c
@@ -757,19 +757,7 @@ int xe_irq_install(struct xe_device *xe)
xe_irq_postinstall(xe);
- err = devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
- if (err)
- goto free_irq_handler;
-
- return 0;
-
-free_irq_handler:
- if (xe_device_has_msix(xe))
- xe_irq_msix_free(xe);
- else
- xe_irq_msi_free(xe);
-
- return err;
+ return devm_add_action_or_reset(xe->drm.dev, irq_uninstall, xe);
}
static void xe_irq_msi_synchronize_irq(struct xe_device *xe)
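
The simplification above leans on devm_add_action_or_reset() semantics: if registering the cleanup action fails, the helper invokes the action immediately, so the caller needs no manual unwind path of its own (hence the removed free_irq_handler label). A small hedged sketch of the idiom; the example_* names and the "enable" register are hypothetical:

#include <linux/device.h>
#include <linux/io.h>

struct example_ctx {
	void __iomem *regs;
};

static void example_teardown(void *data)
{
	struct example_ctx *ctx = data;

	writel(0, ctx->regs);	/* undo whatever example_setup() enabled */
}

static int example_setup(struct device *dev, struct example_ctx *ctx)
{
	writel(1, ctx->regs);	/* hypothetical "enable" register */

	/*
	 * On success the action runs automatically at driver detach; if
	 * registration fails it runs right here, so no goto-based cleanup
	 * is needed in the caller.
	 */
	return devm_add_action_or_reset(dev, example_teardown, ctx);
}
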
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index eeb96b5f49e2..eb6cd91e1e22 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -237,7 +237,6 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
u32 tail, hw_tail, partial_report_size, available;
int report_size = stream->oa_buffer.format->size;
unsigned long flags;
- bool pollin;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -282,11 +281,11 @@ static bool xe_oa_buffer_check_unlocked(struct xe_oa_stream *stream)
stream->oa_buffer.tail = tail;
available = xe_oa_circ_diff(stream, stream->oa_buffer.tail, stream->oa_buffer.head);
- pollin = available >= stream->wait_num_reports * report_size;
+ stream->pollin = available >= stream->wait_num_reports * report_size;
spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);
- return pollin;
+ return stream->pollin;
}
static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
@@ -294,10 +293,8 @@ static enum hrtimer_restart xe_oa_poll_check_timer_cb(struct hrtimer *hrtimer)
struct xe_oa_stream *stream =
container_of(hrtimer, typeof(*stream), poll_check_timer);
- if (xe_oa_buffer_check_unlocked(stream)) {
- stream->pollin = true;
+ if (xe_oa_buffer_check_unlocked(stream))
wake_up(&stream->poll_wq);
- }
hrtimer_forward_now(hrtimer, ns_to_ktime(stream->poll_period_ns));
@@ -452,6 +449,12 @@ static u32 __oa_ccs_select(struct xe_oa_stream *stream)
return val;
}
+static u32 __oactrl_used_bits(struct xe_oa_stream *stream)
+{
+ return stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG ?
+ OAG_OACONTROL_USED_BITS : OAM_OACONTROL_USED_BITS;
+}
+
static void xe_oa_enable(struct xe_oa_stream *stream)
{
const struct xe_oa_format *format = stream->oa_buffer.format;
@@ -472,14 +475,14 @@ static void xe_oa_enable(struct xe_oa_stream *stream)
stream->hwe->oa_unit->type == DRM_XE_OA_UNIT_TYPE_OAG)
val |= OAG_OACONTROL_OA_PES_DISAG_EN;
- xe_mmio_write32(&stream->gt->mmio, regs->oa_ctrl, val);
+ xe_mmio_rmw32(&stream->gt->mmio, regs->oa_ctrl, __oactrl_used_bits(stream), val);
}
static void xe_oa_disable(struct xe_oa_stream *stream)
{
struct xe_mmio *mmio = &stream->gt->mmio;
- xe_mmio_write32(mmio, __oa_regs(stream)->oa_ctrl, 0);
+ xe_mmio_rmw32(mmio, __oa_regs(stream)->oa_ctrl, __oactrl_used_bits(stream), 0);
if (xe_mmio_wait32(mmio, __oa_regs(stream)->oa_ctrl,
OAG_OACONTROL_OA_COUNTER_ENABLE, 0, 50000, NULL, false))
drm_err(&stream->oa->xe->drm,
@@ -1686,7 +1689,7 @@ static int xe_oa_stream_init(struct xe_oa_stream *stream,
stream->oa_buffer.format = &stream->oa->oa_formats[param->oa_format];
stream->sample = param->sample;
- stream->periodic = param->period_exponent > 0;
+ stream->periodic = param->period_exponent >= 0;
stream->period_exponent = param->period_exponent;
stream->no_preempt = param->no_preempt;
stream->wait_num_reports = param->wait_num_reports;
@@ -1967,6 +1970,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
}
param.xef = xef;
+ param.period_exponent = -1;
ret = xe_oa_user_extensions(oa, XE_OA_USER_EXTN_FROM_OPEN, data, 0, &param);
if (ret)
return ret;
@@ -2021,7 +2025,7 @@ int xe_oa_stream_open_ioctl(struct drm_device *dev, u64 data, struct drm_file *f
goto err_exec_q;
}
- if (param.period_exponent > 0) {
+ if (param.period_exponent >= 0) {
u64 oa_period, oa_freq_hz;
/* Requesting samples from OAG buffer is a privileged operation */
@@ -2534,6 +2538,8 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt)
u->type = DRM_XE_OA_UNIT_TYPE_OAM;
}
+ xe_mmio_write32(&gt->mmio, u->regs.oa_ctrl, 0);
+
/* Ensure MMIO trigger remains disabled till there is a stream */
xe_mmio_write32(&gt->mmio, u->regs.oa_debug,
oag_configure_mmio_trigger(NULL, false));
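
Switching xe_oa_enable()/xe_oa_disable() to a read-modify-write against the *_USED_BITS masks means each stream only touches the OACONTROL bits it owns; the driver passes the used-bits mask as the clear mask and the new value as the set mask. The underlying operation is plain mask arithmetic, illustrated below as stand-alone C (rmw32 here is a local helper, not the xe_mmio API):

#include <stdint.h>
#include <stdio.h>

/* Update only the bits in 'used' and preserve everything else. */
static uint32_t rmw32(uint32_t old, uint32_t used, uint32_t set)
{
	return (old & ~used) | (set & used);
}

int main(void)
{
	uint32_t reg  = 0xA5A5A5A5;	/* current register contents */
	uint32_t used = 0x0000FF00;	/* bits this code path owns */

	printf("enable:  %08x\n", (unsigned)rmw32(reg, used, 0x00003400)); /* a5a534a5 */
	printf("disable: %08x\n", (unsigned)rmw32(reg, used, 0x00000000)); /* a5a500a5 */
	return 0;
}
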
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 1ddcc7e79a93..dc24baa84092 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -28,6 +28,8 @@ struct xe_pt_dir {
struct xe_pt pt;
/** @children: Array of page-table child nodes */
struct xe_ptw *children[XE_PDES];
+ /** @staging: Array of page-table staging nodes */
+ struct xe_ptw *staging[XE_PDES];
};
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM)
@@ -48,9 +50,10 @@ static struct xe_pt_dir *as_xe_pt_dir(struct xe_pt *pt)
return container_of(pt, struct xe_pt_dir, pt);
}
-static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
+static struct xe_pt *
+xe_pt_entry_staging(struct xe_pt_dir *pt_dir, unsigned int index)
{
- return container_of(pt_dir->children[index], struct xe_pt, base);
+ return container_of(pt_dir->staging[index], struct xe_pt, base);
}
static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
@@ -125,6 +128,7 @@ struct xe_pt *xe_pt_create(struct xe_vm *vm, struct xe_tile *tile,
}
pt->bo = bo;
pt->base.children = level ? as_xe_pt_dir(pt)->children : NULL;
+ pt->base.staging = level ? as_xe_pt_dir(pt)->staging : NULL;
if (vm->xef)
xe_drm_client_add_bo(vm->xef->client, pt->bo);
@@ -206,8 +210,8 @@ void xe_pt_destroy(struct xe_pt *pt, u32 flags, struct llist_head *deferred)
struct xe_pt_dir *pt_dir = as_xe_pt_dir(pt);
for (i = 0; i < XE_PDES; i++) {
- if (xe_pt_entry(pt_dir, i))
- xe_pt_destroy(xe_pt_entry(pt_dir, i), flags,
+ if (xe_pt_entry_staging(pt_dir, i))
+ xe_pt_destroy(xe_pt_entry_staging(pt_dir, i), flags,
deferred);
}
}
@@ -376,8 +380,10 @@ xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent,
/* Continue building a non-connected subtree. */
struct iosys_map *map = &parent->bo->vmap;
- if (unlikely(xe_child))
+ if (unlikely(xe_child)) {
parent->base.children[offset] = &xe_child->base;
+ parent->base.staging[offset] = &xe_child->base;
+ }
xe_pt_write(xe_walk->vm->xe, map, offset, pte);
parent->num_live++;
@@ -614,6 +620,7 @@ xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_bind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.vm = xe_vma_vm(vma),
.tile = tile,
@@ -873,7 +880,7 @@ static void xe_pt_cancel_bind(struct xe_vma *vma,
}
}
-static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+static void xe_pt_commit_prepare_locks_assert(struct xe_vma *vma)
{
struct xe_vm *vm = xe_vma_vm(vma);
@@ -885,6 +892,16 @@ static void xe_pt_commit_locks_assert(struct xe_vma *vma)
xe_vm_assert_held(vm);
}
+static void xe_pt_commit_locks_assert(struct xe_vma *vma)
+{
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_pt_commit_prepare_locks_assert(vma);
+
+ if (xe_vma_is_userptr(vma))
+ lockdep_assert_held_read(&vm->userptr.notifier_lock);
+}
+
static void xe_pt_commit(struct xe_vma *vma,
struct xe_vm_pgtable_update *entries,
u32 num_entries, struct llist_head *deferred)
@@ -895,13 +912,17 @@ static void xe_pt_commit(struct xe_vma *vma,
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
+ struct xe_pt_dir *pt_dir;
if (!pt->level)
continue;
+ pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
+ int j_ = j + entries[i].ofs;
+ pt_dir->children[j_] = pt_dir->staging[j_];
xe_pt_destroy(oldpte, xe_vma_vm(vma)->flags, deferred);
}
}
@@ -913,7 +934,7 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_pt *pt = entries[i].pt;
@@ -928,10 +949,10 @@ static void xe_pt_abort_bind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = 0; j < entries[i].qwords; j++) {
u32 j_ = j + entries[i].ofs;
- struct xe_pt *newpte = xe_pt_entry(pt_dir, j_);
+ struct xe_pt *newpte = xe_pt_entry_staging(pt_dir, j_);
struct xe_pt *oldpte = entries[i].pt_entries[j].pt;
- pt_dir->children[j_] = oldpte ? &oldpte->base : 0;
+ pt_dir->staging[j_] = oldpte ? &oldpte->base : 0;
xe_pt_destroy(newpte, xe_vma_vm(vma)->flags, NULL);
}
}
@@ -943,7 +964,7 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
{
u32 i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; i++) {
struct xe_pt *pt = entries[i].pt;
@@ -961,10 +982,10 @@ static void xe_pt_commit_prepare_bind(struct xe_vma *vma,
struct xe_pt *newpte = entries[i].pt_entries[j].pt;
struct xe_pt *oldpte = NULL;
- if (xe_pt_entry(pt_dir, j_))
- oldpte = xe_pt_entry(pt_dir, j_);
+ if (xe_pt_entry_staging(pt_dir, j_))
+ oldpte = xe_pt_entry_staging(pt_dir, j_);
- pt_dir->children[j_] = &newpte->base;
+ pt_dir->staging[j_] = &newpte->base;
entries[i].pt_entries[j].pt = oldpte;
}
}
@@ -1213,42 +1234,22 @@ static int vma_check_userptr(struct xe_vm *vm, struct xe_vma *vma,
return 0;
uvma = to_userptr_vma(vma);
- notifier_seq = uvma->userptr.notifier_seq;
+ if (xe_pt_userptr_inject_eagain(uvma))
+ xe_vma_userptr_force_invalidate(uvma);
- if (uvma->userptr.initial_bind && !xe_vm_in_fault_mode(vm))
- return 0;
+ notifier_seq = uvma->userptr.notifier_seq;
if (!mmu_interval_read_retry(&uvma->userptr.notifier,
- notifier_seq) &&
- !xe_pt_userptr_inject_eagain(uvma))
+ notifier_seq))
return 0;
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm))
return -EAGAIN;
- } else {
- spin_lock(&vm->userptr.invalidated_lock);
- list_move_tail(&uvma->userptr.invalidate_link,
- &vm->userptr.invalidated);
- spin_unlock(&vm->userptr.invalidated_lock);
-
- if (xe_vm_in_preempt_fence_mode(vm)) {
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
- long err;
-
- dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence)
- dma_fence_enable_sw_signaling(fence);
- dma_resv_iter_end(&cursor);
-
- err = dma_resv_wait_timeout(xe_vm_resv(vm),
- DMA_RESV_USAGE_BOOKKEEP,
- false, MAX_SCHEDULE_TIMEOUT);
- XE_WARN_ON(err <= 0);
- }
- }
+ /*
+ * Just continue the operation since exec or rebind worker
+ * will take care of rebinding.
+ */
return 0;
}
@@ -1514,6 +1515,7 @@ static unsigned int xe_pt_stage_unbind(struct xe_tile *tile, struct xe_vma *vma,
.ops = &xe_pt_stage_unbind_ops,
.shifts = xe_normal_pt_shifts,
.max_level = XE_PT_HIGHEST_LEVEL,
+ .staging = true,
},
.tile = tile,
.modified_start = xe_vma_start(vma),
@@ -1555,7 +1557,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = num_entries - 1; i >= 0; --i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1568,7 +1570,7 @@ static void xe_pt_abort_unbind(struct xe_vma *vma,
continue;
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++)
- pt_dir->children[j] =
+ pt_dir->staging[j] =
entries[i].pt_entries[j - entry->ofs].pt ?
&entries[i].pt_entries[j - entry->ofs].pt->base : NULL;
}
@@ -1581,7 +1583,7 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
{
int i, j;
- xe_pt_commit_locks_assert(vma);
+ xe_pt_commit_prepare_locks_assert(vma);
for (i = 0; i < num_entries; ++i) {
struct xe_vm_pgtable_update *entry = &entries[i];
@@ -1595,8 +1597,8 @@ xe_pt_commit_prepare_unbind(struct xe_vma *vma,
pt_dir = as_xe_pt_dir(pt);
for (j = entry->ofs; j < entry->ofs + entry->qwords; j++) {
entry->pt_entries[j - entry->ofs].pt =
- xe_pt_entry(pt_dir, j);
- pt_dir->children[j] = NULL;
+ xe_pt_entry_staging(pt_dir, j);
+ pt_dir->staging[j] = NULL;
}
}
}
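
The new staging[] array gives the bind path a prepare/commit split: pending entries are built up in staging while walkers servicing the live page tables keep seeing children[], and the commit step publishes staging into children once the update is known to land. Stripped of the xe specifics, the shape is roughly the following (hypothetical example_dir type):

/* Hypothetical two-phase page-directory update, mirroring the staging idea. */
struct example_dir {
	void *children[512];	/* what normal walks currently see */
	void *staging[512];	/* what the pending update will install */
};

static void example_prepare(struct example_dir *dir, unsigned int idx,
			    void *new_entry, void **old_entry)
{
	*old_entry = dir->staging[idx];	/* remember for abort/free */
	dir->staging[idx] = new_entry;	/* visible only to the update walk */
}

static void example_commit(struct example_dir *dir, unsigned int idx)
{
	dir->children[idx] = dir->staging[idx];	/* publish to normal walks */
}

static void example_abort(struct example_dir *dir, unsigned int idx,
			  void *old_entry)
{
	dir->staging[idx] = old_entry;	/* roll back, children[] untouched */
}
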
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.c b/drivers/gpu/drm/xe/xe_pt_walk.c
index b8b3d2aea492..be602a763ff3 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.c
+++ b/drivers/gpu/drm/xe/xe_pt_walk.c
@@ -74,7 +74,8 @@ int xe_pt_walk_range(struct xe_ptw *parent, unsigned int level,
u64 addr, u64 end, struct xe_pt_walk *walk)
{
pgoff_t offset = xe_pt_offset(addr, level, walk);
- struct xe_ptw **entries = parent->children ? parent->children : NULL;
+ struct xe_ptw **entries = walk->staging ? (parent->staging ?: NULL) :
+ (parent->children ?: NULL);
const struct xe_pt_walk_ops *ops = walk->ops;
enum page_walk_action action;
struct xe_ptw *child;
diff --git a/drivers/gpu/drm/xe/xe_pt_walk.h b/drivers/gpu/drm/xe/xe_pt_walk.h
index 5ecc4d2f0f65..5c02c244f7de 100644
--- a/drivers/gpu/drm/xe/xe_pt_walk.h
+++ b/drivers/gpu/drm/xe/xe_pt_walk.h
@@ -11,12 +11,14 @@
/**
* struct xe_ptw - base class for driver pagetable subclassing.
* @children: Pointer to an array of children if any.
+ * @staging: Pointer to an array of staging if any.
*
* Drivers could subclass this, and if it's a page-directory, typically
* embed an array of xe_ptw pointers.
*/
struct xe_ptw {
struct xe_ptw **children;
+ struct xe_ptw **staging;
};
/**
@@ -41,6 +43,8 @@ struct xe_pt_walk {
* as shared pagetables.
*/
bool shared_pt_mode;
+ /** @staging: Walk staging PT structure */
+ bool staging;
};
/**
diff --git a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
index 423856cc18d4..d414421f8c13 100644
--- a/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
+++ b/drivers/gpu/drm/xe/xe_ttm_stolen_mgr.c
@@ -57,12 +57,35 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
return GRAPHICS_VERx100(xe) < 1270 && !IS_DGFX(xe);
}
+static u32 get_wopcm_size(struct xe_device *xe)
+{
+ u32 wopcm_size;
+ u64 val;
+
+ val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
+ val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
+
+ switch (val) {
+ case 0x5 ... 0x6:
+ val--;
+ fallthrough;
+ case 0x0 ... 0x3:
+ wopcm_size = (1U << val) * SZ_1M;
+ break;
+ default:
+ WARN(1, "Missing case wopcm_size=%llx\n", val);
+ wopcm_size = 0;
+ }
+
+ return wopcm_size;
+}
+
static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct xe_tile *tile = xe_device_get_root_tile(xe);
struct xe_mmio *mmio = xe_root_tile_mmio(xe);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
- u64 stolen_size;
+ u64 stolen_size, wopcm_size;
u64 tile_offset;
u64 tile_size;
@@ -74,7 +97,13 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
if (drm_WARN_ON(&xe->drm, tile_size < mgr->stolen_base))
return 0;
+ /* Carve out the top of DSM as it contains the reserved WOPCM region */
+ wopcm_size = get_wopcm_size(xe);
+ if (drm_WARN_ON(&xe->drm, !wopcm_size))
+ return 0;
+
stolen_size = tile_size - mgr->stolen_base;
+ stolen_size -= wopcm_size;
/* Verify usage fits in the actual resource available */
if (mgr->stolen_base + stolen_size <= pci_resource_len(pdev, LMEM_BAR))
@@ -89,29 +118,6 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
return ALIGN_DOWN(stolen_size, SZ_1M);
}
-static u32 get_wopcm_size(struct xe_device *xe)
-{
- u32 wopcm_size;
- u64 val;
-
- val = xe_mmio_read64_2x32(xe_root_tile_mmio(xe), STOLEN_RESERVED);
- val = REG_FIELD_GET64(WOPCM_SIZE_MASK, val);
-
- switch (val) {
- case 0x5 ... 0x6:
- val--;
- fallthrough;
- case 0x0 ... 0x3:
- wopcm_size = (1U << val) * SZ_1M;
- break;
- default:
- WARN(1, "Missing case wopcm_size=%llx\n", val);
- wopcm_size = 0;
- }
-
- return wopcm_size;
-}
-
static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
{
struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 690330352d4c..ec6ec18ab3fa 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -579,51 +579,26 @@ out_unlock_outer:
trace_xe_vm_rebind_worker_exit(vm);
}
-static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
- const struct mmu_notifier_range *range,
- unsigned long cur_seq)
+static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
{
- struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
- struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
+ struct xe_userptr *userptr = &uvma->userptr;
struct xe_vma *vma = &uvma->vma;
- struct xe_vm *vm = xe_vma_vm(vma);
struct dma_resv_iter cursor;
struct dma_fence *fence;
long err;
- xe_assert(vm->xe, xe_vma_is_userptr(vma));
- trace_xe_vma_userptr_invalidate(vma);
-
- if (!mmu_notifier_range_blockable(range))
- return false;
-
- vm_dbg(&xe_vma_vm(vma)->xe->drm,
- "NOTIFIER: addr=0x%016llx, range=0x%016llx",
- xe_vma_start(vma), xe_vma_size(vma));
-
- down_write(&vm->userptr.notifier_lock);
- mmu_interval_set_seq(mni, cur_seq);
-
- /* No need to stop gpu access if the userptr is not yet bound. */
- if (!userptr->initial_bind) {
- up_write(&vm->userptr.notifier_lock);
- return true;
- }
-
/*
* Tell exec and rebind worker they need to repin and rebind this
* userptr.
*/
if (!xe_vm_in_fault_mode(vm) &&
- !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
+ !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
spin_lock(&vm->userptr.invalidated_lock);
list_move_tail(&userptr->invalidate_link,
&vm->userptr.invalidated);
spin_unlock(&vm->userptr.invalidated_lock);
}
- up_write(&vm->userptr.notifier_lock);
-
/*
* Preempt fences turn into schedule disables, pipeline these.
* Note that even in fault mode, we need to wait for binds and
@@ -641,11 +616,37 @@ static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
false, MAX_SCHEDULE_TIMEOUT);
XE_WARN_ON(err <= 0);
- if (xe_vm_in_fault_mode(vm)) {
+ if (xe_vm_in_fault_mode(vm) && userptr->initial_bind) {
err = xe_vm_invalidate_vma(vma);
XE_WARN_ON(err);
}
+ xe_hmm_userptr_unmap(uvma);
+}
+
+static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq)
+{
+ struct xe_userptr_vma *uvma = container_of(mni, typeof(*uvma), userptr.notifier);
+ struct xe_vma *vma = &uvma->vma;
+ struct xe_vm *vm = xe_vma_vm(vma);
+
+ xe_assert(vm->xe, xe_vma_is_userptr(vma));
+ trace_xe_vma_userptr_invalidate(vma);
+
+ if (!mmu_notifier_range_blockable(range))
+ return false;
+
+ vm_dbg(&xe_vma_vm(vma)->xe->drm,
+ "NOTIFIER: addr=0x%016llx, range=0x%016llx",
+ xe_vma_start(vma), xe_vma_size(vma));
+
+ down_write(&vm->userptr.notifier_lock);
+ mmu_interval_set_seq(mni, cur_seq);
+
+ __vma_userptr_invalidate(vm, uvma);
+ up_write(&vm->userptr.notifier_lock);
trace_xe_vma_userptr_invalidate_complete(vma);
return true;
@@ -655,6 +656,34 @@ static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
.invalidate = vma_userptr_invalidate,
};
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+/**
+ * xe_vma_userptr_force_invalidate() - force invalidate a userptr
+ * @uvma: The userptr vma to invalidate
+ *
+ * Perform a forced userptr invalidation for testing purposes.
+ */
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+ struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+ /* Protect against concurrent userptr pinning */
+ lockdep_assert_held(&vm->lock);
+ /* Protect against concurrent notifiers */
+ lockdep_assert_held(&vm->userptr.notifier_lock);
+ /*
+ * Protect against concurrent instances of this function and
+ * the critical exec sections
+ */
+ xe_vm_assert_held(vm);
+
+ if (!mmu_interval_read_retry(&uvma->userptr.notifier,
+ uvma->userptr.notifier_seq))
+ uvma->userptr.notifier_seq -= 2;
+ __vma_userptr_invalidate(vm, uvma);
+}
+#endif
+
int xe_vm_userptr_pin(struct xe_vm *vm)
{
struct xe_userptr_vma *uvma, *next;
@@ -666,20 +695,33 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
/* Collect invalidated userptrs */
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));
list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
userptr.invalidate_link) {
list_del_init(&uvma->userptr.invalidate_link);
- list_move_tail(&uvma->userptr.repin_link,
- &vm->userptr.repin_list);
+ list_add_tail(&uvma->userptr.repin_link,
+ &vm->userptr.repin_list);
}
spin_unlock(&vm->userptr.invalidated_lock);
- /* Pin and move to temporary list */
+ /* Pin and move to bind list */
list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
userptr.repin_link) {
err = xe_vma_userptr_pin_pages(uvma);
if (err == -EFAULT) {
list_del_init(&uvma->userptr.repin_link);
+ /*
+			 * We might have already done the pin once, but
+			 * then had to retry before the re-bind happened, due
+			 * to some other condition in the caller. In the
+			 * meantime the userptr got invalidated by the notifier,
+			 * so we need to revalidate here, but this time we hit
+ * the EFAULT. In such a case make sure we remove
+ * ourselves from the rebind list to avoid going down in
+ * flames.
+ */
+ if (!list_empty(&uvma->vma.combined_links.rebind))
+ list_del_init(&uvma->vma.combined_links.rebind);
/* Wait for pending binds */
xe_vm_lock(vm, false);
@@ -690,10 +732,10 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
err = xe_vm_invalidate_vma(&uvma->vma);
xe_vm_unlock(vm);
if (err)
- return err;
+ break;
} else {
- if (err < 0)
- return err;
+ if (err)
+ break;
list_del_init(&uvma->userptr.repin_link);
list_move_tail(&uvma->vma.combined_links.rebind,
@@ -701,7 +743,19 @@ int xe_vm_userptr_pin(struct xe_vm *vm)
}
}
- return 0;
+ if (err) {
+ down_write(&vm->userptr.notifier_lock);
+ spin_lock(&vm->userptr.invalidated_lock);
+ list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
+ userptr.repin_link) {
+ list_del_init(&uvma->userptr.repin_link);
+ list_move_tail(&uvma->userptr.invalidate_link,
+ &vm->userptr.invalidated);
+ }
+ spin_unlock(&vm->userptr.invalidated_lock);
+ up_write(&vm->userptr.notifier_lock);
+ }
+ return err;
}
/**
@@ -987,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
INIT_LIST_HEAD(&userptr->invalidate_link);
INIT_LIST_HEAD(&userptr->repin_link);
vma->gpuva.gem.offset = bo_offset_or_userptr;
+ mutex_init(&userptr->unmap_mutex);
err = mmu_interval_notifier_insert(&userptr->notifier,
current->mm,
@@ -1028,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
* them anymore
*/
mmu_interval_notifier_remove(&userptr->notifier);
+ mutex_destroy(&userptr->unmap_mutex);
xe_vm_put(vm);
} else if (xe_vma_is_null(vma)) {
xe_vm_put(vm);
@@ -1066,6 +1122,7 @@ static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);
spin_lock(&vm->userptr.invalidated_lock);
+ xe_assert(vm->xe, list_empty(&to_userptr_vma(vma)->userptr.repin_link));
list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
spin_unlock(&vm->userptr.invalidated_lock);
} else if (!xe_vma_is_null(vma)) {
@@ -2260,8 +2317,17 @@ static int vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct drm_gpuva_ops *ops,
break;
}
case DRM_GPUVA_OP_UNMAP:
+ xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
+ break;
case DRM_GPUVA_OP_PREFETCH:
- /* FIXME: Need to skip some prefetch ops */
+ vma = gpuva_to_vma(op->base.prefetch.va);
+
+ if (xe_vma_is_userptr(vma)) {
+ err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
+ if (err)
+ return err;
+ }
+
xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask);
break;
default:
diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h
index 23adb7442881..b882bfb31bd0 100644
--- a/drivers/gpu/drm/xe/xe_vm.h
+++ b/drivers/gpu/drm/xe/xe_vm.h
@@ -274,9 +274,17 @@ static inline void vm_dbg(const struct drm_device *dev,
const char *format, ...)
{ /* noop */ }
#endif
-#endif
struct xe_vm_snapshot *xe_vm_snapshot_capture(struct xe_vm *vm);
void xe_vm_snapshot_capture_delayed(struct xe_vm_snapshot *snap);
void xe_vm_snapshot_print(struct xe_vm_snapshot *snap, struct drm_printer *p);
void xe_vm_snapshot_free(struct xe_vm_snapshot *snap);
+
+#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
+void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma);
+#else
+static inline void xe_vma_userptr_force_invalidate(struct xe_userptr_vma *uvma)
+{
+}
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h
index 7f9a303e51d8..a4b4091cfd0d 100644
--- a/drivers/gpu/drm/xe/xe_vm_types.h
+++ b/drivers/gpu/drm/xe/xe_vm_types.h
@@ -59,12 +59,16 @@ struct xe_userptr {
struct sg_table *sg;
/** @notifier_seq: notifier sequence number */
unsigned long notifier_seq;
+ /** @unmap_mutex: Mutex protecting dma-unmapping */
+ struct mutex unmap_mutex;
/**
* @initial_bind: user pointer has been bound at least once.
* write: vm->userptr.notifier_lock in read mode and vm->resv held.
* read: vm->userptr.notifier_lock in write mode or vm->resv held.
*/
bool initial_bind;
+	/** @mapped: Whether the @sg sg-table is dma-mapped. Protected by @unmap_mutex. */
+ bool mapped;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
u32 divisor;
#endif
@@ -227,8 +231,8 @@ struct xe_vm {
* up for revalidation. Protected from access with the
* @invalidated_lock. Removing items from the list
* additionally requires @lock in write mode, and adding
- * items to the list requires the @userptr.notifer_lock in
- * write mode.
+		 * items to the list requires either the @userptr.notifier_lock in
+ * write mode, OR @lock in write mode.
*/
struct list_head invalidated;
} userptr;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 7b1d091f3c09..46cae925b095 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -619,6 +619,8 @@ static int host1x_probe(struct platform_device *pdev)
goto free_contexts;
}
+ mutex_init(&host->intr_mutex);
+
pm_runtime_enable(&pdev->dev);
err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index b3285dd10180..f77a678949e9 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -104,8 +104,6 @@ int host1x_intr_init(struct host1x *host)
unsigned int id;
int i, err;
- mutex_init(&host->intr_mutex);
-
for (id = 0; id < host1x_syncpt_nb_pts(host); ++id) {
struct host1x_syncpt *syncpt = &host->syncpt[id];
diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c
index 473ac3f2d382..da31f1131afc 100644
--- a/drivers/greybus/gb-beagleplay.c
+++ b/drivers/greybus/gb-beagleplay.c
@@ -912,7 +912,9 @@ static enum fw_upload_err cc1352_prepare(struct fw_upload *fw_upload,
cc1352_bootloader_reset(bg);
WRITE_ONCE(bg->flashing_mode, false);
msleep(200);
- gb_greybus_init(bg);
+ if (gb_greybus_init(bg) < 0)
+ return dev_err_probe(&bg->sd->dev, FW_UPLOAD_ERR_RW_ERROR,
+ "Failed to initialize greybus");
gb_beagleplay_start_svc(bg);
return FW_UPLOAD_ERR_FW_INVALID;
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index b53eb569bd49..dfc245867a46 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -570,6 +570,8 @@ config HID_LED
config HID_LENOVO
tristate "Lenovo / Thinkpad devices"
+ depends on ACPI
+ select ACPI_PLATFORM_PROFILE
select NEW_LEDS
select LEDS_CLASS
help
@@ -1167,7 +1169,8 @@ config HID_TOPRE
tristate "Topre REALFORCE keyboards"
depends on HID
help
- Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key keyboards.
+ Say Y for N-key rollover support on Topre REALFORCE R2 108/87 key and
+ Topre REALFORCE R3S 87 key keyboards.
config HID_THINGM
tristate "ThingM blink(1) USB RGB LED"
@@ -1374,10 +1377,6 @@ endmenu
source "drivers/hid/bpf/Kconfig"
-endif # HID
-
-source "drivers/hid/usbhid/Kconfig"
-
source "drivers/hid/i2c-hid/Kconfig"
source "drivers/hid/intel-ish-hid/Kconfig"
@@ -1388,4 +1387,10 @@ source "drivers/hid/surface-hid/Kconfig"
source "drivers/hid/intel-thc-hid/Kconfig"
+endif # HID
+
+# USB support may be used with HID disabled
+
+source "drivers/hid/usbhid/Kconfig"
+
endif # HID_SUPPORT
diff --git a/drivers/hid/amd-sfh-hid/Kconfig b/drivers/hid/amd-sfh-hid/Kconfig
index 329de5e12c1a..3291786a5ee6 100644
--- a/drivers/hid/amd-sfh-hid/Kconfig
+++ b/drivers/hid/amd-sfh-hid/Kconfig
@@ -5,7 +5,6 @@ menu "AMD SFH HID Support"
config AMD_SFH_HID
tristate "AMD Sensor Fusion Hub"
- depends on HID
depends on X86
help
If you say yes to this option, support will be included for the
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 7e1ae2a2bcc2..d900dd05c335 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -378,6 +378,12 @@ static bool apple_is_non_apple_keyboard(struct hid_device *hdev)
return false;
}
+static bool apple_is_omoton_kb066(struct hid_device *hdev)
+{
+ return hdev->product == USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI &&
+ strcmp(hdev->name, "Bluetooth Keyboard") == 0;
+}
+
static inline void apple_setup_key_translation(struct input_dev *input,
const struct apple_key_translation *table)
{
@@ -474,6 +480,7 @@ static int hidinput_apple_event(struct hid_device *hid, struct input_dev *input,
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2015)
table = magic_keyboard_2015_fn_keys;
else if (hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 ||
+ hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 ||
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 ||
hid->product == USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021)
table = apple2021_fn_keys;
@@ -724,7 +731,7 @@ static int apple_input_configured(struct hid_device *hdev,
{
struct apple_sc *asc = hid_get_drvdata(hdev);
- if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+ if (((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) || apple_is_omoton_kb066(hdev)) {
hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
asc->quirks &= ~APPLE_HAS_FN;
}
@@ -1150,6 +1157,10 @@ static const struct hid_device_id apple_devices[] = {
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024),
+ .driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
.driver_data = APPLE_HAS_FN | APPLE_ISO_TILDE_QUIRK | APPLE_RDESC_BATTERY },
{ HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021),
diff --git a/drivers/hid/hid-appleir.c b/drivers/hid/hid-appleir.c
index 8deded185725..c45e5aa569d2 100644
--- a/drivers/hid/hid-appleir.c
+++ b/drivers/hid/hid-appleir.c
@@ -188,7 +188,7 @@ static int appleir_raw_event(struct hid_device *hid, struct hid_report *report,
static const u8 flatbattery[] = { 0x25, 0x87, 0xe0 };
unsigned long flags;
- if (len != 5)
+ if (len != 5 || !(hid->claimed & HID_CLAIMED_INPUT))
goto out;
if (!memcmp(data, keydown, sizeof(keydown))) {
diff --git a/drivers/hid/hid-corsair-void.c b/drivers/hid/hid-corsair-void.c
index 6ece56b850fc..afbd67aa9719 100644
--- a/drivers/hid/hid-corsair-void.c
+++ b/drivers/hid/hid-corsair-void.c
@@ -71,11 +71,9 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
-#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/power_supply.h>
#include <linux/usb.h>
#include <linux/workqueue.h>
@@ -120,6 +118,12 @@ enum {
CORSAIR_VOID_BATTERY_CHARGING = 5,
};
+enum {
+ CORSAIR_VOID_ADD_BATTERY = 0,
+ CORSAIR_VOID_REMOVE_BATTERY = 1,
+ CORSAIR_VOID_UPDATE_BATTERY = 2,
+};
+
static enum power_supply_property corsair_void_battery_props[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
@@ -155,12 +159,12 @@ struct corsair_void_drvdata {
struct power_supply *battery;
struct power_supply_desc battery_desc;
- struct mutex battery_mutex;
struct delayed_work delayed_status_work;
struct delayed_work delayed_firmware_work;
- struct work_struct battery_remove_work;
- struct work_struct battery_add_work;
+
+ unsigned long battery_work_flags;
+ struct work_struct battery_work;
};
/*
@@ -260,11 +264,9 @@ success:
/* Inform power supply if battery values changed */
if (memcmp(&orig_battery_data, battery_data, sizeof(*battery_data))) {
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_changed(drvdata->battery);
- }
- }
+ set_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
}
}
@@ -536,29 +538,11 @@ static void corsair_void_firmware_work_handler(struct work_struct *work)
}
-static void corsair_void_battery_remove_work_handler(struct work_struct *work)
-{
- struct corsair_void_drvdata *drvdata;
-
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_remove_work);
- scoped_guard(mutex, &drvdata->battery_mutex) {
- if (drvdata->battery) {
- power_supply_unregister(drvdata->battery);
- drvdata->battery = NULL;
- }
- }
-}
-
-static void corsair_void_battery_add_work_handler(struct work_struct *work)
+static void corsair_void_add_battery(struct corsair_void_drvdata *drvdata)
{
- struct corsair_void_drvdata *drvdata;
- struct power_supply_config psy_cfg;
+ struct power_supply_config psy_cfg = {};
struct power_supply *new_supply;
- drvdata = container_of(work, struct corsair_void_drvdata,
- battery_add_work);
- guard(mutex)(&drvdata->battery_mutex);
if (drvdata->battery)
return;
@@ -583,16 +567,42 @@ static void corsair_void_battery_add_work_handler(struct work_struct *work)
drvdata->battery = new_supply;
}
+static void corsair_void_battery_work_handler(struct work_struct *work)
+{
+ struct corsair_void_drvdata *drvdata = container_of(work,
+ struct corsair_void_drvdata, battery_work);
+
+ bool add_battery = test_and_clear_bit(CORSAIR_VOID_ADD_BATTERY,
+ &drvdata->battery_work_flags);
+ bool remove_battery = test_and_clear_bit(CORSAIR_VOID_REMOVE_BATTERY,
+ &drvdata->battery_work_flags);
+ bool update_battery = test_and_clear_bit(CORSAIR_VOID_UPDATE_BATTERY,
+ &drvdata->battery_work_flags);
+
+ if (add_battery && !remove_battery) {
+ corsair_void_add_battery(drvdata);
+ } else if (remove_battery && !add_battery && drvdata->battery) {
+ power_supply_unregister(drvdata->battery);
+ drvdata->battery = NULL;
+ }
+
+ if (update_battery && drvdata->battery)
+ power_supply_changed(drvdata->battery);
+
+}
+
static void corsair_void_headset_connected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_add_work);
+ set_bit(CORSAIR_VOID_ADD_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
schedule_delayed_work(&drvdata->delayed_firmware_work,
msecs_to_jiffies(100));
}
static void corsair_void_headset_disconnected(struct corsair_void_drvdata *drvdata)
{
- schedule_work(&drvdata->battery_remove_work);
+ set_bit(CORSAIR_VOID_REMOVE_BATTERY, &drvdata->battery_work_flags);
+ schedule_work(&drvdata->battery_work);
corsair_void_set_unknown_wireless_data(drvdata);
corsair_void_set_unknown_batt(drvdata);
@@ -678,13 +688,7 @@ static int corsair_void_probe(struct hid_device *hid_dev,
drvdata->battery_desc.get_property = corsair_void_battery_get_property;
drvdata->battery = NULL;
- INIT_WORK(&drvdata->battery_remove_work,
- corsair_void_battery_remove_work_handler);
- INIT_WORK(&drvdata->battery_add_work,
- corsair_void_battery_add_work_handler);
- ret = devm_mutex_init(drvdata->dev, &drvdata->battery_mutex);
- if (ret)
- return ret;
+ INIT_WORK(&drvdata->battery_work, corsair_void_battery_work_handler);
ret = sysfs_create_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
if (ret)
@@ -721,11 +725,11 @@ static void corsair_void_remove(struct hid_device *hid_dev)
struct corsair_void_drvdata *drvdata = hid_get_drvdata(hid_dev);
hid_hw_stop(hid_dev);
- cancel_work_sync(&drvdata->battery_remove_work);
- cancel_work_sync(&drvdata->battery_add_work);
+ cancel_work_sync(&drvdata->battery_work);
if (drvdata->battery)
power_supply_unregister(drvdata->battery);
+ cancel_delayed_work_sync(&drvdata->delayed_status_work);
cancel_delayed_work_sync(&drvdata->delayed_firmware_work);
sysfs_remove_group(&hid_dev->dev.kobj, &corsair_void_attr_group);
}
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 541d682af15a..8433306148d5 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -3450,7 +3450,7 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_MACRO_RECORD_START] = "MacroRecordStart",
[KEY_MACRO_RECORD_STOP] = "MacroRecordStop",
[KEY_MARK_WAYPOINT] = "MarkWayPoint", [KEY_MEDIA_REPEAT] = "MediaRepeat",
- [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messanger",
+ [KEY_MEDIA_TOP_MENU] = "MediaTopMenu", [KEY_MESSENGER] = "Messenger",
[KEY_NAV_CHART] = "NavChar", [KEY_NAV_INFO] = "NavInfo",
[KEY_NEWS] = "News", [KEY_NEXT_ELEMENT] = "NextElement",
[KEY_NEXT_FAVORITE] = "NextFavorite", [KEY_NOTIFICATION_CENTER] = "NotificationCenter",
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 0f292b5d3e26..eb6fd2dc75d0 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -268,11 +268,13 @@ static void cbas_ec_remove(struct platform_device *pdev)
mutex_unlock(&cbas_ec_reglock);
}
+#ifdef CONFIG_ACPI
static const struct acpi_device_id cbas_ec_acpi_ids[] = {
{ "GOOG000B", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, cbas_ec_acpi_ids);
+#endif
#ifdef CONFIG_OF
static const struct of_device_id cbas_ec_of_match[] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index c448de53bf91..7e400624908e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -184,6 +184,7 @@
#define USB_DEVICE_ID_APPLE_IRCONTROL4 0x8242
#define USB_DEVICE_ID_APPLE_IRCONTROL5 0x8243
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2021 0x029c
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_2024 0x0320
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_FINGERPRINT_2021 0x029a
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_2021 0x029f
#define USB_DEVICE_ID_APPLE_TOUCHBAR_BACKLIGHT 0x8102
@@ -1095,6 +1096,7 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3003 0x3003
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
+#define USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473 0x5473
#define I2C_VENDOR_ID_RAYDIUM 0x2386
#define I2C_PRODUCT_ID_RAYDIUM_4B33 0x4b33
@@ -1301,6 +1303,7 @@
#define USB_VENDOR_ID_TOPRE 0x0853
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_108 0x0148
#define USB_DEVICE_ID_TOPRE_REALFORCE_R2_87 0x0146
+#define USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87 0x0313
#define USB_VENDOR_ID_TOPSEED 0x0766
#define USB_DEVICE_ID_TOPSEED_CYBERLINK 0x0204
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 4d00bc4d656e..a7d9ca02779e 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -32,9 +32,7 @@
#include <linux/leds.h>
#include <linux/workqueue.h>
-#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
#include <linux/platform_profile.h>
-#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
#include "hid-ids.h"
@@ -730,13 +728,10 @@ static int lenovo_raw_event_TP_X12_tab(struct hid_device *hdev, u32 raw_data)
if (hdev->product == USB_DEVICE_ID_LENOVO_X12_TAB) {
report_key_event(input, KEY_RFKILL);
return 1;
- }
-#if IS_ENABLED(CONFIG_ACPI_PLATFORM_PROFILE)
- else {
+ } else {
platform_profile_cycle();
return 1;
}
-#endif /* CONFIG_ACPI_PLATFORM_PROFILE */
return 0;
case TP_X12_RAW_HOTKEY_FN_F10:
/* TAB1 has PICKUP Phone and TAB2 use Snipping tool*/
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 82900857bfd8..e50887a6d22c 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1679,9 +1679,12 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
break;
}
- if (suffix)
+ if (suffix) {
hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
"%s %s", hdev->name, suffix);
+ if (!hi->input->name)
+ return -ENOMEM;
+ }
return 0;
}
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
index 11ac246176ae..839d5bcd72b1 100644
--- a/drivers/hid/hid-nintendo.c
+++ b/drivers/hid/hid-nintendo.c
@@ -457,13 +457,13 @@ static const struct joycon_ctlr_button_mapping snescon_button_mappings[] = {
};
static const struct joycon_ctlr_button_mapping gencon_button_mappings[] = {
- { BTN_A, JC_BTN_A, },
- { BTN_B, JC_BTN_B, },
- { BTN_C, JC_BTN_R, },
- { BTN_X, JC_BTN_X, }, /* MD/GEN 6B Only */
- { BTN_Y, JC_BTN_Y, }, /* MD/GEN 6B Only */
- { BTN_Z, JC_BTN_L, }, /* MD/GEN 6B Only */
- { BTN_SELECT, JC_BTN_ZR, },
+ { BTN_WEST, JC_BTN_A, }, /* A */
+ { BTN_SOUTH, JC_BTN_B, }, /* B */
+ { BTN_EAST, JC_BTN_R, }, /* C */
+ { BTN_TL, JC_BTN_X, }, /* X MD/GEN 6B Only */
+ { BTN_NORTH, JC_BTN_Y, }, /* Y MD/GEN 6B Only */
+ { BTN_TR, JC_BTN_L, }, /* Z MD/GEN 6B Only */
+ { BTN_SELECT, JC_BTN_ZR, }, /* Mode */
{ BTN_START, JC_BTN_PLUS, },
{ BTN_MODE, JC_BTN_HOME, },
{ BTN_Z, JC_BTN_CAP, },
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index e0bbf0c6345d..5d7a418ccdbe 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -891,6 +891,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_DPAD) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) },
{ }
};
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index af38fc8eb34f..10460b7bde1a 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -313,6 +313,7 @@ struct steam_device {
u16 rumble_left;
u16 rumble_right;
unsigned int sensor_timestamp_us;
+ struct work_struct unregister_work;
};
static int steam_recv_report(struct steam_device *steam,
@@ -1050,10 +1051,10 @@ static void steam_mode_switch_cb(struct work_struct *work)
struct steam_device, mode_switch);
unsigned long flags;
bool client_opened;
- steam->gamepad_mode = !steam->gamepad_mode;
if (!lizard_mode)
return;
+ steam->gamepad_mode = !steam->gamepad_mode;
if (steam->gamepad_mode)
steam_set_lizard_mode(steam, false);
else {
@@ -1072,6 +1073,31 @@ static void steam_mode_switch_cb(struct work_struct *work)
}
}
+static void steam_work_unregister_cb(struct work_struct *work)
+{
+ struct steam_device *steam = container_of(work, struct steam_device,
+ unregister_work);
+ unsigned long flags;
+ bool connected;
+ bool opened;
+
+ spin_lock_irqsave(&steam->lock, flags);
+ opened = steam->client_opened;
+ connected = steam->connected;
+ spin_unlock_irqrestore(&steam->lock, flags);
+
+ if (connected) {
+ if (opened) {
+ steam_sensors_unregister(steam);
+ steam_input_unregister(steam);
+ } else {
+ steam_set_lizard_mode(steam, lizard_mode);
+ steam_input_register(steam);
+ steam_sensors_register(steam);
+ }
+ }
+}
+
static bool steam_is_valve_interface(struct hid_device *hdev)
{
struct hid_report_enum *rep_enum;
@@ -1117,8 +1143,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
steam->client_opened++;
spin_unlock_irqrestore(&steam->lock, flags);
- steam_sensors_unregister(steam);
- steam_input_unregister(steam);
+ schedule_work(&steam->unregister_work);
return 0;
}
@@ -1135,11 +1160,7 @@ static void steam_client_ll_close(struct hid_device *hdev)
connected = steam->connected && !steam->client_opened;
spin_unlock_irqrestore(&steam->lock, flags);
- if (connected) {
- steam_set_lizard_mode(steam, lizard_mode);
- steam_input_register(steam);
- steam_sensors_register(steam);
- }
+ schedule_work(&steam->unregister_work);
}
static int steam_client_ll_raw_request(struct hid_device *hdev,
@@ -1231,6 +1252,7 @@ static int steam_probe(struct hid_device *hdev,
INIT_LIST_HEAD(&steam->list);
INIT_WORK(&steam->rumble_work, steam_haptic_rumble_cb);
steam->sensor_timestamp_us = 0;
+ INIT_WORK(&steam->unregister_work, steam_work_unregister_cb);
/*
* With the real steam controller interface, do not connect hidraw.
@@ -1291,6 +1313,7 @@ err_cancel_work:
cancel_work_sync(&steam->work_connect);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->rumble_work);
+ cancel_work_sync(&steam->unregister_work);
return ret;
}
@@ -1304,10 +1327,11 @@ static void steam_remove(struct hid_device *hdev)
return;
}
+ hid_destroy_device(steam->client_hdev);
cancel_delayed_work_sync(&steam->mode_switch);
cancel_work_sync(&steam->work_connect);
cancel_work_sync(&steam->rumble_work);
- hid_destroy_device(steam->client_hdev);
+ cancel_work_sync(&steam->unregister_work);
steam->client_hdev = NULL;
steam->client_opened = 0;
if (steam->quirks & STEAM_QUIRK_WIRELESS) {
@@ -1593,13 +1617,13 @@ static void steam_do_deck_input_event(struct steam_device *steam,
if (!(b9 & BIT(6)) && steam->did_mode_switch) {
steam->did_mode_switch = false;
- cancel_delayed_work_sync(&steam->mode_switch);
+ cancel_delayed_work(&steam->mode_switch);
} else if (!steam->client_opened && (b9 & BIT(6)) && !steam->did_mode_switch) {
steam->did_mode_switch = true;
schedule_delayed_work(&steam->mode_switch, 45 * HZ / 100);
}
- if (!steam->gamepad_mode)
+ if (!steam->gamepad_mode && lizard_mode)
return;
lpad_touched = b10 & BIT(3);
@@ -1669,7 +1693,7 @@ static void steam_do_deck_sensors_event(struct steam_device *steam,
*/
steam->sensor_timestamp_us += 4000;
- if (!steam->gamepad_mode)
+ if (!steam->gamepad_mode && lizard_mode)
return;
input_event(sensors, EV_MSC, MSC_TIMESTAMP, steam->sensor_timestamp_us);
diff --git a/drivers/hid/hid-thrustmaster.c b/drivers/hid/hid-thrustmaster.c
index 6c3e758bbb09..3b81468a1df2 100644
--- a/drivers/hid/hid-thrustmaster.c
+++ b/drivers/hid/hid-thrustmaster.c
@@ -171,7 +171,7 @@ static void thrustmaster_interrupts(struct hid_device *hdev)
b_ep = ep->desc.bEndpointAddress;
/* Are the expected endpoints present? */
- u8 ep_addr[1] = {b_ep};
+ u8 ep_addr[2] = {b_ep, 0};
if (!usb_check_int_endpoints(usbif, ep_addr)) {
hid_err(hdev, "Unexpected non-int endpoint\n");
diff --git a/drivers/hid/hid-topre.c b/drivers/hid/hid-topre.c
index 848361f6225d..ccedf8721722 100644
--- a/drivers/hid/hid-topre.c
+++ b/drivers/hid/hid-topre.c
@@ -29,6 +29,11 @@ static const __u8 *topre_report_fixup(struct hid_device *hdev, __u8 *rdesc,
hid_info(hdev,
"fixing up Topre REALFORCE keyboard report descriptor\n");
rdesc[72] = 0x02;
+ } else if (*rsize >= 106 && rdesc[28] == 0x29 && rdesc[29] == 0xe7 &&
+ rdesc[30] == 0x81 && rdesc[31] == 0x00) {
+ hid_info(hdev,
+ "fixing up Topre REALFORCE keyboard report descriptor\n");
+ rdesc[31] = 0x02;
}
return rdesc;
}
@@ -38,6 +43,8 @@ static const struct hid_device_id topre_id_table[] = {
USB_DEVICE_ID_TOPRE_REALFORCE_R2_108) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
USB_DEVICE_ID_TOPRE_REALFORCE_R2_87) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_TOPRE,
+ USB_DEVICE_ID_TOPRE_REALFORCE_R3S_87) },
{ }
};
MODULE_DEVICE_TABLE(hid, topre_id_table);
diff --git a/drivers/hid/hid-winwing.c b/drivers/hid/hid-winwing.c
index 831b760c66ea..d4afbbd27807 100644
--- a/drivers/hid/hid-winwing.c
+++ b/drivers/hid/hid-winwing.c
@@ -106,6 +106,8 @@ static int winwing_init_led(struct hid_device *hdev,
"%s::%s",
dev_name(&input->dev),
info->led_name);
+ if (!led->cdev.name)
+ return -ENOMEM;
ret = devm_led_classdev_register(&hdev->dev, &led->cdev);
if (ret)
diff --git a/drivers/hid/i2c-hid/Kconfig b/drivers/hid/i2c-hid/Kconfig
index ef7c595c9403..e8d51f410cc1 100644
--- a/drivers/hid/i2c-hid/Kconfig
+++ b/drivers/hid/i2c-hid/Kconfig
@@ -2,7 +2,7 @@
menuconfig I2C_HID
tristate "I2C HID support"
default y
- depends on I2C && INPUT && HID
+ depends on I2C
if I2C_HID
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 75544448c239..d3912e3f2f13 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -290,7 +290,7 @@ static int i2c_hid_get_report(struct i2c_hid *ihid,
ihid->rawbuf, recv_len + sizeof(__le16));
if (error) {
dev_err(&ihid->client->dev,
- "failed to set a report to device: %d\n", error);
+ "failed to get a report from device: %d\n", error);
return error;
}
diff --git a/drivers/hid/intel-ish-hid/Kconfig b/drivers/hid/intel-ish-hid/Kconfig
index 253dc10d35ef..568c8688784e 100644
--- a/drivers/hid/intel-ish-hid/Kconfig
+++ b/drivers/hid/intel-ish-hid/Kconfig
@@ -6,7 +6,6 @@ config INTEL_ISH_HID
tristate "Intel Integrated Sensor Hub"
default n
depends on X86
- depends on HID
help
The Integrated Sensor Hub (ISH) enables the ability to offload
sensor polling and algorithm processing to a dedicated low power
diff --git a/drivers/hid/intel-ish-hid/ipc/hw-ish.h b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
index cdd80c653918..07e90d51f073 100644
--- a/drivers/hid/intel-ish-hid/ipc/hw-ish.h
+++ b/drivers/hid/intel-ish-hid/ipc/hw-ish.h
@@ -36,6 +36,8 @@
#define PCI_DEVICE_ID_INTEL_ISH_ARL_H 0x7745
#define PCI_DEVICE_ID_INTEL_ISH_ARL_S 0x7F78
#define PCI_DEVICE_ID_INTEL_ISH_LNL_M 0xA845
+#define PCI_DEVICE_ID_INTEL_ISH_PTL_H 0xE345
+#define PCI_DEVICE_ID_INTEL_ISH_PTL_P 0xE445
#define REVISION_ID_CHT_A0 0x6
#define REVISION_ID_CHT_Ax_SI 0x0
diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
index 3cd53fc80634..4c861119e97a 100644
--- a/drivers/hid/intel-ish-hid/ipc/ipc.c
+++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
@@ -517,6 +517,10 @@ static int ish_fw_reset_handler(struct ishtp_device *dev)
/* ISH FW is dead */
if (!ish_is_input_ready(dev))
return -EPIPE;
+
+ /* Send clock sync at once after reset */
+ ishtp_dev->prev_sync = 0;
+
/*
* Set HOST2ISH.ILUP. Apparently we need this BEFORE sending
* RESET_NOTIFY_ACK - FW will be checking for it
@@ -577,15 +581,14 @@ static void fw_reset_work_fn(struct work_struct *work)
*/
static void _ish_sync_fw_clock(struct ishtp_device *dev)
{
- static unsigned long prev_sync;
- uint64_t usec;
+ struct ipc_time_update_msg time = {};
- if (prev_sync && time_before(jiffies, prev_sync + 20 * HZ))
+ if (dev->prev_sync && time_before(jiffies, dev->prev_sync + 20 * HZ))
return;
- prev_sync = jiffies;
- usec = ktime_to_us(ktime_get_boottime());
- ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &usec, sizeof(uint64_t));
+ dev->prev_sync = jiffies;
+	/* The time fields are updated while sending the message */
+ ipc_send_mng_msg(dev, MNG_SYNC_FW_CLOCK, &time, sizeof(time));
}
/**
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 9e2401291a2f..ff0fc8010072 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -26,9 +26,11 @@
enum ishtp_driver_data_index {
ISHTP_DRIVER_DATA_NONE,
ISHTP_DRIVER_DATA_LNL_M,
+ ISHTP_DRIVER_DATA_PTL,
};
#define ISH_FW_GEN_LNL_M "lnlm"
+#define ISH_FW_GEN_PTL "ptl"
#define ISH_FIRMWARE_PATH(gen) "intel/ish/ish_" gen ".bin"
#define ISH_FIRMWARE_PATH_ALL "intel/ish/ish_*.bin"
@@ -37,6 +39,9 @@ static struct ishtp_driver_data ishtp_driver_data[] = {
[ISHTP_DRIVER_DATA_LNL_M] = {
.fw_generation = ISH_FW_GEN_LNL_M,
},
+ [ISHTP_DRIVER_DATA_PTL] = {
+ .fw_generation = ISH_FW_GEN_PTL,
+ },
};
static const struct pci_device_id ish_pci_tbl[] = {
@@ -63,6 +68,8 @@ static const struct pci_device_id ish_pci_tbl[] = {
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_H)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_ARL_S)},
{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_LNL_M), .driver_data = ISHTP_DRIVER_DATA_LNL_M},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_H), .driver_data = ISHTP_DRIVER_DATA_PTL},
+ {PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ISH_PTL_P), .driver_data = ISHTP_DRIVER_DATA_PTL},
{}
};
MODULE_DEVICE_TABLE(pci, ish_pci_tbl);
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid-client.c b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
index cb04cd1d980b..6550ad5bfbb5 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid-client.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid-client.c
@@ -832,9 +832,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
hid_ishtp_cl);
dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
- hid_ishtp_cl_deinit(hid_ishtp_cl);
ishtp_put_device(cl_device);
ishtp_hid_remove(client_data);
+ hid_ishtp_cl_deinit(hid_ishtp_cl);
hid_ishtp_cl = NULL;
diff --git a/drivers/hid/intel-ish-hid/ishtp-hid.c b/drivers/hid/intel-ish-hid/ishtp-hid.c
index 00c6f0ebf356..be2c62fc8251 100644
--- a/drivers/hid/intel-ish-hid/ishtp-hid.c
+++ b/drivers/hid/intel-ish-hid/ishtp-hid.c
@@ -261,12 +261,14 @@ err_hid_data:
*/
void ishtp_hid_remove(struct ishtp_cl_data *client_data)
{
+ void *data;
int i;
for (i = 0; i < client_data->num_hid_devices; ++i) {
if (client_data->hid_sensor_hubs[i]) {
- kfree(client_data->hid_sensor_hubs[i]->driver_data);
+ data = client_data->hid_sensor_hubs[i]->driver_data;
hid_destroy_device(client_data->hid_sensor_hubs[i]);
+ kfree(data);
client_data->hid_sensor_hubs[i] = NULL;
}
}
diff --git a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
index 44eddc411e97..ec9f6e87aaf2 100644
--- a/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
+++ b/drivers/hid/intel-ish-hid/ishtp/ishtp-dev.h
@@ -253,6 +253,8 @@ struct ishtp_device {
unsigned int ipc_tx_cnt;
unsigned long long ipc_tx_bytes_cnt;
+ /* Time of the last clock sync */
+ unsigned long prev_sync;
const struct ishtp_hw_ops *ops;
size_t mtu;
uint32_t ishtp_msg_hdr;
diff --git a/drivers/hid/intel-thc-hid/Kconfig b/drivers/hid/intel-thc-hid/Kconfig
index 91ec84902db8..0351d1137607 100644
--- a/drivers/hid/intel-thc-hid/Kconfig
+++ b/drivers/hid/intel-thc-hid/Kconfig
@@ -7,7 +7,6 @@ menu "Intel THC HID Support"
config INTEL_THC_HID
tristate "Intel Touch Host Controller"
depends on ACPI
- select HID
help
THC (Touch Host Controller) is the name of the IP block in PCH that
interfaces with Touch Devices (ex: touchscreen, touchpad etc.). It
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
index 4641e818dfa4..6b2c7620be2b 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/pci-quickspi.c
@@ -909,6 +909,8 @@ static int quickspi_restore(struct device *device)
thc_change_ltr_mode(qsdev->thc_hw, THC_LTR_MODE_ACTIVE);
+ qsdev->state = QUICKSPI_ENABLED;
+
return 0;
}
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
index 7373238ceb18..918050af73e5 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-protocol.c
@@ -107,7 +107,7 @@ static int quickspi_get_device_descriptor(struct quickspi_device *qsdev)
return 0;
}
- dev_err_once(qsdev->dev, "Unexpected intput report type: %d\n", input_rep_type);
+ dev_err_once(qsdev->dev, "Unexpected input report type: %d\n", input_rep_type);
return -EINVAL;
}
diff --git a/drivers/hid/surface-hid/Kconfig b/drivers/hid/surface-hid/Kconfig
index 7ce9b5d641eb..d0cfd0d29926 100644
--- a/drivers/hid/surface-hid/Kconfig
+++ b/drivers/hid/surface-hid/Kconfig
@@ -1,7 +1,6 @@
# SPDX-License-Identifier: GPL-2.0+
menu "Surface System Aggregator Module HID support"
depends on SURFACE_AGGREGATOR
- depends on INPUT
config SURFACE_HID
tristate "HID transport driver for Surface System Aggregator Module"
@@ -39,4 +38,3 @@ endmenu
config SURFACE_HID_CORE
tristate
- select HID
diff --git a/drivers/hid/usbhid/Kconfig b/drivers/hid/usbhid/Kconfig
index 7c2032f7f44d..f3194767a45e 100644
--- a/drivers/hid/usbhid/Kconfig
+++ b/drivers/hid/usbhid/Kconfig
@@ -5,8 +5,7 @@ menu "USB HID support"
config USB_HID
tristate "USB HID transport layer"
default y
- depends on USB && INPUT
- select HID
+ depends on HID
help
Say Y here if you want to connect USB keyboards,
mice, joysticks, graphic tablets, or any other HID based devices
diff --git a/drivers/hwmon/ad7314.c b/drivers/hwmon/ad7314.c
index 7802bbf5f958..59424103f634 100644
--- a/drivers/hwmon/ad7314.c
+++ b/drivers/hwmon/ad7314.c
@@ -22,11 +22,13 @@
*/
#define AD7314_TEMP_MASK 0x7FE0
#define AD7314_TEMP_SHIFT 5
+#define AD7314_LEADING_ZEROS_MASK BIT(15)
/*
* ADT7301 and ADT7302 temperature masks
*/
#define ADT7301_TEMP_MASK 0x3FFF
+#define ADT7301_LEADING_ZEROS_MASK (BIT(15) | BIT(14))
enum ad7314_variant {
adt7301,
@@ -65,12 +67,20 @@ static ssize_t ad7314_temperature_show(struct device *dev,
return ret;
switch (spi_get_device_id(chip->spi_dev)->driver_data) {
case ad7314:
+ if (ret & AD7314_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
data = (ret & AD7314_TEMP_MASK) >> AD7314_TEMP_SHIFT;
data = sign_extend32(data, 9);
return sprintf(buf, "%d\n", 250 * data);
case adt7301:
case adt7302:
+ if (ret & ADT7301_LEADING_ZEROS_MASK) {
+ /* Invalid read-out, leading zero part is missing */
+ return -EIO;
+ }
/*
* Documented as a 13 bit twos complement register
* with a sign bit - which is a 14 bit 2's complement
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index b5352900463f..0d29c8f97ba7 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -181,40 +181,40 @@ static const struct ntc_compensation ncpXXwf104[] = {
};
static const struct ntc_compensation ncpXXxh103[] = {
- { .temp_c = -40, .ohm = 247565 },
- { .temp_c = -35, .ohm = 181742 },
- { .temp_c = -30, .ohm = 135128 },
- { .temp_c = -25, .ohm = 101678 },
- { .temp_c = -20, .ohm = 77373 },
- { .temp_c = -15, .ohm = 59504 },
- { .temp_c = -10, .ohm = 46222 },
- { .temp_c = -5, .ohm = 36244 },
- { .temp_c = 0, .ohm = 28674 },
- { .temp_c = 5, .ohm = 22878 },
- { .temp_c = 10, .ohm = 18399 },
- { .temp_c = 15, .ohm = 14910 },
- { .temp_c = 20, .ohm = 12169 },
+ { .temp_c = -40, .ohm = 195652 },
+ { .temp_c = -35, .ohm = 148171 },
+ { .temp_c = -30, .ohm = 113347 },
+ { .temp_c = -25, .ohm = 87559 },
+ { .temp_c = -20, .ohm = 68237 },
+ { .temp_c = -15, .ohm = 53650 },
+ { .temp_c = -10, .ohm = 42506 },
+ { .temp_c = -5, .ohm = 33892 },
+ { .temp_c = 0, .ohm = 27219 },
+ { .temp_c = 5, .ohm = 22021 },
+ { .temp_c = 10, .ohm = 17926 },
+ { .temp_c = 15, .ohm = 14674 },
+ { .temp_c = 20, .ohm = 12081 },
{ .temp_c = 25, .ohm = 10000 },
- { .temp_c = 30, .ohm = 8271 },
- { .temp_c = 35, .ohm = 6883 },
- { .temp_c = 40, .ohm = 5762 },
- { .temp_c = 45, .ohm = 4851 },
- { .temp_c = 50, .ohm = 4105 },
- { .temp_c = 55, .ohm = 3492 },
- { .temp_c = 60, .ohm = 2985 },
- { .temp_c = 65, .ohm = 2563 },
- { .temp_c = 70, .ohm = 2211 },
- { .temp_c = 75, .ohm = 1915 },
- { .temp_c = 80, .ohm = 1666 },
- { .temp_c = 85, .ohm = 1454 },
- { .temp_c = 90, .ohm = 1275 },
- { .temp_c = 95, .ohm = 1121 },
- { .temp_c = 100, .ohm = 990 },
- { .temp_c = 105, .ohm = 876 },
- { .temp_c = 110, .ohm = 779 },
- { .temp_c = 115, .ohm = 694 },
- { .temp_c = 120, .ohm = 620 },
- { .temp_c = 125, .ohm = 556 },
+ { .temp_c = 30, .ohm = 8315 },
+ { .temp_c = 35, .ohm = 6948 },
+ { .temp_c = 40, .ohm = 5834 },
+ { .temp_c = 45, .ohm = 4917 },
+ { .temp_c = 50, .ohm = 4161 },
+ { .temp_c = 55, .ohm = 3535 },
+ { .temp_c = 60, .ohm = 3014 },
+ { .temp_c = 65, .ohm = 2586 },
+ { .temp_c = 70, .ohm = 2228 },
+ { .temp_c = 75, .ohm = 1925 },
+ { .temp_c = 80, .ohm = 1669 },
+ { .temp_c = 85, .ohm = 1452 },
+ { .temp_c = 90, .ohm = 1268 },
+ { .temp_c = 95, .ohm = 1110 },
+ { .temp_c = 100, .ohm = 974 },
+ { .temp_c = 105, .ohm = 858 },
+ { .temp_c = 110, .ohm = 758 },
+ { .temp_c = 115, .ohm = 672 },
+ { .temp_c = 120, .ohm = 596 },
+ { .temp_c = 125, .ohm = 531 },
};
/*
diff --git a/drivers/hwmon/peci/dimmtemp.c b/drivers/hwmon/peci/dimmtemp.c
index d6762259dd69..fbe82d9852e0 100644
--- a/drivers/hwmon/peci/dimmtemp.c
+++ b/drivers/hwmon/peci/dimmtemp.c
@@ -127,8 +127,6 @@ static int update_thresholds(struct peci_dimmtemp *priv, int dimm_no)
return 0;
ret = priv->gen_info->read_thresholds(priv, dimm_order, chan_rank, &data);
- if (ret == -ENODATA) /* Use default or previous value */
- return 0;
if (ret)
return ret;
@@ -509,11 +507,11 @@ read_thresholds_icx(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 13, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 224e0: IMC 0 channel 0 -> rank 0
@@ -546,11 +544,11 @@ read_thresholds_spr(struct peci_dimmtemp *priv, int dimm_order, int chan_rank, u
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd4, &reg_val);
if (ret || !(reg_val & BIT(31)))
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
ret = peci_ep_pci_local_read(priv->peci_dev, 0, 30, 0, 2, 0xd0, &reg_val);
if (ret)
- return -ENODATA; /* Use default or previous value */
+ return -ENODATA;
/*
* Device 26, Offset 219a8: IMC 0 channel 0 -> rank 0
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 77cf268e7d2d..920cd5408141 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -103,6 +103,8 @@ static int pmbus_identify(struct i2c_client *client,
if (pmbus_check_byte_register(client, 0, PMBUS_PAGE)) {
int page;
+ info->pages = PMBUS_PAGES;
+
for (page = 1; page < PMBUS_PAGES; page++) {
if (pmbus_set_page(client, page, 0xff) < 0)
break;
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 1e3bd129a922..7087197383c9 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -706,7 +706,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out;
}
- if (!ctx->pcc_comm_addr) {
+ if (IS_ERR_OR_NULL(ctx->pcc_comm_addr)) {
dev_err(&pdev->dev,
"Failed to ioremap PCC comm region\n");
rc = -ENOMEM;
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index 66123d684ac9..bf99d79a4192 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -105,23 +105,32 @@ struct msc_iter {
/**
* struct msc - MSC device representation
- * @reg_base: register window base address
+ * @reg_base: register window base address for the entire MSU
+ * @msu_base: register window base address for this MSC
* @thdev: intel_th_device pointer
* @mbuf: MSU buffer, if assigned
- * @mbuf_priv MSU buffer's private data, if @mbuf
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
+ * @work: a work to stop the trace when the buffer is full
* @win_list: list of windows in multiblock mode
* @single_sgt: single mode buffer
* @cur_win: current window
+ * @switch_on_unlock: window to switch to when it becomes available
* @nr_pages: total number of pages allocated for this buffer
* @single_sz: amount of data in single mode
* @single_wrap: single mode wrap occurred
* @base: buffer's base pointer
* @base_addr: buffer's base address
+ * @orig_addr: MSC0 buffer's base address
+ * @orig_sz: MSC0 buffer's size
* @user_count: number of users of the buffer
* @mmap_count: number of mappings
* @buf_mutex: mutex to serialize access to buffer-related bits
+ * @iter_list: list of open file descriptor iterators
+ * @stop_on_full: stop the trace if the current window is full
* @enabled: MSC is enabled
* @wrap: wrapping is enabled
+ * @do_irq: IRQ resource is available, handle interrupts
+ * @multi_is_broken: multiblock mode is broken on this hardware (per PCI drvdata)
* @mode: MSC operating mode
* @burst_len: write burst length
* @index: number of this MSC in the MSU
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index e9d8d28e055f..e3def163d5cf 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -335,6 +335,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Arrow Lake */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7724),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe324),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
+ /* Panther Lake-P/U */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe424),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Alder Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f),
.driver_data = (kernel_ulong_t)&intel_th_2x,
diff --git a/drivers/i2c/busses/i2c-amd-asf-plat.c b/drivers/i2c/busses/i2c-amd-asf-plat.c
index 7512614bf4b7..93ebec162c6d 100644
--- a/drivers/i2c/busses/i2c-amd-asf-plat.c
+++ b/drivers/i2c/busses/i2c-amd-asf-plat.c
@@ -293,6 +293,7 @@ static irqreturn_t amd_asf_irq_handler(int irq, void *ptr)
amd_asf_update_ioport_target(piix4_smba, ASF_SLV_INTR, SMBHSTSTS, true);
}
+ iowrite32(irq, dev->eoi_base);
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-ls2x.c b/drivers/i2c/busses/i2c-ls2x.c
index 8821cac3897b..b475dd27b7af 100644
--- a/drivers/i2c/busses/i2c-ls2x.c
+++ b/drivers/i2c/busses/i2c-ls2x.c
@@ -10,6 +10,7 @@
* Rewritten for mainline by Binbin Zhou <zhoubinbin@loongson.cn>
*/
+#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/device.h>
@@ -26,7 +27,8 @@
#include <linux/units.h>
/* I2C Registers */
-#define I2C_LS2X_PRER 0x0 /* Freq Division Register(16 bits) */
+#define I2C_LS2X_PRER_LO 0x0 /* Freq Division Low Byte Register */
+#define I2C_LS2X_PRER_HI 0x1 /* Freq Division High Byte Register */
#define I2C_LS2X_CTR 0x2 /* Control Register */
#define I2C_LS2X_TXR 0x3 /* Transport Data Register */
#define I2C_LS2X_RXR 0x3 /* Receive Data Register */
@@ -93,6 +95,7 @@ static irqreturn_t ls2x_i2c_isr(int this_irq, void *dev_id)
*/
static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
{
+ u16 val;
struct i2c_timings *t = &priv->i2c_t;
struct device *dev = priv->adapter.dev.parent;
u32 acpi_speed = i2c_acpi_find_bus_speed(dev);
@@ -104,9 +107,14 @@ static void ls2x_i2c_adjust_bus_speed(struct ls2x_i2c_priv *priv)
else
t->bus_freq_hz = LS2X_I2C_FREQ_STD;
- /* Calculate and set i2c frequency. */
- writew(LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1,
- priv->base + I2C_LS2X_PRER);
+ /*
+ * According to the chip manual, we can only access the registers as bytes,
+ * otherwise the high bits will be truncated.
+	 * So set the I2C frequency with sequential writeb() calls instead of writew().
+ */
+ val = LS2X_I2C_PCLK_FREQ / (5 * t->bus_freq_hz) - 1;
+ writeb(FIELD_GET(GENMASK(7, 0), val), priv->base + I2C_LS2X_PRER_LO);
+ writeb(FIELD_GET(GENMASK(15, 8), val), priv->base + I2C_LS2X_PRER_HI);
}
static void ls2x_i2c_init(struct ls2x_i2c_priv *priv)
diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c
index 3ca08b8ef8af..de713b5747fe 100644
--- a/drivers/i2c/busses/i2c-npcm7xx.c
+++ b/drivers/i2c/busses/i2c-npcm7xx.c
@@ -2554,6 +2554,13 @@ static int npcm_i2c_probe_bus(struct platform_device *pdev)
if (irq < 0)
return irq;
+ /*
+ * Disable the interrupt to avoid the interrupt handler being triggered
+ * incorrectly by the asynchronous interrupt status since the machine
+ * might do a warm reset during the last smbus/i2c transfer session.
+ */
+ npcm_i2c_int_enable(bus, false);
+
ret = devm_request_irq(bus->dev, irq, npcm_i2c_bus_irq, 0,
dev_name(bus->dev), bus);
if (ret)
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5546184df05f..7ad1ad5c8c3f 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -1300,12 +1300,14 @@ new_device_store(struct device *dev, struct device_attribute *attr,
info.flags |= I2C_CLIENT_SLAVE;
}
- info.flags |= I2C_CLIENT_USER;
-
client = i2c_new_client_device(adap, &info);
if (IS_ERR(client))
return PTR_ERR(client);
+ /* Keep track of the added device */
+ mutex_lock(&adap->userspace_clients_lock);
+ list_add_tail(&client->detected, &adap->userspace_clients);
+ mutex_unlock(&adap->userspace_clients_lock);
dev_info(dev, "%s: Instantiated device %s at 0x%02hx\n", "new_device",
info.type, info.addr);
@@ -1313,15 +1315,6 @@ new_device_store(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR_WO(new_device);
-static int __i2c_find_user_addr(struct device *dev, const void *addrp)
-{
- struct i2c_client *client = i2c_verify_client(dev);
- unsigned short addr = *(unsigned short *)addrp;
-
- return client && client->flags & I2C_CLIENT_USER &&
- i2c_encode_flags_to_addr(client) == addr;
-}
-
/*
* And of course let the users delete the devices they instantiated, if
* they got it wrong. This interface can only be used to delete devices
@@ -1336,7 +1329,7 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct i2c_adapter *adap = to_i2c_adapter(dev);
- struct device *child_dev;
+ struct i2c_client *client, *next;
unsigned short addr;
char end;
int res;
@@ -1352,19 +1345,28 @@ delete_device_store(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- mutex_lock(&core_lock);
/* Make sure the device was added through sysfs */
- child_dev = device_find_child(&adap->dev, &addr, __i2c_find_user_addr);
- if (child_dev) {
- i2c_unregister_device(i2c_verify_client(child_dev));
- put_device(child_dev);
- } else {
- dev_err(dev, "Can't find userspace-created device at %#x\n", addr);
- count = -ENOENT;
+ res = -ENOENT;
+ mutex_lock_nested(&adap->userspace_clients_lock,
+ i2c_adapter_depth(adap));
+ list_for_each_entry_safe(client, next, &adap->userspace_clients,
+ detected) {
+ if (i2c_encode_flags_to_addr(client) == addr) {
+ dev_info(dev, "%s: Deleting device %s at 0x%02hx\n",
+ "delete_device", client->name, client->addr);
+
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ res = count;
+ break;
+ }
}
- mutex_unlock(&core_lock);
+ mutex_unlock(&adap->userspace_clients_lock);
- return count;
+ if (res < 0)
+ dev_err(dev, "%s: Can't find device in list\n",
+ "delete_device");
+ return res;
}
static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
delete_device_store);
@@ -1535,6 +1537,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
adap->locked_flags = 0;
rt_mutex_init(&adap->bus_lock);
rt_mutex_init(&adap->mux_lock);
+ mutex_init(&adap->userspace_clients_lock);
+ INIT_LIST_HEAD(&adap->userspace_clients);
/* Set default timeout to 1 second if not already set */
if (adap->timeout == 0)
@@ -1700,6 +1704,23 @@ int i2c_add_numbered_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL_GPL(i2c_add_numbered_adapter);
+static void i2c_do_del_adapter(struct i2c_driver *driver,
+ struct i2c_adapter *adapter)
+{
+ struct i2c_client *client, *_n;
+
+ /* Remove the devices we created ourselves as the result of hardware
+ * probing (using a driver's detect method) */
+ list_for_each_entry_safe(client, _n, &driver->clients, detected) {
+ if (client->adapter == adapter) {
+ dev_dbg(&adapter->dev, "Removing %s at 0x%x\n",
+ client->name, client->addr);
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ }
+ }
+}
+
static int __unregister_client(struct device *dev, void *dummy)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -1715,6 +1736,12 @@ static int __unregister_dummy(struct device *dev, void *dummy)
return 0;
}
+static int __process_removed_adapter(struct device_driver *d, void *data)
+{
+ i2c_do_del_adapter(to_i2c_driver(d), data);
+ return 0;
+}
+
/**
* i2c_del_adapter - unregister I2C adapter
* @adap: the adapter being unregistered
@@ -1726,6 +1753,7 @@ static int __unregister_dummy(struct device *dev, void *dummy)
void i2c_del_adapter(struct i2c_adapter *adap)
{
struct i2c_adapter *found;
+ struct i2c_client *client, *next;
/* First make sure that this adapter was ever added */
mutex_lock(&core_lock);
@@ -1737,16 +1765,31 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
i2c_acpi_remove_space_handler(adap);
+ /* Tell drivers about this removal */
+ mutex_lock(&core_lock);
+ bus_for_each_drv(&i2c_bus_type, NULL, adap,
+ __process_removed_adapter);
+ mutex_unlock(&core_lock);
+
+ /* Remove devices instantiated from sysfs */
+ mutex_lock_nested(&adap->userspace_clients_lock,
+ i2c_adapter_depth(adap));
+ list_for_each_entry_safe(client, next, &adap->userspace_clients,
+ detected) {
+ dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name,
+ client->addr);
+ list_del(&client->detected);
+ i2c_unregister_device(client);
+ }
+ mutex_unlock(&adap->userspace_clients_lock);
/* Detach any active clients. This can't fail, thus we do not
* check the returned value. This is a two-pass process, because
* we can't remove the dummy devices during the first pass: they
* could have been instantiated by real devices wishing to clean
* them up properly, so we give them a chance to do that first. */
- mutex_lock(&core_lock);
device_for_each_child(&adap->dev, NULL, __unregister_client);
device_for_each_child(&adap->dev, NULL, __unregister_dummy);
- mutex_unlock(&core_lock);
/* device name is gone after device_unregister */
dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
@@ -1966,6 +2009,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
/* add the driver to the list of i2c drivers in the driver core */
driver->driver.owner = owner;
driver->driver.bus = &i2c_bus_type;
+ INIT_LIST_HEAD(&driver->clients);
/* When registration returns, the driver core
* will have called probe() for all matching-but-unbound devices.
@@ -1983,13 +2027,10 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
}
EXPORT_SYMBOL(i2c_register_driver);
-static int __i2c_unregister_detected_client(struct device *dev, void *argp)
+static int __process_removed_driver(struct device *dev, void *data)
{
- struct i2c_client *client = i2c_verify_client(dev);
-
- if (client && client->flags & I2C_CLIENT_AUTO)
- i2c_unregister_device(client);
-
+ if (dev->type == &i2c_adapter_type)
+ i2c_do_del_adapter(data, to_i2c_adapter(dev));
return 0;
}
@@ -2000,12 +2041,7 @@ static int __i2c_unregister_detected_client(struct device *dev, void *argp)
*/
void i2c_del_driver(struct i2c_driver *driver)
{
- mutex_lock(&core_lock);
- /* Satisfy __must_check, function can't fail */
- if (driver_for_each_device(&driver->driver, NULL, NULL,
- __i2c_unregister_detected_client)) {
- }
- mutex_unlock(&core_lock);
+ i2c_for_each_dev(driver, __process_removed_driver);
driver_unregister(&driver->driver);
pr_debug("driver [%s] unregistered\n", driver->driver.name);
@@ -2432,7 +2468,6 @@ static int i2c_detect_address(struct i2c_client *temp_client,
/* Finally call the custom detection function */
memset(&info, 0, sizeof(struct i2c_board_info));
info.addr = addr;
- info.flags = I2C_CLIENT_AUTO;
err = driver->detect(temp_client, &info);
if (err) {
/* -ENODEV is returned if the detection fails. We catch it
@@ -2459,7 +2494,9 @@ static int i2c_detect_address(struct i2c_client *temp_client,
dev_dbg(&adapter->dev, "Creating %s at 0x%02x\n",
info.type, info.addr);
client = i2c_new_client_device(adapter, &info);
- if (IS_ERR(client))
+ if (!IS_ERR(client))
+ list_add_tail(&client->detected, &driver->clients);
+ else
dev_err(&adapter->dev, "Failed creating %s at 0x%02x\n",
info.type, info.addr);
}
@@ -2469,7 +2506,7 @@ static int i2c_detect_address(struct i2c_client *temp_client,
static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
{
const unsigned short *address_list;
- struct i2c_client temp_client;
+ struct i2c_client *temp_client;
int i, err = 0;
address_list = driver->address_list;
@@ -2490,19 +2527,24 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver)
return 0;
/* Set up a temporary client to help detect callback */
- memset(&temp_client, 0, sizeof(temp_client));
- temp_client.adapter = adapter;
+ temp_client = kzalloc(sizeof(*temp_client), GFP_KERNEL);
+ if (!temp_client)
+ return -ENOMEM;
+
+ temp_client->adapter = adapter;
for (i = 0; address_list[i] != I2C_CLIENT_END; i += 1) {
dev_dbg(&adapter->dev,
"found normal entry for adapter %d, addr 0x%02x\n",
i2c_adapter_id(adapter), address_list[i]);
- temp_client.addr = address_list[i];
- err = i2c_detect_address(&temp_client, driver);
+ temp_client->addr = address_list[i];
+ err = i2c_detect_address(temp_client, driver);
if (unlikely(err))
break;
}
+ kfree(temp_client);
+
return err;
}
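
Editor's note, not part of the patch: the i2c-core hunks above drop the I2C_CLIENT_AUTO flagging and go back to tracking auto-detected clients on a per-driver list (driver->clients, linked through client->detected), which the adapter/driver removal paths then walk. A minimal sketch of that bookkeeping, assuming those restored struct fields and the standard list helpers; the demo_* names are hypothetical:

#include <linux/i2c.h>
#include <linux/list.h>

static void demo_track_detected(struct i2c_adapter *adap,
				struct i2c_driver *driver,
				struct i2c_board_info *info)
{
	struct i2c_client *client;

	/* A successful detection links the client onto the driver's list */
	client = i2c_new_client_device(adap, info);
	if (!IS_ERR(client))
		list_add_tail(&client->detected, &driver->clients);
}

static void demo_forget_detected(struct i2c_adapter *adap,
				 struct i2c_driver *driver)
{
	struct i2c_client *client, *next;

	/* Safe iterator: entries are unlinked while the list is walked */
	list_for_each_entry_safe(client, next, &driver->clients, detected) {
		if (client->adapter == adap) {
			list_del(&client->detected);
			i2c_unregister_device(client);
		}
	}
}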
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 118fe1d37c22..0fdb1d1316c4 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -56,6 +56,7 @@
#include <asm/intel-family.h>
#include <asm/mwait.h>
#include <asm/spec-ctrl.h>
+#include <asm/tsc.h>
#include <asm/fpu/api.h>
#define INTEL_IDLE_VERSION "0.5.1"
@@ -1799,6 +1800,9 @@ static void __init intel_idle_init_cstates_acpi(struct cpuidle_driver *drv)
if (intel_idle_state_needs_timer_stop(state))
state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+ if (cx->type > ACPI_STATE_C1 && !boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+ mark_tsc_unstable("TSC halts in idle");
+
state->enter = intel_idle;
state->enter_s2idle = intel_idle_s2idle;
}
diff --git a/drivers/iio/accel/adxl345.h b/drivers/iio/accel/adxl345.h
index 517e494ba555..bc6d634bd85c 100644
--- a/drivers/iio/accel/adxl345.h
+++ b/drivers/iio/accel/adxl345.h
@@ -43,7 +43,6 @@
#define ADXL345_REG_INT_ENABLE 0x2E
#define ADXL345_REG_INT_MAP 0x2F
#define ADXL345_REG_INT_SOURCE 0x30
-#define ADXL345_REG_INT_SOURCE_MSK 0xFF
#define ADXL345_REG_DATA_FORMAT 0x31
#define ADXL345_REG_XYZ_BASE 0x32
#define ADXL345_REG_DATA_AXIS(index) \
diff --git a/drivers/iio/accel/adxl345_core.c b/drivers/iio/accel/adxl345_core.c
index d1b2d3985a40..375c27d16827 100644
--- a/drivers/iio/accel/adxl345_core.c
+++ b/drivers/iio/accel/adxl345_core.c
@@ -76,6 +76,26 @@ static const unsigned long adxl345_scan_masks[] = {
0
};
+/**
+ * adxl345_set_measure_en() - Enable and disable measuring.
+ *
+ * @st: The device data.
+ * @en: Enable measurements, else standby mode.
+ *
+ * For lowest power operation, standby mode can be used. In standby mode,
+ * current consumption is supposed to be reduced to 0.1uA (typical). In this
+ * mode no measurements are made. Placing the device into standby mode
+ * preserves the contents of FIFO.
+ *
+ * Return: Returns 0 if successful, or a negative error value.
+ */
+static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
+{
+ unsigned int val = en ? ADXL345_POWER_CTL_MEASURE : ADXL345_POWER_CTL_STANDBY;
+
+ return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
+}
+
static int adxl345_set_interrupts(struct adxl345_state *st)
{
int ret;
@@ -87,8 +107,7 @@ static int adxl345_set_interrupts(struct adxl345_state *st)
* interrupts to the INT1 pin, whereas bits set to 1 send their respective
* interrupts to the INT2 pin. The intio shall convert this accordingly.
*/
- int_map = FIELD_GET(ADXL345_REG_INT_SOURCE_MSK,
- st->intio ? st->int_map : ~st->int_map);
+ int_map = st->intio ? st->int_map : ~st->int_map;
ret = regmap_write(st->regmap, ADXL345_REG_INT_MAP, int_map);
if (ret)
@@ -182,6 +201,16 @@ static int adxl345_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int adxl345_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ struct adxl345_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+ return regmap_write(st->regmap, reg, writeval);
+}
+
static int adxl345_set_watermark(struct iio_dev *indio_dev, unsigned int value)
{
struct adxl345_state *st = iio_priv(indio_dev);
@@ -214,26 +243,6 @@ static int adxl345_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
-/**
- * adxl345_set_measure_en() - Enable and disable measuring.
- *
- * @st: The device data.
- * @en: Enable measurements, else standby mode.
- *
- * For lowest power operation, standby mode can be used. In standby mode,
- * current consumption is supposed to be reduced to 0.1uA (typical). In this
- * mode no measurements are made. Placing the device into standby mode
- * preserves the contents of FIFO.
- *
- * Return: Returns 0 if successful, or a negative error value.
- */
-static int adxl345_set_measure_en(struct adxl345_state *st, bool en)
-{
- unsigned int val = en ? ADXL345_POWER_CTL_MEASURE : ADXL345_POWER_CTL_STANDBY;
-
- return regmap_write(st->regmap, ADXL345_REG_POWER_CTL, val);
-}
-
static void adxl345_powerdown(void *ptr)
{
struct adxl345_state *st = ptr;
@@ -394,18 +403,6 @@ static const struct iio_buffer_setup_ops adxl345_buffer_ops = {
.predisable = adxl345_buffer_predisable,
};
-static int adxl345_get_status(struct adxl345_state *st)
-{
- int ret;
- unsigned int regval;
-
- ret = regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &regval);
- if (ret < 0)
- return ret;
-
- return FIELD_GET(ADXL345_REG_INT_SOURCE_MSK, regval);
-}
-
static int adxl345_fifo_push(struct iio_dev *indio_dev,
int samples)
{
@@ -439,14 +436,10 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
int int_stat;
int samples;
- int_stat = adxl345_get_status(st);
- if (int_stat <= 0)
+ if (regmap_read(st->regmap, ADXL345_REG_INT_SOURCE, &int_stat))
return IRQ_NONE;
- if (int_stat & ADXL345_INT_OVERRUN)
- goto err;
-
- if (int_stat & ADXL345_INT_WATERMARK) {
+ if (FIELD_GET(ADXL345_INT_WATERMARK, int_stat)) {
samples = adxl345_get_samples(st);
if (samples < 0)
goto err;
@@ -454,6 +447,10 @@ static irqreturn_t adxl345_irq_handler(int irq, void *p)
if (adxl345_fifo_push(indio_dev, samples) < 0)
goto err;
}
+
+ if (FIELD_GET(ADXL345_INT_OVERRUN, int_stat))
+ goto err;
+
return IRQ_HANDLED;
err:
@@ -467,6 +464,7 @@ static const struct iio_info adxl345_info = {
.read_raw = adxl345_read_raw,
.write_raw = adxl345_write_raw,
.write_raw_get_fmt = adxl345_write_raw_get_fmt,
+ .debugfs_reg_access = &adxl345_reg_access,
.hwfifo_set_watermark = adxl345_set_watermark,
};
diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c
index a48ac0d7bd96..add4053e7a02 100644
--- a/drivers/iio/accel/adxl367.c
+++ b/drivers/iio/accel/adxl367.c
@@ -477,45 +477,42 @@ static int adxl367_set_fifo_watermark(struct adxl367_state *st,
static int adxl367_set_range(struct iio_dev *indio_dev,
enum adxl367_range range)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- return ret;
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
- ADXL367_FILTER_CTL_RANGE_MASK,
- FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
- range));
- if (ret)
- return ret;
+ ret = regmap_update_bits(st->regmap, ADXL367_REG_FILTER_CTL,
+ ADXL367_FILTER_CTL_RANGE_MASK,
+ FIELD_PREP(ADXL367_FILTER_CTL_RANGE_MASK,
+ range));
+ if (ret)
+ return ret;
- adxl367_scale_act_thresholds(st, st->range, range);
+ adxl367_scale_act_thresholds(st, st->range, range);
- /* Activity thresholds depend on range */
- ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
- st->act_threshold);
- if (ret)
- return ret;
+ /* Activity thresholds depend on range */
+ ret = _adxl367_set_act_threshold(st, ADXL367_ACTIVITY,
+ st->act_threshold);
+ if (ret)
+ return ret;
- ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
- st->inact_threshold);
- if (ret)
- return ret;
+ ret = _adxl367_set_act_threshold(st, ADXL367_INACTIVITY,
+ st->inact_threshold);
+ if (ret)
+ return ret;
- ret = adxl367_set_measure_en(st, true);
- if (ret)
- return ret;
+ ret = adxl367_set_measure_en(st, true);
+ if (ret)
+ return ret;
- st->range = range;
+ st->range = range;
- return 0;
- }
- unreachable();
+ return 0;
}
static int adxl367_time_ms_to_samples(struct adxl367_state *st, unsigned int ms)
@@ -620,23 +617,20 @@ static int _adxl367_set_odr(struct adxl367_state *st, enum adxl367_odr odr)
static int adxl367_set_odr(struct iio_dev *indio_dev, enum adxl367_odr odr)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- return ret;
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- ret = _adxl367_set_odr(st, odr);
- if (ret)
- return ret;
+ ret = _adxl367_set_odr(st, odr);
+ if (ret)
+ return ret;
- return adxl367_set_measure_en(st, true);
- }
- unreachable();
+ return adxl367_set_measure_en(st, true);
}
static int adxl367_set_temp_adc_en(struct adxl367_state *st, unsigned int reg,
@@ -725,32 +719,29 @@ static int adxl367_read_sample(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- u16 sample;
- int ret;
+ struct adxl367_state *st = iio_priv(indio_dev);
+ u16 sample;
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
- if (ret)
- return ret;
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, true);
+ if (ret)
+ return ret;
- ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
- sizeof(st->sample_buf));
- if (ret)
- return ret;
+ ret = regmap_bulk_read(st->regmap, chan->address, &st->sample_buf,
+ sizeof(st->sample_buf));
+ if (ret)
+ return ret;
- sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
- *val = sign_extend32(sample, chan->scan_type.realbits - 1);
+ sample = FIELD_GET(ADXL367_DATA_MASK, be16_to_cpu(st->sample_buf));
+ *val = sign_extend32(sample, chan->scan_type.realbits - 1);
- ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
- if (ret)
- return ret;
+ ret = adxl367_set_temp_adc_reg_en(st, chan->address, false);
+ if (ret)
+ return ret;
- return IIO_VAL_INT;
- }
- unreachable();
+ return IIO_VAL_INT;
}
static int adxl367_get_status(struct adxl367_state *st, u8 *status,
@@ -852,10 +843,15 @@ static int adxl367_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long info)
{
struct adxl367_state *st = iio_priv(indio_dev);
+ int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
- return adxl367_read_sample(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = adxl367_read_sample(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ACCEL: {
@@ -912,7 +908,12 @@ static int adxl367_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- return adxl367_set_odr(indio_dev, odr);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = adxl367_set_odr(indio_dev, odr);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
case IIO_CHAN_INFO_SCALE: {
enum adxl367_range range;
@@ -921,7 +922,12 @@ static int adxl367_write_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- return adxl367_set_range(indio_dev, range);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = adxl367_set_range(indio_dev, range);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
default:
return -EINVAL;
@@ -1069,13 +1075,15 @@ static int adxl367_read_event_config(struct iio_dev *indio_dev,
}
}
-static int adxl367_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan,
- enum iio_event_type type,
- enum iio_event_direction dir,
- bool state)
+static int __adxl367_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
{
+ struct adxl367_state *st = iio_priv(indio_dev);
enum adxl367_activity_type act;
+ int ret;
switch (dir) {
case IIO_EV_DIR_RISING:
@@ -1088,28 +1096,38 @@ static int adxl367_write_event_config(struct iio_dev *indio_dev,
return -EINVAL;
}
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct adxl367_state *st = iio_priv(indio_dev);
- int ret;
+ guard(mutex)(&st->lock);
+
+ ret = adxl367_set_measure_en(st, false);
+ if (ret)
+ return ret;
- guard(mutex)(&st->lock);
+ ret = adxl367_set_act_interrupt_en(st, act, state);
+ if (ret)
+ return ret;
- ret = adxl367_set_measure_en(st, false);
- if (ret)
- return ret;
+ ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
+ : ADXL367_ACT_DISABLED);
+ if (ret)
+ return ret;
- ret = adxl367_set_act_interrupt_en(st, act, state);
- if (ret)
- return ret;
+ return adxl367_set_measure_en(st, true);
+}
- ret = adxl367_set_act_en(st, act, state ? ADCL367_ACT_REF_ENABLED
- : ADXL367_ACT_DISABLED);
- if (ret)
- return ret;
+static int adxl367_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ int ret;
- return adxl367_set_measure_en(st, true);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __adxl367_write_event_config(indio_dev, chan, type, dir, state);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static ssize_t adxl367_get_fifo_enabled(struct device *dev,
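
Editor's note, not part of the patch: this and the following accelerometer hunks replace iio_device_claim_direct_scoped()/iio_device_claim_direct_mode() with the bool-returning iio_device_claim_direct() plus an explicit release on every exit path. A minimal sketch of the resulting shape; my_read_sample() is a hypothetical helper:

#include <linux/iio/iio.h>

static int my_read_raw(struct iio_dev *indio_dev,
		       struct iio_chan_spec const *chan,
		       int *val, int *val2, long mask)
{
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		/* The claim returns false while buffered capture is running */
		if (!iio_device_claim_direct(indio_dev))
			return -EBUSY;

		ret = my_read_sample(indio_dev, chan, val);
		iio_device_release_direct(indio_dev);
		return ret;
	default:
		return -EINVAL;
	}
}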
diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c
index 8ba5fbe6e1f5..961145b50293 100644
--- a/drivers/iio/accel/adxl372.c
+++ b/drivers/iio/accel/adxl372.c
@@ -763,12 +763,11 @@ static int adxl372_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = adxl372_read_axis(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/accel/adxl380.c b/drivers/iio/accel/adxl380.c
index 90340f134722..0cf3c6815829 100644
--- a/drivers/iio/accel/adxl380.c
+++ b/drivers/iio/accel/adxl380.c
@@ -1175,12 +1175,11 @@ static int adxl380_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = adxl380_read_chn(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 128db14ba726..aa664a923f91 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -540,14 +540,13 @@ static int bma180_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->mutex);
ret = bma180_get_data_reg(data, chan->scan_index);
mutex_unlock(&data->mutex);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
if (chan->scan_type.sign == 's') {
diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c
index ae806ed60271..23f5e1ce9cc4 100644
--- a/drivers/iio/accel/bma400_core.c
+++ b/drivers/iio/accel/bma400_core.c
@@ -190,7 +190,7 @@ const struct regmap_config bma400_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = BMA400_CMD_REG,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
.writeable_reg = bma400_is_writable_reg,
.volatile_reg = bma400_is_volatile_reg,
};
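
Editor's note, not part of the patch: this hunk and several later ones (bmi088, kx022a, msa311) only switch the regmap register cache from the rbtree to the maple tree implementation; nothing else in the config changes. A minimal sketch with hypothetical register-layout values:

#include <linux/regmap.h>

static const struct regmap_config demo_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7e,		/* hypothetical last register */
	.cache_type = REGCACHE_MAPLE,	/* previously REGCACHE_RBTREE */
};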
diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c
index 9206fbdbf520..dea126f993c1 100644
--- a/drivers/iio/accel/bmi088-accel-core.c
+++ b/drivers/iio/accel/bmi088-accel-core.c
@@ -145,7 +145,7 @@ const struct regmap_config bmi088_regmap_conf = {
.val_bits = 8,
.max_register = 0x7E,
.volatile_table = &bmi088_volatile_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
EXPORT_SYMBOL_NS_GPL(bmi088_regmap_conf, "IIO_BMI088");
@@ -313,12 +313,13 @@ static int bmi088_accel_read_raw(struct iio_dev *indio_dev,
if (ret)
return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
+ if (!iio_device_claim_direct(indio_dev)) {
+ ret = -EBUSY;
goto out_read_raw_pm_put;
+ }
ret = bmi088_accel_get_axis(data, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (!ret)
ret = IIO_VAL_INT;
diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c
index 987212a7c038..48e4282964a0 100644
--- a/drivers/iio/accel/fxls8962af-core.c
+++ b/drivers/iio/accel/fxls8962af-core.c
@@ -460,22 +460,20 @@ static int fxls8962af_write_raw(struct iio_dev *indio_dev,
if (val != 0)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = fxls8962af_set_full_scale(data, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = fxls8962af_set_samp_freq(data, val, val2);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
default:
return -EINVAL;
@@ -683,14 +681,13 @@ fxls8962af_write_event_config(struct iio_dev *indio_dev,
fxls8962af_active(data);
ret = fxls8962af_power_on(data);
} else {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
/* Not in buffered mode so disable power */
ret = fxls8962af_power_off(data);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
}
return ret;
diff --git a/drivers/iio/accel/kionix-kx022a.c b/drivers/iio/accel/kionix-kx022a.c
index 5aeb3b951ac5..07dcf5f0599f 100644
--- a/drivers/iio/accel/kionix-kx022a.c
+++ b/drivers/iio/accel/kionix-kx022a.c
@@ -149,7 +149,7 @@ static const struct regmap_config kx022a_regmap_config = {
.rd_noinc_table = &kx022a_nir_regs,
.precious_table = &kx022a_precious_regs,
.max_register = KX022A_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
/* Regmap configs kx132 */
@@ -260,7 +260,7 @@ static const struct regmap_config kx132_regmap_config = {
.rd_noinc_table = &kx132_nir_regs,
.precious_table = &kx132_precious_regs,
.max_register = KX132_MAX_REGISTER,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
struct kx022a_data {
@@ -510,26 +510,13 @@ static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
}
}
-static int kx022a_write_raw(struct iio_dev *idev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int __kx022a_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct kx022a_data *data = iio_priv(idev);
int ret, n;
- /*
- * We should not allow changing scale or frequency when FIFO is running
- * as it will mess the timestamp/scale for samples existing in the
- * buffer. If this turns out to be an issue we can later change logic
- * to internally flush the fifo before reconfiguring so the samples in
- * fifo keep matching the freq/scale settings. (Such setup could cause
- * issues if users trust the watermark to be reached within known
- * time-limit).
- */
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
-
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
n = ARRAY_SIZE(kx022a_accel_samp_freq_table);
@@ -538,20 +525,19 @@ static int kx022a_write_raw(struct iio_dev *idev,
if (val == kx022a_accel_samp_freq_table[n][0] &&
val2 == kx022a_accel_samp_freq_table[n][1])
break;
- if (n < 0) {
- ret = -EINVAL;
- goto unlock_out;
- }
+ if (n < 0)
+ return -EINVAL;
+
ret = kx022a_turn_off_lock(data);
if (ret)
- break;
+ return ret;
ret = regmap_update_bits(data->regmap,
data->chip_info->odcntl,
KX022A_MASK_ODR, n);
data->odr_ns = kx022a_odrs[n];
kx022a_turn_on_unlock(data);
- break;
+ return ret;
case IIO_CHAN_INFO_SCALE:
n = data->chip_info->scale_table_size / 2;
@@ -559,27 +545,44 @@ static int kx022a_write_raw(struct iio_dev *idev,
if (val == data->chip_info->scale_table[n][0] &&
val2 == data->chip_info->scale_table[n][1])
break;
- if (n < 0) {
- ret = -EINVAL;
- goto unlock_out;
- }
+ if (n < 0)
+ return -EINVAL;
ret = kx022a_turn_off_lock(data);
if (ret)
- break;
+ return ret;
ret = regmap_update_bits(data->regmap, data->chip_info->cntl,
KX022A_MASK_GSEL,
n << KX022A_GSEL_SHIFT);
kx022a_turn_on_unlock(data);
- break;
+ return ret;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
-unlock_out:
- iio_device_release_direct_mode(idev);
+static int kx022a_write_raw(struct iio_dev *idev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ /*
+ * We should not allow changing scale or frequency when FIFO is running
+ * as it will mess the timestamp/scale for samples existing in the
+ * buffer. If this turns out to be an issue we can later change logic
+ * to internally flush the fifo before reconfiguring so the samples in
+ * fifo keep matching the freq/scale settings. (Such setup could cause
+ * issues if users trust the watermark to be reached within known
+ * time-limit).
+ */
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
+
+ ret = __kx022a_write_raw(idev, chan, val, val2, mask);
+
+ iio_device_release_direct(idev);
return ret;
}
@@ -620,15 +623,14 @@ static int kx022a_read_raw(struct iio_dev *idev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(idev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(idev))
+ return -EBUSY;
mutex_lock(&data->mutex);
ret = kx022a_get_axis(data, chan, val);
mutex_unlock(&data->mutex);
- iio_device_release_direct_mode(idev);
+ iio_device_release_direct(idev);
return ret;
diff --git a/drivers/iio/accel/mc3230.c b/drivers/iio/accel/mc3230.c
index caa40a14a631..e2853090fa6e 100644
--- a/drivers/iio/accel/mc3230.c
+++ b/drivers/iio/accel/mc3230.c
@@ -22,20 +22,37 @@
#define MC3230_MODE_OPCON_STANDBY 0x03
#define MC3230_REG_CHIP_ID 0x18
-#define MC3230_CHIP_ID 0x01
-
#define MC3230_REG_PRODUCT_CODE 0x3b
-#define MC3230_PRODUCT_CODE 0x19
/*
* The accelerometer has one measurement range:
*
* -1.5g - +1.5g (8-bit, signed)
*
- * scale = (1.5 + 1.5) * 9.81 / (2^8 - 1) = 0.115411765
*/
-static const int mc3230_nscale = 115411765;
+struct mc3230_chip_info {
+ const char *name;
+ const u8 chip_id;
+ const u8 product_code;
+ const int scale;
+};
+
+static const struct mc3230_chip_info mc3230_chip_info = {
+ .name = "mc3230",
+ .chip_id = 0x01,
+ .product_code = 0x19,
+ /* (1.5 + 1.5) * 9.81 / (2^8 - 1) = 0.115411765 */
+ .scale = 115411765,
+};
+
+static const struct mc3230_chip_info mc3510c_chip_info = {
+ .name = "mc3510c",
+ .chip_id = 0x23,
+ .product_code = 0x10,
+ /* Was obtained empirically */
+ .scale = 625000000,
+};
#define MC3230_CHANNEL(reg, axis) { \
.type = IIO_ACCEL, \
@@ -44,18 +61,35 @@ static const int mc3230_nscale = 115411765;
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .ext_info = mc3230_ext_info, \
+}
+
+struct mc3230_data {
+ const struct mc3230_chip_info *chip_info;
+ struct i2c_client *client;
+ struct iio_mount_matrix orientation;
+};
+
+static const struct iio_mount_matrix *
+mc3230_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mc3230_data *data = iio_priv(indio_dev);
+
+ return &data->orientation;
}
+static const struct iio_chan_spec_ext_info mc3230_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, mc3230_get_mount_matrix),
+ { }
+};
+
static const struct iio_chan_spec mc3230_channels[] = {
MC3230_CHANNEL(MC3230_REG_XOUT, X),
MC3230_CHANNEL(MC3230_REG_YOUT, Y),
MC3230_CHANNEL(MC3230_REG_ZOUT, Z),
};
-struct mc3230_data {
- struct i2c_client *client;
-};
-
static int mc3230_set_opcon(struct mc3230_data *data, int opcon)
{
int ret;
@@ -95,7 +129,7 @@ static int mc3230_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
- *val2 = mc3230_nscale;
+ *val2 = data->chip_info->scale;
return IIO_VAL_INT_PLUS_NANO;
default:
return -EINVAL;
@@ -111,15 +145,28 @@ static int mc3230_probe(struct i2c_client *client)
int ret;
struct iio_dev *indio_dev;
struct mc3230_data *data;
+ const struct mc3230_chip_info *chip_info;
+
+ chip_info = i2c_get_match_data(client);
+ if (chip_info == NULL) {
+ dev_err(&client->dev, "failed to get match data");
+ return -ENODATA;
+ }
/* First check chip-id and product-id */
ret = i2c_smbus_read_byte_data(client, MC3230_REG_CHIP_ID);
- if (ret != MC3230_CHIP_ID)
- return (ret < 0) ? ret : -ENODEV;
+ if (ret != chip_info->chip_id) {
+ dev_info(&client->dev,
+ "chip id check fail: 0x%x != 0x%x !\n",
+ ret, chip_info->chip_id);
+ }
ret = i2c_smbus_read_byte_data(client, MC3230_REG_PRODUCT_CODE);
- if (ret != MC3230_PRODUCT_CODE)
- return (ret < 0) ? ret : -ENODEV;
+ if (ret != chip_info->product_code) {
+ dev_info(&client->dev,
+ "product code check fail: 0x%x != 0x%x !\n",
+ ret, chip_info->product_code);
+ }
indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev) {
@@ -128,11 +175,12 @@ static int mc3230_probe(struct i2c_client *client)
}
data = iio_priv(indio_dev);
+ data->chip_info = chip_info;
data->client = client;
i2c_set_clientdata(client, indio_dev);
indio_dev->info = &mc3230_info;
- indio_dev->name = "mc3230";
+ indio_dev->name = chip_info->name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->channels = mc3230_channels;
indio_dev->num_channels = ARRAY_SIZE(mc3230_channels);
@@ -141,6 +189,10 @@ static int mc3230_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = iio_read_mount_matrix(&client->dev, &data->orientation);
+ if (ret)
+ return ret;
+
ret = iio_device_register(indio_dev);
if (ret < 0) {
dev_err(&client->dev, "device_register failed\n");
@@ -180,14 +232,23 @@ static int mc3230_resume(struct device *dev)
static DEFINE_SIMPLE_DEV_PM_OPS(mc3230_pm_ops, mc3230_suspend, mc3230_resume);
static const struct i2c_device_id mc3230_i2c_id[] = {
- { "mc3230" },
- {}
+ { "mc3230", (kernel_ulong_t)&mc3230_chip_info },
+ { "mc3510c", (kernel_ulong_t)&mc3510c_chip_info },
+ { }
};
MODULE_DEVICE_TABLE(i2c, mc3230_i2c_id);
+static const struct of_device_id mc3230_of_match[] = {
+ { .compatible = "mcube,mc3230", &mc3230_chip_info },
+ { .compatible = "mcube,mc3510c", &mc3510c_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mc3230_of_match);
+
static struct i2c_driver mc3230_driver = {
.driver = {
.name = "mc3230",
+ .of_match_table = mc3230_of_match,
.pm = pm_sleep_ptr(&mc3230_pm_ops),
},
.probe = mc3230_probe,
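
Editor's note, not part of the patch: the mc3230 hunks above introduce per-variant chip_info looked up with i2c_get_match_data(), carried as the match data of both the I2C ID table (kernel_ulong_t cast) and the OF table. A minimal sketch of that pattern with hypothetical demo_* names:

#include <linux/i2c.h>
#include <linux/mod_devicetable.h>

struct demo_chip_info {
	const char *name;
	int scale;
};

static const struct demo_chip_info demo_a_info = { .name = "demo-a", .scale = 1 };
static const struct demo_chip_info demo_b_info = { .name = "demo-b", .scale = 2 };

static int demo_probe(struct i2c_client *client)
{
	const struct demo_chip_info *info = i2c_get_match_data(client);

	if (!info)
		return -ENODATA;

	dev_info(&client->dev, "matched %s\n", info->name);
	return 0;
}

static const struct i2c_device_id demo_i2c_id[] = {
	{ "demo-a", (kernel_ulong_t)&demo_a_info },
	{ "demo-b", (kernel_ulong_t)&demo_b_info },
	{ }
};

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "vendor,demo-a", .data = &demo_a_info },
	{ .compatible = "vendor,demo-b", .data = &demo_b_info },
	{ }
};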
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 962d289065ab..05f5482f366e 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -497,14 +497,13 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&data->lock);
ret = mma8452_read(data, buffer);
mutex_unlock(&data->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -707,55 +706,45 @@ static int mma8452_set_hp_filter_frequency(struct mma8452_data *data,
return mma8452_change_config(data, MMA8452_HP_FILTER_CUTOFF, reg);
}
-static int mma8452_write_raw(struct iio_dev *indio_dev,
+static int __mma8452_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct mma8452_data *data = iio_priv(indio_dev);
- int i, ret;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ int i, j, ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
i = mma8452_get_samp_freq_index(data, val, val2);
- if (i < 0) {
- ret = i;
- break;
- }
+ if (i < 0)
+ return i;
+
data->ctrl_reg1 &= ~MMA8452_CTRL_DR_MASK;
data->ctrl_reg1 |= i << MMA8452_CTRL_DR_SHIFT;
data->sleep_val = mma8452_calculate_sleep(data);
- ret = mma8452_change_config(data, MMA8452_CTRL_REG1,
- data->ctrl_reg1);
- break;
+ return mma8452_change_config(data, MMA8452_CTRL_REG1,
+ data->ctrl_reg1);
+
case IIO_CHAN_INFO_SCALE:
i = mma8452_get_scale_index(data, val, val2);
- if (i < 0) {
- ret = i;
- break;
- }
+ if (i < 0)
+ return i;
data->data_cfg &= ~MMA8452_DATA_CFG_FS_MASK;
data->data_cfg |= i;
- ret = mma8452_change_config(data, MMA8452_DATA_CFG,
- data->data_cfg);
- break;
+ return mma8452_change_config(data, MMA8452_DATA_CFG,
+ data->data_cfg);
+
case IIO_CHAN_INFO_CALIBBIAS:
- if (val < -128 || val > 127) {
- ret = -EINVAL;
- break;
- }
+ if (val < -128 || val > 127)
+ return -EINVAL;
- ret = mma8452_change_config(data,
- MMA8452_OFF_X + chan->scan_index,
- val);
- break;
+ return mma8452_change_config(data,
+ MMA8452_OFF_X + chan->scan_index,
+ val);
case IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY:
if (val == 0 && val2 == 0) {
@@ -764,29 +753,38 @@ static int mma8452_write_raw(struct iio_dev *indio_dev,
data->data_cfg |= MMA8452_DATA_CFG_HPF_MASK;
ret = mma8452_set_hp_filter_frequency(data, val, val2);
if (ret < 0)
- break;
+ return ret;
}
- ret = mma8452_change_config(data, MMA8452_DATA_CFG,
+ return mma8452_change_config(data, MMA8452_DATA_CFG,
data->data_cfg);
- break;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = mma8452_get_odr_index(data);
+ j = mma8452_get_odr_index(data);
for (i = 0; i < ARRAY_SIZE(mma8452_os_ratio); i++) {
- if (mma8452_os_ratio[i][ret] == val) {
- ret = mma8452_set_power_mode(data, i);
- break;
- }
+ if (mma8452_os_ratio[i][j] == val)
+ return mma8452_set_power_mode(data, i);
}
- break;
+
+ return -EINVAL;
+
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
+}
+
+static int mma8452_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- iio_device_release_direct_mode(indio_dev);
+ ret = __mma8452_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/accel/msa311.c b/drivers/iio/accel/msa311.c
index e7fb860f3233..d31c11fbbe68 100644
--- a/drivers/iio/accel/msa311.c
+++ b/drivers/iio/accel/msa311.c
@@ -332,7 +332,7 @@ static const struct regmap_config msa311_regmap_config = {
.wr_table = &msa311_writeable_table,
.rd_table = &msa311_readable_table,
.volatile_table = &msa311_volatile_table,
- .cache_type = REGCACHE_RBTREE,
+ .cache_type = REGCACHE_MAPLE,
};
#define MSA311_GENMASK(field) ({ \
@@ -594,23 +594,24 @@ static int msa311_read_raw_data(struct iio_dev *indio_dev,
__le16 axis;
int err;
- err = pm_runtime_resume_and_get(dev);
- if (err)
- return err;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ iio_device_release_direct(indio_dev);
return err;
+ }
mutex_lock(&msa311->lock);
err = msa311_get_axis(msa311, chan, &axis);
mutex_unlock(&msa311->lock);
- iio_device_release_direct_mode(indio_dev);
-
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct(indio_dev);
+
if (err) {
dev_err(dev, "can't get axis %s (%pe)\n",
chan->datasheet_name, ERR_PTR(err));
@@ -756,18 +757,19 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
unsigned int odr;
int err;
- err = pm_runtime_resume_and_get(dev);
- if (err)
- return err;
-
/*
* Sampling frequency changing is prohibited when buffer mode is
* enabled, because sometimes MSA311 chip returns outliers during
* frequency values growing up in the read operation moment.
*/
- err = iio_device_claim_direct_mode(indio_dev);
- if (err)
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ err = pm_runtime_resume_and_get(dev);
+ if (err) {
+ iio_device_release_direct(indio_dev);
return err;
+ }
err = -EINVAL;
for (odr = 0; odr < ARRAY_SIZE(msa311_odr_table); odr++)
@@ -779,11 +781,11 @@ static int msa311_write_samp_freq(struct iio_dev *indio_dev, int val, int val2)
break;
}
- iio_device_release_direct_mode(indio_dev);
-
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
+ iio_device_release_direct(indio_dev);
+
if (err)
dev_err(dev, "can't update frequency (%pe)\n", ERR_PTR(err));
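
Editor's note, not part of the patch: the msa311 hunks above settle on claiming direct mode before taking the runtime-PM reference and dropping the two in reverse order, so a busy (buffered) device is never resumed and a PM failure releases the claim. A minimal sketch of that ordering; my_do_work() is a hypothetical helper:

#include <linux/iio/iio.h>
#include <linux/pm_runtime.h>

static int my_direct_op(struct iio_dev *indio_dev, struct device *dev)
{
	int err;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;

	err = pm_runtime_resume_and_get(dev);
	if (err) {
		iio_device_release_direct(indio_dev);
		return err;
	}

	err = my_do_work(indio_dev);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	iio_device_release_direct(indio_dev);

	return err;
}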
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 849c90203071..6529df1a498c 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -33,6 +33,20 @@ config AD4000
To compile this driver as a module, choose M here: the module will be
called ad4000.
+config AD4030
+ tristate "Analog Devices AD4030 ADC Driver"
+ depends on SPI
+ depends on GPIOLIB
+ select REGMAP
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say yes here to build support for Analog Devices AD4030 and AD4630 high speed
+ SPI analog to digital converters (ADC).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4030.
+
config AD4130
tristate "Analog Device AD4130 ADC Driver"
depends on SPI
@@ -51,9 +65,11 @@ config AD4130
config AD4695
tristate "Analog Device AD4695 ADC Driver"
depends on SPI
- select REGMAP_SPI
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGERED_BUFFER
+ select REGMAP
+ select SPI_OFFLOAD
help
Say yes here to build support for Analog Devices AD4695 and similar
analog to digital converters (ADC).
@@ -61,6 +77,20 @@ config AD4695
To compile this driver as a module, choose M here: the module will be
called ad4695.
+config AD4851
+ tristate "Analog Device AD4851 DAS Driver"
+ depends on SPI
+ depends on PWM
+ select REGMAP_SPI
+ select IIO_BACKEND
+ help
+ Say yes here to build support for Analog Devices AD4851, AD4852,
+ AD4853, AD4854, AD4855, AD4856, AD4857, AD4858, AD4858I high speed
+ data acquisition system (DAS).
+
+ To compile this driver as a module, choose M here: the module will be
+ called ad4851.
+
config AD7091R
tristate
@@ -112,6 +142,16 @@ config AD7173
To compile this driver as a module, choose M here: the module will be
called ad7173.
+config AD7191
+ tristate "Analog Devices AD7191 ADC driver"
+ depends on SPI
+ select AD_SIGMA_DELTA
+ help
+ Say yes here to build support for Analog Devices AD7191.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad7191.
+
config AD7192
tristate "Analog Devices AD7192 and similar ADC driver"
depends on SPI
@@ -188,7 +228,9 @@ config AD7298
config AD7380
tristate "Analog Devices AD7380 ADC driver"
depends on SPI_MASTER
+ select SPI_OFFLOAD
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGER
select IIO_TRIGGERED_BUFFER
help
@@ -360,7 +402,9 @@ config AD7923
config AD7944
tristate "Analog Devices AD7944 and similar ADCs driver"
depends on SPI
+ select SPI_OFFLOAD
select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices
@@ -1467,6 +1511,16 @@ config TI_ADS1119
This driver can also be built as a module. If so, the module will be
called ti-ads1119.
+config TI_ADS7138
+ tristate "Texas Instruments ADS7128 and ADS7138 ADC driver"
+ depends on I2C
+ help
+ If you say yes here you get support for Texas Instruments ADS7128 and
+ ADS7138 8-channel A/D converters with 12-bit resolution.
+
+ This driver can also be built as a module. If so, the module will be
+ called ti-ads7138.
+
config TI_ADS7924
tristate "Texas Instruments ADS7924 ADC"
depends on I2C
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index ee19afba62b7..3e918c3eec69 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -7,13 +7,16 @@
obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD4000) += ad4000.o
+obj-$(CONFIG_AD4030) += ad4030.o
obj-$(CONFIG_AD4130) += ad4130.o
obj-$(CONFIG_AD4695) += ad4695.o
+obj-$(CONFIG_AD4851) += ad4851.o
obj-$(CONFIG_AD7091R) += ad7091r-base.o
obj-$(CONFIG_AD7091R5) += ad7091r5.o
obj-$(CONFIG_AD7091R8) += ad7091r8.o
obj-$(CONFIG_AD7124) += ad7124.o
obj-$(CONFIG_AD7173) += ad7173.o
+obj-$(CONFIG_AD7191) += ad7191.o
obj-$(CONFIG_AD7192) += ad7192.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7280) += ad7280a.o
@@ -133,6 +136,7 @@ obj-$(CONFIG_TI_ADS1119) += ti-ads1119.o
obj-$(CONFIG_TI_ADS124S08) += ti-ads124s08.o
obj-$(CONFIG_TI_ADS1298) += ti-ads1298.o
obj-$(CONFIG_TI_ADS131E08) += ti-ads131e08.o
+obj-$(CONFIG_TI_ADS7138) += ti-ads7138.o
obj-$(CONFIG_TI_ADS7924) += ti-ads7924.o
obj-$(CONFIG_TI_ADS7950) += ti-ads7950.o
obj-$(CONFIG_TI_ADS8344) += ti-ads8344.o
diff --git a/drivers/iio/adc/ad4000.c b/drivers/iio/adc/ad4000.c
index 1d556a842a68..4fe8dee48da9 100644
--- a/drivers/iio/adc/ad4000.c
+++ b/drivers/iio/adc/ad4000.c
@@ -535,12 +535,16 @@ static int ad4000_read_raw(struct iio_dev *indio_dev,
int *val2, long info)
{
struct ad4000_state *st = iio_priv(indio_dev);
+ int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad4000_single_conversion(indio_dev, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4000_single_conversion(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->scale_tbl[st->span_comp][0];
*val2 = st->scale_tbl[st->span_comp][1];
@@ -585,36 +589,46 @@ static int ad4000_write_raw_get_fmt(struct iio_dev *indio_dev,
}
}
-static int ad4000_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val, int val2,
- long mask)
+static int __ad4000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val2)
{
struct ad4000_state *st = iio_priv(indio_dev);
unsigned int reg_val;
bool span_comp_en;
int ret;
- switch (mask) {
- case IIO_CHAN_INFO_SCALE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
+
+ ret = ad4000_read_reg(st, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ span_comp_en = val2 == st->scale_tbl[1][1];
+ reg_val &= ~AD4000_CFG_SPAN_COMP;
+ reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en);
- ret = ad4000_read_reg(st, &reg_val);
- if (ret < 0)
- return ret;
+ ret = ad4000_write_reg(st, reg_val);
+ if (ret < 0)
+ return ret;
- span_comp_en = val2 == st->scale_tbl[1][1];
- reg_val &= ~AD4000_CFG_SPAN_COMP;
- reg_val |= FIELD_PREP(AD4000_CFG_SPAN_COMP, span_comp_en);
+ st->span_comp = span_comp_en;
+ return 0;
+}
- ret = ad4000_write_reg(st, reg_val);
- if (ret < 0)
- return ret;
+static int ad4000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
- st->span_comp = span_comp_en;
- return 0;
- }
- unreachable();
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ad4000_write_raw(indio_dev, chan, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
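
Editor's note, not part of the patch: the new ad4030.c driver added below descrambles two bit-interleaved channel words in ad4030_extract_interleaved(), working byte-wise on a 64-bit frame with shift/mask steps. As a simplified stand-alone illustration of the same idea (even/odd bit split only, not the driver's exact byte ordering), the following compiles and runs on its own:

#include <stdint.h>
#include <stdio.h>

static void deinterleave16(uint16_t in, uint8_t *a, uint8_t *b)
{
	uint8_t va = 0, vb = 0;

	for (int i = 0; i < 8; i++) {
		va |= ((in >> (2 * i)) & 1) << i;	/* even bits -> a */
		vb |= ((in >> (2 * i + 1)) & 1) << i;	/* odd bits  -> b */
	}
	*a = va;
	*b = vb;
}

int main(void)
{
	uint8_t a, b;

	/* 0xAAAA has every odd bit set: expect a = 0x00, b = 0xFF */
	deinterleave16(0xAAAA, &a, &b);
	printf("a=0x%02X b=0x%02X\n", a, b);
	return 0;
}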
diff --git a/drivers/iio/adc/ad4030.c b/drivers/iio/adc/ad4030.c
new file mode 100644
index 000000000000..9a020680885d
--- /dev/null
+++ b/drivers/iio/adc/ad4030.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD4030 and AD4630 ADC family driver.
+ *
+ * Copyright 2024 Analog Devices, Inc.
+ * Copyright 2024 BayLibre, SAS
+ *
+ * based on code from:
+ * Analog Devices, Inc.
+ * Sergiu Cuciurean <sergiu.cuciurean@analog.com>
+ * Nuno Sa <nuno.sa@analog.com>
+ * Marcelo Schmitt <marcelo.schmitt@analog.com>
+ * Liviu Adace <liviu.adace@analog.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#define AD4030_REG_INTERFACE_CONFIG_A 0x00
+#define AD4030_REG_INTERFACE_CONFIG_A_SW_RESET (BIT(0) | BIT(7))
+#define AD4030_REG_INTERFACE_CONFIG_B 0x01
+#define AD4030_REG_DEVICE_CONFIG 0x02
+#define AD4030_REG_CHIP_TYPE 0x03
+#define AD4030_REG_PRODUCT_ID_L 0x04
+#define AD4030_REG_PRODUCT_ID_H 0x05
+#define AD4030_REG_CHIP_GRADE 0x06
+#define AD4030_REG_CHIP_GRADE_AD4030_24_GRADE 0x10
+#define AD4030_REG_CHIP_GRADE_AD4630_16_GRADE 0x03
+#define AD4030_REG_CHIP_GRADE_AD4630_24_GRADE 0x00
+#define AD4030_REG_CHIP_GRADE_AD4632_16_GRADE 0x05
+#define AD4030_REG_CHIP_GRADE_AD4632_24_GRADE 0x02
+#define AD4030_REG_CHIP_GRADE_MASK_CHIP_GRADE GENMASK(7, 3)
+#define AD4030_REG_SCRATCH_PAD 0x0A
+#define AD4030_REG_SPI_REVISION 0x0B
+#define AD4030_REG_VENDOR_L 0x0C
+#define AD4030_REG_VENDOR_H 0x0D
+#define AD4030_REG_STREAM_MODE 0x0E
+#define AD4030_REG_INTERFACE_CONFIG_C 0x10
+#define AD4030_REG_INTERFACE_STATUS_A 0x11
+#define AD4030_REG_EXIT_CFG_MODE 0x14
+#define AD4030_REG_EXIT_CFG_MODE_EXIT_MSK BIT(0)
+#define AD4030_REG_AVG 0x15
+#define AD4030_REG_AVG_MASK_AVG_SYNC BIT(7)
+#define AD4030_REG_AVG_MASK_AVG_VAL GENMASK(4, 0)
+#define AD4030_REG_OFFSET_X0_0 0x16
+#define AD4030_REG_OFFSET_X0_1 0x17
+#define AD4030_REG_OFFSET_X0_2 0x18
+#define AD4030_REG_OFFSET_X1_0 0x19
+#define AD4030_REG_OFFSET_X1_1 0x1A
+#define AD4030_REG_OFFSET_X1_2 0x1B
+#define AD4030_REG_OFFSET_BYTES_NB 3
+#define AD4030_REG_OFFSET_CHAN(ch) \
+ (AD4030_REG_OFFSET_X0_2 + (AD4030_REG_OFFSET_BYTES_NB * (ch)))
+#define AD4030_REG_GAIN_X0_LSB 0x1C
+#define AD4030_REG_GAIN_X0_MSB 0x1D
+#define AD4030_REG_GAIN_X1_LSB 0x1E
+#define AD4030_REG_GAIN_X1_MSB 0x1F
+#define AD4030_REG_GAIN_MAX_GAIN 1999970
+#define AD4030_REG_GAIN_BYTES_NB 2
+#define AD4030_REG_GAIN_CHAN(ch) \
+ (AD4030_REG_GAIN_X0_MSB + (AD4030_REG_GAIN_BYTES_NB * (ch)))
+#define AD4030_REG_MODES 0x20
+#define AD4030_REG_MODES_MASK_OUT_DATA_MODE GENMASK(2, 0)
+#define AD4030_REG_MODES_MASK_LANE_MODE GENMASK(7, 6)
+#define AD4030_REG_OSCILATOR 0x21
+#define AD4030_REG_IO 0x22
+#define AD4030_REG_IO_MASK_IO2X BIT(1)
+#define AD4030_REG_PAT0 0x23
+#define AD4030_REG_PAT1 0x24
+#define AD4030_REG_PAT2 0x25
+#define AD4030_REG_PAT3 0x26
+#define AD4030_REG_DIG_DIAG 0x34
+#define AD4030_REG_DIG_ERR 0x35
+
+/* Sequence starting with "1 0 1" to enable reg access */
+#define AD4030_REG_ACCESS 0xA0
+
+#define AD4030_MAX_IIO_SAMPLE_SIZE_BUFFERED BITS_TO_BYTES(64)
+#define AD4030_MAX_HARDWARE_CHANNEL_NB 2
+#define AD4030_MAX_IIO_CHANNEL_NB 5
+#define AD4030_SINGLE_COMMON_BYTE_CHANNELS_MASK 0b10
+#define AD4030_DUAL_COMMON_BYTE_CHANNELS_MASK 0b1100
+#define AD4030_GAIN_MIDLE_POINT 0x8000
+/*
+ * This accounts for 1 sample per channel plus one s64 for the timestamp,
+ * aligned on a s64 boundary
+ */
+#define AD4030_MAXIMUM_RX_BUFFER_SIZE \
+ (ALIGN(AD4030_MAX_IIO_SAMPLE_SIZE_BUFFERED * \
+ AD4030_MAX_HARDWARE_CHANNEL_NB, \
+ sizeof(s64)) + sizeof(s64))
+
+#define AD4030_VREF_MIN_UV (4096 * MILLI)
+#define AD4030_VREF_MAX_UV (5000 * MILLI)
+#define AD4030_VIO_THRESHOLD_UV (1400 * MILLI)
+#define AD4030_SPI_MAX_XFER_LEN 8
+#define AD4030_SPI_MAX_REG_XFER_SPEED (80 * MEGA)
+#define AD4030_TCNVH_NS 10
+#define AD4030_TCNVL_NS 20
+#define AD4030_TCYC_NS 500
+#define AD4030_TCYC_ADJUSTED_NS (AD4030_TCYC_NS - AD4030_TCNVL_NS)
+#define AD4030_TRESET_PW_NS 50
+#define AD4632_TCYC_NS 2000
+#define AD4632_TCYC_ADJUSTED_NS (AD4632_TCYC_NS - AD4030_TCNVL_NS)
+#define AD4030_TRESET_COM_DELAY_MS 750
+
+enum ad4030_out_mode {
+ AD4030_OUT_DATA_MD_DIFF,
+ AD4030_OUT_DATA_MD_16_DIFF_8_COM,
+ AD4030_OUT_DATA_MD_24_DIFF_8_COM,
+ AD4030_OUT_DATA_MD_30_AVERAGED_DIFF,
+ AD4030_OUT_DATA_MD_32_PATTERN,
+};
+
+enum {
+ AD4030_LANE_MD_1_PER_CH,
+ AD4030_LANE_MD_2_PER_CH,
+ AD4030_LANE_MD_4_PER_CH,
+ AD4030_LANE_MD_INTERLEAVED,
+};
+
+enum {
+ AD4030_SCAN_TYPE_NORMAL,
+ AD4030_SCAN_TYPE_AVG,
+};
+
+struct ad4030_chip_info {
+ const char *name;
+ const unsigned long *available_masks;
+ const struct iio_chan_spec channels[AD4030_MAX_IIO_CHANNEL_NB];
+ u8 grade;
+ u8 precision_bits;
+ /* Number of hardware channels */
+ int num_voltage_inputs;
+ unsigned int tcyc_ns;
+};
+
+struct ad4030_state {
+ struct spi_device *spi;
+ struct regmap *regmap;
+ const struct ad4030_chip_info *chip;
+ const struct iio_scan_type *current_scan_type;
+ struct gpio_desc *cnv_gpio;
+ int vref_uv;
+ int vio_uv;
+ int offset_avail[3];
+ unsigned int avg_log2;
+ enum ad4030_out_mode mode;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the transfer buffers
+ * to live in their own cache lines.
+ */
+ u8 tx_data[AD4030_SPI_MAX_XFER_LEN] __aligned(IIO_DMA_MINALIGN);
+ union {
+ u8 raw[AD4030_MAXIMUM_RX_BUFFER_SIZE];
+ struct {
+ s32 diff;
+ u8 common;
+ } single;
+ struct {
+ s32 diff[2];
+ u8 common[2];
+ } dual;
+ } rx_data;
+};
+
+/*
+ * For a chip with 2 hardware channel this will be used to create 2 common-mode
+ * channels:
+ * - voltage4
+ * - voltage5
+ * As the common-mode channels are after the differential ones, we compute the
+ * channel number like this:
+ * - _idx is the scan_index (the order in the output buffer)
+ * - _ch is the hardware channel number this common-mode channel is related
+ * - _idx - _ch gives us the number of channel in the chip
+ * - _idx - _ch * 2 is the starting number of the common-mode channels, since
+ * for each differential channel there is a common-mode channel
+ * - _idx - _ch * 2 + _ch gives the channel number for this specific common-mode
+ * channel
+ */
+#define AD4030_CHAN_CMO(_idx, _ch) { \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_ch), \
+ .channel = ((_idx) - (_ch)) * 2 + (_ch), \
+ .scan_index = (_idx), \
+ .scan_type = { \
+ .sign = 'u', \
+ .storagebits = 8, \
+ .realbits = 8, \
+ .endianness = IIO_BE, \
+ }, \
+}
+
+/*
+ * For a chip with 2 hardware channel this will be used to create 2 differential
+ * channels:
+ * - voltage0-voltage1
+ * - voltage2-voltage3
+ */
+#define AD4030_CHAN_DIFF(_idx, _scan_type) { \
+ .info_mask_shared_by_all = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .address = (_idx), \
+ .channel = (_idx) * 2, \
+ .channel2 = (_idx) * 2 + 1, \
+ .scan_index = (_idx), \
+ .differential = true, \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = _scan_type, \
+ .num_ext_scan_type = ARRAY_SIZE(_scan_type), \
+}
+
+static const int ad4030_average_modes[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ 256, 512, 1024, 2048, 4096, 8192, 16384, 32768,
+ 65536,
+};
+
+static int ad4030_enter_config_mode(struct ad4030_state *st)
+{
+ st->tx_data[0] = AD4030_REG_ACCESS;
+
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .bits_per_word = 8,
+ .len = 1,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4030_exit_config_mode(struct ad4030_state *st)
+{
+ st->tx_data[0] = 0;
+ st->tx_data[1] = AD4030_REG_EXIT_CFG_MODE;
+ st->tx_data[2] = AD4030_REG_EXIT_CFG_MODE_EXIT_MSK;
+
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .bits_per_word = 8,
+ .len = 3,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
+static int ad4030_spi_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ int ret;
+ struct ad4030_state *st = context;
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .rx_buf = st->rx_data.raw,
+ .bits_per_word = 8,
+ .len = reg_size + val_size,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ if (xfer.len > sizeof(st->tx_data) ||
+ xfer.len > sizeof(st->rx_data.raw))
+ return -EINVAL;
+
+ ret = ad4030_enter_config_mode(st);
+ if (ret)
+ return ret;
+
+ memset(st->tx_data, 0, sizeof(st->tx_data));
+ memcpy(st->tx_data, reg, reg_size);
+
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ memcpy(val, &st->rx_data.raw[reg_size], val_size);
+
+ return ad4030_exit_config_mode(st);
+}
+
+static int ad4030_spi_write(void *context, const void *data, size_t count)
+{
+ int ret;
+ struct ad4030_state *st = context;
+ bool is_reset = count >= 3 &&
+ ((u8 *)data)[0] == 0 &&
+ ((u8 *)data)[1] == 0 &&
+ ((u8 *)data)[2] == 0x81;
+ struct spi_transfer xfer = {
+ .tx_buf = st->tx_data,
+ .bits_per_word = 8,
+ .len = count,
+ .speed_hz = AD4030_SPI_MAX_REG_XFER_SPEED,
+ };
+
+ if (count > sizeof(st->tx_data))
+ return -EINVAL;
+
+ ret = ad4030_enter_config_mode(st);
+ if (ret)
+ return ret;
+
+ memcpy(st->tx_data, data, count);
+
+ ret = spi_sync_transfer(st->spi, &xfer, 1);
+ if (ret)
+ return ret;
+
+ /*
+ * From datasheet: "After a [...] reset, no SPI commands or conversions
+ * can be started for 750us"
+ * After a reset we are in conversion mode, no need to exit config mode
+ */
+ if (is_reset) {
+ fsleep(750);
+ return 0;
+ }
+
+ return ad4030_exit_config_mode(st);
+}
+
+static const struct regmap_bus ad4030_regmap_bus = {
+ .read = ad4030_spi_read,
+ .write = ad4030_spi_write,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+static const struct regmap_range ad4030_regmap_rd_range[] = {
+ regmap_reg_range(AD4030_REG_INTERFACE_CONFIG_A, AD4030_REG_CHIP_GRADE),
+ regmap_reg_range(AD4030_REG_SCRATCH_PAD, AD4030_REG_STREAM_MODE),
+ regmap_reg_range(AD4030_REG_INTERFACE_CONFIG_C,
+ AD4030_REG_INTERFACE_STATUS_A),
+ regmap_reg_range(AD4030_REG_EXIT_CFG_MODE, AD4030_REG_PAT3),
+ regmap_reg_range(AD4030_REG_DIG_DIAG, AD4030_REG_DIG_ERR),
+};
+
+static const struct regmap_range ad4030_regmap_wr_range[] = {
+ regmap_reg_range(AD4030_REG_CHIP_TYPE, AD4030_REG_CHIP_GRADE),
+ regmap_reg_range(AD4030_REG_SPI_REVISION, AD4030_REG_VENDOR_H),
+};
+
+static const struct regmap_access_table ad4030_regmap_rd_table = {
+ .yes_ranges = ad4030_regmap_rd_range,
+ .n_yes_ranges = ARRAY_SIZE(ad4030_regmap_rd_range),
+};
+
+static const struct regmap_access_table ad4030_regmap_wr_table = {
+ .no_ranges = ad4030_regmap_wr_range,
+ .n_no_ranges = ARRAY_SIZE(ad4030_regmap_wr_range),
+};
+
+static const struct regmap_config ad4030_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = 0x80,
+ .rd_table = &ad4030_regmap_rd_table,
+ .wr_table = &ad4030_regmap_wr_table,
+ .max_register = AD4030_REG_DIG_ERR,
+};
+
+static int ad4030_get_chan_scale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+
+ if (chan->differential) {
+ scan_type = iio_get_current_scan_type(indio_dev,
+ st->chip->channels);
+ *val = (st->vref_uv * 2) / MILLI;
+ *val2 = scan_type->realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ *val = st->vref_uv / MILLI;
+ *val2 = chan->scan_type.realbits;
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
+static int ad4030_get_chan_calibscale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ u16 gain;
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap, AD4030_REG_GAIN_CHAN(chan->address),
+ st->rx_data.raw, AD4030_REG_GAIN_BYTES_NB);
+ if (ret)
+ return ret;
+
+ gain = get_unaligned_be16(st->rx_data.raw);
+
+ /* From datasheet: multiplied output = input × gain word/0x8000 */
+ *val = gain / AD4030_GAIN_MIDLE_POINT;
+ *val2 = mul_u64_u32_div(gain % AD4030_GAIN_MIDLE_POINT, NANO,
+ AD4030_GAIN_MIDLE_POINT);
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+/* Returns the offset where 1 LSB = (VREF/2^precision_bits - 1)/gain */
+static int ad4030_get_chan_calibbias(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = regmap_bulk_read(st->regmap,
+ AD4030_REG_OFFSET_CHAN(chan->address),
+ st->rx_data.raw, AD4030_REG_OFFSET_BYTES_NB);
+ if (ret)
+ return ret;
+
+ switch (st->chip->precision_bits) {
+ case 16:
+ *val = sign_extend32(get_unaligned_be16(st->rx_data.raw), 15);
+ return IIO_VAL_INT;
+
+ case 24:
+ *val = sign_extend32(get_unaligned_be24(st->rx_data.raw), 23);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_set_chan_calibscale(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int gain_int,
+ int gain_frac)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ u64 gain;
+
+ if (gain_int < 0 || gain_frac < 0)
+ return -EINVAL;
+
+ gain = mul_u32_u32(gain_int, MICRO) + gain_frac;
+
+ if (gain > AD4030_REG_GAIN_MAX_GAIN)
+ return -EINVAL;
+
+ put_unaligned_be16(DIV_ROUND_CLOSEST_ULL(gain * AD4030_GAIN_MIDLE_POINT,
+ MICRO),
+ st->tx_data);
+
+ return regmap_bulk_write(st->regmap,
+ AD4030_REG_GAIN_CHAN(chan->address),
+ st->tx_data, AD4030_REG_GAIN_BYTES_NB);
+}
+
+static int ad4030_set_chan_calibbias(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int offset)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ if (offset < st->offset_avail[0] || offset > st->offset_avail[2])
+ return -EINVAL;
+
+ st->tx_data[2] = 0;
+
+ switch (st->chip->precision_bits) {
+ case 16:
+ put_unaligned_be16(offset, st->tx_data);
+ break;
+
+ case 24:
+ put_unaligned_be24(offset, st->tx_data);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_bulk_write(st->regmap,
+ AD4030_REG_OFFSET_CHAN(chan->address),
+ st->tx_data, AD4030_REG_OFFSET_BYTES_NB);
+}
+
+static int ad4030_set_avg_frame_len(struct iio_dev *dev, int avg_val)
+{
+ struct ad4030_state *st = iio_priv(dev);
+ unsigned int avg_log2 = ilog2(avg_val);
+ unsigned int last_avg_idx = ARRAY_SIZE(ad4030_average_modes) - 1;
+ int ret;
+
+ if (avg_val < 0 || avg_val > ad4030_average_modes[last_avg_idx])
+ return -EINVAL;
+
+ ret = regmap_write(st->regmap, AD4030_REG_AVG,
+ AD4030_REG_AVG_MASK_AVG_SYNC |
+ FIELD_PREP(AD4030_REG_AVG_MASK_AVG_VAL, avg_log2));
+ if (ret)
+ return ret;
+
+ st->avg_log2 = avg_log2;
+
+ return 0;
+}
+
+static bool ad4030_is_common_byte_asked(struct ad4030_state *st,
+ unsigned int mask)
+{
+ return mask & (st->chip->num_voltage_inputs == 1 ?
+ AD4030_SINGLE_COMMON_BYTE_CHANNELS_MASK :
+ AD4030_DUAL_COMMON_BYTE_CHANNELS_MASK);
+}
+
+static int ad4030_set_mode(struct iio_dev *indio_dev, unsigned long mask)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ if (st->avg_log2 > 0) {
+ st->mode = AD4030_OUT_DATA_MD_30_AVERAGED_DIFF;
+ } else if (ad4030_is_common_byte_asked(st, mask)) {
+ switch (st->chip->precision_bits) {
+ case 16:
+ st->mode = AD4030_OUT_DATA_MD_16_DIFF_8_COM;
+ break;
+
+ case 24:
+ st->mode = AD4030_OUT_DATA_MD_24_DIFF_8_COM;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ } else {
+ st->mode = AD4030_OUT_DATA_MD_DIFF;
+ }
+
+ st->current_scan_type = iio_get_current_scan_type(indio_dev,
+ st->chip->channels);
+ if (IS_ERR(st->current_scan_type))
+ return PTR_ERR(st->current_scan_type);
+
+ return regmap_update_bits(st->regmap, AD4030_REG_MODES,
+ AD4030_REG_MODES_MASK_OUT_DATA_MODE,
+ st->mode);
+}
+
+/*
+ * Descramble 2 32bits numbers out of a 64bits. The bits are interleaved:
+ * 1 bit for first number, 1 bit for the second, and so on...
+ */
+static void ad4030_extract_interleaved(u8 *src, u32 *ch0, u32 *ch1)
+{
+ u8 h0, h1, l0, l1;
+ u32 out0, out1;
+ u8 *out0_raw = (u8 *)&out0;
+ u8 *out1_raw = (u8 *)&out1;
+
+ for (int i = 0; i < 4; i++) {
+ h0 = src[i * 2];
+ l1 = src[i * 2 + 1];
+ h1 = h0 << 1;
+ l0 = l1 >> 1;
+
+ h0 &= 0xAA;
+ l0 &= 0x55;
+ h1 &= 0xAA;
+ l1 &= 0x55;
+
+ h0 = (h0 | h0 << 001) & 0xCC;
+ h1 = (h1 | h1 << 001) & 0xCC;
+ l0 = (l0 | l0 >> 001) & 0x33;
+ l1 = (l1 | l1 >> 001) & 0x33;
+ h0 = (h0 | h0 << 002) & 0xF0;
+ h1 = (h1 | h1 << 002) & 0xF0;
+ l0 = (l0 | l0 >> 002) & 0x0F;
+ l1 = (l1 | l1 >> 002) & 0x0F;
+
+ out0_raw[i] = h0 | l0;
+ out1_raw[i] = h1 | l1;
+ }
+
+ *ch0 = out0;
+ *ch1 = out1;
+}
+
+static int ad4030_conversion(struct iio_dev *indio_dev)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ unsigned char diff_realbytes =
+ BITS_TO_BYTES(st->current_scan_type->realbits);
+ unsigned char diff_storagebytes =
+ BITS_TO_BYTES(st->current_scan_type->storagebits);
+ unsigned int bytes_to_read;
+ unsigned long cnv_nb = BIT(st->avg_log2);
+ unsigned int i;
+ int ret;
+
+ /* Number of bytes for one differential channel */
+ bytes_to_read = diff_realbytes;
+ /* Add one byte if we are using a differential + common byte mode */
+ bytes_to_read += (st->mode == AD4030_OUT_DATA_MD_24_DIFF_8_COM ||
+ st->mode == AD4030_OUT_DATA_MD_16_DIFF_8_COM) ? 1 : 0;
+ /* Multiply by the number of hardware channels */
+ bytes_to_read *= st->chip->num_voltage_inputs;
+
+ for (i = 0; i < cnv_nb; i++) {
+ gpiod_set_value_cansleep(st->cnv_gpio, 1);
+ ndelay(AD4030_TCNVH_NS);
+ gpiod_set_value_cansleep(st->cnv_gpio, 0);
+ ndelay(st->chip->tcyc_ns);
+ }
+
+ ret = spi_read(st->spi, st->rx_data.raw, bytes_to_read);
+ if (ret)
+ return ret;
+
+ if (st->chip->num_voltage_inputs == 2)
+ ad4030_extract_interleaved(st->rx_data.raw,
+ &st->rx_data.dual.diff[0],
+ &st->rx_data.dual.diff[1]);
+
+ if (st->mode != AD4030_OUT_DATA_MD_16_DIFF_8_COM &&
+ st->mode != AD4030_OUT_DATA_MD_24_DIFF_8_COM)
+ return 0;
+
+ if (st->chip->num_voltage_inputs == 1) {
+ st->rx_data.single.common = st->rx_data.raw[diff_realbytes];
+ return 0;
+ }
+
+ for (i = 0; i < st->chip->num_voltage_inputs; i++)
+ st->rx_data.dual.common[i] =
+ st->rx_data.raw[diff_storagebytes * i + diff_realbytes];
+
+ return 0;
+}
+
+static int ad4030_single_conversion(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4030_set_mode(indio_dev, BIT(chan->scan_index));
+ if (ret)
+ return ret;
+
+ st->current_scan_type = iio_get_current_scan_type(indio_dev,
+ st->chip->channels);
+ if (IS_ERR(st->current_scan_type))
+ return PTR_ERR(st->current_scan_type);
+
+ ret = ad4030_conversion(indio_dev);
+ if (ret)
+ return ret;
+
+ if (chan->differential)
+ if (st->chip->num_voltage_inputs == 1)
+ *val = st->rx_data.single.diff;
+ else
+ *val = st->rx_data.dual.diff[chan->address];
+ else
+ if (st->chip->num_voltage_inputs == 1)
+ *val = st->rx_data.single.common;
+ else
+ *val = st->rx_data.dual.common[chan->address];
+
+ return IIO_VAL_INT;
+}
+
+static irqreturn_t ad4030_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad4030_conversion(indio_dev);
+ if (ret)
+ goto out;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->rx_data.raw,
+ pf->timestamp);
+
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
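+/*
+ * Calibscale range exposed to userspace: 0 to (2 - 1/32768) in steps of
+ * 1/32768, i.e. the resolution of the 16-bit gain register where mid-scale
+ * corresponds to a gain of 1.
+ */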
+static const int ad4030_gain_avail[3][2] = {
+ { 0, 0 },
+ { 0, 30518 },
+ { 1, 999969482 },
+};
+
+static int ad4030_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel,
+ const int **vals, int *type,
+ int *length, long mask)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ *vals = st->offset_avail;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ *vals = (void *)ad4030_gain_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ return IIO_AVAIL_RANGE;
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ad4030_average_modes;
+ *type = IIO_VAL_INT;
+ *length = ARRAY_SIZE(ad4030_average_modes);
+ return IIO_AVAIL_LIST;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_read_raw_dispatch(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return ad4030_single_conversion(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4030_get_chan_calibscale(indio_dev, chan, val, val2);
+
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad4030_get_chan_calibbias(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *val = BIT(st->avg_log2);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long info)
+{
+ int ret;
+
+ if (info == IIO_CHAN_INFO_SCALE)
+ return ad4030_get_chan_scale(indio_dev, chan, val, val2);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4030_read_raw_dispatch(indio_dev, chan, val, val2, info);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad4030_write_raw_dispatch(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long info)
+{
+ switch (info) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4030_set_chan_calibscale(indio_dev, chan, val, val2);
+
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val2 != 0)
+ return -EINVAL;
+ return ad4030_set_chan_calibbias(indio_dev, chan, val);
+
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4030_set_avg_frame_len(indio_dev, val);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4030_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long info)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4030_write_raw_dispatch(indio_dev, chan, val, val2, info);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad4030_reg_access(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int writeval, unsigned int *readval)
+{
+ const struct ad4030_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (readval)
+ ret = regmap_read(st->regmap, reg, readval);
+ else
+ ret = regmap_write(st->regmap, reg, writeval);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad4030_read_label(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ char *label)
+{
+ if (chan->differential)
+ return sprintf(label, "differential%lu\n", chan->address);
+ return sprintf(label, "common-mode%lu\n", chan->address);
+}
+
+static int ad4030_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ return st->avg_log2 ? AD4030_SCAN_TYPE_AVG : AD4030_SCAN_TYPE_NORMAL;
+}
+
+static const struct iio_info ad4030_iio_info = {
+ .read_avail = ad4030_read_avail,
+ .read_raw = ad4030_read_raw,
+ .write_raw = ad4030_write_raw,
+ .debugfs_reg_access = ad4030_reg_access,
+ .read_label = ad4030_read_label,
+ .get_current_scan_type = ad4030_get_current_scan_type,
+};
+
+static int ad4030_buffer_preenable(struct iio_dev *indio_dev)
+{
+ return ad4030_set_mode(indio_dev, *indio_dev->active_scan_mask);
+}
+
+static bool ad4030_validate_scan_mask(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad4030_state *st = iio_priv(indio_dev);
+
+ /* Common-mode channels cannot be read while averaging is enabled */
+ if (st->avg_log2 && ad4030_is_common_byte_asked(st, *scan_mask))
+ return false;
+
+ return true;
+}
+
+static const struct iio_buffer_setup_ops ad4030_buffer_setup_ops = {
+ .preenable = ad4030_buffer_preenable,
+ .validate_scan_mask = ad4030_validate_scan_mask,
+};
+
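+/*
+ * The VDD supplies are enable-only. VIO is read back so that ad4030_config()
+ * can decide whether the IO2X bit must be set, and the reference voltage is
+ * taken from either the optional REF supply or, failing that, REFIN.
+ */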
+static int ad4030_regulators_get(struct ad4030_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ static const char * const ids[] = { "vdd-5v", "vdd-1v8" };
+ int ret;
+
+ ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(ids), ids);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable regulators\n");
+
+ st->vio_uv = devm_regulator_get_enable_read_voltage(dev, "vio");
+ if (st->vio_uv < 0)
+ return dev_err_probe(dev, st->vio_uv,
+ "Failed to enable and read vio voltage\n");
+
+ st->vref_uv = devm_regulator_get_enable_read_voltage(dev, "ref");
+ if (st->vref_uv < 0) {
+ if (st->vref_uv != -ENODEV)
+ return dev_err_probe(dev, st->vref_uv,
+ "Failed to read ref voltage\n");
+
+ /* If the optional REF supply is not used, REFIN must be used instead */
+ st->vref_uv = devm_regulator_get_enable_read_voltage(dev,
+ "refin");
+ if (st->vref_uv < 0)
+ return dev_err_probe(dev, st->vref_uv,
+ "Failed to read refin voltage\n");
+ }
+
+ return 0;
+}
+
+static int ad4030_reset(struct ad4030_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct gpio_desc *reset;
+
+ reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset),
+ "Failed to get reset GPIO\n");
+
+ if (reset) {
+ ndelay(50);
+ gpiod_set_value_cansleep(reset, 0);
+ return 0;
+ }
+
+ return regmap_write(st->regmap, AD4030_REG_INTERFACE_CONFIG_A,
+ AD4030_REG_INTERFACE_CONFIG_A_SW_RESET);
+}
+
+static int ad4030_detect_chip_info(const struct ad4030_state *st)
+{
+ unsigned int grade;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD4030_REG_CHIP_GRADE, &grade);
+ if (ret)
+ return ret;
+
+ grade = FIELD_GET(AD4030_REG_CHIP_GRADE_MASK_CHIP_GRADE, grade);
+ if (grade != st->chip->grade)
+ dev_warn(&st->spi->dev, "Unknown grade(0x%x) for %s\n", grade,
+ st->chip->name);
+
+ return 0;
+}
+
+static int ad4030_config(struct ad4030_state *st)
+{
+ int ret;
+ u8 reg_modes;
+
+ st->offset_avail[0] = (int)BIT(st->chip->precision_bits - 1) * -1;
+ st->offset_avail[1] = 1;
+ st->offset_avail[2] = BIT(st->chip->precision_bits - 1) - 1;
+
+ if (st->chip->num_voltage_inputs > 1)
+ reg_modes = FIELD_PREP(AD4030_REG_MODES_MASK_LANE_MODE,
+ AD4030_LANE_MD_INTERLEAVED);
+ else
+ reg_modes = FIELD_PREP(AD4030_REG_MODES_MASK_LANE_MODE,
+ AD4030_LANE_MD_1_PER_CH);
+
+ ret = regmap_write(st->regmap, AD4030_REG_MODES, reg_modes);
+ if (ret)
+ return ret;
+
+ if (st->vio_uv < AD4030_VIO_THRESHOLD_UV)
+ return regmap_write(st->regmap, AD4030_REG_IO,
+ AD4030_REG_IO_MASK_IO2X);
+
+ return 0;
+}
+
+static int ad4030_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct ad4030_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ st->regmap = devm_regmap_init(dev, &ad4030_regmap_bus, st,
+ &ad4030_regmap_config);
+ if (IS_ERR(st->regmap))
+ return dev_err_probe(dev, PTR_ERR(st->regmap),
+ "Failed to initialize regmap\n");
+
+ st->chip = spi_get_device_match_data(spi);
+ if (!st->chip)
+ return -EINVAL;
+
+ ret = ad4030_regulators_get(st);
+ if (ret)
+ return ret;
+
+ /*
+ * From datasheet: "Perform a reset no sooner than 3ms after the power
+ * supplies are valid and stable"
+ */
+ fsleep(3000);
+
+ ret = ad4030_reset(st);
+ if (ret)
+ return ret;
+
+ ret = ad4030_detect_chip_info(st);
+ if (ret)
+ return ret;
+
+ ret = ad4030_config(st);
+ if (ret)
+ return ret;
+
+ st->cnv_gpio = devm_gpiod_get(dev, "cnv", GPIOD_OUT_LOW);
+ if (IS_ERR(st->cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
+ "Failed to get cnv gpio\n");
+
+ /*
+ * Each hardware channel is split into two software channels when using
+ * common byte mode. One more channel is added for the timestamp.
+ */
+ indio_dev->num_channels = 2 * st->chip->num_voltage_inputs + 1;
+ indio_dev->name = st->chip->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &ad4030_iio_info;
+ indio_dev->channels = st->chip->channels;
+ indio_dev->available_scan_masks = st->chip->available_masks;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad4030_trigger_handler,
+ &ad4030_buffer_setup_ops);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Failed to setup triggered buffer\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const unsigned long ad4030_channel_masks[] = {
+ /* Differential only */
+ BIT(0),
+ /* Differential and common-mode voltage */
+ GENMASK(1, 0),
+ 0,
+};
+
+static const unsigned long ad4630_channel_masks[] = {
+ /* Differential only */
+ BIT(1) | BIT(0),
+ /* Differential with common byte */
+ GENMASK(3, 0),
+ 0,
+};
+
+static const struct iio_scan_type ad4030_24_scan_types[] = {
+ [AD4030_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 24,
+ .shift = 8,
+ .endianness = IIO_BE,
+ },
+ [AD4030_SCAN_TYPE_AVG] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 30,
+ .shift = 2,
+ .endianness = IIO_BE,
+ },
+};
+
+static const struct iio_scan_type ad4030_16_scan_types[] = {
+ [AD4030_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 16,
+ .shift = 16,
+ .endianness = IIO_BE,
+ },
+ [AD4030_SCAN_TYPE_AVG] = {
+ .sign = 's',
+ .storagebits = 32,
+ .realbits = 30,
+ .shift = 2,
+ .endianness = IIO_BE,
+ }
+};
+
+static const struct ad4030_chip_info ad4030_24_chip_info = {
+ .name = "ad4030-24",
+ .available_masks = ad4030_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_24_scan_types),
+ AD4030_CHAN_CMO(1, 0),
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4030_24_GRADE,
+ .precision_bits = 24,
+ .num_voltage_inputs = 1,
+ .tcyc_ns = AD4030_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4630_16_chip_info = {
+ .name = "ad4630-16",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_16_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_16_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4630_16_GRADE,
+ .precision_bits = 16,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4030_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4630_24_chip_info = {
+ .name = "ad4630-24",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_24_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_24_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4630_24_GRADE,
+ .precision_bits = 24,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4030_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4632_16_chip_info = {
+ .name = "ad4632-16",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_16_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_16_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4632_16_GRADE,
+ .precision_bits = 16,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4632_TCYC_ADJUSTED_NS,
+};
+
+static const struct ad4030_chip_info ad4632_24_chip_info = {
+ .name = "ad4632-24",
+ .available_masks = ad4630_channel_masks,
+ .channels = {
+ AD4030_CHAN_DIFF(0, ad4030_24_scan_types),
+ AD4030_CHAN_DIFF(1, ad4030_24_scan_types),
+ AD4030_CHAN_CMO(2, 0),
+ AD4030_CHAN_CMO(3, 1),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+ },
+ .grade = AD4030_REG_CHIP_GRADE_AD4632_24_GRADE,
+ .precision_bits = 24,
+ .num_voltage_inputs = 2,
+ .tcyc_ns = AD4632_TCYC_ADJUSTED_NS,
+};
+
+static const struct spi_device_id ad4030_id_table[] = {
+ { "ad4030-24", (kernel_ulong_t)&ad4030_24_chip_info },
+ { "ad4630-16", (kernel_ulong_t)&ad4630_16_chip_info },
+ { "ad4630-24", (kernel_ulong_t)&ad4630_24_chip_info },
+ { "ad4632-16", (kernel_ulong_t)&ad4632_16_chip_info },
+ { "ad4632-24", (kernel_ulong_t)&ad4632_24_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4030_id_table);
+
+static const struct of_device_id ad4030_of_match[] = {
+ { .compatible = "adi,ad4030-24", .data = &ad4030_24_chip_info },
+ { .compatible = "adi,ad4630-16", .data = &ad4630_16_chip_info },
+ { .compatible = "adi,ad4630-24", .data = &ad4630_24_chip_info },
+ { .compatible = "adi,ad4632-16", .data = &ad4632_16_chip_info },
+ { .compatible = "adi,ad4632-24", .data = &ad4632_24_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad4030_of_match);
+
+static struct spi_driver ad4030_driver = {
+ .driver = {
+ .name = "ad4030",
+ .of_match_table = ad4030_of_match,
+ },
+ .probe = ad4030_probe,
+ .id_table = ad4030_id_table,
+};
+module_spi_driver(ad4030_driver);
+
+MODULE_AUTHOR("Esteban Blanc <eblanc@baylibre.com>");
+MODULE_DESCRIPTION("Analog Devices AD4630 ADC family driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/ad4130.c b/drivers/iio/adc/ad4130.c
index de32cc9d18c5..0f4c9cd6c102 100644
--- a/drivers/iio/adc/ad4130.c
+++ b/drivers/iio/adc/ad4130.c
@@ -6,6 +6,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
@@ -203,7 +204,7 @@ enum ad4130_mode {
AD4130_MODE_IDLE = 0b0100,
};
-enum ad4130_filter_mode {
+enum ad4130_filter_type {
AD4130_FILTER_SINC4,
AD4130_FILTER_SINC4_SINC1,
AD4130_FILTER_SINC3,
@@ -223,6 +224,10 @@ enum ad4130_pin_function {
AD4130_PIN_FN_VBIAS = BIT(3),
};
+/*
+ * If you make adaptations to this struct, you most likely also have to
+ * adapt ad4130_setup_info_eq().
+ */
struct ad4130_setup_info {
unsigned int iout0_val;
unsigned int iout1_val;
@@ -230,7 +235,7 @@ struct ad4130_setup_info {
unsigned int pga;
unsigned int fs;
u32 ref_sel;
- enum ad4130_filter_mode filter_mode;
+ enum ad4130_filter_type filter_type;
bool ref_bufp;
bool ref_bufm;
};
@@ -251,7 +256,7 @@ struct ad4130_chan_info {
};
struct ad4130_filter_config {
- enum ad4130_filter_mode filter_mode;
+ enum ad4130_filter_type filter_type;
unsigned int odr_div;
unsigned int fs_max;
enum iio_available_type samp_freq_avail_type;
@@ -337,9 +342,9 @@ static const unsigned int ad4130_burnout_current_na_tbl[AD4130_BURNOUT_MAX] = {
[AD4130_BURNOUT_4000NA] = 4000,
};
-#define AD4130_VARIABLE_ODR_CONFIG(_filter_mode, _odr_div, _fs_max) \
+#define AD4130_VARIABLE_ODR_CONFIG(_filter_type, _odr_div, _fs_max) \
{ \
- .filter_mode = (_filter_mode), \
+ .filter_type = (_filter_type), \
.odr_div = (_odr_div), \
.fs_max = (_fs_max), \
.samp_freq_avail_type = IIO_AVAIL_RANGE, \
@@ -350,9 +355,9 @@ static const unsigned int ad4130_burnout_current_na_tbl[AD4130_BURNOUT_MAX] = {
}, \
}
-#define AD4130_FIXED_ODR_CONFIG(_filter_mode, _odr_div) \
+#define AD4130_FIXED_ODR_CONFIG(_filter_type, _odr_div) \
{ \
- .filter_mode = (_filter_mode), \
+ .filter_type = (_filter_type), \
.odr_div = (_odr_div), \
.fs_max = AD4130_FILTER_SELECT_MIN, \
.samp_freq_avail_type = IIO_AVAIL_LIST, \
@@ -374,7 +379,7 @@ static const struct ad4130_filter_config ad4130_filter_configs[] = {
AD4130_FIXED_ODR_CONFIG(AD4130_FILTER_SINC3_PF4, 148),
};
-static const char * const ad4130_filter_modes_str[] = {
+static const char * const ad4130_filter_types_str[] = {
[AD4130_FILTER_SINC4] = "sinc4",
[AD4130_FILTER_SINC4_SINC1] = "sinc4+sinc1",
[AD4130_FILTER_SINC3] = "sinc3",
@@ -591,6 +596,40 @@ static irqreturn_t ad4130_irq_handler(int irq, void *private)
return IRQ_HANDLED;
}
+static bool ad4130_setup_info_eq(struct ad4130_setup_info *a,
+ struct ad4130_setup_info *b)
+{
+ /*
+ * This is just to make sure that the comparison below is updated
+ * whenever struct ad4130_setup_info changes.
+ */
+ static_assert(sizeof(*a) ==
+ sizeof(struct {
+ unsigned int iout0_val;
+ unsigned int iout1_val;
+ unsigned int burnout;
+ unsigned int pga;
+ unsigned int fs;
+ u32 ref_sel;
+ enum ad4130_filter_type filter_type;
+ bool ref_bufp;
+ bool ref_bufm;
+ }));
+
+ if (a->iout0_val != b->iout0_val ||
+ a->iout1_val != b->iout1_val ||
+ a->burnout != b->burnout ||
+ a->pga != b->pga ||
+ a->fs != b->fs ||
+ a->ref_sel != b->ref_sel ||
+ a->filter_type != b->filter_type ||
+ a->ref_bufp != b->ref_bufp ||
+ a->ref_bufm != b->ref_bufm)
+ return false;
+
+ return true;
+}
+
static int ad4130_find_slot(struct ad4130_state *st,
struct ad4130_setup_info *target_setup_info,
unsigned int *slot, bool *overwrite)
@@ -604,8 +643,7 @@ static int ad4130_find_slot(struct ad4130_state *st,
struct ad4130_slot_info *slot_info = &st->slots_info[i];
/* Immediately accept a matching setup info. */
- if (!memcmp(target_setup_info, &slot_info->setup,
- sizeof(*target_setup_info))) {
+ if (ad4130_setup_info_eq(target_setup_info, &slot_info->setup)) {
*slot = i;
return 0;
}
@@ -691,7 +729,7 @@ static int ad4130_write_slot_setup(struct ad4130_state *st,
if (ret)
return ret;
- val = FIELD_PREP(AD4130_FILTER_MODE_MASK, setup_info->filter_mode) |
+ val = FIELD_PREP(AD4130_FILTER_MODE_MASK, setup_info->filter_type) |
FIELD_PREP(AD4130_FILTER_SELECT_MASK, setup_info->fs);
ret = regmap_write(st->regmap, AD4130_FILTER_X_REG(slot), val);
@@ -835,11 +873,11 @@ static int ad4130_set_channel_enable(struct ad4130_state *st,
* (used in ad4130_fs_to_freq)
*/
-static void ad4130_freq_to_fs(enum ad4130_filter_mode filter_mode,
+static void ad4130_freq_to_fs(enum ad4130_filter_type filter_type,
int val, int val2, unsigned int *fs)
{
const struct ad4130_filter_config *filter_config =
- &ad4130_filter_configs[filter_mode];
+ &ad4130_filter_configs[filter_type];
u64 dividend, divisor;
int temp;
@@ -858,11 +896,11 @@ static void ad4130_freq_to_fs(enum ad4130_filter_mode filter_mode,
*fs = temp;
}
-static void ad4130_fs_to_freq(enum ad4130_filter_mode filter_mode,
+static void ad4130_fs_to_freq(enum ad4130_filter_type filter_type,
unsigned int fs, int *val, int *val2)
{
const struct ad4130_filter_config *filter_config =
- &ad4130_filter_configs[filter_mode];
+ &ad4130_filter_configs[filter_type];
unsigned int dividend, divisor;
u64 temp;
@@ -874,7 +912,7 @@ static void ad4130_fs_to_freq(enum ad4130_filter_mode filter_mode,
*val = div_u64_rem(temp, NANO, val2);
}
-static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
+static int ad4130_set_filter_type(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
unsigned int val)
{
@@ -882,17 +920,17 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
unsigned int channel = chan->scan_index;
struct ad4130_chan_info *chan_info = &st->chans_info[channel];
struct ad4130_setup_info *setup_info = &chan_info->setup;
- enum ad4130_filter_mode old_filter_mode;
+ enum ad4130_filter_type old_filter_type;
int freq_val, freq_val2;
unsigned int old_fs;
int ret = 0;
guard(mutex)(&st->lock);
- if (setup_info->filter_mode == val)
+ if (setup_info->filter_type == val)
return 0;
old_fs = setup_info->fs;
- old_filter_mode = setup_info->filter_mode;
+ old_filter_type = setup_info->filter_type;
/*
* When switching between filter modes, try to match the ODR as
@@ -900,48 +938,55 @@ static int ad4130_set_filter_mode(struct iio_dev *indio_dev,
* using the old filter mode, then convert it back into FS using
* the new filter mode.
*/
- ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs,
+ ad4130_fs_to_freq(setup_info->filter_type, setup_info->fs,
&freq_val, &freq_val2);
ad4130_freq_to_fs(val, freq_val, freq_val2, &setup_info->fs);
- setup_info->filter_mode = val;
+ setup_info->filter_type = val;
ret = ad4130_write_channel_setup(st, channel, false);
if (ret) {
setup_info->fs = old_fs;
- setup_info->filter_mode = old_filter_mode;
+ setup_info->filter_type = old_filter_type;
return ret;
}
return 0;
}
-static int ad4130_get_filter_mode(struct iio_dev *indio_dev,
+static int ad4130_get_filter_type(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan)
{
struct ad4130_state *st = iio_priv(indio_dev);
unsigned int channel = chan->scan_index;
struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup;
- enum ad4130_filter_mode filter_mode;
+ enum ad4130_filter_type filter_type;
guard(mutex)(&st->lock);
- filter_mode = setup_info->filter_mode;
+ filter_type = setup_info->filter_type;
- return filter_mode;
+ return filter_type;
}
-static const struct iio_enum ad4130_filter_mode_enum = {
- .items = ad4130_filter_modes_str,
- .num_items = ARRAY_SIZE(ad4130_filter_modes_str),
- .set = ad4130_set_filter_mode,
- .get = ad4130_get_filter_mode,
+static const struct iio_enum ad4130_filter_type_enum = {
+ .items = ad4130_filter_types_str,
+ .num_items = ARRAY_SIZE(ad4130_filter_types_str),
+ .set = ad4130_set_filter_type,
+ .get = ad4130_get_filter_type,
};
-static const struct iio_chan_spec_ext_info ad4130_filter_mode_ext_info[] = {
- IIO_ENUM("filter_mode", IIO_SEPARATE, &ad4130_filter_mode_enum),
+static const struct iio_chan_spec_ext_info ad4130_ext_info[] = {
+ /*
+ * `filter_type` is the standardized IIO ABI for digital filtering.
+ * `filter_mode` is just kept for backwards compatibility.
+ */
+ IIO_ENUM("filter_mode", IIO_SEPARATE, &ad4130_filter_type_enum),
IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_TYPE,
- &ad4130_filter_mode_enum),
+ &ad4130_filter_type_enum),
+ IIO_ENUM("filter_type", IIO_SEPARATE, &ad4130_filter_type_enum),
+ IIO_ENUM_AVAILABLE("filter_type", IIO_SHARED_BY_TYPE,
+ &ad4130_filter_type_enum),
{ }
};
@@ -955,7 +1000,7 @@ static const struct iio_chan_spec ad4130_channel_template = {
BIT(IIO_CHAN_INFO_SAMP_FREQ),
.info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ),
- .ext_info = ad4130_filter_mode_ext_info,
+ .ext_info = ad4130_ext_info,
.scan_type = {
.sign = 'u',
.endianness = IIO_BE,
@@ -1005,7 +1050,7 @@ static int ad4130_set_channel_freq(struct ad4130_state *st,
guard(mutex)(&st->lock);
old_fs = setup_info->fs;
- ad4130_freq_to_fs(setup_info->filter_mode, val, val2, &fs);
+ ad4130_freq_to_fs(setup_info->filter_type, val, val2, &fs);
if (fs == setup_info->fs)
return 0;
@@ -1060,13 +1105,11 @@ static int _ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
static int ad4130_read_sample(struct iio_dev *indio_dev, unsigned int channel,
int *val)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct ad4130_state *st = iio_priv(indio_dev);
+ struct ad4130_state *st = iio_priv(indio_dev);
- guard(mutex)(&st->lock);
- return _ad4130_read_sample(indio_dev, channel, val);
- }
- unreachable();
+ guard(mutex)(&st->lock);
+
+ return _ad4130_read_sample(indio_dev, channel, val);
}
static int ad4130_read_raw(struct iio_dev *indio_dev,
@@ -1076,10 +1119,16 @@ static int ad4130_read_raw(struct iio_dev *indio_dev,
struct ad4130_state *st = iio_priv(indio_dev);
unsigned int channel = chan->scan_index;
struct ad4130_setup_info *setup_info = &st->chans_info[channel].setup;
+ int ret;
switch (info) {
case IIO_CHAN_INFO_RAW:
- return ad4130_read_sample(indio_dev, channel, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad4130_read_sample(indio_dev, channel, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE: {
guard(mutex)(&st->lock);
*val = st->scale_tbls[setup_info->ref_sel][setup_info->pga][0];
@@ -1093,7 +1142,7 @@ static int ad4130_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_SAMP_FREQ: {
guard(mutex)(&st->lock);
- ad4130_fs_to_freq(setup_info->filter_mode, setup_info->fs,
+ ad4130_fs_to_freq(setup_info->filter_type, setup_info->fs,
val, val2);
return IIO_VAL_INT_PLUS_NANO;
@@ -1123,7 +1172,7 @@ static int ad4130_read_avail(struct iio_dev *indio_dev,
return IIO_AVAIL_LIST;
case IIO_CHAN_INFO_SAMP_FREQ:
scoped_guard(mutex, &st->lock) {
- filter_config = &ad4130_filter_configs[setup_info->filter_mode];
+ filter_config = &ad4130_filter_configs[setup_info->filter_type];
}
*vals = (int *)filter_config->samp_freq_avail;
diff --git a/drivers/iio/adc/ad4695.c b/drivers/iio/adc/ad4695.c
index b79d135a5471..8222c8ab2940 100644
--- a/drivers/iio/adc/ad4695.c
+++ b/drivers/iio/adc/ad4695.c
@@ -19,14 +19,19 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/minmax.h>
+#include <linux/mutex.h>
#include <linux/property.h>
+#include <linux/pwm.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
@@ -66,12 +71,15 @@
#define AD4695_REG_STD_SEQ_CONFIG 0x0024
#define AD4695_REG_GPIO_CTRL 0x0026
#define AD4695_REG_GP_MODE 0x0027
+#define AD4695_REG_GP_MODE_BUSY_GP_SEL BIT(5)
+#define AD4695_REG_GP_MODE_BUSY_GP_EN BIT(1)
#define AD4695_REG_TEMP_CTRL 0x0029
#define AD4695_REG_TEMP_CTRL_TEMP_EN BIT(0)
#define AD4695_REG_CONFIG_IN(n) (0x0030 | (n))
#define AD4695_REG_CONFIG_IN_MODE BIT(6)
#define AD4695_REG_CONFIG_IN_PAIR GENMASK(5, 4)
#define AD4695_REG_CONFIG_IN_AINHIGHZ_EN BIT(3)
+#define AD4695_REG_CONFIG_IN_OSR_SET GENMASK(1, 0)
#define AD4695_REG_UPPER_IN(n) (0x0040 | (2 * (n)))
#define AD4695_REG_LOWER_IN(n) (0x0060 | (2 * (n)))
#define AD4695_REG_HYST_IN(n) (0x0080 | (2 * (n)))
@@ -92,6 +100,8 @@
#define AD4695_T_REFBUF_MS 100
#define AD4695_T_REGCONFIG_NS 20
#define AD4695_T_SCK_CNV_DELAY_NS 80
+#define AD4695_T_CNVL_NS 80
+#define AD4695_T_CNVH_NS 10
#define AD4695_REG_ACCESS_SCLK_HZ (10 * MEGA)
/* Max number of voltage input channels. */
@@ -118,17 +128,27 @@ struct ad4695_channel_config {
bool bipolar;
enum ad4695_in_pair pin_pairing;
unsigned int common_mode_mv;
+ unsigned int oversampling_ratio;
};
struct ad4695_state {
struct spi_device *spi;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
struct regmap *regmap;
struct regmap *regmap16;
struct gpio_desc *reset_gpio;
+ /* Currently, PWM CNV is only supported when SPI offload is used */
+ struct pwm_device *cnv_pwm;
+ /* protects against concurrent use of cnv_pwm */
+ struct mutex cnv_pwm_lock;
+ /* Offload also requires a separate GPIO to manually control CNV */
+ struct gpio_desc *cnv_gpio;
/* voltages channels plus temperature and timestamp */
struct iio_chan_spec iio_chan[AD4695_MAX_CHANNELS + 2];
struct ad4695_channel_config channels_cfg[AD4695_MAX_CHANNELS];
const struct ad4695_chip_info *chip_info;
+ int sample_freq_range[3];
/* Reference voltage. */
unsigned int vref_mv;
/* Common mode input pin voltage. */
@@ -148,6 +168,8 @@ struct ad4695_state {
/* Commands to send for single conversion. */
u16 cnv_cmd;
u8 cnv_cmd2;
+ /* Buffer for storing data from regmap bus reads/writes */
+ u8 regmap_bus_data[4];
};
static const struct regmap_range ad4695_regmap_rd_ranges[] = {
@@ -192,7 +214,6 @@ static const struct regmap_config ad4695_regmap_config = {
.max_register = AD4695_REG_AS_SLOT(127),
.rd_table = &ad4695_regmap_rd_table,
.wr_table = &ad4695_regmap_wr_table,
- .can_multi_write = true,
};
static const struct regmap_range ad4695_regmap16_rd_ranges[] = {
@@ -224,7 +245,126 @@ static const struct regmap_config ad4695_regmap16_config = {
.max_register = AD4695_REG_GAIN_IN(15),
.rd_table = &ad4695_regmap16_rd_table,
.wr_table = &ad4695_regmap16_wr_table,
- .can_multi_write = true,
+};
+
+static int ad4695_regmap_bus_reg_write(void *context, const void *data,
+ size_t count)
+{
+ struct ad4695_state *st = context;
+ struct spi_transfer xfer = {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .len = count,
+ .tx_buf = st->regmap_bus_data,
+ };
+
+ if (count > ARRAY_SIZE(st->regmap_bus_data))
+ return -EINVAL;
+
+ memcpy(st->regmap_bus_data, data, count);
+
+ return spi_sync_transfer(st->spi, &xfer, 1);
+}
+
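+/*
+ * Register reads are done as two transfers (address write followed by value
+ * read), both clocked at the slower register-access SCLK rate.
+ */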
+static int ad4695_regmap_bus_reg_read(void *context, const void *reg,
+ size_t reg_size, void *val,
+ size_t val_size)
+{
+ struct ad4695_state *st = context;
+ struct spi_transfer xfers[] = {
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .len = reg_size,
+ .tx_buf = &st->regmap_bus_data[0],
+ }, {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .len = val_size,
+ .rx_buf = &st->regmap_bus_data[2],
+ },
+ };
+ int ret;
+
+ if (reg_size > 2)
+ return -EINVAL;
+
+ if (val_size > 2)
+ return -EINVAL;
+
+ memcpy(&st->regmap_bus_data[0], reg, reg_size);
+
+ ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
+ if (ret)
+ return ret;
+
+ memcpy(val, &st->regmap_bus_data[2], val_size);
+
+ return 0;
+}
+
+static const struct regmap_bus ad4695_regmap_bus = {
+ .write = ad4695_regmap_bus_reg_write,
+ .read = ad4695_regmap_bus_reg_read,
+ .read_flag_mask = 0x80,
+ .reg_format_endian_default = REGMAP_ENDIAN_BIG,
+ .val_format_endian_default = REGMAP_ENDIAN_BIG,
+};
+
+enum {
+ AD4695_SCAN_TYPE_OSR_1,
+ AD4695_SCAN_TYPE_OSR_4,
+ AD4695_SCAN_TYPE_OSR_16,
+ AD4695_SCAN_TYPE_OSR_64,
+};
+
+static const struct iio_scan_type ad4695_scan_type_offload_u[] = {
+ [AD4695_SCAN_TYPE_OSR_1] = {
+ .sign = 'u',
+ .realbits = 16,
+ .shift = 3,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_4] = {
+ .sign = 'u',
+ .realbits = 17,
+ .shift = 2,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_16] = {
+ .sign = 'u',
+ .realbits = 18,
+ .shift = 1,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_64] = {
+ .sign = 'u',
+ .realbits = 19,
+ .storagebits = 32,
+ },
+};
+
+static const struct iio_scan_type ad4695_scan_type_offload_s[] = {
+ [AD4695_SCAN_TYPE_OSR_1] = {
+ .sign = 's',
+ .realbits = 16,
+ .shift = 3,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_4] = {
+ .sign = 's',
+ .realbits = 17,
+ .shift = 2,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_16] = {
+ .sign = 's',
+ .realbits = 18,
+ .shift = 1,
+ .storagebits = 32,
+ },
+ [AD4695_SCAN_TYPE_OSR_64] = {
+ .sign = 's',
+ .realbits = 19,
+ .storagebits = 32,
+ },
};
static const struct iio_chan_spec ad4695_channel_template = {
@@ -264,6 +404,10 @@ static const char * const ad4695_power_supplies[] = {
"avdd", "vio"
};
+static const int ad4695_oversampling_ratios[] = {
+ 1, 4, 16, 64,
+};
+
static const struct ad4695_chip_info ad4695_chip_info = {
.name = "ad4695",
.max_sample_rate = 500 * KILO,
@@ -292,6 +436,13 @@ static const struct ad4695_chip_info ad4698_chip_info = {
.num_voltage_inputs = 8,
};
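+/*
+ * Pulse CNV high long enough to start a conversion when CNV is wired to a
+ * GPIO rather than to CS.
+ */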
+static void ad4695_cnv_manual_trigger(struct ad4695_state *st)
+{
+ gpiod_set_value_cansleep(st->cnv_gpio, 1);
+ ndelay(10);
+ gpiod_set_value_cansleep(st->cnv_gpio, 0);
+}
+
/**
* ad4695_set_single_cycle_mode - Set the device in single cycle mode
* @st: The AD4695 state
@@ -364,11 +515,31 @@ static int ad4695_enter_advanced_sequencer_mode(struct ad4695_state *st, u32 n)
*/
static int ad4695_exit_conversion_mode(struct ad4695_state *st)
{
- struct spi_transfer xfer = {
- .tx_buf = &st->cnv_cmd2,
- .len = 1,
- .delay.value = AD4695_T_REGCONFIG_NS,
- .delay.unit = SPI_DELAY_UNIT_NSECS,
+ /*
+ * An extra transfer is needed to trigger a conversion here so
+ * that we can be 100% sure the command will be processed by the
+ * ADC, rather than relying on it to be in the correct state
+ * when this function is called (this chip has a quirk where the
+ * command only works when reading a conversion, and if the
+ * previous conversion was already read then it won't work). The
+ * actual conversion command is then run at the slower
+ * AD4695_REG_ACCESS_SCLK_HZ speed to guarantee this works.
+ */
+ struct spi_transfer xfers[] = {
+ {
+ .delay.value = AD4695_T_CNVL_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ .cs_change = 1,
+ .cs_change_delay.value = AD4695_T_CNVH_NS,
+ .cs_change_delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
+ {
+ .speed_hz = AD4695_REG_ACCESS_SCLK_HZ,
+ .tx_buf = &st->cnv_cmd2,
+ .len = 1,
+ .delay.value = AD4695_T_REGCONFIG_NS,
+ .delay.unit = SPI_DELAY_UNIT_NSECS,
+ },
};
/*
@@ -377,7 +548,18 @@ static int ad4695_exit_conversion_mode(struct ad4695_state *st)
*/
st->cnv_cmd2 = AD4695_CMD_EXIT_CNV_MODE << 3;
- return spi_sync_transfer(st->spi, &xfer, 1);
+ if (st->cnv_gpio) {
+ ad4695_cnv_manual_trigger(st);
+
+ /*
+ * In this case, CNV is not connected to CS, so we don't need
+ * the extra CS toggle to trigger the conversion and toggling
+ * CS would have no effect.
+ */
+ return spi_sync_transfer(st->spi, &xfers[1], 1);
+ }
+
+ return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
@@ -402,6 +584,29 @@ static int ad4695_set_ref_voltage(struct ad4695_state *st, int vref_mv)
FIELD_PREP(AD4695_REG_REF_CTRL_VREF_SET, val));
}
+/**
+ * ad4695_osr_to_regval - convert ratio to OSR register value
+ * @ratio: ratio to check
+ *
+ * Check if ratio is present in the list of available ratios and return
+ * the corresponding value that needs to be written to the register to
+ * select that ratio.
+ *
+ * Returns: register value (0 to 3) or -EINVAL if there is not an exact
+ * match
+ */
+static int ad4695_osr_to_regval(int ratio)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ad4695_oversampling_ratios); i++) {
+ if (ratio == ad4695_oversampling_ratios[i])
+ return i;
+ }
+
+ return -EINVAL;
+}
+
static int ad4695_write_chn_cfg(struct ad4695_state *st,
struct ad4695_channel_config *cfg)
{
@@ -604,6 +809,161 @@ out:
return IRQ_HANDLED;
}
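+/*
+ * Program the advanced sequencer slots and temperature enable from the
+ * active scan mask, enable the BUSY output and the SPI offload trigger,
+ * enter conversion mode, then start the CNV PWM.
+ */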
+static int ad4695_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_DATA_READY,
+ };
+ struct spi_transfer *xfer = &st->buf_read_xfer[0];
+ struct pwm_state state;
+ u8 temp_chan_bit = st->chip_info->num_voltage_inputs;
+ u8 num_slots = 0;
+ u8 temp_en = 0;
+ unsigned int bit;
+ int ret;
+
+ iio_for_each_active_channel(indio_dev, bit) {
+ if (bit == temp_chan_bit) {
+ temp_en = 1;
+ continue;
+ }
+
+ ret = regmap_write(st->regmap, AD4695_REG_AS_SLOT(num_slots),
+ FIELD_PREP(AD4695_REG_AS_SLOT_INX, bit));
+ if (ret)
+ return ret;
+
+ num_slots++;
+ }
+
+ /*
+ * For non-offload, we could discard data to work around this
+ * restriction, but with offload, that is not possible.
+ */
+ if (num_slots < 2) {
+ dev_err(&st->spi->dev,
+ "At least two voltage channels must be enabled.\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(st->regmap, AD4695_REG_TEMP_CTRL,
+ AD4695_REG_TEMP_CTRL_TEMP_EN,
+ FIELD_PREP(AD4695_REG_TEMP_CTRL_TEMP_EN,
+ temp_en));
+ if (ret)
+ return ret;
+
+ /* Each BUSY event means just one sample for one channel is ready. */
+ memset(xfer, 0, sizeof(*xfer));
+ xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ /* Using 19 bits per word to allow for possible oversampling */
+ xfer->bits_per_word = 19;
+ xfer->len = 4;
+
+ spi_message_init_with_transfers(&st->buf_read_msg, xfer, 1);
+ st->buf_read_msg.offload = st->offload;
+
+ ret = spi_optimize_message(st->spi, &st->buf_read_msg);
+ if (ret)
+ return ret;
+
+ /*
+ * NB: technically, this is part of the SPI offload trigger enable, but it
+ * doesn't work to call it from the offload trigger enable callback
+ * because it requires accessing the SPI bus. Calling it from the
+ * trigger enable callback could cause a deadlock.
+ */
+ ret = regmap_set_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_EN);
+ if (ret)
+ goto err_unoptimize_message;
+
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger,
+ &config);
+ if (ret)
+ goto err_disable_busy_output;
+
+ ret = ad4695_enter_advanced_sequencer_mode(st, num_slots);
+ if (ret)
+ goto err_offload_trigger_disable;
+
+ mutex_lock(&st->cnv_pwm_lock);
+ pwm_get_state(st->cnv_pwm, &state);
+ /*
+ * PWM subsystem generally rounds down, so requesting 2x minimum high
+ * time ensures that we meet the minimum high time in any case.
+ */
+ state.duty_cycle = AD4695_T_CNVH_NS * 2;
+ ret = pwm_apply_might_sleep(st->cnv_pwm, &state);
+ mutex_unlock(&st->cnv_pwm_lock);
+ if (ret)
+ goto err_offload_exit_conversion_mode;
+
+ return 0;
+
+err_offload_exit_conversion_mode:
+ /*
+ * We have to unwind in a different order to avoid triggering offload.
+ * ad4695_exit_conversion_mode() triggers a conversion, so it has to be
+ * done after spi_offload_trigger_disable().
+ */
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+ ad4695_exit_conversion_mode(st);
+ goto err_disable_busy_output;
+
+err_offload_trigger_disable:
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+err_disable_busy_output:
+ regmap_clear_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_EN);
+
+err_unoptimize_message:
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return ret;
+}
+
+static int ad4695_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ struct pwm_state state;
+ int ret;
+
+ scoped_guard(mutex, &st->cnv_pwm_lock) {
+ pwm_get_state(st->cnv_pwm, &state);
+ state.duty_cycle = 0;
+ ret = pwm_apply_might_sleep(st->cnv_pwm, &state);
+ if (ret)
+ return ret;
+ }
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ /*
+ * ad4695_exit_conversion_mode() triggers a conversion, so it has to be
+ * done after spi_offload_trigger_disable().
+ */
+ ret = ad4695_exit_conversion_mode(st);
+ if (ret)
+ return ret;
+
+ ret = regmap_clear_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_EN);
+ if (ret)
+ return ret;
+
+ spi_unoptimize_message(&st->buf_read_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad4695_offload_buffer_setup_ops = {
+ .postenable = ad4695_offload_buffer_postenable,
+ .predisable = ad4695_offload_buffer_predisable,
+};
+
/**
* ad4695_read_one_sample - Read a single sample using single-cycle mode
* @st: The AD4695 state
@@ -636,6 +996,13 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
return ret;
/*
+ * If CNV is connected to CS, the previous function will have triggered
+ * the conversion; otherwise, we do it manually.
+ */
+ if (st->cnv_gpio)
+ ad4695_cnv_manual_trigger(st);
+
+ /*
* Setting the first channel to the temperature channel isn't supported
* in single-cycle mode, so we have to do an extra conversion to read
* the temperature.
@@ -646,6 +1013,13 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
ret = spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
if (ret)
return ret;
+
+ /*
+ * If CNV is connected to CS, the previous function will have
+ * triggered the conversion; otherwise, we do it manually.
+ */
+ if (st->cnv_gpio)
+ ad4695_cnv_manual_trigger(st);
}
/* Then read the result and exit conversion mode. */
@@ -655,36 +1029,58 @@ static int ad4695_read_one_sample(struct ad4695_state *st, unsigned int address)
return spi_sync_transfer(st->spi, xfers, ARRAY_SIZE(xfers));
}
+static int __ad4695_read_info_raw(struct ad4695_state *st,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ u8 realbits = chan->scan_type.realbits;
+ int ret;
+
+ ret = ad4695_read_one_sample(st, chan->address);
+ if (ret)
+ return ret;
+
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(st->raw_data, realbits - 1);
+ else
+ *val = st->raw_data;
+
+ return IIO_VAL_INT;
+}
+
static int ad4695_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
struct ad4695_state *st = iio_priv(indio_dev);
- struct ad4695_channel_config *cfg = &st->channels_cfg[chan->scan_index];
- u8 realbits = chan->scan_type.realbits;
+ const struct iio_scan_type *scan_type;
+ struct ad4695_channel_config *cfg;
unsigned int reg_val;
int ret, tmp;
+ u8 realbits;
+
+ if (chan->type == IIO_VOLTAGE)
+ cfg = &st->channels_cfg[chan->scan_index];
+
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ realbits = scan_type->realbits;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = ad4695_read_one_sample(st, chan->address);
- if (ret)
- return ret;
-
- if (chan->scan_type.sign == 's')
- *val = sign_extend32(st->raw_data, realbits - 1);
- else
- *val = st->raw_data;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- return IIO_VAL_INT;
- }
- unreachable();
+ ret = __ad4695_read_info_raw(st, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = st->vref_mv;
- *val2 = chan->scan_type.realbits;
+ *val2 = realbits;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_TEMP:
/* T_scale (°C) = raw * V_REF (mV) / (-1.8 mV/°C * 2^16) */
@@ -717,111 +1113,245 @@ static int ad4695_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_read(st->regmap16,
- AD4695_REG_GAIN_IN(chan->scan_index),
- &reg_val);
- if (ret)
- return ret;
-
- *val = reg_val;
- *val2 = 15;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ &reg_val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ *val = reg_val;
+ *val2 = 15;
- return IIO_VAL_FRACTIONAL_LOG2;
- }
- unreachable();
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
return -EINVAL;
}
case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_VOLTAGE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_read(st->regmap16,
- AD4695_REG_OFFSET_IN(chan->scan_index),
- &reg_val);
- if (ret)
- return ret;
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = regmap_read(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ &reg_val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
- tmp = sign_extend32(reg_val, 15);
+ tmp = sign_extend32(reg_val, 15);
+ switch (cfg->oversampling_ratio) {
+ case 1:
*val = tmp / 4;
*val2 = abs(tmp) % 4 * MICRO / 4;
+ break;
+ case 4:
+ *val = tmp / 2;
+ *val2 = abs(tmp) % 2 * MICRO / 2;
+ break;
+ case 16:
+ *val = tmp;
+ *val2 = 0;
+ break;
+ case 64:
+ *val = tmp * 2;
+ *val2 = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (tmp < 0 && *val2) {
- *val *= -1;
- *val2 *= -1;
- }
-
- return IIO_VAL_INT_PLUS_MICRO;
+ if (tmp < 0 && *val2) {
+ *val *= -1;
+ *val2 *= -1;
}
- unreachable();
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *val = cfg->oversampling_ratio;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct pwm_state state;
+ unsigned int osr = 1;
+
+ if (chan->type == IIO_VOLTAGE)
+ osr = cfg->oversampling_ratio;
+
+ ret = pwm_get_state_hw(st->cnv_pwm, &state);
+ if (ret)
+ return ret;
+
+ /*
+ * The effective sampling frequency for a channel is the input
+ * frequency divided by the channel's OSR value.
+ */
+ *val = DIV_ROUND_UP_ULL(NSEC_PER_SEC, state.period * osr);
+
+ return IIO_VAL_INT;
+ }
default:
return -EINVAL;
}
}
-static int ad4695_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val, int val2, long mask)
+static int ad4695_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int ad4695_set_osr_val(struct ad4695_state *st,
+ struct iio_chan_spec const *chan,
+ int val)
+{
+ int osr = ad4695_osr_to_regval(val);
+
+ if (osr < 0)
+ return osr;
+
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ st->channels_cfg[chan->scan_index].oversampling_ratio = val;
+ return regmap_update_bits(st->regmap,
+ AD4695_REG_CONFIG_IN(chan->scan_index),
+ AD4695_REG_CONFIG_IN_OSR_SET,
+ FIELD_PREP(AD4695_REG_CONFIG_IN_OSR_SET, osr));
+ default:
+ return -EINVAL;
+ }
+}
+
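+/*
+ * Convert a calibbias value (val + val2 / MICRO) into the 16-bit offset
+ * register value. The register step size depends on the channel's
+ * oversampling ratio, hence the OSR-dependent scale factor (see
+ * ad4695_calibbias_available).
+ */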
+static unsigned int ad4695_get_calibbias(int val, int val2, int osr)
+{
+ int val_calc, scale;
+
+ switch (osr) {
+ case 4:
+ scale = 4;
+ break;
+ case 16:
+ scale = 2;
+ break;
+ case 64:
+ scale = 1;
+ break;
+ default:
+ scale = 8;
+ break;
+ }
+
+ val = clamp_t(int, val, S32_MIN / 8, S32_MAX / 8);
+
+ /* val2 range is (-MICRO, MICRO) if val == 0, otherwise [0, MICRO) */
+ if (val < 0)
+ val_calc = val * scale - val2 * scale / MICRO;
+ else if (val2 < 0)
+ /* if val2 < 0 then val == 0 */
+ val_calc = val2 * scale / (int)MICRO;
+ else
+ val_calc = val * scale + val2 * scale / MICRO;
+
+ val_calc /= 2;
+
+ return clamp_t(int, val_calc, S16_MIN, S16_MAX);
+}
+
+static int __ad4695_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct ad4695_state *st = iio_priv(indio_dev);
unsigned int reg_val;
+ unsigned int osr = 1;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (val < 0 || val2 < 0)
- reg_val = 0;
- else if (val > 1)
- reg_val = U16_MAX;
- else
- reg_val = (val * (1 << 16) +
- mul_u64_u32_div(val2, 1 << 16,
- MICRO)) / 2;
-
- return regmap_write(st->regmap16,
- AD4695_REG_GAIN_IN(chan->scan_index),
- reg_val);
- default:
- return -EINVAL;
- }
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (val2 >= 0 && val > S16_MAX / 4)
- reg_val = S16_MAX;
- else if ((val2 < 0 ? -val : val) < S16_MIN / 4)
- reg_val = S16_MIN;
- else if (val2 < 0)
- reg_val = clamp_t(int,
- -(val * 4 + -val2 * 4 / MICRO),
- S16_MIN, S16_MAX);
- else if (val < 0)
- reg_val = clamp_t(int,
- val * 4 - val2 * 4 / MICRO,
- S16_MIN, S16_MAX);
- else
- reg_val = clamp_t(int,
- val * 4 + val2 * 4 / MICRO,
- S16_MIN, S16_MAX);
-
- return regmap_write(st->regmap16,
- AD4695_REG_OFFSET_IN(chan->scan_index),
- reg_val);
- default:
- return -EINVAL;
- }
+ if (chan->type == IIO_VOLTAGE)
+ osr = st->channels_cfg[chan->scan_index].oversampling_ratio;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (val < 0 || val2 < 0)
+ reg_val = 0;
+ else if (val > 1)
+ reg_val = U16_MAX;
+ else
+ reg_val = (val * (1 << 16) +
+ mul_u64_u32_div(val2, 1 << 16,
+ MICRO)) / 2;
+
+ return regmap_write(st->regmap16,
+ AD4695_REG_GAIN_IN(chan->scan_index),
+ reg_val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ reg_val = ad4695_get_calibbias(val, val2, osr);
+ return regmap_write(st->regmap16,
+ AD4695_REG_OFFSET_IN(chan->scan_index),
+ reg_val);
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ struct pwm_state state;
+ /*
+ * Limit the maximum acceptable sample rate according to
+ * the channel's oversampling ratio.
+ */
+ u64 max_osr_rate = DIV_ROUND_UP_ULL(st->chip_info->max_sample_rate,
+ osr);
+
+ if (val <= 0 || val > max_osr_rate)
+ return -EINVAL;
+
+ guard(mutex)(&st->cnv_pwm_lock);
+ pwm_get_state(st->cnv_pwm, &state);
+ /*
+ * The CNV trigger frequency required for a given sample rate is
+ * that rate multiplied by the OSR.
+ */
+ state.period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, val * osr);
+ return pwm_apply_might_sleep(st->cnv_pwm, &state);
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4695_set_osr_val(st, chan, val);
+ default:
+ return -EINVAL;
}
- unreachable();
+}
+
+static int ad4695_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ad4695_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+
+ return ret;
}
static int ad4695_read_avail(struct iio_dev *indio_dev,
@@ -829,17 +1359,43 @@ static int ad4695_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ int ret;
static const int ad4695_calibscale_available[6] = {
/* Range of 0 (inclusive) to 2 (exclusive) */
0, 15, 1, 15, U16_MAX, 15
};
- static const int ad4695_calibbias_available[6] = {
+ static const int ad4695_calibbias_available[4][6] = {
/*
* Datasheet says FSR/8 which translates to signed/4. The step
- * depends on oversampling ratio which is always 1 for now.
+ * depends on oversampling ratio, so we need four different
+ * ranges to select from.
*/
- S16_MIN / 4, 0, 0, MICRO / 4, S16_MAX / 4, S16_MAX % 4 * MICRO / 4
+ {
+ S16_MIN / 4, 0,
+ 0, MICRO / 4,
+ S16_MAX / 4, S16_MAX % 4 * MICRO / 4
+ },
+ {
+ S16_MIN / 2, 0,
+ 0, MICRO / 2,
+ S16_MAX / 2, S16_MAX % 2 * MICRO / 2,
+ },
+ {
+ S16_MIN, 0,
+ 1, 0,
+ S16_MAX, 0,
+ },
+ {
+ S16_MIN * 2, 0,
+ 2, 0,
+ S16_MAX * 2, 0,
+ },
};
+ struct ad4695_state *st = iio_priv(indio_dev);
+ unsigned int osr = 1;
+
+ if (chan->type == IIO_VOLTAGE)
+ osr = st->channels_cfg[chan->scan_index].oversampling_ratio;
switch (mask) {
case IIO_CHAN_INFO_CALIBSCALE:
@@ -854,12 +1410,36 @@ static int ad4695_read_avail(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_VOLTAGE:
- *vals = ad4695_calibbias_available;
+ ret = ad4695_osr_to_regval(osr);
+ if (ret < 0)
+ return ret;
+ /*
+ * Select the appropriate calibbias array based on the
+ * OSR value in the register.
+ */
+ *vals = ad4695_calibbias_available[ret];
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_RANGE;
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ /* Max sample rate for the channel depends on OSR */
+ st->sample_freq_range[2] =
+ DIV_ROUND_UP_ULL(st->chip_info->max_sample_rate, osr);
+ *vals = st->sample_freq_range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ *vals = ad4695_oversampling_ratios;
+ *length = ARRAY_SIZE(ad4695_oversampling_ratios);
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -871,31 +1451,64 @@ static int ad4695_debugfs_reg_access(struct iio_dev *indio_dev,
unsigned int *readval)
{
struct ad4695_state *st = iio_priv(indio_dev);
-
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- if (readval) {
- if (regmap_check_range_table(st->regmap, reg,
- &ad4695_regmap_rd_table))
- return regmap_read(st->regmap, reg, readval);
- if (regmap_check_range_table(st->regmap16, reg,
- &ad4695_regmap16_rd_table))
- return regmap_read(st->regmap16, reg, readval);
- } else {
- if (regmap_check_range_table(st->regmap, reg,
- &ad4695_regmap_wr_table))
- return regmap_write(st->regmap, reg, writeval);
- if (regmap_check_range_table(st->regmap16, reg,
- &ad4695_regmap16_wr_table))
- return regmap_write(st->regmap16, reg, writeval);
- }
+ int ret = -EINVAL;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (readval) {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_rd_table))
+ ret = regmap_read(st->regmap, reg, readval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_rd_table))
+ ret = regmap_read(st->regmap16, reg, readval);
+ } else {
+ if (regmap_check_range_table(st->regmap, reg,
+ &ad4695_regmap_wr_table))
+ ret = regmap_write(st->regmap, reg, writeval);
+ if (regmap_check_range_table(st->regmap16, reg,
+ &ad4695_regmap16_wr_table))
+ ret = regmap_write(st->regmap16, reg, writeval);
}
+ iio_device_release_direct(indio_dev);
- return -EINVAL;
+ return ret;
+}
+
+static int ad4695_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad4695_state *st = iio_priv(indio_dev);
+ unsigned int osr = st->channels_cfg[chan->scan_index].oversampling_ratio;
+
+ switch (osr) {
+ case 1:
+ return AD4695_SCAN_TYPE_OSR_1;
+ case 4:
+ return AD4695_SCAN_TYPE_OSR_4;
+ case 16:
+ return AD4695_SCAN_TYPE_OSR_16;
+ case 64:
+ return AD4695_SCAN_TYPE_OSR_64;
+ default:
+ return -EINVAL;
+ }
}
static const struct iio_info ad4695_info = {
.read_raw = &ad4695_read_raw,
+ .write_raw_get_fmt = &ad4695_write_raw_get_fmt,
+ .write_raw = &ad4695_write_raw,
+ .read_avail = &ad4695_read_avail,
+ .debugfs_reg_access = &ad4695_debugfs_reg_access,
+};
+
+static const struct iio_info ad4695_offload_info = {
+ .read_raw = &ad4695_read_raw,
+ .write_raw_get_fmt = &ad4695_write_raw_get_fmt,
.write_raw = &ad4695_write_raw,
+ .get_current_scan_type = &ad4695_get_current_scan_type,
.read_avail = &ad4695_read_avail,
.debugfs_reg_access = &ad4695_debugfs_reg_access,
};
@@ -915,6 +1528,9 @@ static int ad4695_parse_channel_cfg(struct ad4695_state *st)
chan_cfg->highz_en = true;
chan_cfg->channel = i;
+ /* This is the default OSR after reset */
+ chan_cfg->oversampling_ratio = 1;
+
*iio_chan = ad4695_channel_template;
iio_chan->channel = i;
iio_chan->scan_index = i;
@@ -1008,26 +1624,188 @@ static int ad4695_parse_channel_cfg(struct ad4695_state *st)
return 0;
}
+static bool ad4695_offload_trigger_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return false;
+
+ /*
+ * Requires 2 args:
+ * args[0] is the trigger event.
+ * args[1] is the GPIO pin number.
+ */
+ if (nargs != 2 || args[0] != AD4695_TRIGGER_EVENT_BUSY)
+ return false;
+
+ return true;
+}
+
+static int ad4695_offload_trigger_request(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ struct ad4695_state *st = spi_offload_trigger_get_priv(trigger);
+
+ /* Should already be validated by match, but just in case. */
+ if (nargs != 2)
+ return -EINVAL;
+
+ /* DT tells us if BUSY event uses GP0 or GP3. */
+ if (args[1] == AD4695_TRIGGER_PIN_GP3)
+ return regmap_set_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_SEL);
+
+ return regmap_clear_bits(st->regmap, AD4695_REG_GP_MODE,
+ AD4695_REG_GP_MODE_BUSY_GP_SEL);
+}
+
+static int
+ad4695_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ if (config->type != SPI_OFFLOAD_TRIGGER_DATA_READY)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * NB: There are no enable/disable callbacks here because enabling or
+ * disabling the BUSY output on the ADC requires a SPI message.
+ */
+static const struct spi_offload_trigger_ops ad4695_offload_trigger_ops = {
+ .match = ad4695_offload_trigger_match,
+ .request = ad4695_offload_trigger_request,
+ .validate = ad4695_offload_trigger_validate,
+};
+
+static void ad4695_pwm_disable(void *pwm)
+{
+ pwm_disable(pwm);
+}
+
+static int ad4695_probe_spi_offload(struct iio_dev *indio_dev,
+ struct ad4695_state *st)
+{
+ struct device *dev = &st->spi->dev;
+ struct spi_offload_trigger_info trigger_info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &ad4695_offload_trigger_ops,
+ .priv = st,
+ };
+ struct pwm_state pwm_state;
+ struct dma_chan *rx_dma;
+ int ret, i;
+
+ indio_dev->info = &ad4695_offload_info;
+ indio_dev->num_channels = st->chip_info->num_voltage_inputs + 1;
+ indio_dev->setup_ops = &ad4695_offload_buffer_setup_ops;
+
+ if (!st->cnv_gpio)
+ return dev_err_probe(dev, -ENODEV,
+ "CNV GPIO is required for SPI offload\n");
+
+ ret = devm_spi_offload_trigger_register(dev, &trigger_info);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register offload trigger\n");
+
+ st->offload_trigger = devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_DATA_READY);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "failed to get offload trigger\n");
+
+ ret = devm_mutex_init(dev, &st->cnv_pwm_lock);
+ if (ret)
+ return ret;
+
+ st->cnv_pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->cnv_pwm))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_pwm),
+ "failed to get CNV PWM\n");
+
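+	/* The PWM drives the CNV pin; its frequency sets the sampling rate. */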
+ pwm_init_state(st->cnv_pwm, &pwm_state);
+
+	/*
+	 * If firmware didn't provide a default rate, use an arbitrary 10 kHz
+	 * (100 * MILLI ns = 100 us period).
+	 */
+ if (pwm_state.period == 0)
+ pwm_state.period = 100 * MILLI;
+
+ pwm_state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->cnv_pwm, &pwm_state);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to apply CNV PWM\n");
+
+ ret = devm_add_action_or_reset(dev, ad4695_pwm_disable, st->cnv_pwm);
+ if (ret)
+ return ret;
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, st->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ struct iio_chan_spec *chan = &st->iio_chan[i];
+ struct ad4695_channel_config *cfg;
+
+ /*
+ * NB: When using offload support, all channels need to have the
+ * same bits_per_word because they all use the same SPI message
+ * for reading one sample. In order to prevent breaking
+ * userspace in the future when oversampling support is added,
+	 * all channels are set to read 19 bits with a shift of 3 to mask
+	 * out the extra bits, even though we currently only support 16-bit
+	 * samples (oversampling ratio == 1).
+ */
+ chan->scan_type.shift = 3;
+ chan->scan_type.storagebits = 32;
+	/* Add sampling frequency control for the PWM CNV trigger */
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_SAMP_FREQ);
+ chan->info_mask_separate_available |= BIT(IIO_CHAN_INFO_SAMP_FREQ);
+
+ /* Add the oversampling properties only for voltage channels */
+ if (chan->type != IIO_VOLTAGE)
+ continue;
+
+ cfg = &st->channels_cfg[i];
+
+ chan->info_mask_separate |= BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ chan->info_mask_separate_available |=
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO);
+ chan->has_ext_scan_type = 1;
+ if (cfg->bipolar) {
+ chan->ext_scan_type = ad4695_scan_type_offload_s;
+ chan->num_ext_scan_type =
+ ARRAY_SIZE(ad4695_scan_type_offload_s);
+ } else {
+ chan->ext_scan_type = ad4695_scan_type_offload_u;
+ chan->num_ext_scan_type =
+ ARRAY_SIZE(ad4695_scan_type_offload_u);
+ }
+ }
+
+ return devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+ rx_dma, IIO_BUFFER_DIRECTION_IN);
+}
+
+static const struct spi_offload_config ad4695_spi_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
+};
+
static int ad4695_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
struct ad4695_state *st;
struct iio_dev *indio_dev;
- struct gpio_desc *cnv_gpio;
bool use_internal_ldo_supply;
bool use_internal_ref_buffer;
int ret;
- cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
- if (IS_ERR(cnv_gpio))
- return dev_err_probe(dev, PTR_ERR(cnv_gpio),
- "Failed to get CNV GPIO\n");
-
- /* Driver currently requires CNV pin to be connected to SPI CS */
- if (cnv_gpio)
- return dev_err_probe(dev, -ENODEV,
- "CNV GPIO is not supported\n");
-
indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
@@ -1039,19 +1817,27 @@ static int ad4695_probe(struct spi_device *spi)
if (!st->chip_info)
return -EINVAL;
- /* Registers cannot be read at the max allowable speed */
- spi->max_speed_hz = AD4695_REG_ACCESS_SCLK_HZ;
+ st->sample_freq_range[0] = 1; /* min */
+ st->sample_freq_range[1] = 1; /* step */
+ st->sample_freq_range[2] = st->chip_info->max_sample_rate; /* max */
- st->regmap = devm_regmap_init_spi(spi, &ad4695_regmap_config);
+ st->regmap = devm_regmap_init(dev, &ad4695_regmap_bus, st,
+ &ad4695_regmap_config);
if (IS_ERR(st->regmap))
return dev_err_probe(dev, PTR_ERR(st->regmap),
"Failed to initialize regmap\n");
- st->regmap16 = devm_regmap_init_spi(spi, &ad4695_regmap16_config);
+ st->regmap16 = devm_regmap_init(dev, &ad4695_regmap_bus, st,
+ &ad4695_regmap16_config);
if (IS_ERR(st->regmap16))
return dev_err_probe(dev, PTR_ERR(st->regmap16),
"Failed to initialize regmap16\n");
+ st->cnv_gpio = devm_gpiod_get_optional(dev, "cnv", GPIOD_OUT_LOW);
+ if (IS_ERR(st->cnv_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->cnv_gpio),
+ "Failed to get CNV GPIO\n");
+
ret = devm_regulator_bulk_get_enable(dev,
ARRAY_SIZE(ad4695_power_supplies),
ad4695_power_supplies);
@@ -1179,12 +1965,31 @@ static int ad4695_probe(struct spi_device *spi)
indio_dev->channels = st->iio_chan;
indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- iio_pollfunc_store_time,
- ad4695_trigger_handler,
- &ad4695_buffer_setup_ops);
- if (ret)
- return ret;
+ st->offload = devm_spi_offload_get(dev, spi, &ad4695_spi_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get SPI offload\n");
+
+	/* If SPI offload is not available, fall back to low-speed operation. */
+ if (ret == -ENODEV) {
+ /* Driver currently requires CNV pin to be connected to SPI CS */
+ if (st->cnv_gpio)
+ return dev_err_probe(dev, -EINVAL,
+ "CNV GPIO is not supported\n");
+
+ indio_dev->num_channels = st->chip_info->num_voltage_inputs + 2;
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad4695_trigger_handler,
+ &ad4695_buffer_setup_ops);
+ if (ret)
+ return ret;
+ } else {
+ ret = ad4695_probe_spi_offload(indio_dev, st);
+ if (ret)
+ return ret;
+ }
return devm_iio_device_register(dev, indio_dev);
}
@@ -1221,3 +2026,4 @@ MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
MODULE_DESCRIPTION("Analog Devices AD4695 ADC driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
diff --git a/drivers/iio/adc/ad4851.c b/drivers/iio/adc/ad4851.c
new file mode 100644
index 000000000000..98ebc853db79
--- /dev/null
+++ b/drivers/iio/adc/ad4851.c
@@ -0,0 +1,1315 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Analog Devices AD4851 DAS driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/minmax.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/unaligned.h>
+#include <linux/units.h>
+
+#include <linux/iio/backend.h>
+#include <linux/iio/iio.h>
+
+#define AD4851_REG_INTERFACE_CONFIG_A 0x00
+#define AD4851_REG_INTERFACE_CONFIG_B 0x01
+#define AD4851_REG_PRODUCT_ID_L 0x04
+#define AD4851_REG_PRODUCT_ID_H 0x05
+#define AD4851_REG_DEVICE_CTRL 0x25
+#define AD4851_REG_PACKET 0x26
+#define AD4851_REG_OVERSAMPLE 0x27
+
+#define AD4851_REG_CH_CONFIG_BASE 0x2A
+#define AD4851_REG_CHX_SOFTSPAN(ch) ((0x12 * (ch)) + AD4851_REG_CH_CONFIG_BASE)
+#define AD4851_REG_CHX_OFFSET(ch) (AD4851_REG_CHX_SOFTSPAN(ch) + 0x01)
+#define AD4851_REG_CHX_OFFSET_LSB(ch) AD4851_REG_CHX_OFFSET(ch)
+#define AD4851_REG_CHX_OFFSET_MID(ch) (AD4851_REG_CHX_OFFSET_LSB(ch) + 0x01)
+#define AD4851_REG_CHX_OFFSET_MSB(ch) (AD4851_REG_CHX_OFFSET_MID(ch) + 0x01)
+#define AD4851_REG_CHX_GAIN(ch) (AD4851_REG_CHX_OFFSET(ch) + 0x03)
+#define AD4851_REG_CHX_GAIN_LSB(ch) AD4851_REG_CHX_GAIN(ch)
+#define AD4851_REG_CHX_GAIN_MSB(ch) (AD4851_REG_CHX_GAIN(ch) + 0x01)
+#define AD4851_REG_CHX_PHASE(ch) (AD4851_REG_CHX_GAIN(ch) + 0x02)
+#define AD4851_REG_CHX_PHASE_LSB(ch) AD4851_REG_CHX_PHASE(ch)
+#define AD4851_REG_CHX_PHASE_MSB(ch) (AD4851_REG_CHX_PHASE_LSB(ch) + 0x01)
+
+#define AD4851_REG_TESTPAT_0(c) (0x38 + (c) * 0x12)
+#define AD4851_REG_TESTPAT_1(c) (0x39 + (c) * 0x12)
+#define AD4851_REG_TESTPAT_2(c) (0x3A + (c) * 0x12)
+#define AD4851_REG_TESTPAT_3(c) (0x3B + (c) * 0x12)
+
+#define AD4851_SW_RESET (BIT(7) | BIT(0))
+#define AD4851_SDO_ENABLE BIT(4)
+#define AD4851_SINGLE_INSTRUCTION BIT(7)
+#define AD4851_REFBUF BIT(2)
+#define AD4851_REFSEL BIT(1)
+#define AD4851_ECHO_CLOCK_MODE BIT(0)
+
+#define AD4851_PACKET_FORMAT_0 0
+#define AD4851_PACKET_FORMAT_1 1
+#define AD4851_PACKET_FORMAT_MASK GENMASK(1, 0)
+
+#define AD4851_OS_EN_MSK BIT(7)
+#define AD4851_OS_RATIO_MSK GENMASK(3, 0)
+
+#define AD4851_TEST_PAT BIT(2)
+
+#define AD4858_PACKET_SIZE_20 0
+#define AD4858_PACKET_SIZE_24 1
+#define AD4858_PACKET_SIZE_32 2
+
+#define AD4857_PACKET_SIZE_16 0
+#define AD4857_PACKET_SIZE_24 1
+
+#define AD4851_TESTPAT_0_DEFAULT 0x2A
+#define AD4851_TESTPAT_1_DEFAULT 0x3C
+#define AD4851_TESTPAT_2_DEFAULT 0xCE
+#define AD4851_TESTPAT_3_DEFAULT(c) (0x0A + (0x10 * (c)))
+
+#define AD4851_SOFTSPAN_0V_2V5 0
+#define AD4851_SOFTSPAN_N2V5_2V5 1
+#define AD4851_SOFTSPAN_0V_5V 2
+#define AD4851_SOFTSPAN_N5V_5V 3
+#define AD4851_SOFTSPAN_0V_6V25 4
+#define AD4851_SOFTSPAN_N6V25_6V25 5
+#define AD4851_SOFTSPAN_0V_10V 6
+#define AD4851_SOFTSPAN_N10V_10V 7
+#define AD4851_SOFTSPAN_0V_12V5 8
+#define AD4851_SOFTSPAN_N12V5_12V5 9
+#define AD4851_SOFTSPAN_0V_20V 10
+#define AD4851_SOFTSPAN_N20V_20V 11
+#define AD4851_SOFTSPAN_0V_25V 12
+#define AD4851_SOFTSPAN_N25V_25V 13
+#define AD4851_SOFTSPAN_0V_40V 14
+#define AD4851_SOFTSPAN_N40V_40V 15
+
+#define AD4851_MAX_LANES 8
+#define AD4851_MAX_IODELAY 32
+
+#define AD4851_T_CNVH_NS 40
+#define AD4851_T_CNVH_NS_MARGIN 10
+
+#define AD4841_MAX_SCALE_AVAIL 8
+
+#define AD4851_MAX_CH_NR 8
+#define AD4851_CH_START 0
+
+struct ad4851_scale {
+ unsigned int scale_val;
+ u8 reg_val;
+};
+
+static const struct ad4851_scale ad4851_scale_table_unipolar[] = {
+ { 2500, 0x0 },
+ { 5000, 0x2 },
+ { 6250, 0x4 },
+ { 10000, 0x6 },
+ { 12500, 0x8 },
+ { 20000, 0xA },
+ { 25000, 0xC },
+ { 40000, 0xE },
+};
+
+static const struct ad4851_scale ad4851_scale_table_bipolar[] = {
+ { 5000, 0x1 },
+ { 10000, 0x3 },
+ { 12500, 0x5 },
+ { 20000, 0x7 },
+ { 25000, 0x9 },
+ { 40000, 0xB },
+ { 50000, 0xD },
+ { 80000, 0xF },
+};
+
+static const unsigned int ad4851_scale_avail_unipolar[] = {
+ 2500,
+ 5000,
+ 6250,
+ 10000,
+ 12500,
+ 20000,
+ 25000,
+ 40000,
+};
+
+static const unsigned int ad4851_scale_avail_bipolar[] = {
+ 5000,
+ 10000,
+ 12500,
+ 20000,
+ 25000,
+ 40000,
+ 50000,
+ 80000,
+};
+
+struct ad4851_chip_info {
+ const char *name;
+ unsigned int product_id;
+ int num_scales;
+ unsigned long max_sample_rate_hz;
+ unsigned int resolution;
+ unsigned int max_channels;
+ int (*parse_channels)(struct iio_dev *indio_dev);
+};
+
+enum {
+ AD4851_SCAN_TYPE_NORMAL,
+ AD4851_SCAN_TYPE_RESOLUTION_BOOST,
+};
+
+struct ad4851_state {
+ struct spi_device *spi;
+ struct pwm_device *cnv;
+ struct iio_backend *back;
+ /*
+	 * Synchronize access to members of the driver state, and ensure
+ * atomicity of consecutive regmap operations.
+ */
+ struct mutex lock;
+ struct regmap *regmap;
+ const struct ad4851_chip_info *info;
+ struct gpio_desc *pd_gpio;
+ bool resolution_boost_enabled;
+ unsigned long cnv_trigger_rate_hz;
+ unsigned int osr;
+ bool vrefbuf_en;
+ bool vrefio_en;
+ bool bipolar_ch[AD4851_MAX_CH_NR];
+ unsigned int scales_unipolar[AD4841_MAX_SCALE_AVAIL][2];
+ unsigned int scales_bipolar[AD4841_MAX_SCALE_AVAIL][2];
+};
+
+static int ad4851_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg,
+ unsigned int writeval,
+ unsigned int *readval)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ if (readval)
+ return regmap_read(st->regmap, reg, readval);
+
+ return regmap_write(st->regmap, reg, writeval);
+}
+
+static int ad4851_set_sampling_freq(struct ad4851_state *st, unsigned int freq)
+{
+ struct pwm_state cnv_state = {
+ .duty_cycle = AD4851_T_CNVH_NS + AD4851_T_CNVH_NS_MARGIN,
+ .enabled = true,
+ };
+ int ret;
+
+ freq = clamp(freq, 1, st->info->max_sample_rate_hz);
+
+ cnv_state.period = DIV_ROUND_UP_ULL(NSEC_PER_SEC, freq);
+
+ ret = pwm_apply_might_sleep(st->cnv, &cnv_state);
+ if (ret)
+ return ret;
+
+ st->cnv_trigger_rate_hz = freq;
+
+ return 0;
+}
+
+static const int ad4851_oversampling_ratios[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128,
+ 256, 512, 1024, 2048, 4096, 8192, 16384, 32768,
+ 65536,
+};
+
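+/*
+ * Map an oversampling ratio to the OSR register field value. The encoding
+ * skips OSR == 1 (oversampling disabled), so e.g. a ratio of 2 maps to 0,
+ * 4 maps to 1, ... and 65536 maps to 15.
+ */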
+static int ad4851_osr_to_regval(unsigned int ratio)
+{
+ int i;
+
+ for (i = 1; i < ARRAY_SIZE(ad4851_oversampling_ratios); i++)
+ if (ratio == ad4851_oversampling_ratios[i])
+ return i - 1;
+
+ return -EINVAL;
+}
+
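+/*
+ * Convert a full-scale span (in mV) from the scale tables into an IIO
+ * (integer, micro) pair: span * MICRO >> realbits. For example, a 40000 mV
+ * span at 20-bit resolution yields roughly 0.038 mV per code.
+ */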
+static int __ad4851_get_scale(struct iio_dev *indio_dev, int scale_tbl,
+ unsigned int *val, unsigned int *val2)
+{
+ const struct iio_scan_type *scan_type;
+ unsigned int tmp;
+
+ scan_type = iio_get_current_scan_type(indio_dev, &indio_dev->channels[0]);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ tmp = ((u64)scale_tbl * MICRO) >> scan_type->realbits;
+ *val = tmp / MICRO;
+ *val2 = tmp % MICRO;
+
+ return 0;
+}
+
+static int ad4851_scale_fill(struct iio_dev *indio_dev)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int i, val1, val2;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad4851_scale_avail_unipolar); i++) {
+ ret = __ad4851_get_scale(indio_dev,
+ ad4851_scale_avail_unipolar[i],
+ &val1, &val2);
+ if (ret)
+ return ret;
+
+ st->scales_unipolar[i][0] = val1;
+ st->scales_unipolar[i][1] = val2;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ad4851_scale_avail_bipolar); i++) {
+ ret = __ad4851_get_scale(indio_dev,
+ ad4851_scale_avail_bipolar[i],
+ &val1, &val2);
+ if (ret)
+ return ret;
+
+ st->scales_bipolar[i][0] = val1;
+ st->scales_bipolar[i][1] = val2;
+ }
+
+ return 0;
+}
+
+static int ad4851_set_oversampling_ratio(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int osr)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ int val, ret;
+
+ guard(mutex)(&st->lock);
+
+ if (osr == 1) {
+ ret = regmap_clear_bits(st->regmap, AD4851_REG_OVERSAMPLE,
+ AD4851_OS_EN_MSK);
+ if (ret)
+ return ret;
+ } else {
+ val = ad4851_osr_to_regval(osr);
+ if (val < 0)
+ return -EINVAL;
+
+ ret = regmap_update_bits(st->regmap, AD4851_REG_OVERSAMPLE,
+ AD4851_OS_EN_MSK |
+ AD4851_OS_RATIO_MSK,
+ FIELD_PREP(AD4851_OS_EN_MSK, 1) |
+ FIELD_PREP(AD4851_OS_RATIO_MSK, val));
+ if (ret)
+ return ret;
+ }
+
+ ret = iio_backend_oversampling_ratio_set(st->back, osr);
+ if (ret)
+ return ret;
+
+ switch (st->info->resolution) {
+ case 20:
+ switch (osr) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ val = 20;
+ break;
+ default:
+ val = 24;
+ break;
+ }
+ break;
+ case 16:
+ val = 16;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = iio_backend_data_size_set(st->back, val);
+ if (ret)
+ return ret;
+
+ if (osr == 1 || st->info->resolution == 16) {
+ ret = regmap_clear_bits(st->regmap, AD4851_REG_PACKET,
+ AD4851_PACKET_FORMAT_MASK);
+ if (ret)
+ return ret;
+
+ st->resolution_boost_enabled = false;
+ } else {
+ ret = regmap_update_bits(st->regmap, AD4851_REG_PACKET,
+ AD4851_PACKET_FORMAT_MASK,
+ FIELD_PREP(AD4851_PACKET_FORMAT_MASK, 1));
+ if (ret)
+ return ret;
+
+ st->resolution_boost_enabled = true;
+ }
+
+ if (st->osr != osr) {
+ ret = ad4851_scale_fill(indio_dev);
+ if (ret)
+ return ret;
+
+ st->osr = osr;
+ }
+
+ return 0;
+}
+
+static int ad4851_get_oversampling_ratio(struct ad4851_state *st, unsigned int *val)
+{
+ unsigned int osr;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD4851_REG_OVERSAMPLE, &osr);
+ if (ret)
+ return ret;
+
+ if (!FIELD_GET(AD4851_OS_EN_MSK, osr))
+ *val = 1;
+ else
+ *val = ad4851_oversampling_ratios[FIELD_GET(AD4851_OS_RATIO_MSK, osr) + 1];
+
+ st->osr = *val;
+
+ return IIO_VAL_INT;
+}
+
+static void ad4851_pwm_disable(void *data)
+{
+ pwm_disable(data);
+}
+
+static int ad4851_setup(struct ad4851_state *st)
+{
+ unsigned int product_id;
+ int ret;
+
+ if (st->pd_gpio) {
+ /* To initiate a global reset, bring the PD pin high twice */
+ gpiod_set_value(st->pd_gpio, 1);
+ fsleep(1);
+ gpiod_set_value(st->pd_gpio, 0);
+ fsleep(1);
+ gpiod_set_value(st->pd_gpio, 1);
+ fsleep(1);
+ gpiod_set_value(st->pd_gpio, 0);
+ fsleep(1000);
+ } else {
+ ret = regmap_set_bits(st->regmap, AD4851_REG_INTERFACE_CONFIG_A,
+ AD4851_SW_RESET);
+ if (ret)
+ return ret;
+ }
+
+ if (st->vrefbuf_en) {
+ ret = regmap_set_bits(st->regmap, AD4851_REG_DEVICE_CTRL,
+ AD4851_REFBUF);
+ if (ret)
+ return ret;
+ }
+
+ if (st->vrefio_en) {
+ ret = regmap_set_bits(st->regmap, AD4851_REG_DEVICE_CTRL,
+ AD4851_REFSEL);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_write(st->regmap, AD4851_REG_INTERFACE_CONFIG_B,
+ AD4851_SINGLE_INSTRUCTION);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_INTERFACE_CONFIG_A,
+ AD4851_SDO_ENABLE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD4851_REG_PRODUCT_ID_L, &product_id);
+ if (ret)
+ return ret;
+
+ if (product_id != st->info->product_id)
+ dev_info(&st->spi->dev, "Unknown product ID: 0x%02X\n",
+ product_id);
+
+ ret = regmap_set_bits(st->regmap, AD4851_REG_DEVICE_CTRL,
+ AD4851_ECHO_CLOCK_MODE);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_PACKET, 0);
+}
+
+/*
+ * Find the longest consecutive sequence of false values in @field starting at
+ * @start, store its offset relative to @start in @val and return its length.
+ */
+static int ad4851_find_opt(const unsigned long *field, unsigned int start,
+ unsigned int nbits, unsigned int *val)
+{
+ unsigned int bit = start, end, start_cnt, cnt = 0;
+
+ for_each_clear_bitrange_from(bit, end, field, start + nbits) {
+ if (end - bit > cnt) {
+ cnt = end - bit;
+ start_cnt = bit - start;
+ }
+ }
+
+ if (!cnt)
+ return -ENOENT;
+
+ *val = start_cnt;
+
+ return cnt;
+}
+
+static int ad4851_calibrate(struct iio_dev *indio_dev)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int opt_delay, num_lanes, delay, i, s;
+ enum iio_backend_interface_type interface_type;
+ DECLARE_BITMAP(pn_status, AD4851_MAX_LANES * AD4851_MAX_IODELAY);
+ bool status;
+ int c, ret;
+
+ ret = iio_backend_interface_type_get(st->back, &interface_type);
+ if (ret)
+ return ret;
+
+ switch (interface_type) {
+ case IIO_BACKEND_INTERFACE_SERIAL_CMOS:
+ num_lanes = indio_dev->num_channels;
+ break;
+ case IIO_BACKEND_INTERFACE_SERIAL_LVDS:
+ num_lanes = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (st->info->resolution == 16) {
+ ret = iio_backend_data_size_set(st->back, 24);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_PACKET,
+ AD4851_TEST_PAT | AD4857_PACKET_SIZE_24);
+ if (ret)
+ return ret;
+ } else {
+ ret = iio_backend_data_size_set(st->back, 32);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_PACKET,
+ AD4851_TEST_PAT | AD4858_PACKET_SIZE_32);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_0(i),
+ AD4851_TESTPAT_0_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_1(i),
+ AD4851_TESTPAT_1_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_2(i),
+ AD4851_TESTPAT_2_DEFAULT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_TESTPAT_3(i),
+ AD4851_TESTPAT_3_DEFAULT(i));
+ if (ret)
+ return ret;
+
+ ret = iio_backend_chan_enable(st->back,
+ indio_dev->channels[i].channel);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_lanes; i++) {
+ for (delay = 0; delay < AD4851_MAX_IODELAY; delay++) {
+ ret = iio_backend_iodelay_set(st->back, i, delay);
+ if (ret)
+ return ret;
+
+ ret = iio_backend_chan_status(st->back, i, &status);
+ if (ret)
+ return ret;
+
+ __assign_bit(i * AD4851_MAX_IODELAY + delay, pn_status,
+ status);
+ }
+ }
+
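+	/* Center each lane's IO delay within its widest run of error-free taps. */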
+ for (i = 0; i < num_lanes; i++) {
+ c = ad4851_find_opt(pn_status, i * AD4851_MAX_IODELAY,
+ AD4851_MAX_IODELAY, &s);
+ if (c < 0)
+ return c;
+
+ opt_delay = s + c / 2;
+ ret = iio_backend_iodelay_set(st->back, i, opt_delay);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < indio_dev->num_channels; i++) {
+ ret = iio_backend_chan_disable(st->back, i);
+ if (ret)
+ return ret;
+ }
+
+ ret = iio_backend_data_size_set(st->back, 20);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_PACKET, 0);
+}
+
+static int ad4851_get_calibscale(struct ad4851_state *st, int ch, int *val, int *val2)
+{
+ unsigned int reg_val;
+ int gain;
+ int ret;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_GAIN_MSB(ch), &reg_val);
+ if (ret)
+ return ret;
+
+ gain = reg_val << 8;
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_GAIN_LSB(ch), &reg_val);
+ if (ret)
+ return ret;
+
+ gain |= reg_val;
+
+ *val = gain;
+ *val2 = 15;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+}
+
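+/*
+ * The per-channel gain registers hold a 16-bit factor interpreted as
+ * reg / 32768, so 0x8000 corresponds to a calibscale of 1.0.
+ */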
+static int ad4851_set_calibscale(struct ad4851_state *st, int ch, int val,
+ int val2)
+{
+ u64 gain;
+ u8 buf[2];
+ int ret;
+
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+
+ gain = val * MICRO + val2;
+ gain = DIV_U64_ROUND_CLOSEST(gain * 32768, MICRO);
+
+ put_unaligned_be16(gain, buf);
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_GAIN_MSB(ch), buf[0]);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_CHX_GAIN_LSB(ch), buf[1]);
+}
+
+static int ad4851_get_calibbias(struct ad4851_state *st, int ch, int *val)
+{
+ unsigned int lsb, mid, msb;
+ int ret;
+
+ guard(mutex)(&st->lock);
+ /*
+	 * After testing, bulk_write operations don't work as expected here
+	 * since the CS line needs to be raised after each byte transfer.
+ */
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_OFFSET_MSB(ch), &msb);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_OFFSET_MID(ch), &mid);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_OFFSET_LSB(ch), &lsb);
+ if (ret)
+ return ret;
+
+ if (st->info->resolution == 16) {
+ *val = msb << 8;
+ *val |= mid;
+ *val = sign_extend32(*val, 15);
+ } else {
+ *val = msb << 12;
+ *val |= mid << 4;
+ *val |= lsb >> 4;
+ *val = sign_extend32(*val, 19);
+ }
+
+ return IIO_VAL_INT;
+}
+
+static int ad4851_set_calibbias(struct ad4851_state *st, int ch, int val)
+{
+ u8 buf[3];
+ int ret;
+
+ if (val < 0)
+ return -EINVAL;
+
+ if (st->info->resolution == 16)
+ put_unaligned_be16(val, buf);
+ else
+ put_unaligned_be24(val << 4, buf);
+
+ guard(mutex)(&st->lock);
+ /*
+	 * After testing, bulk_write operations don't work as expected here
+	 * since the CS line needs to be raised after each byte transfer.
+ */
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_OFFSET_LSB(ch), buf[2]);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_OFFSET_MID(ch), buf[1]);
+ if (ret)
+ return ret;
+
+ return regmap_write(st->regmap, AD4851_REG_CHX_OFFSET_MSB(ch), buf[0]);
+}
+
+static int ad4851_set_scale(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int val, int val2)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int scale_val[2];
+ unsigned int i;
+ const struct ad4851_scale *scale_table;
+ size_t table_size;
+ int ret;
+
+ if (st->bipolar_ch[chan->channel]) {
+ scale_table = ad4851_scale_table_bipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_bipolar);
+ } else {
+ scale_table = ad4851_scale_table_unipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_unipolar);
+ }
+
+ for (i = 0; i < table_size; i++) {
+ ret = __ad4851_get_scale(indio_dev, scale_table[i].scale_val,
+ &scale_val[0], &scale_val[1]);
+ if (ret)
+ return ret;
+
+ if (scale_val[0] != val || scale_val[1] != val2)
+ continue;
+
+ return regmap_write(st->regmap,
+ AD4851_REG_CHX_SOFTSPAN(chan->channel),
+ scale_table[i].reg_val);
+ }
+
+ return -EINVAL;
+}
+
+static int ad4851_get_scale(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, int *val,
+ int *val2)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ const struct ad4851_scale *scale_table;
+ size_t table_size;
+ u32 softspan_val;
+ int i, ret;
+
+ if (st->bipolar_ch[chan->channel]) {
+ scale_table = ad4851_scale_table_bipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_bipolar);
+ } else {
+ scale_table = ad4851_scale_table_unipolar;
+ table_size = ARRAY_SIZE(ad4851_scale_table_unipolar);
+ }
+
+ ret = regmap_read(st->regmap, AD4851_REG_CHX_SOFTSPAN(chan->channel),
+ &softspan_val);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < table_size; i++) {
+ if (softspan_val == scale_table[i].reg_val)
+ break;
+ }
+
+ if (i == table_size)
+ return -EIO;
+
+ ret = __ad4851_get_scale(indio_dev, scale_table[i].scale_val, val,
+ val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
+static int ad4851_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->cnv_trigger_rate_hz;
+ *val2 = st->osr;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4851_get_calibscale(st, chan->channel, val, val2);
+ case IIO_CHAN_INFO_SCALE:
+ return ad4851_get_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad4851_get_calibbias(st, chan->channel, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4851_get_oversampling_ratio(st, val);
+ default:
+ return -EINVAL;
+ }
+}
+
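+/*
+ * The reported sampling frequency is the CNV trigger rate divided by the
+ * oversampling ratio, so a requested frequency is scaled back up by the
+ * current OSR before reprogramming the CNV PWM.
+ */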
+static int ad4851_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long info)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 0 || val2 < 0)
+ return -EINVAL;
+ return ad4851_set_sampling_freq(st, val * st->osr + val2 * st->osr / MICRO);
+ case IIO_CHAN_INFO_SCALE:
+ return ad4851_set_scale(indio_dev, chan, val, val2);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad4851_set_calibscale(st, chan->channel, val, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad4851_set_calibbias(st, chan->channel, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return ad4851_set_oversampling_ratio(indio_dev, chan, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad4851_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ unsigned int c;
+ int ret;
+
+ for (c = 0; c < indio_dev->num_channels; c++) {
+ if (test_bit(c, scan_mask))
+ ret = iio_backend_chan_enable(st->back, c);
+ else
+ ret = iio_backend_chan_disable(st->back, c);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ad4851_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (st->bipolar_ch[chan->channel]) {
+ *vals = (const int *)st->scales_bipolar;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(ad4851_scale_avail_bipolar) * 2;
+ } else {
+ *vals = (const int *)st->scales_unipolar;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+ /* Values are stored in a 2D matrix */
+ *length = ARRAY_SIZE(ad4851_scale_avail_unipolar) * 2;
+ }
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ad4851_oversampling_ratios;
+ *length = ARRAY_SIZE(ad4851_oversampling_ratios);
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_scan_type ad4851_scan_type_20_u[] = {
+ [AD4851_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 20,
+ .storagebits = 32,
+ },
+ [AD4851_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ },
+};
+
+static const struct iio_scan_type ad4851_scan_type_20_b[] = {
+ [AD4851_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 20,
+ .storagebits = 32,
+ },
+ [AD4851_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 24,
+ .storagebits = 32,
+ },
+};
+
+static int ad4851_get_current_scan_type(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+
+ return st->resolution_boost_enabled ? AD4851_SCAN_TYPE_RESOLUTION_BOOST
+ : AD4851_SCAN_TYPE_NORMAL;
+}
+
+#define AD4851_IIO_CHANNEL \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_CALIBSCALE) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_all_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .indexed = 1
+
+/*
+ * For AD4858_IIO_CHANNEL the scan_type is set up dynamically in the
+ * parse_channels() callback.
+ */
+#define AD4858_IIO_CHANNEL \
+{ \
+ AD4851_IIO_CHANNEL \
+}
+
+#define AD4857_IIO_CHANNEL \
+{ \
+ AD4851_IIO_CHANNEL, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ }, \
+}
+
+static int ad4851_parse_channels_common(struct iio_dev *indio_dev,
+ struct iio_chan_spec **chans,
+ const struct iio_chan_spec ad4851_chan)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ struct iio_chan_spec *channels, *chan_start;
+ unsigned int num_channels, reg;
+ unsigned int index = 0;
+ int ret;
+
+ num_channels = device_get_child_node_count(dev);
+ if (num_channels > AD4851_MAX_CH_NR)
+ return dev_err_probe(dev, -EINVAL, "Too many channels: %u\n",
+ num_channels);
+
+ channels = devm_kcalloc(dev, num_channels, sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ chan_start = channels;
+
+ device_for_each_child_node_scoped(dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "Missing channel number\n");
+ if (reg >= AD4851_MAX_CH_NR)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid channel number\n");
+ *channels = ad4851_chan;
+ channels->scan_index = index++;
+ channels->channel = reg;
+
+ if (fwnode_property_present(child, "diff-channels")) {
+ channels->channel2 = reg + st->info->max_channels;
+ channels->differential = 1;
+ }
+
+ st->bipolar_ch[reg] = fwnode_property_read_bool(child, "bipolar");
+
+ if (st->bipolar_ch[reg]) {
+ channels->scan_type.sign = 's';
+ } else {
+ ret = regmap_write(st->regmap, AD4851_REG_CHX_SOFTSPAN(reg),
+ AD4851_SOFTSPAN_0V_40V);
+ if (ret)
+ return ret;
+ }
+
+ channels++;
+ }
+
+ *chans = chan_start;
+
+ return num_channels;
+}
+
+static int ad4857_parse_channels(struct iio_dev *indio_dev)
+{
+ struct iio_chan_spec *ad4851_channels;
+ const struct iio_chan_spec ad4851_chan = AD4857_IIO_CHANNEL;
+ int ret;
+
+ ret = ad4851_parse_channels_common(indio_dev, &ad4851_channels,
+ ad4851_chan);
+ if (ret < 0)
+ return ret;
+
+ indio_dev->channels = ad4851_channels;
+ indio_dev->num_channels = ret;
+
+ return 0;
+}
+
+static int ad4858_parse_channels(struct iio_dev *indio_dev)
+{
+ struct ad4851_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->spi->dev;
+ struct iio_chan_spec *ad4851_channels;
+ const struct iio_chan_spec ad4851_chan = AD4858_IIO_CHANNEL;
+ int ret;
+
+ ret = ad4851_parse_channels_common(indio_dev, &ad4851_channels,
+ ad4851_chan);
+ if (ret < 0)
+ return ret;
+
+ device_for_each_child_node_scoped(dev, child) {
+ ad4851_channels->has_ext_scan_type = 1;
+ if (fwnode_property_read_bool(child, "bipolar")) {
+ ad4851_channels->ext_scan_type = ad4851_scan_type_20_b;
+ ad4851_channels->num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_b);
+ } else {
+ ad4851_channels->ext_scan_type = ad4851_scan_type_20_u;
+ ad4851_channels->num_ext_scan_type = ARRAY_SIZE(ad4851_scan_type_20_u);
+ }
+ ad4851_channels++;
+ }
+
+ indio_dev->channels = ad4851_channels;
+ indio_dev->num_channels = ret;
+
+ return 0;
+}
+
+/*
+ * The parse_channels() callback handles the rest of the channel-related
+ * attributes that are usually stored in the chip info structure.
+ */
+static const struct ad4851_chip_info ad4851_info = {
+ .name = "ad4851",
+ .product_id = 0x67,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4852_info = {
+ .name = "ad4852",
+ .product_id = 0x66,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4853_info = {
+ .name = "ad4853",
+ .product_id = 0x65,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4854_info = {
+ .name = "ad4854",
+ .product_id = 0x64,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4855_info = {
+ .name = "ad4855",
+ .product_id = 0x63,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4856_info = {
+ .name = "ad4856",
+ .product_id = 0x62,
+ .max_sample_rate_hz = 250 * KILO,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4857_info = {
+ .name = "ad4857",
+ .product_id = 0x61,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 16,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4857_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4858_info = {
+ .name = "ad4858",
+ .product_id = 0x60,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct ad4851_chip_info ad4858i_info = {
+ .name = "ad4858i",
+ .product_id = 0x6F,
+ .max_sample_rate_hz = 1 * MEGA,
+ .resolution = 20,
+ .max_channels = AD4851_MAX_CH_NR,
+ .parse_channels = ad4858_parse_channels,
+};
+
+static const struct iio_info ad4851_iio_info = {
+ .debugfs_reg_access = ad4851_reg_access,
+ .read_raw = ad4851_read_raw,
+ .write_raw = ad4851_write_raw,
+ .update_scan_mode = ad4851_update_scan_mode,
+ .get_current_scan_type = ad4851_get_current_scan_type,
+ .read_avail = ad4851_read_avail,
+};
+
+static const struct regmap_config regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .read_flag_mask = BIT(7),
+};
+
+static const char * const ad4851_power_supplies[] = {
+ "vcc", "vdd", "vee", "vio",
+};
+
+static int ad4851_probe(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev;
+ struct device *dev = &spi->dev;
+ struct ad4851_state *st;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->spi = spi;
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ ret = devm_regulator_bulk_get_enable(dev,
+ ARRAY_SIZE(ad4851_power_supplies),
+ ad4851_power_supplies);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to get and enable supplies\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "vddh");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vddh voltage\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "vddl");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vddl voltage\n");
+
+ ret = devm_regulator_get_enable_optional(dev, "vrefbuf");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vrefbuf voltage\n");
+
+ st->vrefbuf_en = ret != -ENODEV;
+
+ ret = devm_regulator_get_enable_optional(dev, "vrefio");
+ if (ret < 0 && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to enable vrefio voltage\n");
+
+ st->vrefio_en = ret != -ENODEV;
+
+ st->pd_gpio = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_LOW);
+ if (IS_ERR(st->pd_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->pd_gpio),
+ "Error on requesting pd GPIO\n");
+
+ st->cnv = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->cnv))
+ return dev_err_probe(dev, PTR_ERR(st->cnv),
+ "Error on requesting pwm\n");
+
+ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -ENODEV;
+
+ st->regmap = devm_regmap_init_spi(spi, &regmap_config);
+ if (IS_ERR(st->regmap))
+ return PTR_ERR(st->regmap);
+
+ ret = ad4851_set_sampling_freq(st, HZ_PER_MHZ);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(&st->spi->dev, ad4851_pwm_disable,
+ st->cnv);
+ if (ret)
+ return ret;
+
+ ret = ad4851_setup(st);
+ if (ret)
+ return ret;
+
+ indio_dev->name = st->info->name;
+ indio_dev->info = &ad4851_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = st->info->parse_channels(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad4851_scale_fill(indio_dev);
+ if (ret)
+ return ret;
+
+ st->back = devm_iio_backend_get(dev, NULL);
+ if (IS_ERR(st->back))
+ return PTR_ERR(st->back);
+
+ ret = devm_iio_backend_request_buffer(dev, st->back, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_backend_enable(dev, st->back);
+ if (ret)
+ return ret;
+
+ ret = ad4851_calibrate(indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id ad4851_of_match[] = {
+ { .compatible = "adi,ad4851", .data = &ad4851_info, },
+ { .compatible = "adi,ad4852", .data = &ad4852_info, },
+ { .compatible = "adi,ad4853", .data = &ad4853_info, },
+ { .compatible = "adi,ad4854", .data = &ad4854_info, },
+ { .compatible = "adi,ad4855", .data = &ad4855_info, },
+ { .compatible = "adi,ad4856", .data = &ad4856_info, },
+ { .compatible = "adi,ad4857", .data = &ad4857_info, },
+ { .compatible = "adi,ad4858", .data = &ad4858_info, },
+ { .compatible = "adi,ad4858i", .data = &ad4858i_info, },
+ { }
+};
+
+static const struct spi_device_id ad4851_spi_id[] = {
+ { "ad4851", (kernel_ulong_t)&ad4851_info },
+ { "ad4852", (kernel_ulong_t)&ad4852_info },
+ { "ad4853", (kernel_ulong_t)&ad4853_info },
+ { "ad4854", (kernel_ulong_t)&ad4854_info },
+ { "ad4855", (kernel_ulong_t)&ad4855_info },
+ { "ad4856", (kernel_ulong_t)&ad4856_info },
+ { "ad4857", (kernel_ulong_t)&ad4857_info },
+ { "ad4858", (kernel_ulong_t)&ad4858_info },
+ { "ad4858i", (kernel_ulong_t)&ad4858i_info },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad4851_spi_id);
+
+static struct spi_driver ad4851_driver = {
+ .probe = ad4851_probe,
+ .driver = {
+ .name = "ad4851",
+ .of_match_table = ad4851_of_match,
+ },
+ .id_table = ad4851_spi_id,
+};
+module_spi_driver(ad4851_driver);
+
+MODULE_AUTHOR("Sergiu Cuciurean <sergiu.cuciurean@analog.com>");
+MODULE_AUTHOR("Dragos Bogdan <dragos.bogdan@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD4851 DAS driver");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_BACKEND");
diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c
index 606486c4dfe8..931ff71b2888 100644
--- a/drivers/iio/adc/ad7091r-base.c
+++ b/drivers/iio/adc/ad7091r-base.c
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/interrupt.h>
diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c
index 6ae27cdd3250..3ea81a98e455 100644
--- a/drivers/iio/adc/ad7124.c
+++ b/drivers/iio/adc/ad7124.c
@@ -53,6 +53,11 @@
#define AD7124_ADC_CTRL_MODE_MSK GENMASK(5, 2)
#define AD7124_ADC_CTRL_MODE(x) FIELD_PREP(AD7124_ADC_CTRL_MODE_MSK, x)
+#define AD7124_MODE_CAL_INT_ZERO 0x5 /* Internal Zero-Scale Calibration */
+#define AD7124_MODE_CAL_INT_FULL 0x6 /* Internal Full-Scale Calibration */
+#define AD7124_MODE_CAL_SYS_ZERO 0x7 /* System Zero-Scale Calibration */
+#define AD7124_MODE_CAL_SYS_FULL 0x8 /* System Full-Scale Calibration */
+
/* AD7124 ID */
#define AD7124_DEVICE_ID_MSK GENMASK(7, 4)
#define AD7124_DEVICE_ID_GET(x) FIELD_GET(AD7124_DEVICE_ID_MSK, x)
@@ -151,7 +156,11 @@ struct ad7124_chip_info {
struct ad7124_channel_config {
bool live;
unsigned int cfg_slot;
- /* Following fields are used to compare equality. */
+ /*
+	 * The following fields are used to compare configurations for
+	 * equality. If you change them, you most likely also have to adapt
+	 * ad7124_find_similar_live_cfg().
+ */
struct_group(config_props,
enum ad7124_ref_sel refsel;
bool bipolar;
@@ -162,6 +171,8 @@ struct ad7124_channel_config {
unsigned int odr;
unsigned int odr_sel_bits;
unsigned int filter_type;
+ unsigned int calibration_offset;
+ unsigned int calibration_gain;
);
};
@@ -170,6 +181,7 @@ struct ad7124_channel {
struct ad7124_channel_config cfg;
unsigned int ain;
unsigned int slot;
+ u8 syscalib_mode;
};
struct ad7124_state {
@@ -182,24 +194,13 @@ struct ad7124_state {
unsigned int num_channels;
struct mutex cfgs_lock; /* lock for configs access */
unsigned long cfg_slots_status; /* bitmap with slot status (1 means it is used) */
- DECLARE_KFIFO(live_cfgs_fifo, struct ad7124_channel_config *, AD7124_MAX_CONFIGS);
-};
-static const struct iio_chan_spec ad7124_channel_template = {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .differential = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
- .scan_type = {
- .sign = 'u',
- .realbits = 24,
- .storagebits = 32,
- .endianness = IIO_BE,
- },
+ /*
+	 * Stores the power-on reset value of the GAIN(x) registers, which is
+	 * needed for measurements at gain 1 (i.e. CONFIG(x).PGA == 0).
+ */
+ unsigned int gain_default;
+ DECLARE_KFIFO(live_cfgs_fifo, struct ad7124_channel_config *, AD7124_MAX_CONFIGS);
};
static struct ad7124_chip_info ad7124_chip_info_tbl[] = {
@@ -338,15 +339,42 @@ static struct ad7124_channel_config *ad7124_find_similar_live_cfg(struct ad7124_
struct ad7124_channel_config *cfg)
{
struct ad7124_channel_config *cfg_aux;
- ptrdiff_t cmp_size;
int i;
- cmp_size = sizeof_field(struct ad7124_channel_config, config_props);
+ /*
+	 * This is just to make sure that the comparison below is kept in sync
+	 * whenever struct ad7124_channel_config is changed.
+ */
+ static_assert(sizeof_field(struct ad7124_channel_config, config_props) ==
+ sizeof(struct {
+ enum ad7124_ref_sel refsel;
+ bool bipolar;
+ bool buf_positive;
+ bool buf_negative;
+ unsigned int vref_mv;
+ unsigned int pga_bits;
+ unsigned int odr;
+ unsigned int odr_sel_bits;
+ unsigned int filter_type;
+ unsigned int calibration_offset;
+ unsigned int calibration_gain;
+ }));
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live &&
- !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
+ cfg->refsel == cfg_aux->refsel &&
+ cfg->bipolar == cfg_aux->bipolar &&
+ cfg->buf_positive == cfg_aux->buf_positive &&
+ cfg->buf_negative == cfg_aux->buf_negative &&
+ cfg->vref_mv == cfg_aux->vref_mv &&
+ cfg->pga_bits == cfg_aux->pga_bits &&
+ cfg->odr == cfg_aux->odr &&
+ cfg->odr_sel_bits == cfg_aux->odr_sel_bits &&
+ cfg->filter_type == cfg_aux->filter_type &&
+ cfg->calibration_offset == cfg_aux->calibration_offset &&
+ cfg->calibration_gain == cfg_aux->calibration_gain)
return cfg_aux;
}
@@ -402,6 +430,14 @@ static int ad7124_write_config(struct ad7124_state *st, struct ad7124_channel_co
cfg->cfg_slot = cfg_slot;
+ ret = ad_sd_write_reg(&st->sd, AD7124_OFFSET(cfg->cfg_slot), 3, cfg->calibration_offset);
+ if (ret)
+ return ret;
+
+ ret = ad_sd_write_reg(&st->sd, AD7124_GAIN(cfg->cfg_slot), 3, cfg->calibration_gain);
+ if (ret)
+ return ret;
+
tmp = (cfg->buf_positive << 1) + cfg->buf_negative;
val = AD7124_CONFIG_BIPOLAR(cfg->bipolar) | AD7124_CONFIG_REF_SEL(cfg->refsel) |
AD7124_CONFIG_IN_BUFF(tmp) | AD7124_CONFIG_PGA(cfg->pga_bits);
@@ -540,14 +576,21 @@ static int ad7124_append_status(struct ad_sigma_delta *sd, bool append)
return 0;
}
-static int ad7124_disable_all(struct ad_sigma_delta *sd)
+static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
+
+ /* The relevant thing here is that AD7124_CHANNEL_EN_MSK is cleared. */
+ return ad_sd_write_reg(&st->sd, AD7124_CHANNEL(chan), 2, 0);
+}
+
+static int ad7124_disable_all(struct ad_sigma_delta *sd)
+{
int ret;
int i;
- for (i = 0; i < st->num_channels; i++) {
- ret = ad7124_spi_write_mask(st, AD7124_CHANNEL(i), AD7124_CHANNEL_EN_MSK, 0, 2);
+ for (i = 0; i < 16; i++) {
+ ret = ad7124_disable_one(sd, i);
if (ret < 0)
return ret;
}
@@ -555,13 +598,6 @@ static int ad7124_disable_all(struct ad_sigma_delta *sd)
return 0;
}
-static int ad7124_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
-{
- struct ad7124_state *st = container_of(sd, struct ad7124_state, sd);
-
- return ad7124_spi_write_mask(st, AD7124_CHANNEL(chan), AD7124_CHANNEL_EN_MSK, 0, 2);
-}
-
static const struct ad_sigma_delta_info ad7124_sigma_delta_info = {
.set_channel = ad7124_set_channel,
.append_status = ad7124_append_status,
@@ -808,13 +844,22 @@ static int ad7124_soft_reset(struct ad7124_state *st)
return dev_err_probe(dev, ret, "Error reading status register\n");
if (!(readval & AD7124_STATUS_POR_FLAG_MSK))
- return 0;
+ break;
/* The AD7124 requires typically 2ms to power up and settle */
usleep_range(100, 2000);
} while (--timeout);
- return dev_err_probe(dev, -EIO, "Soft reset failed\n");
+ if (readval & AD7124_STATUS_POR_FLAG_MSK)
+ return dev_err_probe(dev, -EIO, "Soft reset failed\n");
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(0), 3, &st->gain_default);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Error reading gain register\n");
+
+ dev_dbg(dev, "Reset value of GAIN register is 0x%x\n", st->gain_default);
+
+ return 0;
}
static int ad7124_check_chip_id(struct ad7124_state *st)
@@ -842,6 +887,140 @@ static int ad7124_check_chip_id(struct ad7124_state *st)
return 0;
}
+enum {
+ AD7124_SYSCALIB_ZERO_SCALE,
+ AD7124_SYSCALIB_FULL_SCALE,
+};
+
+static int ad7124_syscalib_locked(struct ad7124_state *st, const struct iio_chan_spec *chan)
+{
+ struct device *dev = &st->sd.spi->dev;
+ struct ad7124_channel *ch = &st->channels[chan->channel];
+ int ret;
+
+ if (ch->syscalib_mode == AD7124_SYSCALIB_ZERO_SCALE) {
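+		/* Restore the reset default of the OFFSET register before calibrating. */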
+ ch->cfg.calibration_offset = 0x800000;
+
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_SYS_ZERO,
+ chan->address);
+ if (ret < 0)
+ return ret;
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(ch->cfg.cfg_slot), 3,
+ &ch->cfg.calibration_offset);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "offset for channel %d after zero-scale calibration: 0x%x\n",
+ chan->channel, ch->cfg.calibration_offset);
+ } else {
+ ch->cfg.calibration_gain = st->gain_default;
+
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_SYS_FULL,
+ chan->address);
+ if (ret < 0)
+ return ret;
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(ch->cfg.cfg_slot), 3,
+ &ch->cfg.calibration_gain);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "gain for channel %d after full-scale calibration: 0x%x\n",
+ chan->channel, ch->cfg.calibration_gain);
+ }
+
+ return 0;
+}
+
+static ssize_t ad7124_write_syscalib(struct iio_dev *indio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+ bool sys_calib;
+ int ret;
+
+ ret = kstrtobool(buf, &sys_calib);
+ if (ret)
+ return ret;
+
+ if (!sys_calib)
+ return len;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7124_syscalib_locked(st, chan);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret ?: len;
+}
+
+static const char * const ad7124_syscalib_modes[] = {
+ [AD7124_SYSCALIB_ZERO_SCALE] = "zero_scale",
+ [AD7124_SYSCALIB_FULL_SCALE] = "full_scale",
+};
+
+static int ad7124_set_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ unsigned int mode)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+
+ st->channels[chan->channel].syscalib_mode = mode;
+
+ return 0;
+}
+
+static int ad7124_get_syscalib_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7124_state *st = iio_priv(indio_dev);
+
+ return st->channels[chan->channel].syscalib_mode;
+}
+
+static const struct iio_enum ad7124_syscalib_mode_enum = {
+ .items = ad7124_syscalib_modes,
+ .num_items = ARRAY_SIZE(ad7124_syscalib_modes),
+ .set = ad7124_set_syscalib_mode,
+ .get = ad7124_get_syscalib_mode
+};
+
+static const struct iio_chan_spec_ext_info ad7124_calibsys_ext_info[] = {
+ {
+ .name = "sys_calibration",
+ .write = ad7124_write_syscalib,
+ .shared = IIO_SEPARATE,
+ },
+ IIO_ENUM("sys_calibration_mode", IIO_SEPARATE,
+ &ad7124_syscalib_mode_enum),
+ IIO_ENUM_AVAILABLE("sys_calibration_mode", IIO_SHARED_BY_TYPE,
+ &ad7124_syscalib_mode_enum),
+ { }
+};
+
+static const struct iio_chan_spec ad7124_channel_template = {
+ .type = IIO_VOLTAGE,
+ .indexed = 1,
+ .differential = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) |
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ .ext_info = ad7124_calibsys_ext_info,
+};
+
/*
* Input specifiers 8 - 15 are explicitly reserved for ad7124-4
* while they are fine for ad7124-8. Values above 31 don't fit
@@ -881,12 +1060,12 @@ static int ad7124_parse_channel_config(struct iio_dev *indio_dev,
/* Add one for temperature */
st->num_channels = min(num_channels + 1, AD7124_MAX_CHANNELS);
- chan = devm_kcalloc(indio_dev->dev.parent, st->num_channels,
+ chan = devm_kcalloc(dev, st->num_channels,
sizeof(*chan), GFP_KERNEL);
if (!chan)
return -ENOMEM;
- channels = devm_kcalloc(indio_dev->dev.parent, st->num_channels, sizeof(*channels),
+ channels = devm_kcalloc(dev, st->num_channels, sizeof(*channels),
GFP_KERNEL);
if (!channels)
return -ENOMEM;
@@ -1016,11 +1195,10 @@ static int ad7124_setup(struct ad7124_state *st)
* set all channels to this default value.
*/
ad7124_set_channel_odr(st, i, 10);
-
- /* Disable all channels to prevent unintended conversions. */
- ad_sd_write_reg(&st->sd, AD7124_CHANNEL(i), 2, 0);
}
+ ad7124_disable_all(&st->sd);
+
ret = ad_sd_write_reg(&st->sd, AD7124_ADC_CONTROL, 2, st->adc_control);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to setup CONTROL register\n");
@@ -1028,6 +1206,91 @@ static int ad7124_setup(struct ad7124_state *st)
return ret;
}
+static int __ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio_dev)
+{
+ struct device *dev = &st->sd.spi->dev;
+ int ret, i;
+
+ for (i = 0; i < st->num_channels; i++) {
+
+ if (indio_dev->channels[i].type != IIO_VOLTAGE)
+ continue;
+
+ /*
+ * For calibration the OFFSET register should hold its reset default
+ * value. For the GAIN register there is no such requirement but
+ * for gain 1 it should hold the reset default value, too. So to
+ * simplify matters use the reset default value for both.
+ */
+ st->channels[i].cfg.calibration_offset = 0x800000;
+ st->channels[i].cfg.calibration_gain = st->gain_default;
+
+ /*
+ * Full-scale calibration isn't supported at gain 1, so skip in
+		 * that case. Note that, atypically, full-scale calibration has
+ * to happen before zero-scale calibration. This only applies to
+ * the internal calibration. For system calibration it's as
+ * usual: first zero-scale then full-scale calibration.
+ */
+ if (st->channels[i].cfg.pga_bits > 0) {
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_INT_FULL, i);
+ if (ret < 0)
+ return ret;
+
+ /*
+			 * Read out the resulting value of GAIN right
+			 * after full-scale calibration because the next
+			 * ad_sd_calibrate() call overwrites it via
+ * ad_sigma_delta_set_channel() -> ad7124_set_channel()
+ * ... -> ad7124_enable_channel().
+ */
+ ret = ad_sd_read_reg(&st->sd, AD7124_GAIN(st->channels[i].cfg.cfg_slot), 3,
+ &st->channels[i].cfg.calibration_gain);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = ad_sd_calibrate(&st->sd, AD7124_MODE_CAL_INT_ZERO, i);
+ if (ret < 0)
+ return ret;
+
+ ret = ad_sd_read_reg(&st->sd, AD7124_OFFSET(st->channels[i].cfg.cfg_slot), 3,
+ &st->channels[i].cfg.calibration_offset);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "offset and gain for channel %d = 0x%x + 0x%x\n", i,
+ st->channels[i].cfg.calibration_offset,
+ st->channels[i].cfg.calibration_gain);
+ }
+
+ return 0;
+}
+
+static int ad7124_calibrate_all(struct ad7124_state *st, struct iio_dev *indio_dev)
+{
+ int ret;
+ unsigned int adc_control = st->adc_control;
+
+ /*
+	 * Calibration isn't supported at full power, so slow down a bit.
+ * Setting .adc_control is enough here because the control register is
+ * written as part of ad_sd_calibrate() -> ad_sigma_delta_set_mode().
+ * The resulting calibration is then also valid for high-speed, so just
+ * restore adc_control afterwards.
+ */
+ if (FIELD_GET(AD7124_ADC_CTRL_PWR_MSK, adc_control) >= AD7124_FULL_POWER) {
+ st->adc_control &= ~AD7124_ADC_CTRL_PWR_MSK;
+ st->adc_control |= AD7124_ADC_CTRL_PWR(AD7124_MID_POWER);
+ }
+
+ ret = __ad7124_calibrate_all(st, indio_dev);
+
+ st->adc_control = adc_control;
+
+ return ret;
+}
+
static void ad7124_reg_disable(void *r)
{
regulator_disable(r);
@@ -1106,6 +1369,10 @@ static int ad7124_probe(struct spi_device *spi)
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to setup triggers\n");
+ ret = ad7124_calibrate_all(st, indio_dev);
+ if (ret)
+ return ret;
+
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret < 0)
return dev_err_probe(dev, ret, "Failed to register iio device\n");
diff --git a/drivers/iio/adc/ad7173.c b/drivers/iio/adc/ad7173.c
index 6c4ed10ae580..69de5886474c 100644
--- a/drivers/iio/adc/ad7173.c
+++ b/drivers/iio/adc/ad7173.c
@@ -35,6 +35,7 @@
#include <linux/units.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -102,6 +103,7 @@
#define AD7173_GPIO_PDSW BIT(14)
#define AD7173_GPIO_OP_EN2_3 BIT(13)
+#define AD4111_GPIO_GP_OW_EN BIT(12)
#define AD7173_GPIO_MUX_IO BIT(12)
#define AD7173_GPIO_SYNC_EN BIT(11)
#define AD7173_GPIO_ERR_EN BIT(10)
@@ -149,6 +151,7 @@
#define AD7173_FILTER_ODR0_MASK GENMASK(5, 0)
#define AD7173_MAX_CONFIGS 8
+#define AD4111_OW_DET_THRSH_MV 300
#define AD7173_MODE_CAL_INT_ZERO 0x4 /* Internal Zero-Scale Calibration */
#define AD7173_MODE_CAL_INT_FULL 0x5 /* Internal Full-Scale Calibration */
@@ -171,6 +174,7 @@ struct ad7173_device_info {
unsigned int clock;
unsigned int id;
char *name;
+ const struct ad_sigma_delta_info *sd_info;
bool has_current_inputs;
bool has_vincom_input;
bool has_temp;
@@ -181,15 +185,23 @@ struct ad7173_device_info {
bool has_int_ref;
bool has_ref2;
bool has_internal_fs_calibration;
+ bool has_openwire_det;
bool higher_gpio_bits;
u8 num_gpios;
};
struct ad7173_channel_config {
+ /* Openwire detection threshold */
+ unsigned int openwire_thrsh_raw;
+ int openwire_comp_chan;
u8 cfg_slot;
bool live;
- /* Following fields are used to compare equality. */
+ /*
+	 * The following fields are used to compare configurations for
+	 * equality. If you change them, you most likely also have to adapt
+	 * ad7173_find_live_config().
+ */
struct_group(config_props,
bool bipolar;
bool input_buf;
@@ -202,11 +214,11 @@ struct ad7173_channel {
unsigned int ain;
struct ad7173_channel_config cfg;
u8 syscalib_mode;
+ bool openwire_det_en;
};
struct ad7173_state {
struct ad_sigma_delta sd;
- struct ad_sigma_delta_info sigma_delta_info;
const struct ad7173_device_info *info;
struct ad7173_channel *channels;
struct regulator_bulk_data regulators[3];
@@ -265,228 +277,6 @@ static unsigned int ad4111_current_channel_config[] = {
0x18B, /* 12:IIN3+ 11:IIN3− */
};
-static const struct ad7173_device_info ad4111_device_info = {
- .name = "ad4111",
- .id = AD4111_ID,
- .num_voltage_in_div = 8,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 8,
- .num_gpios = 2,
- .higher_gpio_bits = true,
- .has_temp = true,
- .has_vincom_input = true,
- .has_input_buf = true,
- .has_current_inputs = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4112_device_info = {
- .name = "ad4112",
- .id = AD4112_ID,
- .num_voltage_in_div = 8,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 8,
- .num_gpios = 2,
- .higher_gpio_bits = true,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_current_inputs = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4113_device_info = {
- .name = "ad4113",
- .id = AD4113_ID,
- .num_voltage_in_div = 8,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 8,
- .num_gpios = 2,
- .data_reg_only_16bit = true,
- .higher_gpio_bits = true,
- .has_vincom_input = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4114_device_info = {
- .name = "ad4114",
- .id = AD4114_ID,
- .num_voltage_in_div = 16,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 16,
- .num_gpios = 4,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4115_device_info = {
- .name = "ad4115",
- .id = AD4115_ID,
- .num_voltage_in_div = 16,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 16,
- .num_gpios = 4,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 8 * HZ_PER_MHZ,
- .sinc5_data_rates = ad4115_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad4116_device_info = {
- .name = "ad4116",
- .id = AD4116_ID,
- .num_voltage_in_div = 11,
- .num_channels = 16,
- .num_configs = 8,
- .num_voltage_in = 16,
- .num_gpios = 4,
- .has_vincom_input = true,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_internal_fs_calibration = true,
- .clock = 4 * HZ_PER_MHZ,
- .sinc5_data_rates = ad4116_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7172_2_device_info = {
- .name = "ad7172-2",
- .id = AD7172_2_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_pow_supply_monitoring = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7172_4_device_info = {
- .name = "ad7172-4",
- .id = AD7172_4_ID,
- .num_voltage_in = 9,
- .num_channels = 8,
- .num_configs = 8,
- .num_gpios = 4,
- .has_input_buf = true,
- .has_ref2 = true,
- .has_pow_supply_monitoring = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7173_8_device_info = {
- .name = "ad7173-8",
- .id = AD7173_ID,
- .num_voltage_in = 17,
- .num_channels = 16,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_ref2 = true,
- .clock = 2 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7173_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7175_2_device_info = {
- .name = "ad7175-2",
- .id = AD7175_2_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_pow_supply_monitoring = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7175_8_device_info = {
- .name = "ad7175-8",
- .id = AD7175_8_ID,
- .num_voltage_in = 17,
- .num_channels = 16,
- .num_configs = 8,
- .num_gpios = 4,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_ref2 = true,
- .has_pow_supply_monitoring = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7176_2_device_info = {
- .name = "ad7176-2",
- .id = AD7176_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_int_ref = true,
- .clock = 16 * HZ_PER_MHZ,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
-static const struct ad7173_device_info ad7177_2_device_info = {
- .name = "ad7177-2",
- .id = AD7177_ID,
- .num_voltage_in = 5,
- .num_channels = 4,
- .num_configs = 4,
- .num_gpios = 2,
- .has_temp = true,
- .has_input_buf = true,
- .has_int_ref = true,
- .has_pow_supply_monitoring = true,
- .clock = 16 * HZ_PER_MHZ,
- .odr_start_value = AD7177_ODR_START_VALUE,
- .sinc5_data_rates = ad7175_sinc5_data_rates,
- .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
-};
-
static const char *const ad7173_ref_sel_str[] = {
[AD7173_SETUP_REF_SEL_EXT_REF] = "vref",
[AD7173_SETUP_REF_SEL_EXT_REF2] = "vref2",
@@ -559,6 +349,9 @@ static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev,
if (ret)
return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
mode = st->channels[chan->channel].syscalib_mode;
if (sys_calib) {
if (mode == AD7173_SYSCALIB_ZERO_SCALE)
@@ -569,6 +362,8 @@ static ssize_t ad7173_write_syscalib(struct iio_dev *indio_dev,
chan->address);
}
+ iio_device_release_direct(indio_dev);
+
return ret ? : len;
}
@@ -616,6 +411,76 @@ static int ad7173_calibrate_all(struct ad7173_state *st, struct iio_dev *indio_d
return 0;
}
+/*
+ * Associative array of channel pairs for open-wire detection.
+ * The array is indexed by AIN and gives the channel pair to use for the
+ * open-wire measurement: pair [0] is for single-ended inputs and
+ * pair [1] is for differential inputs.
+ */
+static int openwire_ain_to_channel_pair[][2][2] = {
+/* AIN Single Differential */
+ [0] = { { 0, 15 }, { 1, 2 } },
+ [1] = { { 1, 2 }, { 2, 1 } },
+ [2] = { { 3, 4 }, { 5, 6 } },
+ [3] = { { 5, 6 }, { 6, 5 } },
+ [4] = { { 7, 8 }, { 9, 10 } },
+ [5] = { { 9, 10 }, { 10, 9 } },
+ [6] = { { 11, 12 }, { 13, 14 } },
+ [7] = { { 13, 14 }, { 14, 13 } },
+};
+
+/*
+ * Open-wire detection on the ad4111 works by running the same input
+ * measurement on two different channels and checking whether the difference
+ * between the two measurements exceeds a certain value (typically 300 mV).
+ */
+static int ad4111_openwire_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ struct ad7173_channel *adchan = &st->channels[chan->address];
+ struct ad7173_channel_config *cfg = &adchan->cfg;
+ int ret, val1, val2;
+
+ ret = regmap_set_bits(st->reg_gpiocon_regmap, AD7173_REG_GPIO,
+ AD4111_GPIO_GP_OW_EN);
+ if (ret)
+ return ret;
+
+ adchan->cfg.openwire_comp_chan =
+ openwire_ain_to_channel_pair[chan->channel][chan->differential][0];
+
+ ret = ad_sigma_delta_single_conversion(indio_dev, chan, &val1);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Error running ad_sigma_delta single conversion: %d", ret);
+ goto out;
+ }
+
+ adchan->cfg.openwire_comp_chan =
+ openwire_ain_to_channel_pair[chan->channel][chan->differential][1];
+
+ ret = ad_sigma_delta_single_conversion(indio_dev, chan, &val2);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "Error running ad_sigma_delta single conversion: %d", ret);
+ goto out;
+ }
+
+ if (abs(val1 - val2) > cfg->openwire_thrsh_raw)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, chan->address,
+ IIO_EV_TYPE_FAULT, IIO_EV_DIR_FAULT_OPENWIRE),
+ iio_get_time_ns(indio_dev));
+
+out:
+ adchan->cfg.openwire_comp_chan = -1;
+ regmap_clear_bits(st->reg_gpiocon_regmap, AD7173_REG_GPIO,
+ AD4111_GPIO_GP_OW_EN);
+ return ret;
+}
+
static int ad7173_mask_xlate(struct gpio_regmap *gpio, unsigned int base,
unsigned int offset, unsigned int *reg,
unsigned int *mask)
@@ -712,15 +577,28 @@ static struct ad7173_channel_config *
ad7173_find_live_config(struct ad7173_state *st, struct ad7173_channel_config *cfg)
{
struct ad7173_channel_config *cfg_aux;
- ptrdiff_t cmp_size;
int i;
- cmp_size = sizeof_field(struct ad7173_channel_config, config_props);
+ /*
+ * This is just to make sure that the comparison below is adapted
+ * whenever struct ad7173_channel_config is changed.
+ */
+ static_assert(sizeof_field(struct ad7173_channel_config, config_props) ==
+ sizeof(struct {
+ bool bipolar;
+ bool input_buf;
+ u8 odr;
+ u8 ref_sel;
+ }));
+
for (i = 0; i < st->num_channels; i++) {
cfg_aux = &st->channels[i].cfg;
if (cfg_aux->live &&
- !memcmp(&cfg->config_props, &cfg_aux->config_props, cmp_size))
+ cfg->bipolar == cfg_aux->bipolar &&
+ cfg->input_buf == cfg_aux->input_buf &&
+ cfg->odr == cfg_aux->odr &&
+ cfg->ref_sel == cfg_aux->ref_sel)
return cfg_aux;
}
return NULL;
@@ -813,6 +691,9 @@ static int ad7173_set_channel(struct ad_sigma_delta *sd, unsigned int channel)
FIELD_PREP(AD7173_CH_SETUP_SEL_MASK, st->channels[channel].cfg.cfg_slot) |
st->channels[channel].ain;
+ if (st->channels[channel].cfg.openwire_comp_chan >= 0)
+ channel = st->channels[channel].cfg.openwire_comp_chan;
+
return ad_sd_write_reg(&st->sd, AD7173_REG_CH(channel), 2, val);
}
@@ -861,21 +742,280 @@ static int ad7173_disable_all(struct ad_sigma_delta *sd)
static int ad7173_disable_one(struct ad_sigma_delta *sd, unsigned int chan)
{
+ struct ad7173_state *st = ad_sigma_delta_to_ad7173(sd);
+
+ if (st->channels[chan].cfg.openwire_comp_chan >= 0)
+ chan = st->channels[chan].cfg.openwire_comp_chan;
+
return ad_sd_write_reg(sd, AD7173_REG_CH(chan), 2, 0);
}
-static const struct ad_sigma_delta_info ad7173_sigma_delta_info = {
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info_4_slots = {
+ .set_channel = ad7173_set_channel,
+ .append_status = ad7173_append_status,
+ .disable_all = ad7173_disable_all,
+ .disable_one = ad7173_disable_one,
+ .set_mode = ad7173_set_mode,
+ .has_registers = true,
+ .has_named_irqs = true,
+ .addr_shift = 0,
+ .read_mask = BIT(6),
+ .status_ch_mask = GENMASK(3, 0),
+ .data_reg = AD7173_REG_DATA,
+ .num_resetclks = 64,
+ .num_slots = 4,
+};
+
+static const struct ad_sigma_delta_info ad7173_sigma_delta_info_8_slots = {
.set_channel = ad7173_set_channel,
.append_status = ad7173_append_status,
.disable_all = ad7173_disable_all,
.disable_one = ad7173_disable_one,
.set_mode = ad7173_set_mode,
.has_registers = true,
+ .has_named_irqs = true,
.addr_shift = 0,
.read_mask = BIT(6),
.status_ch_mask = GENMASK(3, 0),
.data_reg = AD7173_REG_DATA,
.num_resetclks = 64,
+ .num_slots = 8,
+};
+
+static const struct ad7173_device_info ad4111_device_info = {
+ .name = "ad4111",
+ .id = AD4111_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .higher_gpio_bits = true,
+ .has_temp = true,
+ .has_vincom_input = true,
+ .has_input_buf = true,
+ .has_current_inputs = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .has_openwire_det = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4112_device_info = {
+ .name = "ad4112",
+ .id = AD4112_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_current_inputs = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4113_device_info = {
+ .name = "ad4113",
+ .id = AD4113_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 8,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 8,
+ .num_gpios = 2,
+ .data_reg_only_16bit = true,
+ .higher_gpio_bits = true,
+ .has_vincom_input = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4114_device_info = {
+ .name = "ad4114",
+ .id = AD4114_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 16,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4115_device_info = {
+ .name = "ad4115",
+ .id = AD4115_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 16,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 8 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad4115_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad4115_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad4116_device_info = {
+ .name = "ad4116",
+ .id = AD4116_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in_div = 11,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_voltage_in = 16,
+ .num_gpios = 4,
+ .has_vincom_input = true,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_internal_fs_calibration = true,
+ .clock = 4 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad4116_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad4116_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7172_2_device_info = {
+ .name = "ad7172-2",
+ .id = AD7172_2_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7172_4_device_info = {
+ .name = "ad7172-4",
+ .id = AD7172_4_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 9,
+ .num_channels = 8,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_input_buf = true,
+ .has_ref2 = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7173_8_device_info = {
+ .name = "ad7173-8",
+ .id = AD7173_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 17,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_ref2 = true,
+ .clock = 2 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7173_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7173_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7175_2_device_info = {
+ .name = "ad7175-2",
+ .id = AD7175_2_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7175_8_device_info = {
+ .name = "ad7175-8",
+ .id = AD7175_8_ID,
+ .sd_info = &ad7173_sigma_delta_info_8_slots,
+ .num_voltage_in = 17,
+ .num_channels = 16,
+ .num_configs = 8,
+ .num_gpios = 4,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_ref2 = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7176_2_device_info = {
+ .name = "ad7176-2",
+ .id = AD7176_ID,
+ .sd_info = &ad7173_sigma_delta_info_4_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_int_ref = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
+};
+
+static const struct ad7173_device_info ad7177_2_device_info = {
+ .name = "ad7177-2",
+ .id = AD7177_ID,
+ .sd_info = &ad7173_sigma_delta_info_4_slots,
+ .num_voltage_in = 5,
+ .num_channels = 4,
+ .num_configs = 4,
+ .num_gpios = 2,
+ .has_temp = true,
+ .has_input_buf = true,
+ .has_int_ref = true,
+ .has_pow_supply_monitoring = true,
+ .clock = 16 * HZ_PER_MHZ,
+ .odr_start_value = AD7177_ODR_START_VALUE,
+ .sinc5_data_rates = ad7175_sinc5_data_rates,
+ .num_sinc5_data_rates = ARRAY_SIZE(ad7175_sinc5_data_rates),
};
static int ad7173_setup(struct iio_dev *indio_dev)
@@ -969,6 +1109,12 @@ static int ad7173_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
+ if (ch->openwire_det_en) {
+ ret = ad4111_openwire_event(indio_dev, chan);
+ if (ret < 0)
+ return ret;
+ }
+
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -1033,11 +1179,10 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
struct ad7173_state *st = iio_priv(indio_dev);
struct ad7173_channel_config *cfg;
unsigned int freq, i;
- int ret;
+ int ret = 0;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
switch (info) {
/*
@@ -1071,7 +1216,7 @@ static int ad7173_write_raw(struct iio_dev *indio_dev,
break;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -1113,12 +1258,57 @@ static int ad7173_debug_reg_access(struct iio_dev *indio_dev, unsigned int reg,
return ad_sd_write_reg(&st->sd, reg, reg_size, writeval);
}
+static int ad7173_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ struct ad7173_channel *adchan = &st->channels[chan->address];
+
+ switch (type) {
+ case IIO_EV_TYPE_FAULT:
+ adchan->openwire_det_en = state;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7173_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ad7173_state *st = iio_priv(indio_dev);
+ struct ad7173_channel *adchan = &st->channels[chan->address];
+
+ switch (type) {
+ case IIO_EV_TYPE_FAULT:
+ return adchan->openwire_det_en;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_event_spec ad4111_events[] = {
+ {
+ .type = IIO_EV_TYPE_FAULT,
+ .dir = IIO_EV_DIR_FAULT_OPENWIRE,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_shared_by_all = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
static const struct iio_info ad7173_info = {
.read_raw = &ad7173_read_raw,
.write_raw = &ad7173_write_raw,
.debugfs_reg_access = &ad7173_debug_reg_access,
.validate_trigger = ad_sd_validate_trigger,
.update_scan_mode = ad7173_update_scan_mode,
+ .write_event_config = ad7173_write_event_config,
+ .read_event_config = ad7173_read_event_config,
};
static const struct iio_scan_type ad4113_scan_type = {
@@ -1322,6 +1512,37 @@ static int ad7173_validate_reference(struct ad7173_state *st, int ref_sel)
return 0;
}
+static int ad7173_validate_openwire_ain_inputs(struct ad7173_state *st,
+ bool differential,
+ unsigned int ain0,
+ unsigned int ain1)
+{
+ /*
+ * If the channel is configured as differential, the ad4111 requires
+ * specific AINs to be used together.
+ */
+ if (differential)
+ return (ain0 % 2) ? (ain0 - 1) == ain1 : (ain0 + 1) == ain1;
+
+ return ain1 == AD4111_VINCOM_INPUT;
+}
+
+static unsigned int ad7173_calc_openwire_thrsh_raw(struct ad7173_state *st,
+ struct iio_chan_spec *chan,
+ struct ad7173_channel *chan_st_priv,
+ unsigned int thrsh_mv)
+{
+ unsigned int thrsh_raw;
+
+ thrsh_raw =
+ BIT(chan->scan_type.realbits - !!(chan_st_priv->cfg.bipolar))
+ * thrsh_mv
+ / ad7173_get_ref_voltage_milli(st, chan_st_priv->cfg.ref_sel);
+ if (chan->channel < st->info->num_voltage_in_div)
+ thrsh_raw /= AD4111_DIVIDER_RATIO;
+
+ return thrsh_raw;
+}
+
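For reference, here is a rough standalone sketch (an editor's illustration, not part of this patch) of what the conversion in ad7173_calc_openwire_thrsh_raw() works out to; the 24-bit width, 2.5 V reference and 10:1 divider ratio are assumed example values, not taken from this hunk.

/* Worked example of the open-wire threshold conversion (assumed values). */
#include <stdio.h>

int main(void)
{
	unsigned long long realbits = 24, thrsh_mv = 300, vref_mv = 2500;
	unsigned long long divider = 10;	/* assumed input divider ratio */
	unsigned long long thrsh_raw = (1ULL << realbits) * thrsh_mv / vref_mv;

	/* Channels behind the resistive divider get the extra division. */
	thrsh_raw /= divider;
	printf("threshold = %llu codes\n", thrsh_raw);	/* prints 201326 */
	return 0;
}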
static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
{
struct ad7173_channel *chans_st_arr, *chan_st_priv;
@@ -1369,6 +1590,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->cfg.bipolar = false;
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.ref_sel = AD7173_SETUP_REF_SEL_INT_REF;
+ chan_st_priv->cfg.openwire_comp_chan = -1;
st->adc_mode |= AD7173_ADC_MODE_REF_EN;
if (st->info->data_reg_only_16bit)
chan_arr[chan_index].scan_type = ad4113_scan_type;
@@ -1435,6 +1657,7 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan->channel = ain[0];
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan_st_priv->cfg.odr = 0;
+ chan_st_priv->cfg.openwire_comp_chan = -1;
chan_st_priv->cfg.bipolar = fwnode_property_read_bool(child, "bipolar");
if (chan_st_priv->cfg.bipolar)
@@ -1449,6 +1672,14 @@ static int ad7173_fw_parse_channel_config(struct iio_dev *indio_dev)
chan_st_priv->cfg.input_buf = st->info->has_input_buf;
chan->channel2 = ain[1];
chan_st_priv->ain = AD7173_CH_ADDRESS(ain[0], ain[1]);
+ if (st->info->has_openwire_det &&
+ ad7173_validate_openwire_ain_inputs(st, chan->differential, ain[0], ain[1])) {
+ chan->event_spec = ad4111_events;
+ chan->num_event_specs = ARRAY_SIZE(ad4111_events);
+ chan_st_priv->cfg.openwire_thrsh_raw =
+ ad7173_calc_openwire_thrsh_raw(st, chan, chan_st_priv,
+ AD4111_OW_DET_THRSH_MV);
+ }
}
if (st->info->data_reg_only_16bit)
@@ -1515,12 +1746,6 @@ static int ad7173_fw_parse_device_config(struct iio_dev *indio_dev)
return ret;
}
- ret = fwnode_irq_get_byname(dev_fwnode(dev), "rdy");
- if (ret < 0)
- return dev_err_probe(dev, ret, "Interrupt 'rdy' is required\n");
-
- st->sigma_delta_info.irq_line = ret;
-
return ad7173_fw_parse_channel_config(indio_dev);
}
@@ -1552,9 +1777,7 @@ static int ad7173_probe(struct spi_device *spi)
spi->mode = SPI_MODE_3;
spi_setup(spi);
- st->sigma_delta_info = ad7173_sigma_delta_info;
- st->sigma_delta_info.num_slots = st->info->num_configs;
- ret = ad_sd_init(&st->sd, indio_dev, spi, &st->sigma_delta_info);
+ ret = ad_sd_init(&st->sd, indio_dev, spi, st->info->sd_info);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ad7191.c b/drivers/iio/adc/ad7191.c
new file mode 100644
index 000000000000..d9cd903ffdd2
--- /dev/null
+++ b/drivers/iio/adc/ad7191.c
@@ -0,0 +1,554 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AD7191 ADC driver
+ *
+ * Copyright 2025 Analog Devices Inc.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/adc/ad_sigma_delta.h>
+#include <linux/iio/iio.h>
+
+#define ad_sigma_delta_to_ad7191(sigmad) \
+ container_of((sigmad), struct ad7191_state, sd)
+
+#define AD7191_TEMP_CODES_PER_DEGREE 2815
+
+#define AD7191_CHAN_MASK BIT(0)
+#define AD7191_TEMP_MASK BIT(1)
+
+enum ad7191_channel {
+ AD7191_CH_AIN1_AIN2,
+ AD7191_CH_AIN3_AIN4,
+ AD7191_CH_TEMP,
+};
+
+/*
+ * NOTE:
+ * The AD7191 features a dual-use data out ready DOUT/RDY output.
+ * In order to avoid contentions on the SPI bus, it's therefore necessary
+ * to use SPI bus locking.
+ *
+ * The DOUT/RDY output must also be wired to an interrupt-capable GPIO.
+ *
+ * The SPI controller's chip select must be connected to the PDOWN pin
+ * of the ADC. When CS (PDOWN) is high, it powers down the device and
+ * resets the internal circuitry.
+ */
+
+struct ad7191_state {
+ struct ad_sigma_delta sd;
+ struct mutex lock; /* Protect device state */
+
+ struct gpio_descs *odr_gpios;
+ struct gpio_descs *pga_gpios;
+ struct gpio_desc *temp_gpio;
+ struct gpio_desc *chan_gpio;
+
+ u16 int_vref_mv;
+ const u32 (*scale_avail)[2];
+ size_t scale_avail_size;
+ u32 scale_index;
+ const u32 *samp_freq_avail;
+ size_t samp_freq_avail_size;
+ u32 samp_freq_index;
+
+ struct clk *mclk;
+};
+
+static int ad7191_set_channel(struct ad_sigma_delta *sd, unsigned int address)
+{
+ struct ad7191_state *st = ad_sigma_delta_to_ad7191(sd);
+ u8 temp_gpio_val, chan_gpio_val;
+
+ if (!FIELD_FIT(AD7191_CHAN_MASK | AD7191_TEMP_MASK, address))
+ return -EINVAL;
+
+ chan_gpio_val = FIELD_GET(AD7191_CHAN_MASK, address);
+ temp_gpio_val = FIELD_GET(AD7191_TEMP_MASK, address);
+
+ gpiod_set_value(st->chan_gpio, chan_gpio_val);
+ gpiod_set_value(st->temp_gpio, temp_gpio_val);
+
+ return 0;
+}
+
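As a small aside, a minimal sketch (editor's addition, not driver code) of how the two-bit channel address used by ad7191_set_channel() above decomposes onto the CHAN and TEMP mode pins:

/* Illustration only: decode the channel address into the two mode pins. */
#include <stdio.h>

int main(void)
{
	unsigned int addr;

	/* Addresses 0, 1 and 2 are AIN1-AIN2, AIN3-AIN4 and TEMP above. */
	for (addr = 0; addr <= 2; addr++)
		printf("address %u -> CHAN pin %u, TEMP pin %u\n",
		       addr, addr & 0x1, (addr >> 1) & 0x1);
	return 0;
}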
+static int ad7191_set_cs(struct ad_sigma_delta *sigma_delta, int assert)
+{
+ struct spi_transfer t = {
+ .len = 0,
+ .cs_change = assert,
+ };
+ struct spi_message m;
+
+ spi_message_init_with_transfers(&m, &t, 1);
+
+ return spi_sync_locked(sigma_delta->spi, &m);
+}
+
+static int ad7191_set_mode(struct ad_sigma_delta *sd,
+ enum ad_sigma_delta_mode mode)
+{
+ struct ad7191_state *st = ad_sigma_delta_to_ad7191(sd);
+
+ switch (mode) {
+ case AD_SD_MODE_CONTINUOUS:
+ case AD_SD_MODE_SINGLE:
+ return ad7191_set_cs(&st->sd, 1);
+ case AD_SD_MODE_IDLE:
+ return ad7191_set_cs(&st->sd, 0);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct ad_sigma_delta_info ad7191_sigma_delta_info = {
+ .set_channel = ad7191_set_channel,
+ .set_mode = ad7191_set_mode,
+ .has_registers = false,
+};
+
+static int ad7191_init_regulators(struct iio_dev *indio_dev)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->sd.spi->dev;
+ int ret;
+
+ ret = devm_regulator_get_enable(dev, "avdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable specified AVdd supply\n");
+
+ ret = devm_regulator_get_enable(dev, "dvdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable specified DVdd supply\n");
+
+ ret = devm_regulator_get_enable_read_voltage(dev, "vref");
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get Vref voltage\n");
+
+ st->int_vref_mv = ret / 1000;
+
+ return 0;
+}
+
+static int ad7191_config_setup(struct iio_dev *indio_dev)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ struct device *dev = &st->sd.spi->dev;
+ /* Sampling frequencies in Hz, see Table 5 */
+ static const u32 samp_freq[4] = { 120, 60, 50, 10 };
+ /* Gain options, see Table 7 */
+ static const u32 gain[4] = { 1, 8, 64, 128 };
+ static u32 scale_buffer[4][2];
+ int odr_value, odr_index = 0, pga_value, pga_index = 0, i, ret;
+ u64 scale_uv;
+
+ st->samp_freq_index = 0;
+ st->scale_index = 0;
+
+ ret = device_property_read_u32(dev, "adi,odr-value", &odr_value);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Failed to get odr value.\n");
+
+ if (ret == -EINVAL) {
+ st->odr_gpios = devm_gpiod_get_array(dev, "odr", GPIOD_OUT_LOW);
+ if (IS_ERR(st->odr_gpios))
+ return dev_err_probe(dev, PTR_ERR(st->odr_gpios),
+ "Failed to get odr gpios.\n");
+
+ if (st->odr_gpios->ndescs != 2)
+ return dev_err_probe(dev, -EINVAL, "Expected 2 odr gpio pins.\n");
+
+ st->samp_freq_avail = samp_freq;
+ st->samp_freq_avail_size = ARRAY_SIZE(samp_freq);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(samp_freq); i++) {
+ if (odr_value != samp_freq[i])
+ continue;
+ odr_index = i;
+ break;
+ }
+
+ st->samp_freq_avail = &samp_freq[odr_index];
+ st->samp_freq_avail_size = 1;
+
+ st->odr_gpios = NULL;
+ }
+
+ mutex_lock(&st->lock);
+
+ for (i = 0; i < ARRAY_SIZE(scale_buffer); i++) {
+ scale_uv = ((u64)st->int_vref_mv * NANO) >>
+ (indio_dev->channels[0].scan_type.realbits - 1);
+ do_div(scale_uv, gain[i]);
+ scale_buffer[i][1] = do_div(scale_uv, NANO);
+ scale_buffer[i][0] = scale_uv;
+ }
+
+ mutex_unlock(&st->lock);
+
+ ret = device_property_read_u32(dev, "adi,pga-value", &pga_value);
+ if (ret && ret != -EINVAL)
+ return dev_err_probe(dev, ret, "Failed to get pga value.\n");
+
+ if (ret == -EINVAL) {
+ st->pga_gpios = devm_gpiod_get_array(dev, "pga", GPIOD_OUT_LOW);
+ if (IS_ERR(st->pga_gpios))
+ return dev_err_probe(dev, PTR_ERR(st->pga_gpios),
+ "Failed to get pga gpios.\n");
+
+ if (st->pga_gpios->ndescs != 2)
+ return dev_err_probe(dev, -EINVAL, "Expected 2 pga gpio pins.\n");
+
+ st->scale_avail = scale_buffer;
+ st->scale_avail_size = ARRAY_SIZE(scale_buffer);
+ } else {
+ for (i = 0; i < ARRAY_SIZE(gain); i++) {
+ if (pga_value != gain[i])
+ continue;
+ pga_index = i;
+ break;
+ }
+
+ st->scale_avail = &scale_buffer[pga_index];
+ st->scale_avail_size = 1;
+
+ st->pga_gpios = NULL;
+ }
+
+ st->temp_gpio = devm_gpiod_get(dev, "temp", GPIOD_OUT_LOW);
+ if (IS_ERR(st->temp_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->temp_gpio),
+ "Failed to get temp gpio.\n");
+
+ st->chan_gpio = devm_gpiod_get(dev, "chan", GPIOD_OUT_LOW);
+ if (IS_ERR(st->chan_gpio))
+ return dev_err_probe(dev, PTR_ERR(st->chan_gpio),
+ "Failed to get chan gpio.\n");
+
+ return 0;
+}
+
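To make the scale table computed above concrete, a short standalone sketch (editor's illustration; the 2.5 V reference and 24-bit resolution are assumptions, not taken from this hunk) of the gain-of-1 entry:

/* Worked example of the ad7191 scale computation (assumed values). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vref_mv = 2500, nano = 1000000000ULL;
	unsigned int realbits = 24, gain = 1;
	uint64_t scale = (vref_mv * nano >> (realbits - 1)) / gain;

	/* Same {integer, nano} split as the do_div() pair in the driver. */
	printf("%llu.%09llu mV per code\n",
	       (unsigned long long)(scale / nano),
	       (unsigned long long)(scale % nano));	/* 0.000298023 */
	return 0;
}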
+static int ad7191_clock_setup(struct ad7191_state *st)
+{
+ struct device *dev = &st->sd.spi->dev;
+
+ st->mclk = devm_clk_get_optional_enabled(dev, "mclk");
+ if (IS_ERR(st->mclk))
+ return dev_err_probe(dev, PTR_ERR(st->mclk),
+ "Failed to get mclk.\n");
+
+ return 0;
+}
+
+static int ad7191_setup(struct iio_dev *indio_dev)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ int ret;
+
+ ret = ad7191_init_regulators(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7191_config_setup(indio_dev);
+ if (ret)
+ return ret;
+
+ return ad7191_clock_setup(st);
+}
+
+static int ad7191_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long m)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ return ad_sigma_delta_single_conversion(indio_dev, chan, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE: {
+ guard(mutex)(&st->lock);
+ *val = st->scale_avail[st->scale_index][0];
+ *val2 = st->scale_avail[st->scale_index][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ }
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = NANO / AD7191_TEMP_CODES_PER_DEGREE;
+ return IIO_VAL_INT_PLUS_NANO;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -(1 << (chan->scan_type.realbits - 1));
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val -= 273 * AD7191_TEMP_CODES_PER_DEGREE;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->samp_freq_avail[st->samp_freq_index];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7191_set_gain(struct ad7191_state *st, int gain_index)
+{
+ DECLARE_BITMAP(bitmap, 2) = { };
+
+ st->scale_index = gain_index;
+
+ bitmap_write(bitmap, gain_index, 0, 2);
+
+ return gpiod_multi_set_value_cansleep(st->pga_gpios, bitmap);
+}
+
+static int ad7191_set_samp_freq(struct ad7191_state *st, int samp_freq_index)
+{
+ DECLARE_BITMAP(bitmap, 2) = {};
+
+ st->samp_freq_index = samp_freq_index;
+
+ bitmap_write(bitmap, samp_freq_index, 0, 2);
+
+ return gpiod_multi_set_value_cansleep(st->odr_gpios, bitmap);
+}
+
+static int __ad7191_write_raw(struct ad7191_state *st,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int i;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE: {
+ if (!st->pga_gpios)
+ return -EPERM;
+ guard(mutex)(&st->lock);
+ for (i = 0; i < st->scale_avail_size; i++) {
+ if (val2 == st->scale_avail[i][1])
+ return ad7191_set_gain(st, i);
+ }
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ if (!st->odr_gpios)
+ return -EPERM;
+ guard(mutex)(&st->lock);
+ for (i = 0; i < st->samp_freq_avail_size; i++) {
+ if (val == st->samp_freq_avail[i])
+ return ad7191_set_samp_freq(st, i);
+ }
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7191_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2,
+ long mask)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7191_write_raw(st, chan, val, val2, mask);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad7191_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7191_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, const int **vals,
+ int *type, int *length, long mask)
+{
+ struct ad7191_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ *vals = (int *)st->scale_avail;
+ *type = IIO_VAL_INT_PLUS_NANO;
+ *length = st->scale_avail_size * 2;
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = (int *)st->samp_freq_avail;
+ *type = IIO_VAL_INT;
+ *length = st->samp_freq_avail_size;
+ return IIO_AVAIL_LIST;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad7191_info = {
+ .read_raw = ad7191_read_raw,
+ .write_raw = ad7191_write_raw,
+ .write_raw_get_fmt = ad7191_write_raw_get_fmt,
+ .read_avail = ad7191_read_avail,
+ .validate_trigger = ad_sd_validate_trigger,
+};
+
+static const struct iio_chan_spec ad7191_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .address = AD7191_CH_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 1,
+ .channel2 = 2,
+ .address = AD7191_CH_AIN1_AIN2,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ {
+ .type = IIO_VOLTAGE,
+ .differential = 1,
+ .indexed = 1,
+ .channel = 3,
+ .channel2 = 4,
+ .address = AD7191_CH_AIN3_AIN4,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_OFFSET) |
+ BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 2,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 24,
+ .storagebits = 32,
+ .endianness = IIO_BE,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(3),
+};
+
+static int ad7191_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ad7191_state *st;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ ret = devm_mutex_init(dev, &st->lock);
+ if (ret)
+ return ret;
+
+ indio_dev->name = "ad7191";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad7191_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad7191_channels);
+ indio_dev->info = &ad7191_info;
+
+ ret = ad_sd_init(&st->sd, indio_dev, spi, &ad7191_sigma_delta_info);
+ if (ret)
+ return ret;
+
+ ret = devm_ad_sd_setup_buffer_and_trigger(dev, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7191_setup(indio_dev);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static const struct of_device_id ad7191_of_match[] = {
+ { .compatible = "adi,ad7191", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ad7191_of_match);
+
+static const struct spi_device_id ad7191_id_table[] = {
+ { "ad7191" },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad7191_id_table);
+
+static struct spi_driver ad7191_driver = {
+ .driver = {
+ .name = "ad7191",
+ .of_match_table = ad7191_of_match,
+ },
+ .probe = ad7191_probe,
+ .id_table = ad7191_id_table,
+};
+module_spi_driver(ad7191_driver);
+
+MODULE_AUTHOR("Alisa-Dariana Roman <alisa.roman@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD7191 ADC");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_AD_SIGMA_DELTA");
diff --git a/drivers/iio/adc/ad7192.c b/drivers/iio/adc/ad7192.c
index e96a5ae92375..530e1d307860 100644
--- a/drivers/iio/adc/ad7192.c
+++ b/drivers/iio/adc/ad7192.c
@@ -7,6 +7,7 @@
#include <linux/interrupt.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
@@ -256,6 +257,9 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
if (ret)
return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
temp = st->syscalib_mode[chan->channel];
if (sys_calib) {
if (temp == AD7192_SYSCALIB_ZERO_SCALE)
@@ -266,6 +270,8 @@ static ssize_t ad7192_write_syscalib(struct iio_dev *indio_dev,
chan->address);
}
+ iio_device_release_direct(indio_dev);
+
return ret ? ret : len;
}
@@ -693,9 +699,8 @@ static ssize_t ad7192_set(struct device *dev,
if (ret < 0)
return ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
switch ((u32)this_attr->address) {
case AD7192_REG_GPOCON:
@@ -718,7 +723,7 @@ static ssize_t ad7192_set(struct device *dev,
ret = -EINVAL;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret ? ret : len;
}
@@ -945,82 +950,83 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad7192_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+static int __ad7192_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
struct ad7192_state *st = iio_priv(indio_dev);
- int ret, i, div;
+ int i, div;
unsigned int tmp;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- if (val2 == st->scale_avail[i][1]) {
- ret = 0;
- tmp = st->conf;
- st->conf &= ~AD7192_CONF_GAIN_MASK;
- st->conf |= FIELD_PREP(AD7192_CONF_GAIN_MASK, i);
- if (tmp == st->conf)
- break;
- ad_sd_write_reg(&st->sd, AD7192_REG_CONF,
- 3, st->conf);
- ad7192_calibrate_all(st);
- break;
- }
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (!val) {
- ret = -EINVAL;
- break;
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+ if (val2 != st->scale_avail[i][1])
+ continue;
+
+ tmp = st->conf;
+ st->conf &= ~AD7192_CONF_GAIN_MASK;
+ st->conf |= FIELD_PREP(AD7192_CONF_GAIN_MASK, i);
+ if (tmp == st->conf)
+ return 0;
+ ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, st->conf);
+ ad7192_calibrate_all(st);
+ return 0;
}
+ return -EINVAL;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
div = st->fclk / (val * ad7192_get_f_order(st) * 1024);
- if (div < 1 || div > 1023) {
- ret = -EINVAL;
- break;
- }
+ if (div < 1 || div > 1023)
+ return -EINVAL;
st->mode &= ~AD7192_MODE_RATE_MASK;
st->mode |= FIELD_PREP(AD7192_MODE_RATE_MASK, div);
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
ad7192_update_filter_freq_avail(st);
- break;
+ return 0;
case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
- ret = ad7192_set_3db_filter_freq(st, val, val2 / 1000);
- break;
+ return ad7192_set_3db_filter_freq(st, val, val2 / 1000);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->oversampling_ratio_avail); i++)
- if (val == st->oversampling_ratio_avail[i]) {
- ret = 0;
- tmp = st->mode;
- st->mode &= ~AD7192_MODE_AVG_MASK;
- st->mode |= FIELD_PREP(AD7192_MODE_AVG_MASK, i);
- if (tmp == st->mode)
- break;
- ad_sd_write_reg(&st->sd, AD7192_REG_MODE,
- 3, st->mode);
- break;
- }
- ad7192_update_filter_freq_avail(st);
- break;
+ for (i = 0; i < ARRAY_SIZE(st->oversampling_ratio_avail); i++) {
+ if (val != st->oversampling_ratio_avail[i])
+ continue;
+
+ tmp = st->mode;
+ st->mode &= ~AD7192_MODE_AVG_MASK;
+ st->mode |= FIELD_PREP(AD7192_MODE_AVG_MASK, i);
+ if (tmp == st->mode)
+ return 0;
+ ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
+ ad7192_update_filter_freq_avail(st);
+ return 0;
+ }
+ return -EINVAL;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+}
+
+static int ad7192_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- mutex_unlock(&st->lock);
+ ret = __ad7192_write_raw(indio_dev, chan, val, val2, mask);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -1084,7 +1090,7 @@ static int ad7192_update_scan_mode(struct iio_dev *indio_dev, const unsigned lon
conf &= ~AD7192_CONF_CHAN_MASK;
for_each_set_bit(i, scan_mask, 8)
- conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, i);
+ conf |= FIELD_PREP(AD7192_CONF_CHAN_MASK, BIT(i));
ret = ad_sd_write_reg(&st->sd, AD7192_REG_CONF, 3, conf);
if (ret < 0)
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index 858c8be2ff1a..18559757f908 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -153,11 +153,10 @@ static int ad7266_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7266_read_single(st, val, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index b35bd4d9ef81..28b88092b4aa 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -232,16 +232,15 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
if (chan->address == AD7298_CH_TEMP)
ret = ad7298_scan_temp(st, val);
else
ret = ad7298_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7380.c b/drivers/iio/adc/ad7380.c
index 4f32cb22f140..4fcb49fdf566 100644
--- a/drivers/iio/adc/ad7380.c
+++ b/drivers/iio/adc/ad7380.c
@@ -15,6 +15,10 @@
* ad7386/7/8-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/ad7386-4-7387-4-7388-4.pdf
* adaq4370-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4370-4.pdf
* adaq4380-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4380-4.pdf
+ * adaq4381-4 : https://www.analog.com/media/en/technical-documentation/data-sheets/adaq4381-4.pdf
+ *
+ * HDL ad738x_fmc: https://analogdevicesinc.github.io/hdl/projects/ad738x_fmc/index.html
+ *
*/
#include <linux/align.h>
@@ -29,11 +33,14 @@
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/spi/offload/consumer.h>
#include <linux/spi/spi.h>
#include <linux/units.h>
#include <linux/util_macros.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/buffer-dmaengine.h>
+#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -91,6 +98,12 @@
#define AD7380_NUM_SDO_LINES 1
#define AD7380_DEFAULT_GAIN_MILLI 1000
+/*
+ * When using SPI offload, storagebits is always 32, so it can't be used to
+ * compute struct spi_transfer.len. Use realbits instead.
+ */
+#define AD7380_SPI_BYTES(scan_type) ((scan_type)->realbits > 16 ? 4 : 2)
+
struct ad7380_timing_specs {
const unsigned int t_csh_ns; /* CS minimum high time */
};
@@ -98,6 +111,7 @@ struct ad7380_timing_specs {
struct ad7380_chip_info {
const char *name;
const struct iio_chan_spec *channels;
+ const struct iio_chan_spec *offload_channels;
unsigned int num_channels;
unsigned int num_simult_channels;
bool has_hardware_gain;
@@ -110,6 +124,25 @@ struct ad7380_chip_info {
unsigned int num_vcm_supplies;
const unsigned long *available_scan_masks;
const struct ad7380_timing_specs *timing_specs;
+ u32 max_conversion_rate_hz;
+};
+
+static const struct iio_event_spec ad7380_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_shared_by_dir = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_shared_by_dir = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_shared_by_all = BIT(IIO_EV_INFO_ENABLE),
+ },
};
enum {
@@ -197,6 +230,91 @@ static const struct iio_scan_type ad7380_scan_type_16_u[] = {
},
};
+/*
+ * Define the scan types for offload mode here, since the currently available
+ * HDL only supports a storagebits value of 32.
+ */
+
+/* Extended scan types for 12-bit unsigned chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_12_u_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 14-bit signed chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_14_s_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 14,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 14-bit unsigned chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_14_u_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit signed_chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_16_s_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 's',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
+/* Extended scan types for 16-bit unsigned chips, offload support. */
+static const struct iio_scan_type ad7380_scan_type_16_u_offload[] = {
+ [AD7380_SCAN_TYPE_NORMAL] = {
+ .sign = 'u',
+ .realbits = 16,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+ [AD7380_SCAN_TYPE_RESOLUTION_BOOST] = {
+ .sign = 'u',
+ .realbits = 18,
+ .storagebits = 32,
+ .endianness = IIO_CPU,
+ },
+};
+
#define _AD7380_CHANNEL(index, bits, diff, sign, gain) { \
.type = IIO_VOLTAGE, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
@@ -214,50 +332,127 @@ static const struct iio_scan_type ad7380_scan_type_16_u[] = {
.has_ext_scan_type = 1, \
.ext_scan_type = ad7380_scan_type_##bits##_##sign, \
.num_ext_scan_type = ARRAY_SIZE(ad7380_scan_type_##bits##_##sign), \
+ .event_spec = ad7380_events, \
+ .num_event_specs = ARRAY_SIZE(ad7380_events), \
+}
+
+#define _AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign, gain) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ ((gain) ? BIT(IIO_CHAN_INFO_SCALE) : 0) | \
+ ((diff) ? 0 : BIT(IIO_CHAN_INFO_OFFSET)), \
+ .info_mask_shared_by_type = ((gain) ? 0 : BIT(IIO_CHAN_INFO_SCALE)) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .indexed = 1, \
+ .differential = (diff), \
+ .channel = (diff) ? (2 * (index)) : (index), \
+ .channel2 = (diff) ? (2 * (index) + 1) : 0, \
+ .scan_index = (index), \
+ .has_ext_scan_type = 1, \
+ .ext_scan_type = ad7380_scan_type_##bits##_##sign##_offload, \
+ .num_ext_scan_type = \
+ ARRAY_SIZE(ad7380_scan_type_##bits##_##sign##_offload), \
+ .event_spec = ad7380_events, \
+ .num_event_specs = ARRAY_SIZE(ad7380_events), \
}
+/*
+ * Notes on the offload channels:
+ * - There is no soft timestamp since everything is done in hardware.
+ * - There is a sampling frequency attribute added. This controls the SPI
+ * offload trigger.
+ * - The storagebits value depends on the SPI offload provider. Currently there
+ * is only one supported provider, namely the ADI PULSAR ADC HDL project,
+ * which always uses 32-bit words for data values, even for <= 16-bit ADCs.
+ * So the value is just hardcoded to 32 for now.
+ */
+
#define AD7380_CHANNEL(index, bits, diff, sign) \
_AD7380_CHANNEL(index, bits, diff, sign, false)
#define ADAQ4380_CHANNEL(index, bits, diff, sign) \
_AD7380_CHANNEL(index, bits, diff, sign, true)
-#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \
+#define DEFINE_AD7380_2_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(2), \
+}
+
+#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+#define DEFINE_ADAQ4380_4_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ ADAQ4380_CHANNEL(0, bits, diff, sign), \
+ ADAQ4380_CHANNEL(1, bits, diff, sign), \
+ ADAQ4380_CHANNEL(2, bits, diff, sign), \
+ ADAQ4380_CHANNEL(3, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(4), \
+}
+
+#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_CHANNEL(0, bits, diff, sign), \
+ AD7380_CHANNEL(1, bits, diff, sign), \
+ AD7380_CHANNEL(2, bits, diff, sign), \
+ AD7380_CHANNEL(3, bits, diff, sign), \
+ AD7380_CHANNEL(4, bits, diff, sign), \
+ AD7380_CHANNEL(5, bits, diff, sign), \
+ AD7380_CHANNEL(6, bits, diff, sign), \
+ AD7380_CHANNEL(7, bits, diff, sign), \
+ IIO_CHAN_SOFT_TIMESTAMP(8), \
+}
+
+#define AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign) \
+_AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign, false)
+
+#define ADAQ4380_OFFLOAD_CHANNEL(index, bits, diff, sign) \
+_AD7380_OFFLOAD_CHANNEL(index, bits, diff, sign, true)
+
+#define DEFINE_AD7380_2_OFFLOAD_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff, sign), \
- AD7380_CHANNEL(1, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(2), \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
}
-#define DEFINE_AD7380_4_CHANNEL(name, bits, diff, sign) \
+#define DEFINE_AD7380_4_OFFLOAD_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff, sign), \
- AD7380_CHANNEL(1, bits, diff, sign), \
- AD7380_CHANNEL(2, bits, diff, sign), \
- AD7380_CHANNEL(3, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(4), \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(2, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(3, bits, diff, sign), \
}
-#define DEFINE_ADAQ4380_4_CHANNEL(name, bits, diff, sign) \
-static const struct iio_chan_spec name[] = { \
- ADAQ4380_CHANNEL(0, bits, diff, sign), \
- ADAQ4380_CHANNEL(1, bits, diff, sign), \
- ADAQ4380_CHANNEL(2, bits, diff, sign), \
- ADAQ4380_CHANNEL(3, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(4), \
+#define DEFINE_ADAQ4380_4_OFFLOAD_CHANNEL(name, bits, diff, sign) \
+static const struct iio_chan_spec name[] = { \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(2, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(3, bits, diff, sign), \
}
-#define DEFINE_AD7380_8_CHANNEL(name, bits, diff, sign) \
+#define DEFINE_AD7380_8_OFFLOAD_CHANNEL(name, bits, diff, sign) \
static const struct iio_chan_spec name[] = { \
- AD7380_CHANNEL(0, bits, diff, sign), \
- AD7380_CHANNEL(1, bits, diff, sign), \
- AD7380_CHANNEL(2, bits, diff, sign), \
- AD7380_CHANNEL(3, bits, diff, sign), \
- AD7380_CHANNEL(4, bits, diff, sign), \
- AD7380_CHANNEL(5, bits, diff, sign), \
- AD7380_CHANNEL(6, bits, diff, sign), \
- AD7380_CHANNEL(7, bits, diff, sign), \
- IIO_CHAN_SOFT_TIMESTAMP(8), \
+ AD7380_OFFLOAD_CHANNEL(0, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(1, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(2, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(3, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(4, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(5, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(6, bits, diff, sign), \
+ AD7380_OFFLOAD_CHANNEL(7, bits, diff, sign), \
}
/* fully differential */
@@ -266,6 +461,7 @@ DEFINE_AD7380_2_CHANNEL(ad7381_channels, 14, 1, s);
DEFINE_AD7380_4_CHANNEL(ad7380_4_channels, 16, 1, s);
DEFINE_AD7380_4_CHANNEL(ad7381_4_channels, 14, 1, s);
DEFINE_ADAQ4380_4_CHANNEL(adaq4380_4_channels, 16, 1, s);
+DEFINE_ADAQ4380_4_CHANNEL(adaq4381_4_channels, 14, 1, s);
/* pseudo differential */
DEFINE_AD7380_2_CHANNEL(ad7383_channels, 16, 0, s);
DEFINE_AD7380_2_CHANNEL(ad7384_channels, 14, 0, s);
@@ -280,6 +476,28 @@ DEFINE_AD7380_8_CHANNEL(ad7386_4_channels, 16, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7387_4_channels, 14, 0, u);
DEFINE_AD7380_8_CHANNEL(ad7388_4_channels, 12, 0, u);
+/* offload channels */
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7380_offload_channels, 16, 1, s);
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7381_offload_channels, 14, 1, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7380_4_offload_channels, 16, 1, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7381_4_offload_channels, 14, 1, s);
+DEFINE_ADAQ4380_4_OFFLOAD_CHANNEL(adaq4380_4_offload_channels, 16, 1, s);
+DEFINE_ADAQ4380_4_OFFLOAD_CHANNEL(adaq4381_4_offload_channels, 14, 1, s);
+
+/* pseudo differential */
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7383_offload_channels, 16, 0, s);
+DEFINE_AD7380_2_OFFLOAD_CHANNEL(ad7384_offload_channels, 14, 0, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7383_4_offload_channels, 16, 0, s);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7384_4_offload_channels, 14, 0, s);
+
+/* Single ended */
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7386_offload_channels, 16, 0, u);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7387_offload_channels, 14, 0, u);
+DEFINE_AD7380_4_OFFLOAD_CHANNEL(ad7388_offload_channels, 12, 0, u);
+DEFINE_AD7380_8_OFFLOAD_CHANNEL(ad7386_4_offload_channels, 16, 0, u);
+DEFINE_AD7380_8_OFFLOAD_CHANNEL(ad7387_4_offload_channels, 14, 0, u);
+DEFINE_AD7380_8_OFFLOAD_CHANNEL(ad7388_4_offload_channels, 12, 0, u);
+
static const char * const ad7380_supplies[] = {
"vcc", "vlogic",
};
@@ -386,28 +604,33 @@ static const int ad7380_gains[] = {
static const struct ad7380_chip_info ad7380_chip_info = {
.name = "ad7380",
.channels = ad7380_channels,
+ .offload_channels = ad7380_offload_channels,
.num_channels = ARRAY_SIZE(ad7380_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7381_chip_info = {
.name = "ad7381",
.channels = ad7381_channels,
+ .offload_channels = ad7381_offload_channels,
.num_channels = ARRAY_SIZE(ad7381_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7383_chip_info = {
.name = "ad7383",
.channels = ad7383_channels,
+ .offload_channels = ad7383_offload_channels,
.num_channels = ARRAY_SIZE(ad7383_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -416,11 +639,13 @@ static const struct ad7380_chip_info ad7383_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7384_chip_info = {
.name = "ad7384",
.channels = ad7384_channels,
+ .offload_channels = ad7384_offload_channels,
.num_channels = ARRAY_SIZE(ad7384_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -429,11 +654,13 @@ static const struct ad7380_chip_info ad7384_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_2_channel_vcm_supplies),
.available_scan_masks = ad7380_2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7386_chip_info = {
.name = "ad7386",
.channels = ad7386_channels,
+ .offload_channels = ad7386_offload_channels,
.num_channels = ARRAY_SIZE(ad7386_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -441,11 +668,13 @@ static const struct ad7380_chip_info ad7386_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7387_chip_info = {
.name = "ad7387",
.channels = ad7387_channels,
+ .offload_channels = ad7387_offload_channels,
.num_channels = ARRAY_SIZE(ad7387_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -453,11 +682,13 @@ static const struct ad7380_chip_info ad7387_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7388_chip_info = {
.name = "ad7388",
.channels = ad7388_channels,
+ .offload_channels = ad7388_offload_channels,
.num_channels = ARRAY_SIZE(ad7388_channels),
.num_simult_channels = 2,
.supplies = ad7380_supplies,
@@ -465,11 +696,13 @@ static const struct ad7380_chip_info ad7388_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x2_channel_scan_masks,
.timing_specs = &ad7380_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7380_4_chip_info = {
.name = "ad7380-4",
.channels = ad7380_4_channels,
+ .offload_channels = ad7380_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7380_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -477,22 +710,26 @@ static const struct ad7380_chip_info ad7380_4_chip_info = {
.external_ref_only = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7381_4_chip_info = {
.name = "ad7381-4",
.channels = ad7381_4_channels,
+ .offload_channels = ad7381_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7381_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
.num_supplies = ARRAY_SIZE(ad7380_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7383_4_chip_info = {
.name = "ad7383-4",
.channels = ad7383_4_channels,
+ .offload_channels = ad7383_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7383_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -501,11 +738,13 @@ static const struct ad7380_chip_info ad7383_4_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7384_4_chip_info = {
.name = "ad7384-4",
.channels = ad7384_4_channels,
+ .offload_channels = ad7384_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7384_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -514,11 +753,13 @@ static const struct ad7380_chip_info ad7384_4_chip_info = {
.num_vcm_supplies = ARRAY_SIZE(ad7380_4_channel_vcm_supplies),
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7386_4_chip_info = {
.name = "ad7386-4",
.channels = ad7386_4_channels,
+ .offload_channels = ad7386_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7386_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -526,11 +767,13 @@ static const struct ad7380_chip_info ad7386_4_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7387_4_chip_info = {
.name = "ad7387-4",
.channels = ad7387_4_channels,
+ .offload_channels = ad7387_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7387_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -538,11 +781,13 @@ static const struct ad7380_chip_info ad7387_4_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info ad7388_4_chip_info = {
.name = "ad7388-4",
.channels = ad7388_4_channels,
+ .offload_channels = ad7388_4_offload_channels,
.num_channels = ARRAY_SIZE(ad7388_4_channels),
.num_simult_channels = 4,
.supplies = ad7380_supplies,
@@ -550,11 +795,13 @@ static const struct ad7380_chip_info ad7388_4_chip_info = {
.has_mux = true,
.available_scan_masks = ad7380_2x4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
};
static const struct ad7380_chip_info adaq4370_4_chip_info = {
.name = "adaq4370-4",
.channels = adaq4380_4_channels,
+ .offload_channels = adaq4380_4_offload_channels,
.num_channels = ARRAY_SIZE(adaq4380_4_channels),
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
@@ -563,11 +810,13 @@ static const struct ad7380_chip_info adaq4370_4_chip_info = {
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 2 * MEGA,
};
static const struct ad7380_chip_info adaq4380_4_chip_info = {
.name = "adaq4380-4",
.channels = adaq4380_4_channels,
+ .offload_channels = adaq4380_4_offload_channels,
.num_channels = ARRAY_SIZE(adaq4380_4_channels),
.num_simult_channels = 4,
.supplies = adaq4380_supplies,
@@ -576,13 +825,32 @@ static const struct ad7380_chip_info adaq4380_4_chip_info = {
.has_hardware_gain = true,
.available_scan_masks = ad7380_4_channel_scan_masks,
.timing_specs = &ad7380_4_timing,
+ .max_conversion_rate_hz = 4 * MEGA,
+};
+
+static const struct ad7380_chip_info adaq4381_4_chip_info = {
+ .name = "adaq4381-4",
+ .channels = adaq4381_4_channels,
+ .offload_channels = adaq4381_4_offload_channels,
+ .num_channels = ARRAY_SIZE(adaq4381_4_channels),
+ .num_simult_channels = 4,
+ .supplies = adaq4380_supplies,
+ .num_supplies = ARRAY_SIZE(adaq4380_supplies),
+ .adaq_internal_ref_only = true,
+ .has_hardware_gain = true,
+ .available_scan_masks = ad7380_4_channel_scan_masks,
+ .timing_specs = &ad7380_4_timing,
+};
+
+static const struct spi_offload_config ad7380_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
};
struct ad7380_state {
const struct ad7380_chip_info *chip_info;
struct spi_device *spi;
struct regmap *regmap;
- unsigned int oversampling_ratio;
bool resolution_boost_enabled;
unsigned int ch;
bool seq;
@@ -594,6 +862,13 @@ struct ad7380_state {
struct spi_message normal_msg;
struct spi_transfer seq_xfer[4];
struct spi_message seq_msg;
+ struct spi_transfer offload_xfer;
+ struct spi_message offload_msg;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ unsigned long offload_trigger_hz;
+
+ int sample_freq_range[3];
/*
* DMA (thus cache coherency maintenance) requires the transfer buffers
* to live in their own cache lines.
@@ -663,6 +938,20 @@ static int ad7380_regmap_reg_read(void *context, unsigned int reg,
return 0;
}
+static const struct reg_default ad7380_reg_defaults[] = {
+ { AD7380_REG_ADDR_ALERT_LOW_TH, 0x800 },
+ { AD7380_REG_ADDR_ALERT_HIGH_TH, 0x7FF },
+};
+
+static const struct regmap_range ad7380_volatile_reg_ranges[] = {
+ regmap_reg_range(AD7380_REG_ADDR_CONFIG2, AD7380_REG_ADDR_ALERT),
+};
+
+static const struct regmap_access_table ad7380_volatile_regs = {
+ .yes_ranges = ad7380_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ad7380_volatile_reg_ranges),
+};
+
static const struct regmap_config ad7380_regmap_config = {
.reg_bits = 3,
.val_bits = 12,
@@ -670,20 +959,59 @@ static const struct regmap_config ad7380_regmap_config = {
.reg_write = ad7380_regmap_reg_write,
.max_register = AD7380_REG_ADDR_ALERT_HIGH_TH,
.can_sleep = true,
+ .reg_defaults = ad7380_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(ad7380_reg_defaults),
+ .volatile_table = &ad7380_volatile_regs,
+ .cache_type = REGCACHE_MAPLE,
};
static int ad7380_debugfs_reg_access(struct iio_dev *indio_dev, u32 reg,
u32 writeval, u32 *readval)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- struct ad7380_state *st = iio_priv(indio_dev);
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
- if (readval)
- return regmap_read(st->regmap, reg, readval);
- else
- return regmap_write(st->regmap, reg, writeval);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ if (readval)
+ ret = regmap_read(st->regmap, reg, readval);
+ else
+ ret = regmap_write(st->regmap, reg, writeval);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+/**
+ * ad7380_regval_to_osr - convert OSR register value to ratio
+ * @regval: register value to check
+ *
+ * Returns: the oversampling ratio corresponding to the OSR register value.
+ * If regval is out of bounds, return 1 (oversampling disabled).
+ *
+ */
+static int ad7380_regval_to_osr(unsigned int regval)
+{
+ if (regval >= ARRAY_SIZE(ad7380_oversampling_ratios))
+ return 1;
+
+ return ad7380_oversampling_ratios[regval];
+}
+
+static int ad7380_get_osr(struct ad7380_state *st, int *val)
+{
+ u32 tmp;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD7380_REG_ADDR_CONFIG1, &tmp);
+ if (ret)
+ return ret;
+
+ *val = ad7380_regval_to_osr(FIELD_GET(AD7380_CONFIG1_OSR, tmp));
+
+ return 0;
}
/*
@@ -701,11 +1029,15 @@ static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
.unit = SPI_DELAY_UNIT_NSECS,
}
};
- int ret;
+ int oversampling_ratio, ret;
if (st->ch == ch)
return 0;
+ ret = ad7380_get_osr(st, &oversampling_ratio);
+ if (ret)
+ return ret;
+
ret = regmap_update_bits(st->regmap,
AD7380_REG_ADDR_CONFIG1,
AD7380_CONFIG1_CH,
@@ -716,9 +1048,9 @@ static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
st->ch = ch;
- if (st->oversampling_ratio > 1)
+ if (oversampling_ratio > 1)
xfer.delay.value = T_CONVERT_0_NS +
- T_CONVERT_X_NS * (st->oversampling_ratio - 1) *
+ T_CONVERT_X_NS * (oversampling_ratio - 1) *
st->chip_info->num_simult_channels / AD7380_NUM_SDO_LINES;
return spi_sync_transfer(st->spi, &xfer, 1);
@@ -729,20 +1061,25 @@ static int ad7380_set_ch(struct ad7380_state *st, unsigned int ch)
* @st: device instance specific state
* @scan_type: current scan type
*/
-static void ad7380_update_xfers(struct ad7380_state *st,
+static int ad7380_update_xfers(struct ad7380_state *st,
const struct iio_scan_type *scan_type)
{
struct spi_transfer *xfer = st->seq ? st->seq_xfer : st->normal_xfer;
unsigned int t_convert = T_CONVERT_NS;
+ int oversampling_ratio, ret;
/*
* In the case of oversampling, conversion time is higher than in normal
* mode. Technically T_CONVERT_X_NS is lower for some chips, but we use
* the maximum value for simplicity for now.
*/
- if (st->oversampling_ratio > 1)
+ ret = ad7380_get_osr(st, &oversampling_ratio);
+ if (ret)
+ return ret;
+
+ if (oversampling_ratio > 1)
t_convert = T_CONVERT_0_NS + T_CONVERT_X_NS *
- (st->oversampling_ratio - 1) *
+ (oversampling_ratio - 1) *
st->chip_info->num_simult_channels / AD7380_NUM_SDO_LINES;
if (st->seq) {
@@ -751,11 +1088,11 @@ static void ad7380_update_xfers(struct ad7380_state *st,
xfer[2].bits_per_word = xfer[3].bits_per_word =
scan_type->realbits;
xfer[2].len = xfer[3].len =
- BITS_TO_BYTES(scan_type->storagebits) *
+ AD7380_SPI_BYTES(scan_type) *
st->chip_info->num_simult_channels;
xfer[3].rx_buf = xfer[2].rx_buf + xfer[2].len;
/* Additional delay required here when oversampling is enabled */
- if (st->oversampling_ratio > 1)
+ if (oversampling_ratio > 1)
xfer[2].delay.value = t_convert;
else
xfer[2].delay.value = 0;
@@ -764,16 +1101,145 @@ static void ad7380_update_xfers(struct ad7380_state *st,
xfer[0].delay.value = t_convert;
xfer[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfer[1].bits_per_word = scan_type->realbits;
- xfer[1].len = BITS_TO_BYTES(scan_type->storagebits) *
+ xfer[1].len = AD7380_SPI_BYTES(scan_type) *
st->chip_info->num_simult_channels;
}
+
+ return 0;
}
+static int ad7380_set_sample_freq(struct ad7380_state *st, int val)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = val,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(st->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ st->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
+
+static int ad7380_init_offload_msg(struct ad7380_state *st,
+ struct iio_dev *indio_dev)
+{
+ struct spi_transfer *xfer = &st->offload_xfer;
+ struct device *dev = &st->spi->dev;
+ const struct iio_scan_type *scan_type;
+ int oversampling_ratio;
+ int ret;
+
+ scan_type = iio_get_current_scan_type(indio_dev,
+ &indio_dev->channels[0]);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ if (st->chip_info->has_mux) {
+ int index;
+
+ ret = iio_active_scan_mask_index(indio_dev);
+ if (ret < 0)
+ return ret;
+
+ index = ret;
+ if (index == AD7380_SCAN_MASK_SEQ) {
+ ret = regmap_set_bits(st->regmap, AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ);
+ if (ret)
+ return ret;
+
+ st->seq = true;
+ } else {
+ ret = ad7380_set_ch(st, index);
+ if (ret)
+ return ret;
+ }
+ }
+
+ ret = ad7380_get_osr(st, &oversampling_ratio);
+ if (ret)
+ return ret;
+
+ xfer->bits_per_word = scan_type->realbits;
+ xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ xfer->len = AD7380_SPI_BYTES(scan_type) * st->chip_info->num_simult_channels;
+
+ spi_message_init_with_transfers(&st->offload_msg, xfer, 1);
+ st->offload_msg.offload = st->offload;
+
+ ret = spi_optimize_message(st->spi, &st->offload_msg);
+ if (ret) {
+ dev_err(dev, "failed to prepare offload msg, err: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ad7380_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = st->offload_trigger_hz,
+ },
+ };
+ int ret;
+
+ ret = ad7380_init_offload_msg(st, indio_dev);
+ if (ret)
+ return ret;
+
+ ret = spi_offload_trigger_enable(st->offload, st->offload_trigger, &config);
+ if (ret)
+ spi_unoptimize_message(&st->offload_msg);
+
+ return ret;
+}
+
+static int ad7380_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (st->seq) {
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_SEQ,
+ FIELD_PREP(AD7380_CONFIG1_SEQ, 0));
+ if (ret)
+ return ret;
+
+ st->seq = false;
+ }
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ spi_unoptimize_message(&st->offload_msg);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad7380_offload_buffer_setup_ops = {
+ .postenable = ad7380_offload_buffer_postenable,
+ .predisable = ad7380_offload_buffer_predisable,
+};
+
static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
{
struct ad7380_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
struct spi_message *msg = &st->normal_msg;
+ int ret;
/*
* Currently, we always read all channels at the same time. The scan_type
@@ -785,7 +1251,6 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
if (st->chip_info->has_mux) {
unsigned int index;
- int ret;
/*
* Depending on the requested scan_mask and current state,
@@ -816,7 +1281,9 @@ static int ad7380_triggered_buffer_preenable(struct iio_dev *indio_dev)
}
- ad7380_update_xfers(st, scan_type);
+ ret = ad7380_update_xfers(st, scan_type);
+ if (ret)
+ return ret;
return spi_optimize_message(st->spi, msg);
}
@@ -889,13 +1356,15 @@ static int ad7380_read_direct(struct ad7380_state *st, unsigned int scan_index,
return ret;
}
- ad7380_update_xfers(st, scan_type);
+ ret = ad7380_update_xfers(st, scan_type);
+ if (ret)
+ return ret;
ret = spi_sync(st->spi, &st->normal_msg);
if (ret < 0)
return ret;
- if (scan_type->storagebits > 16) {
+ if (scan_type->realbits > 16) {
if (scan_type->sign == 's')
*val = sign_extend32(*(u32 *)(st->scan_data + 4 * index),
scan_type->realbits - 1);
@@ -920,6 +1389,7 @@ static int ad7380_read_raw(struct iio_dev *indio_dev,
{
struct ad7380_state *st = iio_priv(indio_dev);
const struct iio_scan_type *scan_type;
+ int ret;
scan_type = iio_get_current_scan_type(indio_dev, chan);
@@ -928,11 +1398,15 @@ static int ad7380_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- return ad7380_read_direct(st, chan->scan_index,
- scan_type, val);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7380_read_direct(st, chan->scan_index,
+ scan_type, val);
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
case IIO_CHAN_INFO_SCALE:
/*
* According to the datasheet, the LSB size is:
@@ -961,8 +1435,19 @@ static int ad7380_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- *val = st->oversampling_ratio;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad7380_get_osr(st, val);
+
+ iio_device_release_direct(indio_dev);
+
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->offload_trigger_hz;
return IIO_VAL_INT;
default:
return -EINVAL;
@@ -974,6 +1459,8 @@ static int ad7380_read_avail(struct iio_dev *indio_dev,
const int **vals, int *type, int *length,
long mask)
{
+ struct ad7380_state *st = iio_priv(indio_dev);
+
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
*vals = ad7380_oversampling_ratios;
@@ -981,6 +1468,10 @@ static int ad7380_read_avail(struct iio_dev *indio_dev,
*type = IIO_VAL_INT;
return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = st->sample_freq_range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
default:
return -EINVAL;
}
@@ -1008,47 +1499,61 @@ static int ad7380_osr_to_regval(int ratio)
return -EINVAL;
}
+static int ad7380_set_oversampling_ratio(struct ad7380_state *st, int val)
+{
+ int ret, osr, boost;
+
+ osr = ad7380_osr_to_regval(val);
+ if (osr < 0)
+ return osr;
+
+ /* always enable resolution boost when oversampling is enabled */
+ boost = osr > 0 ? 1 : 0;
+
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_OSR | AD7380_CONFIG1_RES,
+ FIELD_PREP(AD7380_CONFIG1_OSR, osr) |
+ FIELD_PREP(AD7380_CONFIG1_RES, boost));
+
+ if (ret)
+ return ret;
+
+ st->resolution_boost_enabled = boost;
+
+ /*
+ * Perform a soft reset. This will flush the oversampling
+ * block and FIFO but will maintain the content of the
+ * configurable registers.
+ */
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG2,
+ AD7380_CONFIG2_RESET,
+ FIELD_PREP(AD7380_CONFIG2_RESET,
+ AD7380_CONFIG2_RESET_SOFT));
+ return ret;
+}
static int ad7380_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val,
int val2, long mask)
{
struct ad7380_state *st = iio_priv(indio_dev);
- int ret, osr, boost;
+ int ret;
switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1)
+ return -EINVAL;
+ return ad7380_set_sample_freq(st, val);
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- osr = ad7380_osr_to_regval(val);
- if (osr < 0)
- return osr;
-
- /* always enable resolution boost when oversampling is enabled */
- boost = osr > 0 ? 1 : 0;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_update_bits(st->regmap,
- AD7380_REG_ADDR_CONFIG1,
- AD7380_CONFIG1_OSR | AD7380_CONFIG1_RES,
- FIELD_PREP(AD7380_CONFIG1_OSR, osr) |
- FIELD_PREP(AD7380_CONFIG1_RES, boost));
+ ret = ad7380_set_oversampling_ratio(st, val);
- if (ret)
- return ret;
+ iio_device_release_direct(indio_dev);
- st->oversampling_ratio = val;
- st->resolution_boost_enabled = boost;
-
- /*
- * Perform a soft reset. This will flush the oversampling
- * block and FIFO but will maintain the content of the
- * configurable registers.
- */
- return regmap_update_bits(st->regmap,
- AD7380_REG_ADDR_CONFIG2,
- AD7380_CONFIG2_RESET,
- FIELD_PREP(AD7380_CONFIG2_RESET,
- AD7380_CONFIG2_RESET_SOFT));
- }
- unreachable();
+ return ret;
default:
return -EINVAL;
}
@@ -1063,12 +1568,179 @@ static int ad7380_get_current_scan_type(const struct iio_dev *indio_dev,
: AD7380_SCAN_TYPE_NORMAL;
}
+static int ad7380_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int tmp, ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_read(st->regmap, AD7380_REG_ADDR_CONFIG1, &tmp);
+
+ iio_device_release_direct(indio_dev);
+
+ if (ret)
+ return ret;
+
+ return FIELD_GET(AD7380_CONFIG1_ALERTEN, tmp);
+}
+
+static int ad7380_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ bool state)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = regmap_update_bits(st->regmap,
+ AD7380_REG_ADDR_CONFIG1,
+ AD7380_CONFIG1_ALERTEN,
+ FIELD_PREP(AD7380_CONFIG1_ALERTEN, state));
+
+ iio_device_release_direct(indio_dev);
+
+ return ret;
+}
+
+static int ad7380_get_alert_th(struct ad7380_state *st,
+ enum iio_event_direction dir,
+ int *val)
+{
+ int ret, tmp;
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ ret = regmap_read(st->regmap,
+ AD7380_REG_ADDR_ALERT_HIGH_TH,
+ &tmp);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD7380_ALERT_HIGH_TH, tmp);
+ return IIO_VAL_INT;
+ case IIO_EV_DIR_FALLING:
+ ret = regmap_read(st->regmap,
+ AD7380_REG_ADDR_ALERT_LOW_TH,
+ &tmp);
+ if (ret)
+ return ret;
+
+ *val = FIELD_GET(AD7380_ALERT_LOW_TH, tmp);
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7380_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7380_get_alert_th(st, dir, val);
+
+ iio_device_release_direct(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7380_set_alert_th(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_direction dir,
+ int val)
+{
+ struct ad7380_state *st = iio_priv(indio_dev);
+ const struct iio_scan_type *scan_type;
+ u16 th;
+
+ /*
+ * According to the datasheet,
+ * AD7380_REG_ADDR_ALERT_HIGH_TH[11:0] are the 12 MSBs of the
+ * 16-bit internal alert high register; the LSBs are set to 0xf.
+ * AD7380_REG_ADDR_ALERT_LOW_TH[11:0] are the 12 MSBs of the
+ * 16-bit internal alert low register; the LSBs are set to 0x0.
+ *
+ * When the alert is enabled, the conversion result from the ADC is
+ * compared immediately to the alert high/low thresholds, before any
+ * oversampling. This means that the thresholds are the same for
+ * normal mode and oversampling mode.
+ */
+
+ /* Extract the 12 MSBs of val */
+ scan_type = iio_get_current_scan_type(indio_dev, chan);
+ if (IS_ERR(scan_type))
+ return PTR_ERR(scan_type);
+
+ th = val >> (scan_type->realbits - 12);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ return regmap_write(st->regmap,
+ AD7380_REG_ADDR_ALERT_HIGH_TH,
+ th);
+ case IIO_EV_DIR_FALLING:
+ return regmap_write(st->regmap,
+ AD7380_REG_ADDR_ALERT_LOW_TH,
+ th);
+ default:
+ return -EINVAL;
+ }
+}
+
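A minimal sketch of the packing done by ad7380_set_alert_th() above; the helper name and the example values are illustrative, not part of the driver.

/*
 * On a 16-bit part (realbits == 16) an alert level of 0x7FF0 is stored
 * as 0x7FF0 >> (16 - 12) = 0x7FF: only the 12 MSBs reach the
 * ALERT_*_TH register, the device fills in the LSBs internally.
 */
static inline u16 ad7380_alert_th_pack_example(int code, unsigned int realbits)
{
	return code >> (realbits - 12);
}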
+static int ad7380_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad7380_set_alert_th(indio_dev, chan, dir, val);
+
+ iio_device_release_direct(indio_dev);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct iio_info ad7380_info = {
.read_raw = &ad7380_read_raw,
.read_avail = &ad7380_read_avail,
.write_raw = &ad7380_write_raw,
.get_current_scan_type = &ad7380_get_current_scan_type,
.debugfs_reg_access = &ad7380_debugfs_reg_access,
+ .read_event_config = &ad7380_read_event_config,
+ .write_event_config = &ad7380_write_event_config,
+ .read_event_value = &ad7380_read_event_value,
+ .write_event_value = &ad7380_write_event_value,
};
static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
@@ -1092,7 +1764,6 @@ static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
}
/* This is the default value after reset. */
- st->oversampling_ratio = 1;
st->ch = 0;
st->seq = false;
@@ -1103,6 +1774,53 @@ static int ad7380_init(struct ad7380_state *st, bool external_ref_en)
AD7380_NUM_SDO_LINES));
}
+static int ad7380_probe_spi_offload(struct iio_dev *indio_dev,
+ struct ad7380_state *st)
+{
+ struct spi_device *spi = st->spi;
+ struct device *dev = &spi->dev;
+ struct dma_chan *rx_dma;
+ int sample_rate, ret;
+
+ indio_dev->setup_ops = &ad7380_offload_buffer_setup_ops;
+ indio_dev->channels = st->chip_info->offload_channels;
+ /* Drop the soft timestamp channel, which the offload channel arrays omit. */
+ indio_dev->num_channels--;
+
+ st->offload_trigger = devm_spi_offload_trigger_get(dev, st->offload,
+ SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(st->offload_trigger),
+ "failed to get offload trigger\n");
+
+ sample_rate = st->chip_info->max_conversion_rate_hz *
+ AD7380_NUM_SDO_LINES / st->chip_info->num_simult_channels;
+
+ st->sample_freq_range[0] = 1; /* min */
+ st->sample_freq_range[1] = 1; /* step */
+ st->sample_freq_range[2] = sample_rate; /* max */
+
+ /*
+ * Start with a fairly low frequency to allow 32x oversampling; the
+ * user is then responsible for adjusting it to the specific use case.
+ */
+ ret = ad7380_set_sample_freq(st, sample_rate / 32);
+ if (ret)
+ return ret;
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev, st->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev,
+ rx_dma, IIO_BUFFER_DIRECTION_IN);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot setup dma buffer\n");
+
+ return 0;
+}
+
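A worked example of the rate derivation in ad7380_probe_spi_offload() above, assuming AD7380_NUM_SDO_LINES is 1 (the other values come from the ad7380 chip info):

/*
 * ad7380: max_conversion_rate_hz = 4 MHz, num_simult_channels = 2, so
 * sample_rate = 4000000 * 1 / 2 = 2000000 Hz. The advertised range is
 * then [1, 2000000] Hz in 1 Hz steps, and the initial trigger rate is
 * 2000000 / 32 = 62500 Hz.
 */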
static int ad7380_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
@@ -1274,12 +1992,24 @@ static int ad7380_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->available_scan_masks = st->chip_info->available_scan_masks;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- iio_pollfunc_store_time,
- ad7380_trigger_handler,
- &ad7380_buffer_setup_ops);
- if (ret)
- return ret;
+ st->offload = devm_spi_offload_get(dev, spi, &ad7380_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get offload\n");
+
+ /* If there is no SPI offload, fall back to low-speed usage. */
+ if (ret == -ENODEV) {
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad7380_trigger_handler,
+ &ad7380_buffer_setup_ops);
+ if (ret)
+ return ret;
+ } else {
+ ret = ad7380_probe_spi_offload(indio_dev, st);
+ if (ret)
+ return ret;
+ }
ret = ad7380_init(st, external_ref_en);
if (ret)
@@ -1305,6 +2035,7 @@ static const struct of_device_id ad7380_of_match_table[] = {
{ .compatible = "adi,ad7388-4", .data = &ad7388_4_chip_info },
{ .compatible = "adi,adaq4370-4", .data = &adaq4370_4_chip_info },
{ .compatible = "adi,adaq4380-4", .data = &adaq4380_4_chip_info },
+ { .compatible = "adi,adaq4381-4", .data = &adaq4381_4_chip_info },
{ }
};
@@ -1325,6 +2056,7 @@ static const struct spi_device_id ad7380_id_table[] = {
{ "ad7388-4", (kernel_ulong_t)&ad7388_4_chip_info },
{ "adaq4370-4", (kernel_ulong_t)&adaq4370_4_chip_info },
{ "adaq4380-4", (kernel_ulong_t)&adaq4380_4_chip_info },
+ { "adaq4381-4", (kernel_ulong_t)&adaq4381_4_chip_info },
{ }
};
MODULE_DEVICE_TABLE(spi, ad7380_id_table);
@@ -1342,3 +2074,4 @@ module_spi_driver(ad7380_driver);
MODULE_AUTHOR("Stefan Popa <stefan.popa@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD738x ADC driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index aeb8e383fe71..37b0515cf4fc 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -138,11 +138,10 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7476_scan_direct(st);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7606.c b/drivers/iio/adc/ad7606.c
index d8e3c7a43678..1a314fddd7eb 100644
--- a/drivers/iio/adc/ad7606.c
+++ b/drivers/iio/adc/ad7606.c
@@ -5,6 +5,7 @@
* Copyright 2011 Analog Devices Inc.
*/
+#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -85,6 +86,10 @@ static const unsigned int ad7606_oversampling_avail[7] = {
1, 2, 4, 8, 16, 32, 64,
};
+static const unsigned int ad7606b_oversampling_avail[9] = {
+ 1, 2, 4, 8, 16, 32, 64, 128, 256,
+};
+
static const unsigned int ad7616_oversampling_avail[8] = {
1, 2, 4, 8, 16, 32, 64, 128,
};
@@ -187,6 +192,8 @@ static int ad7608_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
static int ad7609_chan_scale_setup(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
+static int ad7616_sw_mode_setup(struct iio_dev *indio_dev);
+static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev);
const struct ad7606_chip_info ad7605_4_info = {
.channels = ad7605_channels,
@@ -239,6 +246,7 @@ const struct ad7606_chip_info ad7606b_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606b_info, "IIO_AD7606");
@@ -250,6 +258,7 @@ const struct ad7606_chip_info ad7606c_16_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_16_info, "IIO_AD7606");
@@ -294,6 +303,7 @@ const struct ad7606_chip_info ad7606c_18_info = {
.oversampling_avail = ad7606_oversampling_avail,
.oversampling_num = ARRAY_SIZE(ad7606_oversampling_avail),
.scale_setup_cb = ad7606c_18bit_chan_scale_setup,
+ .sw_setup_cb = ad7606b_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7606c_18_info, "IIO_AD7606");
@@ -307,6 +317,7 @@ const struct ad7606_chip_info ad7616_info = {
.oversampling_num = ARRAY_SIZE(ad7616_oversampling_avail),
.os_req_reset = true,
.scale_setup_cb = ad7606_16bit_chan_scale_setup,
+ .sw_setup_cb = ad7616_sw_mode_setup,
};
EXPORT_SYMBOL_NS_GPL(ad7616_info, "IIO_AD7606");
@@ -752,13 +763,13 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = ad7606_scan_direct(indio_dev, chan->address, val);
- if (ret < 0)
- return ret;
- return IIO_VAL_INT;
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad7606_scan_direct(indio_dev, chan->address, val);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
if (st->sw_mode_en)
ch = chan->address;
@@ -818,8 +829,7 @@ static int ad7606_write_os_hw(struct iio_dev *indio_dev, int val)
values[0] = val & GENMASK(2, 0);
- gpiod_set_array_value(st->gpio_os->ndescs, st->gpio_os->desc,
- st->gpio_os->info, values);
+ gpiod_multi_set_value_cansleep(st->gpio_os, values);
/* AD7616 requires a reset to update value */
if (st->chip_info->os_req_reset)
@@ -852,7 +862,11 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
}
val = (val * MICRO) + val2;
i = find_closest(val, scale_avail_uv, cs->num_scales);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = st->write_scale(indio_dev, ch, i + cs->reg_offset);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
cs->range = i;
@@ -863,7 +877,11 @@ static int ad7606_write_raw(struct iio_dev *indio_dev,
return -EINVAL;
i = find_closest(val, st->oversampling_avail,
st->num_os_ratios);
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = st->write_os(indio_dev, i);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
st->oversampling = st->oversampling_avail[i];
@@ -1047,7 +1065,7 @@ static int ad7606_read_avail(struct iio_dev *indio_dev,
cs = &st->chan_scales[ch];
*vals = (int *)cs->scale_avail;
- *length = cs->num_scales;
+ *length = cs->num_scales * 2;
*type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
@@ -1138,16 +1156,117 @@ static const struct iio_trigger_ops ad7606_trigger_ops = {
.validate_device = iio_trigger_validate_own_device,
};
-static int ad7606_sw_mode_setup(struct iio_dev *indio_dev)
+static int ad7606_write_mask(struct ad7606_state *st, unsigned int addr,
+ unsigned long mask, unsigned int val)
+{
+ int readval;
+
+ readval = st->bops->reg_read(st, addr);
+ if (readval < 0)
+ return readval;
+
+ readval &= ~mask;
+ readval |= val;
+
+ return st->bops->reg_write(st, addr, readval);
+}
+
+static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
{
struct ad7606_state *st = iio_priv(indio_dev);
+ unsigned int ch_addr, mode, ch_index;
- st->sw_mode_en = st->bops->sw_mode_config &&
- device_property_present(st->dev, "adi,sw-mode");
- if (!st->sw_mode_en)
- return 0;
+ /*
+ * The AD7616 has 16 channels divided into group A and group B.
+ * The ranges of group A channels are stored in registers at address
+ * offset 4, while group B channels use registers at address offset 6.
+ * The last bit of the channel number determines whether it belongs to
+ * group A or B, because the IIO channel order is 0A, 0B, 1A, 1B...
+ */
+ ch_index = ch >> 1;
+
+ ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
+
+ if ((ch & 0x1) == 0) /* channel A */
+ ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
+ else /* channel B */
+ ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
+
+ /* 0b01 for 2.5 V, 0b10 for 5 V and 0b11 for 10 V */
+ mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
+
+ return ad7606_write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
+ mode);
+}
+
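A worked expansion of the register math above, with an illustrative channel and range:

/*
 * IIO channel 5 is channel 2B: ch_index = 5 >> 1 = 2, so
 * ch_addr = AD7616_RANGE_CH_ADDR(2) + AD7616_RANGE_CH_B_ADDR_OFF
 *         = 0x00 + 0x06 = 0x06,
 * AD7616_RANGE_CH_MSK(2) = 0b11 << 4 = 0x30, and the 5 V range code
 * packs as AD7616_RANGE_CH_MODE(2, 0b10) = 0b10 << 4 = 0x20.
 */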
+static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_OS_MASK, val << 2);
+}
+
+static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return ad7606_write_mask(st, AD7606_RANGE_CH_ADDR(ch),
+ AD7606_RANGE_CH_MSK(ch),
+ AD7606_RANGE_CH_MODE(ch, val));
+}
+
+static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+
+ return st->bops->reg_write(st, AD7606_OS_MODE, val);
+}
+
+static int ad7616_sw_mode_setup(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ int ret;
+
+ /*
+ * Scale can be configured individually for each channel
+ * in software mode.
+ */
+
+ st->write_scale = ad7616_write_scale_sw;
+ st->write_os = &ad7616_write_os_sw;
+
+ ret = st->bops->sw_mode_config(indio_dev);
+ if (ret)
+ return ret;
- indio_dev->info = &ad7606_info_sw_mode;
+ /* Activate Burst mode and SEQEN MODE */
+ return ad7606_write_mask(st, AD7616_CONFIGURATION_REGISTER,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE,
+ AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+}
+
+static int ad7606b_sw_mode_setup(struct iio_dev *indio_dev)
+{
+ struct ad7606_state *st = iio_priv(indio_dev);
+ DECLARE_BITMAP(os, 3);
+
+ bitmap_fill(os, 3);
+ /*
+ * Software mode is enabled when all three oversampling pins are
+ * driven high. If the oversampling GPIOs are defined in the device
+ * tree, they need to be set high here; otherwise, they must be
+ * hardwired to VDD.
+ */
+ if (st->gpio_os)
+ gpiod_multi_set_value_cansleep(st->gpio_os, os);
+
+ /* OS ratios of 128 and 256 are available only in software mode */
+ st->oversampling_avail = ad7606b_oversampling_avail;
+ st->num_os_ratios = ARRAY_SIZE(ad7606b_oversampling_avail);
+
+ st->write_scale = ad7606_write_scale_sw;
+ st->write_os = &ad7606_write_os_sw;
return st->bops->sw_mode_config(indio_dev);
}
@@ -1246,17 +1365,6 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return -ERESTARTSYS;
}
- st->write_scale = ad7606_write_scale_hw;
- st->write_os = ad7606_write_os_hw;
-
- ret = ad7606_sw_mode_setup(indio_dev);
- if (ret)
- return ret;
-
- ret = ad7606_chan_scales_setup(indio_dev);
- if (ret)
- return ret;
-
/* If convst pin is not defined, setup PWM. */
if (!st->gpio_convst) {
st->cnvst_pwm = devm_pwm_get(dev, NULL);
@@ -1334,6 +1442,20 @@ int ad7606_probe(struct device *dev, int irq, void __iomem *base_address,
return ret;
}
+ st->write_scale = ad7606_write_scale_hw;
+ st->write_os = ad7606_write_os_hw;
+
+ st->sw_mode_en = st->chip_info->sw_setup_cb &&
+ device_property_present(st->dev, "adi,sw-mode");
+ if (st->sw_mode_en) {
+ indio_dev->info = &ad7606_info_sw_mode;
+ st->chip_info->sw_setup_cb(indio_dev);
+ }
+
+ ret = ad7606_chan_scales_setup(indio_dev);
+ if (ret)
+ return ret;
+
return devm_iio_device_register(dev, indio_dev);
}
EXPORT_SYMBOL_NS_GPL(ad7606_probe, "IIO_AD7606");
diff --git a/drivers/iio/adc/ad7606.h b/drivers/iio/adc/ad7606.h
index 8778ffe515b3..71a30525eaab 100644
--- a/drivers/iio/adc/ad7606.h
+++ b/drivers/iio/adc/ad7606.h
@@ -10,37 +10,49 @@
#define AD760X_MAX_CHANNELS 16
-#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all, bits) { \
+#define AD7616_CONFIGURATION_REGISTER 0x02
+#define AD7616_OS_MASK GENMASK(4, 2)
+#define AD7616_BURST_MODE BIT(6)
+#define AD7616_SEQEN_MODE BIT(5)
+#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
+#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
+/*
+ * The ranges of the channels in a group are stored in 2 registers:
+ * channels 0, 1, 2, 3 in one register, followed by 4, 5, 6, 7 in a second.
+ * For channels of the second group (8-15) the order is the same, only with
+ * an offset of 2 in the register address.
+ */
+#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
+/* The range of the channel is stored in 2 bits */
+#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
+#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
+
+#define AD7606_CONFIGURATION_REGISTER 0x02
+#define AD7606_SINGLE_DOUT 0x00
+
+/*
+ * Ranges for AD7606B channels are stored in registers starting at address 0x3.
+ * Each register stores the ranges for 2 channels (4 bits per channel).
+ */
+#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_MODE(ch, mode) \
+ ((GENMASK(3, 0) & (mode)) << (4 * ((ch) & 0x1)))
+#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
+#define AD7606_OS_MODE 0x08
+
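A worked expansion of the AD7606B range macros above, with an illustrative channel and range code:

/*
 * Channel 5: AD7606_RANGE_CH_ADDR(5) = 0x03 + (5 >> 1) = 0x05,
 * AD7606_RANGE_CH_MSK(5) = 0xF << (4 * (5 & 0x1)) = 0xF0, and a range
 * code of 0x3 packs as AD7606_RANGE_CH_MODE(5, 0x3) = 0x30.
 */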
+#define AD760X_CHANNEL(num, mask_sep, mask_type, mask_all, \
+ mask_sep_avail, mask_all_avail, bits) { \
.type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = num, \
.address = num, \
.info_mask_separate = mask_sep, \
+ .info_mask_separate_available = \
+ mask_sep_avail, \
.info_mask_shared_by_type = mask_type, \
.info_mask_shared_by_all = mask_all, \
- .scan_index = num, \
- .scan_type = { \
- .sign = 's', \
- .realbits = (bits), \
- .storagebits = (bits) > 16 ? 32 : 16, \
- .endianness = IIO_CPU, \
- }, \
-}
-
-#define AD7606_SW_CHANNEL(num, bits) { \
- .type = IIO_VOLTAGE, \
- .indexed = 1, \
- .channel = num, \
- .address = num, \
- .info_mask_separate = \
- BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_separate_available = \
- BIT(IIO_CHAN_INFO_SCALE), \
- .info_mask_shared_by_all = \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
.info_mask_shared_by_all_available = \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ mask_all_avail, \
.scan_index = num, \
.scan_type = { \
.sign = 's', \
@@ -50,14 +62,30 @@
}, \
}
+#define AD7606_SW_CHANNEL(num, bits) \
+ AD760X_CHANNEL(num, \
+ /* mask separate */ \
+ BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask type */ \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ /* mask all */ \
+ 0, \
+ /* mask separate available */ \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask all available */ \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ bits)
+
#define AD7605_CHANNEL(num) \
AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
- BIT(IIO_CHAN_INFO_SCALE), 0, 16)
+ BIT(IIO_CHAN_INFO_SCALE), 0, 0, 0, 16)
#define AD7606_CHANNEL(num, bits) \
AD760X_CHANNEL(num, BIT(IIO_CHAN_INFO_RAW), \
BIT(IIO_CHAN_INFO_SCALE), \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), bits)
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ 0, 0, bits)
#define AD7616_CHANNEL(num) AD7606_SW_CHANNEL(num, 16)
@@ -65,12 +93,29 @@
AD760X_CHANNEL(num, 0, \
BIT(IIO_CHAN_INFO_SCALE), \
BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
- BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), 16)
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ 0, 0, 16)
+
+#define AD7606_BI_SW_CHANNEL(num) \
+ AD760X_CHANNEL(num, \
+ /* mask separate */ \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask type */ \
+ 0, \
+ /* mask all */ \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ /* mask separate available */ \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ /* mask all available */ \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ 16)
struct ad7606_state;
typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
struct iio_chan_spec *chan, int ch);
+typedef int (*ad7606_sw_setup_cb_t)(struct iio_dev *indio_dev);
/**
* struct ad7606_chip_info - chip specific information
@@ -80,6 +125,7 @@ typedef int (*ad7606_scale_setup_cb_t)(struct iio_dev *indio_dev,
* @num_channels: number of channels
* @num_adc_channels the number of channels the ADC actually inputs.
* @scale_setup_cb: callback to setup the scales for each channel
+ * @sw_setup_cb: callback to setup the software mode if available.
* @oversampling_avail pointer to the array which stores the available
* oversampling ratios.
* @oversampling_num number of elements stored in oversampling_avail array
@@ -94,6 +140,7 @@ struct ad7606_chip_info {
unsigned int num_adc_channels;
unsigned int num_channels;
ad7606_scale_setup_cb_t scale_setup_cb;
+ ad7606_sw_setup_cb_t sw_setup_cb;
const unsigned int *oversampling_avail;
unsigned int oversampling_num;
bool os_req_reset;
@@ -206,10 +253,6 @@ struct ad7606_bus_ops {
int (*reg_write)(struct ad7606_state *st,
unsigned int addr,
unsigned int val);
- int (*write_mask)(struct ad7606_state *st,
- unsigned int addr,
- unsigned long mask,
- unsigned int val);
int (*update_scan_mode)(struct iio_dev *indio_dev, const unsigned long *scan_mask);
u16 (*rd_wr_cmd)(int addr, char isWriteOp);
};
diff --git a/drivers/iio/adc/ad7606_bus_iface.h b/drivers/iio/adc/ad7606_bus_iface.h
new file mode 100644
index 000000000000..f2c979a9b7f3
--- /dev/null
+++ b/drivers/iio/adc/ad7606_bus_iface.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2010-2024 Analog Devices Inc.
+ * Copyright (c) 2025 Baylibre, SAS
+ */
+#ifndef __LINUX_PLATFORM_DATA_AD7606_H__
+#define __LINUX_PLATFORM_DATA_AD7606_H__
+
+struct iio_backend;
+
+struct ad7606_platform_data {
+ int (*bus_reg_read)(struct iio_backend *back, u32 reg, u32 *val);
+ int (*bus_reg_write)(struct iio_backend *back, u32 reg, u32 val);
+};
+
+#endif /* __LINUX_PLATFORM_DATA_AD7606_H__ */
diff --git a/drivers/iio/adc/ad7606_par.c b/drivers/iio/adc/ad7606_par.c
index 64733b607aa8..335fb481bfde 100644
--- a/drivers/iio/adc/ad7606_par.c
+++ b/drivers/iio/adc/ad7606_par.c
@@ -19,6 +19,7 @@
#include <linux/iio/iio.h>
#include "ad7606.h"
+#include "ad7606_bus_iface.h"
static const struct iio_chan_spec ad7606b_bi_channels[] = {
AD7606_BI_CHANNEL(0),
@@ -31,7 +32,19 @@ static const struct iio_chan_spec ad7606b_bi_channels[] = {
AD7606_BI_CHANNEL(7),
};
-static int ad7606_bi_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask)
+static const struct iio_chan_spec ad7606b_bi_sw_channels[] = {
+ AD7606_BI_SW_CHANNEL(0),
+ AD7606_BI_SW_CHANNEL(1),
+ AD7606_BI_SW_CHANNEL(2),
+ AD7606_BI_SW_CHANNEL(3),
+ AD7606_BI_SW_CHANNEL(4),
+ AD7606_BI_SW_CHANNEL(5),
+ AD7606_BI_SW_CHANNEL(6),
+ AD7606_BI_SW_CHANNEL(7),
+};
+
+static int ad7606_par_bus_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
{
struct ad7606_state *st = iio_priv(indio_dev);
unsigned int c, ret;
@@ -48,7 +61,8 @@ static int ad7606_bi_update_scan_mode(struct iio_dev *indio_dev, const unsigned
return 0;
}
-static int ad7606_bi_setup_iio_backend(struct device *dev, struct iio_dev *indio_dev)
+static int ad7606_par_bus_setup_iio_backend(struct device *dev,
+ struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
unsigned int ret, c;
@@ -86,9 +100,39 @@ static int ad7606_bi_setup_iio_backend(struct device *dev, struct iio_dev *indio
return 0;
}
+static int ad7606_par_bus_reg_read(struct ad7606_state *st, unsigned int addr)
+{
+ struct ad7606_platform_data *pdata = st->dev->platform_data;
+ int val, ret;
+
+ ret = pdata->bus_reg_read(st->back, addr, &val);
+ if (ret)
+ return ret;
+
+ return val;
+}
+
+static int ad7606_par_bus_reg_write(struct ad7606_state *st, unsigned int addr,
+ unsigned int val)
+{
+ struct ad7606_platform_data *pdata = st->dev->platform_data;
+
+ return pdata->bus_reg_write(st->back, addr, val);
+}
+
+static int ad7606_par_bus_sw_mode_config(struct iio_dev *indio_dev)
+{
+ indio_dev->channels = ad7606b_bi_sw_channels;
+
+ return 0;
+}
+
static const struct ad7606_bus_ops ad7606_bi_bops = {
- .iio_backend_config = ad7606_bi_setup_iio_backend,
- .update_scan_mode = ad7606_bi_update_scan_mode,
+ .iio_backend_config = ad7606_par_bus_setup_iio_backend,
+ .update_scan_mode = ad7606_par_bus_update_scan_mode,
+ .reg_read = ad7606_par_bus_reg_read,
+ .reg_write = ad7606_par_bus_reg_write,
+ .sw_mode_config = ad7606_par_bus_sw_mode_config,
};
static int ad7606_par16_read_block(struct device *dev,
diff --git a/drivers/iio/adc/ad7606_spi.c b/drivers/iio/adc/ad7606_spi.c
index e2c147525706..885bf0b68e77 100644
--- a/drivers/iio/adc/ad7606_spi.c
+++ b/drivers/iio/adc/ad7606_spi.c
@@ -15,36 +15,6 @@
#define MAX_SPI_FREQ_HZ 23500000 /* VDRIVE above 4.75 V */
-#define AD7616_CONFIGURATION_REGISTER 0x02
-#define AD7616_OS_MASK GENMASK(4, 2)
-#define AD7616_BURST_MODE BIT(6)
-#define AD7616_SEQEN_MODE BIT(5)
-#define AD7616_RANGE_CH_A_ADDR_OFF 0x04
-#define AD7616_RANGE_CH_B_ADDR_OFF 0x06
-/*
- * Range of channels from a group are stored in 2 registers.
- * 0, 1, 2, 3 in a register followed by 4, 5, 6, 7 in second register.
- * For channels from second group(8-15) the order is the same, only with
- * an offset of 2 for register address.
- */
-#define AD7616_RANGE_CH_ADDR(ch) ((ch) >> 2)
-/* The range of the channel is stored in 2 bits */
-#define AD7616_RANGE_CH_MSK(ch) (0b11 << (((ch) & 0b11) * 2))
-#define AD7616_RANGE_CH_MODE(ch, mode) ((mode) << ((((ch) & 0b11)) * 2))
-
-#define AD7606_CONFIGURATION_REGISTER 0x02
-#define AD7606_SINGLE_DOUT 0x00
-
-/*
- * Range for AD7606B channels are stored in registers starting with address 0x3.
- * Each register stores range for 2 channels(4 bits per channel).
- */
-#define AD7606_RANGE_CH_MSK(ch) (GENMASK(3, 0) << (4 * ((ch) & 0x1)))
-#define AD7606_RANGE_CH_MODE(ch, mode) \
- ((GENMASK(3, 0) & mode) << (4 * ((ch) & 0x1)))
-#define AD7606_RANGE_CH_ADDR(ch) (0x03 + ((ch) >> 1))
-#define AD7606_OS_MODE 0x08
-
static const struct iio_chan_spec ad7616_sw_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(16),
AD7616_CHANNEL(0),
@@ -89,10 +59,6 @@ static const struct iio_chan_spec ad7606c_18_sw_channels[] = {
AD7606_SW_CHANNEL(7, 18),
};
-static const unsigned int ad7606B_oversampling_avail[9] = {
- 1, 2, 4, 8, 16, 32, 64, 128, 256
-};
-
static u16 ad7616_spi_rd_wr_cmd(int addr, char isWriteOp)
{
/*
@@ -194,118 +160,20 @@ static int ad7606_spi_reg_write(struct ad7606_state *st,
return spi_write(spi, &st->d16[0], sizeof(st->d16[0]));
}
-static int ad7606_spi_write_mask(struct ad7606_state *st,
- unsigned int addr,
- unsigned long mask,
- unsigned int val)
-{
- int readval;
-
- readval = st->bops->reg_read(st, addr);
- if (readval < 0)
- return readval;
-
- readval &= ~mask;
- readval |= val;
-
- return st->bops->reg_write(st, addr, readval);
-}
-
-static int ad7616_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
- unsigned int ch_addr, mode, ch_index;
-
-
- /*
- * Ad7616 has 16 channels divided in group A and group B.
- * The range of channels from A are stored in registers with address 4
- * while channels from B are stored in register with address 6.
- * The last bit from channels determines if it is from group A or B
- * because the order of channels in iio is 0A, 0B, 1A, 1B...
- */
- ch_index = ch >> 1;
-
- ch_addr = AD7616_RANGE_CH_ADDR(ch_index);
-
- if ((ch & 0x1) == 0) /* channel A */
- ch_addr += AD7616_RANGE_CH_A_ADDR_OFF;
- else /* channel B */
- ch_addr += AD7616_RANGE_CH_B_ADDR_OFF;
-
- /* 0b01 for 2.5v, 0b10 for 5v and 0b11 for 10v */
- mode = AD7616_RANGE_CH_MODE(ch_index, ((val + 1) & 0b11));
- return st->bops->write_mask(st, ch_addr, AD7616_RANGE_CH_MSK(ch_index),
- mode);
-}
-
-static int ad7616_write_os_sw(struct iio_dev *indio_dev, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return st->bops->write_mask(st, AD7616_CONFIGURATION_REGISTER,
- AD7616_OS_MASK, val << 2);
-}
-
-static int ad7606_write_scale_sw(struct iio_dev *indio_dev, int ch, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return ad7606_spi_write_mask(st,
- AD7606_RANGE_CH_ADDR(ch),
- AD7606_RANGE_CH_MSK(ch),
- AD7606_RANGE_CH_MODE(ch, val));
-}
-
-static int ad7606_write_os_sw(struct iio_dev *indio_dev, int val)
-{
- struct ad7606_state *st = iio_priv(indio_dev);
-
- return ad7606_spi_reg_write(st, AD7606_OS_MODE, val);
-}
-
static int ad7616_sw_mode_config(struct iio_dev *indio_dev)
{
- struct ad7606_state *st = iio_priv(indio_dev);
-
/*
* Scale can be configured individually for each channel
* in software mode.
*/
indio_dev->channels = ad7616_sw_channels;
- st->write_scale = ad7616_write_scale_sw;
- st->write_os = &ad7616_write_os_sw;
-
- /* Activate Burst mode and SEQEN MODE */
- return st->bops->write_mask(st,
- AD7616_CONFIGURATION_REGISTER,
- AD7616_BURST_MODE | AD7616_SEQEN_MODE,
- AD7616_BURST_MODE | AD7616_SEQEN_MODE);
+ return 0;
}
static int ad7606B_sw_mode_config(struct iio_dev *indio_dev)
{
struct ad7606_state *st = iio_priv(indio_dev);
- DECLARE_BITMAP(os, 3);
-
- bitmap_fill(os, 3);
- /*
- * Software mode is enabled when all three oversampling
- * pins are set to high. If oversampling gpios are defined
- * in the device tree, then they need to be set to high,
- * otherwise, they must be hardwired to VDD
- */
- if (st->gpio_os) {
- gpiod_set_array_value(st->gpio_os->ndescs,
- st->gpio_os->desc, st->gpio_os->info, os);
- }
- /* OS of 128 and 256 are available only in software mode */
- st->oversampling_avail = ad7606B_oversampling_avail;
- st->num_os_ratios = ARRAY_SIZE(ad7606B_oversampling_avail);
-
- st->write_scale = ad7606_write_scale_sw;
- st->write_os = &ad7606_write_os_sw;
/* Configure device spi to output on a single channel */
st->bops->reg_write(st,
@@ -350,7 +218,6 @@ static const struct ad7606_bus_ops ad7616_spi_bops = {
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7616_spi_rd_wr_cmd,
.sw_mode_config = ad7616_sw_mode_config,
};
@@ -359,7 +226,6 @@ static const struct ad7606_bus_ops ad7606b_spi_bops = {
.read_block = ad7606_spi_read_block,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
.sw_mode_config = ad7606B_sw_mode_config,
};
@@ -368,7 +234,6 @@ static const struct ad7606_bus_ops ad7606c_18_spi_bops = {
.read_block = ad7606_spi_read_block18to32,
.reg_read = ad7606_spi_reg_read,
.reg_write = ad7606_spi_reg_write,
- .write_mask = ad7606_spi_write_mask,
.rd_wr_cmd = ad7606B_spi_rd_wr_cmd,
.sw_mode_config = ad7606c_18_sw_mode_config,
};
diff --git a/drivers/iio/adc/ad7625.c b/drivers/iio/adc/ad7625.c
index afa9bf4ddf3c..0466c0c7eae4 100644
--- a/drivers/iio/adc/ad7625.c
+++ b/drivers/iio/adc/ad7625.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
+// SPDX-License-Identifier: (GPL-2.0-only)
/*
* Analog Devices Inc. AD7625 ADC driver
*
@@ -248,12 +248,15 @@ static int ad7625_write_raw(struct iio_dev *indio_dev,
int val, int val2, long info)
{
struct ad7625_state *st = iio_priv(indio_dev);
+ int ret;
switch (info) {
case IIO_CHAN_INFO_SAMP_FREQ:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad7625_set_sampling_freq(st, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad7625_set_sampling_freq(st, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -680,5 +683,5 @@ module_platform_driver(ad7625_driver);
MODULE_AUTHOR("Trevor Gamblin <tgamblin@baylibre.com>");
MODULE_DESCRIPTION("Analog Devices AD7625 ADC");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_BACKEND");
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index 113703fb7245..5a863005aca6 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -142,7 +142,7 @@ static const struct iio_chan_spec ad7768_channels[] = {
.channel = 0,
.scan_index = 0,
.scan_type = {
- .sign = 'u',
+ .sign = 's',
.realbits = 24,
.storagebits = 32,
.shift = 8,
@@ -154,7 +154,6 @@ static const struct iio_chan_spec ad7768_channels[] = {
struct ad7768_state {
struct spi_device *spi;
struct regulator *vref;
- struct mutex lock;
struct clk *mclk;
unsigned int mclk_freq;
unsigned int samp_freq;
@@ -256,18 +255,20 @@ static int ad7768_reg_access(struct iio_dev *indio_dev,
struct ad7768_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
if (readval) {
ret = ad7768_spi_reg_read(st, reg, 1);
if (ret < 0)
- goto err_unlock;
+ goto err_release;
*readval = ret;
ret = 0;
} else {
ret = ad7768_spi_reg_write(st, reg, writeval);
}
-err_unlock:
- mutex_unlock(&st->lock);
+err_release:
+ iio_device_release_direct(indio_dev);
return ret;
}
@@ -365,17 +366,15 @@ static int ad7768_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7768_scan_direct(indio_dev);
- if (ret >= 0)
- *val = ret;
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
+ *val = sign_extend32(ret, chan->scan_type.realbits - 1);
return IIO_VAL_INT;
@@ -471,18 +470,15 @@ static irqreturn_t ad7768_trigger_handler(int irq, void *p)
struct ad7768_state *st = iio_priv(indio_dev);
int ret;
- mutex_lock(&st->lock);
-
ret = spi_read(st->spi, &st->data.scan.chan, 3);
if (ret < 0)
- goto err_unlock;
+ goto out;
iio_push_to_buffers_with_timestamp(indio_dev, &st->data.scan,
iio_get_time_ns(indio_dev));
-err_unlock:
+out:
iio_trigger_notify_done(indio_dev->trig);
- mutex_unlock(&st->lock);
return IRQ_HANDLED;
}
@@ -574,6 +570,21 @@ static int ad7768_probe(struct spi_device *spi)
return -ENOMEM;
st = iio_priv(indio_dev);
+ /*
+	 * The datasheet recommends keeping the SDI line high while data is not
+	 * being clocked out of the controller and the SPI clock is free
+	 * running, to prevent an accidental reset.
+	 * Since many controllers do not support the SPI_MOSI_IDLE_HIGH flag
+	 * yet, only request the MOSI idle high state if the controller
+	 * supports it.
+ */
+ if (spi->controller->mode_bits & SPI_MOSI_IDLE_HIGH) {
+ spi->mode |= SPI_MOSI_IDLE_HIGH;
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+ }
+
st->spi = spi;
st->vref = devm_regulator_get(&spi->dev, "vref");
@@ -596,8 +607,6 @@ static int ad7768_probe(struct spi_device *spi)
st->mclk_freq = clk_get_rate(st->mclk);
- mutex_init(&st->lock);
-
indio_dev->channels = ad7768_channels;
indio_dev->num_channels = ARRAY_SIZE(ad7768_channels);
indio_dev->name = spi_get_device_id(spi)->name;
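The probe change above only opts in to SPI_MOSI_IDLE_HIGH when the controller advertises it, so setups with older controllers keep working unchanged. A short sketch of that check, assuming a spi_device already obtained in probe; the foo_ name is illustrative.

#include <linux/spi/spi.h>

static int foo_request_mosi_idle_high(struct spi_device *spi)
{
	/* Controllers without the feature keep their default idle level. */
	if (!(spi->controller->mode_bits & SPI_MOSI_IDLE_HIGH))
		return 0;

	spi->mode |= SPI_MOSI_IDLE_HIGH;
	return spi_setup(spi);
}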
diff --git a/drivers/iio/adc/ad7779.c b/drivers/iio/adc/ad7779.c
index 2537dab69a35..a5d87faa5e12 100644
--- a/drivers/iio/adc/ad7779.c
+++ b/drivers/iio/adc/ad7779.c
@@ -467,59 +467,82 @@ static int ad7779_set_calibbias(struct ad7779_state *st, int channel, int val)
calibbias[2]);
}
+static int __ad7779_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = ad7779_get_calibscale(st, chan->channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ *val2 = GAIN_REL;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = ad7779_get_calibbias(st, chan->channel);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->sampling_freq;
+ if (*val < 0)
+ return -EINVAL;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ad7779_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
{
- struct ad7779_state *st = iio_priv(indio_dev);
int ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- ret = ad7779_get_calibscale(st, chan->channel);
- if (ret < 0)
- return ret;
- *val = ret;
- *val2 = GAIN_REL;
- return IIO_VAL_FRACTIONAL;
- case IIO_CHAN_INFO_CALIBBIAS:
- ret = ad7779_get_calibbias(st, chan->channel);
- if (ret < 0)
- return ret;
- *val = ret;
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_SAMP_FREQ:
- *val = st->sampling_freq;
- if (*val < 0)
- return -EINVAL;
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7779_read_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+ return ret;
+}
+
+static int __ad7779_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2,
+ long mask)
+{
+ struct ad7779_state *st = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return ad7779_set_calibscale(st, chan->channel, val2);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return ad7779_set_calibbias(st, chan->channel, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return ad7779_set_sampling_frequency(st, val);
+ default:
+ return -EINVAL;
}
- unreachable();
}
static int ad7779_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2,
long mask)
{
- struct ad7779_state *st = iio_priv(indio_dev);
+ int ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_CALIBSCALE:
- return ad7779_set_calibscale(st, chan->channel, val2);
- case IIO_CHAN_INFO_CALIBBIAS:
- return ad7779_set_calibbias(st, chan->channel, val);
- case IIO_CHAN_INFO_SAMP_FREQ:
- return ad7779_set_sampling_frequency(st, val);
- default:
- return -EINVAL;
- }
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7779_write_raw(indio_dev, chan, val, val2, mask);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static int ad7779_buffer_preenable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 76118fe22db8..597c2686ffa4 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -310,15 +310,11 @@ static int ad7791_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad7791_write_raw(struct iio_dev *indio_dev,
+static int __ad7791_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int val, int val2, long mask)
{
struct ad7791_state *st = iio_priv(indio_dev);
- int ret, i;
-
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ int i;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
@@ -328,22 +324,31 @@ static int ad7791_write_raw(struct iio_dev *indio_dev,
break;
}
- if (i == ARRAY_SIZE(ad7791_sample_freq_avail)) {
- ret = -EINVAL;
- break;
- }
+ if (i == ARRAY_SIZE(ad7791_sample_freq_avail))
+ return -EINVAL;
st->filter &= ~AD7791_FILTER_RATE_MASK;
st->filter |= i;
ad_sd_write_reg(&st->sd, AD7791_REG_FILTER,
sizeof(st->filter),
st->filter);
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+}
+
+static int ad7791_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7791_write_raw(indio_dev, chan, val, val2, mask);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 1b50d9643a63..ccf18ce48e34 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -462,64 +462,68 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-static int ad7793_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+static int __ad7793_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
{
struct ad7793_state *st = iio_priv(indio_dev);
- int ret, i;
+ int i;
unsigned int tmp;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- ret = -EINVAL;
- for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
- if (val2 == st->scale_avail[i][1]) {
- ret = 0;
- tmp = st->conf;
- st->conf &= ~AD7793_CONF_GAIN(-1);
- st->conf |= AD7793_CONF_GAIN(i);
-
- if (tmp == st->conf)
- break;
-
- ad_sd_write_reg(&st->sd, AD7793_REG_CONF,
- sizeof(st->conf), st->conf);
- ad7793_calibrate_all(st);
- break;
- }
- break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (!val) {
- ret = -EINVAL;
- break;
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++) {
+ if (val2 != st->scale_avail[i][1])
+ continue;
+
+ tmp = st->conf;
+ st->conf &= ~AD7793_CONF_GAIN(-1);
+ st->conf |= AD7793_CONF_GAIN(i);
+
+ if (tmp == st->conf)
+ return 0;
+
+ ad_sd_write_reg(&st->sd, AD7793_REG_CONF,
+ sizeof(st->conf), st->conf);
+ ad7793_calibrate_all(st);
+
+ return 0;
}
+ return -EINVAL;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (!val)
+ return -EINVAL;
for (i = 0; i < 16; i++)
if (val == st->chip_info->sample_freq_avail[i])
break;
- if (i == 16) {
- ret = -EINVAL;
- break;
- }
+ if (i == 16)
+ return -EINVAL;
st->mode &= ~AD7793_MODE_RATE(-1);
st->mode |= AD7793_MODE_RATE(i);
ad_sd_write_reg(&st->sd, AD7793_REG_MODE, sizeof(st->mode),
st->mode);
- break;
+ return 0;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
+}
+
+static int ad7793_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __ad7793_write_raw(indio_dev, chan, val, val2, mask);
+
+ iio_device_release_direct(indio_dev);
- iio_device_release_direct_mode(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index 69add1dc4b53..87ff95643794 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -152,11 +152,10 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7887_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index acc44cb34f82..87945efb940b 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -260,11 +260,10 @@ static int ad7923_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7923_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ad7944.c b/drivers/iio/adc/ad7944.c
index 0ec9cda10f5f..2f949fe55873 100644
--- a/drivers/iio/adc/ad7944.c
+++ b/drivers/iio/adc/ad7944.c
@@ -16,11 +16,14 @@
#include <linux/module.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/spi/offload/consumer.h>
#include <linux/spi/spi.h>
#include <linux/string_helpers.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -54,6 +57,12 @@ struct ad7944_adc {
enum ad7944_spi_mode spi_mode;
struct spi_transfer xfers[3];
struct spi_message msg;
+ struct spi_transfer offload_xfers[2];
+ struct spi_message offload_msg;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ unsigned long offload_trigger_hz;
+ int sample_freq_range[3];
void *chain_mode_buf;
/* Chip-specific timing specifications. */
const struct ad7944_timing_spec *timing_spec;
@@ -81,6 +90,8 @@ struct ad7944_adc {
/* quiet time before CNV rising edge */
#define AD7944_T_QUIET_NS 20
+/* minimum CNV high time to trigger conversion */
+#define AD7944_T_CNVH_NS 10
static const struct ad7944_timing_spec ad7944_timing_spec = {
.conv_ns = 420,
@@ -95,20 +106,27 @@ static const struct ad7944_timing_spec ad7986_timing_spec = {
struct ad7944_chip_info {
const char *name;
const struct ad7944_timing_spec *timing_spec;
+ u32 max_sample_rate_hz;
const struct iio_chan_spec channels[2];
+ const struct iio_chan_spec offload_channels[1];
};
+/* get number of bytes for SPI xfer */
+#define AD7944_SPI_BYTES(scan_type) ((scan_type).realbits > 16 ? 4 : 2)
+
/*
* AD7944_DEFINE_CHIP_INFO - Define a chip info structure for a specific chip
* @_name: The name of the chip
* @_ts: The timing specification for the chip
+ * @_max: The maximum sample rate in Hz
* @_bits: The number of bits in the conversion result
* @_diff: Whether the chip is true differential or not
*/
-#define AD7944_DEFINE_CHIP_INFO(_name, _ts, _bits, _diff) \
+#define AD7944_DEFINE_CHIP_INFO(_name, _ts, _max, _bits, _diff) \
static const struct ad7944_chip_info _name##_chip_info = { \
.name = #_name, \
.timing_spec = &_ts##_timing_spec, \
+ .max_sample_rate_hz = _max, \
.channels = { \
{ \
.type = IIO_VOLTAGE, \
@@ -126,13 +144,43 @@ static const struct ad7944_chip_info _name##_chip_info = { \
}, \
IIO_CHAN_SOFT_TIMESTAMP(1), \
}, \
+ .offload_channels = { \
+ { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .differential = _diff, \
+ .channel = 0, \
+ .channel2 = _diff ? 1 : 0, \
+ .scan_index = 0, \
+ .scan_type.sign = _diff ? 's' : 'u', \
+ .scan_type.realbits = _bits, \
+ .scan_type.storagebits = 32, \
+ .scan_type.endianness = IIO_CPU, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) \
+ | BIT(IIO_CHAN_INFO_SCALE) \
+ | BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .info_mask_separate_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ }, \
+ }, \
}
+/*
+ * Notes on the offload channels:
+ * - There is no soft timestamp since everything is done in hardware.
+ * - There is a sampling frequency attribute added. This controls the SPI
+ * offload trigger.
+ * - The storagebits value depends on the SPI offload provider. Currently there
+ * is only one supported provider, namely the ADI PULSAR ADC HDL project,
+ * which always uses 32-bit words for data values, even for <= 16-bit ADCs.
+ * So the value is just hardcoded to 32 for now.
+ */
+
/* pseudo-differential with ground sense */
-AD7944_DEFINE_CHIP_INFO(ad7944, ad7944, 14, 0);
-AD7944_DEFINE_CHIP_INFO(ad7985, ad7944, 16, 0);
+AD7944_DEFINE_CHIP_INFO(ad7944, ad7944, 2.5 * MEGA, 14, 0);
+AD7944_DEFINE_CHIP_INFO(ad7985, ad7944, 2.5 * MEGA, 16, 0);
/* fully differential */
-AD7944_DEFINE_CHIP_INFO(ad7986, ad7986, 18, 1);
+AD7944_DEFINE_CHIP_INFO(ad7986, ad7986, 2 * MEGA, 18, 1);
static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *adc,
const struct iio_chan_spec *chan)
@@ -164,7 +212,7 @@ static int ad7944_3wire_cs_mode_init_msg(struct device *dev, struct ad7944_adc *
/* Then we can read the data during the acquisition phase */
xfers[2].rx_buf = &adc->sample.raw;
- xfers[2].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[2].len = AD7944_SPI_BYTES(chan->scan_type);
xfers[2].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 3);
@@ -193,7 +241,7 @@ static int ad7944_4wire_mode_init_msg(struct device *dev, struct ad7944_adc *adc
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = &adc->sample.raw;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits);
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type);
xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2);
@@ -228,7 +276,7 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
xfers[1].rx_buf = adc->chain_mode_buf;
- xfers[1].len = BITS_TO_BYTES(chan->scan_type.storagebits) * n_chain_dev;
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type) * n_chain_dev;
xfers[1].bits_per_word = chan->scan_type.realbits;
spi_message_init_with_transfers(&adc->msg, xfers, 2);
@@ -236,6 +284,48 @@ static int ad7944_chain_mode_init_msg(struct device *dev, struct ad7944_adc *adc
return devm_spi_optimize_message(dev, adc->spi, &adc->msg);
}
+/*
+ * Unlike ad7944_3wire_cs_mode_init_msg(), this creates a message that reads
+ * during the conversion phase instead of the acquisition phase when reading
+ * a sample from the ADC. This is needed to be able to read at the maximum
+ * sample rate. It requires the SPI controller to have offload support and a
+ * high enough SCLK rate to read the sample during the conversion phase.
+ */
+static int ad7944_3wire_cs_mode_init_offload_msg(struct device *dev,
+ struct ad7944_adc *adc,
+ const struct iio_chan_spec *chan)
+{
+ struct spi_transfer *xfers = adc->offload_xfers;
+ int ret;
+
+ /*
+ * CS is tied to CNV and we need a low to high transition to start the
+ * conversion, so place CNV low for t_QUIET to prepare for this.
+ */
+ xfers[0].delay.value = AD7944_T_QUIET_NS;
+ xfers[0].delay.unit = SPI_DELAY_UNIT_NSECS;
+ /* CNV has to be high for a minimum time to trigger conversion. */
+ xfers[0].cs_change = 1;
+ xfers[0].cs_change_delay.value = AD7944_T_CNVH_NS;
+ xfers[0].cs_change_delay.unit = SPI_DELAY_UNIT_NSECS;
+
+ /* Then we can read the previous sample during the conversion phase */
+ xfers[1].offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;
+ xfers[1].len = AD7944_SPI_BYTES(chan->scan_type);
+ xfers[1].bits_per_word = chan->scan_type.realbits;
+
+ spi_message_init_with_transfers(&adc->offload_msg, xfers,
+ ARRAY_SIZE(adc->offload_xfers));
+
+ adc->offload_msg.offload = adc->offload;
+
+ ret = devm_spi_optimize_message(dev, adc->spi, &adc->offload_msg);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to prepare offload msg\n");
+
+ return 0;
+}
+
/**
* ad7944_convert_and_acquire - Perform a single conversion and acquisition
* @adc: The ADC device structure
@@ -274,12 +364,12 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
return ret;
if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
- if (chan->scan_type.storagebits > 16)
+ if (chan->scan_type.realbits > 16)
*val = ((u32 *)adc->chain_mode_buf)[chan->scan_index];
else
*val = ((u16 *)adc->chain_mode_buf)[chan->scan_index];
} else {
- if (chan->scan_type.storagebits > 16)
+ if (chan->scan_type.realbits > 16)
*val = adc->sample.raw.u32;
else
*val = adc->sample.raw.u16;
@@ -291,6 +381,23 @@ static int ad7944_single_conversion(struct ad7944_adc *adc,
return IIO_VAL_INT;
}
+static int ad7944_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = adc->sample_freq_range;
+ *type = IIO_VAL_INT;
+ return IIO_AVAIL_RANGE;
+ default:
+ return -EINVAL;
+ }
+}
+
static int ad7944_read_raw(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan,
int *val, int *val2, long info)
@@ -300,12 +407,11 @@ static int ad7944_read_raw(struct iio_dev *indio_dev,
switch (info) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ad7944_single_conversion(adc, chan, val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
@@ -323,13 +429,104 @@ static int ad7944_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = adc->offload_trigger_hz;
+ return IIO_VAL_INT;
+
default:
return -EINVAL;
}
}
+static int ad7944_set_sample_freq(struct ad7944_adc *adc, int val)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = val,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(adc->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ adc->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
+
+static int ad7944_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1 || val > adc->sample_freq_range[2])
+ return -EINVAL;
+
+ return ad7944_set_sample_freq(adc, val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ad7944_write_raw_get_fmt(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
static const struct iio_info ad7944_iio_info = {
+ .read_avail = &ad7944_read_avail,
.read_raw = &ad7944_read_raw,
+ .write_raw = &ad7944_write_raw,
+ .write_raw_get_fmt = &ad7944_write_raw_get_fmt,
+};
+
+static int ad7944_offload_buffer_postenable(struct iio_dev *indio_dev)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = adc->offload_trigger_hz,
+ },
+ };
+ int ret;
+
+ gpiod_set_value_cansleep(adc->turbo, 1);
+
+ ret = spi_offload_trigger_enable(adc->offload, adc->offload_trigger,
+ &config);
+ if (ret)
+ gpiod_set_value_cansleep(adc->turbo, 0);
+
+ return ret;
+}
+
+static int ad7944_offload_buffer_predisable(struct iio_dev *indio_dev)
+{
+ struct ad7944_adc *adc = iio_priv(indio_dev);
+
+ spi_offload_trigger_disable(adc->offload, adc->offload_trigger);
+ gpiod_set_value_cansleep(adc->turbo, 0);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad7944_offload_buffer_setup_ops = {
+ .postenable = &ad7944_offload_buffer_postenable,
+ .predisable = &ad7944_offload_buffer_predisable,
};
static irqreturn_t ad7944_trigger_handler(int irq, void *p)
@@ -409,8 +606,7 @@ static int ad7944_chain_mode_alloc(struct device *dev,
/* 1 word for each voltage channel + aligned u64 for timestamp */
chain_mode_buf_size = ALIGN(n_chain_dev *
- BITS_TO_BYTES(chan[0].scan_type.storagebits), sizeof(u64))
- + sizeof(u64);
+ AD7944_SPI_BYTES(chan[0].scan_type), sizeof(u64)) + sizeof(u64);
buf = devm_kzalloc(dev, chain_mode_buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -444,6 +640,11 @@ static const char * const ad7944_power_supplies[] = {
"avdd", "dvdd", "bvdd", "vio"
};
+static const struct spi_offload_config ad7944_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_RX_STREAM_DMA,
+};
+
static int ad7944_probe(struct spi_device *spi)
{
const struct ad7944_chip_info *chip_info;
@@ -469,6 +670,10 @@ static int ad7944_probe(struct spi_device *spi)
adc->timing_spec = chip_info->timing_spec;
+ adc->sample_freq_range[0] = 1; /* min */
+ adc->sample_freq_range[1] = 1; /* step */
+ adc->sample_freq_range[2] = chip_info->max_sample_rate_hz; /* max */
+
ret = device_property_match_property_string(dev, "adi,spi-mode",
ad7944_spi_modes,
ARRAY_SIZE(ad7944_spi_modes));
@@ -588,20 +793,74 @@ static int ad7944_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &ad7944_iio_info;
- if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
- indio_dev->available_scan_masks = chain_scan_masks;
- indio_dev->channels = chain_chan;
- indio_dev->num_channels = n_chain_dev + 1;
+ adc->offload = devm_spi_offload_get(dev, spi, &ad7944_offload_config);
+ ret = PTR_ERR_OR_ZERO(adc->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(dev, ret, "failed to get offload\n");
+
+ /* Fall back to low speed usage when no SPI offload available. */
+ if (ret == -ENODEV) {
+ if (adc->spi_mode == AD7944_SPI_MODE_CHAIN) {
+ indio_dev->available_scan_masks = chain_scan_masks;
+ indio_dev->channels = chain_chan;
+ indio_dev->num_channels = n_chain_dev + 1;
+ } else {
+ indio_dev->channels = chip_info->channels;
+ indio_dev->num_channels = ARRAY_SIZE(chip_info->channels);
+ }
+
+ ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
+ iio_pollfunc_store_time,
+ ad7944_trigger_handler,
+ NULL);
+ if (ret)
+ return ret;
} else {
- indio_dev->channels = chip_info->channels;
- indio_dev->num_channels = ARRAY_SIZE(chip_info->channels);
- }
+ struct dma_chan *rx_dma;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
- iio_pollfunc_store_time,
- ad7944_trigger_handler, NULL);
- if (ret)
- return ret;
+ if (adc->spi_mode != AD7944_SPI_MODE_SINGLE)
+ return dev_err_probe(dev, -EINVAL,
+ "offload only supported in single mode\n");
+
+ indio_dev->setup_ops = &ad7944_offload_buffer_setup_ops;
+ indio_dev->channels = chip_info->offload_channels;
+ indio_dev->num_channels = ARRAY_SIZE(chip_info->offload_channels);
+
+ adc->offload_trigger = devm_spi_offload_trigger_get(dev,
+ adc->offload, SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(adc->offload_trigger))
+ return dev_err_probe(dev, PTR_ERR(adc->offload_trigger),
+ "failed to get offload trigger\n");
+
+ ret = ad7944_set_sample_freq(adc, 2 * MEGA);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to init sample rate\n");
+
+ rx_dma = devm_spi_offload_rx_stream_request_dma_chan(dev,
+ adc->offload);
+ if (IS_ERR(rx_dma))
+ return dev_err_probe(dev, PTR_ERR(rx_dma),
+ "failed to get offload RX DMA\n");
+
+ /*
+ * REVISIT: ideally, we would confirm that the offload RX DMA
+ * buffer layout is the same as what is hard-coded in
+ * offload_channels. Right now, the only supported offload
+ * is the pulsar_adc project which always uses 32-bit word
+ * size for data values, regardless of the SPI bits per word.
+ */
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(dev,
+ indio_dev, rx_dma, IIO_BUFFER_DIRECTION_IN);
+ if (ret)
+ return ret;
+
+ ret = ad7944_3wire_cs_mode_init_offload_msg(dev, adc,
+ &chip_info->offload_channels[0]);
+ if (ret)
+ return ret;
+ }
return devm_iio_device_register(dev, indio_dev);
}
@@ -636,3 +895,4 @@ module_spi_driver(ad7944_driver);
MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
MODULE_DESCRIPTION("Analog Devices AD7944 PulSAR ADC family driver");
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
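The new SAMP_FREQ handling above funnels every requested rate through spi_offload_trigger_validate(), which rounds it to what the periodic trigger can actually generate; the rounded value is what later sampling_frequency reads report. A sketch of just that step, wrapped in a hypothetical foo_ helper; the spi_offload_* calls are the ones used in the hunk.

#include <linux/spi/offload/consumer.h>

static int foo_set_offload_rate(struct spi_offload_trigger *trigger,
				unsigned long *stored_hz, int requested_hz)
{
	struct spi_offload_trigger_config config = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = {
			.frequency_hz = requested_hz,
		},
	};
	int ret;

	/* The provider may round the rate; keep what it settled on. */
	ret = spi_offload_trigger_validate(trigger, &config);
	if (ret)
		return ret;

	*stored_hz = config.periodic.frequency_hz;

	return 0;
}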
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index aa44b4e2542b..993f4651b73a 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -291,13 +291,12 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
ret = ad799x_scan_direct(st, chan->scan_index);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
@@ -411,9 +410,8 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
struct ad799x_state *st = iio_priv(indio_dev);
int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
@@ -429,7 +427,7 @@ static int ad799x_write_event_config(struct iio_dev *indio_dev,
ret = ad799x_write_config(st, st->config);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c
index f30119b42ba0..f7a9f46ea0dc 100644
--- a/drivers/iio/adc/ad9467.c
+++ b/drivers/iio/adc/ad9467.c
@@ -813,6 +813,18 @@ static int ad9467_read_raw(struct iio_dev *indio_dev,
}
}
+static int __ad9467_update_clock(struct ad9467_state *st, long r_clk)
+{
+ int ret;
+
+ ret = clk_set_rate(st->clk, r_clk);
+ if (ret)
+ return ret;
+
+ guard(mutex)(&st->lock);
+ return ad9467_calibrate(st);
+}
+
static int ad9467_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@@ -842,14 +854,11 @@ static int ad9467_write_raw(struct iio_dev *indio_dev,
if (sample_rate == r_clk)
return 0;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = clk_set_rate(st->clk, r_clk);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- guard(mutex)(&st->lock);
- ret = ad9467_calibrate(st);
- }
+ ret = __ad9467_update_clock(st, r_clk);
+ iio_device_release_direct(indio_dev);
return ret;
default:
return -EINVAL;
diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
index d5d81581ab34..6c37f8e21120 100644
--- a/drivers/iio/adc/ad_sigma_delta.c
+++ b/drivers/iio/adc/ad_sigma_delta.c
@@ -339,6 +339,7 @@ int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
out:
sigma_delta->keep_cs_asserted = false;
ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
+ ad_sigma_delta_disable_one(sigma_delta, channel);
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
@@ -386,11 +387,12 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
unsigned int data_reg;
int ret = 0;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- ad_sigma_delta_set_channel(sigma_delta, chan->address);
+ ret = ad_sigma_delta_set_channel(sigma_delta, chan->address);
+ if (ret)
+ goto out_release;
spi_bus_lock(sigma_delta->spi->controller);
sigma_delta->bus_locked = true;
@@ -431,7 +433,8 @@ out_unlock:
sigma_delta->keep_cs_asserted = false;
sigma_delta->bus_locked = false;
spi_bus_unlock(sigma_delta->spi->controller);
- iio_device_release_direct_mode(indio_dev);
+out_release:
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -801,10 +804,15 @@ int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
spin_lock_init(&sigma_delta->irq_lock);
- if (info->irq_line)
- sigma_delta->irq_line = info->irq_line;
- else
+ if (info->has_named_irqs) {
+ sigma_delta->irq_line = fwnode_irq_get_byname(dev_fwnode(&spi->dev),
+ "rdy");
+ if (sigma_delta->irq_line < 0)
+ return dev_err_probe(&spi->dev, sigma_delta->irq_line,
+ "Interrupt 'rdy' is required\n");
+ } else {
sigma_delta->irq_line = spi->irq;
+ }
sigma_delta->rdy_gpiod = devm_gpiod_get_optional(&spi->dev, "rdy", GPIOD_IN);
if (IS_ERR(sigma_delta->rdy_gpiod))
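With has_named_irqs set, the sigma-delta core above now resolves the data-ready interrupt by name instead of assuming spi->irq. A sketch of that lookup with an illustrative foo_ helper; fwnode_irq_get_byname() and dev_err_probe() are the calls used in the hunk.

#include <linux/device.h>
#include <linux/property.h>
#include <linux/spi/spi.h>

static int foo_get_rdy_irq(struct spi_device *spi, bool has_named_irqs)
{
	int irq;

	if (!has_named_irqs)
		return spi->irq;

	irq = fwnode_irq_get_byname(dev_fwnode(&spi->dev), "rdy");
	if (irq < 0)
		return dev_err_probe(&spi->dev, irq,
				     "Interrupt 'rdy' is required\n");

	return irq;
}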
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index c7357601f0f8..cf942c043457 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -12,9 +12,9 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
@@ -27,6 +27,7 @@
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>
+#include "ad7606_bus_iface.h"
/*
* Register definitions:
* https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
@@ -39,9 +40,19 @@
#define ADI_AXI_REG_RSTN_MMCM_RSTN BIT(1)
#define ADI_AXI_REG_RSTN_RSTN BIT(0)
+#define ADI_AXI_ADC_REG_CONFIG 0x000c
+#define ADI_AXI_ADC_REG_CONFIG_CMOS_OR_LVDS_N BIT(7)
+
#define ADI_AXI_ADC_REG_CTRL 0x0044
#define ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK BIT(1)
+#define ADI_AXI_ADC_REG_CNTRL_3 0x004c
+#define AXI_AD485X_CNTRL_3_OS_EN_MSK BIT(2)
+#define AXI_AD485X_CNTRL_3_PACKET_FORMAT_MSK GENMASK(1, 0)
+#define AXI_AD485X_PACKET_FORMAT_20BIT 0x0
+#define AXI_AD485X_PACKET_FORMAT_24BIT 0x1
+#define AXI_AD485X_PACKET_FORMAT_32BIT 0x2
+
#define ADI_AXI_ADC_REG_DRP_STATUS 0x0074
#define ADI_AXI_ADC_DRP_LOCKED BIT(17)
@@ -73,6 +84,12 @@
#define ADI_AXI_ADC_REG_DELAY(l) (0x0800 + (l) * 0x4)
#define AXI_ADC_DELAY_CTRL_MASK GENMASK(4, 0)
+#define ADI_AXI_REG_CONFIG_WR 0x0080
+#define ADI_AXI_REG_CONFIG_RD 0x0084
+#define ADI_AXI_REG_CONFIG_CTRL 0x008c
+#define ADI_AXI_REG_CONFIG_CTRL_READ 0x03
+#define ADI_AXI_REG_CONFIG_CTRL_WRITE 0x01
+
#define ADI_AXI_ADC_MAX_IO_NUM_LANES 15
#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS \
@@ -80,7 +97,20 @@
ADI_AXI_REG_CHAN_CTRL_FMT_EN | \
ADI_AXI_REG_CHAN_CTRL_ENABLE)
+#define ADI_AXI_REG_READ_BIT 0x8000
+#define ADI_AXI_REG_ADDRESS_MASK 0xff00
+#define ADI_AXI_REG_VALUE_MASK 0x00ff
+
+struct axi_adc_info {
+ unsigned int version;
+ const struct iio_backend_info *backend_info;
+ bool has_child_nodes;
+ const void *pdata;
+ unsigned int pdata_sz;
+};
+
struct adi_axi_adc_state {
+ const struct axi_adc_info *info;
struct regmap *regmap;
struct device *dev;
/* lock to protect multiple accesses to the device registers */
@@ -290,6 +320,88 @@ static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
ADI_AXI_REG_CHAN_CTRL_ENABLE);
}
+static int axi_adc_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CONFIG, &val);
+ if (ret)
+ return ret;
+
+ if (val & ADI_AXI_ADC_REG_CONFIG_CMOS_OR_LVDS_N)
+ *type = IIO_BACKEND_INTERFACE_SERIAL_CMOS;
+ else
+ *type = IIO_BACKEND_INTERFACE_SERIAL_LVDS;
+
+ return 0;
+}
+
+static int axi_adc_ad485x_data_size_set(struct iio_backend *back,
+ unsigned int size)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ unsigned int val;
+
+ switch (size) {
+ /*
+	 * There are two variants of the AXI_AD485X IP block, a 16-bit and a
+	 * 20-bit one.
+	 * The 0x0 value (AXI_AD485X_PACKET_FORMAT_20BIT) also corresponds to
+	 * the 16-bit variant of the IP block.
+ */
+ case 16:
+ case 20:
+ val = AXI_AD485X_PACKET_FORMAT_20BIT;
+ break;
+ case 24:
+ val = AXI_AD485X_PACKET_FORMAT_24BIT;
+ break;
+ /*
+ * The 0x2 (AXI_AD485X_PACKET_FORMAT_32BIT) corresponds only to the
+ * 20-bit variant of the IP block. Setting this value properly is
+ * ensured by the upper layers of the drivers calling the axi-adc
+ * functions.
+	 * Also, for the 16-bit IP block, the 0x2 (AXI_AD485X_PACKET_FORMAT_32BIT)
+	 * value is treated as the maximum available size, which is 24 bits in
+	 * that configuration.
+ */
+ case 32:
+ val = AXI_AD485X_PACKET_FORMAT_32BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CNTRL_3,
+ AXI_AD485X_CNTRL_3_PACKET_FORMAT_MSK,
+ FIELD_PREP(AXI_AD485X_CNTRL_3_PACKET_FORMAT_MSK, val));
+}
+
+static int axi_adc_ad485x_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int ratio)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+	/*
+	 * For now this only enables or disables oversampling in the
+	 * REG_CNTRL_3 register: a ratio of 1 means no oversampling, while any
+	 * ratio greater than 1 enables it.
+ */
+ switch (ratio) {
+ case 0:
+ return -EINVAL;
+ case 1:
+ return regmap_clear_bits(st->regmap, ADI_AXI_ADC_REG_CNTRL_3,
+ AXI_AD485X_CNTRL_3_OS_EN_MSK);
+ default:
+ return regmap_set_bits(st->regmap, ADI_AXI_ADC_REG_CNTRL_3,
+ AXI_AD485X_CNTRL_3_OS_EN_MSK);
+ }
+}
+
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
struct iio_dev *indio_dev)
{
@@ -302,10 +414,79 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
}
+static int axi_adc_raw_write(struct iio_backend *back, u32 val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_WR, val);
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL,
+ ADI_AXI_REG_CONFIG_CTRL_WRITE);
+ fsleep(100);
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL, 0x00);
+ fsleep(100);
+
+ return 0;
+}
+
+static int axi_adc_raw_read(struct iio_backend *back, u32 *val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL,
+ ADI_AXI_REG_CONFIG_CTRL_READ);
+ fsleep(100);
+ regmap_read(st->regmap, ADI_AXI_REG_CONFIG_RD, val);
+ regmap_write(st->regmap, ADI_AXI_REG_CONFIG_CTRL, 0x00);
+ fsleep(100);
+
+ return 0;
+}
+
+static int ad7606_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ int addr;
+
+ guard(mutex)(&st->lock);
+
+ /*
+	 * The address is written in the high byte, and setting the MSB
+	 * indicates a read operation.
+ */
+ addr = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) | ADI_AXI_REG_READ_BIT;
+ axi_adc_raw_write(back, addr);
+ axi_adc_raw_read(back, val);
+
+ /* Write 0x0 on the bus to get back to ADC mode */
+ axi_adc_raw_write(back, 0);
+
+ return 0;
+}
+
+static int ad7606_bus_reg_write(struct iio_backend *back, u32 reg, u32 val)
+{
+ struct adi_axi_adc_state *st = iio_backend_get_priv(back);
+ u32 buf;
+
+ guard(mutex)(&st->lock);
+
+ /* Write any register to switch to register mode */
+ axi_adc_raw_write(back, 0xaf00);
+
+ buf = FIELD_PREP(ADI_AXI_REG_ADDRESS_MASK, reg) |
+ FIELD_PREP(ADI_AXI_REG_VALUE_MASK, val);
+ axi_adc_raw_write(back, buf);
+
+ /* Write 0x0 on the bus to get back to ADC mode */
+ axi_adc_raw_write(back, 0);
+
+ return 0;
+}
+
static void axi_adc_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,
@@ -325,6 +506,36 @@ static const struct regmap_config axi_adc_regmap_config = {
.reg_stride = 4,
};
+static void axi_adc_child_remove(void *data)
+{
+ platform_device_unregister(data);
+}
+
+static int axi_adc_create_platform_device(struct adi_axi_adc_state *st,
+ struct fwnode_handle *child)
+{
+ struct platform_device_info pi = {
+ .parent = st->dev,
+ .name = fwnode_get_name(child),
+ .id = PLATFORM_DEVID_AUTO,
+ .fwnode = child,
+ .data = st->info->pdata,
+ .size_data = st->info->pdata_sz,
+ };
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_register_full(&pi);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ ret = devm_add_action_or_reset(st->dev, axi_adc_child_remove, pdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
static const struct iio_backend_ops adi_axi_adc_ops = {
.enable = axi_adc_enable,
.disable = axi_adc_disable,
@@ -337,6 +548,7 @@ static const struct iio_backend_ops adi_axi_adc_ops = {
.iodelay_set = axi_adc_iodelays_set,
.test_pattern_set = axi_adc_test_pattern_set,
.chan_status = axi_adc_chan_status,
+ .interface_type_get = axi_adc_interface_type_get,
.debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
.debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
};
@@ -346,9 +558,32 @@ static const struct iio_backend_info adi_axi_adc_generic = {
.ops = &adi_axi_adc_ops,
};
+static const struct iio_backend_ops adi_ad485x_ops = {
+ .enable = axi_adc_enable,
+ .disable = axi_adc_disable,
+ .data_format_set = axi_adc_data_format_set,
+ .chan_enable = axi_adc_chan_enable,
+ .chan_disable = axi_adc_chan_disable,
+ .request_buffer = axi_adc_request_buffer,
+ .free_buffer = axi_adc_free_buffer,
+ .data_sample_trigger = axi_adc_data_sample_trigger,
+ .iodelay_set = axi_adc_iodelays_set,
+ .chan_status = axi_adc_chan_status,
+ .interface_type_get = axi_adc_interface_type_get,
+ .data_size_set = axi_adc_ad485x_data_size_set,
+ .oversampling_ratio_set = axi_adc_ad485x_oversampling_ratio_set,
+ .debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
+ .debugfs_print_chan_status =
+ iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
+};
+
+static const struct iio_backend_info axi_ad485x = {
+ .name = "axi-ad485x",
+ .ops = &adi_ad485x_ops,
+};
+
static int adi_axi_adc_probe(struct platform_device *pdev)
{
- const unsigned int *expected_ver;
struct adi_axi_adc_state *st;
void __iomem *base;
unsigned int ver;
@@ -370,8 +605,8 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, PTR_ERR(st->regmap),
"failed to init register map\n");
- expected_ver = device_get_match_data(&pdev->dev);
- if (!expected_ver)
+ st->info = device_get_match_data(&pdev->dev);
+ if (!st->info)
return -ENODEV;
clk = devm_clk_get_enabled(&pdev->dev, NULL);
@@ -391,23 +626,46 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
+ if (ADI_AXI_PCORE_VER_MAJOR(ver) !=
+ ADI_AXI_PCORE_VER_MAJOR(st->info->version)) {
dev_err(&pdev->dev,
"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
- ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
- ADI_AXI_PCORE_VER_MINOR(*expected_ver),
- ADI_AXI_PCORE_VER_PATCH(*expected_ver),
+ ADI_AXI_PCORE_VER_MAJOR(st->info->version),
+ ADI_AXI_PCORE_VER_MINOR(st->info->version),
+ ADI_AXI_PCORE_VER_PATCH(st->info->version),
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
ADI_AXI_PCORE_VER_PATCH(ver));
return -ENODEV;
}
- ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
+ ret = devm_iio_backend_register(&pdev->dev, st->info->backend_info, st);
if (ret)
return dev_err_probe(&pdev->dev, ret,
"failed to register iio backend\n");
+ device_for_each_child_node_scoped(&pdev->dev, child) {
+ int val;
+
+ if (!st->info->has_child_nodes)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "invalid fdt axi-dac compatible.");
+
+ /* Processing only reg 0 node */
+ ret = fwnode_property_read_u32(child, "reg", &val);
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "invalid reg property.");
+ if (val != 0)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "invalid node address.");
+
+ ret = axi_adc_create_platform_device(st, child);
+ if (ret)
+ return dev_err_probe(&pdev->dev, -EINVAL,
+ "cannot create device.");
+ }
+
dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
ADI_AXI_PCORE_VER_MAJOR(ver),
ADI_AXI_PCORE_VER_MINOR(ver),
@@ -416,11 +674,34 @@ static int adi_axi_adc_probe(struct platform_device *pdev)
return 0;
}
-static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');
+static const struct axi_adc_info adc_generic = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+ .backend_info = &adi_axi_adc_generic,
+};
+
+static const struct axi_adc_info adi_axi_ad485x = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+ .backend_info = &axi_ad485x,
+};
+
+static const struct ad7606_platform_data ad7606_pdata = {
+ .bus_reg_read = ad7606_bus_reg_read,
+ .bus_reg_write = ad7606_bus_reg_write,
+};
+
+static const struct axi_adc_info adc_ad7606 = {
+ .version = ADI_AXI_PCORE_VER(10, 0, 'a'),
+ .backend_info = &adi_axi_adc_generic,
+ .pdata = &ad7606_pdata,
+ .pdata_sz = sizeof(ad7606_pdata),
+ .has_child_nodes = true,
+};
/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
- { .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
+ { .compatible = "adi,axi-adc-10.0.a", .data = &adc_generic },
+ { .compatible = "adi,axi-ad485x", .data = &adi_axi_ad485x },
+ { .compatible = "adi,axi-ad7606x", .data = &adc_ad7606 },
{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);
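The ad7606 bus helpers added above pack the register address into the high byte of a 16-bit word and the value into the low byte, with bit 15 set to mark a read. A sketch of the packing with copies of the same masks; the FOO_ names and helpers are illustrative.

#include <linux/bitfield.h>
#include <linux/types.h>

#define FOO_REG_READ_BIT	0x8000
#define FOO_REG_ADDRESS_MASK	0xff00
#define FOO_REG_VALUE_MASK	0x00ff

static u32 foo_pack_reg_write(u32 reg, u32 val)
{
	return FIELD_PREP(FOO_REG_ADDRESS_MASK, reg) |
	       FIELD_PREP(FOO_REG_VALUE_MASK, val);
}

static u32 foo_pack_reg_read(u32 reg)
{
	/* A set MSB tells the IP core this is a read request. */
	return FIELD_PREP(FOO_REG_ADDRESS_MASK, reg) | FOO_REG_READ_BIT;
}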
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 8e5aaf15a921..414610afcb2c 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -9,6 +9,7 @@
*/
#include <linux/bitops.h>
+#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
@@ -329,7 +330,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
#define AT91_HWFIFO_MAX_SIZE_STR "128"
#define AT91_HWFIFO_MAX_SIZE 128
-#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+#define AT91_SAMA_CHAN_SINGLE(index, num, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
@@ -337,7 +338,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 'u', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -350,7 +351,13 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
-#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_SINGLE(index, num, addr) \
+ AT91_SAMA_CHAN_SINGLE(index, num, addr, 16)
+
+#define AT91_SAMA_CHAN_DIFF(index, num, num2, addr, rbits) \
{ \
.type = IIO_VOLTAGE, \
.differential = 1, \
@@ -360,7 +367,7 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.scan_index = index, \
.scan_type = { \
.sign = 's', \
- .realbits = 14, \
+ .realbits = rbits, \
.storagebits = 16, \
}, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
@@ -373,6 +380,12 @@ static const struct at91_adc_reg_layout sama7g5_layout = {
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 14)
+
+#define AT91_SAMA7G5_CHAN_DIFF(index, num, num2, addr) \
+ AT91_SAMA_CHAN_DIFF(index, num, num2, addr, 16)
+
#define AT91_SAMA5D2_CHAN_TOUCH(num, name, mod) \
{ \
.type = IIO_POSITIONRELATIVE, \
@@ -666,30 +679,30 @@ static const struct iio_chan_spec at91_sama5d2_adc_channels[] = {
};
static const struct iio_chan_spec at91_sama7g5_adc_channels[] = {
- AT91_SAMA5D2_CHAN_SINGLE(0, 0, 0x60),
- AT91_SAMA5D2_CHAN_SINGLE(1, 1, 0x64),
- AT91_SAMA5D2_CHAN_SINGLE(2, 2, 0x68),
- AT91_SAMA5D2_CHAN_SINGLE(3, 3, 0x6c),
- AT91_SAMA5D2_CHAN_SINGLE(4, 4, 0x70),
- AT91_SAMA5D2_CHAN_SINGLE(5, 5, 0x74),
- AT91_SAMA5D2_CHAN_SINGLE(6, 6, 0x78),
- AT91_SAMA5D2_CHAN_SINGLE(7, 7, 0x7c),
- AT91_SAMA5D2_CHAN_SINGLE(8, 8, 0x80),
- AT91_SAMA5D2_CHAN_SINGLE(9, 9, 0x84),
- AT91_SAMA5D2_CHAN_SINGLE(10, 10, 0x88),
- AT91_SAMA5D2_CHAN_SINGLE(11, 11, 0x8c),
- AT91_SAMA5D2_CHAN_SINGLE(12, 12, 0x90),
- AT91_SAMA5D2_CHAN_SINGLE(13, 13, 0x94),
- AT91_SAMA5D2_CHAN_SINGLE(14, 14, 0x98),
- AT91_SAMA5D2_CHAN_SINGLE(15, 15, 0x9c),
- AT91_SAMA5D2_CHAN_DIFF(16, 0, 1, 0x60),
- AT91_SAMA5D2_CHAN_DIFF(17, 2, 3, 0x68),
- AT91_SAMA5D2_CHAN_DIFF(18, 4, 5, 0x70),
- AT91_SAMA5D2_CHAN_DIFF(19, 6, 7, 0x78),
- AT91_SAMA5D2_CHAN_DIFF(20, 8, 9, 0x80),
- AT91_SAMA5D2_CHAN_DIFF(21, 10, 11, 0x88),
- AT91_SAMA5D2_CHAN_DIFF(22, 12, 13, 0x90),
- AT91_SAMA5D2_CHAN_DIFF(23, 14, 15, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(0, 0, 0x60),
+ AT91_SAMA7G5_CHAN_SINGLE(1, 1, 0x64),
+ AT91_SAMA7G5_CHAN_SINGLE(2, 2, 0x68),
+ AT91_SAMA7G5_CHAN_SINGLE(3, 3, 0x6c),
+ AT91_SAMA7G5_CHAN_SINGLE(4, 4, 0x70),
+ AT91_SAMA7G5_CHAN_SINGLE(5, 5, 0x74),
+ AT91_SAMA7G5_CHAN_SINGLE(6, 6, 0x78),
+ AT91_SAMA7G5_CHAN_SINGLE(7, 7, 0x7c),
+ AT91_SAMA7G5_CHAN_SINGLE(8, 8, 0x80),
+ AT91_SAMA7G5_CHAN_SINGLE(9, 9, 0x84),
+ AT91_SAMA7G5_CHAN_SINGLE(10, 10, 0x88),
+ AT91_SAMA7G5_CHAN_SINGLE(11, 11, 0x8c),
+ AT91_SAMA7G5_CHAN_SINGLE(12, 12, 0x90),
+ AT91_SAMA7G5_CHAN_SINGLE(13, 13, 0x94),
+ AT91_SAMA7G5_CHAN_SINGLE(14, 14, 0x98),
+ AT91_SAMA7G5_CHAN_SINGLE(15, 15, 0x9c),
+ AT91_SAMA7G5_CHAN_DIFF(16, 0, 1, 0x60),
+ AT91_SAMA7G5_CHAN_DIFF(17, 2, 3, 0x68),
+ AT91_SAMA7G5_CHAN_DIFF(18, 4, 5, 0x70),
+ AT91_SAMA7G5_CHAN_DIFF(19, 6, 7, 0x78),
+ AT91_SAMA7G5_CHAN_DIFF(20, 8, 9, 0x80),
+ AT91_SAMA7G5_CHAN_DIFF(21, 10, 11, 0x88),
+ AT91_SAMA7G5_CHAN_DIFF(22, 12, 13, 0x90),
+ AT91_SAMA7G5_CHAN_DIFF(23, 14, 15, 0x98),
IIO_CHAN_SOFT_TIMESTAMP(24),
AT91_SAMA5D2_CHAN_TEMP(AT91_SAMA7G5_ADC_TEMP_CHANNEL, "temp", 0xdc),
};
@@ -1814,19 +1827,10 @@ static int at91_adc_read_info_locked(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val)
{
struct at91_adc_state *st = iio_priv(indio_dev);
- int ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ guard(mutex)(&st->lock);
- mutex_lock(&st->lock);
- ret = at91_adc_read_info_raw(indio_dev, chan, val);
- mutex_unlock(&st->lock);
-
- iio_device_release_direct_mode(indio_dev);
-
- return ret;
+ return at91_adc_read_info_raw(indio_dev, chan, val);
}
static void at91_adc_temp_sensor_configure(struct at91_adc_state *st,
@@ -1871,14 +1875,11 @@ static int at91_adc_read_temp(struct iio_dev *indio_dev,
u32 tmp;
int ret, vbg, vtemp;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
ret = pm_runtime_resume_and_get(st->dev);
if (ret < 0)
- goto unlock;
+ return ret;
at91_adc_temp_sensor_configure(st, true);
@@ -1900,9 +1901,6 @@ restore_config:
at91_adc_temp_sensor_configure(st, false);
pm_runtime_mark_last_busy(st->dev);
pm_runtime_put_autosuspend(st->dev);
-unlock:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -1924,10 +1922,16 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- return at91_adc_read_info_locked(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = at91_adc_read_info_locked(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
@@ -1939,7 +1943,13 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
if (chan->type != IIO_TEMP)
return -EINVAL;
- return at91_adc_read_temp(indio_dev, chan, val);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = at91_adc_read_temp(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
*val = at91_adc_get_sample_freq(st);
@@ -1967,28 +1977,26 @@ static int at91_adc_write_raw(struct iio_dev *indio_dev,
if (val == st->oversampling_ratio)
return 0;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
/* update ratio */
ret = at91_adc_config_emr(st, val, 0);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (val < st->soc_info.min_sample_rate ||
val > st->soc_info.max_sample_rate)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&st->lock);
at91_adc_setup_samp_freq(indio_dev, val,
st->soc_info.startup_time, 0);
mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
default:
return -EINVAL;
diff --git a/drivers/iio/adc/dln2-adc.c b/drivers/iio/adc/dln2-adc.c
index 221a5fdc1eaa..a1e48a756a7b 100644
--- a/drivers/iio/adc/dln2-adc.c
+++ b/drivers/iio/adc/dln2-adc.c
@@ -314,15 +314,14 @@ static int dln2_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&dln2->mutex);
ret = dln2_adc_read(dln2, chan->channel);
mutex_unlock(&dln2->mutex);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/max1027.c b/drivers/iio/adc/max1027.c
index f5ba4a1b5a7d..7e736e77d8bb 100644
--- a/drivers/iio/adc/max1027.c
+++ b/drivers/iio/adc/max1027.c
@@ -336,10 +336,6 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
int ret;
struct max1027_state *st = iio_priv(indio_dev);
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
/* Configure conversion register with the requested chan */
st->reg = MAX1027_CONV_REG | MAX1027_CHAN(chan->channel) |
MAX1027_NOSCAN;
@@ -349,7 +345,7 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
if (ret < 0) {
dev_err(&indio_dev->dev,
"Failed to configure conversion register\n");
- goto release;
+ return ret;
}
/*
@@ -359,14 +355,10 @@ static int max1027_read_single_value(struct iio_dev *indio_dev,
*/
ret = max1027_wait_eoc(indio_dev);
if (ret)
- goto release;
+ return ret;
/* Read result */
ret = spi_read(st->spi, st->buffer, (chan->type == IIO_TEMP) ? 4 : 2);
-
-release:
- iio_device_release_direct_mode(indio_dev);
-
if (ret < 0)
return ret;
@@ -382,37 +374,32 @@ static int max1027_read_raw(struct iio_dev *indio_dev,
int ret = 0;
struct max1027_state *st = iio_priv(indio_dev);
- mutex_lock(&st->lock);
+ guard(mutex)(&st->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = max1027_read_single_value(indio_dev, chan, val);
- break;
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_TEMP:
*val = 1;
*val2 = 8;
- ret = IIO_VAL_FRACTIONAL;
- break;
+ return IIO_VAL_FRACTIONAL;
case IIO_VOLTAGE:
*val = 2500;
*val2 = chan->scan_type.realbits;
- ret = IIO_VAL_FRACTIONAL_LOG2;
- break;
+ return IIO_VAL_FRACTIONAL_LOG2;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- break;
default:
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
-
- mutex_unlock(&st->lock);
-
- return ret;
}
static int max1027_debugfs_reg_access(struct iio_dev *indio_dev,
diff --git a/drivers/iio/adc/max11410.c b/drivers/iio/adc/max11410.c
index 76abafd47404..437d9f24b5a1 100644
--- a/drivers/iio/adc/max11410.c
+++ b/drivers/iio/adc/max11410.c
@@ -471,9 +471,8 @@ static int max11410_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
mutex_lock(&state->lock);
@@ -481,7 +480,7 @@ static int max11410_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&state->lock);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -507,12 +506,37 @@ static int max11410_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static int __max11410_write_samp_freq(struct max11410_state *st,
+ int val, int val2)
+{
+ int ret, i, reg_val, filter;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_read(st->regmap, MAX11410_REG_FILTER, &reg_val);
+ if (ret)
+ return ret;
+
+ filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val);
+
+ for (i = 0; i < max11410_sampling_len[filter]; ++i) {
+ if (val == max11410_sampling_rates[filter][i][0] &&
+ val2 == max11410_sampling_rates[filter][i][1])
+ break;
+ }
+ if (i == max11410_sampling_len[filter])
+ return -EINVAL;
+
+ return regmap_write_bits(st->regmap, MAX11410_REG_FILTER,
+ MAX11410_FILTER_RATE_MASK, i);
+}
+
static int max11410_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct max11410_state *st = iio_priv(indio_dev);
- int i, ret, reg_val, filter, gain;
+ int ret, gain;
u32 *scale_avail;
switch (mask) {
@@ -525,9 +549,8 @@ static int max11410_write_raw(struct iio_dev *indio_dev,
if (val != 0 || val2 == 0)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
/* Convert from INT_PLUS_MICRO to FRACTIONAL_LOG2 */
val2 = val2 * DIV_ROUND_CLOSEST(BIT(24), 1000000);
@@ -536,38 +559,15 @@ static int max11410_write_raw(struct iio_dev *indio_dev,
st->channels[chan->address].gain = clamp_val(gain, 0, 7);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return 0;
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
-
- mutex_lock(&st->lock);
-
- ret = regmap_read(st->regmap, MAX11410_REG_FILTER, &reg_val);
- if (ret)
- goto out;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val);
-
- for (i = 0; i < max11410_sampling_len[filter]; ++i) {
- if (val == max11410_sampling_rates[filter][i][0] &&
- val2 == max11410_sampling_rates[filter][i][1])
- break;
- }
- if (i == max11410_sampling_len[filter]) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER,
- MAX11410_FILTER_RATE_MASK, i);
-
-out:
- mutex_unlock(&st->lock);
- iio_device_release_direct_mode(indio_dev);
+ ret = __max11410_write_samp_freq(st, val, val2);
+ iio_device_release_direct(indio_dev);
return ret;
default:
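Several of the refactored helpers above (ad9467, at91-sama5d2, max11410) lean on guard(mutex) from <linux/cleanup.h>: the lock is released automatically on every return path, which is why the early returns no longer need unlock labels. A minimal sketch with illustrative foo_ names.

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct foo_state {
	struct mutex lock;
	int cached;
};

static int foo_update_cached(struct foo_state *st, int val)
{
	guard(mutex)(&st->lock);

	if (val < 0)
		return -EINVAL;	/* the guard drops the lock here too */

	st->cached = val;

	return 0;
}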
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e8d731bc34e0..35717ec082ce 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -364,55 +364,52 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int *val,
long m)
{
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- s32 data;
- u8 rxbuf[2];
- struct max1363_state *st = iio_priv(indio_dev);
- struct i2c_client *client = st->client;
-
- guard(mutex)(&st->lock);
-
- /*
- * If monitor mode is enabled, the method for reading a single
- * channel will have to be rather different and has not yet
- * been implemented.
- *
- * Also, cannot read directly if buffered capture enabled.
- */
- if (st->monitor_on)
- return -EBUSY;
+ s32 data;
+ u8 rxbuf[2];
+ struct max1363_state *st = iio_priv(indio_dev);
+ struct i2c_client *client = st->client;
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- int ret;
+ guard(mutex)(&st->lock);
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- return ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = st->recv(client, rxbuf, 2);
- if (data < 0)
- return data;
-
- data = get_unaligned_be16(rxbuf) &
- ((1 << st->chip_info->bits) - 1);
- } else {
- /* Get reading */
- data = st->recv(client, rxbuf, 1);
- if (data < 0)
- return data;
-
- data = rxbuf[0];
- }
- *val = data;
+ /*
+ * If monitor mode is enabled, the method for reading a single
+ * channel will have to be rather different and has not yet
+ * been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
+ */
+ if (st->monitor_on)
+ return -EBUSY;
+
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ int ret;
+
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ return ret;
+ }
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 2);
+ if (data < 0)
+ return data;
+
+ data = get_unaligned_be16(rxbuf) &
+ ((1 << st->chip_info->bits) - 1);
+ } else {
+ /* Get reading */
+ data = st->recv(client, rxbuf, 1);
+ if (data < 0)
+ return data;
- return 0;
+ data = rxbuf[0];
}
- unreachable();
+ *val = data;
+
+ return 0;
}
static int max1363_read_raw(struct iio_dev *indio_dev,
@@ -426,7 +423,11 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
ret = max1363_read_single_chan(indio_dev, chan, val, m);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
return IIO_VAL_INT;
@@ -947,46 +948,58 @@ error_ret:
return ret;
}
-static int max1363_write_event_config(struct iio_dev *indio_dev,
- const struct iio_chan_spec *chan, enum iio_event_type type,
+static int __max1363_write_event_config(struct max1363_state *st,
+ const struct iio_chan_spec *chan,
enum iio_event_direction dir, bool state)
{
- struct max1363_state *st = iio_priv(indio_dev);
-
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- int number = chan->channel;
- u16 unifiedmask;
- int ret;
+ int number = chan->channel;
+ u16 unifiedmask;
+ int ret;
- guard(mutex)(&st->lock);
+ guard(mutex)(&st->lock);
- unifiedmask = st->mask_low | st->mask_high;
- if (dir == IIO_EV_DIR_FALLING) {
+ unifiedmask = st->mask_low | st->mask_high;
+ if (dir == IIO_EV_DIR_FALLING) {
- if (state == 0)
- st->mask_low &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- return ret;
- st->mask_low |= (1 << number);
- }
- } else {
- if (state == 0)
- st->mask_high &= ~(1 << number);
- else {
- ret = __max1363_check_event_mask((1 << number),
- unifiedmask);
- if (ret)
- return ret;
- st->mask_high |= (1 << number);
- }
+ if (state == 0)
+ st->mask_low &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_low |= (1 << number);
+ }
+ } else {
+ if (state == 0)
+ st->mask_high &= ~(1 << number);
+ else {
+ ret = __max1363_check_event_mask((1 << number),
+ unifiedmask);
+ if (ret)
+ return ret;
+ st->mask_high |= (1 << number);
}
}
- max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
return 0;
+}
+
+static int max1363_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct max1363_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __max1363_write_event_config(st, chan, dir, state);
+ iio_device_release_direct(indio_dev);
+ max1363_monitor_mode_update(st, !!(st->mask_high | st->mask_low));
+
+ return ret;
}
/*
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c
index 971e6e5dee9b..4f45fd22a90c 100644
--- a/drivers/iio/adc/max34408.c
+++ b/drivers/iio/adc/max34408.c
@@ -8,6 +8,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/module.h>
diff --git a/drivers/iio/adc/pac1921.c b/drivers/iio/adc/pac1921.c
index 90f61c47b1c4..beb5511c4504 100644
--- a/drivers/iio/adc/pac1921.c
+++ b/drivers/iio/adc/pac1921.c
@@ -7,6 +7,7 @@
#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/i2c.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
@@ -1198,11 +1199,11 @@ static int pac1921_match_acpi_device(struct iio_dev *indio_dev)
label = devm_kstrdup(dev, status->package.elements[0].string.pointer,
GFP_KERNEL);
+ ACPI_FREE(status);
if (!label)
return -ENOMEM;
indio_dev->label = label;
- ACPI_FREE(status);
return 0;
}
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index a29e54754c8f..9a099df79518 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Rockchip Successive Approximation Register (SAR) A/D Converter
- * Copyright (C) 2014 ROCKCHIP, Inc.
+ * Copyright (C) 2014 Rockchip Electronics Co., Ltd.
*/
#include <linux/bitfield.h>
@@ -275,6 +275,40 @@ static const struct rockchip_saradc_data rk3399_saradc_data = {
.power_down = rockchip_saradc_power_down_v1,
};
+static const struct iio_chan_spec rockchip_rk3528_saradc_iio_channels[] = {
+ SARADC_CHANNEL(0, "adc0", 10),
+ SARADC_CHANNEL(1, "adc1", 10),
+ SARADC_CHANNEL(2, "adc2", 10),
+ SARADC_CHANNEL(3, "adc3", 10),
+};
+
+static const struct rockchip_saradc_data rk3528_saradc_data = {
+ .channels = rockchip_rk3528_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3528_saradc_iio_channels),
+ .clk_rate = 1000000,
+ .start = rockchip_saradc_start_v2,
+ .read = rockchip_saradc_read_v2,
+};
+
+static const struct iio_chan_spec rockchip_rk3562_saradc_iio_channels[] = {
+ SARADC_CHANNEL(0, "adc0", 10),
+ SARADC_CHANNEL(1, "adc1", 10),
+ SARADC_CHANNEL(2, "adc2", 10),
+ SARADC_CHANNEL(3, "adc3", 10),
+ SARADC_CHANNEL(4, "adc4", 10),
+ SARADC_CHANNEL(5, "adc5", 10),
+ SARADC_CHANNEL(6, "adc6", 10),
+ SARADC_CHANNEL(7, "adc7", 10),
+};
+
+static const struct rockchip_saradc_data rk3562_saradc_data = {
+ .channels = rockchip_rk3562_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3562_saradc_iio_channels),
+ .clk_rate = 1000000,
+ .start = rockchip_saradc_start_v2,
+ .read = rockchip_saradc_read_v2,
+};
+
static const struct iio_chan_spec rockchip_rk3568_saradc_iio_channels[] = {
SARADC_CHANNEL(0, "adc0", 10),
SARADC_CHANNEL(1, "adc1", 10),
@@ -325,6 +359,12 @@ static const struct of_device_id rockchip_saradc_match[] = {
.compatible = "rockchip,rk3399-saradc",
.data = &rk3399_saradc_data,
}, {
+ .compatible = "rockchip,rk3528-saradc",
+ .data = &rk3528_saradc_data,
+ }, {
+ .compatible = "rockchip,rk3562-saradc",
+ .data = &rk3562_saradc_data,
+ }, {
.compatible = "rockchip,rk3568-saradc",
.data = &rk3568_saradc_data,
}, {
diff --git a/drivers/iio/adc/rtq6056.c b/drivers/iio/adc/rtq6056.c
index 337bc8b31b2c..54239df61d86 100644
--- a/drivers/iio/adc/rtq6056.c
+++ b/drivers/iio/adc/rtq6056.c
@@ -514,26 +514,37 @@ static int rtq6056_adc_read_avail(struct iio_dev *indio_dev,
}
}
-static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan, int val,
- int val2, long mask)
+static int __rtq6056_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ long mask)
{
struct rtq6056_priv *priv = iio_priv(indio_dev);
const struct richtek_dev_data *devdata = priv->devdata;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- if (devdata->fixed_samp_freq)
- return -EINVAL;
- return rtq6056_adc_set_samp_freq(priv, chan, val);
- case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- return devdata->set_average(priv, val);
- default:
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (devdata->fixed_samp_freq)
return -EINVAL;
- }
+ return rtq6056_adc_set_samp_freq(priv, chan, val);
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ return devdata->set_average(priv, val);
+ default:
+ return -EINVAL;
}
- unreachable();
+}
+
+static int rtq6056_adc_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ int ret;
+
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __rtq6056_adc_write_raw(indio_dev, chan, val, mask);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static const char *rtq6056_channel_labels[RTQ6056_MAX_CHANNEL] = {
@@ -590,9 +601,8 @@ static ssize_t shunt_resistor_store(struct device *dev,
struct rtq6056_priv *priv = iio_priv(indio_dev);
int val, val_fract, ret;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = iio_str_to_fixpoint(buf, 100000, &val, &val_fract);
if (ret)
@@ -601,7 +611,7 @@ static ssize_t shunt_resistor_store(struct device *dev,
ret = rtq6056_set_shunt_resistor(priv, val * 1000000 + val_fract);
out_store:
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret ?: len;
}
diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c
index 2201ee9987ae..0914148d1a22 100644
--- a/drivers/iio/adc/stm32-adc-core.c
+++ b/drivers/iio/adc/stm32-adc-core.c
@@ -615,8 +615,7 @@ static int stm32_adc_core_switches_probe(struct device *dev,
}
/* Booster can be used to supply analog switches (optional) */
- if (priv->cfg->has_syscfg & HAS_VBOOSTER &&
- of_property_read_bool(np, "booster-supply")) {
+ if (priv->cfg->has_syscfg & HAS_VBOOSTER) {
priv->booster = devm_regulator_get_optional(dev, "booster");
if (IS_ERR(priv->booster)) {
ret = PTR_ERR(priv->booster);
@@ -628,8 +627,7 @@ static int stm32_adc_core_switches_probe(struct device *dev,
}
/* Vdd can be used to supply analog switches (optional) */
- if (priv->cfg->has_syscfg & HAS_ANASWVDD &&
- of_property_read_bool(np, "vdd-supply")) {
+ if (priv->cfg->has_syscfg & HAS_ANASWVDD) {
priv->vdd = devm_regulator_get_optional(dev, "vdd");
if (IS_ERR(priv->vdd)) {
ret = PTR_ERR(priv->vdd);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 9d3b23efcc06..5dbf5f136768 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -1471,9 +1471,8 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
if (chan->type == IIO_VOLTAGE)
ret = stm32_adc_single_conv(indio_dev, chan, val);
else
@@ -1482,7 +1481,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev,
if (mask == IIO_CHAN_INFO_PROCESSED)
*val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val;
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index fe11b0d8eab3..726ddafc9f6d 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1275,9 +1275,8 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = stm32_dfsdm_compute_all_osrs(indio_dev, val);
if (!ret) {
@@ -1287,25 +1286,56 @@ static int stm32_dfsdm_write_raw(struct iio_dev *indio_dev,
adc->oversamp = val;
adc->sample_freq = spi_freq / val;
}
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
if (!val)
return -EINVAL;
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = dfsdm_adc_set_samp_freq(indio_dev, val, spi_freq);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
return -EINVAL;
}
+static int __stm32_dfsdm_read_info_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct stm32_dfsdm_adc *adc = iio_priv(indio_dev);
+ int ret = 0;
+
+ if (adc->hwc)
+ ret = iio_hw_consumer_enable(adc->hwc);
+ if (adc->backend)
+ ret = iio_backend_enable(adc->backend[chan->scan_index]);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: IIO enable failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+ ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
+ if (adc->hwc)
+ iio_hw_consumer_disable(adc->hwc);
+ if (adc->backend)
+ iio_backend_disable(adc->backend[chan->scan_index]);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev,
+ "%s: Conversion failed (channel %d)\n",
+ __func__, chan->channel);
+ return ret;
+ }
+
+ return 0;
+}
+
static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan, int *val,
int *val2, long mask)
@@ -1323,33 +1353,13 @@ static int stm32_dfsdm_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = __stm32_dfsdm_read_info_raw(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
- if (adc->hwc)
- ret = iio_hw_consumer_enable(adc->hwc);
- if (adc->backend)
- ret = iio_backend_enable(adc->backend[idx]);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "%s: IIO enable failed (channel %d)\n",
- __func__, chan->channel);
- iio_device_release_direct_mode(indio_dev);
- return ret;
- }
- ret = stm32_dfsdm_single_conv(indio_dev, chan, val);
- if (adc->hwc)
- iio_hw_consumer_disable(adc->hwc);
- if (adc->backend)
- iio_backend_disable(adc->backend[idx]);
- if (ret < 0) {
- dev_err(&indio_dev->dev,
- "%s: Conversion failed (channel %d)\n",
- __func__, chan->channel);
- iio_device_release_direct_mode(indio_dev);
- return ret;
- }
- iio_device_release_direct_mode(indio_dev);
return IIO_VAL_INT;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
diff --git a/drivers/iio/adc/ti-adc084s021.c b/drivers/iio/adc/ti-adc084s021.c
index da16876c32ae..9c845ee01697 100644
--- a/drivers/iio/adc/ti-adc084s021.c
+++ b/drivers/iio/adc/ti-adc084s021.c
@@ -96,19 +96,18 @@ static int adc084s021_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret < 0)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = regulator_enable(adc->reg);
if (ret) {
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
adc->tx_buf[0] = channel->channel << 3;
ret = adc084s021_adc_conversion(adc, &be_val);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
regulator_disable(adc->reg);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ti-adc108s102.c b/drivers/iio/adc/ti-adc108s102.c
index 9758ac801310..7d615e2bbf39 100644
--- a/drivers/iio/adc/ti-adc108s102.c
+++ b/drivers/iio/adc/ti-adc108s102.c
@@ -181,13 +181,12 @@ static int adc108s102_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = adc108s102_scan_direct(st, chan->address);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
diff --git a/drivers/iio/adc/ti-adc161s626.c b/drivers/iio/adc/ti-adc161s626.c
index 474e733fb8e0..28aa6b80160c 100644
--- a/drivers/iio/adc/ti-adc161s626.c
+++ b/drivers/iio/adc/ti-adc161s626.c
@@ -137,13 +137,13 @@ static int ti_adc_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = ti_adc_read_measurement(data, chan, val);
- if (ret)
- return ret;
- return IIO_VAL_INT;
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ti_adc_read_measurement(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
ret = regulator_get_voltage(data->ref);
if (ret < 0)
diff --git a/drivers/iio/adc/ti-ads1119.c b/drivers/iio/adc/ti-ads1119.c
index de019b3faa48..f120e7e21cff 100644
--- a/drivers/iio/adc/ti-ads1119.c
+++ b/drivers/iio/adc/ti-ads1119.c
@@ -336,19 +336,24 @@ static int ads1119_read_raw(struct iio_dev *indio_dev,
{
struct ads1119_state *st = iio_priv(indio_dev);
unsigned int index = chan->address;
+ int ret;
if (index >= st->num_channels_cfg)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ads1119_single_conversion(st, chan, val, false);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ads1119_single_conversion(st, chan, val, false);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_OFFSET:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ads1119_single_conversion(st, chan, val, true);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ads1119_single_conversion(st, chan, val, true);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uV / 1000;
*val /= st->channels_cfg[index].gain;
diff --git a/drivers/iio/adc/ti-ads124s08.c b/drivers/iio/adc/ti-ads124s08.c
index f452f57f11c9..77c299bb4ebc 100644
--- a/drivers/iio/adc/ti-ads124s08.c
+++ b/drivers/iio/adc/ti-ads124s08.c
@@ -184,7 +184,7 @@ static int ads124s_reset(struct iio_dev *indio_dev)
if (priv->reset_gpio) {
gpiod_set_value_cansleep(priv->reset_gpio, 0);
- udelay(200);
+ fsleep(200);
gpiod_set_value_cansleep(priv->reset_gpio, 1);
} else {
return ads124s_write_cmd(indio_dev, ADS124S08_CMD_RESET);
diff --git a/drivers/iio/adc/ti-ads1298.c b/drivers/iio/adc/ti-ads1298.c
index 03f762415fa5..ae30b47e4514 100644
--- a/drivers/iio/adc/ti-ads1298.c
+++ b/drivers/iio/adc/ti-ads1298.c
@@ -319,13 +319,12 @@ static int ads1298_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads1298_read_one(priv, chan->scan_index);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c
index 91a79ebc4bde..c6096b64664e 100644
--- a/drivers/iio/adc/ti-ads131e08.c
+++ b/drivers/iio/adc/ti-ads131e08.c
@@ -505,12 +505,11 @@ static int ads131e08_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads131e08_read_direct(indio_dev, channel, value);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret)
return ret;
@@ -551,12 +550,11 @@ static int ads131e08_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = ads131e08_set_data_rate(st, value);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
default:
diff --git a/drivers/iio/adc/ti-ads7138.c b/drivers/iio/adc/ti-ads7138.c
new file mode 100644
index 000000000000..ee5c1b8e3a8e
--- /dev/null
+++ b/drivers/iio/adc/ti-ads7138.c
@@ -0,0 +1,749 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * ADS7138 - Texas Instruments Analog-to-Digital Converter
+ */
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/unaligned.h>
+
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+
+/*
+ * Always assume 16-bit resolution, as the HW registers are aligned that way
+ * and, with oversampling/averaging enabled, the result actually corresponds
+ * to 16 bits.
+ */
+#define ADS7138_RES_BITS 16
+
+/* ADS7138 operation codes */
+#define ADS7138_OPCODE_SINGLE_WRITE 0x08
+#define ADS7138_OPCODE_SET_BIT 0x18
+#define ADS7138_OPCODE_CLEAR_BIT 0x20
+#define ADS7138_OPCODE_BLOCK_WRITE 0x28
+#define ADS7138_OPCODE_BLOCK_READ 0x30
+
+/* ADS7138 registers */
+#define ADS7138_REG_GENERAL_CFG 0x01
+#define ADS7138_REG_OSR_CFG 0x03
+#define ADS7138_REG_OPMODE_CFG 0x04
+#define ADS7138_REG_SEQUENCE_CFG 0x10
+#define ADS7138_REG_AUTO_SEQ_CH_SEL 0x12
+#define ADS7138_REG_ALERT_CH_SEL 0x14
+#define ADS7138_REG_EVENT_FLAG 0x18
+#define ADS7138_REG_EVENT_HIGH_FLAG 0x1A
+#define ADS7138_REG_EVENT_LOW_FLAG 0x1C
+#define ADS7138_REG_HIGH_TH_HYS_CH(x) ((x) * 4 + 0x20)
+#define ADS7138_REG_LOW_TH_CNT_CH(x) ((x) * 4 + 0x22)
+#define ADS7138_REG_MAX_LSB_CH(x) ((x) * 2 + 0x60)
+#define ADS7138_REG_MIN_LSB_CH(x) ((x) * 2 + 0x80)
+#define ADS7138_REG_RECENT_LSB_CH(x) ((x) * 2 + 0xA0)
+
+#define ADS7138_GENERAL_CFG_RST BIT(0)
+#define ADS7138_GENERAL_CFG_DWC_EN BIT(4)
+#define ADS7138_GENERAL_CFG_STATS_EN BIT(5)
+#define ADS7138_OSR_CFG_MASK GENMASK(2, 0)
+#define ADS7138_OPMODE_CFG_CONV_MODE BIT(5)
+#define ADS7138_OPMODE_CFG_FREQ_MASK GENMASK(4, 0)
+#define ADS7138_SEQUENCE_CFG_SEQ_MODE BIT(0)
+#define ADS7138_SEQUENCE_CFG_SEQ_START BIT(4)
+#define ADS7138_THRESHOLD_LSB_MASK GENMASK(7, 4)
+
+enum ads7138_modes {
+ ADS7138_MODE_MANUAL,
+ ADS7138_MODE_AUTO,
+};
+
+struct ads7138_chip_data {
+ const char *name;
+ const int channel_num;
+};
+
+struct ads7138_data {
+ /* Protects RMW access to the I2C interface */
+ struct mutex lock;
+ struct i2c_client *client;
+ struct regulator *vref_regu;
+ const struct ads7138_chip_data *chip_data;
+};
+
+/*
+ * 2D array of available sampling frequencies and the corresponding register
+ * values. Structured like this to be directly usable in the read_avail() callback.
+ */
+static const int ads7138_samp_freqs_bits[2][26] = {
+ {
+ 163, 244, 326, 488, 651, 977, 1302, 1953,
+ 2604, 3906, 5208, 7813, 10417, 15625, 20833, 31250,
+ 41667, 62500, 83333, 125000, 166667, 250000, 333333, 500000,
+ 666667, 1000000
+ }, {
+ 0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
+ 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
+		/* Hole here: 0x0a-0x0f are skipped, as they map to duplicate frequencies */
+ 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02,
+ 0x01, 0x00
+ }
+};
+
+static const int ads7138_oversampling_ratios[] = {
+ 1, 2, 4, 8, 16, 32, 64, 128
+};
+
+static int ads7138_i2c_write_block(const struct i2c_client *client, u8 reg,
+ u8 *values, u8 length)
+{
+ int ret;
+ int len = length + 2; /* "+ 2" for OPCODE and reg */
+
+ u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[0] = ADS7138_OPCODE_BLOCK_WRITE;
+ buf[1] = reg;
+ memcpy(&buf[2], values, length);
+
+ ret = i2c_master_send(client, buf, len);
+ if (ret < 0)
+ return ret;
+ if (ret != len)
+ return -EIO;
+
+ return 0;
+}
+
+static int ads7138_i2c_write_with_opcode(const struct i2c_client *client,
+ u8 reg, u8 regval, u8 opcode)
+{
+ u8 buf[3] = { opcode, reg, regval };
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(buf))
+ return -EIO;
+
+ return 0;
+}
+
+static int ads7138_i2c_write(const struct i2c_client *client, u8 reg, u8 value)
+{
+ return ads7138_i2c_write_with_opcode(client, reg, value,
+ ADS7138_OPCODE_SINGLE_WRITE);
+}
+
+static int ads7138_i2c_set_bit(const struct i2c_client *client, u8 reg, u8 bits)
+{
+ return ads7138_i2c_write_with_opcode(client, reg, bits,
+ ADS7138_OPCODE_SET_BIT);
+}
+
+static int ads7138_i2c_clear_bit(const struct i2c_client *client, u8 reg, u8 bits)
+{
+ return ads7138_i2c_write_with_opcode(client, reg, bits,
+ ADS7138_OPCODE_CLEAR_BIT);
+}
+
+static int ads7138_i2c_read_block(const struct i2c_client *client, u8 reg,
+ u8 *out_values, u8 length)
+{
+ u8 buf[2] = { ADS7138_OPCODE_BLOCK_READ, reg };
+ int ret;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = client->addr,
+ .len = ARRAY_SIZE(buf),
+ .buf = buf,
+ },
+ {
+ .addr = client->addr,
+ .flags = I2C_M_RD,
+ .len = length,
+ .buf = out_values,
+ },
+ };
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ return 0;
+}
+
+static int ads7138_i2c_read(const struct i2c_client *client, u8 reg)
+{
+ u8 value;
+ int ret;
+
+ ret = ads7138_i2c_read_block(client, reg, &value, sizeof(value));
+ if (ret)
+ return ret;
+ return value;
+}
+
+static int ads7138_freq_to_bits(int freq)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ads7138_samp_freqs_bits[0]); i++)
+ if (freq == ads7138_samp_freqs_bits[0][i])
+ return ads7138_samp_freqs_bits[1][i];
+
+ return -EINVAL;
+}
+
+static int ads7138_bits_to_freq(int bits)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ads7138_samp_freqs_bits[1]); i++)
+ if (bits == ads7138_samp_freqs_bits[1][i])
+ return ads7138_samp_freqs_bits[0][i];
+
+ return -EINVAL;
+}
+
+static int ads7138_osr_to_bits(int osr)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ads7138_oversampling_ratios); i++)
+ if (osr == ads7138_oversampling_ratios[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int ads7138_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ int ret, vref, bits;
+ u8 values[2];
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = ads7138_i2c_read_block(data->client,
+ ADS7138_REG_RECENT_LSB_CH(chan->channel),
+ values, ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le16(values);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PEAK:
+ ret = ads7138_i2c_read_block(data->client,
+ ADS7138_REG_MAX_LSB_CH(chan->channel),
+ values, ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le16(values);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_TROUGH:
+ ret = ads7138_i2c_read_block(data->client,
+ ADS7138_REG_MIN_LSB_CH(chan->channel),
+ values, ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = get_unaligned_le16(values);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_OPMODE_CFG);
+ if (ret < 0)
+ return ret;
+
+ bits = FIELD_GET(ADS7138_OPMODE_CFG_FREQ_MASK, ret);
+ *val = ads7138_bits_to_freq(bits);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ vref = regulator_get_voltage(data->vref_regu);
+ if (vref < 0)
+ return vref;
+ *val = vref / 1000;
+ *val2 = ADS7138_RES_BITS;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_OSR_CFG);
+ if (ret < 0)
+ return ret;
+
+ bits = FIELD_GET(ADS7138_OSR_CFG_MASK, ret);
+ *val = ads7138_oversampling_ratios[bits];
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ int bits, ret;
+ u8 value;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ: {
+ bits = ads7138_freq_to_bits(val);
+ if (bits < 0)
+ return bits;
+
+ guard(mutex)(&data->lock);
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_OPMODE_CFG);
+ if (ret < 0)
+ return ret;
+
+ value = ret & ~ADS7138_OPMODE_CFG_FREQ_MASK;
+ value |= FIELD_PREP(ADS7138_OPMODE_CFG_FREQ_MASK, bits);
+ return ads7138_i2c_write(data->client, ADS7138_REG_OPMODE_CFG,
+ value);
+ }
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ bits = ads7138_osr_to_bits(val);
+ if (bits < 0)
+ return bits;
+
+ return ads7138_i2c_write(data->client, ADS7138_REG_OSR_CFG,
+ bits);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ u8 reg, values[2];
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE:
+ reg = (dir == IIO_EV_DIR_RISING) ?
+ ADS7138_REG_HIGH_TH_HYS_CH(chan->channel) :
+ ADS7138_REG_LOW_TH_CNT_CH(chan->channel);
+ ret = ads7138_i2c_read_block(data->client, reg, values,
+ ARRAY_SIZE(values));
+ if (ret)
+ return ret;
+
+ *val = ((values[1] << 4) | (values[0] >> 4));
+ return IIO_VAL_INT;
+ case IIO_EV_INFO_HYSTERESIS:
+ ret = ads7138_i2c_read(data->client,
+ ADS7138_REG_HIGH_TH_HYS_CH(chan->channel));
+ if (ret < 0)
+ return ret;
+
+ *val = ret & ~ADS7138_THRESHOLD_LSB_MASK;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ u8 reg, values[2];
+ int ret;
+
+ switch (info) {
+ case IIO_EV_INFO_VALUE: {
+ if (val >= BIT(12) || val < 0)
+ return -EINVAL;
+
+ reg = (dir == IIO_EV_DIR_RISING) ?
+ ADS7138_REG_HIGH_TH_HYS_CH(chan->channel) :
+ ADS7138_REG_LOW_TH_CNT_CH(chan->channel);
+
+ guard(mutex)(&data->lock);
+ ret = ads7138_i2c_read(data->client, reg);
+ if (ret < 0)
+ return ret;
+
+ values[0] = ret & ~ADS7138_THRESHOLD_LSB_MASK;
+ values[0] |= FIELD_PREP(ADS7138_THRESHOLD_LSB_MASK, val);
+ values[1] = (val >> 4);
+ return ads7138_i2c_write_block(data->client, reg, values,
+ ARRAY_SIZE(values));
+ }
+ case IIO_EV_INFO_HYSTERESIS: {
+ if (val >= BIT(4) || val < 0)
+ return -EINVAL;
+
+ reg = ADS7138_REG_HIGH_TH_HYS_CH(chan->channel);
+
+ guard(mutex)(&data->lock);
+ ret = ads7138_i2c_read(data->client, reg);
+ if (ret < 0)
+ return ret;
+
+ values[0] = val & ~ADS7138_THRESHOLD_LSB_MASK;
+ values[0] |= FIELD_PREP(ADS7138_THRESHOLD_LSB_MASK, ret >> 4);
+ return ads7138_i2c_write(data->client, reg, values[0]);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ads7138_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_ALERT_CH_SEL);
+ if (ret < 0)
+ return ret;
+
+ return (ret & BIT(chan->channel)) ? 1 : 0;
+}
+
+static int ads7138_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct ads7138_data *data = iio_priv(indio_dev);
+
+ if (dir != IIO_EV_DIR_EITHER)
+ return -EINVAL;
+
+ if (state)
+ return ads7138_i2c_set_bit(data->client,
+ ADS7138_REG_ALERT_CH_SEL,
+ BIT(chan->channel));
+ else
+ return ads7138_i2c_clear_bit(data->client,
+ ADS7138_REG_ALERT_CH_SEL,
+ BIT(chan->channel));
+}
+
+static int ads7138_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *vals = ads7138_samp_freqs_bits[0];
+ *length = ARRAY_SIZE(ads7138_samp_freqs_bits[0]);
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ *vals = ads7138_oversampling_ratios;
+ *length = ARRAY_SIZE(ads7138_oversampling_ratios);
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info ti_ads7138_info = {
+ .read_raw = &ads7138_read_raw,
+ .read_avail = &ads7138_read_avail,
+ .write_raw = &ads7138_write_raw,
+ .read_event_value = &ads7138_read_event,
+ .write_event_value = &ads7138_write_event,
+ .read_event_config = &ads7138_read_event_config,
+ .write_event_config = &ads7138_write_event_config,
+};
+
+static const struct iio_event_spec ads7138_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE)
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ }, {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS) |
+ BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+#define ADS7138_V_CHAN(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _chan, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_PEAK) | \
+ BIT(IIO_CHAN_INFO_TROUGH), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .info_mask_shared_by_type_available = \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) | \
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \
+ .datasheet_name = "AIN"#_chan, \
+ .event_spec = ads7138_events, \
+ .num_event_specs = ARRAY_SIZE(ads7138_events), \
+}
+
+static const struct iio_chan_spec ads7138_channels[] = {
+ ADS7138_V_CHAN(0),
+ ADS7138_V_CHAN(1),
+ ADS7138_V_CHAN(2),
+ ADS7138_V_CHAN(3),
+ ADS7138_V_CHAN(4),
+ ADS7138_V_CHAN(5),
+ ADS7138_V_CHAN(6),
+ ADS7138_V_CHAN(7),
+};
+
+static irqreturn_t ads7138_event_handler(int irq, void *priv)
+{
+ struct iio_dev *indio_dev = priv;
+ struct ads7138_data *data = iio_priv(indio_dev);
+ struct device *dev = &data->client->dev;
+ u8 i, events_high, events_low;
+ u64 code;
+ int ret;
+
+	/* Check if the interrupt was triggered by us */
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_EVENT_FLAG);
+ if (ret <= 0)
+ return IRQ_NONE;
+
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_EVENT_HIGH_FLAG);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to read event high flags: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ events_high = ret;
+
+ ret = ads7138_i2c_read(data->client, ADS7138_REG_EVENT_LOW_FLAG);
+ if (ret < 0) {
+ dev_warn(dev, "Failed to read event low flags: %d\n", ret);
+ return IRQ_HANDLED;
+ }
+ events_low = ret;
+
+ for (i = 0; i < data->chip_data->channel_num; i++) {
+ if (events_high & BIT(i)) {
+ code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING);
+ iio_push_event(indio_dev, code,
+ iio_get_time_ns(indio_dev));
+ }
+ if (events_low & BIT(i)) {
+ code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, i,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING);
+ iio_push_event(indio_dev, code,
+ iio_get_time_ns(indio_dev));
+ }
+ }
+
+ /* Try to clear all interrupt flags */
+ ret = ads7138_i2c_write(data->client, ADS7138_REG_EVENT_HIGH_FLAG, 0xFF);
+ if (ret)
+ dev_warn(dev, "Failed to clear event high flags: %d\n", ret);
+
+ ret = ads7138_i2c_write(data->client, ADS7138_REG_EVENT_LOW_FLAG, 0xFF);
+ if (ret)
+ dev_warn(dev, "Failed to clear event low flags: %d\n", ret);
+
+ return IRQ_HANDLED;
+}
+
+static int ads7138_set_conv_mode(struct ads7138_data *data,
+ enum ads7138_modes mode)
+{
+ if (mode == ADS7138_MODE_AUTO)
+ return ads7138_i2c_set_bit(data->client, ADS7138_REG_OPMODE_CFG,
+ ADS7138_OPMODE_CFG_CONV_MODE);
+ return ads7138_i2c_clear_bit(data->client, ADS7138_REG_OPMODE_CFG,
+ ADS7138_OPMODE_CFG_CONV_MODE);
+}
+
+static int ads7138_init_hw(struct ads7138_data *data)
+{
+ struct device *dev = &data->client->dev;
+ int ret;
+
+ data->vref_regu = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(data->vref_regu))
+ return dev_err_probe(dev, PTR_ERR(data->vref_regu),
+ "Failed to get avdd regulator\n");
+
+ ret = regulator_get_voltage(data->vref_regu);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "Failed to get avdd voltage\n");
+
+ /* Reset the chip to get a defined starting configuration */
+ ret = ads7138_i2c_set_bit(data->client, ADS7138_REG_GENERAL_CFG,
+ ADS7138_GENERAL_CFG_RST);
+ if (ret)
+ return ret;
+
+ ret = ads7138_set_conv_mode(data, ADS7138_MODE_AUTO);
+ if (ret)
+ return ret;
+
+ /* Enable statistics and digital window comparator */
+ ret = ads7138_i2c_set_bit(data->client, ADS7138_REG_GENERAL_CFG,
+ ADS7138_GENERAL_CFG_STATS_EN |
+ ADS7138_GENERAL_CFG_DWC_EN);
+ if (ret)
+ return ret;
+
+ /* Enable all channels for auto sequencing */
+ ret = ads7138_i2c_set_bit(data->client, ADS7138_REG_AUTO_SEQ_CH_SEL, 0xFF);
+ if (ret)
+ return ret;
+
+ /* Set auto sequence mode and start sequencing */
+ return ads7138_i2c_set_bit(data->client, ADS7138_REG_SEQUENCE_CFG,
+ ADS7138_SEQUENCE_CFG_SEQ_START |
+ ADS7138_SEQUENCE_CFG_SEQ_MODE);
+}
+
+static int ads7138_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct ads7138_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->chip_data = i2c_get_match_data(client);
+ if (!data->chip_data)
+ return -ENODEV;
+
+ ret = devm_mutex_init(dev, &data->lock);
+ if (ret)
+ return ret;
+
+ indio_dev->name = data->chip_data->name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ads7138_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ads7138_channels);
+ indio_dev->info = &ti_ads7138_info;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ if (client->irq > 0) {
+ ret = devm_request_threaded_irq(dev, client->irq,
+ NULL, ads7138_event_handler,
+ IRQF_TRIGGER_LOW |
+ IRQF_ONESHOT | IRQF_SHARED,
+ client->name, indio_dev);
+ if (ret)
+ return ret;
+ }
+
+ ret = ads7138_init_hw(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to initialize device\n");
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to register iio device\n");
+
+ return 0;
+}
+
+static int ads7138_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ads7138_data *data = iio_priv(indio_dev);
+
+ return ads7138_set_conv_mode(data, ADS7138_MODE_MANUAL);
+}
+
+static int ads7138_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ads7138_data *data = iio_priv(indio_dev);
+
+ return ads7138_set_conv_mode(data, ADS7138_MODE_AUTO);
+}
+
+static DEFINE_RUNTIME_DEV_PM_OPS(ads7138_pm_ops,
+ ads7138_runtime_suspend,
+ ads7138_runtime_resume,
+ NULL);
+
+static const struct ads7138_chip_data ads7128_data = {
+ .name = "ads7128",
+ .channel_num = 8,
+};
+
+static const struct ads7138_chip_data ads7138_data = {
+ .name = "ads7138",
+ .channel_num = 8,
+};
+
+static const struct of_device_id ads7138_of_match[] = {
+ { .compatible = "ti,ads7128", .data = &ads7128_data },
+ { .compatible = "ti,ads7138", .data = &ads7138_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ads7138_of_match);
+
+static const struct i2c_device_id ads7138_device_ids[] = {
+ { "ads7128", (kernel_ulong_t)&ads7128_data },
+ { "ads7138", (kernel_ulong_t)&ads7138_data },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ads7138_device_ids);
+
+static struct i2c_driver ads7138_driver = {
+ .driver = {
+ .name = "ads7138",
+ .of_match_table = ads7138_of_match,
+ .pm = pm_ptr(&ads7138_pm_ops),
+ },
+ .id_table = ads7138_device_ids,
+ .probe = ads7138_probe,
+};
+module_i2c_driver(ads7138_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tobias Sperling <tobias.sperling@softing.com>");
+MODULE_DESCRIPTION("Driver for TI ADS7138 ADCs");
diff --git a/drivers/iio/adc/ti-ads7924.c b/drivers/iio/adc/ti-ads7924.c
index 66b54c0d75aa..b1f745f75dbe 100644
--- a/drivers/iio/adc/ti-ads7924.c
+++ b/drivers/iio/adc/ti-ads7924.c
@@ -251,11 +251,8 @@ static const struct iio_info ads7924_info = {
.read_raw = ads7924_read_raw,
};
-static int ads7924_get_channels_config(struct i2c_client *client,
- struct iio_dev *indio_dev)
+static int ads7924_get_channels_config(struct device *dev)
{
- struct ads7924_data *priv = iio_priv(indio_dev);
- struct device *dev = priv->dev;
struct fwnode_handle *node;
int num_channels = 0;
@@ -380,7 +377,7 @@ static int ads7924_probe(struct i2c_client *client)
indio_dev->num_channels = ARRAY_SIZE(ads7924_channels);
indio_dev->info = &ads7924_info;
- ret = ads7924_get_channels_config(client, indio_dev);
+ ret = ads7924_get_channels_config(dev);
if (ret < 0)
return dev_err_probe(dev, ret,
"failed to get channels configuration\n");
diff --git a/drivers/iio/adc/ti-tlc4541.c b/drivers/iio/adc/ti-tlc4541.c
index 08de997584fd..5a138be983ed 100644
--- a/drivers/iio/adc/ti-tlc4541.c
+++ b/drivers/iio/adc/ti-tlc4541.c
@@ -131,11 +131,10 @@ static int tlc4541_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = spi_sync(st->spi, &st->scan_single_msg);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
if (ret < 0)
return ret;
*val = be16_to_cpu(st->rx_buf[0]);
diff --git a/drivers/iio/addac/ad74413r.c b/drivers/iio/addac/ad74413r.c
index daea2bde7acf..f14d12b03da6 100644
--- a/drivers/iio/addac/ad74413r.c
+++ b/drivers/iio/addac/ad74413r.c
@@ -826,6 +826,8 @@ static int _ad74413r_get_single_adc_result(struct ad74413r_state *st,
unsigned int uval;
int ret;
+ guard(mutex)(&st->lock);
+
reinit_completion(&st->adc_data_completion);
ret = ad74413r_set_adc_channel_enable(st, channel, true);
@@ -865,12 +867,14 @@ static int ad74413r_get_single_adc_result(struct iio_dev *indio_dev,
unsigned int channel, int *val)
{
struct ad74413r_state *st = iio_priv(indio_dev);
+ int ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
- return _ad74413r_get_single_adc_result(st, channel, val);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = _ad74413r_get_single_adc_result(st, channel, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static void ad74413r_adc_to_resistance_result(int adc_result, int *val)
diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c
index 2ee4c0d70281..d9a359e1388a 100644
--- a/drivers/iio/amplifiers/hmc425a.c
+++ b/drivers/iio/amplifiers/hmc425a.c
@@ -161,8 +161,7 @@ static int hmc425a_write(struct iio_dev *indio_dev, u32 value)
values[0] = value;
- gpiod_set_array_value_cansleep(st->gpios->ndescs, st->gpios->desc,
- NULL, values);
+ gpiod_multi_set_value_cansleep(st->gpios, values);
return 0;
}
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 7ea784304ffb..ee294a775e8a 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -624,7 +624,7 @@ out_unlock:
/**
* iio_dma_buffer_read() - DMA buffer read callback
- * @buffer: Buffer to read form
+ * @buffer: Buffer to read from
* @n: Number of bytes to read
* @user_buffer: Userspace buffer to copy the data to
*
@@ -640,7 +640,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, "IIO_DMA_BUFFER");
/**
* iio_dma_buffer_write() - DMA buffer write callback
- * @buffer: Buffer to read form
+ * @buffer: Buffer to write to
* @n: Number of bytes to read
* @user_buffer: Userspace buffer to copy the data from
*
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index 614e1c4189a9..e9d9a7d39fe1 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -206,39 +206,29 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
*
* This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
*
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{
struct dmaengine_buffer *dmaengine_buffer;
unsigned int width, src_width, dest_width;
struct dma_slave_caps caps;
- struct dma_chan *chan;
int ret;
+ ret = dma_get_slave_caps(chan, &caps);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
if (!dmaengine_buffer)
return ERR_PTR(-ENOMEM);
- chan = dma_request_chan(dev, channel);
- if (IS_ERR(chan)) {
- ret = PTR_ERR(chan);
- goto err_free;
- }
-
- ret = dma_get_slave_caps(chan, &caps);
- if (ret < 0)
- goto err_release;
-
/* Needs to be aligned to the maximum of the minimums */
if (caps.src_addr_widths)
src_width = __ffs(caps.src_addr_widths);
@@ -262,12 +252,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
return &dmaengine_buffer->queue.buffer;
-
-err_release:
- dma_release_channel(chan);
-err_free:
- kfree(dmaengine_buffer);
- return ERR_PTR(ret);
}
/**
@@ -276,17 +260,57 @@ err_free:
*
* Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
*/
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(buffer);
iio_dma_buffer_exit(&dmaengine_buffer->queue);
- dma_release_channel(dmaengine_buffer->chan);
-
iio_buffer_put(buffer);
}
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
+
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+ struct dmaengine_buffer *dmaengine_buffer =
+ iio_buffer_to_dmaengine_buffer(buffer);
+ struct dma_chan *chan = dmaengine_buffer->chan;
+
+ iio_dmaengine_buffer_free(buffer);
+ dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ buffer = iio_dmaengine_buffer_alloc(chan);
+ if (IS_ERR(buffer))
+ return ERR_CAST(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ buffer->direction = dir;
+
+ ret = iio_device_attach_buffer(indio_dev, buffer);
+ if (ret) {
+ iio_dmaengine_buffer_free(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
/**
* iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
@@ -300,7 +324,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
* It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
* IIO device.
*
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
* release it.
*/
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
@@ -308,30 +332,24 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
const char *channel,
enum iio_buffer_direction dir)
{
+ struct dma_chan *chan;
struct iio_buffer *buffer;
- int ret;
-
- buffer = iio_dmaengine_buffer_alloc(dev, channel);
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
- buffer->direction = dir;
+ chan = dma_request_chan(dev, channel);
+ if (IS_ERR(chan))
+ return ERR_CAST(chan);
- ret = iio_device_attach_buffer(indio_dev, buffer);
- if (ret) {
- iio_dmaengine_buffer_free(buffer);
- return ERR_PTR(ret);
- }
+ buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+ if (IS_ERR(buffer))
+ dma_release_channel(chan);
return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
/**
@@ -357,11 +375,49 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+ return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
+static void devm_iio_dmaengine_buffer_free(void *buffer)
+{
+ iio_dmaengine_buffer_free(buffer);
+}
+
+/**
+ * devm_iio_dmaengine_buffer_setup_with_handle() - Setup a DMA buffer for an
+ * IIO device
+ * @dev: Device for devm ownership
+ * @indio_dev: IIO device to which to attach this buffer.
+ * @chan: DMA channel
+ * @dir: Direction of buffer (in or out)
+ *
+ * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
+ * and attaches it to an IIO device with iio_device_attach_buffer().
+ * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
+ * IIO device.
+ *
+ * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
+ * caller manages requesting and releasing the DMA channel handle.
+ */
+int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
+ struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+
+ buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+ if (IS_ERR(buffer))
+ return PTR_ERR(buffer);
+
+ return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
+ buffer);
+}
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
+ "IIO_DMAENGINE_BUFFER");
+
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
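
The new devm_iio_dmaengine_buffer_setup_with_handle() above differs from the _ext variant in that the caller owns the DMA channel and must arrange its release itself. A hedged usage sketch for a hypothetical driver (the foo_* names are placeholders; the header choice is an assumption):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer-dmaengine.h>

static void foo_dma_release(void *data)
{
	dma_release_channel(data);
}

static int foo_setup_rx_buffer(struct device *dev, struct iio_dev *indio_dev)
{
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Caller-managed lifetime: release the channel on driver detach. */
	ret = devm_add_action_or_reset(dev, foo_dma_release, chan);
	if (ret)
		return ret;

	return devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev, chan,
							   IIO_BUFFER_DIRECTION_IN);
}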
diff --git a/drivers/iio/chemical/ens160_core.c b/drivers/iio/chemical/ens160_core.c
index 48d5ad2075b6..152f81ff57e3 100644
--- a/drivers/iio/chemical/ens160_core.c
+++ b/drivers/iio/chemical/ens160_core.c
@@ -100,25 +100,35 @@ static const struct iio_chan_spec ens160_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(2),
};
+static int __ens160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct ens160_data *data = iio_priv(indio_dev);
+ int ret;
+
+ guard(mutex)(&data->mutex);
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &data->buf, sizeof(data->buf));
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(data->buf);
+ return IIO_VAL_INT;
+}
+
static int ens160_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- struct ens160_data *data = iio_priv(indio_dev);
int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&data->mutex);
- ret = regmap_bulk_read(data->regmap, chan->address,
- &data->buf, sizeof(data->buf));
- if (ret)
- return ret;
- *val = le16_to_cpu(data->buf);
- return IIO_VAL_INT;
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __ens160_read_raw(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
switch (chan->channel2) {
case IIO_MOD_CO2:
diff --git a/drivers/iio/chemical/scd30_core.c b/drivers/iio/chemical/scd30_core.c
index d613c54cb28d..3fed6b63710f 100644
--- a/drivers/iio/chemical/scd30_core.c
+++ b/drivers/iio/chemical/scd30_core.c
@@ -5,6 +5,7 @@
* Copyright (c) 2020 Tomasz Duszynski <tomasz.duszynski@octakon.com>
*/
#include <linux/bits.h>
+#include <linux/cleanup.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -198,112 +199,103 @@ static int scd30_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const
int *val, int *val2, long mask)
{
struct scd30_state *state = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret;
u16 tmp;
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
if (chan->output) {
*val = state->pressure_comp;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
}
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- break;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
ret = scd30_read(state);
if (ret) {
- iio_device_release_direct_mode(indio_dev);
- break;
+ iio_device_release_direct(indio_dev);
+ return ret;
}
*val = state->meas[chan->address];
- iio_device_release_direct_mode(indio_dev);
- ret = IIO_VAL_INT;
- break;
+ iio_device_release_direct(indio_dev);
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 1;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_SAMP_FREQ:
ret = scd30_command_read(state, CMD_MEAS_INTERVAL, &tmp);
if (ret)
- break;
+ return ret;
*val = 0;
*val2 = 1000000000 / tmp;
- ret = IIO_VAL_INT_PLUS_NANO;
- break;
+ return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = scd30_command_read(state, CMD_TEMP_OFFSET, &tmp);
if (ret)
- break;
+ return ret;
*val = tmp;
- ret = IIO_VAL_INT;
- break;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
}
- mutex_unlock(&state->lock);
-
- return ret;
}
static int scd30_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
struct scd30_state *state = iio_priv(indio_dev);
- int ret = -EINVAL;
+ int ret;
- mutex_lock(&state->lock);
+ guard(mutex)(&state->lock);
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
if (val)
- break;
+ return -EINVAL;
val = 1000000000 / val2;
if (val < SCD30_MEAS_INTERVAL_MIN_S || val > SCD30_MEAS_INTERVAL_MAX_S)
- break;
+ return -EINVAL;
ret = scd30_command_write(state, CMD_MEAS_INTERVAL, val);
if (ret)
- break;
+ return ret;
state->meas_interval = val;
- break;
+ return 0;
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_PRESSURE:
if (val < SCD30_PRESSURE_COMP_MIN_MBAR ||
val > SCD30_PRESSURE_COMP_MAX_MBAR)
- break;
+ return -EINVAL;
ret = scd30_command_write(state, CMD_START_MEAS, val);
if (ret)
- break;
+ return ret;
state->pressure_comp = val;
- break;
+ return 0;
default:
- break;
+ return -EINVAL;
}
- break;
case IIO_CHAN_INFO_CALIBBIAS:
if (val < 0 || val > SCD30_TEMP_OFFSET_MAX)
- break;
+ return -EINVAL;
/*
* Manufacturer does not explicitly specify min/max sensible
* values hence check is omitted for simplicity.
*/
- ret = scd30_command_write(state, CMD_TEMP_OFFSET / 10, val);
+ return scd30_command_write(state, CMD_TEMP_OFFSET / 10, val);
+ default:
+ return -EINVAL;
}
- mutex_unlock(&state->lock);
-
- return ret;
}
static int scd30_write_raw_get_fmt(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
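
The scd30 hunks above drop the manual mutex_lock()/mutex_unlock() bracket in favour of scope-based locking from <linux/cleanup.h>, which is what allows every switch case to return directly. A minimal sketch of that guard(mutex) pattern, with a hypothetical state structure:

#include <linux/cleanup.h>
#include <linux/mutex.h>

struct foo_state {
	struct mutex lock;	/* hypothetical, for illustration only */
	bool ready;
	int cached;
};

static int foo_get_cached(struct foo_state *st, int *val)
{
	guard(mutex)(&st->lock);	/* unlocked automatically at every return */

	if (!st->ready)
		return -EBUSY;

	*val = st->cached;
	return 0;
}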
diff --git a/drivers/iio/common/cros_ec_sensors/Makefile b/drivers/iio/common/cros_ec_sensors/Makefile
index e0a33ab66d21..c358fa0328ab 100644
--- a/drivers/iio/common/cros_ec_sensors/Makefile
+++ b/drivers/iio/common/cros_ec_sensors/Makefile
@@ -3,6 +3,7 @@
# Makefile for sensors seen through the ChromeOS EC sensor hub.
#
-obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros_ec_sensors_core.o
+cros-ec-sensors-core-objs += cros_ec_sensors_core.o cros_ec_sensors_trace.o
+obj-$(CONFIG_IIO_CROS_EC_SENSORS_CORE) += cros-ec-sensors-core.o
obj-$(CONFIG_IIO_CROS_EC_SENSORS) += cros_ec_sensors.o
obj-$(CONFIG_IIO_CROS_EC_SENSORS_LID_ANGLE) += cros_ec_lid_angle.o
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index 9fc71a73caa1..7751d6f69b12 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -23,6 +23,8 @@
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
+#include "cros_ec_sensors_trace.h"
+
/*
* Hard coded to the first device to support sensor fifo. The EC has a 2048
* byte fifo and will trigger an interrupt when fifo is 2/3 full.
@@ -413,6 +415,7 @@ EXPORT_SYMBOL_GPL(cros_ec_sensors_core_register);
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
u16 opt_length)
{
+ struct ec_response_motion_sense *resp = (struct ec_response_motion_sense *)state->msg->data;
int ret;
if (opt_length)
@@ -423,12 +426,12 @@ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
memcpy(state->msg->data, &state->param, sizeof(state->param));
ret = cros_ec_cmd_xfer_status(state->ec, state->msg);
+ trace_cros_ec_motion_host_cmd(&state->param, resp, ret);
if (ret < 0)
return ret;
- if (ret &&
- state->resp != (struct ec_response_motion_sense *)state->msg->data)
- memcpy(state->resp, state->msg->data, ret);
+ if (ret && state->resp != resp)
+ memcpy(state->resp, resp, ret);
return 0;
}
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c
new file mode 100644
index 000000000000..c4db949fa775
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+// Trace events for the ChromeOS Embedded Controller
+//
+// Copyright 2025 Google LLC.
+
+#define TRACE_SYMBOL(a) {a, #a}
+
+// Generate the list using the following script:
+// sed -n 's/^.*\(MOTIONSENSE_CMD.*\) = .*,$/\tTRACE_SYMBOL(\1), \\/p' include/linux/platform_data/cros_ec_commands.h
+#define MOTIONSENSE_CMDS \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_DUMP), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_INFO), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_EC_RATE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_ODR), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_RANGE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_KB_WAKE_ANGLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_DATA), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_INFO), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_FLUSH), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_READ), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_PERFORM_CALIB), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_OFFSET), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_LIST_ACTIVITIES), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SET_ACTIVITY), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_LID_ANGLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_FIFO_INT_ENABLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SPOOF), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE), \
+ TRACE_SYMBOL(MOTIONSENSE_CMD_SENSOR_SCALE)
+
+#define CREATE_TRACE_POINTS
+#include "cros_ec_sensors_trace.h"
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h
new file mode 100644
index 000000000000..8956f2e8ad08
--- /dev/null
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_trace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Trace events for the ChromeOS Embedded Controller
+ *
+ * Copyright 2025 Google LLC.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cros_ec
+
+#if !defined(_CROS_EC_SENSORS_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _CROS_EC_SENSORS_TRACE_H_
+
+#include <linux/bits.h>
+#include <linux/types.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(cros_ec_motion_host_cmd,
+ TP_PROTO(struct ec_params_motion_sense *param,
+ struct ec_response_motion_sense *resp,
+ int retval),
+ TP_ARGS(param, resp, retval),
+ TP_STRUCT__entry(__field(uint8_t, cmd)
+ __field(uint8_t, sensor_id)
+ __field(uint32_t, data)
+ __field(int, retval)
+ __field(int32_t, ret)
+ ),
+ TP_fast_assign(__entry->cmd = param->cmd;
+ __entry->sensor_id = param->sensor_odr.sensor_num;
+ __entry->data = param->sensor_odr.data;
+ __entry->retval = retval;
+ __entry->ret = retval > 0 ? resp->sensor_odr.ret : -1;
+ ),
+ TP_printk("%s, id: %d, data: %u, result: %u, return: %d",
+ __print_symbolic(__entry->cmd, MOTIONSENSE_CMDS),
+ __entry->sensor_id,
+ __entry->data,
+ __entry->retval,
+ __entry->ret)
+);
+
+#endif /* _CROS_EC_SENSORS_TRACE_H_ */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/iio/common/cros_ec_sensors
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cros_ec_sensors_trace
+
+#include <trace/define_trace.h>
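
For context, the new header follows the usual tracepoint layout: exactly one translation unit (cros_ec_sensors_trace.c above) defines CREATE_TRACE_POINTS before including it, which instantiates the event, while any other caller just includes the header and invokes the generated helper, as the cros_ec_sensors_core.c hunk does. A hedged sketch of such a caller (the wrapper function is hypothetical):

#include "cros_ec_sensors_trace.h"

static void example_motion_cmd_done(struct ec_params_motion_sense *param,
				    struct ec_response_motion_sense *resp,
				    int xfer_ret)
{
	/* Shows up under events/cros_ec/cros_ec_motion_host_cmd in tracefs. */
	trace_cros_ec_motion_host_cmd(param, resp, xfer_ret);
}
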
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 5690a37267d8..4811ea973125 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -296,6 +296,9 @@ config AD5770R
config AD5791
tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
depends on SPI
+ select SPI_OFFLOAD
+ select IIO_BUFFER
+ select IIO_BUFFER_DMAENGINE
help
Say yes here to build support for Analog Devices AD5760, AD5780,
AD5781, AD5790, AD5791 High Resolution Voltage Output Digital to
diff --git a/drivers/iio/dac/ad3552r-common.c b/drivers/iio/dac/ad3552r-common.c
index 03e0864f5084..b8807e54fa05 100644
--- a/drivers/iio/dac/ad3552r-common.c
+++ b/drivers/iio/dac/ad3552r-common.c
@@ -11,23 +11,21 @@
#include "ad3552r.h"
-const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = {
+static const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2] = {
[AD3552R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
[AD3552R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3552R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
[AD3552R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
[AD3552R_CH_OUTPUT_RANGE_NEG_10__10V] = { -10000, 10000 }
};
-EXPORT_SYMBOL_NS_GPL(ad3552r_ch_ranges, "IIO_AD3552R");
-const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
+static const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2] = {
[AD3542R_CH_OUTPUT_RANGE_0__2P5V] = { 0, 2500 },
[AD3542R_CH_OUTPUT_RANGE_0__5V] = { 0, 5000 },
[AD3542R_CH_OUTPUT_RANGE_0__10V] = { 0, 10000 },
[AD3542R_CH_OUTPUT_RANGE_NEG_5__5V] = { -5000, 5000 },
[AD3542R_CH_OUTPUT_RANGE_NEG_2P5__7P5V] = { -2500, 7500 }
};
-EXPORT_SYMBOL_NS_GPL(ad3542r_ch_ranges, "IIO_AD3552R");
/* Gain * AD3552R_GAIN_SCALE */
static const s32 gains_scaling_table[] = {
@@ -37,6 +35,50 @@ static const s32 gains_scaling_table[] = {
[AD3552R_CH_GAIN_SCALING_0_125] = 125
};
+const struct ad3552r_model_data ad3541r_model_data = {
+ .model_name = "ad3541r",
+ .chip_id = AD3541R_ID,
+ .num_hw_channels = 1,
+ .ranges_table = ad3542r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
+ .requires_output_range = true,
+ .num_spi_data_lanes = 2,
+};
+EXPORT_SYMBOL_NS_GPL(ad3541r_model_data, "IIO_AD3552R");
+
+const struct ad3552r_model_data ad3542r_model_data = {
+ .model_name = "ad3542r",
+ .chip_id = AD3542R_ID,
+ .num_hw_channels = 2,
+ .ranges_table = ad3542r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
+ .requires_output_range = true,
+ .num_spi_data_lanes = 2,
+};
+EXPORT_SYMBOL_NS_GPL(ad3542r_model_data, "IIO_AD3552R");
+
+const struct ad3552r_model_data ad3551r_model_data = {
+ .model_name = "ad3551r",
+ .chip_id = AD3551R_ID,
+ .num_hw_channels = 1,
+ .ranges_table = ad3552r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
+ .requires_output_range = false,
+ .num_spi_data_lanes = 4,
+};
+EXPORT_SYMBOL_NS_GPL(ad3551r_model_data, "IIO_AD3552R");
+
+const struct ad3552r_model_data ad3552r_model_data = {
+ .model_name = "ad3552r",
+ .chip_id = AD3552R_ID,
+ .num_hw_channels = 2,
+ .ranges_table = ad3552r_ch_ranges,
+ .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
+ .requires_output_range = false,
+ .num_spi_data_lanes = 4,
+};
+EXPORT_SYMBOL_NS_GPL(ad3552r_model_data, "IIO_AD3552R");
+
u16 ad3552r_calc_custom_gain(u8 p, u8 n, s16 goffs)
{
return FIELD_PREP(AD3552R_MASK_CH_RANGE_OVERRIDE, 1) |
diff --git a/drivers/iio/dac/ad3552r-hs.c b/drivers/iio/dac/ad3552r-hs.c
index 8974df625670..cd8dabb60c55 100644
--- a/drivers/iio/dac/ad3552r-hs.c
+++ b/drivers/iio/dac/ad3552r-hs.c
@@ -19,6 +19,31 @@
#include "ad3552r.h"
#include "ad3552r-hs.h"
+/*
+ * Important notes for register map access:
+ * ========================================
+ *
+ * The register address space is divided into two regions, primary (config)
+ * and secondary (DAC). The primary region can only be accessed in simple SPI
+ * mode, except on ad355x models where driving the QSPI pin high allows QSPI
+ * access to both regions.
+ *
+ * Since ad3541/2r do not implement QSPI, and to allow proper device
+ * detection, the HDL keeps the "QSPI" pin low at boot (see ad3552r manual,
+ * rev B, table 7, pin 31, digital input). The working mode (SPI, DSPI or
+ * QSPI) must therefore be selected in software, by configuring the target
+ * DAC appropriately and, through the backend API, the bus mode to match.
+ *
+ * Note also that none of the three modes allows reading in DDR.
+ *
+ * For non-buffering operations, the mode is kept at simple SPI SDR for all
+ * primary and secondary region r/w accesses, so that the mode does not have
+ * to be switched on every DAC register access (raw reads/writes) and so that
+ * register contents can still be dumped (only possible in non-DDR).
+ * In buffering mode, the driver selects the best possible mode: D/QSPI and DDR.
+ */
+
struct ad3552r_hs_state {
const struct ad3552r_model_data *model_data;
struct gpio_desc *reset_gpio;
@@ -27,16 +52,26 @@ struct ad3552r_hs_state {
bool single_channel;
struct ad3552r_ch_data ch_data[AD3552R_MAX_CH];
struct ad3552r_hs_platform_data *data;
+ /* INTERFACE_CONFIG_D register cache; registers cannot be read while in DDR. */
+ u32 config_d;
};
-static int ad3552r_qspi_update_reg_bits(struct ad3552r_hs_state *st,
- u32 reg, u32 mask, u32 val,
- size_t xfer_size)
+static int ad3552r_hs_reg_read(struct ad3552r_hs_state *st, u32 reg, u32 *val,
+ size_t xfer_size)
+{
+ /* No chip in the family supports DDR reads; warn if one is attempted. */
+ WARN_ON_ONCE(st->config_d & AD3552R_MASK_SPI_CONFIG_DDR);
+
+ return st->data->bus_reg_read(st->back, reg, val, xfer_size);
+}
+
+static int ad3552r_hs_update_reg_bits(struct ad3552r_hs_state *st, u32 reg,
+ u32 mask, u32 val, size_t xfer_size)
{
u32 rval;
int ret;
- ret = st->data->bus_reg_read(st->back, reg, &rval, xfer_size);
+ ret = ad3552r_hs_reg_read(st, reg, &rval, xfer_size);
if (ret)
return ret;
@@ -56,16 +91,20 @@ static int ad3552r_hs_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
/*
- * Using 4 lanes (QSPI), then using 2 as DDR mode is
- * considered always on (considering buffering mode always).
+ * Use num_spi_data_lanes here since ad3541/2r only provide a DSPI
+ * interface while ad355x provides QSPI. The factor of 2 accounts for
+ * DDR mode, which is considered always on in buffering mode.
*/
*val = DIV_ROUND_CLOSEST(st->data->bus_sample_data_clock_hz *
- 4 * 2, chan->scan_type.realbits);
+ st->model_data->num_spi_data_lanes * 2,
+ chan->scan_type.realbits);
return IIO_VAL_INT;
case IIO_CHAN_INFO_RAW:
- ret = st->data->bus_reg_read(st->back,
+ /* For RAW accesses, always stay in simple SPI. */
+ ret = ad3552r_hs_reg_read(st,
AD3552R_REG_ADDR_CH_DAC_16B(chan->channel),
val, 2);
if (ret)
@@ -90,20 +129,60 @@ static int ad3552r_hs_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct ad3552r_hs_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- return st->data->bus_reg_write(st->back,
- AD3552R_REG_ADDR_CH_DAC_16B(chan->channel),
- val, 2);
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ /* For RAW accesses, always stay in simple SPI. */
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_CH_DAC_16B(chan->channel),
+ val, 2);
+
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
}
+static int ad3552r_hs_set_bus_io_mode_hs(struct ad3552r_hs_state *st)
+{
+ int bus_mode;
+
+ if (st->model_data->num_spi_data_lanes == 4)
+ bus_mode = AD3552R_IO_MODE_QSPI;
+ else
+ bus_mode = AD3552R_IO_MODE_DSPI;
+
+ return st->data->bus_set_io_mode(st->back, bus_mode);
+}
+
+static int ad3552r_hs_set_target_io_mode_hs(struct ad3552r_hs_state *st)
+{
+ u32 mode_target;
+
+ /*
+ * Best access for secondary reg area, QSPI where possible,
+ * else as DSPI.
+ */
+ if (st->model_data->num_spi_data_lanes == 4)
+ mode_target = AD3552R_QUAD_SPI;
+ else
+ mode_target = AD3552R_DUAL_SPI;
+
+ /*
+ * Better not to use update here: the device is generally already in
+ * DDR mode at this point, and registers cannot be read in DDR mode.
+ */
+ return st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ FIELD_PREP(AD3552R_MASK_MULTI_IO_MODE,
+ mode_target) |
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE, 1);
+}
+
static int ad3552r_hs_buffer_postenable(struct iio_dev *indio_dev)
{
struct ad3552r_hs_state *st = iio_priv(indio_dev);
@@ -132,48 +211,111 @@ static int ad3552r_hs_buffer_postenable(struct iio_dev *indio_dev)
return -EINVAL;
}
- ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_STREAM_MODE,
- loop_len, 1);
+ /*
+ * With ad3541/2r support, the QSPI pin is held low at reset by the HDL,
+ * so the streaming start sequence must strictly follow the order below.
+ */
+
+ /* Primary region access, set streaming mode (now in SPI + SDR). */
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST, 0, 1);
if (ret)
return ret;
- /* Inform DAC chip to switch into DDR mode */
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SPI_CONFIG_DDR,
- AD3552R_MASK_SPI_CONFIG_DDR, 1);
+ /*
+ * Set the target loop length: streaming writes at address 0x2c or 0x2a,
+ * in a descending loop (2 or 4 bytes), keeping the loop length value so
+ * that it is not cleared later when _CS is deasserted.
+ */
+ ret = ad3552r_hs_update_reg_bits(st, AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE,
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE,
+ 1);
if (ret)
- return ret;
+ goto exit_err_streaming;
+
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_STREAM_MODE,
+ loop_len, 1);
+ if (ret)
+ goto exit_err_streaming;
+
+ st->config_d |= AD3552R_MASK_SPI_CONFIG_DDR;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ st->config_d, 1);
+ if (ret)
+ goto exit_err_streaming;
- /* Inform DAC IP to go for DDR mode from now on */
ret = iio_backend_ddr_enable(st->back);
- if (ret) {
- dev_err(st->dev, "could not set DDR mode, not streaming");
- goto exit_err;
- }
+ if (ret)
+ goto exit_err_ddr_mode_target;
+
+ /*
+ * From here on the mode is DDR, so registers can no longer be read;
+ * this includes calls to ad3552r_hs_update_reg_bits().
+ */
+ /* Set target to best high speed mode (D or QSPI). */
+ ret = ad3552r_hs_set_target_io_mode_hs(st);
+ if (ret)
+ goto exit_err_ddr_mode;
+
+ /* Set bus to best high speed mode (D or QSPI). */
+ ret = ad3552r_hs_set_bus_io_mode_hs(st);
+ if (ret)
+ goto exit_err_bus_mode_target;
+
+ /*
+ * Backend setup must be done only at this point, otherwise the related
+ * register values would be disrupted by the previous bus accesses.
+ */
ret = iio_backend_data_transfer_addr(st->back, val);
if (ret)
- goto exit_err;
+ goto exit_err_bus_mode_target;
ret = iio_backend_data_format_set(st->back, 0, &fmt);
if (ret)
- goto exit_err;
+ goto exit_err_bus_mode_target;
ret = iio_backend_data_stream_enable(st->back);
if (ret)
- goto exit_err;
+ goto exit_err_bus_mode_target;
return 0;
-exit_err:
- ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SPI_CONFIG_DDR,
- 0, 1);
+exit_err_bus_mode_target:
+ /* Back to simple SPI, not using update to avoid read. */
+ st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ FIELD_PREP(AD3552R_MASK_MULTI_IO_MODE,
+ AD3552R_SPI) |
+ AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE, 1);
+
+ /*
+ * Switch the bus back to simple SPI; this must be done together with the
+ * target mode unwind above, and only after it.
+ */
+ st->data->bus_set_io_mode(st->back, AD3552R_IO_MODE_SPI);
+exit_err_ddr_mode:
iio_backend_ddr_disable(st->back);
+exit_err_ddr_mode_target:
+ /*
+ * Back to SDR. In DDR we cannot read, whatever the mode is, so not
+ * using update.
+ */
+ st->config_d &= ~AD3552R_MASK_SPI_CONFIG_DDR;
+ st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ st->config_d, 1);
+
+exit_err_streaming:
+ /* Back to single instruction mode, disabling loop. */
+ st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST |
+ AD3552R_MASK_SHORT_INSTRUCTION, 1);
+
return ret;
}
@@ -186,11 +328,22 @@ static int ad3552r_hs_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
- /* Inform DAC to set in SDR mode */
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SPI_CONFIG_DDR,
- 0, 1);
+ /*
+ * Switch to simple SPI, even though still in DDR, to be able to write
+ * to the primary region.
+ */
+ ret = st->data->bus_set_io_mode(st->back, AD3552R_IO_MODE_SPI);
+ if (ret)
+ return ret;
+
+ /*
+ * Back to SDR (in DDR we cannot read, whatever the mode is, so not
+ * using update).
+ */
+ st->config_d &= ~AD3552R_MASK_SPI_CONFIG_DDR;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ st->config_d, 1);
if (ret)
return ret;
@@ -198,6 +351,24 @@ static int ad3552r_hs_buffer_predisable(struct iio_dev *indio_dev)
if (ret)
return ret;
+ /*
+ * Also switch the secondary region back to simple SPI now, so that
+ * registers there can be dumped/read if needed.
+ */
+ ret = ad3552r_hs_update_reg_bits(st, AD3552R_REG_ADDR_TRANSFER_REGISTER,
+ AD3552R_MASK_MULTI_IO_MODE,
+ AD3552R_SPI, 1);
+ if (ret)
+ return ret;
+
+ /* Back to single instruction mode, disabling loop. */
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST,
+ AD3552R_MASK_SINGLE_INST, 1);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -211,10 +382,10 @@ static inline int ad3552r_hs_set_output_range(struct ad3552r_hs_state *st,
else
val = FIELD_PREP(AD3552R_MASK_CH1_RANGE, mode);
- return ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
- AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch),
- val, 1);
+ return ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_CH0_CH1_OUTPUT_RANGE,
+ AD3552R_MASK_CH_OUTPUT_RANGE_SEL(ch),
+ val, 1);
}
static int ad3552r_hs_reset(struct ad3552r_hs_state *st)
@@ -230,10 +401,10 @@ static int ad3552r_hs_reset(struct ad3552r_hs_state *st)
fsleep(10);
gpiod_set_value_cansleep(st->reset_gpio, 0);
} else {
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
- AD3552R_MASK_SOFTWARE_RESET,
- AD3552R_MASK_SOFTWARE_RESET, 1);
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
+ AD3552R_MASK_SOFTWARE_RESET,
+ AD3552R_MASK_SOFTWARE_RESET, 1);
if (ret)
return ret;
}
@@ -304,30 +475,49 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
if (ret)
return ret;
+ /* HDL starts with DDR enabled, disabling it. */
ret = iio_backend_ddr_disable(st->back);
if (ret)
return ret;
+ ret = st->data->bus_reg_write(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_B,
+ AD3552R_MASK_SINGLE_INST |
+ AD3552R_MASK_SHORT_INSTRUCTION, 1);
+ if (ret)
+ return ret;
+
ret = ad3552r_hs_scratch_pad_test(st);
if (ret)
return ret;
- ret = st->data->bus_reg_read(st->back, AD3552R_REG_ADDR_PRODUCT_ID_L,
- &val, 1);
+ /*
+ * Cache config_d: it is needed to restore the register after streaming
+ * and to detect DDR reads, which are not allowed.
+ */
+ ret = st->data->bus_reg_read(st->back,
+ AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
+ &st->config_d, 1);
+ if (ret)
+ return ret;
+
+ ret = ad3552r_hs_reg_read(st, AD3552R_REG_ADDR_PRODUCT_ID_L, &val, 1);
if (ret)
return ret;
id = val;
- ret = st->data->bus_reg_read(st->back, AD3552R_REG_ADDR_PRODUCT_ID_H,
- &val, 1);
+ ret = ad3552r_hs_reg_read(st, AD3552R_REG_ADDR_PRODUCT_ID_H, &val, 1);
if (ret)
return ret;
id |= val << 8;
if (id != st->model_data->chip_id)
- dev_info(st->dev, "Chip ID error. Expected 0x%x, Read 0x%x\n",
- AD3552R_ID, id);
+ dev_warn(st->dev,
+ "chip ID mismatch, detected 0x%x but expected 0x%x\n",
+ id, st->model_data->chip_id);
+
+ dev_dbg(st->dev, "chip id %s detected", st->model_data->model_name);
/* Clear reset error flag, see ad3552r manual, rev B table 38. */
ret = st->data->bus_reg_write(st->back, AD3552R_REG_ADDR_ERR_STATUS,
@@ -341,14 +531,6 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
if (ret)
return ret;
- ret = st->data->bus_reg_write(st->back,
- AD3552R_REG_ADDR_TRANSFER_REGISTER,
- FIELD_PREP(AD3552R_MASK_MULTI_IO_MODE,
- AD3552R_QUAD_SPI) |
- AD3552R_MASK_STREAM_LENGTH_KEEP_VALUE, 1);
- if (ret)
- return ret;
-
ret = iio_backend_data_source_set(st->back, 0, IIO_BACKEND_EXTERNAL);
if (ret)
return ret;
@@ -363,19 +545,21 @@ static int ad3552r_hs_setup(struct ad3552r_hs_state *st)
val = ret;
- ret = ad3552r_qspi_update_reg_bits(st,
- AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
- AD3552R_MASK_REFERENCE_VOLTAGE_SEL,
- val, 1);
+ ret = ad3552r_hs_update_reg_bits(st,
+ AD3552R_REG_ADDR_SH_REFERENCE_CONFIG,
+ AD3552R_MASK_REFERENCE_VOLTAGE_SEL,
+ val, 1);
if (ret)
return ret;
ret = ad3552r_get_drive_strength(st->dev, &val);
if (!ret) {
- ret = ad3552r_qspi_update_reg_bits(st,
+ st->config_d |=
+ FIELD_PREP(AD3552R_MASK_SDO_DRIVE_STRENGTH, val);
+
+ ret = st->data->bus_reg_write(st->back,
AD3552R_REG_ADDR_INTERFACE_CONFIG_D,
- AD3552R_MASK_SDO_DRIVE_STRENGTH,
- val, 1);
+ st->config_d, 1);
if (ret)
return ret;
}
@@ -504,15 +688,10 @@ static int ad3552r_hs_probe(struct platform_device *pdev)
return devm_iio_device_register(&pdev->dev, indio_dev);
}
-static const struct ad3552r_model_data ad3552r_model_data = {
- .model_name = "ad3552r",
- .chip_id = AD3552R_ID,
- .num_hw_channels = 2,
- .ranges_table = ad3552r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
-};
-
static const struct of_device_id ad3552r_hs_of_id[] = {
+ { .compatible = "adi,ad3541r", .data = &ad3541r_model_data },
+ { .compatible = "adi,ad3542r", .data = &ad3542r_model_data },
+ { .compatible = "adi,ad3551r", .data = &ad3551r_model_data },
{ .compatible = "adi,ad3552r", .data = &ad3552r_model_data },
{ }
};
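
As a worked example of the SAMP_FREQ computation in ad3552r_hs_read_raw() above, under assumed numbers (the interface clock value is purely illustrative): a 4-lane part such as ad3552r, with DDR always on while buffering and 16-bit samples, reports bus_clock * lanes * 2 / realbits.

#include <stdio.h>

int main(void)
{
	unsigned long long bus_clk_hz = 125000000ULL;	/* assumed DAC interface clock */
	unsigned int lanes = 4;				/* ad3551r/ad3552r: QSPI */
	unsigned int ddr = 2;				/* DDR is always on when buffering */
	unsigned int realbits = 16;

	/* prints 62500000, i.e. 62.5 Msamples/s for these assumed values */
	printf("%llu samples/s\n", bus_clk_hz * lanes * ddr / realbits);
	return 0;
}
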
diff --git a/drivers/iio/dac/ad3552r-hs.h b/drivers/iio/dac/ad3552r-hs.h
index 724261d38dea..4a9e35234124 100644
--- a/drivers/iio/dac/ad3552r-hs.h
+++ b/drivers/iio/dac/ad3552r-hs.h
@@ -8,11 +8,19 @@
struct iio_backend;
+enum ad3552r_io_mode {
+ AD3552R_IO_MODE_SPI,
+ AD3552R_IO_MODE_DSPI,
+ AD3552R_IO_MODE_QSPI,
+};
+
struct ad3552r_hs_platform_data {
int (*bus_reg_read)(struct iio_backend *back, u32 reg, u32 *val,
size_t data_size);
int (*bus_reg_write)(struct iio_backend *back, u32 reg, u32 val,
size_t data_size);
+ int (*bus_set_io_mode)(struct iio_backend *back,
+ enum ad3552r_io_mode mode);
u32 bus_sample_data_clock_hz;
};
diff --git a/drivers/iio/dac/ad3552r.c b/drivers/iio/dac/ad3552r.c
index e7206af53af6..a44b163f3183 100644
--- a/drivers/iio/dac/ad3552r.c
+++ b/drivers/iio/dac/ad3552r.c
@@ -410,6 +410,12 @@ static int ad3552r_reset(struct ad3552r_desc *dac)
return ret;
}
+ /* Clear reset error flag, see ad3552r manual, rev B table 38. */
+ ret = ad3552r_write_reg(dac, AD3552R_REG_ADDR_ERR_STATUS,
+ AD3552R_MASK_RESET_STATUS);
+ if (ret)
+ return ret;
+
return ad3552r_update_reg_field(dac,
AD3552R_REG_ADDR_INTERFACE_CONFIG_A,
AD3552R_MASK_ADDR_ASCENSION,
@@ -649,42 +655,6 @@ static int ad3552r_probe(struct spi_device *spi)
return devm_iio_device_register(&spi->dev, indio_dev);
}
-static const struct ad3552r_model_data ad3541r_model_data = {
- .model_name = "ad3541r",
- .chip_id = AD3541R_ID,
- .num_hw_channels = 1,
- .ranges_table = ad3542r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
- .requires_output_range = true,
-};
-
-static const struct ad3552r_model_data ad3542r_model_data = {
- .model_name = "ad3542r",
- .chip_id = AD3542R_ID,
- .num_hw_channels = 2,
- .ranges_table = ad3542r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3542r_ch_ranges),
- .requires_output_range = true,
-};
-
-static const struct ad3552r_model_data ad3551r_model_data = {
- .model_name = "ad3551r",
- .chip_id = AD3551R_ID,
- .num_hw_channels = 1,
- .ranges_table = ad3552r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
- .requires_output_range = false,
-};
-
-static const struct ad3552r_model_data ad3552r_model_data = {
- .model_name = "ad3552r",
- .chip_id = AD3552R_ID,
- .num_hw_channels = 2,
- .ranges_table = ad3552r_ch_ranges,
- .num_ranges = ARRAY_SIZE(ad3552r_ch_ranges),
- .requires_output_range = false,
-};
-
static const struct spi_device_id ad3552r_id[] = {
{
.name = "ad3541r",
diff --git a/drivers/iio/dac/ad3552r.h b/drivers/iio/dac/ad3552r.h
index 4b5581039ae9..768fa264d39e 100644
--- a/drivers/iio/dac/ad3552r.h
+++ b/drivers/iio/dac/ad3552r.h
@@ -132,10 +132,14 @@
#define AD3552R_MAX_RANGES 5
#define AD3542R_MAX_RANGES 5
+#define AD3552R_SPI 0
+#define AD3552R_DUAL_SPI 1
#define AD3552R_QUAD_SPI 2
-extern const s32 ad3552r_ch_ranges[AD3552R_MAX_RANGES][2];
-extern const s32 ad3542r_ch_ranges[AD3542R_MAX_RANGES][2];
+extern const struct ad3552r_model_data ad3541r_model_data;
+extern const struct ad3552r_model_data ad3542r_model_data;
+extern const struct ad3552r_model_data ad3551r_model_data;
+extern const struct ad3552r_model_data ad3552r_model_data;
enum ad3552r_id {
AD3541R_ID = 0x400b,
@@ -151,6 +155,7 @@ struct ad3552r_model_data {
const s32 (*ranges_table)[2];
int num_ranges;
bool requires_output_range;
+ int num_spi_data_lanes;
};
struct ad3552r_ch_data {
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 57374f78f6b8..07848be3f8d5 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -6,21 +6,24 @@
* Copyright 2011 Analog Devices Inc.
*/
-#include <linux/interrupt.h>
-#include <linux/fs.h>
-#include <linux/device.h>
+#include <linux/bitops.h>
#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/spi.h>
#include <linux/sysfs.h>
-#include <linux/regulator/consumer.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
+#include <linux/units.h>
+#include <linux/iio/buffer-dmaengine.h>
+#include <linux/iio/dac/ad5791.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
-#include <linux/iio/dac/ad5791.h>
#define AD5791_DAC_MASK GENMASK(19, 0)
@@ -64,11 +67,13 @@
* struct ad5791_chip_info - chip specific information
* @name: name of the dac chip
* @channel: channel specification
+ * @channel_offload: channel specification for offload
* @get_lin_comp: function pointer to the device specific function
*/
struct ad5791_chip_info {
const char *name;
const struct iio_chan_spec channel;
+ const struct iio_chan_spec channel_offload;
int (*get_lin_comp)(unsigned int span);
};
@@ -81,6 +86,11 @@ struct ad5791_chip_info {
* @gpio_clear: clear gpio
* @gpio_ldac: load dac gpio
* @chip_info: chip model specific constants
+ * @offload_msg: spi message used for offload
+ * @offload_xfer: spi transfer used for offload
+ * @offload: offload device
+ * @offload_trigger: offload trigger
+ * @offload_trigger_hz: offload sample rate
* @vref_mv: actual reference voltage used
* @vref_neg_mv: voltage of the negative supply
* @ctrl: control register cache
@@ -96,6 +106,11 @@ struct ad5791_state {
struct gpio_desc *gpio_clear;
struct gpio_desc *gpio_ldac;
const struct ad5791_chip_info *chip_info;
+ struct spi_message offload_msg;
+ struct spi_transfer offload_xfer;
+ struct spi_offload *offload;
+ struct spi_offload_trigger *offload_trigger;
+ unsigned int offload_trigger_hz;
unsigned short vref_mv;
unsigned int vref_neg_mv;
unsigned ctrl;
@@ -232,6 +247,25 @@ static int ad5780_get_lin_comp(unsigned int span)
return AD5780_LINCOMP_10_20;
}
+static int ad5791_set_sample_freq(struct ad5791_state *st, int val)
+{
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = val,
+ },
+ };
+ int ret;
+
+ ret = spi_offload_trigger_validate(st->offload_trigger, &config);
+ if (ret)
+ return ret;
+
+ st->offload_trigger_hz = config.periodic.frequency_hz;
+
+ return 0;
+}
+
static int ad5791_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
@@ -259,6 +293,9 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
do_div(val64, st->vref_mv);
*val = -val64;
return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = st->offload_trigger_hz;
+ return IIO_VAL_INT;
default:
return -EINVAL;
}
@@ -294,7 +331,25 @@ static const struct ad5791_chip_info _name##_chip_info = { \
.scan_type = { \
.sign = 'u', \
.realbits = (bits), \
- .storagebits = 24, \
+ .storagebits = 32, \
+ .shift = (_shift), \
+ }, \
+ .ext_info = ad5791_ext_info, \
+ }, \
+ .channel_offload = { \
+ .type = IIO_VOLTAGE, \
+ .output = 1, \
+ .indexed = 1, \
+ .address = AD5791_ADDR_DAC0, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (bits), \
+ .storagebits = 32, \
.shift = (_shift), \
}, \
.ext_info = ad5791_ext_info, \
@@ -322,14 +377,106 @@ static int ad5791_write_raw(struct iio_dev *indio_dev,
return ad5791_spi_write(st, chan->address, val);
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ if (val < 1)
+ return -EINVAL;
+ return ad5791_set_sample_freq(st, val);
default:
return -EINVAL;
}
}
+static int ad5791_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return IIO_VAL_INT;
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+}
+
+static int ad5791_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+ struct spi_offload_trigger_config config = {
+ .type = SPI_OFFLOAD_TRIGGER_PERIODIC,
+ .periodic = {
+ .frequency_hz = st->offload_trigger_hz,
+ },
+ };
+
+ if (st->pwr_down)
+ return -EINVAL;
+
+ return spi_offload_trigger_enable(st->offload, st->offload_trigger,
+ &config);
+}
+
+static int ad5791_buffer_postdisable(struct iio_dev *indio_dev)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+
+ spi_offload_trigger_disable(st->offload, st->offload_trigger);
+
+ return 0;
+}
+
+static const struct iio_buffer_setup_ops ad5791_buffer_setup_ops = {
+ .preenable = &ad5791_buffer_preenable,
+ .postdisable = &ad5791_buffer_postdisable,
+};
+
+static int ad5791_offload_setup(struct iio_dev *indio_dev)
+{
+ struct ad5791_state *st = iio_priv(indio_dev);
+ struct spi_device *spi = st->spi;
+ struct dma_chan *tx_dma;
+ int ret;
+
+ st->offload_trigger = devm_spi_offload_trigger_get(&spi->dev,
+ st->offload, SPI_OFFLOAD_TRIGGER_PERIODIC);
+ if (IS_ERR(st->offload_trigger))
+ return dev_err_probe(&spi->dev, PTR_ERR(st->offload_trigger),
+ "failed to get offload trigger\n");
+
+ ret = ad5791_set_sample_freq(st, 1 * MEGA);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "failed to init sample rate\n");
+
+ tx_dma = devm_spi_offload_tx_stream_request_dma_chan(&spi->dev,
+ st->offload);
+ if (IS_ERR(tx_dma))
+ return dev_err_probe(&spi->dev, PTR_ERR(tx_dma),
+ "failed to get offload TX DMA\n");
+
+ ret = devm_iio_dmaengine_buffer_setup_with_handle(&spi->dev,
+ indio_dev, tx_dma, IIO_BUFFER_DIRECTION_OUT);
+ if (ret)
+ return ret;
+
+ st->offload_xfer.len = 4;
+ st->offload_xfer.bits_per_word = 24;
+ st->offload_xfer.offload_flags = SPI_OFFLOAD_XFER_TX_STREAM;
+
+ spi_message_init_with_transfers(&st->offload_msg, &st->offload_xfer, 1);
+ st->offload_msg.offload = st->offload;
+
+ return devm_spi_optimize_message(&spi->dev, st->spi, &st->offload_msg);
+}
+
static const struct iio_info ad5791_info = {
.read_raw = &ad5791_read_raw,
.write_raw = &ad5791_write_raw,
+ .write_raw_get_fmt = &ad5791_write_raw_get_fmt,
+};
+
+static const struct spi_offload_config ad5791_offload_config = {
+ .capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
+ SPI_OFFLOAD_CAP_TX_STREAM_DMA,
};
static int ad5791_probe(struct spi_device *spi)
@@ -416,6 +563,21 @@ static int ad5791_probe(struct spi_device *spi)
indio_dev->channels = &st->chip_info->channel;
indio_dev->num_channels = 1;
indio_dev->name = st->chip_info->name;
+
+ st->offload = devm_spi_offload_get(&spi->dev, spi, &ad5791_offload_config);
+ ret = PTR_ERR_OR_ZERO(st->offload);
+ if (ret && ret != -ENODEV)
+ return dev_err_probe(&spi->dev, ret, "failed to get offload\n");
+
+ if (ret != -ENODEV) {
+ indio_dev->channels = &st->chip_info->channel_offload;
+ indio_dev->setup_ops = &ad5791_buffer_setup_ops;
+ ret = ad5791_offload_setup(indio_dev);
+ if (ret)
+ return dev_err_probe(&spi->dev, ret,
+ "fail to setup offload\n");
+ }
+
return devm_iio_device_register(&spi->dev, indio_dev);
}
@@ -452,3 +614,4 @@ module_spi_driver(ad5791_driver);
MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("IIO_DMAENGINE_BUFFER");
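
A rough throughput sanity check for the offload path added above, under assumed numbers (only the 1 MSPS value mirrors the default set in ad5791_offload_setup(); the rest is illustrative): each buffer sample occupies 4 bytes (storagebits = 32) while only 24 bits are shifted out per transfer (offload_xfer.bits_per_word = 24), so the DMA stream carries 4 MB/s and the SPI clock must exceed 24 MHz before accounting for chip-select timing.

#include <stdio.h>

int main(void)
{
	unsigned long rate_sps = 1000000UL;	/* default from ad5791_offload_setup() */
	unsigned int storage_bytes = 4;		/* storagebits = 32 in the IIO buffer */
	unsigned int wire_bits = 24;		/* bits_per_word of the offload transfer */

	printf("DMA stream: %lu bytes/s\n", rate_sps * storage_bytes);	/* 4000000 */
	printf("SPI data: %lu bits/s, excluding CS timing\n",
	       rate_sps * wire_bits);					/* 24000000 */
	return 0;
}
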
diff --git a/drivers/iio/dac/ad8460.c b/drivers/iio/dac/ad8460.c
index 535ee3105af6..6e45686902dd 100644
--- a/drivers/iio/dac/ad8460.c
+++ b/drivers/iio/dac/ad8460.c
@@ -264,9 +264,12 @@ static ssize_t ad8460_write_toggle_en(struct iio_dev *indio_dev, uintptr_t priva
if (ret)
return ret;
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad8460_enable_apg_mode(state, toggle_en);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = ad8460_enable_apg_mode(state, toggle_en);
+ iio_device_release_direct(indio_dev);
+ return ret;
}
static ssize_t ad8460_read_powerdown(struct iio_dev *indio_dev, uintptr_t private,
@@ -421,14 +424,17 @@ static int ad8460_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad8460_state *state = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
switch (chan->type) {
case IIO_VOLTAGE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return ad8460_set_sample(state, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = ad8460_set_sample(state, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CURRENT:
return regmap_write(state->regmap, AD8460_CTRL_REG(0x04),
FIELD_PREP(AD8460_QUIESCENT_CURRENT_MSK, val));
diff --git a/drivers/iio/dac/adi-axi-dac.c b/drivers/iio/dac/adi-axi-dac.c
index b143f7ed6847..892d770aec69 100644
--- a/drivers/iio/dac/adi-axi-dac.c
+++ b/drivers/iio/dac/adi-axi-dac.c
@@ -64,7 +64,7 @@
#define AXI_DAC_UI_STATUS_IF_BUSY BIT(4)
#define AXI_DAC_CUSTOM_CTRL_REG 0x008C
#define AXI_DAC_CUSTOM_CTRL_ADDRESS GENMASK(31, 24)
-#define AXI_DAC_CUSTOM_CTRL_SYNCED_TRANSFER BIT(2)
+#define AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE GENMASK(3, 2)
#define AXI_DAC_CUSTOM_CTRL_STREAM BIT(1)
#define AXI_DAC_CUSTOM_CTRL_TRANSFER_DATA BIT(0)
@@ -168,7 +168,7 @@ static struct iio_buffer *axi_dac_request_buffer(struct iio_backend *back,
static void axi_dac_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
enum {
@@ -585,6 +585,14 @@ static int axi_dac_ddr_disable(struct iio_backend *back)
static int axi_dac_data_stream_enable(struct iio_backend *back)
{
struct axi_dac_state *st = iio_backend_get_priv(back);
+ int ret, val;
+
+ ret = regmap_read_poll_timeout(st->regmap,
+ AXI_DAC_UI_STATUS_REG, val,
+ FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, val) == 0,
+ 10, 100 * KILO);
+ if (ret)
+ return ret;
return regmap_set_bits(st->regmap, AXI_DAC_CUSTOM_CTRL_REG,
AXI_DAC_CUSTOM_CTRL_STREAM_ENABLE);
@@ -714,6 +722,28 @@ static int axi_dac_bus_reg_read(struct iio_backend *back, u32 reg, u32 *val,
return regmap_read(st->regmap, AXI_DAC_CUSTOM_RD_REG, val);
}
+static int axi_dac_bus_set_io_mode(struct iio_backend *back,
+ enum ad3552r_io_mode mode)
+{
+ struct axi_dac_state *st = iio_backend_get_priv(back);
+ int ival, ret;
+
+ if (mode > AD3552R_IO_MODE_QSPI)
+ return -EINVAL;
+
+ guard(mutex)(&st->lock);
+
+ ret = regmap_update_bits(st->regmap, AXI_DAC_CUSTOM_CTRL_REG,
+ AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE,
+ FIELD_PREP(AXI_DAC_CUSTOM_CTRL_MULTI_IO_MODE, mode));
+ if (ret)
+ return ret;
+
+ return regmap_read_poll_timeout(st->regmap, AXI_DAC_UI_STATUS_REG, ival,
+ FIELD_GET(AXI_DAC_UI_STATUS_IF_BUSY, ival) == 0, 10,
+ 100 * KILO);
+}
+
static void axi_dac_child_remove(void *data)
{
platform_device_unregister(data);
@@ -725,6 +755,7 @@ static int axi_dac_create_platform_device(struct axi_dac_state *st,
struct ad3552r_hs_platform_data pdata = {
.bus_reg_read = axi_dac_bus_reg_read,
.bus_reg_write = axi_dac_bus_reg_write,
+ .bus_set_io_mode = axi_dac_bus_set_io_mode,
.bus_sample_data_clock_hz = st->dac_clk_rate,
};
struct platform_device_info pi = {
diff --git a/drivers/iio/dummy/iio_simple_dummy.c b/drivers/iio/dummy/iio_simple_dummy.c
index 09efacaf8f78..8575d4a08963 100644
--- a/drivers/iio/dummy/iio_simple_dummy.c
+++ b/drivers/iio/dummy/iio_simple_dummy.c
@@ -267,6 +267,65 @@ static const struct iio_chan_spec iio_dummy_channels[] = {
},
};
+static int __iio_dummy_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ if (chan->output) {
+ /* Set integer part to cached value */
+ *val = st->dac_val;
+ return IIO_VAL_INT;
+ } else if (chan->differential) {
+ if (chan->channel == 1)
+ *val = st->differential_adc_val[0];
+ else
+ *val = st->differential_adc_val[1];
+ return IIO_VAL_INT;
+ } else {
+ *val = st->single_ended_adc_val;
+ return IIO_VAL_INT;
+ }
+
+ case IIO_ACCEL:
+ *val = st->accel_val;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int __iio_dummy_read_processed(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct iio_dummy_state *st = iio_priv(indio_dev);
+
+ guard(mutex)(&st->lock);
+ switch (chan->type) {
+ case IIO_STEPS:
+ *val = st->steps;
+ return IIO_VAL_INT;
+ case IIO_ACTIVITY:
+ switch (chan->channel2) {
+ case IIO_MOD_RUNNING:
+ *val = st->activity_running;
+ return IIO_VAL_INT;
+ case IIO_MOD_WALKING:
+ *val = st->activity_walking;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* iio_dummy_read_raw() - data read function.
* @indio_dev: the struct iio_dev associated with this device instance
@@ -283,59 +342,21 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
long mask)
{
struct iio_dummy_state *st = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW: /* magic value - channel value read */
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->output) {
- /* Set integer part to cached value */
- *val = st->dac_val;
- return IIO_VAL_INT;
- } else if (chan->differential) {
- if (chan->channel == 1)
- *val = st->differential_adc_val[0];
- else
- *val = st->differential_adc_val[1];
- return IIO_VAL_INT;
- } else {
- *val = st->single_ended_adc_val;
- return IIO_VAL_INT;
- }
-
- case IIO_ACCEL:
- *val = st->accel_val;
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __iio_dummy_read_raw(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_PROCESSED:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- guard(mutex)(&st->lock);
- switch (chan->type) {
- case IIO_STEPS:
- *val = st->steps;
- return IIO_VAL_INT;
- case IIO_ACTIVITY:
- switch (chan->channel2) {
- case IIO_MOD_RUNNING:
- *val = st->activity_running;
- return IIO_VAL_INT;
- case IIO_MOD_WALKING:
- *val = st->activity_walking;
- return IIO_VAL_INT;
- default:
- return -EINVAL;
- }
- default:
- return -EINVAL;
- }
- }
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = __iio_dummy_read_processed(indio_dev, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
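
The conversions above (and the matching ones in the ad3552r-hs and ad8460 hunks) all move from the scoped claim helper to the plain claim/release pair. A minimal sketch of the resulting pattern, with a hypothetical hardware accessor standing in for the real register read:

#include <linux/iio/iio.h>

static int foo_read_direct(struct iio_dev *indio_dev, int *val)
{
	int ret;

	if (!iio_device_claim_direct(indio_dev))
		return -EBUSY;		/* buffered capture currently owns the device */

	ret = foo_hw_read(indio_dev, val);	/* hypothetical accessor */
	iio_device_release_direct(indio_dev);

	return ret;
}
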
diff --git a/drivers/iio/filter/admv8818.c b/drivers/iio/filter/admv8818.c
index 848baa6e3bbf..d85b7d3de866 100644
--- a/drivers/iio/filter/admv8818.c
+++ b/drivers/iio/filter/admv8818.c
@@ -574,21 +574,15 @@ static int admv8818_init(struct admv8818_state *st)
struct spi_device *spi = st->spi;
unsigned int chip_id;
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SOFTRESET_N_MSK |
- ADMV8818_SOFTRESET_MSK,
- FIELD_PREP(ADMV8818_SOFTRESET_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SOFTRESET_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SOFTRESET_N_MSK | ADMV8818_SOFTRESET_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 Soft Reset failed.\n");
return ret;
}
- ret = regmap_update_bits(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
- ADMV8818_SDOACTIVE_N_MSK |
- ADMV8818_SDOACTIVE_MSK,
- FIELD_PREP(ADMV8818_SDOACTIVE_N_MSK, 1) |
- FIELD_PREP(ADMV8818_SDOACTIVE_MSK, 1));
+ ret = regmap_write(st->regmap, ADMV8818_REG_SPI_CONFIG_A,
+ ADMV8818_SDOACTIVE_N_MSK | ADMV8818_SDOACTIVE_MSK);
if (ret) {
dev_err(&spi->dev, "ADMV8818 SDO Enable failed.\n");
return ret;
diff --git a/drivers/iio/frequency/adf4371.c b/drivers/iio/frequency/adf4371.c
index d752507e0c98..9a84e81787b1 100644
--- a/drivers/iio/frequency/adf4371.c
+++ b/drivers/iio/frequency/adf4371.c
@@ -42,6 +42,12 @@
#define ADF4371_MOD2WORD_MSK GENMASK(5, 0)
#define ADF4371_MOD2WORD(x) FIELD_PREP(ADF4371_MOD2WORD_MSK, x)
+/* ADF4371_REG22 */
+#define ADF4371_REFIN_MODE_MASK BIT(6)
+#define ADF4371_REFIN_MODE(x) FIELD_PREP(ADF4371_REFIN_MODE_MASK, x)
+#define ADF4371_REF_DOUB_MASK BIT(5)
+#define ADF4371_REF_DOUB(x) FIELD_PREP(ADF4371_REF_DOUB_MASK, x)
+
/* ADF4371_REG24 */
#define ADF4371_RF_DIV_SEL_MSK GENMASK(6, 4)
#define ADF4371_RF_DIV_SEL(x) FIELD_PREP(ADF4371_RF_DIV_SEL_MSK, x)
@@ -70,6 +76,10 @@
#define ADF4371_MAX_FREQ_PFD 250000000UL /* Hz */
#define ADF4371_MAX_FREQ_REFIN 600000000UL /* Hz */
+#define ADF4371_MAX_FREQ_REFIN_SE 500000000UL /* Hz */
+
+#define ADF4371_MIN_CLKIN_DOUB_FREQ 10000000ULL /* Hz */
+#define ADF4371_MAX_CLKIN_DOUB_FREQ 125000000ULL /* Hz */
/* MOD1 is a 24-bit primary modulus with fixed value of 2^25 */
#define ADF4371_MODULUS1 33554432ULL
@@ -176,6 +186,7 @@ struct adf4371_state {
unsigned int mod2;
unsigned int rf_div_sel;
unsigned int ref_div_factor;
+ bool ref_diff_en;
u8 buf[10] __aligned(IIO_DMA_MINALIGN);
};
@@ -477,7 +488,7 @@ static const struct iio_info adf4371_info = {
static int adf4371_setup(struct adf4371_state *st)
{
unsigned int synth_timeout = 2, timeout = 1, vco_alc_timeout = 1;
- unsigned int vco_band_div, tmp;
+ unsigned int vco_band_div, tmp, ref_doubler_en = 0;
int ret;
/* Perform a software reset */
@@ -505,6 +516,23 @@ static int adf4371_setup(struct adf4371_state *st)
ADF4371_ADDR_ASC(1) | ADF4371_ADDR_ASC_R(1));
if (ret < 0)
return ret;
+
+ if ((st->ref_diff_en && st->clkin_freq > ADF4371_MAX_FREQ_REFIN) ||
+ (!st->ref_diff_en && st->clkin_freq > ADF4371_MAX_FREQ_REFIN_SE))
+ return -EINVAL;
+
+ if (st->clkin_freq < ADF4371_MAX_CLKIN_DOUB_FREQ &&
+ st->clkin_freq > ADF4371_MIN_CLKIN_DOUB_FREQ)
+ ref_doubler_en = 1;
+
+ ret = regmap_update_bits(st->regmap, ADF4371_REG(0x22),
+ ADF4371_REF_DOUB_MASK |
+ ADF4371_REFIN_MODE_MASK,
+ ADF4371_REF_DOUB(ref_doubler_en) |
+ ADF4371_REFIN_MODE(st->ref_diff_en));
+ if (ret < 0)
+ return ret;
+
/*
* Calculate and maximize PFD frequency
* fPFD = REFIN × ((1 + D)/(R × (1 + T)))
@@ -514,7 +542,8 @@ static int adf4371_setup(struct adf4371_state *st)
*/
do {
st->ref_div_factor++;
- st->fpfd = st->clkin_freq / st->ref_div_factor;
+ st->fpfd = st->clkin_freq * (1 + ref_doubler_en) /
+ st->ref_div_factor;
} while (st->fpfd > ADF4371_MAX_FREQ_PFD);
/* Calculate Timeouts */
@@ -574,10 +603,16 @@ static int adf4371_probe(struct spi_device *spi)
indio_dev->channels = st->chip_info->channels;
indio_dev->num_channels = st->chip_info->num_channels;
+ st->ref_diff_en = false;
+
clkin = devm_clk_get_enabled(&spi->dev, "clkin");
- if (IS_ERR(clkin))
- return dev_err_probe(&spi->dev, PTR_ERR(clkin),
- "Failed to get clkin\n");
+ if (IS_ERR(clkin)) {
+ clkin = devm_clk_get_enabled(&spi->dev, "clkin-diff");
+ if (IS_ERR(clkin))
+ return dev_err_probe(&spi->dev, PTR_ERR(clkin),
+ "Failed to get clkin/clkin-diff\n");
+ st->ref_diff_en = true;
+ }
st->clkin_freq = clk_get_rate(clkin);
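
A worked example of the doubler-aware PFD selection added above, with an assumed reference (100 MHz is illustrative, not taken from the patch): a 100 MHz single-ended REFIN sits inside the 10..125 MHz doubler window, so D = 1 and the loop stops at the first R that brings fPFD at or below 250 MHz.

#include <stdio.h>

#define MAX_FREQ_PFD 250000000UL	/* mirrors ADF4371_MAX_FREQ_PFD */

int main(void)
{
	unsigned long clkin = 100000000UL;	/* assumed REFIN */
	unsigned long fpfd, r = 0;

	do {
		r++;
		fpfd = clkin * 2 / r;	/* (1 + D) = 2 with the doubler on, T = 0 */
	} while (fpfd > MAX_FREQ_PFD);

	printf("R = %lu, fPFD = %lu Hz\n", r, fpfd);	/* R = 1, fPFD = 200000000 */
	return 0;
}
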
diff --git a/drivers/iio/gyro/bmg160_i2c.c b/drivers/iio/gyro/bmg160_i2c.c
index 9c5d7e8ee99c..e6caab49f98a 100644
--- a/drivers/iio/gyro/bmg160_i2c.c
+++ b/drivers/iio/gyro/bmg160_i2c.c
@@ -58,6 +58,7 @@ MODULE_DEVICE_TABLE(i2c, bmg160_i2c_id);
static const struct of_device_id bmg160_of_match[] = {
{ .compatible = "bosch,bmg160" },
{ .compatible = "bosch,bmi055_gyro" },
+ { .compatible = "bosch,bmi088_gyro" },
{ }
};
diff --git a/drivers/iio/gyro/bmg160_spi.c b/drivers/iio/gyro/bmg160_spi.c
index fc2e453527b9..ac04b3b1b554 100644
--- a/drivers/iio/gyro/bmg160_spi.c
+++ b/drivers/iio/gyro/bmg160_spi.c
@@ -41,9 +41,19 @@ static const struct spi_device_id bmg160_spi_id[] = {
MODULE_DEVICE_TABLE(spi, bmg160_spi_id);
+static const struct of_device_id bmg160_of_match[] = {
+ { .compatible = "bosch,bmg160" },
+ { .compatible = "bosch,bmi055_gyro" },
+ { .compatible = "bosch,bmi088_gyro" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, bmg160_of_match);
+
static struct spi_driver bmg160_spi_driver = {
.driver = {
.name = "bmg160_spi",
+ .of_match_table = bmg160_of_match,
.pm = &bmg160_pm_ops,
},
.probe = bmg160_spi_probe,
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index c97e25448772..48c59d09eea7 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/io.h>
#include <linux/mod_devicetable.h>
@@ -99,7 +100,7 @@ static void dht11_edges_print(struct dht11 *dht11)
for (i = 1; i < dht11->num_edges; ++i) {
dev_dbg(dht11->dev, "%d: %lld ns %s\n", i,
dht11->edges[i].ts - dht11->edges[i - 1].ts,
- dht11->edges[i - 1].value ? "high" : "low");
+ str_high_low(dht11->edges[i - 1].value));
}
}
#endif /* CONFIG_DYNAMIC_DEBUG */
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index ca0efecb5b5c..15612f0f189b 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -52,6 +52,19 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+config ADIS16550
+ tristate "Analog Devices ADIS16550 and similar IMU driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ select CRC32
+ help
+ Say yes here to build support for Analog Devices ADIS16550 inertial
+ sensor containing triaxis gyroscope and triaxis accelerometer.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adis16550.
+
source "drivers/iio/imu/bmi160/Kconfig"
source "drivers/iio/imu/bmi270/Kconfig"
source "drivers/iio/imu/bmi323/Kconfig"
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index 04c77c2c4df8..e901aea498d3 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ADIS16400) += adis16400.o
obj-$(CONFIG_ADIS16460) += adis16460.o
obj-$(CONFIG_ADIS16475) += adis16475.o
obj-$(CONFIG_ADIS16480) += adis16480.o
+obj-$(CONFIG_ADIS16550) += adis16550.o
adis_lib-y += adis.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 494171844812..0ea072a4c966 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -223,13 +223,13 @@ int __adis_update_bits_base(struct adis *adis, unsigned int reg, const u32 mask,
int ret;
u32 __val;
- ret = __adis_read_reg(adis, reg, &__val, size);
+ ret = adis->ops->read(adis, reg, &__val, size);
if (ret)
return ret;
__val = (__val & ~mask) | (val & mask);
- return __adis_write_reg(adis, reg, __val, size);
+ return adis->ops->write(adis, reg, __val, size);
}
EXPORT_SYMBOL_NS_GPL(__adis_update_bits_base, "IIO_ADISLIB");
@@ -304,11 +304,20 @@ EXPORT_SYMBOL_NS(__adis_enable_irq, "IIO_ADISLIB");
*/
int __adis_check_status(struct adis *adis)
{
- u16 status;
+ unsigned int status;
+ int diag_stat_bits;
+ u16 status_16 = 0;
int ret;
int i;
- ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg, &status);
+ if (adis->data->diag_stat_size) {
+ ret = adis->ops->read(adis, adis->data->diag_stat_reg, &status,
+ adis->data->diag_stat_size);
+ } else {
+ ret = __adis_read_reg_16(adis, adis->data->diag_stat_reg,
+ &status_16);
+ status = status_16;
+ }
if (ret)
return ret;
@@ -317,7 +326,10 @@ int __adis_check_status(struct adis *adis)
if (status == 0)
return 0;
- for (i = 0; i < 16; ++i) {
+ diag_stat_bits = BITS_PER_BYTE * (adis->data->diag_stat_size ?
+ adis->data->diag_stat_size : 2);
+
+ for (i = 0; i < diag_stat_bits; ++i) {
if (status & BIT(i)) {
dev_err(&adis->spi->dev, "%s.\n",
adis->data->status_error_msgs[i]);
@@ -468,7 +480,7 @@ int adis_single_conversion(struct iio_dev *indio_dev,
guard(mutex)(&adis->state_lock);
- ret = __adis_read_reg(adis, chan->address, &uval,
+ ret = adis->ops->read(adis, chan->address, &uval,
chan->scan_type.storagebits / 8);
if (ret)
return ret;
@@ -488,6 +500,12 @@ int adis_single_conversion(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_NS_GPL(adis_single_conversion, "IIO_ADISLIB");
+static const struct adis_ops adis_default_ops = {
+ .read = __adis_read_reg,
+ .write = __adis_write_reg,
+ .reset = __adis_reset,
+};
+
/**
* adis_init() - Initialize adis device structure
* @adis: The adis device
@@ -517,6 +535,11 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev,
adis->spi = spi;
adis->data = data;
+ if (!adis->ops->write && !adis->ops->read && !adis->ops->reset)
+ adis->ops = &adis_default_ops;
+ else if (!adis->ops->write || !adis->ops->read || !adis->ops->reset)
+ return -EINVAL;
+
iio_device_set_drvdata(indio_dev, adis);
if (data->has_paging) {
diff --git a/drivers/iio/imu/adis16550.c b/drivers/iio/imu/adis16550.c
new file mode 100644
index 000000000000..b14ea8937c7f
--- /dev/null
+++ b/drivers/iio/imu/adis16550.c
@@ -0,0 +1,1147 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ADIS16550 IMU driver
+ *
+ * Copyright 2024 Analog Devices Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/debugfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/imu/adis.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/lcm.h>
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+#include <linux/swab.h>
+#include <linux/unaligned.h>
+
+#define ADIS16550_REG_BURST_GYRO_ACCEL 0x0a
+#define ADIS16550_REG_BURST_DELTA_ANG_VEL 0x0b
+#define ADIS16550_BURST_DATA_GYRO_ACCEL_MASK GENMASK(6, 1)
+#define ADIS16550_BURST_DATA_DELTA_ANG_VEL_MASK GENMASK(12, 7)
+
+#define ADIS16550_REG_STATUS 0x0e
+#define ADIS16550_REG_TEMP 0x10
+#define ADIS16550_REG_X_GYRO 0x12
+#define ADIS16550_REG_Y_GYRO 0x14
+#define ADIS16550_REG_Z_GYRO 0x16
+#define ADIS16550_REG_X_ACCEL 0x18
+#define ADIS16550_REG_Y_ACCEL 0x1a
+#define ADIS16550_REG_Z_ACCEL 0x1c
+#define ADIS16550_REG_X_DELTANG_L 0x1E
+#define ADIS16550_REG_Y_DELTANG_L 0x20
+#define ADIS16550_REG_Z_DELTANG_L 0x22
+#define ADIS16550_REG_X_DELTVEL_L 0x24
+#define ADIS16550_REG_Y_DELTVEL_L 0x26
+#define ADIS16550_REG_Z_DELTVEL_L 0x28
+#define ADIS16550_REG_X_GYRO_SCALE 0x30
+#define ADIS16550_REG_Y_GYRO_SCALE 0x32
+#define ADIS16550_REG_Z_GYRO_SCALE 0x34
+#define ADIS16550_REG_X_ACCEL_SCALE 0x36
+#define ADIS16550_REG_Y_ACCEL_SCALE 0x38
+#define ADIS16550_REG_Z_ACCEL_SCALE 0x3a
+#define ADIS16550_REG_X_GYRO_BIAS 0x40
+#define ADIS16550_REG_Y_GYRO_BIAS 0x42
+#define ADIS16550_REG_Z_GYRO_BIAS 0x44
+#define ADIS16550_REG_X_ACCEL_BIAS 0x46
+#define ADIS16550_REG_Y_ACCEL_BIAS 0x48
+#define ADIS16550_REG_Z_ACCEL_BIAS 0x4a
+#define ADIS16550_REG_COMMAND 0x50
+#define ADIS16550_REG_CONFIG 0x52
+#define ADIS16550_GYRO_FIR_EN_MASK BIT(3)
+#define ADIS16550_ACCL_FIR_EN_MASK BIT(2)
+#define ADIS16550_SYNC_MASK \
+ (ADIS16550_SYNC_EN_MASK | ADIS16550_SYNC_MODE_MASK)
+#define ADIS16550_SYNC_MODE_MASK BIT(1)
+#define ADIS16550_SYNC_EN_MASK BIT(0)
+/* max of 4000 SPS in scale sync */
+#define ADIS16550_SYNC_SCALE_MAX_RATE (4000 * 1000)
+#define ADIS16550_REG_DEC_RATE 0x54
+#define ADIS16550_REG_SYNC_SCALE 0x56
+#define ADIS16550_REG_SERIAL_NUM 0x76
+#define ADIS16550_REG_FW_REV 0x7A
+#define ADIS16550_REG_FW_DATE 0x7C
+#define ADIS16550_REG_PROD_ID 0x7E
+#define ADIS16550_REG_FLASH_CNT 0x72
+/* SPI protocol */
+#define ADIS16550_SPI_DATA_MASK GENMASK(31, 16)
+#define ADIS16550_SPI_REG_MASK GENMASK(14, 8)
+#define ADIS16550_SPI_R_W_MASK BIT(7)
+#define ADIS16550_SPI_CRC_MASK GENMASK(3, 0)
+#define ADIS16550_SPI_SV_MASK GENMASK(7, 6)
+/* burst read */
+#define ADIS16550_BURST_N_ELEM 12
+#define ADIS16550_BURST_DATA_LEN (ADIS16550_BURST_N_ELEM * 4)
+#define ADIS16550_MAX_SCAN_DATA 12
+
+struct adis16550_sync {
+ u16 sync_mode;
+ u16 min_rate;
+ u16 max_rate;
+};
+
+struct adis16550_chip_info {
+ const struct iio_chan_spec *channels;
+ const struct adis16550_sync *sync_mode;
+ char *name;
+ u32 num_channels;
+ u32 gyro_max_val;
+ u32 gyro_max_scale;
+ u32 accel_max_val;
+ u32 accel_max_scale;
+ u32 temp_scale;
+ u32 deltang_max_val;
+ u32 deltvel_max_val;
+ u32 int_clk;
+ u16 max_dec;
+ u16 num_sync;
+};
+
+struct adis16550 {
+ const struct adis16550_chip_info *info;
+ struct adis adis;
+ unsigned long clk_freq_hz;
+ u32 sync_mode;
+ struct spi_transfer xfer[2];
+ u8 buffer[ADIS16550_BURST_DATA_LEN + sizeof(u32)] __aligned(IIO_DMA_MINALIGN);
+ __be32 din[2];
+ __be32 dout[2];
+};
+
+enum {
+ ADIS16550_SV_INIT,
+ ADIS16550_SV_OK,
+ ADIS16550_SV_NOK,
+ ADIS16550_SV_SPI_ERROR,
+};
+
+/*
+ * This is a simplified implementation of lib/crc4.c. It could not be used
+ * directly since the polynomial it uses differs from the one used by the
+ * adis16550, which is 0b10001.
+ */
+static u8 spi_crc4(const u32 val)
+{
+ int i;
+ const int bits = 28;
+ u8 crc = 0xa;
+ /* ignore 4lsb */
+ const u32 __val = val >> 4;
+
+ /* Calculate crc4 over four-bit nibbles, starting at the MSbit */
+ for (i = bits - 4; i >= 0; i -= 4)
+ crc = crc ^ ((__val >> i) & 0xf);
+
+ return crc;
+}
+
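
A self-contained sketch of the frame construction this CRC supports, handy for checking bus traces by hand; the register choice below is illustrative and everything is derived only from the masks and the CRC routine shown in this file:

#include <stdio.h>
#include <stdint.h>

/* Same nibble-wise CRC-4 as spi_crc4() above: poly 0b10001, init 0xa,
 * computed over bits 31:4 of the frame (the low nibble carries the CRC). */
static uint8_t demo_crc4(uint32_t frame)
{
	uint32_t v = frame >> 4;
	uint8_t crc = 0xa;
	int i;

	for (i = 24; i >= 0; i -= 4)
		crc ^= (v >> i) & 0xf;

	return crc;
}

int main(void)
{
	/* Read request for PROD_ID (0x7e): register in bits 14:8, R/W bit clear. */
	uint32_t frame = 0x7e << 8;

	frame |= demo_crc4(frame);
	printf("0x%08x\n", frame);	/* prints 0x00007e03 */
	return 0;
}
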
+static int adis16550_spi_validate(const struct adis *adis, __be32 dout,
+ u16 *data)
+{
+ u32 __dout;
+ u8 crc, crc_rcv, sv;
+
+ __dout = be32_to_cpu(dout);
+
+ /* validate received message */
+ crc_rcv = FIELD_GET(ADIS16550_SPI_CRC_MASK, __dout);
+ crc = spi_crc4(__dout);
+ if (crc_rcv != crc) {
+ dev_err(&adis->spi->dev,
+ "Invalid crc, rcv: 0x%02x, calc: 0x%02x!\n",
+ crc_rcv, crc);
+ return -EIO;
+ }
+ sv = FIELD_GET(ADIS16550_SPI_SV_MASK, __dout);
+ if (sv >= ADIS16550_SV_NOK) {
+ dev_err(&adis->spi->dev,
+ "State vector error detected: %02X", sv);
+ return -EIO;
+ }
+ *data = FIELD_GET(ADIS16550_SPI_DATA_MASK, __dout);
+
+ return 0;
+}
+
+static void adis16550_spi_msg_prepare(const u32 reg, const bool write,
+ const u16 data, __be32 *din)
+{
+ u8 crc;
+ u32 __din;
+
+ __din = FIELD_PREP(ADIS16550_SPI_REG_MASK, reg);
+
+ if (write) {
+ __din |= FIELD_PREP(ADIS16550_SPI_R_W_MASK, 1);
+ __din |= FIELD_PREP(ADIS16550_SPI_DATA_MASK, data);
+ }
+
+ crc = spi_crc4(__din);
+ __din |= FIELD_PREP(ADIS16550_SPI_CRC_MASK, crc);
+
+ *din = cpu_to_be32(__din);
+}
+
+static int adis16550_spi_xfer(const struct adis *adis, u32 reg, u32 len,
+ u32 *readval, u32 writeval)
+{
+ int ret;
+ u16 data = 0;
+ struct spi_message msg;
+ bool wr = readval ? false : true;
+ struct spi_device *spi = adis->spi;
+ struct adis16550 *st = container_of(adis, struct adis16550, adis);
+ struct spi_transfer xfers[] = {
+ {
+ .tx_buf = &st->din[0],
+ .len = 4,
+ .cs_change = 1,
+ }, {
+ .tx_buf = &st->din[1],
+ .len = 4,
+ .cs_change = 1,
+ .rx_buf = st->dout,
+ }, {
+ .tx_buf = &st->din[1],
+ .rx_buf = &st->dout[1],
+ .len = 4,
+ },
+ };
+
+ spi_message_init(&msg);
+
+ switch (len) {
+ case 4:
+ adis16550_spi_msg_prepare(reg + 1, wr, writeval >> 16,
+ &st->din[0]);
+ spi_message_add_tail(&xfers[0], &msg);
+ fallthrough;
+ case 2:
+ adis16550_spi_msg_prepare(reg, wr, writeval, &st->din[1]);
+ spi_message_add_tail(&xfers[1], &msg);
+ spi_message_add_tail(&xfers[2], &msg);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = spi_sync(spi, &msg);
+ if (ret) {
+ dev_err(&spi->dev, "Spi failure %d\n", ret);
+ return ret;
+ }
+ /*
+ * When writing a register, the device will reply with a readback on the
+ * transfer, so that we can validate that our data was actually written.
+ */
+ switch (len) {
+ case 4:
+ ret = adis16550_spi_validate(adis, st->dout[0], &data);
+ if (ret)
+ return ret;
+
+ if (readval) {
+ *readval = data << 16;
+ } else if ((writeval >> 16) != data && reg != ADIS16550_REG_COMMAND) {
+ dev_err(&spi->dev,
+ "Data not written: wr: 0x%04X, rcv: 0x%04X\n",
+ writeval >> 16, data);
+ return -EIO;
+ }
+
+ fallthrough;
+ case 2:
+ ret = adis16550_spi_validate(adis, st->dout[1], &data);
+ if (ret)
+ return ret;
+
+ if (readval) {
+ *readval = (*readval & GENMASK(31, 16)) | data;
+ } else if ((writeval & GENMASK(15, 0)) != data && reg != ADIS16550_REG_COMMAND) {
+ dev_err(&spi->dev,
+ "Data not written: wr: 0x%04X, rcv: 0x%04X\n",
+ (u16)writeval, data);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int adis16550_spi_read(struct adis *adis, const u32 reg,
+ u32 *value, const u32 len)
+{
+ return adis16550_spi_xfer(adis, reg, len, value, 0);
+}
+
+static int adis16550_spi_write(struct adis *adis, const u32 reg,
+ const u32 value, const u32 len)
+{
+ return adis16550_spi_xfer(adis, reg, len, NULL, value);
+}
+
+static ssize_t adis16550_show_firmware_revision(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16550 *st = file->private_data;
+ char buf[7];
+ size_t len;
+ u16 rev;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_FW_REV, &rev);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%x.%x\n", rev >> 8, rev & 0xff);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16550_firmware_revision_fops = {
+ .open = simple_open,
+ .read = adis16550_show_firmware_revision,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t adis16550_show_firmware_date(struct file *file,
+ char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct adis16550 *st = file->private_data;
+ char buf[12];
+ size_t len;
+ u32 date;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16550_REG_FW_DATE, &date);
+ if (ret)
+ return ret;
+
+ len = scnprintf(buf, sizeof(buf), "%.2x-%.2x-%.4x\n", date & 0xff,
+ (date >> 8) & 0xff, date >> 16);
+
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations adis16550_firmware_date_fops = {
+ .open = simple_open,
+ .read = adis16550_show_firmware_date,
+ .llseek = default_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int adis16550_show_serial_number(void *arg, u64 *val)
+{
+ struct adis16550 *st = arg;
+ u32 serial;
+ int ret;
+
+ ret = adis_read_reg_32(&st->adis, ADIS16550_REG_SERIAL_NUM, &serial);
+ if (ret)
+ return ret;
+
+ *val = serial;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16550_serial_number_fops,
+ adis16550_show_serial_number, NULL, "0x%.8llx\n");
+
+static int adis16550_show_product_id(void *arg, u64 *val)
+{
+ struct adis16550 *st = arg;
+ u16 prod_id;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_PROD_ID, &prod_id);
+ if (ret)
+ return ret;
+
+ *val = prod_id;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16550_product_id_fops,
+ adis16550_show_product_id, NULL, "%llu\n");
+
+static int adis16550_show_flash_count(void *arg, u64 *val)
+{
+ struct adis16550 *st = arg;
+ u16 flash_count;
+ int ret;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_FLASH_CNT, &flash_count);
+ if (ret)
+ return ret;
+
+ *val = flash_count;
+
+ return 0;
+}
+DEFINE_DEBUGFS_ATTRIBUTE(adis16550_flash_count_fops,
+ adis16550_show_flash_count, NULL, "%lld\n");
+
+static void adis16550_debugfs_init(struct iio_dev *indio_dev)
+{
+ struct adis16550 *st = iio_priv(indio_dev);
+ struct dentry *d = iio_get_debugfs_dentry(indio_dev);
+
+ debugfs_create_file_unsafe("serial_number", 0400, d, st,
+ &adis16550_serial_number_fops);
+ debugfs_create_file_unsafe("product_id", 0400, d, st,
+ &adis16550_product_id_fops);
+ debugfs_create_file("firmware_revision", 0400, d, st,
+ &adis16550_firmware_revision_fops);
+ debugfs_create_file("firmware_date", 0400, d, st,
+ &adis16550_firmware_date_fops);
+ debugfs_create_file_unsafe("flash_count", 0400, d, st,
+ &adis16550_flash_count_fops);
+}
+
+enum {
+ ADIS16550_SYNC_MODE_DIRECT,
+ ADIS16550_SYNC_MODE_SCALED,
+};
+
+static int adis16550_get_freq(struct adis16550 *st, u32 *freq)
+{
+ int ret;
+ u16 dec = 0;
+ u32 sample_rate = st->clk_freq_hz;
+
+ adis_dev_auto_lock(&st->adis);
+
+ if (st->sync_mode == ADIS16550_SYNC_MODE_SCALED) {
+ u16 sync_scale;
+
+ ret = __adis_read_reg_16(&st->adis, ADIS16550_REG_SYNC_SCALE, &sync_scale);
+ if (ret)
+ return ret;
+
+ sample_rate = st->clk_freq_hz * sync_scale;
+ }
+
+ ret = __adis_read_reg_16(&st->adis, ADIS16550_REG_DEC_RATE, &dec);
+ if (ret)
+ return -EINVAL;
+ *freq = DIV_ROUND_CLOSEST(sample_rate, dec + 1);
+
+ return 0;
+}
+
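+/*
+ * Worked example (the unit conversions in adis16550_read_raw() imply
+ * that sample rates are handled internally in milli-Hz): with the 4 kHz
+ * internal clock, clk_freq_hz = 4000000. Requesting a 1 kHz sample rate
+ * (freq_hz = 1000000) yields dec = 4000000 / 1000000 - 1 = 3, i.e. an
+ * output rate of 4000 / (3 + 1) = 1000 Hz.
+ */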
+static int adis16550_set_freq_hz(struct adis16550 *st, u32 freq_hz)
+{
+ u16 dec;
+ int ret;
+ u32 sample_rate = st->clk_freq_hz;
+ /*
+ * The optimal sample rate for the supported IMUs is between
+ * int_clk - 1000 and int_clk + 500.
+ */
+ u32 max_sample_rate = st->info->int_clk * 1000 + 500000;
+ u32 min_sample_rate = st->info->int_clk * 1000 - 1000000;
+
+ if (!freq_hz)
+ return -EINVAL;
+
+ adis_dev_auto_lock(&st->adis);
+
+ if (st->sync_mode == ADIS16550_SYNC_MODE_SCALED) {
+ unsigned long scaled_rate = lcm(st->clk_freq_hz, freq_hz);
+ int sync_scale;
+
+ if (scaled_rate > max_sample_rate)
+ scaled_rate = max_sample_rate / st->clk_freq_hz * st->clk_freq_hz;
+ else
+ scaled_rate = max_sample_rate / scaled_rate * scaled_rate;
+
+ if (scaled_rate < min_sample_rate)
+ scaled_rate = roundup(min_sample_rate, st->clk_freq_hz);
+
+ sync_scale = scaled_rate / st->clk_freq_hz;
+ ret = __adis_write_reg_16(&st->adis, ADIS16550_REG_SYNC_SCALE,
+ sync_scale);
+ if (ret)
+ return ret;
+
+ sample_rate = scaled_rate;
+ }
+
+ dec = DIV_ROUND_CLOSEST(sample_rate, freq_hz);
+
+ if (dec)
+ dec--;
+
+ dec = min(dec, st->info->max_dec);
+
+ return __adis_write_reg_16(&st->adis, ADIS16550_REG_DEC_RATE, dec);
+}
+
+static int adis16550_get_accl_filter_freq(struct adis16550 *st, int *freq_hz)
+{
+ int ret;
+ u16 config = 0;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_CONFIG, &config);
+ if (ret)
+ return -EINVAL;
+
+ if (FIELD_GET(ADIS16550_ACCL_FIR_EN_MASK, config))
+ *freq_hz = 100;
+ else
+ *freq_hz = 0;
+
+ return 0;
+}
+
+static int adis16550_set_accl_filter_freq(struct adis16550 *st, int freq_hz)
+{
+ u8 en = freq_hz ? 1 : 0;
+ u16 val = FIELD_PREP(ADIS16550_ACCL_FIR_EN_MASK, en);
+
+ return __adis_update_bits(&st->adis, ADIS16550_REG_CONFIG,
+ ADIS16550_ACCL_FIR_EN_MASK, val);
+}
+
+static int adis16550_get_gyro_filter_freq(struct adis16550 *st, int *freq_hz)
+{
+ int ret;
+ u16 config = 0;
+
+ ret = adis_read_reg_16(&st->adis, ADIS16550_REG_CONFIG, &config);
+ if (ret)
+ return -EINVAL;
+
+ if (FIELD_GET(ADIS16550_GYRO_FIR_EN_MASK, config))
+ *freq_hz = 100;
+ else
+ *freq_hz = 0;
+
+ return 0;
+}
+
+static int adis16550_set_gyro_filter_freq(struct adis16550 *st, int freq_hz)
+{
+ u8 en = freq_hz ? 1 : 0;
+ u16 val = FIELD_PREP(ADIS16550_GYRO_FIR_EN_MASK, en);
+
+ return __adis_update_bits(&st->adis, ADIS16550_REG_CONFIG,
+ ADIS16550_GYRO_FIR_EN_MASK, val);
+}
+
+enum {
+ ADIS16550_SCAN_TEMP,
+ ADIS16550_SCAN_GYRO_X,
+ ADIS16550_SCAN_GYRO_Y,
+ ADIS16550_SCAN_GYRO_Z,
+ ADIS16550_SCAN_ACCEL_X,
+ ADIS16550_SCAN_ACCEL_Y,
+ ADIS16550_SCAN_ACCEL_Z,
+ ADIS16550_SCAN_DELTANG_X,
+ ADIS16550_SCAN_DELTANG_Y,
+ ADIS16550_SCAN_DELTANG_Z,
+ ADIS16550_SCAN_DELTVEL_X,
+ ADIS16550_SCAN_DELTVEL_Y,
+ ADIS16550_SCAN_DELTVEL_Z,
+};
+
+static const u32 adis16550_calib_bias[] = {
+ [ADIS16550_SCAN_GYRO_X] = ADIS16550_REG_X_GYRO_BIAS,
+ [ADIS16550_SCAN_GYRO_Y] = ADIS16550_REG_Y_GYRO_BIAS,
+ [ADIS16550_SCAN_GYRO_Z] = ADIS16550_REG_Z_GYRO_BIAS,
+ [ADIS16550_SCAN_ACCEL_X] = ADIS16550_REG_X_ACCEL_BIAS,
+ [ADIS16550_SCAN_ACCEL_Y] = ADIS16550_REG_Y_ACCEL_BIAS,
+ [ADIS16550_SCAN_ACCEL_Z] = ADIS16550_REG_Z_ACCEL_BIAS,
+};
+
+static const u32 adis16550_calib_scale[] = {
+ [ADIS16550_SCAN_GYRO_X] = ADIS16550_REG_X_GYRO_SCALE,
+ [ADIS16550_SCAN_GYRO_Y] = ADIS16550_REG_Y_GYRO_SCALE,
+ [ADIS16550_SCAN_GYRO_Z] = ADIS16550_REG_Z_GYRO_SCALE,
+ [ADIS16550_SCAN_ACCEL_X] = ADIS16550_REG_X_ACCEL_SCALE,
+ [ADIS16550_SCAN_ACCEL_Y] = ADIS16550_REG_Y_ACCEL_SCALE,
+ [ADIS16550_SCAN_ACCEL_Z] = ADIS16550_REG_Z_ACCEL_SCALE,
+};
+
+static int adis16550_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long info)
+{
+ struct adis16550 *st = iio_priv(indio_dev);
+ const int idx = chan->scan_index;
+ u16 scale;
+ int ret;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_RAW:
+ return adis_single_conversion(indio_dev, chan, 0, val);
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = st->info->gyro_max_val;
+ *val2 = st->info->gyro_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_ACCEL:
+ *val = st->info->accel_max_val;
+ *val2 = st->info->accel_max_scale;
+ return IIO_VAL_FRACTIONAL;
+ case IIO_TEMP:
+ *val = st->info->temp_scale;
+ return IIO_VAL_INT;
+ case IIO_DELTA_ANGL:
+ *val = st->info->deltang_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ case IIO_DELTA_VELOCITY:
+ *val = st->info->deltvel_max_val;
+ *val2 = 31;
+ return IIO_VAL_FRACTIONAL_LOG2;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_OFFSET:
+ /* raw value 0 corresponds to 25°C: (raw + offset) * scale is in milli-degrees Celsius */
+ *val = DIV_ROUND_CLOSEST(25000, st->info->temp_scale);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adis_read_reg_32(&st->adis,
+ adis16550_calib_bias[idx], val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = adis_read_reg_16(&st->adis,
+ adis16550_calib_scale[idx], &scale);
+ if (ret)
+ return ret;
+
+ *val = scale;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = adis16550_get_freq(st, &tmp);
+ if (ret)
+ return ret;
+
+ *val = tmp / 1000;
+ *val2 = (tmp % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ ret = adis16550_get_gyro_filter_freq(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_ACCEL:
+ ret = adis16550_get_accl_filter_freq(st, val);
+ if (ret)
+ return ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adis16550_write_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int val, int val2, long info)
+{
+ struct adis16550 *st = iio_priv(indio_dev);
+ const int idx = chan->scan_index;
+ u32 tmp;
+
+ switch (info) {
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ tmp = val * 1000 + val2 / 1000;
+ return adis16550_set_freq_hz(st, tmp);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return adis_write_reg_32(&st->adis, adis16550_calib_bias[idx],
+ val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return adis_write_reg_16(&st->adis, adis16550_calib_scale[idx],
+ val);
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ return adis16550_set_gyro_filter_freq(st, val);
+ case IIO_ACCEL:
+ return adis16550_set_accl_filter_freq(st, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+#define ADIS16550_MOD_CHAN(_type, _mod, _address, _si) \
+ { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_CALIBBIAS) | \
+ BIT(IIO_CHAN_INFO_CALIBSCALE), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = (_address), \
+ .scan_index = (_si), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16550_GYRO_CHANNEL(_mod) \
+ ADIS16550_MOD_CHAN(IIO_ANGL_VEL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _GYRO, ADIS16550_SCAN_GYRO_ ## _mod)
+
+#define ADIS16550_ACCEL_CHANNEL(_mod) \
+ ADIS16550_MOD_CHAN(IIO_ACCEL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _ACCEL, ADIS16550_SCAN_ACCEL_ ## _mod)
+
+#define ADIS16550_TEMP_CHANNEL() { \
+ .type = IIO_TEMP, \
+ .indexed = 1, \
+ .channel = 0, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = ADIS16550_REG_TEMP, \
+ .scan_index = ADIS16550_SCAN_TEMP, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16550_MOD_CHAN_DELTA(_type, _mod, _address, _si) { \
+ .type = (_type), \
+ .modified = 1, \
+ .channel2 = (_mod), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .address = (_address), \
+ .scan_index = _si, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 32, \
+ .storagebits = 32, \
+ .endianness = IIO_BE, \
+ }, \
+ }
+
+#define ADIS16550_DELTANG_CHAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTANG_L, ADIS16550_SCAN_DELTANG_ ## _mod)
+
+#define ADIS16550_DELTVEL_CHAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTVEL_L, ADIS16550_SCAN_DELTVEL_ ## _mod)
+
+#define ADIS16550_DELTANG_CHAN_NO_SCAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_ANGL, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTANG_L, -1)
+
+#define ADIS16550_DELTVEL_CHAN_NO_SCAN(_mod) \
+ ADIS16550_MOD_CHAN_DELTA(IIO_DELTA_VELOCITY, IIO_MOD_ ## _mod, \
+ ADIS16550_REG_ ## _mod ## _DELTVEL_L, -1)
+
+static const struct iio_chan_spec adis16550_channels[] = {
+ ADIS16550_TEMP_CHANNEL(),
+ ADIS16550_GYRO_CHANNEL(X),
+ ADIS16550_GYRO_CHANNEL(Y),
+ ADIS16550_GYRO_CHANNEL(Z),
+ ADIS16550_ACCEL_CHANNEL(X),
+ ADIS16550_ACCEL_CHANNEL(Y),
+ ADIS16550_ACCEL_CHANNEL(Z),
+ ADIS16550_DELTANG_CHAN(X),
+ ADIS16550_DELTANG_CHAN(Y),
+ ADIS16550_DELTANG_CHAN(Z),
+ ADIS16550_DELTVEL_CHAN(X),
+ ADIS16550_DELTVEL_CHAN(Y),
+ ADIS16550_DELTVEL_CHAN(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(13),
+};
+
+static const struct adis16550_sync adis16550_sync_modes[] = {
+ { ADIS16550_SYNC_MODE_DIRECT, 3000, 4500 },
+ { ADIS16550_SYNC_MODE_SCALED, 1, 128 },
+};
+
+static const struct adis16550_chip_info adis16550_chip_info = {
+ .num_channels = ARRAY_SIZE(adis16550_channels),
+ .channels = adis16550_channels,
+ .name = "adis16550",
+ .gyro_max_val = 1,
+ .gyro_max_scale = IIO_RAD_TO_DEGREE(80 << 16),
+ .accel_max_val = 1,
+ .accel_max_scale = IIO_M_S_2_TO_G(102400000),
+ .temp_scale = 4,
+ .deltang_max_val = IIO_DEGREE_TO_RAD(720),
+ .deltvel_max_val = 125,
+ .int_clk = 4000,
+ .max_dec = 4095,
+ .sync_mode = adis16550_sync_modes,
+ .num_sync = ARRAY_SIZE(adis16550_sync_modes),
+};
+
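+/*
+ * The burst checksum uses the usual CRC-32 pre/post conditioning
+ * (all-ones seed and final inversion), so it should also be reproducible
+ * on the host side, e.g. with zlib's crc32() over the little-endian
+ * payload words.
+ */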
+static u32 adis16550_validate_crc(__be32 *buffer, const u8 n_elem)
+{
+ int i;
+ u32 crc_calc;
+ u32 crc_buf[ADIS16550_BURST_N_ELEM - 2];
+ u32 crc = be32_to_cpu(buffer[ADIS16550_BURST_N_ELEM - 1]);
+ /*
+ * The crc calculation of the data is done in little endian. Hence, we
+ * always swap the 32-bit elements so that the data LSB is always at
+ * address 0.
+ */
+ for (i = 0; i < n_elem; i++)
+ crc_buf[i] = be32_to_cpu(buffer[i]);
+
+ crc_calc = crc32(~0, crc_buf, n_elem * 4);
+ crc_calc ^= ~0;
+
+ return (crc_calc == crc);
+}
+
+static irqreturn_t adis16550_trigger_handler(int irq, void *p)
+{
+ int ret;
+ u16 dummy;
+ bool valid;
+ struct iio_poll_func *pf = p;
+ __be32 data[ADIS16550_MAX_SCAN_DATA];
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adis16550 *st = iio_priv(indio_dev);
+ struct adis *adis = iio_device_get_drvdata(indio_dev);
+ __be32 *buffer = (__be32 *)st->buffer;
+
+ ret = spi_sync(adis->spi, &adis->msg);
+ if (ret)
+ goto done;
+ /*
+ * Validate the header. The header is a normal spi reply with state
+ * vector and crc4.
+ */
+ ret = adis16550_spi_validate(&st->adis, buffer[0], &dummy);
+ if (ret)
+ goto done;
+
+ /* the header is not included in the crc */
+ valid = adis16550_validate_crc(buffer, ADIS16550_BURST_N_ELEM - 2);
+ if (!valid) {
+ dev_err(&adis->spi->dev, "Invalid burst crc!\n");
+ goto done;
+ }
+
+ /* copy the temperature word together with the six sensor data words */
+ memcpy(data, &buffer[3],
+ (ADIS16550_SCAN_ACCEL_Z - ADIS16550_SCAN_GYRO_X + 2) *
+ sizeof(__be32));
+ iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp);
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
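+/*
+ * Only two scan layouts are exposed, matching the two burst commands:
+ * gyro + accel + temperature, or delta-angle + delta-velocity +
+ * temperature (see adis16550_update_scan_mode() below).
+ */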
+static const unsigned long adis16550_channel_masks[] = {
+ ADIS16550_BURST_DATA_GYRO_ACCEL_MASK | BIT(ADIS16550_SCAN_TEMP),
+ ADIS16550_BURST_DATA_DELTA_ANG_VEL_MASK | BIT(ADIS16550_SCAN_TEMP),
+ 0
+};
+
+static int adis16550_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ u16 burst_length = ADIS16550_BURST_DATA_LEN;
+ struct adis16550 *st = iio_priv(indio_dev);
+ u8 burst_cmd;
+ u8 *tx;
+
+ memset(st->buffer, 0, burst_length + sizeof(u32));
+
+ if (*scan_mask & ADIS16550_BURST_DATA_GYRO_ACCEL_MASK)
+ burst_cmd = ADIS16550_REG_BURST_GYRO_ACCEL;
+ else
+ burst_cmd = ADIS16550_REG_BURST_DELTA_ANG_VEL;
+
+ tx = st->buffer + burst_length;
+ tx[0] = 0x00;
+ tx[1] = 0x00;
+ tx[2] = burst_cmd;
+ /* crc4 is 0 on burst command */
+ tx[3] = spi_crc4(get_unaligned_le32(tx));
+
+ return 0;
+}
+
+static int adis16550_reset(struct adis *adis)
+{
+ return __adis_write_reg_16(adis, ADIS16550_REG_COMMAND, BIT(15));
+}
+
+static int adis16550_config_sync(struct adis16550 *st)
+{
+ struct device *dev = &st->adis.spi->dev;
+ const struct adis16550_sync *sync_mode_data;
+ struct clk *clk;
+ int ret, i;
+ u16 mode;
+
+ clk = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+ if (!clk) {
+ st->clk_freq_hz = st->info->int_clk * 1000;
+ return 0;
+ }
+
+ st->clk_freq_hz = clk_get_rate(clk);
+
+ for (i = 0; i < st->info->num_sync; i++) {
+ if (st->clk_freq_hz >= st->info->sync_mode[i].min_rate &&
+ st->clk_freq_hz <= st->info->sync_mode[i].max_rate) {
+ sync_mode_data = &st->info->sync_mode[i];
+ break;
+ }
+ }
+
+ if (i == st->info->num_sync)
+ return dev_err_probe(dev, -EINVAL, "Clk rate: %lu not in a valid range",
+ st->clk_freq_hz);
+
+ if (sync_mode_data->sync_mode == ADIS16550_SYNC_MODE_SCALED) {
+ u16 sync_scale;
+ /*
+ * In scaled sync mode the input clock must be scaled to a sample
+ * rate within the [3000, 4500] sps range.
+ */
+
+ sync_scale = DIV_ROUND_CLOSEST(st->info->int_clk, st->clk_freq_hz);
+
+ if (sync_scale < 3000 || sync_scale > 4500)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid value:%u for sync_scale",
+ sync_scale);
+
+ ret = adis_write_reg_16(&st->adis, ADIS16550_REG_SYNC_SCALE,
+ sync_scale);
+ if (ret)
+ return ret;
+
+ st->clk_freq_hz = st->info->int_clk;
+ }
+
+ st->clk_freq_hz *= 1000;
+
+ mode = FIELD_PREP(ADIS16550_SYNC_MODE_MASK, sync_mode_data->sync_mode) |
+ FIELD_PREP(ADIS16550_SYNC_EN_MASK, true);
+
+ return __adis_update_bits(&st->adis, ADIS16550_REG_CONFIG,
+ ADIS16550_SYNC_MASK, mode);
+}
+
+static const struct iio_info adis16550_info = {
+ .read_raw = &adis16550_read_raw,
+ .write_raw = &adis16550_write_raw,
+ .update_scan_mode = adis16550_update_scan_mode,
+ .debugfs_reg_access = adis_debugfs_reg_access,
+};
+
+enum {
+ ADIS16550_STATUS_CRC_CODE,
+ ADIS16550_STATUS_CRC_CONFIG,
+ ADIS16550_STATUS_FLASH_UPDATE,
+ ADIS16550_STATUS_INERTIAL,
+ ADIS16550_STATUS_SENSOR,
+ ADIS16550_STATUS_TEMPERATURE,
+ ADIS16550_STATUS_SPI,
+ ADIS16550_STATUS_PROCESSING,
+ ADIS16550_STATUS_POWER,
+ ADIS16550_STATUS_BOOT,
+ ADIS16550_STATUS_WATCHDOG = 15,
+ ADIS16550_STATUS_REGULATOR = 28,
+ ADIS16550_STATUS_SENSOR_SUPPLY,
+ ADIS16550_STATUS_CPU_SUPPLY,
+ ADIS16550_STATUS_5V_SUPPLY,
+};
+
+static const char * const adis16550_status_error_msgs[] = {
+ [ADIS16550_STATUS_CRC_CODE] = "Code CRC Error",
+ [ADIS16550_STATUS_CRC_CONFIG] = "Configuration/Calibration CRC Error",
+ [ADIS16550_STATUS_FLASH_UPDATE] = "Flash Update Error",
+ [ADIS16550_STATUS_INERTIAL] = "Overrange for Inertial Signals",
+ [ADIS16550_STATUS_SENSOR] = "Sensor failure",
+ [ADIS16550_STATUS_TEMPERATURE] = "Temperature Error",
+ [ADIS16550_STATUS_SPI] = "SPI Communication Error",
+ [ADIS16550_STATUS_PROCESSING] = "Processing Overrun Error",
+ [ADIS16550_STATUS_POWER] = "Power Supply Failure",
+ [ADIS16550_STATUS_BOOT] = "Boot Memory Failure",
+ [ADIS16550_STATUS_WATCHDOG] = "Watchdog timer flag",
+ [ADIS16550_STATUS_REGULATOR] = "Internal Regulator Error",
+ [ADIS16550_STATUS_SENSOR_SUPPLY] = "Internal Sensor Supply Error",
+ [ADIS16550_STATUS_CPU_SUPPLY] = "Internal Processor Supply Error",
+ [ADIS16550_STATUS_5V_SUPPLY] = "External 5V Supply Error",
+};
+
+static const struct adis_timeout adis16550_timeouts = {
+ .reset_ms = 1000,
+ .sw_reset_ms = 1000,
+ .self_test_ms = 1000,
+};
+
+static const struct adis_data adis16550_data = {
+ .diag_stat_reg = ADIS16550_REG_STATUS,
+ .diag_stat_size = 4,
+ .prod_id_reg = ADIS16550_REG_PROD_ID,
+ .prod_id = 16550,
+ .self_test_mask = BIT(1),
+ .self_test_reg = ADIS16550_REG_COMMAND,
+ .cs_change_delay = 5,
+ .unmasked_drdy = true,
+ .status_error_msgs = adis16550_status_error_msgs,
+ .status_error_mask = BIT(ADIS16550_STATUS_CRC_CODE) |
+ BIT(ADIS16550_STATUS_CRC_CONFIG) |
+ BIT(ADIS16550_STATUS_FLASH_UPDATE) |
+ BIT(ADIS16550_STATUS_INERTIAL) |
+ BIT(ADIS16550_STATUS_SENSOR) |
+ BIT(ADIS16550_STATUS_TEMPERATURE) |
+ BIT(ADIS16550_STATUS_SPI) |
+ BIT(ADIS16550_STATUS_PROCESSING) |
+ BIT(ADIS16550_STATUS_POWER) |
+ BIT(ADIS16550_STATUS_BOOT) |
+ BIT(ADIS16550_STATUS_WATCHDOG) |
+ BIT(ADIS16550_STATUS_REGULATOR) |
+ BIT(ADIS16550_STATUS_SENSOR_SUPPLY) |
+ BIT(ADIS16550_STATUS_CPU_SUPPLY) |
+ BIT(ADIS16550_STATUS_5V_SUPPLY),
+ .timeouts = &adis16550_timeouts,
+};
+
+static const struct adis_ops adis16550_ops = {
+ .write = adis16550_spi_write,
+ .read = adis16550_spi_read,
+ .reset = adis16550_reset,
+};
+
+static int adis16550_probe(struct spi_device *spi)
+{
+ u16 burst_length = ADIS16550_BURST_DATA_LEN;
+ struct device *dev = &spi->dev;
+ struct iio_dev *indio_dev;
+ struct adis16550 *st;
+ struct adis *adis;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+ st->info = spi_get_device_match_data(spi);
+ if (!st->info)
+ return -EINVAL;
+ adis = &st->adis;
+ indio_dev->name = st->info->name;
+ indio_dev->channels = st->info->channels;
+ indio_dev->num_channels = st->info->num_channels;
+ indio_dev->available_scan_masks = adis16550_channel_masks;
+ indio_dev->info = &adis16550_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ st->adis.ops = &adis16550_ops;
+ st->xfer[0].tx_buf = st->buffer + burst_length;
+ st->xfer[0].len = 4;
+ st->xfer[0].cs_change = 1;
+ st->xfer[0].delay.value = 8;
+ st->xfer[0].delay.unit = SPI_DELAY_UNIT_USECS;
+ st->xfer[1].rx_buf = st->buffer;
+ st->xfer[1].len = burst_length;
+
+ spi_message_init_with_transfers(&adis->msg, st->xfer, 2);
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get vdd regulator\n");
+
+ ret = adis_init(&st->adis, indio_dev, spi, &adis16550_data);
+ if (ret)
+ return ret;
+
+ ret = __adis_initial_startup(&st->adis);
+ if (ret)
+ return ret;
+
+ ret = adis16550_config_sync(st);
+ if (ret)
+ return ret;
+
+ ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
+ adis16550_trigger_handler);
+ if (ret)
+ return ret;
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return ret;
+
+ adis16550_debugfs_init(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id adis16550_id[] = {
+ { "adis16550", (kernel_ulong_t)&adis16550_chip_info},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adis16550_id);
+
+static const struct of_device_id adis16550_of_match[] = {
+ { .compatible = "adi,adis16550", .data = &adis16550_chip_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adis16550_of_match);
+
+static struct spi_driver adis16550_driver = {
+ .driver = {
+ .name = "adis16550",
+ .of_match_table = adis16550_of_match,
+ },
+ .probe = adis16550_probe,
+ .id_table = adis16550_id,
+};
+module_spi_driver(adis16550_driver);
+
+MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
+MODULE_AUTHOR("Ramona Gradinariu <ramona.gradinariu@analog.com>");
+MODULE_AUTHOR("Antoniu Miclaus <antoniu.miclaus@analog.com>");
+MODULE_AUTHOR("Robert Budai <robert.budai@analog.com>");
+MODULE_DESCRIPTION("Analog Devices ADIS16550 IMU driver");
+MODULE_IMPORT_NS("IIO_ADISLIB");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/imu/bmi270/bmi270.h b/drivers/iio/imu/bmi270/bmi270.h
index fdfad5784cc5..d94525f6aee8 100644
--- a/drivers/iio/imu/bmi270/bmi270.h
+++ b/drivers/iio/imu/bmi270/bmi270.h
@@ -6,22 +6,6 @@
#include <linux/regmap.h>
#include <linux/iio/iio.h>
-struct device;
-struct bmi270_data {
- struct device *dev;
- struct regmap *regmap;
- const struct bmi270_chip_info *chip_info;
-
- /*
- * Where IIO_DMA_MINALIGN may be larger than 8 bytes, align to
- * that to ensure a DMA safe buffer.
- */
- struct {
- __le16 channels[6];
- aligned_s64 timestamp;
- } data __aligned(IIO_DMA_MINALIGN);
-};
-
struct bmi270_chip_info {
const char *name;
int chip_id;
@@ -32,6 +16,7 @@ extern const struct regmap_config bmi270_regmap_config;
extern const struct bmi270_chip_info bmi260_chip_info;
extern const struct bmi270_chip_info bmi270_chip_info;
+struct device;
int bmi270_core_probe(struct device *dev, struct regmap *regmap,
const struct bmi270_chip_info *chip_info);
diff --git a/drivers/iio/imu/bmi270/bmi270_core.c b/drivers/iio/imu/bmi270/bmi270_core.c
index 7fec52e0b486..a86be5af5ccb 100644
--- a/drivers/iio/imu/bmi270/bmi270_core.c
+++ b/drivers/iio/imu/bmi270/bmi270_core.c
@@ -4,10 +4,13 @@
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/units.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
@@ -25,13 +28,17 @@
#define BMI270_ACCEL_X_REG 0x0c
#define BMI270_ANG_VEL_X_REG 0x12
+#define BMI270_INT_STATUS_1_REG 0x1d
+#define BMI270_INT_STATUS_1_ACC_GYR_DRDY_MSK GENMASK(7, 6)
+
#define BMI270_INTERNAL_STATUS_REG 0x21
#define BMI270_INTERNAL_STATUS_MSG_MSK GENMASK(3, 0)
#define BMI270_INTERNAL_STATUS_MSG_INIT_OK 0x01
-
#define BMI270_INTERNAL_STATUS_AXES_REMAP_ERR_MSK BIT(5)
#define BMI270_INTERNAL_STATUS_ODR_50HZ_ERR_MSK BIT(6)
+#define BMI270_TEMPERATURE_0_REG 0x22
+
#define BMI270_ACC_CONF_REG 0x40
#define BMI270_ACC_CONF_ODR_MSK GENMASK(3, 0)
#define BMI270_ACC_CONF_ODR_100HZ 0x08
@@ -53,6 +60,20 @@
#define BMI270_GYR_CONF_RANGE_REG 0x43
#define BMI270_GYR_CONF_RANGE_MSK GENMASK(2, 0)
+#define BMI270_INT1_IO_CTRL_REG 0x53
+#define BMI270_INT2_IO_CTRL_REG 0x54
+#define BMI270_INT_IO_CTRL_LVL_MSK BIT(1)
+#define BMI270_INT_IO_CTRL_OD_MSK BIT(2)
+#define BMI270_INT_IO_CTRL_OP_MSK BIT(3)
+#define BMI270_INT_IO_LVL_OD_OP_MSK GENMASK(3, 1)
+
+#define BMI270_INT_LATCH_REG 0x55
+#define BMI270_INT_LATCH_REG_MSK BIT(0)
+
+#define BMI270_INT_MAP_DATA_REG 0x58
+#define BMI270_INT_MAP_DATA_DRDY_INT1_MSK BIT(2)
+#define BMI270_INT_MAP_DATA_DRDY_INT2_MSK BIT(6)
+
#define BMI270_INIT_CTRL_REG 0x59
#define BMI270_INIT_CTRL_LOAD_DONE_MSK BIT(0)
@@ -69,9 +90,38 @@
#define BMI270_PWR_CTRL_ACCEL_EN_MSK BIT(2)
#define BMI270_PWR_CTRL_TEMP_EN_MSK BIT(3)
+/* See datasheet section 4.6.14, Temperature Sensor */
+#define BMI270_TEMP_OFFSET 11776
+#define BMI270_TEMP_SCALE 1953125
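+/*
+ * With these values, (raw + 11776) * 1.953125 m°C maps raw = 0x0000 to
+ * 23 °C with a resolution of 1/512 K.
+ */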
+
#define BMI260_INIT_DATA_FILE "bmi260-init-data.fw"
#define BMI270_INIT_DATA_FILE "bmi270-init-data.fw"
+enum bmi270_irq_pin {
+ BMI270_IRQ_DISABLED,
+ BMI270_IRQ_INT1,
+ BMI270_IRQ_INT2,
+};
+
+struct bmi270_data {
+ struct device *dev;
+ struct regmap *regmap;
+ const struct bmi270_chip_info *chip_info;
+ enum bmi270_irq_pin irq_pin;
+ struct iio_trigger *trig;
+ /* Protect device's private data from concurrent access */
+ struct mutex mutex;
+
+ /*
+ * Where IIO_DMA_MINALIGN may be larger than 8 bytes, align to
+ * that to ensure a DMA safe buffer.
+ */
+ struct {
+ __le16 channels[6];
+ aligned_s64 timestamp;
+ } buffer __aligned(IIO_DMA_MINALIGN);
+};
+
enum bmi270_scan {
BMI270_SCAN_ACCEL_X,
BMI270_SCAN_ACCEL_Y,
@@ -109,6 +159,7 @@ EXPORT_SYMBOL_NS_GPL(bmi270_chip_info, "IIO_BMI270");
enum bmi270_sensor_type {
BMI270_ACCEL = 0,
BMI270_GYRO,
+ BMI270_TEMP,
};
struct bmi270_scale {
@@ -136,6 +187,10 @@ static const struct bmi270_scale bmi270_gyro_scale[] = {
{ 0, 66 },
};
+static const struct bmi270_scale bmi270_temp_scale[] = {
+ { BMI270_TEMP_SCALE / MICRO, BMI270_TEMP_SCALE % MICRO },
+};
+
struct bmi270_scale_item {
const struct bmi270_scale *tbl;
int num;
@@ -150,6 +205,10 @@ static const struct bmi270_scale_item bmi270_scale_table[] = {
.tbl = bmi270_gyro_scale,
.num = ARRAY_SIZE(bmi270_gyro_scale),
},
+ [BMI270_TEMP] = {
+ .tbl = bmi270_temp_scale,
+ .num = ARRAY_SIZE(bmi270_temp_scale),
+ },
};
static const struct bmi270_odr bmi270_accel_odr[] = {
@@ -244,6 +303,8 @@ static int bmi270_set_scale(struct bmi270_data *data, int chan_type, int uscale)
return -EINVAL;
}
+ guard(mutex)(&data->mutex);
+
for (i = 0; i < bmi270_scale_item.num; i++) {
if (bmi270_scale_item.tbl[i].uscale != uscale)
continue;
@@ -254,17 +315,18 @@ static int bmi270_set_scale(struct bmi270_data *data, int chan_type, int uscale)
return -EINVAL;
}
-static int bmi270_get_scale(struct bmi270_data *bmi270_device, int chan_type,
+static int bmi270_get_scale(struct bmi270_data *data, int chan_type, int *scale,
int *uscale)
{
int ret;
unsigned int val;
struct bmi270_scale_item bmi270_scale_item;
+ guard(mutex)(&data->mutex);
+
switch (chan_type) {
case IIO_ACCEL:
- ret = regmap_read(bmi270_device->regmap,
- BMI270_ACC_CONF_RANGE_REG, &val);
+ ret = regmap_read(data->regmap, BMI270_ACC_CONF_RANGE_REG, &val);
if (ret)
return ret;
@@ -272,14 +334,17 @@ static int bmi270_get_scale(struct bmi270_data *bmi270_device, int chan_type,
bmi270_scale_item = bmi270_scale_table[BMI270_ACCEL];
break;
case IIO_ANGL_VEL:
- ret = regmap_read(bmi270_device->regmap,
- BMI270_GYR_CONF_RANGE_REG, &val);
+ ret = regmap_read(data->regmap, BMI270_GYR_CONF_RANGE_REG, &val);
if (ret)
return ret;
val = FIELD_GET(BMI270_GYR_CONF_RANGE_MSK, val);
bmi270_scale_item = bmi270_scale_table[BMI270_GYRO];
break;
+ case IIO_TEMP:
+ val = 0;
+ bmi270_scale_item = bmi270_scale_table[BMI270_TEMP];
+ break;
default:
return -EINVAL;
}
@@ -287,6 +352,7 @@ static int bmi270_get_scale(struct bmi270_data *bmi270_device, int chan_type,
if (val >= bmi270_scale_item.num)
return -EINVAL;
+ *scale = bmi270_scale_item.tbl[val].scale;
*uscale = bmi270_scale_item.tbl[val].uscale;
return 0;
}
@@ -313,6 +379,8 @@ static int bmi270_set_odr(struct bmi270_data *data, int chan_type, int odr,
return -EINVAL;
}
+ guard(mutex)(&data->mutex);
+
for (i = 0; i < bmi270_odr_item.num; i++) {
if (bmi270_odr_item.tbl[i].odr != odr ||
bmi270_odr_item.tbl[i].uodr != uodr)
@@ -331,6 +399,8 @@ static int bmi270_get_odr(struct bmi270_data *data, int chan_type, int *odr,
int i, val, ret;
struct bmi270_odr_item bmi270_odr_item;
+ guard(mutex)(&data->mutex);
+
switch (chan_type) {
case IIO_ACCEL:
ret = regmap_read(data->regmap, BMI270_ACC_CONF_REG, &val);
@@ -364,29 +434,85 @@ static int bmi270_get_odr(struct bmi270_data *data, int chan_type, int *odr,
return -EINVAL;
}
+static irqreturn_t bmi270_irq_thread_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct bmi270_data *data = iio_priv(indio_dev);
+ unsigned int status;
+ int ret;
+
+ scoped_guard(mutex, &data->mutex) {
+ ret = regmap_read(data->regmap, BMI270_INT_STATUS_1_REG,
+ &status);
+ if (ret)
+ return IRQ_NONE;
+ }
+
+ if (FIELD_GET(BMI270_INT_STATUS_1_ACC_GYR_DRDY_MSK, status))
+ iio_trigger_poll_nested(data->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bmi270_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct bmi270_data *data = iio_trigger_get_drvdata(trig);
+ unsigned int field_value = 0;
+ unsigned int mask;
+
+ guard(mutex)(&data->mutex);
+
+ switch (data->irq_pin) {
+ case BMI270_IRQ_INT1:
+ mask = BMI270_INT_MAP_DATA_DRDY_INT1_MSK;
+ set_mask_bits(&field_value, BMI270_INT_MAP_DATA_DRDY_INT1_MSK,
+ FIELD_PREP(BMI270_INT_MAP_DATA_DRDY_INT1_MSK,
+ state));
+ break;
+ case BMI270_IRQ_INT2:
+ mask = BMI270_INT_MAP_DATA_DRDY_INT2_MSK;
+ set_mask_bits(&field_value, BMI270_INT_MAP_DATA_DRDY_INT2_MSK,
+ FIELD_PREP(BMI270_INT_MAP_DATA_DRDY_INT2_MSK,
+ state));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(data->regmap, BMI270_INT_MAP_DATA_REG, mask,
+ field_value);
+}
+
+static const struct iio_trigger_ops bmi270_trigger_ops = {
+ .set_trigger_state = &bmi270_data_rdy_trigger_set_state,
+};
+
static irqreturn_t bmi270_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- struct bmi270_data *bmi270_device = iio_priv(indio_dev);
+ struct bmi270_data *data = iio_priv(indio_dev);
int ret;
- ret = regmap_bulk_read(bmi270_device->regmap, BMI270_ACCEL_X_REG,
- &bmi270_device->data.channels,
- sizeof(bmi270_device->data.channels));
+ guard(mutex)(&data->mutex);
+
+ ret = regmap_bulk_read(data->regmap, BMI270_ACCEL_X_REG,
+ &data->buffer.channels,
+ sizeof(data->buffer.channels));
if (ret)
goto done;
- iio_push_to_buffers_with_timestamp(indio_dev, &bmi270_device->data,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer,
pf->timestamp);
done:
iio_trigger_notify_done(indio_dev->trig);
return IRQ_HANDLED;
}
-static int bmi270_get_data(struct bmi270_data *bmi270_device,
- int chan_type, int axis, int *val)
+static int bmi270_get_data(struct bmi270_data *data, int chan_type, int axis,
+ int *val)
{
__le16 sample;
int reg;
@@ -399,17 +525,22 @@ static int bmi270_get_data(struct bmi270_data *bmi270_device,
case IIO_ANGL_VEL:
reg = BMI270_ANG_VEL_X_REG + (axis - IIO_MOD_X) * 2;
break;
+ case IIO_TEMP:
+ reg = BMI270_TEMPERATURE_0_REG;
+ break;
default:
return -EINVAL;
}
- ret = regmap_bulk_read(bmi270_device->regmap, reg, &sample, sizeof(sample));
+ guard(mutex)(&data->mutex);
+
+ ret = regmap_bulk_read(data->regmap, reg, &sample, sizeof(sample));
if (ret)
return ret;
*val = sign_extend32(le16_to_cpu(sample), 15);
- return 0;
+ return IIO_VAL_INT;
}
static int bmi270_read_raw(struct iio_dev *indio_dev,
@@ -417,21 +548,28 @@ static int bmi270_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
int ret;
- struct bmi270_data *bmi270_device = iio_priv(indio_dev);
+ struct bmi270_data *data = iio_priv(indio_dev);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = bmi270_get_data(bmi270_device, chan->type, chan->channel2, val);
- if (ret)
- return ret;
-
- return IIO_VAL_INT;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi270_get_data(data, chan->type, chan->channel2, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
- *val = 0;
- ret = bmi270_get_scale(bmi270_device, chan->type, val2);
+ ret = bmi270_get_scale(data, chan->type, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = BMI270_TEMP_OFFSET;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
case IIO_CHAN_INFO_SAMP_FREQ:
- ret = bmi270_get_odr(bmi270_device, chan->type, val, val2);
+ ret = bmi270_get_odr(data, chan->type, val, val2);
return ret ? ret : IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
@@ -443,12 +581,21 @@ static int bmi270_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct bmi270_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
- return bmi270_set_scale(data, chan->type, val2);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi270_set_scale(data, chan->type, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
- return bmi270_set_odr(data, chan->type, val, val2);
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi270_set_odr(data, chan->type, val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
default:
return -EINVAL;
}
@@ -544,15 +691,132 @@ static const struct iio_chan_spec bmi270_channels[] = {
BMI270_ANG_VEL_CHANNEL(X),
BMI270_ANG_VEL_CHANNEL(Y),
BMI270_ANG_VEL_CHANNEL(Z),
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ .scan_index = -1, /* No buffer support */
+ },
IIO_CHAN_SOFT_TIMESTAMP(BMI270_SCAN_TIMESTAMP),
};
-static int bmi270_validate_chip_id(struct bmi270_data *bmi270_device)
+static int bmi270_int_pin_config(struct bmi270_data *data,
+ enum bmi270_irq_pin irq_pin,
+ bool active_high, bool open_drain, bool latch)
+{
+ unsigned int reg, field_value;
+ int ret;
+
+ ret = regmap_update_bits(data->regmap, BMI270_INT_LATCH_REG,
+ BMI270_INT_LATCH_REG_MSK,
+ FIELD_PREP(BMI270_INT_LATCH_REG_MSK, latch));
+ if (ret)
+ return ret;
+
+ switch (irq_pin) {
+ case BMI270_IRQ_INT1:
+ reg = BMI270_INT1_IO_CTRL_REG;
+ break;
+ case BMI270_IRQ_INT2:
+ reg = BMI270_INT2_IO_CTRL_REG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ field_value = FIELD_PREP(BMI270_INT_IO_CTRL_LVL_MSK, active_high) |
+ FIELD_PREP(BMI270_INT_IO_CTRL_OD_MSK, open_drain) |
+ FIELD_PREP(BMI270_INT_IO_CTRL_OP_MSK, 1);
+ return regmap_update_bits(data->regmap, reg,
+ BMI270_INT_IO_LVL_OD_OP_MSK, field_value);
+}
+
+static int bmi270_trigger_probe(struct bmi270_data *data,
+ struct iio_dev *indio_dev)
+{
+ bool open_drain, active_high, latch;
+ struct fwnode_handle *fwnode;
+ enum bmi270_irq_pin irq_pin;
+ int ret, irq, irq_type;
+
+ fwnode = dev_fwnode(data->dev);
+ if (!fwnode)
+ return -ENODEV;
+
+ irq = fwnode_irq_get_byname(fwnode, "INT1");
+ if (irq > 0) {
+ irq_pin = BMI270_IRQ_INT1;
+ } else {
+ irq = fwnode_irq_get_byname(fwnode, "INT2");
+ if (irq < 0)
+ return 0;
+
+ irq_pin = BMI270_IRQ_INT2;
+ }
+
+ irq_type = irq_get_trigger_type(irq);
+ switch (irq_type) {
+ case IRQF_TRIGGER_RISING:
+ latch = false;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_HIGH:
+ latch = true;
+ active_high = true;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ latch = false;
+ active_high = false;
+ break;
+ case IRQF_TRIGGER_LOW:
+ latch = true;
+ active_high = false;
+ break;
+ default:
+ return dev_err_probe(data->dev, -EINVAL,
+ "Invalid interrupt type 0x%x specified\n",
+ irq_type);
+ }
+
+ open_drain = fwnode_property_read_bool(fwnode, "drive-open-drain");
+
+ ret = bmi270_int_pin_config(data, irq_pin, active_high, open_drain,
+ latch);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Failed to configure irq line\n");
+
+ data->trig = devm_iio_trigger_alloc(data->dev, "%s-trig-%d",
+ indio_dev->name, irq_pin);
+ if (!data->trig)
+ return -ENOMEM;
+
+ data->trig->ops = &bmi270_trigger_ops;
+ iio_trigger_set_drvdata(data->trig, data);
+
+ ret = devm_request_threaded_irq(data->dev, irq, NULL,
+ bmi270_irq_thread_handler,
+ IRQF_ONESHOT, "bmi270-int", indio_dev);
+ if (ret)
+ return dev_err_probe(data->dev, ret, "Failed to request IRQ\n");
+
+ ret = devm_iio_trigger_register(data->dev, data->trig);
+ if (ret)
+ return dev_err_probe(data->dev, ret,
+ "Trigger registration failed\n");
+
+ data->irq_pin = irq_pin;
+
+ return 0;
+}
+
+static int bmi270_validate_chip_id(struct bmi270_data *data)
{
int chip_id;
int ret;
- struct device *dev = bmi270_device->dev;
- struct regmap *regmap = bmi270_device->regmap;
+ struct device *dev = data->dev;
+ struct regmap *regmap = data->regmap;
ret = regmap_read(regmap, BMI270_CHIP_ID_REG, &chip_id);
if (ret)
@@ -566,24 +830,24 @@ static int bmi270_validate_chip_id(struct bmi270_data *bmi270_device)
if (chip_id == BMI160_CHIP_ID_VAL)
return -ENODEV;
- if (chip_id != bmi270_device->chip_info->chip_id)
+ if (chip_id != data->chip_info->chip_id)
dev_info(dev, "Unexpected chip id 0x%x", chip_id);
if (chip_id == bmi260_chip_info.chip_id)
- bmi270_device->chip_info = &bmi260_chip_info;
+ data->chip_info = &bmi260_chip_info;
else if (chip_id == bmi270_chip_info.chip_id)
- bmi270_device->chip_info = &bmi270_chip_info;
+ data->chip_info = &bmi270_chip_info;
return 0;
}
-static int bmi270_write_calibration_data(struct bmi270_data *bmi270_device)
+static int bmi270_write_calibration_data(struct bmi270_data *data)
{
int ret;
int status = 0;
const struct firmware *init_data;
- struct device *dev = bmi270_device->dev;
- struct regmap *regmap = bmi270_device->regmap;
+ struct device *dev = data->dev;
+ struct regmap *regmap = data->regmap;
ret = regmap_clear_bits(regmap, BMI270_PWR_CONF_REG,
BMI270_PWR_CONF_ADV_PWR_SAVE_MSK);
@@ -604,8 +868,7 @@ static int bmi270_write_calibration_data(struct bmi270_data *bmi270_device)
return dev_err_probe(dev, ret,
"Failed to prepare device to load init data");
- ret = request_firmware(&init_data,
- bmi270_device->chip_info->fw_name, dev);
+ ret = request_firmware(&init_data, data->chip_info->fw_name, dev);
if (ret)
return dev_err_probe(dev, ret, "Failed to load init data file");
@@ -637,16 +900,17 @@ static int bmi270_write_calibration_data(struct bmi270_data *bmi270_device)
return 0;
}
-static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
+static int bmi270_configure_imu(struct bmi270_data *data)
{
int ret;
- struct device *dev = bmi270_device->dev;
- struct regmap *regmap = bmi270_device->regmap;
+ struct device *dev = data->dev;
+ struct regmap *regmap = data->regmap;
ret = regmap_set_bits(regmap, BMI270_PWR_CTRL_REG,
BMI270_PWR_CTRL_AUX_EN_MSK |
BMI270_PWR_CTRL_GYR_EN_MSK |
- BMI270_PWR_CTRL_ACCEL_EN_MSK);
+ BMI270_PWR_CTRL_ACCEL_EN_MSK |
+ BMI270_PWR_CTRL_TEMP_EN_MSK);
if (ret)
return dev_err_probe(dev, ret, "Failed to enable accelerometer and gyroscope");
@@ -677,38 +941,40 @@ static int bmi270_configure_imu(struct bmi270_data *bmi270_device)
return 0;
}
-static int bmi270_chip_init(struct bmi270_data *bmi270_device)
+static int bmi270_chip_init(struct bmi270_data *data)
{
int ret;
- ret = bmi270_validate_chip_id(bmi270_device);
+ ret = bmi270_validate_chip_id(data);
if (ret)
return ret;
- ret = bmi270_write_calibration_data(bmi270_device);
+ ret = bmi270_write_calibration_data(data);
if (ret)
return ret;
- return bmi270_configure_imu(bmi270_device);
+ return bmi270_configure_imu(data);
}
int bmi270_core_probe(struct device *dev, struct regmap *regmap,
const struct bmi270_chip_info *chip_info)
{
int ret;
- struct bmi270_data *bmi270_device;
+ struct bmi270_data *data;
struct iio_dev *indio_dev;
- indio_dev = devm_iio_device_alloc(dev, sizeof(*bmi270_device));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
- bmi270_device = iio_priv(indio_dev);
- bmi270_device->dev = dev;
- bmi270_device->regmap = regmap;
- bmi270_device->chip_info = chip_info;
+ data = iio_priv(indio_dev);
+ data->dev = dev;
+ data->regmap = regmap;
+ data->chip_info = chip_info;
+ data->irq_pin = BMI270_IRQ_DISABLED;
+ mutex_init(&data->mutex);
- ret = bmi270_chip_init(bmi270_device);
+ ret = bmi270_chip_init(data);
if (ret)
return ret;
@@ -719,6 +985,10 @@ int bmi270_core_probe(struct device *dev, struct regmap *regmap,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmi270_info;
+ ret = bmi270_trigger_probe(data, indio_dev);
+ if (ret)
+ return ret;
+
ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
iio_pollfunc_store_time,
bmi270_trigger_handler, NULL);
diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c
index 7f386c5e58b4..fc54d464a3ae 100644
--- a/drivers/iio/imu/bmi323/bmi323_core.c
+++ b/drivers/iio/imu/bmi323/bmi323_core.c
@@ -1702,26 +1702,30 @@ static int bmi323_write_raw(struct iio_dev *indio_dev,
int val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return bmi323_set_odr(data,
- bmi323_iio_to_sensor(chan->type),
- val, val2);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SCALE:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return bmi323_set_scale(data,
- bmi323_iio_to_sensor(chan->type),
- val, val2);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type),
+ val, val2);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return bmi323_set_average(data,
- bmi323_iio_to_sensor(chan->type),
- val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type),
+ val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_ENABLE:
return bmi323_enable_steps(data, val);
case IIO_CHAN_INFO_PROCESSED: {
@@ -1747,6 +1751,7 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct bmi323_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_PROCESSED:
@@ -1755,10 +1760,11 @@ static int bmi323_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ACCEL:
case IIO_ANGL_VEL:
- iio_device_claim_direct_scoped(return -EBUSY,
- indio_dev)
- return bmi323_read_axis(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+ ret = bmi323_read_axis(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_TEMP:
return bmi323_get_temp_data(data, val);
default:
diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c
index 363281272035..a43c8d1bb3d0 100644
--- a/drivers/iio/industrialio-backend.c
+++ b/drivers/iio/industrialio-backend.c
@@ -155,10 +155,12 @@ static ssize_t iio_backend_debugfs_write_reg(struct file *file,
ssize_t rc;
int ret;
- rc = simple_write_to_buffer(buf, sizeof(buf), ppos, userbuf, count);
+ rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
if (rc < 0)
return rc;
+ buf[rc] = '\0';
+
ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);
switch (ret) {
@@ -637,6 +639,66 @@ ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, "IIO_BACKEND");
/**
+ * iio_backend_interface_type_get - get the interface type used.
+ * @back: Backend device
+ * @type: Interface type
+ *
+ * RETURNS:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_interface_type_get(struct iio_backend *back,
+ enum iio_backend_interface_type *type)
+{
+ int ret;
+
+ ret = iio_backend_op_call(back, interface_type_get, type);
+ if (ret)
+ return ret;
+
+ if (*type >= IIO_BACKEND_INTERFACE_MAX)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_interface_type_get, "IIO_BACKEND");
+
+/**
+ * iio_backend_data_size_set - set the data width/size on the data bus.
+ * @back: Backend device
+ * @size: Size in bits
+ *
+ * Some frontend devices can dynamically control the word/data size on the
+ * interface/data bus. Hence, the backend device needs to be aware of it so
+ * data can be correctly transferred.
+ *
+ * Return:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_data_size_set(struct iio_backend *back, unsigned int size)
+{
+ if (!size)
+ return -EINVAL;
+
+ return iio_backend_op_call(back, data_size_set, size);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_data_size_set, "IIO_BACKEND");
+
+/**
+ * iio_backend_oversampling_ratio_set - set the oversampling ratio
+ * @back: Backend device
+ * @ratio: The oversampling ratio - value 1 corresponds to no oversampling.
+ *
+ * Return:
+ * 0 on success, negative error number on failure.
+ */
+int iio_backend_oversampling_ratio_set(struct iio_backend *back,
+ unsigned int ratio)
+{
+ return iio_backend_op_call(back, oversampling_ratio_set, ratio);
+}
+EXPORT_SYMBOL_NS_GPL(iio_backend_oversampling_ratio_set, "IIO_BACKEND");
+
+/**
* iio_backend_extend_chan_spec - Extend an IIO channel
* @back: Backend device
* @chan: IIO channel
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index a2117ad1337d..b9f4113ae5fc 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -410,11 +410,12 @@ static ssize_t iio_debugfs_write_reg(struct file *file,
char buf[80];
int ret;
- count = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, userbuf, count))
- return -EFAULT;
+ ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
+ count);
+ if (ret < 0)
+ return ret;
- buf[count] = 0;
+ buf[ret] = '\0';
ret = sscanf(buf, "%i %i", &reg, &val);
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index db06501b0e61..06295cfc2da8 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -232,6 +232,7 @@ static const char * const iio_ev_type_text[] = {
[IIO_EV_TYPE_CHANGE] = "change",
[IIO_EV_TYPE_MAG_REFERENCED] = "mag_referenced",
[IIO_EV_TYPE_GESTURE] = "gesture",
+ [IIO_EV_TYPE_FAULT] = "fault",
};
static const char * const iio_ev_dir_text[] = {
@@ -240,6 +241,7 @@ static const char * const iio_ev_dir_text[] = {
[IIO_EV_DIR_FALLING] = "falling",
[IIO_EV_DIR_SINGLETAP] = "singletap",
[IIO_EV_DIR_DOUBLETAP] = "doubletap",
+ [IIO_EV_DIR_FAULT_OPENWIRE] = "openwire",
};
static const char * const iio_ev_info_text[] = {
diff --git a/drivers/iio/industrialio-gts-helper.c b/drivers/iio/industrialio-gts-helper.c
index d70ebe3bf774..f35c36fd4a55 100644
--- a/drivers/iio/industrialio-gts-helper.c
+++ b/drivers/iio/industrialio-gts-helper.c
@@ -160,16 +160,123 @@ static void iio_gts_purge_avail_scale_table(struct iio_gts *gts)
gts->num_avail_all_scales = 0;
}
+static int scale_eq(int *sc1, int *sc2)
+{
+ return sc1[0] == sc2[0] && sc1[1] == sc2[1];
+}
+
+static int scale_smaller(int *sc1, int *sc2)
+{
+ if (sc1[0] != sc2[0])
+ return sc1[0] < sc2[0];
+
+ /* If integer parts are equal, fixp parts */
+ return sc1[1] < sc2[1];
+}
+
+/*
+ * Do a single table listing all the unique scales that any combination of
+ * supported gains and times can provide.
+ */
+static int do_combined_scaletable(struct iio_gts *gts,
+ size_t all_scales_tbl_bytes)
+{
+ int t_idx, i, new_idx;
+ int **scales = gts->per_time_avail_scale_tables;
+ int *all_scales = kcalloc(gts->num_itime, all_scales_tbl_bytes,
+ GFP_KERNEL);
+
+ if (!all_scales)
+ return -ENOMEM;
+ /*
+ * Create table containing all of the supported scales by looping
+ * through all of the per-time scales and copying the unique scales
+ * into one sorted table.
+ *
+ * We assume all the gains for the same integration time are unique.
+ * The first time table likely has the greatest time multiplier, as the
+ * times are listed in order of preference and greater times are usually
+ * preferred. Hence we start from the last table, which is likely to
+ * have the smallest total gains.
+ */
+ t_idx = gts->num_itime - 1;
+ memcpy(all_scales, scales[t_idx], all_scales_tbl_bytes);
+ new_idx = gts->num_hwgain * 2;
+
+ while (t_idx-- > 0) {
+ for (i = 0; i < gts->num_hwgain; i++) {
+ int *candidate = &scales[t_idx][i * 2];
+ int chk;
+
+ if (scale_smaller(candidate, &all_scales[new_idx - 2])) {
+ all_scales[new_idx] = candidate[0];
+ all_scales[new_idx + 1] = candidate[1];
+ new_idx += 2;
+
+ continue;
+ }
+ for (chk = 0; chk < new_idx; chk += 2)
+ if (!scale_smaller(candidate, &all_scales[chk]))
+ break;
+
+ if (scale_eq(candidate, &all_scales[chk]))
+ continue;
+
+ memmove(&all_scales[chk + 2], &all_scales[chk],
+ (new_idx - chk) * sizeof(int));
+ all_scales[chk] = candidate[0];
+ all_scales[chk + 1] = candidate[1];
+ new_idx += 2;
+ }
+ }
+
+ gts->num_avail_all_scales = new_idx / 2;
+ gts->avail_all_scales_table = all_scales;
+
+ return 0;
+}
+
+static void iio_gts_free_int_table_array(int **arr, int num_tables)
+{
+ int i;
+
+ for (i = 0; i < num_tables; i++)
+ kfree(arr[i]);
+
+ kfree(arr);
+}
+
+static int iio_gts_alloc_int_table_array(int ***arr, int num_tables, int num_table_items)
+{
+ int i, **tmp;
+
+ tmp = kcalloc(num_tables, sizeof(**arr), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ for (i = 0; i < num_tables; i++) {
+ tmp[i] = kcalloc(num_table_items, sizeof(int), GFP_KERNEL);
+ if (!tmp[i])
+ goto err_free;
+ }
+
+ *arr = tmp;
+
+ return 0;
+err_free:
+ iio_gts_free_int_table_array(tmp, i);
+
+ return -ENOMEM;
+}
+
static int iio_gts_gain_cmp(const void *a, const void *b)
{
return *(int *)a - *(int *)b;
}
-static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
+static int fill_and_sort_scaletables(struct iio_gts *gts, int **gains, int **scales)
{
- int i, j, new_idx, time_idx, ret = 0;
- int *all_gains;
- size_t gain_bytes;
+ int i, j, ret;
for (i = 0; i < gts->num_itime; i++) {
/*
@@ -189,71 +296,69 @@ static int gain_to_scaletables(struct iio_gts *gts, int **gains, int **scales)
}
}
- gain_bytes = array_size(gts->num_hwgain, sizeof(int));
- all_gains = kcalloc(gts->num_itime, gain_bytes, GFP_KERNEL);
- if (!all_gains)
- return -ENOMEM;
+ return 0;
+}
+
+static void compute_per_time_gains(struct iio_gts *gts, int **gains)
+{
+ int i, j;
+
+ for (i = 0; i < gts->num_itime; i++) {
+ for (j = 0; j < gts->num_hwgain; j++)
+ gains[i][j] = gts->hwgain_table[j].gain *
+ gts->itime_table[i].mul;
+ }
+}
+
+static int compute_per_time_tables(struct iio_gts *gts, int **scales)
+{
+ int **per_time_gains;
+ int ret;
/*
- * We assume all the gains for same integration time were unique.
- * It is likely the first time table had greatest time multiplier as
- * the times are in the order of preference and greater times are
- * usually preferred. Hence we start from the last table which is likely
- * to have the smallest total gains.
+ * Create a temporary array of the 'total gains' for each integration
+ * time.
*/
- time_idx = gts->num_itime - 1;
- memcpy(all_gains, gains[time_idx], gain_bytes);
- new_idx = gts->num_hwgain;
+ ret = iio_gts_alloc_int_table_array(&per_time_gains, gts->num_itime,
+ gts->num_hwgain);
+ if (ret)
+ return ret;
- while (time_idx-- > 0) {
- for (j = 0; j < gts->num_hwgain; j++) {
- int candidate = gains[time_idx][j];
- int chk;
+ compute_per_time_gains(gts, per_time_gains);
- if (candidate > all_gains[new_idx - 1]) {
- all_gains[new_idx] = candidate;
- new_idx++;
+ /* Convert the gains to scales and populate the scale tables */
+ ret = fill_and_sort_scaletables(gts, per_time_gains, scales);
- continue;
- }
- for (chk = 0; chk < new_idx; chk++)
- if (candidate <= all_gains[chk])
- break;
+ iio_gts_free_int_table_array(per_time_gains, gts->num_itime);
- if (candidate == all_gains[chk])
- continue;
+ return ret;
+}
- memmove(&all_gains[chk + 1], &all_gains[chk],
- (new_idx - chk) * sizeof(int));
- all_gains[chk] = candidate;
- new_idx++;
- }
- }
+/*
+ * Create a table of supported scales for each supported integration time.
+ * These can be used as available_scales by drivers which don't allow the
+ * scale setting to change the integration time, so that the correct set
+ * of scales is displayed for the integration time in use.
+ */
+static int **create_per_time_scales(struct iio_gts *gts)
+{
+ int **per_time_scales, ret;
- gts->avail_all_scales_table = kcalloc(new_idx, 2 * sizeof(int),
- GFP_KERNEL);
- if (!gts->avail_all_scales_table) {
- ret = -ENOMEM;
- goto free_out;
- }
- gts->num_avail_all_scales = new_idx;
+ ret = iio_gts_alloc_int_table_array(&per_time_scales, gts->num_itime,
+ gts->num_hwgain * 2);
+ if (ret)
+ return ERR_PTR(ret);
- for (i = 0; i < gts->num_avail_all_scales; i++) {
- ret = iio_gts_total_gain_to_scale(gts, all_gains[i],
- &gts->avail_all_scales_table[i * 2],
- &gts->avail_all_scales_table[i * 2 + 1]);
+ ret = compute_per_time_tables(gts, per_time_scales);
+ if (ret)
+ goto err_out;
- if (ret) {
- kfree(gts->avail_all_scales_table);
- gts->num_avail_all_scales = 0;
- goto free_out;
- }
- }
+ return per_time_scales;
-free_out:
- kfree(all_gains);
+err_out:
+ iio_gts_free_int_table_array(per_time_scales, gts->num_itime);
- return ret;
+ return ERR_PTR(ret);
}
/**
@@ -275,55 +380,26 @@ free_out:
*/
static int iio_gts_build_avail_scale_table(struct iio_gts *gts)
{
- int **per_time_gains, **per_time_scales, i, j, ret = -ENOMEM;
+ int ret, all_scales_tbl_bytes;
+ int **per_time_scales;
- per_time_gains = kcalloc(gts->num_itime, sizeof(*per_time_gains), GFP_KERNEL);
- if (!per_time_gains)
- return ret;
-
- per_time_scales = kcalloc(gts->num_itime, sizeof(*per_time_scales), GFP_KERNEL);
- if (!per_time_scales)
- goto free_gains;
-
- for (i = 0; i < gts->num_itime; i++) {
- per_time_scales[i] = kcalloc(gts->num_hwgain, 2 * sizeof(int),
- GFP_KERNEL);
- if (!per_time_scales[i])
- goto err_free_out;
-
- per_time_gains[i] = kcalloc(gts->num_hwgain, sizeof(int),
- GFP_KERNEL);
- if (!per_time_gains[i]) {
- kfree(per_time_scales[i]);
- goto err_free_out;
- }
-
- for (j = 0; j < gts->num_hwgain; j++)
- per_time_gains[i][j] = gts->hwgain_table[j].gain *
- gts->itime_table[i].mul;
- }
+ if (unlikely(check_mul_overflow(gts->num_hwgain, 2 * sizeof(int),
+ &all_scales_tbl_bytes)))
+ return -EOVERFLOW;
- ret = gain_to_scaletables(gts, per_time_gains, per_time_scales);
- if (ret)
- goto err_free_out;
+ per_time_scales = create_per_time_scales(gts);
+ if (IS_ERR(per_time_scales))
+ return PTR_ERR(per_time_scales);
- for (i = 0; i < gts->num_itime; i++)
- kfree(per_time_gains[i]);
- kfree(per_time_gains);
gts->per_time_avail_scale_tables = per_time_scales;
- return 0;
-
-err_free_out:
- for (i--; i >= 0; i--) {
- kfree(per_time_scales[i]);
- kfree(per_time_gains[i]);
+ ret = do_combined_scaletable(gts, all_scales_tbl_bytes);
+ if (ret) {
+ iio_gts_free_int_table_array(per_time_scales, gts->num_itime);
+ return ret;
}
- kfree(per_time_scales);
-free_gains:
- kfree(per_time_gains);
- return ret;
+ return 0;
}
static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
@@ -950,7 +1026,15 @@ int iio_gts_find_gain_time_sel_for_scale(struct iio_gts *gts, int scale_int,
}
EXPORT_SYMBOL_NS_GPL(iio_gts_find_gain_time_sel_for_scale, "IIO_GTS_HELPER");
-static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
+/**
+ * iio_gts_get_total_gain - Fetch total gain for given HW-gain and time
+ * @gts: Gain time scale descriptor
+ * @gain: HW-gain for which the total gain is computed
+ * @time: Integration time for which the total gain is computed
+ *
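+ * The total gain is the HW-gain multiplied by the integration time
+ * multiplier.
+ *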
+ * Return: total gain on success and -EINVAL on error.
+ */
+int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
{
const struct iio_itime_sel_mul *itime;
@@ -966,6 +1050,7 @@ static int iio_gts_get_total_gain(struct iio_gts *gts, int gain, int time)
return gain * itime->mul;
}
+EXPORT_SYMBOL_NS_GPL(iio_gts_get_total_gain, "IIO_GTS_HELPER");
static int iio_gts_get_scale_linear(struct iio_gts *gts, int gain, int time,
u64 *scale)
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index e34e551eef3e..4a7d983c9cd4 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -43,6 +43,16 @@ config ADUX1020
To compile this driver as a module, choose M here: the
module will be called adux1020.
+config AL3000A
+ tristate "AL3000a ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Dyna Image AL3000a
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called al3000a.
+
config AL3010
tristate "AL3010 ambient light sensor"
depends on I2C
@@ -63,6 +73,17 @@ config AL3320A
To compile this driver as a module, choose M here: the
module will be called al3320a.
+config APDS9160
+ tristate "APDS9160 combined ALS and proximity sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build support for a Broadcom APDS9160
+ combined ambient light and proximity sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds9160.
+
config APDS9300
tristate "APDS9300 ambient light sensor"
depends on I2C
@@ -683,6 +704,7 @@ config VEML6030
select REGMAP_I2C
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+ select IIO_GTS_HELPER
depends on I2C
help
Say Y here if you want to build a driver for the Vishay VEML6030
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 11a4041b918a..8229ebe6edc4 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -7,8 +7,10 @@
obj-$(CONFIG_ACPI_ALS) += acpi-als.o
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
obj-$(CONFIG_ADUX1020) += adux1020.o
+obj-$(CONFIG_AL3000A) += al3000a.o
obj-$(CONFIG_AL3010) += al3010.o
obj-$(CONFIG_AL3320A) += al3320a.o
+obj-$(CONFIG_APDS9160) += apds9160.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9306) += apds9306.o
obj-$(CONFIG_APDS9960) += apds9960.o
diff --git a/drivers/iio/light/adux1020.c b/drivers/iio/light/adux1020.c
index 593d614b1689..9240983a6cc4 100644
--- a/drivers/iio/light/adux1020.c
+++ b/drivers/iio/light/adux1020.c
@@ -118,7 +118,6 @@ static const struct regmap_config adux1020_regmap_config = {
.reg_bits = 8,
.val_bits = 16,
.max_register = 0x6F,
- .cache_type = REGCACHE_NONE,
};
static const struct reg_sequence adux1020_def_conf[] = {
diff --git a/drivers/iio/light/al3000a.c b/drivers/iio/light/al3000a.c
new file mode 100644
index 000000000000..e2fbb1270040
--- /dev/null
+++ b/drivers/iio/light/al3000a.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <linux/iio/iio.h>
+
+#define AL3000A_REG_SYSTEM 0x00
+#define AL3000A_REG_DATA 0x05
+
+#define AL3000A_CONFIG_ENABLE 0x00
+#define AL3000A_CONFIG_DISABLE 0x0b
+#define AL3000A_CONFIG_RESET 0x0f
+#define AL3000A_GAIN_MASK GENMASK(5, 0)
+
+/*
+ * These are pre-calculated lux values based on the possible output codes of
+ * the sensor (range 0x00 - 0x3F).
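+ * The progression is approximately exponential: each step multiplies the
+ * lux value by roughly 1.2, from 1 lux up to 100000 lux.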
+ */
+static const u32 lux_table[] = {
+ 1, 1, 1, 2, 2, 2, 3, 4, /* 0 - 7 */
+ 4, 5, 6, 7, 9, 11, 13, 16, /* 8 - 15 */
+ 19, 22, 27, 32, 39, 46, 56, 67, /* 16 - 23 */
+ 80, 96, 116, 139, 167, 200, 240, 289, /* 24 - 31 */
+ 347, 416, 499, 600, 720, 864, 1037, 1245, /* 32 - 39 */
+ 1495, 1795, 2155, 2587, 3105, 3728, 4475, 5373, /* 40 - 47 */
+ 6450, 7743, 9296, 11160, 13397, 16084, 19309, 23180, /* 48 - 55 */
+ 27828, 33408, 40107, 48148, 57803, 69393, 83306, 100000 /* 56 - 63 */
+};
+
+static const struct regmap_config al3000a_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = AL3000A_REG_DATA,
+};
+
+struct al3000a_data {
+ struct regmap *regmap;
+ struct regulator *vdd_supply;
+};
+
+static const struct iio_chan_spec al3000a_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static int al3000a_set_pwr_on(struct al3000a_data *data)
+{
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regulator_enable(data->vdd_supply);
+ if (ret) {
+ dev_err(dev, "failed to enable vdd power supply\n");
+ return ret;
+ }
+
+ return regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_ENABLE);
+}
+
+static void al3000a_set_pwr_off(void *_data)
+{
+ struct al3000a_data *data = _data;
+ struct device *dev = regmap_get_device(data->regmap);
+ int ret;
+
+ ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_DISABLE);
+ if (ret)
+ dev_err(dev, "failed to write system register\n");
+
+ ret = regulator_disable(data->vdd_supply);
+ if (ret)
+ dev_err(dev, "failed to disable vdd power supply\n");
+}
+
+static int al3000a_init(struct al3000a_data *data)
+{
+ int ret;
+
+ ret = al3000a_set_pwr_on(data);
+ if (ret)
+ return ret;
+
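+ /* Soft-reset the sensor, then switch it back to normal (enabled) mode. */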
+ ret = regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_RESET);
+ if (ret)
+ return ret;
+
+ return regmap_write(data->regmap, AL3000A_REG_SYSTEM, AL3000A_CONFIG_ENABLE);
+}
+
+static int al3000a_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct al3000a_data *data = iio_priv(indio_dev);
+ int ret, gain;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = regmap_read(data->regmap, AL3000A_REG_DATA, &gain);
+ if (ret)
+ return ret;
+
+ *val = lux_table[gain & AL3000A_GAIN_MASK];
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info al3000a_info = {
+ .read_raw = al3000a_read_raw,
+};
+
+static int al3000a_probe(struct i2c_client *client)
+{
+ struct al3000a_data *data;
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+
+ data->regmap = devm_regmap_init_i2c(client, &al3000a_regmap_config);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(dev, PTR_ERR(data->regmap),
+ "cannot allocate regmap\n");
+
+ data->vdd_supply = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(data->vdd_supply))
+ return dev_err_probe(dev, PTR_ERR(data->vdd_supply),
+ "failed to get vdd regulator\n");
+
+ indio_dev->info = &al3000a_info;
+ indio_dev->name = "al3000a";
+ indio_dev->channels = al3000a_channels;
+ indio_dev->num_channels = ARRAY_SIZE(al3000a_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = al3000a_init(data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init ALS\n");
+
+ ret = devm_add_action_or_reset(dev, al3000a_set_pwr_off, data);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add action\n");
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+static int al3000a_suspend(struct device *dev)
+{
+ struct al3000a_data *data = iio_priv(dev_get_drvdata(dev));
+
+ al3000a_set_pwr_off(data);
+ return 0;
+}
+
+static int al3000a_resume(struct device *dev)
+{
+ struct al3000a_data *data = iio_priv(dev_get_drvdata(dev));
+
+ return al3000a_set_pwr_on(data);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(al3000a_pm_ops, al3000a_suspend, al3000a_resume);
+
+static const struct i2c_device_id al3000a_id[] = {
+ { "al3000a" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, al3000a_id);
+
+static const struct of_device_id al3000a_of_match[] = {
+ { .compatible = "dynaimage,al3000a" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, al3000a_of_match);
+
+static struct i2c_driver al3000a_driver = {
+ .driver = {
+ .name = "al3000a",
+ .of_match_table = al3000a_of_match,
+ .pm = pm_sleep_ptr(&al3000a_pm_ops),
+ },
+ .probe = al3000a_probe,
+ .id_table = al3000a_id,
+};
+module_i2c_driver(al3000a_driver);
+
+MODULE_AUTHOR("Svyatolsav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("al3000a Ambient Light Sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/apds9160.c b/drivers/iio/light/apds9160.c
new file mode 100644
index 000000000000..d3f415930ec9
--- /dev/null
+++ b/drivers/iio/light/apds9160.c
@@ -0,0 +1,1594 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * APDS9160 sensor driver.
+ * The chip is a combined proximity and ambient light sensor.
+ * Author: 2024 Mikael Gonella-Bolduc <m.gonella.bolduc@gmail.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/events.h>
+#include <linux/iio/sysfs.h>
+
+#include <linux/unaligned.h>
+
+#define APDS9160_REGMAP_NAME "apds9160_regmap"
+
+/* Main control register */
+#define APDS9160_REG_CTRL 0x00
+#define APDS9160_CTRL_SWRESET BIT(4) /* 1: Activate reset */
+#define APDS9160_CTRL_MODE_RGB BIT(2) /* 0: ALS & IR, 1: RGB & IR */
+#define APDS9160_CTRL_EN_ALS BIT(1) /* 1: ALS active */
+#define APDS9160_CTRL_EN_PS BIT(0) /* 1: PS active */
+
+/* Status register */
+#define APDS9160_SR_LS_INT BIT(4)
+#define APDS9160_SR_LS_NEW_DATA BIT(3)
+#define APDS9160_SR_PS_INT BIT(1)
+#define APDS9160_SR_PS_NEW_DATA BIT(0)
+
+/* Interrupt configuration registers */
+#define APDS9160_REG_INT_CFG 0x19
+#define APDS9160_REG_INT_PST 0x1A
+#define APDS9160_INT_CFG_EN_LS BIT(2) /* LS int enable */
+#define APDS9160_INT_CFG_EN_PS BIT(0) /* PS int enable */
+
+/* Proximity registers */
+#define APDS9160_REG_PS_LED 0x01
+#define APDS9160_REG_PS_PULSES 0x02
+#define APDS9160_REG_PS_MEAS_RATE 0x03
+#define APDS9160_REG_PS_THRES_HI_LSB 0x1B
+#define APDS9160_REG_PS_THRES_HI_MSB 0x1C
+#define APDS9160_REG_PS_THRES_LO_LSB 0x1D
+#define APDS9160_REG_PS_THRES_LO_MSB 0x1E
+#define APDS9160_REG_PS_DATA_LSB 0x08
+#define APDS9160_REG_PS_DATA_MSB 0x09
+#define APDS9160_REG_PS_CAN_LEVEL_DIG_LSB 0x1F
+#define APDS9160_REG_PS_CAN_LEVEL_DIG_MSB 0x20
+#define APDS9160_REG_PS_CAN_LEVEL_ANA_DUR 0x21
+#define APDS9160_REG_PS_CAN_LEVEL_ANA_CURRENT 0x22
+
+/* Light sensor registers */
+#define APDS9160_REG_LS_MEAS_RATE 0x04
+#define APDS9160_REG_LS_GAIN 0x05
+#define APDS9160_REG_LS_DATA_CLEAR_LSB 0x0A
+#define APDS9160_REG_LS_DATA_CLEAR 0x0B
+#define APDS9160_REG_LS_DATA_CLEAR_MSB 0x0C
+#define APDS9160_REG_LS_DATA_ALS_LSB 0x0D
+#define APDS9160_REG_LS_DATA_ALS 0x0E
+#define APDS9160_REG_LS_DATA_ALS_MSB 0x0F
+#define APDS9160_REG_LS_THRES_UP_LSB 0x24
+#define APDS9160_REG_LS_THRES_UP 0x25
+#define APDS9160_REG_LS_THRES_UP_MSB 0x26
+#define APDS9160_REG_LS_THRES_LO_LSB 0x27
+#define APDS9160_REG_LS_THRES_LO 0x28
+#define APDS9160_REG_LS_THRES_LO_MSB 0x29
+#define APDS9160_REG_LS_THRES_VAR 0x2A
+
+/* Part identification number register */
+#define APDS9160_REG_ID 0x06
+
+/* Status register */
+#define APDS9160_REG_SR 0x07
+#define APDS9160_SR_DATA_ALS BIT(3)
+#define APDS9160_SR_DATA_PS BIT(0)
+
+/* Supported IDs */
+#define APDS9160_PART_ID_0 0x03
+
+#define APDS9160_PS_THRES_MAX 0x7FF
+#define APDS9160_LS_THRES_MAX 0xFFFFF
+#define APDS9160_CMD_LS_RESOLUTION_25MS 0x04
+#define APDS9160_CMD_LS_RESOLUTION_50MS 0x03
+#define APDS9160_CMD_LS_RESOLUTION_100MS 0x02
+#define APDS9160_CMD_LS_RESOLUTION_200MS 0x01
+#define APDS9160_PS_DATA_MASK 0x7FF
+
+#define APDS9160_DEFAULT_LS_GAIN 3
+#define APDS9160_DEFAULT_LS_RATE 100
+#define APDS9160_DEFAULT_PS_RATE 100
+#define APDS9160_DEFAULT_PS_CANCELLATION_LEVEL 0
+#define APDS9160_DEFAULT_PS_ANALOG_CANCELLATION 0
+#define APDS9160_DEFAULT_PS_GAIN 1
+#define APDS9160_DEFAULT_PS_CURRENT 100
+#define APDS9160_DEFAULT_PS_RESOLUTION_11BITS 0x03
+
+static const struct reg_default apds9160_reg_defaults[] = {
+ { APDS9160_REG_CTRL, 0x00 }, /* Sensors disabled by default */
+ { APDS9160_REG_PS_LED, 0x33 }, /* 60 kHz frequency, 100 mA */
+ { APDS9160_REG_PS_PULSES, 0x08 }, /* 8 pulses */
+ { APDS9160_REG_PS_MEAS_RATE, 0x05 }, /* 100ms */
+ { APDS9160_REG_LS_MEAS_RATE, 0x22 }, /* 100ms */
+ { APDS9160_REG_LS_GAIN, 0x01 }, /* 3x */
+ { APDS9160_REG_INT_CFG, 0x10 }, /* Interrupts disabled */
+ { APDS9160_REG_INT_PST, 0x00 },
+ { APDS9160_REG_PS_THRES_HI_LSB, 0xFF },
+ { APDS9160_REG_PS_THRES_HI_MSB, 0x07 },
+ { APDS9160_REG_PS_THRES_LO_LSB, 0x00 },
+ { APDS9160_REG_PS_THRES_LO_MSB, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_DIG_LSB, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_DIG_MSB, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_ANA_DUR, 0x00 },
+ { APDS9160_REG_PS_CAN_LEVEL_ANA_CURRENT, 0x00 },
+ { APDS9160_REG_LS_THRES_UP_LSB, 0xFF },
+ { APDS9160_REG_LS_THRES_UP, 0xFF },
+ { APDS9160_REG_LS_THRES_UP_MSB, 0x0F },
+ { APDS9160_REG_LS_THRES_LO_LSB, 0x00 },
+ { APDS9160_REG_LS_THRES_LO, 0x00 },
+ { APDS9160_REG_LS_THRES_LO_MSB, 0x00 },
+ { APDS9160_REG_LS_THRES_VAR, 0x00 },
+};
+
+static const struct regmap_range apds9160_readable_ranges[] = {
+ regmap_reg_range(APDS9160_REG_CTRL, APDS9160_REG_LS_THRES_VAR),
+};
+
+static const struct regmap_access_table apds9160_readable_table = {
+ .yes_ranges = apds9160_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(apds9160_readable_ranges),
+};
+
+static const struct regmap_range apds9160_writeable_ranges[] = {
+ regmap_reg_range(APDS9160_REG_CTRL, APDS9160_REG_LS_GAIN),
+ regmap_reg_range(APDS9160_REG_INT_CFG, APDS9160_REG_LS_THRES_VAR),
+};
+
+static const struct regmap_access_table apds9160_writeable_table = {
+ .yes_ranges = apds9160_writeable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(apds9160_writeable_ranges),
+};
+
+static const struct regmap_range apds9160_volatile_ranges[] = {
+ regmap_reg_range(APDS9160_REG_SR, APDS9160_REG_LS_DATA_ALS_MSB),
+};
+
+static const struct regmap_access_table apds9160_volatile_table = {
+ .yes_ranges = apds9160_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(apds9160_volatile_ranges),
+};
+
+static const struct regmap_config apds9160_regmap_config = {
+ .name = APDS9160_REGMAP_NAME,
+ .reg_bits = 8,
+ .val_bits = 8,
+ .use_single_read = true,
+ .use_single_write = true,
+
+ .rd_table = &apds9160_readable_table,
+ .wr_table = &apds9160_writeable_table,
+ .volatile_table = &apds9160_volatile_table,
+
+ .reg_defaults = apds9160_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(apds9160_reg_defaults),
+ .max_register = APDS9160_REG_LS_THRES_VAR,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static const struct iio_event_spec apds9160_event_spec[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec apds9160_channels[] = {
+ {
+ /* Proximity sensor channel */
+ .type = IIO_PROXIMITY,
+ .address = APDS9160_REG_PS_DATA_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = apds9160_event_spec,
+ .num_event_specs = ARRAY_SIZE(apds9160_event_spec),
+ },
+ {
+ /* Proximity sensor led current */
+ .type = IIO_CURRENT,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ /* Illuminance */
+ .type = IIO_LIGHT,
+ .address = APDS9160_REG_LS_DATA_ALS_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = apds9160_event_spec,
+ .num_event_specs = ARRAY_SIZE(apds9160_event_spec),
+ },
+ {
+ /* Clear channel */
+ .type = IIO_INTENSITY,
+ .address = APDS9160_REG_LS_DATA_CLEAR_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .modified = 1,
+ },
+};
+
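+/*
+ * Same channels as apds9160_channels, but without event specs; used when
+ * no interrupt line is available.
+ */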
+static const struct iio_chan_spec apds9160_channels_without_events[] = {
+ {
+ /* Proximity sensor channel */
+ .type = IIO_PROXIMITY,
+ .address = APDS9160_REG_PS_DATA_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_CALIBBIAS),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ /* Proximity sensor led current */
+ .type = IIO_CURRENT,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ /* Illuminance */
+ .type = IIO_LIGHT,
+ .address = APDS9160_REG_LS_DATA_ALS_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_HARDWAREGAIN) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .info_mask_separate_available = BIT(IIO_CHAN_INFO_INT_TIME) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ /* Clear channel */
+ .type = IIO_INTENSITY,
+ .address = APDS9160_REG_LS_DATA_CLEAR_LSB,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .channel2 = IIO_MOD_LIGHT_CLEAR,
+ .modified = 1,
+ },
+};
+
+static const int apds9160_als_rate_avail[] = {
+ 25, 50, 100, 200
+};
+
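+/*
+ * Each *_map table below pairs a user-visible value (rate in ms, gain
+ * factor or LED current in mA) with its register field selector.
+ */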
+static const int apds9160_als_rate_map[][2] = {
+ { 25, 0x00 },
+ { 50, 0x01 },
+ { 100, 0x02 },
+ { 200, 0x03 },
+};
+
+static const int apds9160_als_gain_map[][2] = {
+ { 1, 0x00 },
+ { 3, 0x01 },
+ { 6, 0x02 },
+ { 18, 0x03 },
+ { 54, 0x04 },
+};
+
+static const int apds9160_ps_gain_avail[] = {
+ 1, 2, 4, 8
+};
+
+static const int apds9160_ps_gain_map[][2] = {
+ { 1, 0x00 },
+ { 2, 0x01 },
+ { 4, 0x02 },
+ { 8, 0x03 },
+};
+
+static const int apds9160_ps_rate_avail[] = {
+ 25, 50, 100, 200, 400
+};
+
+static const int apds9160_ps_rate_map[][2] = {
+ { 25, 0x03 },
+ { 50, 0x04 },
+ { 100, 0x05 },
+ { 200, 0x06 },
+ { 400, 0x07 },
+};
+
+static const int apds9160_ps_led_current_avail[] = {
+ 10, 25, 50, 100, 150, 175, 200
+};
+
+static const int apds9160_ps_led_current_map[][2] = {
+ { 10, 0x00 },
+ { 25, 0x01 },
+ { 50, 0x02 },
+ { 100, 0x03 },
+ { 150, 0x04 },
+ { 175, 0x05 },
+ { 200, 0x06 },
+};
+
+/**
+ * struct apds9160_scale - apds9160 scale mapping definition
+ *
+ * @itime: Integration time in ms
+ * @gain: Gain multiplier
+ * @scale1: Integer part of the scale, in lux per count
+ * @scale2: Fractional part of the scale, in microlux per count
+ */
+struct apds9160_scale {
+ int itime;
+ int gain;
+ int scale1;
+ int scale2;
+};
+
+/* Scale mapping extracted from datasheet */
+static const struct apds9160_scale apds9160_als_scale_map[] = {
+ {
+ .itime = 25,
+ .gain = 1,
+ .scale1 = 3,
+ .scale2 = 272000,
+ },
+ {
+ .itime = 25,
+ .gain = 3,
+ .scale1 = 1,
+ .scale2 = 77000,
+ },
+ {
+ .itime = 25,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 525000,
+ },
+ {
+ .itime = 25,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 169000,
+ },
+ {
+ .itime = 25,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 49000,
+ },
+ {
+ .itime = 50,
+ .gain = 1,
+ .scale1 = 1,
+ .scale2 = 639000,
+ },
+ {
+ .itime = 50,
+ .gain = 3,
+ .scale1 = 0,
+ .scale2 = 538000,
+ },
+ {
+ .itime = 50,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 263000,
+ },
+ {
+ .itime = 50,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 84000,
+ },
+ {
+ .itime = 50,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 25000,
+ },
+ {
+ .itime = 100,
+ .gain = 1,
+ .scale1 = 0,
+ .scale2 = 819000,
+ },
+ {
+ .itime = 100,
+ .gain = 3,
+ .scale1 = 0,
+ .scale2 = 269000,
+ },
+ {
+ .itime = 100,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 131000,
+ },
+ {
+ .itime = 100,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 42000,
+ },
+ {
+ .itime = 100,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 12000,
+ },
+ {
+ .itime = 200,
+ .gain = 1,
+ .scale1 = 0,
+ .scale2 = 409000,
+ },
+ {
+ .itime = 200,
+ .gain = 3,
+ .scale1 = 0,
+ .scale2 = 135000,
+ },
+ {
+ .itime = 200,
+ .gain = 6,
+ .scale1 = 0,
+ .scale2 = 66000,
+ },
+ {
+ .itime = 200,
+ .gain = 18,
+ .scale1 = 0,
+ .scale2 = 21000,
+ },
+ {
+ .itime = 200,
+ .gain = 54,
+ .scale1 = 0,
+ .scale2 = 6000,
+ },
+};
+
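+/*
+ * Available scales for each integration time, as {integer, micro} lux per
+ * count pairs taken from the scale map above; reported via read_avail().
+ */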
+static const int apds9160_25ms_avail[][2] = {
+ { 3, 272000 },
+ { 1, 77000 },
+ { 0, 525000 },
+ { 0, 169000 },
+ { 0, 49000 },
+};
+
+static const int apds9160_50ms_avail[][2] = {
+ { 1, 639000 },
+ { 0, 538000 },
+ { 0, 263000 },
+ { 0, 84000 },
+ { 0, 25000 },
+};
+
+static const int apds9160_100ms_avail[][2] = {
+ { 0, 819000 },
+ { 0, 269000 },
+ { 0, 131000 },
+ { 0, 42000 },
+ { 0, 12000 },
+};
+
+static const int apds9160_200ms_avail[][2] = {
+ { 0, 409000 },
+ { 0, 135000 },
+ { 0, 66000 },
+ { 0, 21000 },
+ { 0, 6000 },
+};
+
+static const struct reg_field apds9160_reg_field_ls_en =
+ REG_FIELD(APDS9160_REG_CTRL, 1, 1);
+
+static const struct reg_field apds9160_reg_field_ps_en =
+ REG_FIELD(APDS9160_REG_CTRL, 0, 0);
+
+static const struct reg_field apds9160_reg_field_int_ps =
+ REG_FIELD(APDS9160_REG_INT_CFG, 0, 0);
+
+static const struct reg_field apds9160_reg_field_int_als =
+ REG_FIELD(APDS9160_REG_INT_CFG, 2, 2);
+
+static const struct reg_field apds9160_reg_field_ps_overflow =
+ REG_FIELD(APDS9160_REG_PS_DATA_MSB, 3, 3);
+
+static const struct reg_field apds9160_reg_field_als_rate =
+ REG_FIELD(APDS9160_REG_LS_MEAS_RATE, 0, 2);
+
+static const struct reg_field apds9160_reg_field_als_gain =
+ REG_FIELD(APDS9160_REG_LS_GAIN, 0, 2);
+
+static const struct reg_field apds9160_reg_field_ps_rate =
+ REG_FIELD(APDS9160_REG_PS_MEAS_RATE, 0, 2);
+
+static const struct reg_field apds9160_reg_field_als_res =
+ REG_FIELD(APDS9160_REG_LS_MEAS_RATE, 4, 6);
+
+static const struct reg_field apds9160_reg_field_ps_current =
+ REG_FIELD(APDS9160_REG_PS_LED, 0, 2);
+
+static const struct reg_field apds9160_reg_field_ps_gain =
+ REG_FIELD(APDS9160_REG_PS_MEAS_RATE, 6, 7);
+
+static const struct reg_field apds9160_reg_field_ps_resolution =
+ REG_FIELD(APDS9160_REG_PS_MEAS_RATE, 3, 4);
+
+struct apds9160_chip {
+ struct i2c_client *client;
+ struct regmap *regmap;
+
+ struct regmap_field *reg_enable_ps;
+ struct regmap_field *reg_enable_als;
+ struct regmap_field *reg_int_ps;
+ struct regmap_field *reg_int_als;
+ struct regmap_field *reg_ps_overflow;
+ struct regmap_field *reg_als_rate;
+ struct regmap_field *reg_als_resolution;
+ struct regmap_field *reg_ps_rate;
+ struct regmap_field *reg_als_gain;
+ struct regmap_field *reg_ps_current;
+ struct regmap_field *reg_ps_gain;
+ struct regmap_field *reg_ps_resolution;
+
+ struct mutex lock; /* protects state and config data */
+
+ /* State data */
+ int als_int;
+ int ps_int;
+
+ /* Configuration values */
+ int als_itime;
+ int als_hwgain;
+ int als_scale1;
+ int als_scale2;
+ int ps_rate;
+ int ps_cancellation_level;
+ int ps_current;
+ int ps_gain;
+};
+
+static int apds9160_set_ps_rate(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_ps_rate_map); idx++) {
+ int ret;
+
+ if (apds9160_ps_rate_map[idx][0] != val)
+ continue;
+
+ ret = regmap_field_write(data->reg_ps_rate,
+ apds9160_ps_rate_map[idx][1]);
+ if (ret)
+ return ret;
+ data->ps_rate = val;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_ps_gain(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_ps_gain_map); idx++) {
+ int ret;
+
+ if (apds9160_ps_gain_map[idx][0] != val)
+ continue;
+
+ ret = regmap_field_write(data->reg_ps_gain,
+ apds9160_ps_gain_map[idx][1]);
+ if (ret)
+ return ret;
+ data->ps_gain = val;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * The PS intelligent cancellation level register allows
+ * for an on-chip subtraction of the ADC count caused by
+ * unwanted reflected light from the PS ADC output.
+ */
+static int apds9160_set_ps_cancellation_level(struct apds9160_chip *data,
+ int val)
+{
+ int ret;
+ __le16 buf;
+
+ if (val < 0 || val > 0xFFFF)
+ return -EINVAL;
+
+ buf = cpu_to_le16(val);
+ ret = regmap_bulk_write(data->regmap, APDS9160_REG_PS_CAN_LEVEL_DIG_LSB,
+ &buf, 2);
+ if (ret)
+ return ret;
+
+ data->ps_cancellation_level = val;
+
+ return ret;
+}
+
+/*
+ * This parameter determines the cancellation pulse duration
+ * in each PWM pulse. The cancellation is applied during the
+ * integration phase of the PS measurement.
+ * The duration is programmed in half clock cycles.
+ * A duration value of 0 or 1 does not generate any cancellation pulse.
+ */
+static int apds9160_set_ps_analog_cancellation(struct apds9160_chip *data,
+ int val)
+{
+ if (val < 0 || val > 63)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, APDS9160_REG_PS_CAN_LEVEL_ANA_DUR,
+ val);
+}
+
+/*
+ * This parameter works in conjunction with the cancellation pulse duration.
+ * The value determines the current used for crosstalk cancellation.
+ * The coarse value is in steps of 60 nA and the fine value in steps of 2.4 nA.
+ */
+static int apds9160_set_ps_cancellation_current(struct apds9160_chip *data,
+ int coarse_val,
+ int fine_val)
+{
+ int val;
+
+ if (coarse_val < 0 || coarse_val > 4)
+ return -EINVAL;
+
+ if (fine_val < 0 || fine_val > 15)
+ return -EINVAL;
+
+ /* Coarse value at B4:B5 and fine value at B0:B3 */
+ val = (coarse_val << 4) | fine_val;
+
+ return regmap_write(data->regmap, APDS9160_REG_PS_CAN_LEVEL_ANA_CURRENT,
+ val);
+}
+
+static int apds9160_ps_init_analog_cancellation(struct device *dev,
+ struct apds9160_chip *data)
+{
+ int ret, duration, picoamp, idx, coarse, fine;
+
+ ret = device_property_read_u32(dev,
+ "ps-cancellation-duration", &duration);
+ if (ret || duration == 0) {
+ /* Don't fail since this is not required */
+ return 0;
+ }
+
+ ret = device_property_read_u32(dev,
+ "ps-cancellation-current-picoamp", &picoamp);
+ if (ret)
+ return ret;
+
+ if (picoamp < 60000 || picoamp > 276000 || picoamp % 2400 != 0)
+ return dev_err_probe(dev, -EINVAL,
+ "Invalid cancellation current\n");
+
+ /* Compute required coarse and fine value from requested current */
+ fine = 0;
+ coarse = 0;
+ for (idx = 60000; idx < picoamp; idx += 2400) {
+ if (fine == 15) {
+ fine = 0;
+ coarse++;
+ idx += 21600;
+ } else {
+ fine++;
+ }
+ }
+
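+ /*
+ * idx now holds the nearest representable current at or above the
+ * request. For example (traced from the loop above, not taken from
+ * the datasheet): a request of 98400 pA wraps fine at 15 and ends up
+ * at 120000 pA with coarse = 1 and fine = 0, triggering the rounding
+ * warning below.
+ */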
+ if (picoamp != idx)
+ dev_warn(dev,
+ "Invalid cancellation current %i, rounding to %i\n",
+ picoamp, idx);
+
+ ret = apds9160_set_ps_analog_cancellation(data, duration);
+ if (ret)
+ return ret;
+
+ return apds9160_set_ps_cancellation_current(data, coarse, fine);
+}
+
+static int apds9160_set_ps_current(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_ps_led_current_map); idx++) {
+ int ret;
+
+ if (apds9160_ps_led_current_map[idx][0] != val)
+ continue;
+
+ ret = regmap_field_write(
+ data->reg_ps_current,
+ apds9160_ps_led_current_map[idx][1]);
+ if (ret)
+ return ret;
+ data->ps_current = val;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_als_gain(struct apds9160_chip *data, int gain)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_gain_map); idx++) {
+ int ret;
+
+ if (gain != apds9160_als_gain_map[idx][0])
+ continue;
+
+ ret = regmap_field_write(data->reg_als_gain,
+ apds9160_als_gain_map[idx][1]);
+ if (ret)
+ return ret;
+ data->als_hwgain = gain;
+
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_als_scale(struct apds9160_chip *data, int val, int val2)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_scale_map); idx++) {
+ if (apds9160_als_scale_map[idx].itime == data->als_itime &&
+ apds9160_als_scale_map[idx].scale1 == val &&
+ apds9160_als_scale_map[idx].scale2 == val2) {
+ int ret = apds9160_set_als_gain(data,
+ apds9160_als_scale_map[idx].gain);
+ if (ret)
+ return ret;
+ data->als_scale1 = val;
+ data->als_scale2 = val2;
+
+ return ret;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_set_als_resolution(struct apds9160_chip *data, int val)
+{
+ switch (val) {
+ case 25:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_25MS);
+ case 50:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_50MS);
+ case 200:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_200MS);
+ default:
+ return regmap_field_write(data->reg_als_resolution,
+ APDS9160_CMD_LS_RESOLUTION_100MS);
+ }
+}
+
+static int apds9160_set_als_rate(struct apds9160_chip *data, int val)
+{
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_rate_map); idx++) {
+ if (apds9160_als_rate_map[idx][0] != val)
+ continue;
+
+ return regmap_field_write(data->reg_als_rate,
+ apds9160_als_rate_map[idx][1]);
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Setting the integration time adjusts the resolution, rate, scale and gain.
+ */
+static int apds9160_set_als_int_time(struct apds9160_chip *data, int val)
+{
+ int ret;
+ int idx;
+
+ ret = apds9160_set_als_rate(data, val);
+ if (ret)
+ return ret;
+
+ /* Match resolution register with rate */
+ ret = apds9160_set_als_resolution(data, val);
+ if (ret)
+ return ret;
+
+ data->als_itime = val;
+
+ /* Set the scale minimum gain */
+ for (idx = 0; idx < ARRAY_SIZE(apds9160_als_scale_map); idx++) {
+ if (data->als_itime != apds9160_als_scale_map[idx].itime)
+ continue;
+
+ return apds9160_set_als_scale(data,
+ apds9160_als_scale_map[idx].scale1,
+ apds9160_als_scale_map[idx].scale2);
+ }
+
+ return -EINVAL;
+}
+
+static int apds9160_read_avail(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ const int **vals, int *type, int *length,
+ long mask)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *length = ARRAY_SIZE(apds9160_als_rate_avail);
+ *vals = (const int *)apds9160_als_rate_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ case IIO_PROXIMITY:
+ *length = ARRAY_SIZE(apds9160_ps_rate_avail);
+ *vals = (const int *)apds9160_ps_rate_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *length = ARRAY_SIZE(apds9160_ps_gain_avail);
+ *vals = (const int *)apds9160_ps_gain_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ case IIO_LIGHT:
+ /* The available scales change depending on itime */
+ switch (data->als_itime) {
+ case 25:
+ *length = ARRAY_SIZE(apds9160_25ms_avail) * 2;
+ *vals = (const int *)apds9160_25ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ case 50:
+ *length = ARRAY_SIZE(apds9160_50ms_avail) * 2;
+ *vals = (const int *)apds9160_50ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ case 100:
+ *length = ARRAY_SIZE(apds9160_100ms_avail) * 2;
+ *vals = (const int *)apds9160_100ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ case 200:
+ *length = ARRAY_SIZE(apds9160_200ms_avail) * 2;
+ *vals = (const int *)apds9160_200ms_avail;
+ *type = IIO_VAL_INT_PLUS_MICRO;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_CURRENT:
+ *length = ARRAY_SIZE(apds9160_ps_led_current_avail);
+ *vals = (const int *)apds9160_ps_led_current_avail;
+ *type = IIO_VAL_INT;
+
+ return IIO_AVAIL_LIST;
+ default:
+ return -EINVAL;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_RAW:
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PROXIMITY: {
+ __le16 buf;
+
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &buf, 2);
+ if (ret)
+ return ret;
+ *val = le16_to_cpu(buf);
+ /* Remove overflow bits from result */
+ *val = FIELD_GET(APDS9160_PS_DATA_MASK, *val);
+
+ return IIO_VAL_INT;
+ }
+ case IIO_LIGHT:
+ case IIO_INTENSITY: {
+ u8 buf[3];
+
+ ret = regmap_bulk_read(data->regmap, chan->address,
+ &buf, 3);
+ if (ret)
+ return ret;
+ *val = get_unaligned_le24(buf);
+
+ return IIO_VAL_INT;
+ }
+ case IIO_CURRENT:
+ *val = data->ps_current;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_HARDWAREGAIN:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ *val = data->als_hwgain;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *val = data->ps_rate;
+
+ return IIO_VAL_INT;
+ case IIO_LIGHT:
+ *val = data->als_itime;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *val = data->ps_cancellation_level;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *val = data->ps_gain;
+
+ return IIO_VAL_INT;
+ case IIO_LIGHT:
+ *val = data->als_scale1;
+ *val2 = data->als_scale2;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val,
+ int val2, long mask)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ guard(mutex)(&data->lock);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_INT_TIME:
+ if (val2 != 0)
+ return -EINVAL;
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return apds9160_set_ps_rate(data, val);
+ case IIO_LIGHT:
+ return apds9160_set_als_int_time(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return apds9160_set_ps_gain(data, val);
+ case IIO_LIGHT:
+ return apds9160_set_als_scale(data, val, val2);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val2 != 0)
+ return -EINVAL;
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return apds9160_set_ps_cancellation_level(data, val);
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_RAW:
+ if (val2 != 0)
+ return -EINVAL;
+ switch (chan->type) {
+ case IIO_CURRENT:
+ return apds9160_set_ps_current(data, val);
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int apds9160_get_thres_reg(const struct iio_chan_spec *chan,
+ enum iio_event_direction dir, u8 *reg)
+{
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *reg = APDS9160_REG_PS_THRES_HI_LSB;
+ break;
+ case IIO_LIGHT:
+ *reg = APDS9160_REG_LS_THRES_UP_LSB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_EV_DIR_FALLING:
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ *reg = APDS9160_REG_PS_THRES_LO_LSB;
+ break;
+ case IIO_LIGHT:
+ *reg = APDS9160_REG_LS_THRES_LO_LSB;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int apds9160_read_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int *val, int *val2)
+{
+ u8 reg;
+ int ret;
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ ret = apds9160_get_thres_reg(chan, dir, &reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY: {
+ __le16 buf;
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, 2);
+ if (ret < 0)
+ return ret;
+ *val = le16_to_cpu(buf);
+ return IIO_VAL_INT;
+ }
+ case IIO_LIGHT: {
+ u8 buf[3];
+
+ ret = regmap_bulk_read(data->regmap, reg, &buf, 3);
+ if (ret < 0)
+ return ret;
+ *val = get_unaligned_le24(buf);
+ return IIO_VAL_INT;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_event(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info, int val, int val2)
+{
+ u8 reg;
+ int ret = 0;
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ if (info != IIO_EV_INFO_VALUE)
+ return -EINVAL;
+
+ ret = apds9160_get_thres_reg(chan, dir, &reg);
+ if (ret < 0)
+ return ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY: {
+ __le16 buf;
+
+ if (val < 0 || val > APDS9160_PS_THRES_MAX)
+ return -EINVAL;
+
+ buf = cpu_to_le16(val);
+ return regmap_bulk_write(data->regmap, reg, &buf, 2);
+ }
+ case IIO_LIGHT: {
+ u8 buf[3];
+
+ if (val < 0 || val > APDS9160_LS_THRES_MAX)
+ return -EINVAL;
+
+ put_unaligned_le24(val, buf);
+ return regmap_bulk_write(data->regmap, reg, &buf, 3);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ return data->ps_int;
+ case IIO_LIGHT:
+ return data->als_int;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int apds9160_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir, bool state)
+{
+ struct apds9160_chip *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (chan->type) {
+ case IIO_PROXIMITY:
+ ret = regmap_field_write(data->reg_int_ps, state);
+ if (ret)
+ return ret;
+ data->ps_int = state;
+
+ return 0;
+ case IIO_LIGHT:
+ ret = regmap_field_write(data->reg_int_als, state);
+ if (ret)
+ return ret;
+ data->als_int = state;
+
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static irqreturn_t apds9160_irq_handler(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct apds9160_chip *data = iio_priv(indio_dev);
+ int ret, status;
+
+ /* Reading status register clears the interrupt flag */
+ ret = regmap_read(data->regmap, APDS9160_REG_SR, &status);
+ if (ret < 0) {
+ dev_err_ratelimited(&data->client->dev,
+ "irq status reg read failed\n");
+ return IRQ_HANDLED;
+ }
+
+ if ((status & APDS9160_SR_LS_INT) &&
+ (status & APDS9160_SR_LS_NEW_DATA) && data->als_int) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+
+ if ((status & APDS9160_SR_PS_INT) &&
+ (status & APDS9160_SR_PS_NEW_DATA) && data->ps_int) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns(indio_dev));
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apds9160_detect(struct apds9160_chip *chip)
+{
+ struct i2c_client *client = chip->client;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(chip->regmap, APDS9160_REG_ID, &val);
+ if (ret < 0) {
+ dev_err(&client->dev, "ID read failed\n");
+ return ret;
+ }
+
+ if (val != APDS9160_PART_ID_0)
+ dev_info(&client->dev, "Unknown part id %u\n", val);
+
+ return 0;
+}
+
+static void apds9160_disable(void *chip)
+{
+ struct apds9160_chip *data = chip;
+ int ret;
+
+ ret = regmap_field_write(data->reg_enable_als, 0);
+ if (ret)
+ return;
+
+ regmap_field_write(data->reg_enable_ps, 0);
+}
+
+static int apds9160_chip_init(struct apds9160_chip *chip)
+{
+ int ret;
+
+ /* Write default values to interrupt register */
+ ret = regmap_field_write(chip->reg_int_ps, 0);
+ chip->ps_int = 0;
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(chip->reg_int_als, 0);
+ chip->als_int = 0;
+ if (ret)
+ return ret;
+
+ /* Write default values to control register */
+ ret = regmap_field_write(chip->reg_enable_als, 1);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(chip->reg_enable_ps, 1);
+ if (ret)
+ return ret;
+
+ /* Write other default values */
+ ret = regmap_field_write(chip->reg_ps_resolution,
+ APDS9160_DEFAULT_PS_RESOLUTION_11BITS);
+ if (ret)
+ return ret;
+
+ /* Write default values to configuration registers */
+ ret = apds9160_set_ps_current(chip, APDS9160_DEFAULT_PS_CURRENT);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_rate(chip, APDS9160_DEFAULT_PS_RATE);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_als_int_time(chip, APDS9160_DEFAULT_LS_RATE);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_als_scale(chip,
+ apds9160_100ms_avail[0][0],
+ apds9160_100ms_avail[0][1]);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_gain(chip, APDS9160_DEFAULT_PS_GAIN);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_analog_cancellation(
+ chip, APDS9160_DEFAULT_PS_ANALOG_CANCELLATION);
+ if (ret)
+ return ret;
+
+ ret = apds9160_set_ps_cancellation_level(
+ chip, APDS9160_DEFAULT_PS_CANCELLATION_LEVEL);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(&chip->client->dev, apds9160_disable,
+ chip);
+}
+
+static int apds9160_regfield_init(struct apds9160_chip *data)
+{
+ struct device *dev = &data->client->dev;
+ struct regmap *regmap = data->regmap;
+ struct regmap_field *tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_int_als);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_int_als = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_int_ps);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_int_ps = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ls_en);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_enable_als = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ps_en);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_enable_ps = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap,
+ apds9160_reg_field_ps_overflow);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_overflow = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_als_rate);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_als_rate = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_als_res);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_als_resolution = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ps_rate);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_rate = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_als_gain);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_als_gain = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap,
+ apds9160_reg_field_ps_current);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_current = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap, apds9160_reg_field_ps_gain);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_gain = tmp;
+
+ tmp = devm_regmap_field_alloc(dev, regmap,
+ apds9160_reg_field_ps_resolution);
+ if (IS_ERR(tmp))
+ return PTR_ERR(tmp);
+ data->reg_ps_resolution = tmp;
+
+ return 0;
+}
+
+static const struct iio_info apds9160_info = {
+ .read_avail = apds9160_read_avail,
+ .read_raw = apds9160_read_raw,
+ .write_raw = apds9160_write_raw,
+ .write_raw_get_fmt = apds9160_write_raw_get_fmt,
+ .read_event_value = apds9160_read_event,
+ .write_event_value = apds9160_write_event,
+ .read_event_config = apds9160_read_event_config,
+ .write_event_config = apds9160_write_event_config,
+};
+
+static const struct iio_info apds9160_info_no_events = {
+ .read_avail = apds9160_read_avail,
+ .read_raw = apds9160_read_raw,
+ .write_raw = apds9160_write_raw,
+ .write_raw_get_fmt = apds9160_write_raw_get_fmt,
+};
+
+static int apds9160_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct apds9160_chip *chip;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ ret = devm_regulator_get_enable(dev, "vdd");
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable vdd supply\n");
+
+ indio_dev->name = "apds9160";
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ chip = iio_priv(indio_dev);
+ chip->client = client;
+ chip->regmap = devm_regmap_init_i2c(client, &apds9160_regmap_config);
+ if (IS_ERR(chip->regmap))
+ return dev_err_probe(dev, PTR_ERR(chip->regmap),
+ "regmap initialization failed.\n");
+
+ mutex_init(&chip->lock);
+
+ ret = apds9160_detect(chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "apds9160 not found\n");
+
+ ret = apds9160_regfield_init(chip);
+ if (ret)
+ return ret;
+
+ ret = apds9160_chip_init(chip);
+ if (ret)
+ return ret;
+
+ ret = apds9160_ps_init_analog_cancellation(dev, chip);
+ if (ret)
+ return ret;
+
+ if (client->irq > 0) {
+ indio_dev->info = &apds9160_info;
+ indio_dev->channels = apds9160_channels;
+ indio_dev->num_channels = ARRAY_SIZE(apds9160_channels);
+ ret = devm_request_threaded_irq(dev, client->irq, NULL,
+ apds9160_irq_handler,
+ IRQF_ONESHOT, "apds9160_event",
+ indio_dev);
+ if (ret) {
+ return dev_err_probe(dev, ret,
+ "request irq (%d) failed\n",
+ client->irq);
+ }
+ } else {
+ indio_dev->info = &apds9160_info_no_events;
+ indio_dev->channels = apds9160_channels_without_events;
+ indio_dev->num_channels =
+ ARRAY_SIZE(apds9160_channels_without_events);
+ }
+
+ ret = devm_iio_device_register(dev, indio_dev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed iio device registration\n");
+
+ return ret;
+}
+
+static const struct of_device_id apds9160_of_match[] = {
+ { .compatible = "brcm,apds9160" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, apds9160_of_match);
+
+static const struct i2c_device_id apds9160_id[] = {
+ { "apds9160", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, apds9160_id);
+
+static struct i2c_driver apds9160_driver = {
+ .driver = {
+ .name = "apds9160",
+ .of_match_table = apds9160_of_match,
+ },
+ .probe = apds9160_probe,
+ .id_table = apds9160_id,
+};
+module_i2c_driver(apds9160_driver);
+
+MODULE_DESCRIPTION("APDS9160 combined ALS and proximity sensor");
+MODULE_AUTHOR("Mikael Gonella-Bolduc <m.gonella.bolduc@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/light/apds9306.c b/drivers/iio/light/apds9306.c
index 69a0d609cffc..5ed7e17f49e7 100644
--- a/drivers/iio/light/apds9306.c
+++ b/drivers/iio/light/apds9306.c
@@ -108,11 +108,11 @@ static const struct part_id_gts_multiplier apds9306_gts_mul[] = {
{
.part_id = 0xB1,
.max_scale_int = 16,
- .max_scale_nano = 3264320,
+ .max_scale_nano = 326432000,
}, {
.part_id = 0xB3,
.max_scale_int = 14,
- .max_scale_nano = 9712000,
+ .max_scale_nano = 97120000,
},
};
diff --git a/drivers/iio/light/bh1745.c b/drivers/iio/light/bh1745.c
index 3b4056be54a0..56ab5fe90ff9 100644
--- a/drivers/iio/light/bh1745.c
+++ b/drivers/iio/light/bh1745.c
@@ -426,16 +426,16 @@ static int bh1745_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = regmap_bulk_read(data->regmap, chan->address,
- &value, 2);
- if (ret)
- return ret;
- *val = value;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
- return IIO_VAL_INT;
- }
- unreachable();
+ ret = regmap_bulk_read(data->regmap, chan->address, &value, 2);
+ iio_device_release_direct(indio_dev);
+ if (ret)
+ return ret;
+ *val = value;
+
+ return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE: {
guard(mutex)(&data->lock);
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index aeae0566ec12..bb90f738312a 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -492,7 +492,7 @@ static int cm32181_probe(struct i2c_client *client)
ret = devm_iio_device_register(dev, indio_dev);
if (ret) {
- dev_err(dev, "%s: regist device failed\n", __func__);
+ dev_err(dev, "%s: register device failed\n", __func__);
return ret;
}
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index ae3fc3299eec..446dd54d5037 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -683,7 +683,7 @@ static int cm36651_probe(struct i2c_client *client)
ret = iio_device_register(indio_dev);
if (ret) {
- dev_err(&client->dev, "%s: regist device failed\n", __func__);
+ dev_err(&client->dev, "%s: register device failed\n", __func__);
goto error_free_irq;
}
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index 7ab64f5c623c..76b76d12b388 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -49,9 +49,10 @@ static const u32 prox_sensitivity_addresses[] = {
#define PROX_CHANNEL(_is_proximity, _channel) \
{\
.type = _is_proximity ? IIO_PROXIMITY : IIO_ATTENTION,\
- .info_mask_separate = _is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
- BIT(IIO_CHAN_INFO_PROCESSED),\
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |\
+ .info_mask_separate = \
+ (_is_proximity ? BIT(IIO_CHAN_INFO_RAW) :\
+ BIT(IIO_CHAN_INFO_PROCESSED)) |\
+ BIT(IIO_CHAN_INFO_OFFSET) |\
BIT(IIO_CHAN_INFO_SCALE) |\
BIT(IIO_CHAN_INFO_SAMP_FREQ) |\
BIT(IIO_CHAN_INFO_HYSTERESIS),\
diff --git a/drivers/iio/light/veml6030.c b/drivers/iio/light/veml6030.c
index 9b71825eea9b..473a9c3e32a3 100644
--- a/drivers/iio/light/veml6030.c
+++ b/drivers/iio/light/veml6030.c
@@ -24,10 +24,12 @@
#include <linux/regmap.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
+#include <linux/units.h>
#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
+#include <linux/iio/iio-gts-helper.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
@@ -59,22 +61,36 @@
#define VEML6035_INT_CHAN BIT(3)
#define VEML6035_CHAN_EN BIT(2)
+/* Regfields */
+#define VEML6030_GAIN_RF REG_FIELD(VEML6030_REG_ALS_CONF, 11, 12)
+#define VEML6030_IT_RF REG_FIELD(VEML6030_REG_ALS_CONF, 6, 9)
+
+#define VEML6035_GAIN_RF REG_FIELD(VEML6030_REG_ALS_CONF, 10, 12)
+
+/* Maximum scales x 10000 to work with integers */
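+/* e.g. VEML6030_MAX_SCALE 21504 encodes a maximum scale of 2.1504. */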
+#define VEML6030_MAX_SCALE 21504
+#define VEML6035_MAX_SCALE 4096
+
enum veml6030_scan {
VEML6030_SCAN_ALS,
VEML6030_SCAN_WH,
VEML6030_SCAN_TIMESTAMP,
};
+struct veml6030_rf {
+ struct regmap_field *it;
+ struct regmap_field *gain;
+};
+
struct veml603x_chip {
const char *name;
- const int(*scale_vals)[][2];
- const int num_scale_vals;
const struct iio_chan_spec *channels;
const int num_channels;
+ const struct reg_field gain_rf;
+ const struct reg_field it_rf;
+ const int max_scale;
int (*hw_init)(struct iio_dev *indio_dev, struct device *dev);
int (*set_info)(struct iio_dev *indio_dev);
- int (*set_als_gain)(struct iio_dev *indio_dev, int val, int val2);
- int (*get_als_gain)(struct iio_dev *indio_dev, int *val, int *val2);
};
/*
@@ -91,40 +107,56 @@ struct veml603x_chip {
struct veml6030_data {
struct i2c_client *client;
struct regmap *regmap;
- int cur_resolution;
- int cur_gain;
- int cur_integration_time;
+ struct veml6030_rf rf;
const struct veml603x_chip *chip;
+ struct iio_gts gts;
+
};
-static const int veml6030_it_times[][2] = {
- { 0, 25000 },
- { 0, 50000 },
- { 0, 100000 },
- { 0, 200000 },
- { 0, 400000 },
- { 0, 800000 },
+#define VEML6030_SEL_IT_25MS 0x0C
+#define VEML6030_SEL_IT_50MS 0x08
+#define VEML6030_SEL_IT_100MS 0x00
+#define VEML6030_SEL_IT_200MS 0x01
+#define VEML6030_SEL_IT_400MS 0x02
+#define VEML6030_SEL_IT_800MS 0x03
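+/*
+ * The last GAIN_SCALE_ITIME_US() argument is the gain multiplier each
+ * integration time contributes to the total gain, relative to the
+ * shortest (25 ms) integration time.
+ */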
+static const struct iio_itime_sel_mul veml6030_it_sel[] = {
+ GAIN_SCALE_ITIME_US(25000, VEML6030_SEL_IT_25MS, 1),
+ GAIN_SCALE_ITIME_US(50000, VEML6030_SEL_IT_50MS, 2),
+ GAIN_SCALE_ITIME_US(100000, VEML6030_SEL_IT_100MS, 4),
+ GAIN_SCALE_ITIME_US(200000, VEML6030_SEL_IT_200MS, 8),
+ GAIN_SCALE_ITIME_US(400000, VEML6030_SEL_IT_400MS, 16),
+ GAIN_SCALE_ITIME_US(800000, VEML6030_SEL_IT_800MS, 32),
};
-/*
- * Scale is 1/gain. Value 0.125 is ALS gain x (1/8), 0.25 is
- * ALS gain x (1/4), 0.5 is ALS gain x (1/2), 1.0 is ALS gain x 1,
- * 2.0 is ALS gain x2, and 4.0 is ALS gain x 4.
+/*
+ * Gains are multiplied by 8 to work with integers. The values in the
+ * iio-gts tables don't need corrections because the maximum value of
+ * the scale refers to GAIN = x1, and the rest of the values are
+ * obtained from the resulting linear function.
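+ * For example, the x0.125 hardware gain is listed as 1 and the x2 gain as 16.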
*/
-static const int veml6030_scale_vals[][2] = {
- { 0, 125000 },
- { 0, 250000 },
- { 1, 0 },
- { 2, 0 },
+#define VEML6030_SEL_MILLI_GAIN_X125 2
+#define VEML6030_SEL_MILLI_GAIN_X250 3
+#define VEML6030_SEL_MILLI_GAIN_X1000 0
+#define VEML6030_SEL_MILLI_GAIN_X2000 1
+static const struct iio_gain_sel_pair veml6030_gain_sel[] = {
+ GAIN_SCALE_GAIN(1, VEML6030_SEL_MILLI_GAIN_X125),
+ GAIN_SCALE_GAIN(2, VEML6030_SEL_MILLI_GAIN_X250),
+ GAIN_SCALE_GAIN(8, VEML6030_SEL_MILLI_GAIN_X1000),
+ GAIN_SCALE_GAIN(16, VEML6030_SEL_MILLI_GAIN_X2000),
};
-static const int veml6035_scale_vals[][2] = {
- { 0, 125000 },
- { 0, 250000 },
- { 0, 500000 },
- { 1, 0 },
- { 2, 0 },
- { 4, 0 },
+#define VEML6035_SEL_MILLI_GAIN_X125 4
+#define VEML6035_SEL_MILLI_GAIN_X250 5
+#define VEML6035_SEL_MILLI_GAIN_X500 7
+#define VEML6035_SEL_MILLI_GAIN_X1000 0
+#define VEML6035_SEL_MILLI_GAIN_X2000 1
+#define VEML6035_SEL_MILLI_GAIN_X4000 3
+static const struct iio_gain_sel_pair veml6035_gain_sel[] = {
+ GAIN_SCALE_GAIN(1, VEML6035_SEL_MILLI_GAIN_X125),
+ GAIN_SCALE_GAIN(2, VEML6035_SEL_MILLI_GAIN_X250),
+ GAIN_SCALE_GAIN(4, VEML6035_SEL_MILLI_GAIN_X500),
+ GAIN_SCALE_GAIN(8, VEML6035_SEL_MILLI_GAIN_X1000),
+ GAIN_SCALE_GAIN(16, VEML6035_SEL_MILLI_GAIN_X2000),
+ GAIN_SCALE_GAIN(32, VEML6035_SEL_MILLI_GAIN_X4000),
};
/*
@@ -319,113 +351,112 @@ static const struct iio_chan_spec veml7700_channels[] = {
IIO_CHAN_SOFT_TIMESTAMP(VEML6030_SCAN_TIMESTAMP),
};
+static const struct regmap_range veml6030_readable_ranges[] = {
+ regmap_reg_range(VEML6030_REG_ALS_CONF, VEML6030_REG_ALS_INT),
+};
+
+static const struct regmap_access_table veml6030_readable_table = {
+ .yes_ranges = veml6030_readable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml6030_readable_ranges),
+};
+
+static const struct regmap_range veml6030_writable_ranges[] = {
+ regmap_reg_range(VEML6030_REG_ALS_CONF, VEML6030_REG_ALS_PSM),
+};
+
+static const struct regmap_access_table veml6030_writable_table = {
+ .yes_ranges = veml6030_writable_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml6030_writable_ranges),
+};
+
+static const struct regmap_range veml6030_volatile_ranges[] = {
+ regmap_reg_range(VEML6030_REG_ALS_DATA, VEML6030_REG_WH_DATA),
+};
+
+static const struct regmap_access_table veml6030_volatile_table = {
+ .yes_ranges = veml6030_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(veml6030_volatile_ranges),
+};
+
static const struct regmap_config veml6030_regmap_config = {
.name = "veml6030_regmap",
.reg_bits = 8,
.val_bits = 16,
.max_register = VEML6030_REG_ALS_INT,
.val_format_endian = REGMAP_ENDIAN_LITTLE,
+ .rd_table = &veml6030_readable_table,
+ .wr_table = &veml6030_writable_table,
+ .volatile_table = &veml6030_volatile_table,
+ .cache_type = REGCACHE_RBTREE,
};
-static int veml6030_get_intgrn_tm(struct iio_dev *indio_dev,
- int *val, int *val2)
+static int veml6030_get_it(struct veml6030_data *data, int *val, int *val2)
{
- int ret, reg;
- struct veml6030_data *data = iio_priv(indio_dev);
+ int ret, it_idx;
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
- if (ret) {
- dev_err(&data->client->dev,
- "can't read als conf register %d\n", ret);
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
return ret;
- }
- switch ((reg >> 6) & 0xF) {
- case 0:
- *val2 = 100000;
- break;
- case 1:
- *val2 = 200000;
- break;
- case 2:
- *val2 = 400000;
- break;
- case 3:
- *val2 = 800000;
- break;
- case 8:
- *val2 = 50000;
- break;
- case 12:
- *val2 = 25000;
- break;
- default:
- return -EINVAL;
- }
+ ret = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (ret < 0)
+ return ret;
+ *val2 = ret;
*val = 0;
+
return IIO_VAL_INT_PLUS_MICRO;
}
-static int veml6030_set_intgrn_tm(struct iio_dev *indio_dev,
- int val, int val2)
+static int veml6030_set_it(struct iio_dev *indio_dev, int val, int val2)
{
- int ret, new_int_time, int_idx;
struct veml6030_data *data = iio_priv(indio_dev);
+ int ret, gain_idx, it_idx, new_gain, prev_gain, prev_it;
+ bool in_range;
- if (val)
+ if (val || !iio_gts_valid_time(&data->gts, val2))
return -EINVAL;
- switch (val2) {
- case 25000:
- new_int_time = 0x300;
- int_idx = 5;
- break;
- case 50000:
- new_int_time = 0x200;
- int_idx = 4;
- break;
- case 100000:
- new_int_time = 0x00;
- int_idx = 3;
- break;
- case 200000:
- new_int_time = 0x40;
- int_idx = 2;
- break;
- case 400000:
- new_int_time = 0x80;
- int_idx = 1;
- break;
- case 800000:
- new_int_time = 0xC0;
- int_idx = 0;
- break;
- default:
- return -EINVAL;
- }
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
+ return ret;
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6030_ALS_IT, new_int_time);
- if (ret) {
- dev_err(&data->client->dev,
- "can't update als integration time %d\n", ret);
+ ret = regmap_field_read(data->rf.gain, &gain_idx);
+ if (ret)
return ret;
- }
- /*
- * Cache current integration time and update resolution. For every
- * increase in integration time to next level, resolution is halved
- * and vice-versa.
- */
- if (data->cur_integration_time < int_idx)
- data->cur_resolution <<= int_idx - data->cur_integration_time;
- else if (data->cur_integration_time > int_idx)
- data->cur_resolution >>= data->cur_integration_time - int_idx;
+ prev_it = iio_gts_find_int_time_by_sel(&data->gts, it_idx);
+ if (prev_it < 0)
+ return prev_it;
+
+ if (prev_it == val2)
+ return 0;
- data->cur_integration_time = int_idx;
+ prev_gain = iio_gts_find_gain_by_sel(&data->gts, gain_idx);
+ if (prev_gain < 0)
+ return prev_gain;
- return ret;
+ ret = iio_gts_find_new_gain_by_gain_time_min(&data->gts, prev_gain, prev_it,
+ val2, &new_gain, &in_range);
+ if (ret)
+ return ret;
+
+ if (!in_range)
+ dev_dbg(&data->client->dev, "Optimal gain out of range\n");
+
+ ret = iio_gts_find_sel_by_int_time(&data->gts, val2);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_field_write(data->rf.it, ret);
+ if (ret)
+ return ret;
+
+ ret = iio_gts_find_sel_by_gain(&data->gts, new_gain);
+ if (ret < 0)
+ return ret;
+
+ return regmap_field_write(data->rf.gain, ret);
}
static int veml6030_read_persistence(struct iio_dev *indio_dev,
@@ -434,7 +465,7 @@ static int veml6030_read_persistence(struct iio_dev *indio_dev,
int ret, reg, period, x, y;
struct veml6030_data *data = iio_priv(indio_dev);
- ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ ret = veml6030_get_it(data, &x, &y);
if (ret < 0)
return ret;
@@ -459,7 +490,7 @@ static int veml6030_write_persistence(struct iio_dev *indio_dev,
int ret, period, x, y;
struct veml6030_data *data = iio_priv(indio_dev);
- ret = veml6030_get_intgrn_tm(indio_dev, &x, &y);
+ ret = veml6030_get_it(data, &x, &y);
if (ret < 0)
return ret;
@@ -488,177 +519,29 @@ static int veml6030_write_persistence(struct iio_dev *indio_dev,
return ret;
}
-/*
- * Cache currently set gain & update resolution. For every
- * increase in the gain to next level, resolution is halved
- * and vice-versa.
- */
-static void veml6030_update_gain_res(struct veml6030_data *data, int gain_idx)
-{
- if (data->cur_gain < gain_idx)
- data->cur_resolution <<= gain_idx - data->cur_gain;
- else if (data->cur_gain > gain_idx)
- data->cur_resolution >>= data->cur_gain - gain_idx;
-
- data->cur_gain = gain_idx;
-}
-
-static int veml6030_set_als_gain(struct iio_dev *indio_dev,
- int val, int val2)
+static int veml6030_set_scale(struct iio_dev *indio_dev, int val, int val2)
{
- int ret, new_gain, gain_idx;
+ int ret, gain_sel, it_idx, it_sel;
struct veml6030_data *data = iio_priv(indio_dev);
- if (val == 0 && val2 == 125000) {
- new_gain = 0x1000; /* 0x02 << 11 */
- gain_idx = 3;
- } else if (val == 0 && val2 == 250000) {
- new_gain = 0x1800;
- gain_idx = 2;
- } else if (val == 1 && val2 == 0) {
- new_gain = 0x00;
- gain_idx = 1;
- } else if (val == 2 && val2 == 0) {
- new_gain = 0x800;
- gain_idx = 0;
- } else {
- return -EINVAL;
- }
-
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6030_ALS_GAIN, new_gain);
- if (ret) {
- dev_err(&data->client->dev,
- "can't set als gain %d\n", ret);
+ ret = regmap_field_read(data->rf.it, &it_idx);
+ if (ret)
return ret;
- }
-
- veml6030_update_gain_res(data, gain_idx);
-
- return 0;
-}
-
-static int veml6035_set_als_gain(struct iio_dev *indio_dev, int val, int val2)
-{
- int ret, new_gain, gain_idx;
- struct veml6030_data *data = iio_priv(indio_dev);
- if (val == 0 && val2 == 125000) {
- new_gain = VEML6035_SENS;
- gain_idx = 5;
- } else if (val == 0 && val2 == 250000) {
- new_gain = VEML6035_SENS | VEML6035_GAIN;
- gain_idx = 4;
- } else if (val == 0 && val2 == 500000) {
- new_gain = VEML6035_SENS | VEML6035_GAIN |
- VEML6035_DG;
- gain_idx = 3;
- } else if (val == 1 && val2 == 0) {
- new_gain = 0x0000;
- gain_idx = 2;
- } else if (val == 2 && val2 == 0) {
- new_gain = VEML6035_GAIN;
- gain_idx = 1;
- } else if (val == 4 && val2 == 0) {
- new_gain = VEML6035_GAIN | VEML6035_DG;
- gain_idx = 0;
- } else {
- return -EINVAL;
- }
-
- ret = regmap_update_bits(data->regmap, VEML6030_REG_ALS_CONF,
- VEML6035_GAIN_M, new_gain);
- if (ret) {
- dev_err(&data->client->dev, "can't set als gain %d\n", ret);
+ ret = iio_gts_find_gain_time_sel_for_scale(&data->gts, val, val2,
+ &gain_sel, &it_sel);
+ if (ret)
return ret;
- }
-
- veml6030_update_gain_res(data, gain_idx);
- return 0;
-}
-
-static int veml6030_get_als_gain(struct iio_dev *indio_dev,
- int *val, int *val2)
-{
- int ret, reg;
- struct veml6030_data *data = iio_priv(indio_dev);
-
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
- if (ret) {
- dev_err(&data->client->dev,
- "can't read als conf register %d\n", ret);
+ ret = regmap_field_write(data->rf.it, it_sel);
+ if (ret)
return ret;
- }
- switch ((reg >> 11) & 0x03) {
- case 0:
- *val = 1;
- *val2 = 0;
- break;
- case 1:
- *val = 2;
- *val2 = 0;
- break;
- case 2:
- *val = 0;
- *val2 = 125000;
- break;
- case 3:
- *val = 0;
- *val2 = 250000;
- break;
- default:
- return -EINVAL;
- }
-
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
-static int veml6035_get_als_gain(struct iio_dev *indio_dev, int *val, int *val2)
-{
- int ret, reg;
- struct veml6030_data *data = iio_priv(indio_dev);
-
- ret = regmap_read(data->regmap, VEML6030_REG_ALS_CONF, &reg);
- if (ret) {
- dev_err(&data->client->dev,
- "can't read als conf register %d\n", ret);
+ ret = regmap_field_write(data->rf.gain, gain_sel);
+ if (ret)
return ret;
- }
- switch (FIELD_GET(VEML6035_GAIN_M, reg)) {
- case 0:
- *val = 1;
- *val2 = 0;
- break;
- case 1:
- case 2:
- *val = 2;
- *val2 = 0;
- break;
- case 3:
- *val = 4;
- *val2 = 0;
- break;
- case 4:
- *val = 0;
- *val2 = 125000;
- break;
- case 5:
- case 6:
- *val = 0;
- *val2 = 250000;
- break;
- case 7:
- *val = 0;
- *val2 = 500000;
- break;
- default:
- return -EINVAL;
- }
-
- return IIO_VAL_INT_PLUS_MICRO;
+ return 0;
}
static int veml6030_read_thresh(struct iio_dev *indio_dev,
@@ -705,6 +588,71 @@ static int veml6030_write_thresh(struct iio_dev *indio_dev,
return ret;
}
+static int veml6030_get_total_gain(struct veml6030_data *data)
+{
+ int gain, it, reg, ret;
+
+ ret = regmap_field_read(data->rf.gain, &reg);
+ if (ret)
+ return ret;
+
+ gain = iio_gts_find_gain_by_sel(&data->gts, reg);
+ if (gain < 0)
+ return gain;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ it = iio_gts_find_int_time_by_sel(&data->gts, reg);
+ if (it < 0)
+ return it;
+
+ return iio_gts_get_total_gain(&data->gts, gain, it);
+}
+
+static int veml6030_get_scale(struct veml6030_data *data, int *val, int *val2)
+{
+ int gain, it, reg, ret;
+
+ ret = regmap_field_read(data->rf.gain, &reg);
+ if (ret)
+ return ret;
+
+ gain = iio_gts_find_gain_by_sel(&data->gts, reg);
+ if (gain < 0)
+ return gain;
+
+ ret = regmap_field_read(data->rf.it, &reg);
+ if (ret)
+ return ret;
+
+ it = iio_gts_find_int_time_by_sel(&data->gts, reg);
+ if (it < 0)
+ return it;
+
+ ret = iio_gts_get_scale(&data->gts, gain, it, val, val2);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static int veml6030_process_als(struct veml6030_data *data, int raw,
+ int *val, int *val2)
+{
+ int total_gain;
+
+ total_gain = veml6030_get_total_gain(data);
+ if (total_gain < 0)
+ return total_gain;
+
+ *val = raw * data->chip->max_scale / total_gain / 10000;
+ *val2 = raw * data->chip->max_scale / total_gain % 10000 * 100;
+
+ return IIO_VAL_INT_PLUS_MICRO;
+}
+
/*
* Provide both raw as well as light reading in lux.
* light (in lux) = resolution * raw reading
@@ -728,11 +676,9 @@ static int veml6030_read_raw(struct iio_dev *indio_dev,
dev_err(dev, "can't read als data %d\n", ret);
return ret;
}
- if (mask == IIO_CHAN_INFO_PROCESSED) {
- *val = (reg * data->cur_resolution) / 10000;
- *val2 = (reg * data->cur_resolution) % 10000 * 100;
- return IIO_VAL_INT_PLUS_MICRO;
- }
+ if (mask == IIO_CHAN_INFO_PROCESSED)
+ return veml6030_process_als(data, reg, val, val2);
+
*val = reg;
return IIO_VAL_INT;
case IIO_INTENSITY:
@@ -747,9 +693,9 @@ static int veml6030_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
case IIO_CHAN_INFO_INT_TIME:
- return veml6030_get_intgrn_tm(indio_dev, val, val2);
+ return veml6030_get_it(data, val, val2);
case IIO_CHAN_INFO_SCALE:
- return data->chip->get_als_gain(indio_dev, val, val2);
+ return veml6030_get_scale(data, val, val2);
default:
return -EINVAL;
}
@@ -764,15 +710,9 @@ static int veml6030_read_avail(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- *vals = (int *)&veml6030_it_times;
- *length = 2 * ARRAY_SIZE(veml6030_it_times);
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ return iio_gts_avail_times(&data->gts, vals, type, length);
case IIO_CHAN_INFO_SCALE:
- *vals = (int *)*data->chip->scale_vals;
- *length = 2 * data->chip->num_scale_vals;
- *type = IIO_VAL_INT_PLUS_MICRO;
- return IIO_AVAIL_LIST;
+ return iio_gts_all_avail_scales(&data->gts, vals, type, length);
}
return -EINVAL;
@@ -782,13 +722,25 @@ static int veml6030_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- struct veml6030_data *data = iio_priv(indio_dev);
-
switch (mask) {
case IIO_CHAN_INFO_INT_TIME:
- return veml6030_set_intgrn_tm(indio_dev, val, val2);
+ return veml6030_set_it(indio_dev, val, val2);
case IIO_CHAN_INFO_SCALE:
- return data->chip->set_als_gain(indio_dev, val, val2);
+ return veml6030_set_scale(indio_dev, val, val2);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int veml6030_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_INT_TIME:
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -886,6 +838,7 @@ static const struct iio_info veml6030_info = {
.read_raw = veml6030_read_raw,
.read_avail = veml6030_read_avail,
.write_raw = veml6030_write_raw,
+ .write_raw_get_fmt = veml6030_write_raw_get_fmt,
.read_event_value = veml6030_read_event_val,
.write_event_value = veml6030_write_event_val,
.read_event_config = veml6030_read_interrupt_config,
@@ -897,6 +850,7 @@ static const struct iio_info veml6030_info_no_irq = {
.read_raw = veml6030_read_raw,
.read_avail = veml6030_read_avail,
.write_raw = veml6030_write_raw,
+ .write_raw_get_fmt = veml6030_write_raw_get_fmt,
};
static irqreturn_t veml6030_event_handler(int irq, void *private)
@@ -990,6 +944,27 @@ static int veml7700_set_info(struct iio_dev *indio_dev)
return 0;
}
+static int veml6030_regfield_init(struct iio_dev *indio_dev)
+{
+ struct veml6030_data *data = iio_priv(indio_dev);
+ struct regmap *regmap = data->regmap;
+ struct device *dev = &data->client->dev;
+ struct regmap_field *rm_field;
+ struct veml6030_rf *rf = &data->rf;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, data->chip->it_rf);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->it = rm_field;
+
+ rm_field = devm_regmap_field_alloc(dev, regmap, data->chip->gain_rf);
+ if (IS_ERR(rm_field))
+ return PTR_ERR(rm_field);
+ rf->gain = rm_field;
+
+ return 0;
+}
+
/*
* Set ALS gain to 1/8, integration time to 100 ms, PSM to mode 2,
* persistence to 1 x integration time and the threshold
@@ -1001,6 +976,13 @@ static int veml6030_hw_init(struct iio_dev *indio_dev, struct device *dev)
int ret, val;
struct veml6030_data *data = iio_priv(indio_dev);
+ ret = devm_iio_init_iio_gts(dev, 2, 150400000,
+ veml6030_gain_sel, ARRAY_SIZE(veml6030_gain_sel),
+ veml6030_it_sel, ARRAY_SIZE(veml6030_it_sel),
+ &data->gts);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init iio gts\n");
+
ret = veml6030_als_shut_down(data);
if (ret)
return dev_err_probe(dev, ret, "can't shutdown als\n");
@@ -1036,11 +1018,6 @@ static int veml6030_hw_init(struct iio_dev *indio_dev, struct device *dev)
return dev_err_probe(dev, ret,
"can't clear als interrupt status\n");
- /* Cache currently active measurement parameters */
- data->cur_gain = 3;
- data->cur_resolution = 5376;
- data->cur_integration_time = 3;
-
return ret;
}
@@ -1056,6 +1033,13 @@ static int veml6035_hw_init(struct iio_dev *indio_dev, struct device *dev)
int ret, val;
struct veml6030_data *data = iio_priv(indio_dev);
+ ret = devm_iio_init_iio_gts(dev, 0, 409600000,
+ veml6035_gain_sel, ARRAY_SIZE(veml6035_gain_sel),
+ veml6030_it_sel, ARRAY_SIZE(veml6030_it_sel),
+ &data->gts);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to init iio gts\n");
+
ret = veml6030_als_shut_down(data);
if (ret)
return dev_err_probe(dev, ret, "can't shutdown als\n");
@@ -1092,11 +1076,6 @@ static int veml6035_hw_init(struct iio_dev *indio_dev, struct device *dev)
return dev_err_probe(dev, ret,
"can't clear als interrupt status\n");
- /* Cache currently active measurement parameters */
- data->cur_gain = 5;
- data->cur_resolution = 1024;
- data->cur_integration_time = 3;
-
return 0;
}
@@ -1143,6 +1122,11 @@ static int veml6030_probe(struct i2c_client *client)
if (ret < 0)
return ret;
+ ret = veml6030_regfield_init(indio_dev);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "failed to init regfields\n");
+
ret = data->chip->hw_init(indio_dev, &client->dev);
if (ret < 0)
return ret;
@@ -1187,38 +1171,35 @@ static DEFINE_RUNTIME_DEV_PM_OPS(veml6030_pm_ops, veml6030_runtime_suspend,
static const struct veml603x_chip veml6030_chip = {
.name = "veml6030",
- .scale_vals = &veml6030_scale_vals,
- .num_scale_vals = ARRAY_SIZE(veml6030_scale_vals),
.channels = veml6030_channels,
.num_channels = ARRAY_SIZE(veml6030_channels),
+ .gain_rf = VEML6030_GAIN_RF,
+ .it_rf = VEML6030_IT_RF,
+ .max_scale = VEML6030_MAX_SCALE,
.hw_init = veml6030_hw_init,
.set_info = veml6030_set_info,
- .set_als_gain = veml6030_set_als_gain,
- .get_als_gain = veml6030_get_als_gain,
};
static const struct veml603x_chip veml6035_chip = {
.name = "veml6035",
- .scale_vals = &veml6035_scale_vals,
- .num_scale_vals = ARRAY_SIZE(veml6035_scale_vals),
.channels = veml6030_channels,
.num_channels = ARRAY_SIZE(veml6030_channels),
+ .gain_rf = VEML6035_GAIN_RF,
+ .it_rf = VEML6030_IT_RF,
+ .max_scale = VEML6035_MAX_SCALE,
.hw_init = veml6035_hw_init,
.set_info = veml6030_set_info,
- .set_als_gain = veml6035_set_als_gain,
- .get_als_gain = veml6035_get_als_gain,
};
static const struct veml603x_chip veml7700_chip = {
.name = "veml7700",
- .scale_vals = &veml6030_scale_vals,
- .num_scale_vals = ARRAY_SIZE(veml6030_scale_vals),
.channels = veml7700_channels,
.num_channels = ARRAY_SIZE(veml7700_channels),
+ .gain_rf = VEML6030_GAIN_RF,
+ .it_rf = VEML6030_IT_RF,
+ .max_scale = VEML6030_MAX_SCALE,
.hw_init = veml6030_hw_init,
.set_info = veml7700_set_info,
- .set_als_gain = veml6030_set_als_gain,
- .get_als_gain = veml6030_get_als_gain,
};
static const struct of_device_id veml6030_of_match[] = {
@@ -1260,3 +1241,4 @@ module_i2c_driver(veml6030_driver);
MODULE_AUTHOR("Rishi Gupta <gupt21@gmail.com>");
MODULE_DESCRIPTION("VEML6030 Ambient Light Sensor");
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("IIO_GTS_HELPER");
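The rework above reads integration time and gain through regmap_field handles (data->rf.it, data->rf.gain) allocated from per-chip reg_field descriptors (it_rf, gain_rf). Those descriptor definitions sit outside this hunk; the sketch below shows what they could look like, with bit positions inferred from the removed open-coded accessors ((reg >> 6) & 0xF for integration time, (reg >> 11) & 0x03 for the VEML6030 gain). The names and positions here are illustrative assumptions, not the patch's actual definitions.

#include <linux/regmap.h>

/*
 * Hypothetical reg_field descriptors, reconstructed from the removed
 * shift/mask code: integration time occupies ALS_CONF bits [9:6] and the
 * VEML6030 gain bits [12:11].  The real VEML6030_IT_RF/VEML6030_GAIN_RF
 * definitions are introduced elsewhere in this patch.
 */
static const struct reg_field veml6030_it_field   = REG_FIELD(VEML6030_REG_ALS_CONF, 6, 9);
static const struct reg_field veml6030_gain_field = REG_FIELD(VEML6030_REG_ALS_CONF, 11, 12);

With such descriptors, the devm_regmap_field_alloc() calls in veml6030_regfield_init() return handles on which regmap_field_read()/regmap_field_write() touch only those bits, which is what lets the integration-time, gain and scale paths above drop their manual shifting and masking.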
diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c
index 05d4c0e9015d..859891e8f115 100644
--- a/drivers/iio/light/veml6075.c
+++ b/drivers/iio/light/veml6075.c
@@ -195,13 +195,17 @@ static int veml6075_read_uv_direct(struct veml6075_data *data, int chan,
static int veml6075_read_int_time_index(struct veml6075_data *data)
{
- int ret, conf;
+ int ret, conf, int_index;
ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf);
if (ret < 0)
return ret;
- return FIELD_GET(VEML6075_CONF_IT, conf);
+ int_index = FIELD_GET(VEML6075_CONF_IT, conf);
+ if (int_index >= ARRAY_SIZE(veml6075_it_ms))
+ return -EINVAL;
+
+ return int_index;
}
static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val)
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 7177cd1d67cb..3debf1320ad1 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -235,6 +235,17 @@ config SENSORS_RM3100_SPI
To compile this driver as a module, choose M here: the module
will be called rm3100-spi.
+config SI7210
+ tristate "SI7210 Hall effect sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here to add support for the SI7210 Hall effect sensor.
+
+ This driver can also be compiled as a module.
+ To compile this driver as a module, choose M here: the module
+ will be called si7210.
+
config TI_TMAG5273
tristate "TI TMAG5273 Low-Power Linear 3D Hall-Effect Sensor"
depends on I2C
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 3e4c2ecd9adf..9297723a97d8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_SENSORS_RM3100) += rm3100-core.o
obj-$(CONFIG_SENSORS_RM3100_I2C) += rm3100-i2c.o
obj-$(CONFIG_SENSORS_RM3100_SPI) += rm3100-spi.o
+obj-$(CONFIG_SI7210) += si7210.o
+
obj-$(CONFIG_TI_TMAG5273) += tmag5273.o
obj-$(CONFIG_YAMAHA_YAS530) += yamaha-yas530.o
diff --git a/drivers/iio/magnetometer/af8133j.c b/drivers/iio/magnetometer/af8133j.c
index a70bf8a3c73b..c1fc339e85b4 100644
--- a/drivers/iio/magnetometer/af8133j.c
+++ b/drivers/iio/magnetometer/af8133j.c
@@ -383,7 +383,6 @@ static const struct regmap_config af8133j_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = AF8133J_REG_SWR,
- .cache_type = REGCACHE_NONE,
};
static void af8133j_power_down_action(void *ptr)
diff --git a/drivers/iio/magnetometer/si7210.c b/drivers/iio/magnetometer/si7210.c
new file mode 100644
index 000000000000..27e3feba7a0f
--- /dev/null
+++ b/drivers/iio/magnetometer/si7210.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Silicon Labs Si7210 Hall Effect sensor driver
+ *
+ * Copyright (c) 2024 Antoni Pokusinski <apokusinski01@gmail.com>
+ *
+ * Datasheet:
+ * https://www.silabs.com/documents/public/data-sheets/si7210-datasheet.pdf
+ */
+
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/math64.h>
+#include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+#include <linux/units.h>
+#include <asm/byteorder.h>
+
+/* Registers offsets and masks */
+#define SI7210_REG_DSPSIGM 0xC1
+#define SI7210_REG_DSPSIGL 0xC2
+
+#define SI7210_MASK_DSPSIGSEL GENMASK(2, 0)
+#define SI7210_REG_DSPSIGSEL 0xC3
+
+#define SI7210_MASK_STOP BIT(1)
+#define SI7210_MASK_ONEBURST BIT(2)
+#define SI7210_REG_POWER_CTRL 0xC4
+
+#define SI7210_MASK_ARAUTOINC BIT(0)
+#define SI7210_REG_ARAUTOINC 0xC5
+
+#define SI7210_REG_A0 0xCA
+#define SI7210_REG_A1 0xCB
+#define SI7210_REG_A2 0xCC
+#define SI7210_REG_A3 0xCE
+#define SI7210_REG_A4 0xCF
+#define SI7210_REG_A5 0xD0
+
+#define SI7210_REG_OTP_ADDR 0xE1
+#define SI7210_REG_OTP_DATA 0xE2
+
+#define SI7210_MASK_OTP_READ_EN BIT(1)
+#define SI7210_REG_OTP_CTRL 0xE3
+
+/* OTP data registers offsets */
+#define SI7210_OTPREG_TMP_OFF 0x1D
+#define SI7210_OTPREG_TMP_GAIN 0x1E
+
+#define SI7210_OTPREG_A0_20 0x21
+#define SI7210_OTPREG_A1_20 0x22
+#define SI7210_OTPREG_A2_20 0x23
+#define SI7210_OTPREG_A3_20 0x24
+#define SI7210_OTPREG_A4_20 0x25
+#define SI7210_OTPREG_A5_20 0x26
+
+#define SI7210_OTPREG_A0_200 0x27
+#define SI7210_OTPREG_A1_200 0x28
+#define SI7210_OTPREG_A2_200 0x29
+#define SI7210_OTPREG_A3_200 0x2A
+#define SI7210_OTPREG_A4_200 0x2B
+#define SI7210_OTPREG_A5_200 0x2C
+
+#define A_REGS_COUNT 6
+
+static const unsigned int a20_otp_regs[A_REGS_COUNT] = {
+ SI7210_OTPREG_A0_20, SI7210_OTPREG_A1_20, SI7210_OTPREG_A2_20,
+ SI7210_OTPREG_A3_20, SI7210_OTPREG_A4_20, SI7210_OTPREG_A5_20,
+};
+
+static const unsigned int a200_otp_regs[A_REGS_COUNT] = {
+ SI7210_OTPREG_A0_200, SI7210_OTPREG_A1_200, SI7210_OTPREG_A2_200,
+ SI7210_OTPREG_A3_200, SI7210_OTPREG_A4_200, SI7210_OTPREG_A5_200,
+};
+
+static const struct regmap_range si7210_read_reg_ranges[] = {
+ regmap_reg_range(SI7210_REG_DSPSIGM, SI7210_REG_ARAUTOINC),
+ regmap_reg_range(SI7210_REG_A0, SI7210_REG_A2),
+ regmap_reg_range(SI7210_REG_A3, SI7210_REG_A5),
+ regmap_reg_range(SI7210_REG_OTP_ADDR, SI7210_REG_OTP_CTRL),
+};
+
+static const struct regmap_access_table si7210_readable_regs = {
+ .yes_ranges = si7210_read_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si7210_read_reg_ranges),
+};
+
+static const struct regmap_range si7210_write_reg_ranges[] = {
+ regmap_reg_range(SI7210_REG_DSPSIGSEL, SI7210_REG_ARAUTOINC),
+ regmap_reg_range(SI7210_REG_A0, SI7210_REG_A2),
+ regmap_reg_range(SI7210_REG_A3, SI7210_REG_A5),
+ regmap_reg_range(SI7210_REG_OTP_ADDR, SI7210_REG_OTP_CTRL),
+};
+
+static const struct regmap_access_table si7210_writeable_regs = {
+ .yes_ranges = si7210_write_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si7210_write_reg_ranges),
+};
+
+static const struct regmap_range si7210_volatile_reg_ranges[] = {
+ regmap_reg_range(SI7210_REG_DSPSIGM, SI7210_REG_DSPSIGL),
+ regmap_reg_range(SI7210_REG_POWER_CTRL, SI7210_REG_POWER_CTRL),
+};
+
+static const struct regmap_access_table si7210_volatile_regs = {
+ .yes_ranges = si7210_volatile_reg_ranges,
+ .n_yes_ranges = ARRAY_SIZE(si7210_volatile_reg_ranges),
+};
+
+static const struct regmap_config si7210_regmap_conf = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = SI7210_REG_OTP_CTRL,
+
+ .rd_table = &si7210_readable_regs,
+ .wr_table = &si7210_writeable_regs,
+ .volatile_table = &si7210_volatile_regs,
+};
+
+struct si7210_data {
+ struct regmap *regmap;
+ struct i2c_client *client;
+ struct regulator *vdd;
+ struct mutex fetch_lock; /* lock for a single measurement fetch */
+ s8 temp_offset;
+ s8 temp_gain;
+ s8 scale_20_a[A_REGS_COUNT];
+ s8 scale_200_a[A_REGS_COUNT];
+ u8 curr_scale;
+};
+
+static const struct iio_chan_spec si7210_channels[] = {
+ {
+ .type = IIO_MAGN,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET),
+ }, {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ },
+};
+
+static int si7210_fetch_measurement(struct si7210_data *data,
+ struct iio_chan_spec const *chan,
+ u16 *buf)
+{
+ u8 dspsigsel = chan->type == IIO_MAGN ? 0 : 1;
+ int ret;
+ __be16 result;
+
+ guard(mutex)(&data->fetch_lock);
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_DSPSIGSEL,
+ SI7210_MASK_DSPSIGSEL, dspsigsel);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_POWER_CTRL,
+ SI7210_MASK_ONEBURST | SI7210_MASK_STOP,
+ SI7210_MASK_ONEBURST & ~SI7210_MASK_STOP);
+ if (ret)
+ return ret;
+
+ /*
+ * Read the contents of the result registers: DSPSIGM, DSPSIGL.
+ */
+ ret = regmap_bulk_read(data->regmap, SI7210_REG_DSPSIGM,
+ &result, sizeof(result));
+ if (ret)
+ return ret;
+
+ *buf = be16_to_cpu(result);
+
+ return 0;
+}
+
+static int si7210_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct si7210_data *data = iio_priv(indio_dev);
+ long long temp;
+ u16 dspsig;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = si7210_fetch_measurement(data, chan, &dspsig);
+ if (ret)
+ return ret;
+
+ *val = dspsig & GENMASK(14, 0);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ if (data->curr_scale == 20)
+ *val2 = 12500;
+ else /* data->curr_scale == 200 */
+ *val2 = 125000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -16384;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = si7210_fetch_measurement(data, chan, &dspsig);
+ if (ret)
+ return ret;
+
+ /* temp = 32 * Dspsigm[6:0] + (Dspsigl[7:0] >> 3) */
+ temp = FIELD_GET(GENMASK(14, 3), dspsig);
+ temp = div_s64(-383 * temp * temp, 100) + 160940 * temp - 279800000;
+ temp *= (1 + (data->temp_gain / 2048));
+ temp += (int)(MICRO / 16) * data->temp_offset;
+
+ ret = regulator_get_voltage(data->vdd);
+ if (ret < 0)
+ return ret;
+
+ /* temp -= 0.222 * VDD */
+ temp -= 222 * div_s64(ret, MILLI);
+
+ *val = div_s64(temp, MILLI);
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si7210_set_scale(struct si7210_data *data, unsigned int scale)
+{
+ s8 *a_otp_values;
+ int ret;
+
+ if (scale == 20)
+ a_otp_values = data->scale_20_a;
+ else if (scale == 200)
+ a_otp_values = data->scale_200_a;
+ else
+ return -EINVAL;
+
+ guard(mutex)(&data->fetch_lock);
+
+ /* Write the registers 0xCA - 0xCC */
+ ret = regmap_bulk_write(data->regmap, SI7210_REG_A0, a_otp_values, 3);
+ if (ret)
+ return ret;
+
+ /* Write the registers 0xCE - 0xD0 */
+ ret = regmap_bulk_write(data->regmap, SI7210_REG_A3, &a_otp_values[3], 3);
+ if (ret)
+ return ret;
+
+ data->curr_scale = scale;
+
+ return 0;
+}
+
+static int si7210_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct si7210_data *data = iio_priv(indio_dev);
+ unsigned int scale;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val == 0 && val2 == 12500)
+ scale = 20;
+ else if (val == 0 && val2 == 125000)
+ scale = 200;
+ else
+ return -EINVAL;
+
+ return si7210_set_scale(data, scale);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int si7210_read_otpreg_val(struct si7210_data *data, unsigned int otpreg, u8 *val)
+{
+ int ret;
+ unsigned int otpdata;
+
+ ret = regmap_write(data->regmap, SI7210_REG_OTP_ADDR, otpreg);
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_OTP_CTRL,
+ SI7210_MASK_OTP_READ_EN, SI7210_MASK_OTP_READ_EN);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(data->regmap, SI7210_REG_OTP_DATA, &otpdata);
+ if (ret)
+ return ret;
+
+ *val = otpdata;
+
+ return 0;
+}
+
+/*
+ * According to the datasheet, the primary method to wake up the
+ * device is to send an empty write. However, this is not feasible
+ * with the current API, so we use the alternative method, i.e.
+ * reading a single byte. The device should respond with 0xFF.
+ */
+static int si7210_device_wake(struct si7210_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte(data->client);
+ if (ret < 0)
+ return ret;
+
+ if (ret != 0xFF)
+ return -EIO;
+
+ return 0;
+}
+
+static int si7210_device_init(struct si7210_data *data)
+{
+ int ret;
+ unsigned int i;
+
+ ret = si7210_device_wake(data);
+ if (ret)
+ return ret;
+
+ fsleep(1000);
+
+ ret = si7210_read_otpreg_val(data, SI7210_OTPREG_TMP_GAIN, &data->temp_gain);
+ if (ret)
+ return ret;
+
+ ret = si7210_read_otpreg_val(data, SI7210_OTPREG_TMP_OFF, &data->temp_offset);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < A_REGS_COUNT; i++) {
+ ret = si7210_read_otpreg_val(data, a20_otp_regs[i], &data->scale_20_a[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < A_REGS_COUNT; i++) {
+ ret = si7210_read_otpreg_val(data, a200_otp_regs[i], &data->scale_200_a[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_update_bits(data->regmap, SI7210_REG_ARAUTOINC,
+ SI7210_MASK_ARAUTOINC, SI7210_MASK_ARAUTOINC);
+ if (ret)
+ return ret;
+
+ return si7210_set_scale(data, 20);
+}
+
+static const struct iio_info si7210_info = {
+ .read_raw = si7210_read_raw,
+ .write_raw = si7210_write_raw,
+};
+
+static int si7210_probe(struct i2c_client *client)
+{
+ struct si7210_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+
+ ret = devm_mutex_init(&client->dev, &data->fetch_lock);
+ if (ret)
+ return ret;
+
+ data->regmap = devm_regmap_init_i2c(client, &si7210_regmap_conf);
+ if (IS_ERR(data->regmap))
+ return dev_err_probe(&client->dev, PTR_ERR(data->regmap),
+ "failed to register regmap\n");
+
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR(data->vdd))
+ return dev_err_probe(&client->dev, PTR_ERR(data->vdd),
+ "failed to get VDD regulator\n");
+
+ ret = regulator_enable(data->vdd);
+ if (ret)
+ return ret;
+
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &si7210_info;
+ indio_dev->channels = si7210_channels;
+ indio_dev->num_channels = ARRAY_SIZE(si7210_channels);
+
+ ret = si7210_device_init(data);
+ if (ret)
+ return dev_err_probe(&client->dev, ret,
+ "device initialization failed\n");
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id si7210_id[] = {
+ { "si7210" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, si7210_id);
+
+static const struct of_device_id si7210_dt_ids[] = {
+ { .compatible = "silabs,si7210" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, si7210_dt_ids);
+
+static struct i2c_driver si7210_driver = {
+ .driver = {
+ .name = "si7210",
+ .of_match_table = si7210_dt_ids,
+ },
+ .probe = si7210_probe,
+ .id_table = si7210_id,
+};
+module_i2c_driver(si7210_driver);
+
+MODULE_AUTHOR("Antoni Pokusinski <apokusinski01@gmail.com>");
+MODULE_DESCRIPTION("Silicon Labs Si7210 Hall Effect sensor I2C driver");
+MODULE_LICENSE("GPL");
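Since si7210.c introduces a new user-visible IIO interface, a short consumer sketch may help relate the driver's read_raw() cases to what lands in sysfs. This is a hedged user-space illustration, not part of the patch: the device index (iio:device0) and the bare fscanf() parsing are assumptions, and the units follow the standard IIO ABI (Gauss after applying offset and scale to the magnetic channel, millidegrees Celsius for the processed temperature channel).

/* Hypothetical user-space reader for the new si7210 sysfs node. */
#include <stdio.h>

static double read_sysfs(const char *name)
{
	char path[128];
	double v = 0.0;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/bus/iio/devices/iio:device0/%s", name);
	f = fopen(path, "r");
	if (!f)
		return 0.0;
	if (fscanf(f, "%lf", &v) != 1)
		v = 0.0;
	fclose(f);
	return v;
}

int main(void)
{
	/* IIO convention: value = (raw + offset) * scale, in Gauss for magn */
	double gauss = (read_sysfs("in_magn_raw") + read_sysfs("in_magn_offset")) *
		       read_sysfs("in_magn_scale");
	/* in_temp_input is already processed, in millidegrees Celsius */
	double temp_c = read_sysfs("in_temp_input") / 1000.0;

	printf("field: %.3f mT, temperature: %.2f degC\n", gauss / 10.0, temp_c);
	return 0;
}

Switching between the two supported scales (writing 0.012500 or 0.125000 to in_magn_scale) maps to si7210_set_scale() above, which reloads the A0-A5 coefficients for the 20 mT or 200 mT range.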
diff --git a/drivers/iio/pressure/zpa2326_i2c.c b/drivers/iio/pressure/zpa2326_i2c.c
index 49a239ebdabf..a6034bf05d97 100644
--- a/drivers/iio/pressure/zpa2326_i2c.c
+++ b/drivers/iio/pressure/zpa2326_i2c.c
@@ -25,7 +25,6 @@ static const struct regmap_config zpa2326_regmap_i2c_config = {
.precious_reg = zpa2326_isreg_precious,
.max_register = ZPA2326_TEMP_OUT_H_REG,
.read_flag_mask = BIT(7),
- .cache_type = REGCACHE_NONE,
};
static unsigned int zpa2326_i2c_hwid(const struct i2c_client *client)
diff --git a/drivers/iio/pressure/zpa2326_spi.c b/drivers/iio/pressure/zpa2326_spi.c
index 317270fa1c43..c678f5b96266 100644
--- a/drivers/iio/pressure/zpa2326_spi.c
+++ b/drivers/iio/pressure/zpa2326_spi.c
@@ -26,7 +26,6 @@ static const struct regmap_config zpa2326_regmap_spi_config = {
.precious_reg = zpa2326_isreg_precious,
.max_register = ZPA2326_TEMP_OUT_H_REG,
.read_flag_mask = BIT(7) | BIT(6),
- .cache_type = REGCACHE_NONE,
};
static int zpa2326_probe_spi(struct spi_device *spi)
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index e092a935dbac..5aa8e5a22f32 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -1036,12 +1036,13 @@ static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data
return -ENOMEM;
memcpy(bin->data, fw->data, fw->size);
- release_firmware(fw);
bin->fw_size = fw->size;
bin->fw_ver = bin->data[FW_VER_OFFSET];
bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET);
+ release_firmware(fw);
+
return hx9023s_bin_load(data, bin);
}
diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c
index b09d15230111..b0ffd3574013 100644
--- a/drivers/iio/proximity/irsd200.c
+++ b/drivers/iio/proximity/irsd200.c
@@ -10,6 +10,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>
+#include <linux/string_choices.h>
#include <linux/iio/buffer.h>
#include <linux/iio/events.h>
@@ -783,7 +784,7 @@ static int irsd200_set_trigger_state(struct iio_trigger *trig, bool state)
ret = regmap_field_write(data->regfields[IRS_REGF_INTR_DATA], state);
if (ret) {
dev_err(data->dev, "Could not %s data interrupt source (%d)\n",
- state ? "enable" : "disable", ret);
+ str_enable_disable(state), ret);
}
return ret;
diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c
index 0d7f0518d4fb..b60707eba39d 100644
--- a/drivers/iio/proximity/sx9310.c
+++ b/drivers/iio/proximity/sx9310.c
@@ -337,19 +337,26 @@ static int sx9310_read_raw(struct iio_dev *indio_dev,
int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
+ int ret;
if (chan->type != IIO_PROXIMITY)
return -EINVAL;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx_common_read_proximity(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx_common_read_proximity(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_HARDWAREGAIN:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx9310_read_gain(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx9310_read_gain(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9310_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c
index f7819dd2775c..73d972416c01 100644
--- a/drivers/iio/proximity/sx9324.c
+++ b/drivers/iio/proximity/sx9324.c
@@ -429,16 +429,23 @@ static int sx9324_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx_common_read_proximity(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx_common_read_proximity(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_HARDWAREGAIN:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx9324_read_gain(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx9324_read_gain(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9324_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/proximity/sx9360.c b/drivers/iio/proximity/sx9360.c
index a6ff16e33c1e..4448988d4e7e 100644
--- a/drivers/iio/proximity/sx9360.c
+++ b/drivers/iio/proximity/sx9360.c
@@ -321,16 +321,23 @@ static int sx9360_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct sx_common_data *data = iio_priv(indio_dev);
+ int ret;
switch (mask) {
case IIO_CHAN_INFO_RAW:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx_common_read_proximity(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx_common_read_proximity(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_HARDWAREGAIN:
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev)
- return sx9360_read_gain(data, chan, val);
- unreachable();
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = sx9360_read_gain(data, chan, val);
+ iio_device_release_direct(indio_dev);
+ return ret;
case IIO_CHAN_INFO_SAMP_FREQ:
return sx9360_read_samp_freq(data, val, val2);
default:
diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c
index b681129a99b6..ab860cedecd1 100644
--- a/drivers/iio/resolver/ad2s1210.c
+++ b/drivers/iio/resolver/ad2s1210.c
@@ -46,6 +46,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
@@ -175,15 +176,14 @@ struct ad2s1210_state {
static int ad2s1210_set_mode(struct ad2s1210_state *st, enum ad2s1210_mode mode)
{
struct gpio_descs *gpios = st->mode_gpios;
- DECLARE_BITMAP(bitmap, 2);
+ DECLARE_BITMAP(bitmap, 2) = { };
if (!gpios)
return mode == st->fixed_mode ? 0 : -EOPNOTSUPP;
- bitmap[0] = mode;
+ bitmap_write(bitmap, mode, 0, 2);
- return gpiod_set_array_value(gpios->ndescs, gpios->desc, gpios->info,
- bitmap);
+ return gpiod_multi_set_value_cansleep(gpios, bitmap);
}
/*
@@ -1427,7 +1427,7 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
struct device *dev = &st->sdev->dev;
struct gpio_descs *resolution_gpios;
struct gpio_desc *reset_gpio;
- DECLARE_BITMAP(bitmap, 2);
+ DECLARE_BITMAP(bitmap, 2) = { };
int ret;
/* should not be sampling on startup */
@@ -1471,12 +1471,9 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st)
return dev_err_probe(dev, -EINVAL,
"requires exactly 2 resolution-gpios\n");
- bitmap[0] = st->resolution;
+ bitmap_write(bitmap, st->resolution, 0, 2);
- ret = gpiod_set_array_value(resolution_gpios->ndescs,
- resolution_gpios->desc,
- resolution_gpios->info,
- bitmap);
+ ret = gpiod_multi_set_value_cansleep(resolution_gpios, bitmap);
if (ret < 0)
return dev_err_probe(dev, ret,
"failed to set resolution gpios\n");
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
index 1998047a1f24..b5c94b7492f5 100644
--- a/drivers/iio/temperature/tmp006.c
+++ b/drivers/iio/temperature/tmp006.c
@@ -85,19 +85,25 @@ static int tmp006_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
if (channel->type == IIO_VOLTAGE) {
/* LSB is 156.25 nV */
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = tmp006_read_measurement(data, TMP006_VOBJECT);
- if (ret < 0)
- return ret;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = tmp006_read_measurement(data, TMP006_VOBJECT);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+
*val = sign_extend32(ret, 15);
} else if (channel->type == IIO_TEMP) {
/* LSB is 0.03125 degrees Celsius */
- iio_device_claim_direct_scoped(return -EBUSY, indio_dev) {
- ret = tmp006_read_measurement(data, TMP006_TAMBIENT);
- if (ret < 0)
- return ret;
- }
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
+
+ ret = tmp006_read_measurement(data, TMP006_TAMBIENT);
+ iio_device_release_direct(indio_dev);
+ if (ret < 0)
+ return ret;
+
*val = sign_extend32(ret, 15) >> TMP006_TAMBIENT_SHIFT;
} else {
break;
@@ -142,9 +148,8 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
if ((val == tmp006_freqs[i][0]) &&
(val2 == tmp006_freqs[i][1])) {
- ret = iio_device_claim_direct_mode(indio_dev);
- if (ret)
- return ret;
+ if (!iio_device_claim_direct(indio_dev))
+ return -EBUSY;
data->config &= ~TMP006_CONFIG_CR_MASK;
data->config |= i << TMP006_CONFIG_CR_SHIFT;
@@ -153,7 +158,7 @@ static int tmp006_write_raw(struct iio_dev *indio_dev,
TMP006_CONFIG,
data->config);
- iio_device_release_direct_mode(indio_dev);
+ iio_device_release_direct(indio_dev);
return ret;
}
return -EINVAL;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index b91a85a491d0..3721446c6ba4 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -187,7 +187,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
struct auxiliary_device *adev;
- struct notifier_block nb;
unsigned int version, major, minor;
struct bnxt_qplib_chip_ctx *chip_ctx;
struct bnxt_en_dev *en_dev;
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3ac47f4e6122..f039aefcaf67 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -348,8 +348,8 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
goto done;
}
bnxt_re_copy_err_stats(rdev, stats, err_s);
- if (_is_ext_stats_supported(rdev->dev_attr->dev_cap_flags) &&
- !rdev->is_virtfn) {
+ if (bnxt_ext_stats_supported(rdev->chip_ctx, rdev->dev_attr->dev_cap_flags,
+ rdev->is_virtfn)) {
rc = bnxt_re_get_ext_stat(rdev, stats);
if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 2de101d6e825..6f5db32082dd 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -1870,6 +1870,8 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
srq->srq_limit = srq_init_attr->attr.srq_limit;
srq->qplib_srq.eventq_hw_ring_id = rdev->nqr->nq[0].ring_id;
+ srq->qplib_srq.sg_info.pgsize = PAGE_SIZE;
+ srq->qplib_srq.sg_info.pgshft = PAGE_SHIFT;
nq = &rdev->nqr->nq[0];
if (udata) {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index e9e4da4dd576..a94c8c5387d9 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -396,11 +396,16 @@ free_dcb:
static void bnxt_re_async_notifier(void *handle, struct hwrm_async_event_cmpl *cmpl)
{
- struct bnxt_re_dev *rdev = (struct bnxt_re_dev *)handle;
+ struct bnxt_re_en_dev_info *en_info = auxiliary_get_drvdata(handle);
struct bnxt_re_dcb_work *dcb_work;
+ struct bnxt_re_dev *rdev;
u32 data1, data2;
u16 event_id;
+ rdev = en_info->rdev;
+ if (!rdev)
+ return;
+
event_id = le16_to_cpu(cmpl->event_id);
data1 = le32_to_cpu(cmpl->event_data1);
data2 = le32_to_cpu(cmpl->event_data2);
@@ -433,6 +438,8 @@ static void bnxt_re_stop_irq(void *handle, bool reset)
int indx;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
rcfw = &rdev->rcfw;
if (reset) {
@@ -461,6 +468,8 @@ static void bnxt_re_start_irq(void *handle, struct bnxt_msix_entry *ent)
int indx, rc;
rdev = en_info->rdev;
+ if (!rdev)
+ return;
msix_ent = rdev->nqr->msix_entries;
rcfw = &rdev->rcfw;
if (!ent) {
@@ -1350,7 +1359,6 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct auxiliary_device *adev,
return NULL;
}
/* Default values */
- rdev->nb.notifier_call = NULL;
rdev->netdev = en_dev->net;
rdev->en_dev = en_dev;
rdev->adev = adev;
@@ -2345,15 +2353,6 @@ exit:
static void bnxt_re_remove_device(struct bnxt_re_dev *rdev, u8 op_type,
struct auxiliary_device *aux_dev)
{
- if (rdev->nb.notifier_call) {
- unregister_netdevice_notifier(&rdev->nb);
- rdev->nb.notifier_call = NULL;
- } else {
- /* If notifier is null, we should have already done a
- * clean up before coming here.
- */
- return;
- }
bnxt_re_setup_cc(rdev, false);
ib_unregister_device(&rdev->ibdev);
bnxt_re_dev_uninit(rdev, op_type);
@@ -2433,6 +2432,7 @@ static int bnxt_re_suspend(struct auxiliary_device *adev, pm_message_t state)
ibdev_info(&rdev->ibdev, "%s: L2 driver notified to stop en_state 0x%lx",
__func__, en_dev->en_state);
bnxt_re_remove_device(rdev, BNXT_RE_PRE_RECOVERY_REMOVE, adev);
+ bnxt_re_update_en_info_rdev(NULL, en_info, adev);
mutex_unlock(&bnxt_re_mutex);
return 0;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index be5d907a036b..711990232de1 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -547,6 +547,14 @@ static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
}
+static inline int bnxt_ext_stats_supported(struct bnxt_qplib_chip_ctx *ctx,
+ u16 flags, bool virtfn)
+{
+ /* ext stats are supported if the cap flag is set AND the function is a PF OR a Thor2 VF */
+ return (_is_ext_stats_supported(flags) &&
+ ((virtfn && bnxt_qplib_is_chip_gen_p7(ctx)) || (!virtfn)));
+}
+
static inline bool _is_hw_retx_supported(u16 dev_cap_flags)
{
return dev_cap_flags &
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index dded339802b3..160e8927d364 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1286,10 +1286,8 @@ static u32 hns_roce_cmdq_tx_timeout(u16 opcode, u32 tx_timeout)
return tx_timeout;
}
-static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
+static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u32 tx_timeout)
{
- struct hns_roce_v2_priv *priv = hr_dev->priv;
- u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
u32 timeout = 0;
do {
@@ -1299,8 +1297,9 @@ static void hns_roce_wait_csq_done(struct hns_roce_dev *hr_dev, u16 opcode)
} while (++timeout < tx_timeout);
}
-static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
- struct hns_roce_cmq_desc *desc, int num)
+static int __hns_roce_cmq_send_one(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc,
+ int num, u32 tx_timeout)
{
struct hns_roce_v2_priv *priv = hr_dev->priv;
struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
@@ -1309,8 +1308,6 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
int ret;
int i;
- spin_lock_bh(&csq->lock);
-
tail = csq->head;
for (i = 0; i < num; i++) {
@@ -1324,22 +1321,17 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_CNT]);
- hns_roce_wait_csq_done(hr_dev, le16_to_cpu(desc->opcode));
+ hns_roce_wait_csq_done(hr_dev, tx_timeout);
if (hns_roce_cmq_csq_done(hr_dev)) {
ret = 0;
for (i = 0; i < num; i++) {
/* check the result of hardware write back */
- desc[i] = csq->desc[tail++];
+ desc_ret = le16_to_cpu(csq->desc[tail++].retval);
if (tail == csq->desc_num)
tail = 0;
-
- desc_ret = le16_to_cpu(desc[i].retval);
if (likely(desc_ret == CMD_EXEC_SUCCESS))
continue;
- dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
- desc->opcode, desc_ret);
ret = hns_roce_cmd_err_convert_errno(desc_ret);
}
} else {
@@ -1354,14 +1346,54 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
ret = -EAGAIN;
}
- spin_unlock_bh(&csq->lock);
-
if (ret)
atomic64_inc(&hr_dev->dfx_cnt[HNS_ROCE_DFX_CMDS_ERR_CNT]);
return ret;
}
+static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
+ struct hns_roce_cmq_desc *desc, int num)
+{
+ struct hns_roce_v2_priv *priv = hr_dev->priv;
+ struct hns_roce_v2_cmq_ring *csq = &priv->cmq.csq;
+ u16 opcode = le16_to_cpu(desc->opcode);
+ u32 tx_timeout = hns_roce_cmdq_tx_timeout(opcode, priv->cmq.tx_timeout);
+ u8 try_cnt = HNS_ROCE_OPC_POST_MB_TRY_CNT;
+ u32 rsv_tail;
+ int ret;
+ int i;
+
+ while (try_cnt) {
+ try_cnt--;
+
+ spin_lock_bh(&csq->lock);
+ rsv_tail = csq->head;
+ ret = __hns_roce_cmq_send_one(hr_dev, desc, num, tx_timeout);
+ if (opcode == HNS_ROCE_OPC_POST_MB && ret == -ETIME &&
+ try_cnt) {
+ spin_unlock_bh(&csq->lock);
+ mdelay(HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC);
+ continue;
+ }
+
+ for (i = 0; i < num; i++) {
+ desc[i] = csq->desc[rsv_tail++];
+ if (rsv_tail == csq->desc_num)
+ rsv_tail = 0;
+ }
+ spin_unlock_bh(&csq->lock);
+ break;
+ }
+
+ if (ret)
+ dev_err_ratelimited(hr_dev->dev,
+ "Cmdq IO error, opcode = 0x%x, return = %d.\n",
+ opcode, ret);
+
+ return ret;
+}
+
static int hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
struct hns_roce_cmq_desc *desc, int num)
{
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
index cbdbc9edbce6..91a5665465ff 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h
@@ -230,6 +230,8 @@ enum hns_roce_opcode_type {
};
#define HNS_ROCE_OPC_POST_MB_TIMEOUT 35000
+#define HNS_ROCE_OPC_POST_MB_TRY_CNT 8
+#define HNS_ROCE_OPC_POST_MB_RETRY_GAP_MSEC 5
struct hns_roce_cmdq_tx_timeout_map {
u16 opcode;
u32 tx_timeout;
diff --git a/drivers/infiniband/hw/mana/main.c b/drivers/infiniband/hw/mana/main.c
index 67c2d43135a8..457cea6d9909 100644
--- a/drivers/infiniband/hw/mana/main.c
+++ b/drivers/infiniband/hw/mana/main.c
@@ -174,7 +174,7 @@ static int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
req.num_resources = 1;
- req.alignment = 1;
+ req.alignment = PAGE_SIZE / MANA_PAGE_SIZE;
/* Have GDMA start searching from 0 */
req.allocated_resources = 0;
diff --git a/drivers/infiniband/hw/mlx5/ah.c b/drivers/infiniband/hw/mlx5/ah.c
index 505bc47fd575..99036afb3aef 100644
--- a/drivers/infiniband/hw/mlx5/ah.c
+++ b/drivers/infiniband/hw/mlx5/ah.c
@@ -67,7 +67,8 @@ static void create_ib_ah(struct mlx5_ib_dev *dev, struct mlx5_ib_ah *ah,
ah->av.tclass = grh->traffic_class;
}
- ah->av.stat_rate_sl = (rdma_ah_get_static_rate(ah_attr) << 4);
+ ah->av.stat_rate_sl =
+ (mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah_attr)) << 4);
if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (init_attr->xmit_slave)
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 4f6c1968a2ee..81cfa74147a1 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -546,6 +546,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
+ bool new = false;
int err;
if (!counter->id) {
@@ -560,6 +561,7 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return err;
counter->id =
MLX5_GET(alloc_q_counter_out, out, counter_set_id);
+ new = true;
}
err = mlx5_ib_qp_set_counter(qp, counter);
@@ -569,8 +571,10 @@ static int mlx5_ib_counter_bind_qp(struct rdma_counter *counter,
return 0;
fail_set_counter:
- mlx5_ib_counter_dealloc(counter);
- counter->id = 0;
+ if (new) {
+ mlx5_ib_counter_dealloc(counter);
+ counter->id = 0;
+ }
return err;
}
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index bb02b6adbf2c..753faa9ad06a 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1550,7 +1550,7 @@ static void mlx5_ib_dmabuf_invalidate_cb(struct dma_buf_attachment *attach)
dma_resv_assert_held(umem_dmabuf->attach->dmabuf->resv);
- if (!umem_dmabuf->sgt)
+ if (!umem_dmabuf->sgt || !mr)
return;
mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
@@ -1935,7 +1935,8 @@ err:
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
- if (!mr->umem && !mr->data_direct && mr->descs) {
+ if (!mr->umem && !mr->data_direct &&
+ mr->ibmr.type != IB_MR_TYPE_DM && mr->descs) {
struct ib_device *device = mr->ibmr.device;
int size = mr->max_descs * mr->desc_size;
struct mlx5_ib_dev *dev = to_mdev(device);
@@ -2022,11 +2023,16 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr)
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
struct mlx5_cache_ent *ent = mr->mmkey.cache_ent;
bool is_odp = is_odp_mr(mr);
+ bool is_odp_dma_buf = is_dmabuf_mr(mr) &&
+ !to_ib_umem_dmabuf(mr->umem)->pinned;
int ret = 0;
if (is_odp)
mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex);
+ if (is_odp_dma_buf)
+ dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL);
+
if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) {
ent = mr->mmkey.cache_ent;
/* upon storing to a clean temp entry - schedule its cleanup */
@@ -2054,6 +2060,12 @@ out:
mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex);
}
+ if (is_odp_dma_buf) {
+ if (!ret)
+ to_ib_umem_dmabuf(mr->umem)->private = NULL;
+ dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv);
+ }
+
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index f1e23583e6c0..e77c9280c07e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -242,6 +242,7 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr)
if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_KERNEL) !=
mr) {
xa_unlock(&imr->implicit_children);
+ mlx5r_deref_odp_mkey(&imr->mmkey);
return;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a43eba9d3572..88724d15705d 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3447,11 +3447,11 @@ static int ib_to_mlx5_rate_map(u8 rate)
return 0;
}
-static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate)
{
u32 stat_rate_support;
- if (rate == IB_RATE_PORT_CURRENT)
+ if (rate == IB_RATE_PORT_CURRENT || rate == IB_RATE_800_GBPS)
return 0;
if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_800_GBPS)
@@ -3596,7 +3596,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
sizeof(grh->dgid.raw));
}
- err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
+ err = mlx5r_ib_rate(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
MLX5_SET(ads, path, stat_rate, err);
@@ -4579,6 +4579,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
set_id = mlx5_ib_get_counters_id(dev, attr->port_num - 1);
MLX5_SET(dctc, dctc, counter_set_id, set_id);
+
+ qp->port = attr->port_num;
} else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
struct mlx5_ib_modify_qp_resp resp = {};
u32 out[MLX5_ST_SZ_DW(create_dct_out)] = {};
@@ -5074,7 +5076,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
}
if (qp_attr_mask & IB_QP_PORT)
- qp_attr->port_num = MLX5_GET(dctc, dctc, port);
+ qp_attr->port_num = mqp->port;
if (qp_attr_mask & IB_QP_MIN_RNR_TIMER)
qp_attr->min_rnr_timer = MLX5_GET(dctc, dctc, min_rnr_nak);
if (qp_attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx5/qp.h b/drivers/infiniband/hw/mlx5/qp.h
index b6ee7c3ee1ca..2530e7730635 100644
--- a/drivers/infiniband/hw/mlx5/qp.h
+++ b/drivers/infiniband/hw/mlx5/qp.h
@@ -56,4 +56,5 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter);
int mlx5_ib_qp_event_init(void);
void mlx5_ib_qp_event_cleanup(void);
+int mlx5r_ib_rate(struct mlx5_ib_dev *dev, u8 rate);
#endif /* _MLX5_IB_QP_H */
diff --git a/drivers/infiniband/hw/mlx5/umr.c b/drivers/infiniband/hw/mlx5/umr.c
index 887fd6fa3ba9..793f3c5c4d01 100644
--- a/drivers/infiniband/hw/mlx5/umr.c
+++ b/drivers/infiniband/hw/mlx5/umr.c
@@ -231,30 +231,6 @@ void mlx5r_umr_cleanup(struct mlx5_ib_dev *dev)
ib_dealloc_pd(dev->umrc.pd);
}
-static int mlx5r_umr_recover(struct mlx5_ib_dev *dev)
-{
- struct umr_common *umrc = &dev->umrc;
- struct ib_qp_attr attr;
- int err;
-
- attr.qp_state = IB_QPS_RESET;
- err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
- if (err) {
- mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
- goto err;
- }
-
- err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
- if (err)
- goto err;
-
- umrc->state = MLX5_UMR_STATE_ACTIVE;
- return 0;
-
-err:
- umrc->state = MLX5_UMR_STATE_ERR;
- return err;
-}
static int mlx5r_umr_post_send(struct ib_qp *ibqp, u32 mkey, struct ib_cqe *cqe,
struct mlx5r_umr_wqe *wqe, bool with_data)
@@ -302,6 +278,61 @@ out:
return err;
}
+static int mlx5r_umr_recover(struct mlx5_ib_dev *dev, u32 mkey,
+ struct mlx5r_umr_context *umr_context,
+ struct mlx5r_umr_wqe *wqe, bool with_data)
+{
+ struct umr_common *umrc = &dev->umrc;
+ struct ib_qp_attr attr;
+ int err;
+
+ mutex_lock(&umrc->lock);
+ /* Prevent any further WRs from being sent now */
+ if (umrc->state != MLX5_UMR_STATE_RECOVER) {
+ mlx5_ib_warn(dev, "UMR recovery encountered an unexpected state=%d\n",
+ umrc->state);
+ umrc->state = MLX5_UMR_STATE_RECOVER;
+ }
+ mutex_unlock(&umrc->lock);
+
+ /* Send a final/barrier WR (the failed one) and wait for its completion.
+ * This ensures that all the previous WRs got a completion before
+ * we set the QP state to RESET.
+ */
+ err = mlx5r_umr_post_send(umrc->qp, mkey, &umr_context->cqe, wqe,
+ with_data);
+ if (err) {
+ mlx5_ib_warn(dev, "UMR recovery post send failed, err %d\n", err);
+ goto err;
+ }
+
+ /* Since the QP is in an error state, it will only receive
+ * IB_WC_WR_FLUSH_ERR. However, as it serves only as a barrier,
+ * we don't care about its status.
+ */
+ wait_for_completion(&umr_context->done);
+
+ attr.qp_state = IB_QPS_RESET;
+ err = ib_modify_qp(umrc->qp, &attr, IB_QP_STATE);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RESET, err=%d\n", err);
+ goto err;
+ }
+
+ err = mlx5r_umr_qp_rst2rts(dev, umrc->qp);
+ if (err) {
+ mlx5_ib_warn(dev, "Couldn't modify UMR QP to RTS, err=%d\n", err);
+ goto err;
+ }
+
+ umrc->state = MLX5_UMR_STATE_ACTIVE;
+ return 0;
+
+err:
+ umrc->state = MLX5_UMR_STATE_ERR;
+ return err;
+}
+
static void mlx5r_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct mlx5_ib_umr_context *context =
@@ -366,9 +397,7 @@ static int mlx5r_umr_post_send_wait(struct mlx5_ib_dev *dev, u32 mkey,
mlx5_ib_warn(dev,
"reg umr failed (%u). Trying to recover and resubmit the flushed WQEs, mkey = %u\n",
umr_context.status, mkey);
- mutex_lock(&umrc->lock);
- err = mlx5r_umr_recover(dev);
- mutex_unlock(&umrc->lock);
+ err = mlx5r_umr_recover(dev, mkey, &umr_context, wqe, with_data);
if (err)
mlx5_ib_warn(dev, "couldn't recover UMR, err %d\n",
err);
diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h
index 0bbda60d3cdc..23caea22f8dc 100644
--- a/drivers/iommu/amd/amd_iommu_types.h
+++ b/drivers/iommu/amd/amd_iommu_types.h
@@ -175,6 +175,7 @@
#define CONTROL_GAM_EN 25
#define CONTROL_GALOG_EN 28
#define CONTROL_GAINT_EN 29
+#define CONTROL_EPH_EN 45
#define CONTROL_XT_EN 50
#define CONTROL_INTCAPXT_EN 51
#define CONTROL_IRTCACHEDIS 59
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index c5cd92edada0..cb536d372b12 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2653,6 +2653,10 @@ static void iommu_init_flags(struct amd_iommu *iommu)
/* Set IOTLB invalidation timeout to 1s */
iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
+
+ /* Enable Enhanced Peripheral Page Request Handling */
+ if (check_feature(FEATURE_EPHSUP))
+ iommu_feature_enable(iommu, CONTROL_EPH_EN);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -3194,7 +3198,7 @@ out:
return true;
}
-static void iommu_snp_enable(void)
+static __init void iommu_snp_enable(void)
{
#ifdef CONFIG_KVM_AMD_SEV
if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
@@ -3219,6 +3223,14 @@ static void iommu_snp_enable(void)
goto disable_snp;
}
+ /*
+ * Enable host SNP support once SNP support is checked on IOMMU.
+ */
+ if (snp_rmptable_init()) {
+ pr_warn("SNP: RMP initialization failed, SNP cannot be supported.\n");
+ goto disable_snp;
+ }
+
pr_info("IOMMU SNP support enabled.\n");
return;
@@ -3318,6 +3330,19 @@ static int __init iommu_go_to_state(enum iommu_init_state state)
ret = state_next();
}
+ /*
+ * SNP platform initialization requires IOMMUs to be fully configured.
+ * If the SNP support on IOMMUs has NOT been checked, simply mark SNP
+ * as unsupported. If the SNP support on IOMMUs has been checked and
+ * host SNP support enabled but RMP enforcement has not been enabled
+ * in IOMMUs, then the system is in a half-baked state, but can limp
+ * along as all memory should be Hypervisor-Owned in the RMP. WARN,
+ * but leave SNP as "supported" to avoid confusing the kernel.
+ */
+ if (ret && cc_platform_has(CC_ATTR_HOST_SEV_SNP) &&
+ !WARN_ON_ONCE(amd_iommu_snp_en))
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
+
return ret;
}
@@ -3426,18 +3451,23 @@ void __init amd_iommu_detect(void)
int ret;
if (no_iommu || (iommu_detected && !gart_iommu_aperture))
- return;
+ goto disable_snp;
if (!amd_iommu_sme_check())
- return;
+ goto disable_snp;
ret = iommu_go_to_state(IOMMU_IVRS_DETECTED);
if (ret)
- return;
+ goto disable_snp;
amd_iommu_detected = true;
iommu_detected = 1;
x86_init.iommu.iommu_init = amd_iommu_init;
+ return;
+
+disable_snp:
+ if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
+ cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
}
/****************************************************************************
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index b48a72bd7b23..cd5116d8c3b2 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2043,12 +2043,12 @@ static void set_dte_entry(struct amd_iommu *iommu,
make_clear_dte(dev_data, dte, &new);
if (domain->iop.mode != PAGE_MODE_NONE)
- new.data[0] = iommu_virt_to_phys(domain->iop.root);
+ new.data[0] |= iommu_virt_to_phys(domain->iop.root);
new.data[0] |= (domain->iop.mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
- new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW | DTE_FLAG_V;
+ new.data[0] |= DTE_FLAG_IR | DTE_FLAG_IW;
/*
* When SNP is enabled, we can only support TV=1 with non-zero domain ID.
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index c666ecab955d..69e23e017d9e 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -249,7 +249,7 @@ struct exynos_iommu_domain {
struct list_head clients; /* list of sysmmu_drvdata.domain_node */
sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
short *lv2entcnt; /* free lv2 entry counter for each section */
- spinlock_t lock; /* lock for modyfying list of clients */
+ spinlock_t lock; /* lock for modifying list of clients */
spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
struct iommu_domain domain; /* generic domain data structure */
};
@@ -292,7 +292,7 @@ struct sysmmu_drvdata {
struct clk *aclk; /* SYSMMU's aclk clock */
struct clk *pclk; /* SYSMMU's pclk clock */
struct clk *clk_master; /* master's device clock */
- spinlock_t lock; /* lock for modyfying state */
+ spinlock_t lock; /* lock for modifying state */
bool active; /* current status */
struct exynos_iommu_domain *domain; /* domain we belong to */
struct list_head domain_node; /* node for domain clients list */
@@ -746,7 +746,7 @@ static int exynos_sysmmu_probe(struct platform_device *pdev)
ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
dev_name(dev), data);
if (ret) {
- dev_err(dev, "Unabled to register handler of irq %d\n", irq);
+ dev_err(dev, "Unable to register handler of irq %d\n", irq);
return ret;
}
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 9f424acf474e..e540092d664d 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -2043,6 +2043,7 @@ int enable_drhd_fault_handling(unsigned int cpu)
/*
* Enable fault control interrupt.
*/
+ guard(rwsem_read)(&dmar_global_lock);
for_each_iommu(iommu, drhd) {
u32 fault_status;
int ret;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index cc46098f875b..bf1f0c814348 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3146,7 +3146,14 @@ int __init intel_iommu_init(void)
iommu_device_sysfs_add(&iommu->iommu, NULL,
intel_iommu_groups,
"%s", iommu->name);
+ /*
+ * The iommu device probe is protected by the iommu_probe_device_lock.
+ * Release the dmar_global_lock before entering the device probe path
+ * to avoid unnecessary lock order splat.
+ */
+ up_read(&dmar_global_lock);
iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
+ down_read(&dmar_global_lock);
iommu_pmu_register(iommu);
}
@@ -4378,9 +4385,6 @@ static int context_setup_pass_through_cb(struct pci_dev *pdev, u16 alias, void *
{
struct device *dev = data;
- if (dev != &pdev->dev)
- return 0;
-
return context_setup_pass_through(dev, PCI_BUS_NUM(alias), alias & 0xff);
}
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index c2d792db52c3..064194399b38 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -87,7 +87,9 @@ prq_retry:
struct page_req_dsc *req;
req = &iommu->prq[head / sizeof(*req)];
- if (!req->pasid_present || req->pasid != pasid) {
+ if (req->rid != sid ||
+ (req->pasid_present && pasid != req->pasid) ||
+ (!req->pasid_present && pasid != IOMMU_NO_PASID)) {
head = (head + sizeof(*req)) & PRQ_RING_MASK;
continue;
}
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 4674e618797c..8b5926c1452e 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -478,6 +478,7 @@ void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
ops->page_response(dev, iopf, &resp);
list_del_init(&group->pending_node);
+ iopf_free_group(group);
}
mutex_unlock(&fault_param->lock);
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 870c3cdbd0f6..60aed01e54f2 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1756,7 +1756,7 @@ static int iommu_get_def_domain_type(struct iommu_group *group,
group->id);
/*
- * Try to recover, drivers are allowed to force IDENITY or DMA, IDENTITY
+ * Try to recover, drivers are allowed to force IDENTITY or DMA, IDENTITY
* takes precedence.
*/
if (type == IOMMU_DOMAIN_IDENTITY)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index be063bfb50c4..c11b9965c4ad 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -169,6 +169,7 @@ config IXP4XX_IRQ
config LAN966X_OIC
tristate "Microchip LAN966x OIC Support"
+ depends on MCHP_LAN966X_PCI || COMPILE_TEST
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
help
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index da5250f0155c..2b1684c60e3c 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -577,7 +577,8 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
}
- if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
+ if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
+ (FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
int irq;
if (cpumask_test_cpu(smp_processor_id(),
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 76dce0aac246..270d7a4d85a6 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -44,6 +44,7 @@ static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
+#define FLAGS_WORKAROUND_INSECURE (1ULL << 3)
#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
@@ -83,6 +84,8 @@ static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+static bool nmi_support_forbidden;
+
/*
* There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
* are potentially stolen by the secure side. Some code, especially code dealing
@@ -163,21 +166,27 @@ static void __init gic_prio_init(void)
{
bool ds;
- ds = gic_dist_security_disabled();
- if (!ds) {
- u32 val;
-
- val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
- val |= GICD_CTLR_DS;
- writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+ cpus_have_group0 = gic_has_group0();
- ds = gic_dist_security_disabled();
- if (ds)
- pr_warn("Broken GIC integration, security disabled");
+ ds = gic_dist_security_disabled();
+ if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
+ if (cpus_have_group0) {
+ u32 val;
+
+ val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
+ val |= GICD_CTLR_DS;
+ writel_relaxed(val, gic_data.dist_base + GICD_CTLR);
+
+ ds = gic_dist_security_disabled();
+ if (ds)
+ pr_warn("Broken GIC integration, security disabled\n");
+ } else {
+ pr_warn("Broken GIC integration, pNMI forbidden\n");
+ nmi_support_forbidden = true;
+ }
}
cpus_have_security_disabled = ds;
- cpus_have_group0 = gic_has_group0();
/*
* How priority values are used by the GIC depends on two things:
@@ -209,7 +218,7 @@ static void __init gic_prio_init(void)
* be in the non-secure range, we program the non-secure values into
* the distributor to match the PMR values we want.
*/
- if (cpus_have_group0 & !cpus_have_security_disabled) {
+ if (cpus_have_group0 && !cpus_have_security_disabled) {
dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
}
@@ -1922,6 +1931,18 @@ static bool gic_enable_quirk_arm64_2941627(void *data)
return true;
}
+static bool gic_enable_quirk_rk3399(void *data)
+{
+ struct gic_chip_data *d = data;
+
+ if (of_machine_is_compatible("rockchip,rk3399")) {
+ d->flags |= FLAGS_WORKAROUND_INSECURE;
+ return true;
+ }
+
+ return false;
+}
+
static bool rd_set_non_coherent(void *data)
{
struct gic_chip_data *d = data;
@@ -1997,6 +2018,12 @@ static const struct gic_quirk gic_quirks[] = {
.init = rd_set_non_coherent,
},
{
+ .desc = "GICv3: Insecure RK3399 integration",
+ .iidr = 0x0000043b,
+ .mask = 0xff000fff,
+ .init = gic_enable_quirk_rk3399,
+ },
+ {
}
};
@@ -2004,7 +2031,7 @@ static void gic_enable_nmi_support(void)
{
int i;
- if (!gic_prio_masking_enabled())
+ if (!gic_prio_masking_enabled() || nmi_support_forbidden)
return;
rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
index b9dcc8e78c75..1f613eb7b7f0 100644
--- a/drivers/irqchip/irq-jcore-aic.c
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -38,7 +38,7 @@ static struct irq_chip jcore_aic;
static void handle_jcore_irq(struct irq_desc *desc)
{
if (irqd_is_per_cpu(irq_desc_get_irq_data(desc)))
- handle_percpu_irq(desc);
+ handle_percpu_devid_irq(desc);
else
handle_simple_irq(desc);
}
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index b337f6c05f18..4eebed39880a 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -68,7 +68,8 @@ static int mvebu_icu_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
unsigned long *hwirq, unsigned int *type)
{
unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2;
- struct mvebu_icu_msi_data *msi_data = d->host_data;
+ struct msi_domain_info *info = d->host_data;
+ struct mvebu_icu_msi_data *msi_data = info->chip_data;
struct mvebu_icu *icu = msi_data->icu;
/* Check the count of the parameters in dt */
diff --git a/drivers/irqchip/irq-partition-percpu.c b/drivers/irqchip/irq-partition-percpu.c
index 8e76d2913e6b..4441ffe149ea 100644
--- a/drivers/irqchip/irq-partition-percpu.c
+++ b/drivers/irqchip/irq-partition-percpu.c
@@ -98,7 +98,7 @@ static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);
- seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
+ seq_printf(p, "%5s-%lu", chip->name, data->hwirq);
}
static struct irq_chip partition_irq_chip = {
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index c5c2e6929a2f..275df5005705 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -27,7 +27,7 @@ static void imsic_ipi_send(unsigned int cpu)
{
struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);
- writel_relaxed(IMSIC_IPI_ID, local->msi_va);
+ writel(IMSIC_IPI_ID, local->msi_va);
}
static void imsic_ipi_starting_cpu(void)
diff --git a/drivers/irqchip/irq-thead-c900-aclint-sswi.c b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
index b0e366ade427..8ff6e7a1363b 100644
--- a/drivers/irqchip/irq-thead-c900-aclint-sswi.c
+++ b/drivers/irqchip/irq-thead-c900-aclint-sswi.c
@@ -31,7 +31,7 @@ static DEFINE_PER_CPU(void __iomem *, sswi_cpu_regs);
static void thead_aclint_sswi_ipi_send(unsigned int cpu)
{
- writel_relaxed(0x1, per_cpu(sswi_cpu_regs, cpu));
+ writel(0x1, per_cpu(sswi_cpu_regs, cpu));
}
static void thead_aclint_sswi_ipi_clear(void)
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index 74b2f124116e..52d77546aacb 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -21,9 +21,11 @@
#include <linux/types.h>
#define PDC_MAX_GPIO_IRQS 256
+#define PDC_DRV_OFFSET 0x10000
/* Valid only on HW version < 3.2 */
#define IRQ_ENABLE_BANK 0x10
+#define IRQ_ENABLE_BANK_MAX (IRQ_ENABLE_BANK + BITS_TO_BYTES(PDC_MAX_GPIO_IRQS))
#define IRQ_i_CFG 0x110
/* Valid only on HW version >= 3.2 */
@@ -46,13 +48,20 @@ struct pdc_pin_region {
static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
+static void __iomem *pdc_prev_base;
static struct pdc_pin_region *pdc_region;
static int pdc_region_cnt;
static unsigned int pdc_version;
+static bool pdc_x1e_quirk;
+
+static void pdc_base_reg_write(void __iomem *base, int reg, u32 i, u32 val)
+{
+ writel_relaxed(val, base + reg + i * sizeof(u32));
+}
static void pdc_reg_write(int reg, u32 i, u32 val)
{
- writel_relaxed(val, pdc_base + reg + i * sizeof(u32));
+ pdc_base_reg_write(pdc_base, reg, i, val);
}
static u32 pdc_reg_read(int reg, u32 i)
@@ -60,6 +69,34 @@ static u32 pdc_reg_read(int reg, u32 i)
return readl_relaxed(pdc_base + reg + i * sizeof(u32));
}
+static void pdc_x1e_irq_enable_write(u32 bank, u32 enable)
+{
+ void __iomem *base;
+
+ /* Remap the write access to work around a hardware bug on X1E */
+ switch (bank) {
+ case 0 ... 1:
+ /* Use previous DRV (client) region and shift to bank 3-4 */
+ base = pdc_prev_base;
+ bank += 3;
+ break;
+ case 2 ... 4:
+ /* Use our own region and shift to bank 0-2 */
+ base = pdc_base;
+ bank -= 2;
+ break;
+ case 5:
+ /* No fixup required for bank 5 */
+ base = pdc_base;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ pdc_base_reg_write(base, IRQ_ENABLE_BANK, bank, enable);
+}
+
static void __pdc_enable_intr(int pin_out, bool on)
{
unsigned long enable;
@@ -72,7 +109,11 @@ static void __pdc_enable_intr(int pin_out, bool on)
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
__assign_bit(mask, &enable, on);
- pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
+
+ if (pdc_x1e_quirk)
+ pdc_x1e_irq_enable_write(index, enable);
+ else
+ pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
} else {
enable = pdc_reg_read(IRQ_i_CFG, pin_out);
__assign_bit(IRQ_i_CFG_IRQ_ENABLE, &enable, on);
@@ -324,10 +365,29 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
if (res_size > resource_size(&res))
pr_warn("%pOF: invalid reg size, please fix DT\n", node);
+ /*
+ * PDC has multiple DRV regions, each one provides the same set of
+ * registers for a particular client in the system. Due to a hardware
+ * bug on X1E, some writes to the IRQ_ENABLE_BANK register must be
+ * issued inside the previous region. This region belongs to
+ * a different client and is not described in the device tree. Map the
+ * region with the expected offset to preserve support for old DTs.
+ */
+ if (of_device_is_compatible(node, "qcom,x1e80100-pdc")) {
+ pdc_prev_base = ioremap(res.start - PDC_DRV_OFFSET, IRQ_ENABLE_BANK_MAX);
+ if (!pdc_prev_base) {
+ pr_err("%pOF: unable to map previous PDC DRV region\n", node);
+ return -ENXIO;
+ }
+
+ pdc_x1e_quirk = true;
+ }
+
pdc_base = ioremap(res.start, res_size);
if (!pdc_base) {
pr_err("%pOF: unable to map PDC registers\n", node);
- return -ENXIO;
+ ret = -ENXIO;
+ goto fail;
}
pdc_version = pdc_reg_read(PDC_VERSION_REG, 0);
@@ -363,6 +423,7 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
fail:
kfree(pdc_region);
iounmap(pdc_base);
+ iounmap(pdc_prev_base);
return ret;
}
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index ee9f7cecd78e..c45464b6576a 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -3790,20 +3790,18 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
break;
case STATUSTYPE_TABLE: {
- __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
-
- watermark_percentage += ic->journal_entries / 2;
- do_div(watermark_percentage, ic->journal_entries);
- arg_count = 3;
+ arg_count = 1; /* buffer_sectors */
arg_count += !!ic->meta_dev;
arg_count += ic->sectors_per_block != 1;
arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
arg_count += ic->reset_recalculate_flag;
arg_count += ic->discard;
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'J';
- arg_count += ic->mode == 'B';
- arg_count += ic->mode == 'B';
+ arg_count += ic->mode != 'I'; /* interleave_sectors */
+ arg_count += ic->mode == 'J'; /* journal_sectors */
+ arg_count += ic->mode == 'J'; /* journal_watermark */
+ arg_count += ic->mode == 'J'; /* commit_time */
+ arg_count += ic->mode == 'B'; /* sectors_per_bit */
+ arg_count += ic->mode == 'B'; /* bitmap_flush_interval */
arg_count += !!ic->internal_hash_alg.alg_string;
arg_count += !!ic->journal_crypt_alg.alg_string;
arg_count += !!ic->journal_mac_alg.alg_string;
@@ -3822,10 +3820,15 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" reset_recalculate");
if (ic->discard)
DMEMIT(" allow_discards");
- DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
- DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
+ if (ic->mode != 'I')
+ DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
if (ic->mode == 'J') {
+ __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
+
+ watermark_percentage += ic->journal_entries / 2;
+ do_div(watermark_percentage, ic->journal_entries);
+ DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" journal_watermark:%u", (unsigned int)watermark_percentage);
DMEMIT(" commit_time:%u", ic->autocommit_msec);
}
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index b6f8e2dc7729..3f3d29af1be4 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -2178,6 +2178,7 @@ static int initialize_index(struct vdo *vdo, struct hash_zones *zones)
vdo_set_dedupe_index_timeout_interval(vdo_dedupe_index_timeout_interval);
vdo_set_dedupe_index_min_timer_interval(vdo_dedupe_index_min_timer_interval);
+ spin_lock_init(&zones->lock);
/*
* Since we will save up the timeouts that would have been reported but were ratelimited,
diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c
index a382929ce7ba..369aed044b40 100644
--- a/drivers/md/md-linear.c
+++ b/drivers/md/md-linear.c
@@ -76,10 +76,8 @@ static int linear_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = mddev->chunk_sectors;
lim.io_min = mddev->chunk_sectors << 9;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 8fc9339b00c7..70bcc3cdf2cd 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -386,10 +386,8 @@ static int raid0_set_limits(struct mddev *mddev)
lim.io_opt = lim.io_min * mddev->raid_disks;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 9d57a88dbd26..10ea3af40991 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -3219,10 +3219,8 @@ static int raid1_set_limits(struct mddev *mddev)
lim.max_write_zeroes_sectors = 0;
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index efe93b979167..15b9ae5bf84d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4020,10 +4020,8 @@ static int raid10_set_queue_limits(struct mddev *mddev)
lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
lim.features |= BLK_FEAT_ATOMIC_WRITES;
err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
- if (err) {
- queue_limits_cancel_update(mddev->gendisk->queue);
+ if (err)
return err;
- }
return queue_limits_set(mddev->gendisk->queue, &lim);
}
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c
index 226915ca3c93..aa4a9940b569 100644
--- a/drivers/mfd/syscon.c
+++ b/drivers/mfd/syscon.c
@@ -159,6 +159,7 @@ err_regmap:
}
static struct regmap *device_node_get_regmap(struct device_node *np,
+ bool create_regmap,
bool check_res)
{
struct syscon *entry, *syscon = NULL;
@@ -172,7 +173,7 @@ static struct regmap *device_node_get_regmap(struct device_node *np,
}
if (!syscon) {
- if (of_device_is_compatible(np, "syscon"))
+ if (create_regmap)
syscon = of_syscon_register(np, check_res);
else
syscon = ERR_PTR(-EINVAL);
@@ -233,15 +234,37 @@ err_unlock:
}
EXPORT_SYMBOL_GPL(of_syscon_register_regmap);
+/**
+ * device_node_to_regmap() - Get or create a regmap for specified device node
+ * @np: Device tree node
+ *
+ * Get a regmap for the specified device node. If there's not an existing
+ * regmap, then one is instantiated. This function should not be used if the
+ * device node has a custom regmap driver or has resources (clocks, resets) to
+ * be managed. Use syscon_node_to_regmap() instead for those cases.
+ *
+ * Return: regmap ptr on success, negative error code on failure.
+ */
struct regmap *device_node_to_regmap(struct device_node *np)
{
- return device_node_get_regmap(np, false);
+ return device_node_get_regmap(np, true, false);
}
EXPORT_SYMBOL_GPL(device_node_to_regmap);
+/**
+ * syscon_node_to_regmap() - Get or create a regmap for specified syscon device node
+ * @np: Device tree node
+ *
+ * Get a regmap for the specified device node. If there's not an existing
+ * regmap, then one is instantiated if the node is a generic "syscon". This
+ * function is safe to use for a syscon registered with
+ * of_syscon_register_regmap().
+ *
+ * Return: regmap ptr on success, negative error code on failure.
+ */
struct regmap *syscon_node_to_regmap(struct device_node *np)
{
- return device_node_get_regmap(np, true);
+ return device_node_get_regmap(np, of_device_is_compatible(np, "syscon"), true);
}
EXPORT_SYMBOL_GPL(syscon_node_to_regmap);
diff --git a/drivers/misc/cardreader/rtsx_usb.c b/drivers/misc/cardreader/rtsx_usb.c
index e0174da5e9fc..77b0490a1b38 100644
--- a/drivers/misc/cardreader/rtsx_usb.c
+++ b/drivers/misc/cardreader/rtsx_usb.c
@@ -286,7 +286,6 @@ static int rtsx_usb_get_status_with_bulk(struct rtsx_ucr *ucr, u16 *status)
int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
{
int ret;
- u8 interrupt_val = 0;
u16 *buf;
if (!status)
@@ -309,20 +308,6 @@ int rtsx_usb_get_card_status(struct rtsx_ucr *ucr, u16 *status)
ret = rtsx_usb_get_status_with_bulk(ucr, status);
}
- rtsx_usb_read_register(ucr, CARD_INT_PEND, &interrupt_val);
- /* Cross check presence with interrupts */
- if (*status & XD_CD)
- if (!(interrupt_val & XD_INT))
- *status &= ~XD_CD;
-
- if (*status & SD_CD)
- if (!(interrupt_val & SD_INT))
- *status &= ~SD_CD;
-
- if (*status & MS_CD)
- if (!(interrupt_val & MS_INT))
- *status &= ~MS_CD;
-
/* usb_control_msg may return positive when success */
if (ret < 0)
return ret;
diff --git a/drivers/misc/eeprom/digsy_mtc_eeprom.c b/drivers/misc/eeprom/digsy_mtc_eeprom.c
index 88888485e6f8..ee58f7ce5bfa 100644
--- a/drivers/misc/eeprom/digsy_mtc_eeprom.c
+++ b/drivers/misc/eeprom/digsy_mtc_eeprom.c
@@ -50,7 +50,7 @@ static struct platform_device digsy_mtc_eeprom = {
};
static struct gpiod_lookup_table eeprom_spi_gpiod_table = {
- .dev_id = "spi_gpio",
+ .dev_id = "spi_gpio.1",
.table = {
GPIO_LOOKUP("gpio@b00", GPIO_EEPROM_CLK,
"sck", GPIO_ACTIVE_HIGH),
diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c
index 89224d4af4a2..e13f9fdd9d7b 100644
--- a/drivers/misc/eeprom/ee1004.c
+++ b/drivers/misc/eeprom/ee1004.c
@@ -304,6 +304,10 @@ static int ee1004_probe(struct i2c_client *client)
I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA))
return -EPFNOSUPPORT;
+ err = i2c_smbus_read_byte(client);
+ if (err < 0)
+ return -ENODEV;
+
mutex_lock(&ee1004_bus_lock);
err = ee1004_init_bus_data(client);
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 718ec5d81d94..67176caf5416 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -324,28 +324,6 @@ ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
/**
- * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
- *
- * @cldev: me client device
- * @buf: buffer to receive
- * @length: buffer length
- * @vtag: virtual tag
- *
- * Return:
- * * read size in bytes
- * * -EAGAIN if function will block.
- * * < 0 on other error
- */
-ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
- size_t length, u8 *vtag)
-{
- struct mei_cl *cl = cldev->cl;
-
- return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
-}
-EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
-
-/**
* mei_cldev_recv_timeout - client receive with timeout (read)
*
* @cldev: me client device
@@ -439,23 +417,6 @@ ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
EXPORT_SYMBOL_GPL(mei_cldev_recv);
/**
- * mei_cldev_recv_nonblock - non block client receive (read)
- *
- * @cldev: me client device
- * @buf: buffer to receive
- * @length: buffer length
- *
- * Return: read size in bytes of < 0 on error
- * -EAGAIN if function will block.
- */
-ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
- size_t length)
-{
- return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
-}
-EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
-
-/**
* mei_cl_bus_rx_work - dispatch rx event for a bus device
*
* @work: work
@@ -641,19 +602,6 @@ void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
/**
- * mei_cldev_uuid - return uuid of the underlying me client
- *
- * @cldev: mei client device
- *
- * Return: me client uuid
- */
-const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
-{
- return mei_me_cl_uuid(cldev->me_cl);
-}
-EXPORT_SYMBOL_GPL(mei_cldev_uuid);
-
-/**
* mei_cldev_ver - return protocol version of the underlying me client
*
* @cldev: mei client device
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index be011cef12e5..3db07d2a881f 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -272,28 +272,6 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
}
/**
- * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
- *
- * @dev: the device structure
- * @uuid: me client uuid
- * @id: me client id
- *
- * Locking: called under "dev->device_lock" lock
- */
-void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
-{
- struct mei_me_client *me_cl;
-
- dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
-
- down_write(&dev->me_clients_rwsem);
- me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
- __mei_me_cl_del(dev, me_cl);
- mei_me_cl_put(me_cl);
- up_write(&dev->me_clients_rwsem);
-}
-
-/**
* mei_me_cl_rm_all - remove all me clients
*
* @dev: the device structure
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 9052860bcfe0..01ed26a148c4 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -29,8 +29,6 @@ struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id);
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
const uuid_le *uuid, u8 client_id);
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid);
-void mei_me_cl_rm_by_uuid_id(struct mei_device *dev,
- const uuid_le *uuid, u8 id);
void mei_me_cl_rm_all(struct mei_device *dev);
/**
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c3a6657dcd4a..a5f88ec97df7 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,6 +117,8 @@
#define MEI_DEV_ID_LNL_M 0xA870 /* Lunar Lake Point M */
+#define MEI_DEV_ID_PTL_P 0xE470 /* Panther Lake P */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index 5d0f68b95c29..e9476f9ae25d 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -1209,48 +1209,3 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
return dev;
}
-
-/**
- * mei_txe_setup_satt2 - SATT2 configuration for DMA support.
- *
- * @dev: the device structure
- * @addr: physical address start of the range
- * @range: physical range size
- *
- * Return: 0 on success an error code otherwise
- */
-int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range)
-{
- struct mei_txe_hw *hw = to_txe_hw(dev);
-
- u32 lo32 = lower_32_bits(addr);
- u32 hi32 = upper_32_bits(addr);
- u32 ctrl;
-
- /* SATT is limited to 36 Bits */
- if (hi32 & ~0xF)
- return -EINVAL;
-
- /* SATT has to be 16Byte aligned */
- if (lo32 & 0xF)
- return -EINVAL;
-
- /* SATT range has to be 4Bytes aligned */
- if (range & 0x4)
- return -EINVAL;
-
- /* SATT is limited to 32 MB range*/
- if (range > SATT_RANGE_MAX)
- return -EINVAL;
-
- ctrl = SATT2_CTRL_VALID_MSK;
- ctrl |= hi32 << SATT2_CTRL_BR_BASE_ADDR_REG_SHIFT;
-
- mei_txe_br_reg_write(hw, SATT2_SAP_SIZE_REG, range);
- mei_txe_br_reg_write(hw, SATT2_BRG_BA_LSB_REG, lo32);
- mei_txe_br_reg_write(hw, SATT2_CTRL_REG, ctrl);
- dev_dbg(dev->dev, "SATT2: SAP_SIZE_OFFSET=0x%08X, BRG_BA_LSB_OFFSET=0x%08X, CTRL_OFFSET=0x%08X\n",
- range, lo32, ctrl);
-
- return 0;
-}
diff --git a/drivers/misc/mei/hw-txe.h b/drivers/misc/mei/hw-txe.h
index 96511b04bf88..6790e646895d 100644
--- a/drivers/misc/mei/hw-txe.h
+++ b/drivers/misc/mei/hw-txe.h
@@ -59,7 +59,5 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id);
int mei_txe_aliveness_set_sync(struct mei_device *dev, u32 req);
-int mei_txe_setup_satt2(struct mei_device *dev, phys_addr_t addr, u32 range);
-
#endif /* _MEI_HW_TXE_H_ */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 6589635f8ba3..d6ff9d82ae94 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -124,6 +124,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_LNL_M, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_PTL_P, MEI_ME_PCH15_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c
index 35d349fee769..7be1649b1972 100644
--- a/drivers/misc/mei/vsc-tp.c
+++ b/drivers/misc/mei/vsc-tp.c
@@ -502,7 +502,7 @@ static int vsc_tp_probe(struct spi_device *spi)
if (ret)
return ret;
- tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN);
+ tp->wakeuphost = devm_gpiod_get(dev, "wakeuphostint", GPIOD_IN);
if (IS_ERR(tp->wakeuphost))
return PTR_ERR(tp->wakeuphost);
diff --git a/drivers/misc/ntsync.c b/drivers/misc/ntsync.c
index 055395cde42b..999026a1ae04 100644
--- a/drivers/misc/ntsync.c
+++ b/drivers/misc/ntsync.c
@@ -873,6 +873,7 @@ static int setup_wait(struct ntsync_device *dev,
{
int fds[NTSYNC_MAX_WAIT_COUNT + 1];
const __u32 count = args->count;
+ size_t size = array_size(count, sizeof(fds[0]));
struct ntsync_q *q;
__u32 total_count;
__u32 i, j;
@@ -880,15 +881,14 @@ static int setup_wait(struct ntsync_device *dev,
if (args->pad || (args->flags & ~NTSYNC_WAIT_REALTIME))
return -EINVAL;
- if (args->count > NTSYNC_MAX_WAIT_COUNT)
+ if (size >= sizeof(fds))
return -EINVAL;
total_count = count;
if (args->alert)
total_count++;
- if (copy_from_user(fds, u64_to_user_ptr(args->objs),
- array_size(count, sizeof(*fds))))
+ if (copy_from_user(fds, u64_to_user_ptr(args->objs), size))
return -EFAULT;
if (args->alert)
fds[count] = args->alert;
@@ -1208,6 +1208,7 @@ static struct miscdevice ntsync_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = NTSYNC_NAME,
.fops = &ntsync_fops,
+ .mode = 0666,
};
module_misc_device(ntsync_misc);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4b6e91372526..345ea91629e0 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -273,6 +273,7 @@
#define MSDC_PAD_TUNE_CMD2_SEL BIT(21) /* RW */
#define PAD_DS_TUNE_DLY_SEL BIT(0) /* RW */
+#define PAD_DS_TUNE_DLY2_SEL BIT(1) /* RW */
#define PAD_DS_TUNE_DLY1 GENMASK(6, 2) /* RW */
#define PAD_DS_TUNE_DLY2 GENMASK(11, 7) /* RW */
#define PAD_DS_TUNE_DLY3 GENMASK(16, 12) /* RW */
@@ -318,6 +319,7 @@
/* EMMC50_PAD_DS_TUNE mask */
#define PAD_DS_DLY_SEL BIT(16) /* RW */
+#define PAD_DS_DLY2_SEL BIT(15) /* RW */
#define PAD_DS_DLY1 GENMASK(14, 10) /* RW */
#define PAD_DS_DLY3 GENMASK(4, 0) /* RW */
@@ -2504,13 +2506,23 @@ tune_done:
static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct msdc_host *host = mmc_priv(mmc);
+
host->hs400_mode = true;
- if (host->top_base)
- writel(host->hs400_ds_delay,
- host->top_base + EMMC50_PAD_DS_TUNE);
- else
- writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ if (host->top_base) {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay,
+ host->top_base + EMMC50_PAD_DS_TUNE);
+ } else {
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ if (host->hs400_ds_delay)
+ writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE);
+ }
/* hs400 mode must set it to 0 */
sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS);
/* to improve read performance, set outstanding to 2 */
@@ -2530,14 +2542,11 @@ static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card
if (host->top_base) {
sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
PAD_DS_DLY_SEL);
- if (host->hs400_ds_dly3)
- sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
- PAD_DS_DLY3, host->hs400_ds_dly3);
+ sdr_clr_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY2_SEL);
} else {
sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
- if (host->hs400_ds_dly3)
- sdr_set_field(host->base + PAD_DS_TUNE,
- PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ sdr_clr_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY2_SEL);
}
host->hs400_tuning = true;
diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c
index b73f673db92b..f75c31815ab0 100644
--- a/drivers/mmc/host/sdhci_am654.c
+++ b/drivers/mmc/host/sdhci_am654.c
@@ -155,7 +155,6 @@ struct sdhci_am654_data {
u32 tuning_loop;
#define SDHCI_AM654_QUIRK_FORCE_CDTEST BIT(0)
-#define SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA BIT(1)
};
struct window {
@@ -357,29 +356,6 @@ static void sdhci_j721e_4bit_set_clock(struct sdhci_host *host,
sdhci_set_clock(host, clock);
}
-static int sdhci_am654_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios)
-{
- struct sdhci_host *host = mmc_priv(mmc);
- struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
- struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host);
- int ret;
-
- if ((sdhci_am654->quirks & SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA) &&
- ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
- if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = mmc_regulator_set_vqmmc(mmc, ios);
- if (ret < 0) {
- pr_err("%s: Switching to 1.8V signalling voltage failed,\n",
- mmc_hostname(mmc));
- return -EIO;
- }
- }
- return 0;
- }
-
- return sdhci_start_signal_voltage_switch(mmc, ios);
-}
-
static u8 sdhci_am654_write_power_on(struct sdhci_host *host, u8 val, int reg)
{
writeb(val, host->ioaddr + reg);
@@ -868,11 +844,6 @@ static int sdhci_am654_get_of_property(struct platform_device *pdev,
if (device_property_read_bool(dev, "ti,fails-without-test-cd"))
sdhci_am654->quirks |= SDHCI_AM654_QUIRK_FORCE_CDTEST;
- /* Suppress v1p8 ena for eMMC and SD with vqmmc supply */
- if (!!of_parse_phandle(dev->of_node, "vmmc-supply", 0) ==
- !!of_parse_phandle(dev->of_node, "vqmmc-supply", 0))
- sdhci_am654->quirks |= SDHCI_AM654_QUIRK_SUPPRESS_V1P8_ENA;
-
sdhci_get_of_property(pdev);
return 0;
@@ -969,7 +940,6 @@ static int sdhci_am654_probe(struct platform_device *pdev)
goto err_pltfm_free;
}
- host->mmc_host_ops.start_signal_voltage_switch = sdhci_am654_start_signal_voltage_switch;
host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning;
pm_runtime_get_noresume(dev);
diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c
index 8d1d710e439d..6667eea95597 100644
--- a/drivers/mtd/nand/raw/cadence-nand-controller.c
+++ b/drivers/mtd/nand/raw/cadence-nand-controller.c
@@ -471,6 +471,8 @@ struct cdns_nand_ctrl {
struct {
void __iomem *virt;
dma_addr_t dma;
+ dma_addr_t iova_dma;
+ u32 size;
} io;
int irq;
@@ -1835,11 +1837,11 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
}
if (dir == DMA_FROM_DEVICE) {
- src_dma = cdns_ctrl->io.dma;
+ src_dma = cdns_ctrl->io.iova_dma;
dst_dma = buf_dma;
} else {
src_dma = buf_dma;
- dst_dma = cdns_ctrl->io.dma;
+ dst_dma = cdns_ctrl->io.iova_dma;
}
tx = dmaengine_prep_dma_memcpy(cdns_ctrl->dmac, dst_dma, src_dma, len,
@@ -1861,12 +1863,12 @@ static int cadence_nand_slave_dma_transfer(struct cdns_nand_ctrl *cdns_ctrl,
dma_async_issue_pending(cdns_ctrl->dmac);
wait_for_completion(&finished);
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
return 0;
err_unmap:
- dma_unmap_single(cdns_ctrl->dev, buf_dma, len, dir);
+ dma_unmap_single(dma_dev->dev, buf_dma, len, dir);
err:
dev_dbg(cdns_ctrl->dev, "Fall back to CPU I/O\n");
@@ -2869,6 +2871,7 @@ cadence_nand_irq_cleanup(int irqnum, struct cdns_nand_ctrl *cdns_ctrl)
static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
{
dma_cap_mask_t mask;
+ struct dma_device *dma_dev = cdns_ctrl->dmac->device;
int ret;
cdns_ctrl->cdma_desc = dma_alloc_coherent(cdns_ctrl->dev,
@@ -2904,15 +2907,24 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
dma_cap_set(DMA_MEMCPY, mask);
if (cdns_ctrl->caps1->has_dma) {
- cdns_ctrl->dmac = dma_request_channel(mask, NULL, NULL);
- if (!cdns_ctrl->dmac) {
- dev_err(cdns_ctrl->dev,
- "Unable to get a DMA channel\n");
- ret = -EBUSY;
+ cdns_ctrl->dmac = dma_request_chan_by_mask(&mask);
+ if (IS_ERR(cdns_ctrl->dmac)) {
+ ret = dev_err_probe(cdns_ctrl->dev, PTR_ERR(cdns_ctrl->dmac),
+ "%d: Failed to get a DMA channel\n", ret);
goto disable_irq;
}
}
+ cdns_ctrl->io.iova_dma = dma_map_resource(dma_dev->dev, cdns_ctrl->io.dma,
+ cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
+
+ ret = dma_mapping_error(dma_dev->dev, cdns_ctrl->io.iova_dma);
+ if (ret) {
+ dev_err(cdns_ctrl->dev, "Failed to map I/O resource to DMA\n");
+ goto dma_release_chnl;
+ }
+
nand_controller_init(&cdns_ctrl->controller);
INIT_LIST_HEAD(&cdns_ctrl->chips);
@@ -2923,18 +2935,22 @@ static int cadence_nand_init(struct cdns_nand_ctrl *cdns_ctrl)
if (ret) {
dev_err(cdns_ctrl->dev, "Failed to register MTD: %d\n",
ret);
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
kfree(cdns_ctrl->buf);
cdns_ctrl->buf = kzalloc(cdns_ctrl->buf_size, GFP_KERNEL);
if (!cdns_ctrl->buf) {
ret = -ENOMEM;
- goto dma_release_chnl;
+ goto unmap_dma_resource;
}
return 0;
+unmap_dma_resource:
+ dma_unmap_resource(dma_dev->dev, cdns_ctrl->io.iova_dma,
+ cdns_ctrl->io.size, DMA_BIDIRECTIONAL, 0);
+
dma_release_chnl:
if (cdns_ctrl->dmac)
dma_release_channel(cdns_ctrl->dmac);
@@ -2956,6 +2972,10 @@ free_buf_desc:
static void cadence_nand_remove(struct cdns_nand_ctrl *cdns_ctrl)
{
cadence_nand_chips_cleanup(cdns_ctrl);
+ if (cdns_ctrl->dmac)
+ dma_unmap_resource(cdns_ctrl->dmac->device->dev,
+ cdns_ctrl->io.iova_dma, cdns_ctrl->io.size,
+ DMA_BIDIRECTIONAL, 0);
cadence_nand_irq_cleanup(cdns_ctrl->irq, cdns_ctrl);
kfree(cdns_ctrl->buf);
dma_free_coherent(cdns_ctrl->dev, sizeof(struct cadence_nand_cdma_desc),
@@ -3020,7 +3040,9 @@ static int cadence_nand_dt_probe(struct platform_device *ofdev)
cdns_ctrl->io.virt = devm_platform_get_and_ioremap_resource(ofdev, 1, &res);
if (IS_ERR(cdns_ctrl->io.virt))
return PTR_ERR(cdns_ctrl->io.virt);
+
cdns_ctrl->io.dma = res->start;
+ cdns_ctrl->io.size = resource_size(res);
dt->clk = devm_clk_get(cdns_ctrl->dev, "nf_clk");
if (IS_ERR(dt->clk))
diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c
index d2d2aeee42a7..6720b547892b 100644
--- a/drivers/mtd/nand/raw/qcom_nandc.c
+++ b/drivers/mtd/nand/raw/qcom_nandc.c
@@ -1881,18 +1881,18 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_
nandc->regs->addr0 = 0;
nandc->regs->addr1 = 0;
- host->cfg0 = FIELD_PREP(CW_PER_PAGE_MASK, 0) |
- FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
- FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
- FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0);
-
- host->cfg1 = FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
- FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
- FIELD_PREP(CS_ACTIVE_BSY, 0) |
- FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
- FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
- FIELD_PREP(WIDE_FLASH, 0) |
- FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1);
+ nandc->regs->cfg0 = cpu_to_le32(FIELD_PREP(CW_PER_PAGE_MASK, 0) |
+ FIELD_PREP(UD_SIZE_BYTES_MASK, 512) |
+ FIELD_PREP(NUM_ADDR_CYCLES_MASK, 5) |
+ FIELD_PREP(SPARE_SIZE_BYTES_MASK, 0));
+
+ nandc->regs->cfg1 = cpu_to_le32(FIELD_PREP(NAND_RECOVERY_CYCLES_MASK, 7) |
+ FIELD_PREP(BAD_BLOCK_BYTE_NUM_MASK, 17) |
+ FIELD_PREP(CS_ACTIVE_BSY, 0) |
+ FIELD_PREP(BAD_BLOCK_IN_SPARE_AREA, 1) |
+ FIELD_PREP(WR_RD_BSY_GAP_MASK, 2) |
+ FIELD_PREP(WIDE_FLASH, 0) |
+ FIELD_PREP(DEV0_CFG1_ECC_DISABLE, 1));
if (!nandc->props->qpic_version2)
nandc->regs->ecc_buf_cfg = cpu_to_le32(ECC_CFG_ECC_DISABLE);
diff --git a/drivers/mtd/spi-nor/sst.c b/drivers/mtd/spi-nor/sst.c
index b5ad7118c49a..175211fe6a5e 100644
--- a/drivers/mtd/spi-nor/sst.c
+++ b/drivers/mtd/spi-nor/sst.c
@@ -174,7 +174,7 @@ static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
int ret;
nor->program_opcode = op;
- ret = spi_nor_write_data(nor, to, 1, buf);
+ ret = spi_nor_write_data(nor, to, len, buf);
if (ret < 0)
return ret;
WARN(ret != len, "While writing %zu byte written %i bytes\n", len, ret);
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 7fea00c7ca8a..c60386bf2d1a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -745,7 +745,7 @@ err:
if (cfv->vr_rx)
vdev->vringh_config->del_vrhs(cfv->vdev);
- if (cfv->vdev)
+ if (cfv->vq_tx)
vdev->config->del_vqs(cfv->vdev);
free_netdev(netdev);
return err;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 6cba9717a6d8..399844809bbe 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -385,15 +385,16 @@ static int c_can_plat_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
KBUILD_MODNAME, ret);
- goto exit_free_device;
+ goto exit_pm_runtime;
}
dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
KBUILD_MODNAME, priv->base, dev->irq);
return 0;
-exit_free_device:
+exit_pm_runtime:
pm_runtime_disable(priv->device);
+exit_free_device:
free_c_can_dev(dev);
exit:
dev_err(&pdev->dev, "probe failed\n");
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 64c349fd4600..f65c1a1e05cc 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -867,10 +867,12 @@ static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
}
break;
case CAN_STATE_ERROR_ACTIVE:
- cf->can_id |= CAN_ERR_CNT;
- cf->data[1] = CAN_ERR_CRTL_ACTIVE;
- cf->data[6] = bec.txerr;
- cf->data[7] = bec.rxerr;
+ if (skb) {
+ cf->can_id |= CAN_ERR_CNT;
+ cf->data[1] = CAN_ERR_CRTL_ACTIVE;
+ cf->data[6] = bec.txerr;
+ cf->data[7] = bec.rxerr;
+ }
break;
default:
netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
diff --git a/drivers/net/can/rockchip/rockchip_canfd-core.c b/drivers/net/can/rockchip/rockchip_canfd-core.c
index df18c85fc078..d9a937ba126c 100644
--- a/drivers/net/can/rockchip/rockchip_canfd-core.c
+++ b/drivers/net/can/rockchip/rockchip_canfd-core.c
@@ -622,7 +622,7 @@ rkcanfd_handle_rx_fifo_overflow_int(struct rkcanfd_priv *priv)
netdev_dbg(priv->ndev, "RX-FIFO overflow\n");
skb = rkcanfd_alloc_can_err_skb(priv, &cf, &timestamp);
- if (skb)
+ if (!skb)
return 0;
rkcanfd_get_berr_counter_corrected(priv, &bec);
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index eee20839d96f..0d155eb1b9e9 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -248,7 +248,11 @@ static int es58x_devlink_info_get(struct devlink *devlink,
return ret;
}
- return devlink_info_serial_number_put(req, es58x_dev->udev->serial);
+ if (es58x_dev->udev->serial)
+ ret = devlink_info_serial_number_put(req,
+ es58x_dev->udev->serial);
+
+ return ret;
}
const struct devlink_ops es58x_dl_ops = {
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 1c83af805209..5883eb93efb1 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2591,7 +2591,8 @@ mt7531_setup_common(struct dsa_switch *ds)
if (ret < 0)
return ret;
- return 0;
+ /* Setup VLAN ID 0 for VLAN-unaware bridges */
+ return mt7530_setup_vlan0(priv);
}
static int
@@ -2687,11 +2688,6 @@ mt7531_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup VLAN ID 0 for VLAN-unaware bridges */
- ret = mt7530_setup_vlan0(priv);
- if (ret)
- return ret;
-
ds->assisted_learning_on_cpu_port = true;
ds->mtu_enforcement_ingress = true;
diff --git a/drivers/net/dsa/realtek/Kconfig b/drivers/net/dsa/realtek/Kconfig
index 6989972eebc3..d6eb6713e5f6 100644
--- a/drivers/net/dsa/realtek/Kconfig
+++ b/drivers/net/dsa/realtek/Kconfig
@@ -43,4 +43,10 @@ config NET_DSA_REALTEK_RTL8366RB
help
Select to enable support for Realtek RTL8366RB.
+config NET_DSA_REALTEK_RTL8366RB_LEDS
+ bool
+ depends on (LEDS_CLASS=y || LEDS_CLASS=NET_DSA_REALTEK_RTL8366RB)
+ depends on NET_DSA_REALTEK_RTL8366RB
+ default NET_DSA_REALTEK_RTL8366RB
+
endif
diff --git a/drivers/net/dsa/realtek/Makefile b/drivers/net/dsa/realtek/Makefile
index 35491dc20d6d..17367bcba496 100644
--- a/drivers/net/dsa/realtek/Makefile
+++ b/drivers/net/dsa/realtek/Makefile
@@ -12,4 +12,7 @@ endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8366RB) += rtl8366.o
rtl8366-objs := rtl8366-core.o rtl8366rb.o
+ifdef CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS
+rtl8366-objs += rtl8366rb-leds.o
+endif
obj-$(CONFIG_NET_DSA_REALTEK_RTL8365MB) += rtl8365mb.o
diff --git a/drivers/net/dsa/realtek/rtl8366rb-leds.c b/drivers/net/dsa/realtek/rtl8366rb-leds.c
new file mode 100644
index 000000000000..99c890681ae6
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb-leds.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+#include "rtl83xx.h"
+#include "rtl8366rb.h"
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ led_group, port_num);
+ return ret;
+ }
+
+ /* Change the LED group to manual controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct dsa_switch *ds = &priv->ds;
+ struct device_node *leds_np;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node_scoped(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret)
+ break;
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index 4c4a95d4380c..f54771cab56d 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -27,11 +27,7 @@
#include "realtek-smi.h"
#include "realtek-mdio.h"
#include "rtl83xx.h"
-
-#define RTL8366RB_PORT_NUM_CPU 5
-#define RTL8366RB_NUM_PORTS 6
-#define RTL8366RB_PHY_NO_MAX 4
-#define RTL8366RB_PHY_ADDR_MAX 31
+#include "rtl8366rb.h"
/* Switch Global Configuration register */
#define RTL8366RB_SGCR 0x0000
@@ -176,39 +172,6 @@
*/
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
-/* LED control registers */
-/* The LED blink rate is global; it is used by all triggers in all groups. */
-#define RTL8366RB_LED_BLINKRATE_REG 0x0430
-#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
-#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
-#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
-#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
-#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
-#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
-#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
-
-/* LED trigger event for each group */
-#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
- (4 * (led_group))
-#define RTL8366RB_LED_CTRL_MASK(led_group) \
- (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
-
-/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
- * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
- * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
- */
-#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
- ((led_group) <= 1 ? \
- RTL8366RB_LED_0_1_CTRL_REG : \
- RTL8366RB_LED_2_3_CTRL_REG)
-#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
-#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
-#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
-
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
#define RTL8366RB_MIB_COUNTER_PORT_OFFSET 0x0050
@@ -244,7 +207,6 @@
#define RTL8366RB_PORT_STATUS_AN_MASK 0x0080
#define RTL8366RB_NUM_VLANS 16
-#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
#define RTL8366RB_NUM_FIDS 8
@@ -351,46 +313,6 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
-enum rtl8366_ledgroup_mode {
- RTL8366RB_LEDGROUP_OFF = 0x0,
- RTL8366RB_LEDGROUP_DUP_COL = 0x1,
- RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
- RTL8366RB_LEDGROUP_SPD1000 = 0x3,
- RTL8366RB_LEDGROUP_SPD100 = 0x4,
- RTL8366RB_LEDGROUP_SPD10 = 0x5,
- RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
- RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
- RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
- RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
- RTL8366RB_LEDGROUP_FIBER = 0xa,
- RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
- RTL8366RB_LEDGROUP_LINK_RX = 0xc,
- RTL8366RB_LEDGROUP_LINK_TX = 0xd,
- RTL8366RB_LEDGROUP_MASTER = 0xe,
- RTL8366RB_LEDGROUP_FORCE = 0xf,
-
- __RTL8366RB_LEDGROUP_MODE_MAX
-};
-
-struct rtl8366rb_led {
- u8 port_num;
- u8 led_group;
- struct realtek_priv *priv;
- struct led_classdev cdev;
-};
-
-/**
- * struct rtl8366rb - RTL8366RB-specific data
- * @max_mtu: per-port max MTU setting
- * @pvid_enabled: if PVID is set for respective port
- * @leds: per-port and per-ledgroup led info
- */
-struct rtl8366rb {
- unsigned int max_mtu[RTL8366RB_NUM_PORTS];
- bool pvid_enabled[RTL8366RB_NUM_PORTS];
- struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
-};
-
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
{ 0, 0, 4, "IfInOctets" },
{ 0, 4, 4, "EtherStatsOctets" },
@@ -831,9 +753,10 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
-static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
- u8 led_group,
- enum rtl8366_ledgroup_mode mode)
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
{
int ret;
u32 val;
@@ -850,144 +773,7 @@ static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
return 0;
}
-static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
-{
- switch (led_group) {
- case 0:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 1:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 2:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- case 3:
- return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
- default:
- return 0;
- }
-}
-
-static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
- u32 val;
-
- ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
- &val);
- if (ret) {
- dev_err(priv->dev, "error reading LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
-}
-
-static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
-{
- struct realtek_priv *priv = led->priv;
- u8 led_group = led->led_group;
- u8 port_num = led->port_num;
- int ret;
-
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_X_X_CTRL_REG(led_group),
- rtl8366rb_led_group_port_mask(led_group,
- port_num),
- enable ? 0xffff : 0);
- if (ret) {
- dev_err(priv->dev, "error updating LED on port %d group %d\n",
- led_group, port_num);
- return ret;
- }
-
- /* Change the LED group to manual controlled LEDs if required */
- ret = rb8366rb_set_ledgroup_mode(priv, led_group,
- RTL8366RB_LEDGROUP_FORCE);
-
- if (ret) {
- dev_err(priv->dev, "error updating LED GROUP group %d\n",
- led_group);
- return ret;
- }
-
- return 0;
-}
-
-static int
-rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
- enum led_brightness brightness)
-{
- struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
- cdev);
-
- return rb8366rb_set_port_led(led, brightness == LED_ON);
-}
-
-static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
- struct fwnode_handle *led_fwnode)
-{
- struct rtl8366rb *rb = priv->chip_data;
- struct led_init_data init_data = { };
- enum led_default_state state;
- struct rtl8366rb_led *led;
- u32 led_group;
- int ret;
-
- ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
- if (ret)
- return ret;
-
- if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
- dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
- led_group, dp->index);
- return -EINVAL;
- }
-
- led = &rb->leds[dp->index][led_group];
- led->port_num = dp->index;
- led->led_group = led_group;
- led->priv = priv;
-
- state = led_init_default_state_get(led_fwnode);
- switch (state) {
- case LEDS_DEFSTATE_ON:
- led->cdev.brightness = 1;
- rb8366rb_set_port_led(led, 1);
- break;
- case LEDS_DEFSTATE_KEEP:
- led->cdev.brightness =
- rb8366rb_get_port_led(led);
- break;
- case LEDS_DEFSTATE_OFF:
- default:
- led->cdev.brightness = 0;
- rb8366rb_set_port_led(led, 0);
- }
-
- led->cdev.max_brightness = 1;
- led->cdev.brightness_set_blocking =
- rtl8366rb_cled_brightness_set_blocking;
- init_data.fwnode = led_fwnode;
- init_data.devname_mandatory = true;
-
- init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
- dp->ds->index, dp->index, led_group);
- if (!init_data.devicename)
- return -ENOMEM;
-
- ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
- if (ret) {
- dev_warn(priv->dev, "Failed to init LED %d for port %d",
- led_group, dp->index);
- return ret;
- }
-
- return 0;
-}
-
+/* This code is used also with LEDs disabled */
static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
{
int ret = 0;
@@ -1008,38 +794,6 @@ static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
return ret;
}
-static int rtl8366rb_setup_leds(struct realtek_priv *priv)
-{
- struct dsa_switch *ds = &priv->ds;
- struct device_node *leds_np;
- struct dsa_port *dp;
- int ret = 0;
-
- dsa_switch_for_each_port(dp, ds) {
- if (!dp->dn)
- continue;
-
- leds_np = of_get_child_by_name(dp->dn, "leds");
- if (!leds_np) {
- dev_dbg(priv->dev, "No leds defined for port %d",
- dp->index);
- continue;
- }
-
- for_each_child_of_node_scoped(leds_np, led_np) {
- ret = rtl8366rb_setup_led(priv, dp,
- of_fwnode_handle(led_np));
- if (ret)
- break;
- }
-
- of_node_put(leds_np);
- if (ret)
- return ret;
- }
- return 0;
-}
-
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
diff --git a/drivers/net/dsa/realtek/rtl8366rb.h b/drivers/net/dsa/realtek/rtl8366rb.h
new file mode 100644
index 000000000000..685ff3275faa
--- /dev/null
+++ b/drivers/net/dsa/realtek/rtl8366rb.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _RTL8366RB_H
+#define _RTL8366RB_H
+
+#include "realtek.h"
+
+#define RTL8366RB_PORT_NUM_CPU 5
+#define RTL8366RB_NUM_PORTS 6
+#define RTL8366RB_PHY_NO_MAX 4
+#define RTL8366RB_NUM_LEDGROUPS 4
+#define RTL8366RB_PHY_ADDR_MAX 31
+
+/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
+#define RTL8366RB_LED_BLINKRATE_REG 0x0430
+#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
+#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
+#define RTL8366RB_LED_BLINKRATE_56MS 0x0001
+#define RTL8366RB_LED_BLINKRATE_84MS 0x0002
+#define RTL8366RB_LED_BLINKRATE_111MS 0x0003
+#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
+#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+
+/* LED trigger event for each group */
+#define RTL8366RB_LED_CTRL_REG 0x0431
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, it is ignored.
+ */
+#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
+#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
+
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
+int rtl8366rb_setup_leds(struct realtek_priv *priv);
+
+#else
+
+static inline int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ return 0;
+}
+
+#endif /* IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS) */
+
+/**
+ * struct rtl8366rb - RTL8366RB-specific data
+ * @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
+ */
+struct rtl8366rb {
+ unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
+#if IS_ENABLED(CONFIG_NET_DSA_REALTEK_RTL8366RB_LEDS)
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
+#endif
+};
+
+/* This code is used also with LEDs disabled */
+int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode);
+
+#endif /* _RTL8366RB_H */
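
The header above relies on the common kernel pattern of pairing an IS_ENABLED() guard with a static inline no-op stub, so rtl8366rb.c can call rtl8366rb_setup_leds() unconditionally and the call folds away when LED support is not built. A minimal sketch of the same pattern under assumed names (CONFIG_FOO_LEDS, foo_setup_leds() and struct foo_priv are illustrative, not the driver's symbols):

#include <linux/kconfig.h>

struct foo_priv;	/* stand-in for the driver's private data */

#if IS_ENABLED(CONFIG_FOO_LEDS)
int foo_setup_leds(struct foo_priv *priv);	/* provided by foo-leds.c */
#else
static inline int foo_setup_leds(struct foo_priv *priv)
{
	/* LED support compiled out: succeed without touching hardware */
	return 0;
}
#endif

/* Callers need no #ifdefs; with CONFIG_FOO_LEDS=n this compiles to nothing */
static int foo_setup(struct foo_priv *priv)
{
	return foo_setup_leds(priv);
}
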
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index fe0e3e2a8117..71e50fc65c14 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -1441,7 +1441,9 @@ void aq_nic_deinit(struct aq_nic_s *self, bool link_down)
aq_ptp_ring_free(self);
aq_ptp_free(self);
- if (likely(self->aq_fw_ops->deinit) && link_down) {
+ /* May be invoked during hot unplug. */
+ if (pci_device_is_present(self->pdev) &&
+ likely(self->aq_fw_ops->deinit) && link_down) {
mutex_lock(&self->fwreq_mutex);
self->aq_fw_ops->deinit(self->aq_hw);
mutex_unlock(&self->fwreq_mutex);
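
The hunk above guards the firmware deinit call with pci_device_is_present() because this path can run during hot unplug, when register accesses to an absent device would only stall or fail. A small hedged sketch of that guard (example_fw_deinit() is a made-up stand-in for the firmware op, not the aquantia API):

#include <linux/pci.h>

void example_fw_deinit(struct pci_dev *pdev);	/* hypothetical deinit helper */

static void example_deinit(struct pci_dev *pdev, bool link_down)
{
	/* Skip firmware teardown when the device was surprise-removed;
	 * config-space reads return all-ones for an absent device.
	 */
	if (!pci_device_is_present(pdev) || !link_down)
		return;

	example_fw_deinit(pdev);
}
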
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 0715ea5bf13e..3b082114f2e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -41,9 +41,12 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
+ u32 phy_wolopts = 0;
- if (dev->phydev)
+ if (dev->phydev) {
phy_ethtool_get_wol(dev->phydev, wol);
+ phy_wolopts = wol->wolopts;
+ }
/* MAC is not wake-up capable, return what the PHY does */
if (!device_can_wakeup(kdev))
@@ -51,9 +54,14 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Overlay MAC capabilities with that of the PHY queried before */
wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
- wol->wolopts = priv->wolopts;
- memset(wol->sopass, 0, sizeof(wol->sopass));
+ wol->wolopts |= priv->wolopts;
+ /* Return the PHY configured magic password */
+ if (phy_wolopts & WAKE_MAGICSECURE)
+ return;
+
+ /* Otherwise the MAC one */
+ memset(wol->sopass, 0, sizeof(wol->sopass));
if (wol->wolopts & WAKE_MAGICSECURE)
memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
@@ -70,7 +78,7 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
/* Try Wake-on-LAN from the PHY first */
if (dev->phydev) {
ret = phy_ethtool_set_wol(dev->phydev, wol);
- if (ret != -EOPNOTSUPP)
+ if (ret != -EOPNOTSUPP && wol->wolopts)
return ret;
}
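
The set_wol change above only returns early when the PHY both understands WoL and some wake option was actually requested; an empty request still has to reach the MAC so it can clear its own configuration. A condensed sketch of that PHY-first fallback, with mac_set_wol() as a hypothetical stand-in for the MAC-level path:

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

int mac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);	/* hypothetical */

static int example_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	int ret;

	/* Try Wake-on-LAN at the PHY first */
	if (dev->phydev) {
		ret = phy_ethtool_set_wol(dev->phydev, wol);
		/* Only stop here if the PHY handled a non-empty request;
		 * disabling WoL must still propagate to the MAC below.
		 */
		if (ret != -EOPNOTSUPP && wol->wolopts)
			return ret;
	}

	return mac_set_wol(dev, wol);
}
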
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1c94bf1db718..d9d675f1ebfe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -55,6 +55,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/crc32poly.h>
+#include <linux/dmi.h>
#include <net/checksum.h>
#include <net/gso.h>
@@ -18212,6 +18213,50 @@ unlock:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+/* Systems where ACPI _PTS (Prepare To Sleep) S5 will result in a fatal
+ * PCIe AER event on the tg3 device if the tg3 device is not, or cannot
+ * be, powered down.
+ */
+static const struct dmi_system_id tg3_restart_aer_quirk_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R440"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R540"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R650"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R740"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R750"),
+ },
+ },
+ {}
+};
+
static void tg3_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -18228,6 +18273,19 @@ static void tg3_shutdown(struct pci_dev *pdev)
if (system_state == SYSTEM_POWER_OFF)
tg3_power_down(tp);
+ else if (system_state == SYSTEM_RESTART &&
+ dmi_first_match(tg3_restart_aer_quirk_table) &&
+ pdev->current_state != PCI_D3cold &&
+ pdev->current_state != PCI_UNKNOWN) {
+ /* Disable PCIe AER on the tg3 to avoid a fatal
+ * error during this system restart.
+ */
+ pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_CERE |
+ PCI_EXP_DEVCTL_NFERE |
+ PCI_EXP_DEVCTL_FERE |
+ PCI_EXP_DEVCTL_URRE);
+ }
rtnl_unlock();
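
The quirk above silences PCIe error reporting on the listed Dell systems before a restart so the platform's _PTS handling cannot raise a fatal AER event. Isolated from tg3's shutdown path, the two building blocks look roughly like this (example_mute_aer() and the single-entry table are illustrative only):

#include <linux/dmi.h>
#include <linux/pci.h>

static const struct dmi_system_id example_quirk_table[] = {
	{
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R640"),
		},
	},
	{}	/* terminator */
};

static void example_mute_aer(struct pci_dev *pdev)
{
	if (!dmi_first_match(example_quirk_table))
		return;

	/* Clear all four error-reporting enables in Device Control so the
	 * device stays quiet across the restart.
	 */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL,
				   PCI_EXP_DEVCTL_CERE |
				   PCI_EXP_DEVCTL_NFERE |
				   PCI_EXP_DEVCTL_FERE |
				   PCI_EXP_DEVCTL_URRE);
}
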
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 5740c98d8c9f..2847278d9cd4 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -1279,6 +1279,8 @@ struct macb {
struct clk *rx_clk;
struct clk *tsu_clk;
struct net_device *dev;
+ /* Protects hw_stats and ethtool_stats */
+ spinlock_t stats_lock;
union {
struct macb_stats macb;
struct gem_stats gem;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 48496209fb16..c1f57d96e63f 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1978,10 +1978,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
+ spin_lock(&bp->stats_lock);
if (macb_is_gem(bp))
bp->hw_stats.gem.rx_overruns++;
else
bp->hw_stats.macb.rx_overruns++;
+ spin_unlock(&bp->stats_lock);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
@@ -3102,6 +3104,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
if (!netif_running(bp->dev))
return nstat;
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
@@ -3131,6 +3134,7 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
nstat->tx_fifo_errors = hwstat->tx_underrun;
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -3138,12 +3142,13 @@ static struct net_device_stats *gem_get_stats(struct macb *bp)
static void gem_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
- struct macb *bp;
+ struct macb *bp = netdev_priv(dev);
- bp = netdev_priv(dev);
+ spin_lock_irq(&bp->stats_lock);
gem_update_stats(bp);
memcpy(data, &bp->ethtool_stats, sizeof(u64)
* (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
+ spin_unlock_irq(&bp->stats_lock);
}
static int gem_get_sset_count(struct net_device *dev, int sset)
@@ -3193,6 +3198,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return gem_get_stats(bp);
/* read stats from hardware */
+ spin_lock_irq(&bp->stats_lock);
macb_update_stats(bp);
/* Convert HW stats into netdevice stats */
@@ -3226,6 +3232,7 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
nstat->tx_fifo_errors = hwstat->tx_underruns;
/* Don't know about heartbeat or window errors... */
+ spin_unlock_irq(&bp->stats_lock);
return nstat;
}
@@ -5097,6 +5104,7 @@ static int macb_probe(struct platform_device *pdev)
}
}
spin_lock_init(&bp->lock);
+ spin_lock_init(&bp->stats_lock);
/* setup capabilities */
macb_configure_caps(bp, macb_config);
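
The new stats_lock is taken with plain spin_lock() in macb_interrupt(), which already runs in hard-IRQ context, and with spin_lock_irq() in the process-context stats getters so the interrupt handler cannot deadlock against them on the same CPU. A minimal sketch of that split, with update_hw_stats()/read_hw_stats() as hypothetical stand-ins for the driver's accessors:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(stats_lock);	/* protects the shared counters */

void update_hw_stats(void);	/* hypothetical counter update */
void read_hw_stats(void);	/* hypothetical counter read */

/* Hard-IRQ context: local interrupts are already disabled */
static void example_irq_handler(void)
{
	spin_lock(&stats_lock);
	update_hw_stats();
	spin_unlock(&stats_lock);
}

/* Process context: mask local interrupts while holding the lock so
 * example_irq_handler() cannot spin on it from the same CPU.
 */
static void example_get_stats(void)
{
	spin_lock_irq(&stats_lock);
	read_hw_stats();
	spin_unlock_irq(&stats_lock);
}
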
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index e48b861e4ce1..270ff9aab335 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -562,7 +562,7 @@ struct be_adapter {
struct be_dma_mem mbox_mem_alloced;
struct be_mcc_obj mcc_obj;
- struct mutex mcc_lock; /* For serializing mcc cmds to BE card */
+ spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
u16 cfg_num_rx_irqs; /* configured via set-channels */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 61adcebeef01..51b8377edd1d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -575,7 +575,7 @@ int be_process_mcc(struct be_adapter *adapter)
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
-#define mcc_timeout 12000 /* 12s timeout */
+#define mcc_timeout 120000 /* 12s timeout */
int i, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
@@ -589,7 +589,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
if (atomic_read(&mcc_obj->q.used) == 0)
break;
- usleep_range(500, 1000);
+ udelay(100);
}
if (i == mcc_timeout) {
dev_err(&adapter->pdev->dev, "FW not responding\n");
@@ -866,7 +866,7 @@ static bool use_mcc(struct be_adapter *adapter)
static int be_cmd_lock(struct be_adapter *adapter)
{
if (use_mcc(adapter)) {
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
return 0;
} else {
return mutex_lock_interruptible(&adapter->mbox_lock);
@@ -877,7 +877,7 @@ static int be_cmd_lock(struct be_adapter *adapter)
static void be_cmd_unlock(struct be_adapter *adapter)
{
if (use_mcc(adapter))
- return mutex_unlock(&adapter->mcc_lock);
+ return spin_unlock_bh(&adapter->mcc_lock);
else
return mutex_unlock(&adapter->mbox_lock);
}
@@ -1047,7 +1047,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1076,7 +1076,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1088,7 +1088,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
struct be_cmd_req_pmac_add *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1113,7 +1113,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
@@ -1131,7 +1131,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
if (pmac_id == -1)
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1151,7 +1151,7 @@ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1414,7 +1414,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1444,7 +1444,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1508,7 +1508,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
struct be_cmd_req_q_destroy *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1525,7 +1525,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
q->created = false;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1593,7 +1593,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
struct be_cmd_req_hdr *hdr;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1621,7 +1621,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1637,7 +1637,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
CMD_SUBSYSTEM_ETH))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1660,7 +1660,7 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
adapter->stats_cmd_sent = true;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1697,7 +1697,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
struct be_cmd_req_link_status *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
if (link_status)
*link_status = LINK_DOWN;
@@ -1736,7 +1736,7 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1747,7 +1747,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
struct be_cmd_req_get_cntl_addnl_attribs *req;
int status = 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1762,7 +1762,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1811,7 +1811,7 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!get_fat_cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
while (total_size) {
buf_size = min(total_size, (u32)60 * 1024);
@@ -1849,9 +1849,9 @@ int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
log_offset += buf_size;
}
err:
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
get_fat_cmd.va, get_fat_cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
return status;
}
@@ -1862,7 +1862,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
struct be_cmd_req_get_fw_version *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1885,7 +1885,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
sizeof(adapter->fw_on_flash));
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1899,7 +1899,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
struct be_cmd_req_modify_eq_delay *req;
int status = 0, i;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1922,7 +1922,7 @@ static int __be_cmd_modify_eqd(struct be_adapter *adapter,
status = be_mcc_notify(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1949,7 +1949,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
struct be_cmd_req_vlan_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -1971,7 +1971,7 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1982,7 +1982,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
struct be_cmd_req_rx_filter *req = mem->va;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2015,7 +2015,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2046,7 +2046,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2066,7 +2066,7 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
return -EOPNOTSUPP;
@@ -2085,7 +2085,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2108,7 +2108,7 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2189,7 +2189,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2214,7 +2214,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2226,7 +2226,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
struct be_cmd_req_enable_disable_beacon *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2247,7 +2247,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2258,7 +2258,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
struct be_cmd_req_get_beacon_state *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2282,7 +2282,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2306,7 +2306,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2328,7 +2328,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
memcpy(data, resp->page_data + off, len);
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
return status;
}
@@ -2345,7 +2345,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
void *ctxt = NULL;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2387,7 +2387,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
@@ -2406,7 +2406,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2460,7 +2460,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2478,7 +2478,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2491,7 +2491,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
struct lancer_cmd_resp_read_object *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2525,7 +2525,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
}
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2537,7 +2537,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_cmd_write_flashrom *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
adapter->flash_status = 0;
wrb = wrb_from_mccq(adapter);
@@ -2562,7 +2562,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(40000)))
@@ -2573,7 +2573,7 @@ static int be_cmd_write_flashrom(struct be_adapter *adapter,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2584,7 +2584,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
struct be_mcc_wrb *wrb;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -2611,7 +2611,7 @@ static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
memcpy(flashed_crc, req->crc, 4);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3217,7 +3217,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_cmd_req_acpi_wol_magic_config *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3234,7 +3234,7 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3249,7 +3249,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3272,7 +3272,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
if (status)
goto err_unlock;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
@@ -3281,7 +3281,7 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
return status;
err_unlock:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3298,7 +3298,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3324,7 +3324,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
if (status)
goto err;
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
wait_for_completion(&adapter->et_cmd_compl);
resp = embedded_payload(wrb);
@@ -3332,7 +3332,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
return status;
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3348,7 +3348,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
CMD_SUBSYSTEM_LOWLEVEL))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3382,7 +3382,7 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3393,7 +3393,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
struct be_cmd_req_seeprom_read *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3409,7 +3409,7 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3424,7 +3424,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3469,7 +3469,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
}
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3479,7 +3479,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
struct be_cmd_req_set_qos *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3499,7 +3499,7 @@ static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3611,7 +3611,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
struct be_cmd_req_get_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3643,7 +3643,7 @@ int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3655,7 +3655,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
struct be_cmd_req_set_fn_privileges *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3675,7 +3675,7 @@ int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3707,7 +3707,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
return -ENOMEM;
}
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3771,7 +3771,7 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
}
out:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
@@ -3831,7 +3831,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
if (!cmd.va)
return -ENOMEM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3853,7 +3853,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
err:
dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3889,7 +3889,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
CMD_SUBSYSTEM_COMMON))
return -EPERM;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3930,7 +3930,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -3944,7 +3944,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
int status;
u16 vid;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -3991,7 +3991,7 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4190,7 +4190,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
struct be_cmd_req_set_ext_fat_caps *req;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4206,7 +4206,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4684,7 +4684,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
if (iface == 0xFFFFFFFF)
return -1;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4701,7 +4701,7 @@ int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4735,7 +4735,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
struct be_cmd_resp_get_iface_list *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4756,7 +4756,7 @@ int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
}
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4850,7 +4850,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
if (BEx_chip(adapter))
return 0;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4868,7 +4868,7 @@ int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
req->enable = 1;
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -4941,7 +4941,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
u32 link_config = 0;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -4969,7 +4969,7 @@ __be_cmd_set_logical_link_config(struct be_adapter *adapter,
status = be_mcc_notify_wait(adapter);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5000,8 +5000,7 @@ int be_cmd_set_features(struct be_adapter *adapter)
struct be_mcc_wrb *wrb;
int status;
- if (mutex_lock_interruptible(&adapter->mcc_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5039,7 +5038,7 @@ err:
dev_info(&adapter->pdev->dev,
"Adapter does not support HW error recovery\n");
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -5053,7 +5052,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
struct be_cmd_resp_hdr *resp;
int status;
- mutex_lock(&adapter->mcc_lock);
+ spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
if (!wrb) {
@@ -5076,7 +5075,7 @@ int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
- mutex_unlock(&adapter->mcc_lock);
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 875fe379eea2..3d2e21592119 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5667,8 +5667,8 @@ static int be_drv_init(struct be_adapter *adapter)
}
mutex_init(&adapter->mbox_lock);
- mutex_init(&adapter->mcc_lock);
mutex_init(&adapter->rx_filter_lock);
+ spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
init_completion(&adapter->et_cmd_compl);
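
The conversion above turns mcc_lock into a spinlock taken with spin_lock_bh(), so MCC commands can be issued from contexts that must not sleep; as a consequence the completion wait switches from usleep_range() to udelay() polling with the iteration count scaled to keep the same 12s budget. A short hedged sketch of the bottom-half-safe pattern (issue_cmd() and cmd_done() are placeholders, not the benet API):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(cmd_lock);	/* serializes commands to the device */

void issue_cmd(void);	/* hypothetical command post */
bool cmd_done(void);	/* hypothetical completion check */

static int example_issue_cmd(void)
{
	int i;

	/* _bh variant also excludes softirq (e.g. NAPI) users of the lock */
	spin_lock_bh(&cmd_lock);
	issue_cmd();

	/* Cannot sleep while holding a spinlock, so poll with udelay() */
	for (i = 0; i < 120000; i++) {
		if (cmd_done())
			break;
		udelay(100);	/* 120000 * 100us = 12s budget */
	}
	spin_unlock_bh(&cmd_lock);

	return i == 120000 ? -ETIMEDOUT : 0;
}
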
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 6a6fc819dfde..2106861463e4 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -167,6 +167,24 @@ static bool enetc_skb_is_tcp(struct sk_buff *skb)
return skb->csum_offset == offsetof(struct tcphdr, check);
}
+/**
+ * enetc_unwind_tx_frame() - Unwind the DMA mappings of a multi-buffer Tx frame
+ * @tx_ring: Pointer to the Tx ring on which the buffer descriptors are located
+ * @count: Number of Tx buffer descriptors which need to be unmapped
+ * @i: Index of the last successfully mapped Tx buffer descriptor
+ */
+static void enetc_unwind_tx_frame(struct enetc_bdr *tx_ring, int count, int i)
+{
+ while (count--) {
+ struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i];
+
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ }
+}
+
static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
{
bool do_vlan, do_onestep_tstamp = false, do_twostep_tstamp = false;
@@ -279,9 +297,11 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
}
if (do_onestep_tstamp) {
- u32 lo, hi, val;
- u64 sec, nsec;
+ __be32 new_sec_l, new_nsec;
+ u32 lo, hi, nsec, val;
+ __be16 new_sec_h;
u8 *data;
+ u64 sec;
lo = enetc_rd_hot(hw, ENETC_SICTR0);
hi = enetc_rd_hot(hw, ENETC_SICTR1);
@@ -295,13 +315,38 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
/* Update originTimestamp field of Sync packet
* - 48 bits seconds field
* - 32 bits nanoseconds field
+ *
+ * In addition, the UDP checksum needs to be updated
+ * by software after the originTimestamp field is
+ * rewritten; otherwise the hardware will calculate a
+ * wrong checksum when it updates the correction field
+ * and writes it into the packet.
*/
data = skb_mac_header(skb);
- *(__be16 *)(data + offset2) =
- htons((sec >> 32) & 0xffff);
- *(__be32 *)(data + offset2 + 2) =
- htonl(sec & 0xffffffff);
- *(__be32 *)(data + offset2 + 6) = htonl(nsec);
+ new_sec_h = htons((sec >> 32) & 0xffff);
+ new_sec_l = htonl(sec & 0xffffffff);
+ new_nsec = htonl(nsec);
+ if (udp) {
+ struct udphdr *uh = udp_hdr(skb);
+ __be32 old_sec_l, old_nsec;
+ __be16 old_sec_h;
+
+ old_sec_h = *(__be16 *)(data + offset2);
+ inet_proto_csum_replace2(&uh->check, skb, old_sec_h,
+ new_sec_h, false);
+
+ old_sec_l = *(__be32 *)(data + offset2 + 2);
+ inet_proto_csum_replace4(&uh->check, skb, old_sec_l,
+ new_sec_l, false);
+
+ old_nsec = *(__be32 *)(data + offset2 + 6);
+ inet_proto_csum_replace4(&uh->check, skb, old_nsec,
+ new_nsec, false);
+ }
+
+ *(__be16 *)(data + offset2) = new_sec_h;
+ *(__be32 *)(data + offset2 + 2) = new_sec_l;
+ *(__be32 *)(data + offset2 + 6) = new_nsec;
/* Configure single-step register */
val = ENETC_PM0_SINGLE_STEP_EN;
@@ -372,25 +417,20 @@ static int enetc_map_tx_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
dma_err:
dev_err(tx_ring->dev, "DMA map error");
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
-static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
- struct enetc_tx_swbd *tx_swbd,
- union enetc_tx_bd *txbd, int *i, int hdr_len,
- int data_len)
+static int enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
{
union enetc_tx_bd txbd_tmp;
u8 flags = 0, e_flags = 0;
dma_addr_t addr;
+ int count = 1;
enetc_clear_tx_bd(&txbd_tmp);
addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
@@ -433,7 +473,10 @@ static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
/* Write the BD */
txbd_tmp.ext.e_flags = e_flags;
*txbd = txbd_tmp;
+ count++;
}
+
+ return count;
}
static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
@@ -790,9 +833,9 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
/* compute the csum over the L4 header */
csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
- enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ count += enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd,
+ &i, hdr_len, data_len);
bd_data_num = 0;
- count++;
while (data_len > 0) {
int size;
@@ -816,8 +859,13 @@ static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb
err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
tso.data, size,
size == data_len);
- if (err)
+ if (err) {
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+
goto err_map_data;
+ }
data_len -= size;
count++;
@@ -846,13 +894,7 @@ err_map_data:
dev_err(tx_ring->dev, "DMA map error");
err_chained_bd:
- do {
- tx_swbd = &tx_ring->tx_swbd[i];
- enetc_free_tx_frame(tx_ring, tx_swbd);
- if (i == 0)
- i = tx_ring->bd_count;
- i--;
- } while (count--);
+ enetc_unwind_tx_frame(tx_ring, count, i);
return 0;
}
@@ -1901,7 +1943,7 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
enetc_xdp_drop(rx_ring, orig_i, i);
tx_ring->stats.xdp_tx_drops++;
} else {
- tx_ring->stats.xdp_tx += xdp_tx_bd_cnt;
+ tx_ring->stats.xdp_tx++;
rx_ring->xdp.xdp_tx_in_flight += xdp_tx_bd_cnt;
xdp_tx_frm_cnt++;
/* The XDP_TX enqueue was successful, so we
@@ -3228,6 +3270,9 @@ static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr)
new_offloads |= ENETC_F_TX_TSTAMP;
break;
case HWTSTAMP_TX_ONESTEP_SYNC:
+ if (!enetc_si_is_pf(priv->si))
+ return -EOPNOTSUPP;
+
new_offloads &= ~ENETC_F_TX_TSTAMP_MASK;
new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP;
break;
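
The one-step timestamp hunk patches the originTimestamp bytes directly in the frame, so the UDP checksum has to be folded incrementally with inet_proto_csum_replace2()/_replace4() rather than recomputed from scratch. A reduced sketch of that incremental update for a single 32-bit field (the field pointer and helper name are illustrative, not the enetc code):

#include <linux/skbuff.h>
#include <linux/udp.h>
#include <net/checksum.h>

static void example_patch_field(struct sk_buff *skb, u8 *field, __be32 new_val)
{
	struct udphdr *uh = udp_hdr(skb);
	__be32 old_val = *(__be32 *)field;

	/* Fold the old->new difference into uh->check; 'false' because
	 * the patched field is not part of the pseudo-header.
	 */
	inet_proto_csum_replace4(&uh->check, skb, old_val, new_val, false);

	*(__be32 *)field = new_val;
}
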
diff --git a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
index fc41078c4f5d..73ac8c6afb3a 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc4_pf.c
@@ -672,7 +672,6 @@ err_link_init:
err_alloc_msix:
err_config_si:
err_clk_get:
- mutex_destroy(&priv->mm_lock);
free_netdev(ndev);
return err;
@@ -684,6 +683,7 @@ static void enetc4_pf_netdev_destroy(struct enetc_si *si)
struct net_device *ndev = si->ndev;
unregister_netdev(ndev);
+ enetc4_link_deinit(priv);
enetc_free_msix(priv);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index bf34b5bb1e35..ece3ae28ba82 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -832,6 +832,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
static int enetc_get_ts_info(struct net_device *ndev,
struct kernel_ethtool_ts_info *info)
{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
int *phc_idx;
phc_idx = symbol_get(enetc_phc_index);
@@ -852,8 +853,10 @@ static int enetc_get_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_TX_SOFTWARE;
info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON) |
- (1 << HWTSTAMP_TX_ONESTEP_SYNC);
+ (1 << HWTSTAMP_TX_ON);
+
+ if (enetc_si_is_pf(priv->si))
+ info->tx_types |= (1 << HWTSTAMP_TX_ONESTEP_SYNC);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_ALL);
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 8167cc5fb0df..78d2a19593d1 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1116,6 +1116,16 @@ static inline u32 gve_xdp_tx_start_queue_id(struct gve_priv *priv)
return gve_xdp_tx_queue_id(priv, 0);
}
+static inline bool gve_supports_xdp_xmit(struct gve_priv *priv)
+{
+ switch (priv->queue_format) {
+ case GVE_GQI_QPL_FORMAT:
+ return true;
+ default:
+ return false;
+ }
+}
+
/* gqi napi handler defined in gve_main.c */
int gve_napi_poll(struct napi_struct *napi, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 533e659b15b3..92237fb0b60c 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1903,6 +1903,8 @@ static void gve_turndown(struct gve_priv *priv)
/* Stop tx queues */
netif_tx_disable(priv->dev);
+ xdp_features_clear_redirect_target(priv->dev);
+
gve_clear_napi_enabled(priv);
gve_clear_report_stats(priv);
@@ -1972,6 +1974,9 @@ static void gve_turnup(struct gve_priv *priv)
napi_schedule(&block->napi);
}
+ if (priv->num_xdp_queues && gve_supports_xdp_xmit(priv))
+ xdp_features_set_redirect_target(priv->dev, false);
+
gve_set_napi_enabled(priv);
}
@@ -2246,7 +2251,6 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_QPL_FORMAT) {
xdp_features = NETDEV_XDP_ACT_BASIC;
xdp_features |= NETDEV_XDP_ACT_REDIRECT;
- xdp_features |= NETDEV_XDP_ACT_NDO_XMIT;
xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
} else {
xdp_features = 0;
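
Instead of advertising NETDEV_XDP_ACT_NDO_XMIT statically, the gve change toggles the redirect-target feature around turnup/turndown, and only when the queue format can actually service ndo_xdp_xmit. A hedged sketch of that toggle, with has_xdp_queues()/supports_xmit() as hypothetical stand-ins for the driver's checks:

#include <linux/netdevice.h>
#include <net/xdp.h>

bool has_xdp_queues(struct net_device *dev);	/* hypothetical */
bool supports_xmit(struct net_device *dev);	/* hypothetical */

static void example_turnup(struct net_device *dev)
{
	/* Advertise ndo_xdp_xmit as a redirect target only while XDP TX
	 * queues exist and the queue format can service them.
	 */
	if (has_xdp_queues(dev) && supports_xmit(dev))
		xdp_features_set_redirect_target(dev, false);
}

static void example_turndown(struct net_device *dev)
{
	/* Queues are going away; stop other devices redirecting to us */
	xdp_features_clear_redirect_target(dev);
}
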
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8ac0047f1ada..f0674a443567 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -109,10 +109,12 @@ static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
+ struct gve_rx_ring *rx = &priv->rx[idx];
if (!gve_rx_was_added_to_block(priv, idx))
return;
+ page_pool_disable_direct_recycling(rx->dqo.page_pool);
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
gve_rx_reset_ring_dqo(priv, idx);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index bab16c2191b2..181af419b878 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -483,7 +483,7 @@ int hclge_ptp_init(struct hclge_dev *hdev)
ret = hclge_ptp_get_cycle(hdev);
if (ret)
- return ret;
+ goto out;
}
ret = hclge_ptp_int_en(hdev, true);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index e95ae0d39948..0676fc547b6f 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2408,6 +2408,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr_t data_dma_addr;
struct netdev_queue *txq;
unsigned long lpar_rc;
+ unsigned int skblen;
union sub_crq tx_crq;
unsigned int offset;
bool use_scrq_send_direct = false;
@@ -2522,6 +2523,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
+ skblen = skb->len;
memset(&tx_crq, 0, sizeof(tx_crq));
tx_crq.v1.first = IBMVNIC_CRQ_CMD;
@@ -2614,7 +2616,7 @@ early_exit:
netif_stop_subqueue(netdev, queue_num);
}
- tx_bytes += skb->len;
+ tx_bytes += skblen;
txq_trans_cond_update(txq);
ret = NETDEV_TX_OK;
goto out;
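
The ibmvnic fix snapshots skb->len into a local before the descriptor is handed to the hypervisor, because once transmission is kicked off the skb may already have been completed and freed, making a later skb->len read a use-after-free. A tiny sketch of the pattern (hw_xmit() is a placeholder for the actual send):

#include <linux/skbuff.h>

void hw_xmit(struct sk_buff *skb);	/* hypothetical: may free skb immediately */

static unsigned long example_xmit(struct sk_buff *skb, unsigned long tx_bytes)
{
	unsigned int skblen = skb->len;	/* snapshot before ownership is lost */

	hw_xmit(skb);

	/* Safe: account bytes from the snapshot, not from the (possibly
	 * freed) skb.
	 */
	return tx_bytes + skblen;
}
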
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 2d7a18fcc3be..6faa62bced3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1983,7 +1983,7 @@ err:
static void iavf_finish_config(struct work_struct *work)
{
struct iavf_adapter *adapter;
- bool netdev_released = false;
+ bool locks_released = false;
int pairs, err;
adapter = container_of(work, struct iavf_adapter, finish_config);
@@ -2012,19 +2012,22 @@ static void iavf_finish_config(struct work_struct *work)
netif_set_real_num_tx_queues(adapter->netdev, pairs);
if (adapter->netdev->reg_state != NETREG_REGISTERED) {
+ mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
- netdev_released = true;
+ locks_released = true;
err = register_netdevice(adapter->netdev);
if (err) {
dev_err(&adapter->pdev->dev, "Unable to register netdev (%d)\n",
err);
/* go back and try again.*/
+ mutex_lock(&adapter->crit_lock);
iavf_free_rss(adapter);
iavf_free_misc_irq(adapter);
iavf_reset_interrupt_capability(adapter);
iavf_change_state(adapter,
__IAVF_INIT_CONFIG_ADAPTER);
+ mutex_unlock(&adapter->crit_lock);
goto out;
}
}
@@ -2040,9 +2043,10 @@ static void iavf_finish_config(struct work_struct *work)
}
out:
- mutex_unlock(&adapter->crit_lock);
- if (!netdev_released)
+ if (!locks_released) {
+ mutex_unlock(&adapter->crit_lock);
netdev_unlock(adapter->netdev);
+ }
rtnl_unlock();
}
@@ -2903,8 +2907,8 @@ static void iavf_watchdog_task(struct work_struct *work)
}
mutex_unlock(&adapter->crit_lock);
- netdev_unlock(netdev);
restart_watchdog:
+ netdev_unlock(netdev);
if (adapter->state >= __IAVF_DOWN)
queue_work(adapter->wq, &adapter->adminq_task);
if (adapter->aq_required)
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index d116e2b10bce..dbdb83567364 100644
--- a/drivers/net/ethernet/intel/ice/devlink/devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -981,6 +981,9 @@ static int ice_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv
/* preallocate memory for ice_sched_node */
node = devm_kzalloc(ice_hw_to_dev(pi->hw), sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
*priv = node;
return 0;
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index fb527434b58b..d649c197cf67 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -38,8 +38,7 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
if (ice_vsi_add_vlan_zero(uplink_vsi))
goto err_vlan_zero;
- if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
- ICE_FLTR_RX))
+ if (ice_set_dflt_vsi(uplink_vsi))
goto err_def_rx;
if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index b83f99c01d91..8aabf7749aa5 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -36,6 +36,7 @@ static void ice_free_vf_entries(struct ice_pf *pf)
hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
hash_del_rcu(&vf->entry);
+ ice_deinitialize_vf_entry(vf);
ice_put_vf(vf);
}
}
@@ -193,10 +194,6 @@ void ice_free_vfs(struct ice_pf *pf)
wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
}
- /* clear malicious info since the VF is getting released */
- if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
- list_del(&vf->mbx_info.list_entry);
-
mutex_unlock(&vf->cfg_lock);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 5d2d7736fd5f..9c9ea4c1b93b 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -527,15 +527,14 @@ err:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
- * @rx_buf: Rx buffer to store the XDP action
* @eop_desc: Last descriptor in packet to read metadata from
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
-static void
+static u32
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
- struct ice_rx_buf *rx_buf, union ice_32b_rx_flex_desc *eop_desc)
+ union ice_32b_rx_flex_desc *eop_desc)
{
unsigned int ret = ICE_XDP_PASS;
u32 act;
@@ -574,7 +573,7 @@ out_failure:
ret = ICE_XDP_CONSUMED;
}
exit:
- ice_set_rx_bufs_act(xdp, rx_ring, ret);
+ return ret;
}
/**
@@ -860,10 +859,8 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
xdp_buff_set_frags_flag(xdp);
}
- if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
- ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS))
return -ENOMEM;
- }
__skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
rx_buf->page_offset, size);
@@ -924,7 +921,6 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -941,6 +937,31 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
}
/**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
+ *
+ * This function is intended to be called right before running the XDP
+ * program so that the page recycling mechanism can make a correct
+ * decision about the underlying pages; this matters because the XDP
+ * program can change the refcount of a page.
+ */
+static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+ for (int i = 0; i < nr_frags; i++) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+}
+
+/**
* ice_build_skb - Build skb around an existing buffer
* @rx_ring: Rx descriptor ring to transact packets on
* @xdp: xdp_buff pointing to the data
@@ -1051,12 +1072,12 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
rx_buf->page_offset + headlen, size,
xdp->frame_sz);
} else {
- /* buffer is unused, change the act that should be taken later
- * on; data was copied onto skb's linear part so there's no
+ /* buffer is unused, restore biased page count in Rx buffer;
+ * data was copied onto skb's linear part so there's no
* need for adjusting page offset and we can reuse this buffer
* as-is
*/
- rx_buf->act = ICE_SKB_CONSUMED;
+ rx_buf->pagecnt_bias++;
}
if (unlikely(xdp_buff_has_frags(xdp))) {
@@ -1104,6 +1125,65 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
}
/**
+ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+ * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+ * @ntc: current next_to_clean value to be stored on rx_ring
+ * @verdict: return code from XDP program execution
+ *
+ * Walk through the gathered fragments and satisfy the internal page
+ * recycling mechanism; the action taken for each buffer depends on the
+ * verdict returned by the XDP program.
+ */
+static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ u32 *xdp_xmit, u32 ntc, u32 verdict)
+{
+ u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+ u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+ int i;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+ post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+ for (i = 0; i < post_xdp_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+
+ if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ *xdp_xmit |= verdict;
+ } else if (verdict & ICE_XDP_CONSUMED) {
+ buf->pagecnt_bias++;
+ } else if (verdict == ICE_XDP_PASS) {
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+ }
+
+ ice_put_rx_buf(rx_ring, buf);
+
+ if (++idx == cnt)
+ idx = 0;
+ }
+ /* handle buffers that represented frags released by the XDP prog;
+ * for these we keep pagecnt_bias as-is: the refcount from struct page
+ * has already been decremented within the XDP prog, so we do not have
+ * to increase the biased refcnt
+ */
+ for (; i < nr_frags; i++) {
+ buf = &rx_ring->rx_buf[idx];
+ ice_put_rx_buf(rx_ring, buf);
+ if (++idx == cnt)
+ idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+ rx_ring->nr_frags = 0;
+}
+
+/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
@@ -1120,15 +1200,13 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
unsigned int offset = rx_ring->rx_offset;
struct xdp_buff *xdp = &rx_ring->xdp;
- u32 cached_ntc = rx_ring->first_desc;
struct ice_tx_ring *xdp_ring = NULL;
struct bpf_prog *xdp_prog = NULL;
u32 ntc = rx_ring->next_to_clean;
+ u32 cached_ntu, xdp_verdict;
u32 cnt = rx_ring->count;
u32 xdp_xmit = 0;
- u32 cached_ntu;
bool failure;
- u32 first;
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
@@ -1190,6 +1268,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+ ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
break;
}
if (++ntc == cnt)
@@ -1199,15 +1278,15 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf, rx_desc);
- if (rx_buf->act == ICE_XDP_PASS)
+ ice_get_pgcnts(rx_ring);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
goto construct_skb;
total_rx_bytes += xdp_get_buff_len(xdp);
total_rx_pkts++;
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
continue;
construct_skb:
if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1217,18 +1296,12 @@ construct_skb:
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_page_failed++;
- rx_buf->act = ICE_XDP_CONSUMED;
- if (unlikely(xdp_buff_has_frags(xdp)))
- ice_set_rx_bufs_act(xdp, rx_ring,
- ICE_XDP_CONSUMED);
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
- break;
+ xdp_verdict = ICE_XDP_CONSUMED;
}
- xdp->data = NULL;
- rx_ring->first_desc = ntc;
- rx_ring->nr_frags = 0;
+ ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
@@ -1257,23 +1330,6 @@ construct_skb:
total_rx_pkts++;
}
- first = rx_ring->first_desc;
- while (cached_ntc != first) {
- struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc];
-
- if (buf->act & (ICE_XDP_TX | ICE_XDP_REDIR)) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- xdp_xmit |= buf->act;
- } else if (buf->act & ICE_XDP_CONSUMED) {
- buf->pagecnt_bias++;
- } else if (buf->act == ICE_XDP_PASS) {
- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
- }
-
- ice_put_rx_buf(rx_ring, buf);
- if (++cached_ntc >= cnt)
- cached_ntc = 0;
- }
rx_ring->next_to_clean = ntc;
/* return up to cleaned_count buffers to hardware */
failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring));
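Illustrative aside, not part of the patch: a minimal standalone model of the per-buffer bookkeeping that the new ice_put_rx_mbuf() centralizes. The struct, the verdict values, and the helper names below are simplified assumptions for the sketch, not the driver's real definitions.

#include <stdio.h>

/* simplified stand-in for the driver's Rx buffer state (assumption) */
struct buf {
	unsigned int page_offset;
	unsigned int pagecnt_bias;
};

/* hypothetical verdict flags mirroring the PASS/CONSUMED/TX/REDIR split */
enum {
	VERDICT_PASS     = 0,
	VERDICT_CONSUMED = 1,
	VERDICT_TX       = 2,
	VERDICT_REDIR    = 4,
};

static void settle_buf(struct buf *b, unsigned int frame_sz, unsigned int verdict)
{
	if (verdict & (VERDICT_TX | VERDICT_REDIR))
		b->page_offset += frame_sz;	/* page half handed to the XDP Tx path */
	else if (verdict & VERDICT_CONSUMED)
		b->pagecnt_bias++;		/* give the reference back, page stays reusable */
	else					/* PASS: data went into an skb, flip to the other half */
		b->page_offset += frame_sz;
}

int main(void)
{
	struct buf b = { .page_offset = 0, .pagecnt_bias = 1 };

	settle_buf(&b, 2048, VERDICT_CONSUMED);
	printf("offset=%u bias=%u\n", b.page_offset, b.pagecnt_bias);
	return 0;
}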
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index cb347c852ba9..806bce701df3 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -201,7 +201,6 @@ struct ice_rx_buf {
struct page *page;
unsigned int page_offset;
unsigned int pgcnt;
- unsigned int act;
unsigned int pagecnt_bias;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 79f960c6680d..6cf32b404127 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -6,49 +6,6 @@
#include "ice.h"
/**
- * ice_set_rx_bufs_act - propagate Rx buffer action to frags
- * @xdp: XDP buffer representing frame (linear and frags part)
- * @rx_ring: Rx ring struct
- * act: action to store onto Rx buffers related to XDP buffer parts
- *
- * Set action that should be taken before putting Rx buffer from first frag
- * to the last.
- */
-static inline void
-ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
- const unsigned int act)
-{
- u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
- u32 nr_frags = rx_ring->nr_frags + 1;
- u32 idx = rx_ring->first_desc;
- u32 cnt = rx_ring->count;
- struct ice_rx_buf *buf;
-
- for (int i = 0; i < nr_frags; i++) {
- buf = &rx_ring->rx_buf[idx];
- buf->act = act;
-
- if (++idx == cnt)
- idx = 0;
- }
-
- /* adjust pagecnt_bias on frags freed by XDP prog */
- if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
- u32 delta = rx_ring->nr_frags - sinfo_frags;
-
- while (delta) {
- if (idx == 0)
- idx = cnt - 1;
- else
- idx--;
- buf = &rx_ring->rx_buf[idx];
- buf->pagecnt_bias--;
- delta--;
- }
- }
-}
-
-/**
* ice_test_staterr - tests bits in Rx descriptor status and error fields
* @status_err_n: Rx descriptor status_error0 or status_error1 bits
* @stat_err_bits: value to mask
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index c7c0c2f50c26..815ad0bfe832 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -1036,6 +1036,14 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
mutex_init(&vf->cfg_lock);
}
+void ice_deinitialize_vf_entry(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+
+ if (!ice_is_feature_supported(pf, ICE_F_MBX_LIMIT))
+ list_del(&vf->mbx_info.list_entry);
+}
+
/**
* ice_dis_vf_qs - Disable the VF queues
* @vf: pointer to the VF structure
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 0c7e77c0a09f..5392b0404986 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -24,6 +24,7 @@
#endif
void ice_initialize_vf_entry(struct ice_vf *vf);
+void ice_deinitialize_vf_entry(struct ice_vf *vf);
void ice_dis_vf_qs(struct ice_vf *vf);
int ice_check_vf_init(struct ice_vf *vf);
enum virtchnl_status_code ice_err_to_virt_err(int err);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index b4fbb99bfad2..a3d6b8f198a8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2159,8 +2159,13 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
+ err = idpf_set_real_num_queues(vport);
+ if (err)
+ goto unlock;
+
err = idpf_vport_open(vport);
+unlock:
idpf_vport_ctrl_unlock(netdev);
return err;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 2fa9c36e33c9..977741c41498 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -3008,14 +3008,11 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
return -EINVAL;
rsc_segments = DIV_ROUND_UP(skb->data_len, rsc_seg_len);
- if (unlikely(rsc_segments == 1))
- return 0;
NAPI_GRO_CB(skb)->count = rsc_segments;
skb_shinfo(skb)->gso_size = rsc_seg_len;
skb_reset_network_header(skb);
- len = skb->len - skb_transport_offset(skb);
if (ipv4) {
struct iphdr *ipv4h = ip_hdr(skb);
@@ -3024,6 +3021,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
/* Reset and set transport header offset in skb */
skb_set_transport_header(skb, sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
/* Compute the TCP pseudo header checksum*/
tcp_hdr(skb)->check =
@@ -3033,6 +3031,7 @@ static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
tcp_hdr(skb)->check =
~tcp_v6_check(len, &ipv6h->saddr, &ipv6h->daddr, 0);
}
@@ -3072,6 +3071,7 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
idpf_rx_hash(rxq, skb, rx_desc, decoded);
skb->protocol = eth_type_trans(skb, rxq->netdev);
+ skb_record_rx_queue(skb, rxq->idx);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
@@ -3080,8 +3080,6 @@ idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
idpf_rx_csum(rxq, skb, csum_bits, decoded);
- skb_record_rx_queue(skb, rxq->idx);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 56a35d58e7a6..84307bb7313e 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -1096,6 +1096,7 @@ static int igc_init_empty_frame(struct igc_ring *ring,
return -ENOMEM;
}
+ buffer->type = IGC_TX_BUFFER_TYPE_SKB;
buffer->skb = skb;
buffer->protocol = 0;
buffer->bytecount = skb->len;
@@ -2701,8 +2702,9 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
}
static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
- struct xdp_buff *xdp)
+ struct igc_xdp_buff *ctx)
{
+ struct xdp_buff *xdp = &ctx->xdp;
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
struct sk_buff *skb;
@@ -2721,27 +2723,28 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
__skb_pull(skb, metasize);
}
+ if (ctx->rx_ts) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
+ skb_hwtstamps(skb)->netdev_data = ctx->rx_ts;
+ }
+
return skb;
}
static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector,
union igc_adv_rx_desc *desc,
- struct xdp_buff *xdp,
- ktime_t timestamp)
+ struct igc_xdp_buff *ctx)
{
struct igc_ring *ring = q_vector->rx.ring;
struct sk_buff *skb;
- skb = igc_construct_skb_zc(ring, xdp);
+ skb = igc_construct_skb_zc(ring, ctx);
if (!skb) {
ring->rx_stats.alloc_failed++;
set_bit(IGC_RING_FLAG_RX_ALLOC_FAILED, &ring->flags);
return;
}
- if (timestamp)
- skb_hwtstamps(skb)->hwtstamp = timestamp;
-
if (igc_cleanup_headers(ring, desc, skb))
return;
@@ -2777,7 +2780,6 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
union igc_adv_rx_desc *desc;
struct igc_rx_buffer *bi;
struct igc_xdp_buff *ctx;
- ktime_t timestamp = 0;
unsigned int size;
int res;
@@ -2807,6 +2809,8 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
*/
bi->xdp->data_meta += IGC_TS_HDR_LEN;
size -= IGC_TS_HDR_LEN;
+ } else {
+ ctx->rx_ts = NULL;
}
bi->xdp->data_end = bi->xdp->data + size;
@@ -2815,7 +2819,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
case IGC_XDP_PASS:
- igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp);
+ igc_dispatch_skb_zc(q_vector, desc, ctx);
fallthrough;
case IGC_XDP_CONSUMED:
xsk_buff_free(bi->xdp);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
index 683c668672d6..cb07ecd8937d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c
@@ -1122,7 +1122,7 @@ static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
* returns error (ENOENT), then no cage present. If no cage present then
* connection type is backplane or BASE-T.
*/
- return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
+ return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7236f20c9a30..467f81239e12 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2105,7 +2105,7 @@ static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring,
/* hand second half of page back to the ring */
ixgbe_reuse_rx_page(rx_ring, rx_buffer);
} else {
- if (!IS_ERR(skb) && IXGBE_CB(skb)->dma == rx_buffer->dma) {
+ if (skb && IXGBE_CB(skb)->dma == rx_buffer->dma) {
/* the page has been released from the ring */
IXGBE_CB(skb)->page_released = true;
} else {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
index 1641791a2d5b..8ed83fb98862 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
@@ -324,7 +324,7 @@ static const struct mvpp2_cls_flow cls_flows[MVPP2_N_PRS_FLOWS] = {
MVPP2_PRS_RI_VLAN_MASK),
/* Non IP flow, with vlan tag */
MVPP2_DEF_FLOW(MVPP22_FLOW_ETHERNET, MVPP2_FL_NON_IP_TAG,
- MVPP22_CLS_HEK_OPT_VLAN,
+ MVPP22_CLS_HEK_TAGGED,
0, 0),
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 8b7c843446e1..823c1ba456cd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -564,6 +564,9 @@ static int esw_qos_vport_enable(struct mlx5_vport *vport, struct mlx5_esw_sched_
return err;
esw_qos_normalize_min_rate(parent->esw, parent, extack);
+ trace_mlx5_esw_vport_qos_create(vport->dev, vport,
+ vport->qos.sched_node->max_rate,
+ vport->qos.sched_node->bw_share);
return 0;
}
@@ -591,8 +594,11 @@ static int mlx5_esw_qos_vport_enable(struct mlx5_vport *vport, enum sched_node_t
sched_node->vport = vport;
vport->qos.sched_node = sched_node;
err = esw_qos_vport_enable(vport, parent, extack);
- if (err)
+ if (err) {
+ __esw_qos_free_node(sched_node);
esw_qos_put(esw);
+ vport->qos.sched_node = NULL;
+ }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 7db9cab9bedf..d9362eabc6a1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -572,7 +572,7 @@ irq_pool_alloc(struct mlx5_core_dev *dev, int start, int size, char *name,
pool->min_threshold = min_threshold * MLX5_EQ_REFS_PER_IRQ;
pool->max_threshold = max_threshold * MLX5_EQ_REFS_PER_IRQ;
mlx5_core_dbg(dev, "pool->name = %s, pool->size = %d, pool->start = %d",
- name, size, start);
+ name ? name : "mlx5_pcif_pool", size, start);
return pool;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 2bed8c86b7cf..3f64cdbabfa3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -768,7 +768,9 @@ static void __mlxsw_sp_port_get_stats(struct net_device *dev,
err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
if (err)
return;
- mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ err = mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
+ if (err)
+ return;
for (i = 0; i < len; i++) {
data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
if (!hw_stats[i].cells_bytes)
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
index 2ec62c8d86e1..59486fe2ad18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
@@ -20,6 +20,8 @@ nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
struct sk_buff *skb;
skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
+ if (!skb)
+ return NULL;
skb_put(skb, size);
return skb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index bfe6e2d631bd..ab7c2750c104 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -11,6 +11,8 @@
#include "dwmac_dma.h"
#include "dwmac1000.h"
+#define DRIVER_NAME "dwmac-loongson-pci"
+
/* Normal Loongson Tx Summary */
#define DMA_INTR_ENA_NIE_TX_LOONGSON 0x00040000
/* Normal Loongson Rx Summary */
@@ -516,6 +518,19 @@ static int loongson_dwmac_acpi_config(struct pci_dev *pdev,
return 0;
}
+/* Loongson's DWMAC device may take nearly two seconds to complete DMA reset */
+static int loongson_dwmac_fix_reset(void *priv, void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 2000000);
+}
+
static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct plat_stmmacenet_data *plat;
@@ -555,7 +570,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
if (pci_resource_len(pdev, i) == 0)
continue;
- ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ ret = pcim_iomap_regions(pdev, BIT(0), DRIVER_NAME);
if (ret)
goto err_disable_device;
break;
@@ -566,6 +581,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
plat->bsp_priv = ld;
plat->setup = loongson_dwmac_setup;
+ plat->fix_soc_reset = loongson_dwmac_fix_reset;
ld->dev = &pdev->dev;
ld->loongson_id = readl(res.addr + GMAC_VERSION) & 0xff;
@@ -673,7 +689,7 @@ static const struct pci_device_id loongson_dwmac_id_table[] = {
MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
static struct pci_driver loongson_dwmac_driver = {
- .name = "dwmac-loongson-pci",
+ .name = DRIVER_NAME,
.id_table = loongson_dwmac_id_table,
.probe = loongson_dwmac_probe,
.remove = loongson_dwmac_remove,
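Illustrative aside, not part of the patch: a userspace sketch of the poll-with-timeout pattern that loongson_dwmac_fix_reset() relies on; set the self-clearing reset bit, then poll every 10 ms until the hardware clears it or about two seconds elapse. The fake register access below stands in for readl()/writel() and is an assumption for the example.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define SFT_RESET_BIT 0x1u

/* fake MMIO register; a real device clears the bit by itself */
static uint32_t bus_mode;
static unsigned int polls_until_done = 5;

static uint32_t reg_read(void)
{
	if (polls_until_done && --polls_until_done == 0)
		bus_mode &= ~SFT_RESET_BIT;	/* simulate reset completion */
	return bus_mode;
}

static void reg_write(uint32_t v)
{
	bus_mode = v;
}

/* mirrors the readl_poll_timeout() usage: poll, give up after timeout_us */
static int soft_reset(unsigned long poll_us, unsigned long timeout_us)
{
	unsigned long waited = 0;

	reg_write(reg_read() | SFT_RESET_BIT);

	while (reg_read() & SFT_RESET_BIT) {
		if (waited >= timeout_us)
			return -1;
		usleep(poll_us);
		waited += poll_us;
	}
	return 0;
}

int main(void)
{
	printf("reset %s\n", soft_reset(10000, 2000000) ? "timed out" : "completed");
	return 0;
}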
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d04543e5697b..c0ae7db96f46 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2094,6 +2094,11 @@ static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
pp_params.offset = stmmac_rx_offset(priv);
pp_params.max_len = dma_conf->dma_buf_sz;
+ if (priv->sph) {
+ pp_params.offset = 0;
+ pp_params.max_len += stmmac_rx_offset(priv);
+ }
+
rx_q->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rx_q->page_pool)) {
ret = PTR_ERR(rx_q->page_pool);
@@ -2424,6 +2429,11 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
u32 chan = 0;
u8 qmode = 0;
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
if (priv->plat->has_gmac4 || priv->plat->has_xgmac) {
rxfifosz /= rx_channels_count;
@@ -2892,6 +2902,11 @@ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
int rxfifosz = priv->plat->rx_fifo_size;
int txfifosz = priv->plat->tx_fifo_size;
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
/* Adjust for real per queue fifo size */
rxfifosz /= rx_channels_count;
txfifosz /= tx_channels_count;
@@ -5868,6 +5883,9 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
const int mtu = new_mtu;
int ret;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
txfifosz /= priv->plat->tx_queues_to_use;
if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
@@ -7219,29 +7237,15 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
}
- if (!priv->plat->rx_fifo_size) {
- if (priv->dma_cap.rx_fifo_size) {
- priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
- } else {
- dev_err(priv->device, "Can't specify Rx FIFO size\n");
- return -ENODEV;
- }
- } else if (priv->dma_cap.rx_fifo_size &&
- priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
+ if (priv->dma_cap.rx_fifo_size &&
+ priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
dev_warn(priv->device,
"Rx FIFO size (%u) exceeds dma capability\n",
priv->plat->rx_fifo_size);
priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
}
- if (!priv->plat->tx_fifo_size) {
- if (priv->dma_cap.tx_fifo_size) {
- priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
- } else {
- dev_err(priv->device, "Can't specify Tx FIFO size\n");
- return -ENODEV;
- }
- } else if (priv->dma_cap.tx_fifo_size &&
- priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
+ if (priv->dma_cap.tx_fifo_size &&
+ priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
dev_warn(priv->device,
"Tx FIFO size (%u) exceeds dma capability\n",
priv->plat->tx_fifo_size);
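Illustrative aside, not part of the patch: a small sketch, under assumed names, of the FIFO-size policy the stmmac changes converge on; prefer the platform-provided size, fall back to the hardware capability when the platform leaves it zero, and clamp with a warning when the platform value exceeds the capability.

#include <stdio.h>

/* returns the FIFO size to use; plat_size == 0 means "not provided" */
static unsigned int resolve_fifo_size(unsigned int plat_size, unsigned int cap_size)
{
	if (plat_size == 0)
		return cap_size;

	if (cap_size && plat_size > cap_size) {
		fprintf(stderr, "FIFO size (%u) exceeds dma capability, clamping to %u\n",
			plat_size, cap_size);
		return cap_size;
	}

	return plat_size;
}

int main(void)
{
	printf("%u\n", resolve_fifo_size(0, 16384));     /* fall back to capability */
	printf("%u\n", resolve_fifo_size(32768, 16384)); /* clamped with a warning */
	return 0;
}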
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 0d5a862cd78a..3a13d60a947a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -99,6 +99,7 @@ config TI_K3_AM65_CPSW_NUSS
select NET_DEVLINK
select TI_DAVINCI_MDIO
select PHYLINK
+ select PAGE_POOL
select TI_K3_CPPI_DESC_POOL
imply PHY_TI_GMII_SEL
depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index b663271e79f7..2806238629f8 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -828,21 +828,30 @@ static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_tx_chn *tx_chn = data;
+ enum am65_cpsw_tx_buf_type buf_type;
struct cppi5_host_desc_t *desc_tx;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
void **swdata;
desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = *(swdata);
+ dev_kfree_skb_any(skb);
+ } else {
+ xdpf = *(swdata);
+ xdp_return_frame(xdpf);
+ }
- dev_kfree_skb_any(skb);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
}
static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
struct net_device *ndev,
- unsigned int len)
+ unsigned int len,
+ unsigned int headroom)
{
struct sk_buff *skb;
@@ -852,7 +861,7 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb_reserve(skb, headroom);
skb->dev = ndev;
return skb;
@@ -1169,9 +1178,11 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
struct xdp_frame *xdpf;
struct bpf_prog *prog;
struct page *page;
+ int pkt_len;
u32 act;
int err;
+ pkt_len = *len;
prog = READ_ONCE(port->xdp_prog);
if (!prog)
return AM65_CPSW_XDP_PASS;
@@ -1189,8 +1200,10 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
xdpf = xdp_convert_buff_to_frame(xdp);
- if (unlikely(!xdpf))
+ if (unlikely(!xdpf)) {
+ ndev->stats.tx_dropped++;
goto drop;
+ }
__netif_tx_lock(netif_txq, cpu);
err = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
@@ -1199,14 +1212,14 @@ static int am65_cpsw_run_xdp(struct am65_cpsw_rx_flow *flow,
if (err)
goto drop;
- dev_sw_netstats_tx_add(ndev, 1, *len);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
ret = AM65_CPSW_XDP_CONSUMED;
goto out;
case XDP_REDIRECT:
if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
goto drop;
- dev_sw_netstats_rx_add(ndev, *len);
+ dev_sw_netstats_rx_add(ndev, pkt_len);
ret = AM65_CPSW_XDP_REDIRECT;
goto out;
default:
@@ -1315,16 +1328,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
-
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- skb = am65_cpsw_build_skb(page_addr, ndev,
- AM65_CPSW_MAX_PACKET_SIZE);
- if (unlikely(!skb)) {
- new_page = page;
- goto requeue;
- }
-
if (port->xdp_prog) {
xdp_init_buff(&xdp, PAGE_SIZE, &port->xdp_rxq[flow->id]);
xdp_prepare_buff(&xdp, page_addr, AM65_CPSW_HEADROOM,
@@ -1334,9 +1339,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow,
if (*xdp_state != AM65_CPSW_XDP_PASS)
goto allocate;
- /* Compute additional headroom to be reserved */
- headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
- skb_reserve(skb, headroom);
+ headroom = xdp.data - xdp.data_hard_start;
+ } else {
+ headroom = AM65_CPSW_HEADROOM;
+ }
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE, headroom);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
ndev_priv = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 768578c0d958..d59c1744840a 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -474,26 +474,7 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- int ret = 0;
-
- mutex_lock(&iep->ptp_clk_mutex);
-
- if (iep->pps_enabled) {
- ret = -EBUSY;
- goto exit;
- }
-
- if (iep->perout_enabled == !!on)
- goto exit;
-
- ret = icss_iep_perout_enable_hw(iep, req, on);
- if (!ret)
- iep->perout_enabled = !!on;
-
-exit:
- mutex_unlock(&iep->ptp_clk_mutex);
-
- return ret;
+ return -EOPNOTSUPP;
}
static void icss_iep_cap_cmp_work(struct work_struct *work)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 9e7fa012e4fa..f33178f90c42 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -2897,6 +2897,7 @@ static int axienet_probe(struct platform_device *pdev)
lp->phylink_config.dev = &ndev->dev;
lp->phylink_config.type = PHYLINK_NETDEV;
+ lp->phylink_config.mac_managed_pm = true;
lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
MAC_10FD | MAC_100FD | MAC_1000FD;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 642155cb8315..dbb3960126ee 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1902,21 +1902,9 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *geneve, *next;
- struct net_device *dev, *aux;
- /* gather any geneve devices that were moved into this ns */
- for_each_netdev_safe(net, dev, aux)
- if (dev->rtnl_link_ops == &geneve_link_ops)
- unregister_netdevice_queue(dev, head);
-
- /* now gather any other geneve devices that were created in this ns */
- list_for_each_entry_safe(geneve, next, &gn->geneve_list, next) {
- /* If geneve->dev is in the same netns, it was already added
- * to the list by the previous loop.
- */
- if (!net_eq(dev_net(geneve->dev), net))
- unregister_netdevice_queue(geneve->dev, head);
- }
+ list_for_each_entry_safe(geneve, next, &gn->geneve_list, next)
+ geneve_dellink(geneve->dev, head);
}
static void __net_exit geneve_exit_batch_rtnl(struct list_head *net_list,
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index d64740bf44ed..b7b46c5e6399 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -2481,11 +2481,6 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
list_for_each_entry(net, net_list, exit_list) {
struct gtp_net *gn = net_generic(net, gtp_net_id);
struct gtp_dev *gtp, *gtp_next;
- struct net_device *dev;
-
- for_each_netdev(net, dev)
- if (dev->rtnl_link_ops == &gtp_link_ops)
- gtp_dellink(dev, dev_to_kill);
list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
gtp_dellink(gtp->dev, dev_to_kill);
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index c8c23d9be961..41f212209993 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -28,20 +28,18 @@ enum ipa_resource_type {
enum ipa_rsrc_group_id {
/* Source resource group identifiers */
IPA_RSRC_GROUP_SRC_UL_DL = 0,
- IPA_RSRC_GROUP_SRC_UC_RX_Q,
IPA_RSRC_GROUP_SRC_COUNT, /* Last in set; not a source group */
/* Destination resource group identifiers */
- IPA_RSRC_GROUP_DST_UL_DL_DPL = 0,
- IPA_RSRC_GROUP_DST_UNUSED_1,
+ IPA_RSRC_GROUP_DST_UL_DL = 0,
IPA_RSRC_GROUP_DST_COUNT, /* Last; not a destination group */
};
/* QSB configuration data for an SoC having IPA v4.7 */
static const struct ipa_qsb_data ipa_qsb_data[] = {
[IPA_QSB_MASTER_DDR] = {
- .max_writes = 8,
- .max_reads = 0, /* no limit (hardware max) */
+ .max_writes = 12,
+ .max_reads = 13,
.max_reads_beats = 120,
},
};
@@ -81,7 +79,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
.aggregation = true,
.status_enable = true,
.rx = {
@@ -106,6 +104,7 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.filter_support = true,
.config = {
.resource_group = IPA_RSRC_GROUP_SRC_UL_DL,
+ .checksum = true,
.qmap = true,
.status_enable = true,
.tx = {
@@ -128,7 +127,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
},
.endpoint = {
.config = {
- .resource_group = IPA_RSRC_GROUP_DST_UL_DL_DPL,
+ .resource_group = IPA_RSRC_GROUP_DST_UL_DL,
+ .checksum = true,
.qmap = true,
.aggregation = true,
.rx = {
@@ -197,12 +197,12 @@ static const struct ipa_resource ipa_resource_src[] = {
/* Destination resource configuration data for an SoC having IPA v4.7 */
static const struct ipa_resource ipa_resource_dst[] = {
[IPA_RESOURCE_TYPE_DST_DATA_SECTORS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 7, .max = 7,
},
},
[IPA_RESOURCE_TYPE_DST_DPS_DMARS] = {
- .limits[IPA_RSRC_GROUP_DST_UL_DL_DPL] = {
+ .limits[IPA_RSRC_GROUP_DST_UL_DL] = {
.min = 2, .max = 2,
},
},
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index fd591ddb3884..ca62188a317a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -416,20 +416,25 @@ struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port, void *lyr3h,
static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
{
- const struct iphdr *ip4h = ip_hdr(skb);
struct net_device *dev = skb->dev;
struct net *net = dev_net(dev);
- struct rtable *rt;
int err, ret = NET_XMIT_DROP;
+ const struct iphdr *ip4h;
+ struct rtable *rt;
struct flowi4 fl4 = {
.flowi4_oif = dev->ifindex,
- .flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h)),
.flowi4_flags = FLOWI_FLAG_ANYSRC,
.flowi4_mark = skb->mark,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
};
+ if (!pskb_network_may_pull(skb, sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+ fl4.flowi4_tos = inet_dscp_to_dsfield(ip4h_dscp(ip4h));
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
@@ -488,6 +493,12 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
struct net_device *dev = skb->dev;
int err, ret = NET_XMIT_DROP;
+ if (!pskb_network_may_pull(skb, sizeof(struct ipv6hdr))) {
+ DEV_STATS_INC(dev, tx_errors);
+ kfree_skb(skb);
+ return ret;
+ }
+
err = ipvlan_route_v6_outbound(dev, skb);
if (unlikely(err)) {
DEV_STATS_INC(dev, tx_errors);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c8840c3b9a1b..f1d68153987e 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -244,8 +244,22 @@ static netdev_tx_t blackhole_netdev_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+static int blackhole_neigh_output(struct neighbour *n, struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return 0;
+}
+
+static int blackhole_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ n->output = blackhole_neigh_output;
+ return 0;
+}
+
static const struct net_device_ops blackhole_netdev_ops = {
.ndo_start_xmit = blackhole_netdev_xmit,
+ .ndo_neigh_construct = blackhole_neigh_construct,
};
/* This is a dst-dummy device used specifically for invalidated
diff --git a/drivers/net/mctp/mctp-i3c.c b/drivers/net/mctp/mctp-i3c.c
index d247fe483c58..c1e72253063b 100644
--- a/drivers/net/mctp/mctp-i3c.c
+++ b/drivers/net/mctp/mctp-i3c.c
@@ -507,6 +507,9 @@ static int mctp_i3c_header_create(struct sk_buff *skb, struct net_device *dev,
{
struct mctp_i3c_internal_hdr *ihdr;
+ if (!daddr || !saddr)
+ return -EINVAL;
+
skb_push(skb, sizeof(struct mctp_i3c_internal_hdr));
skb_reset_mac_header(skb);
ihdr = (void *)skb_mac_header(skb);
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index 5c80fbee7913..7ab358616e03 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -184,9 +184,11 @@ static const struct ethtool_ops nsim_ethtool_ops = {
static void nsim_ethtool_ring_init(struct netdevsim *ns)
{
+ ns->ethtool.ring.rx_pending = 512;
ns->ethtool.ring.rx_max_pending = 4096;
ns->ethtool.ring.rx_jumbo_max_pending = 4096;
ns->ethtool.ring.rx_mini_max_pending = 4096;
+ ns->ethtool.ring.tx_pending = 512;
ns->ethtool.ring.tx_max_pending = 4096;
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 214b62fba991..b00a315de060 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -2265,12 +2265,15 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
/* Allow the MAC to stop its clock if the PHY has the capability */
pl->mac_tx_clk_stop = phy_eee_tx_clock_stop_capable(phy) > 0;
- /* Explicitly configure whether the PHY is allowed to stop it's
- * receive clock.
- */
- ret = phy_eee_rx_clock_stop(phy, pl->config->eee_rx_clk_stop_enable);
- if (ret == -EOPNOTSUPP)
- ret = 0;
+ if (pl->mac_supports_eee_ops) {
+ /* Explicitly configure whether the PHY is allowed to stop its
+ * receive clock.
+ */
+ ret = phy_eee_rx_clock_stop(phy,
+ pl->config->eee_rx_clk_stop_enable);
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ }
return ret;
}
diff --git a/drivers/net/phy/qcom/qca807x.c b/drivers/net/phy/qcom/qca807x.c
index 3279de857b47..2ad8c2586d64 100644
--- a/drivers/net/phy/qcom/qca807x.c
+++ b/drivers/net/phy/qcom/qca807x.c
@@ -774,7 +774,7 @@ static int qca807x_config_init(struct phy_device *phydev)
control_dac &= ~QCA807X_CONTROL_DAC_MASK;
if (!priv->dac_full_amplitude)
control_dac |= QCA807X_CONTROL_DAC_DSP_AMPLITUDE;
- if (!priv->dac_full_amplitude)
+ if (!priv->dac_full_bias_current)
control_dac |= QCA807X_CONTROL_DAC_DSP_BIAS_CURRENT;
if (!priv->dac_disable_bias_current_tweak)
control_dac |= QCA807X_CONTROL_DAC_BIAS_CURRENT_TWEAK;
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 4583e15ad03a..1420c4efa48e 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -72,6 +72,17 @@
#define PPP_PROTO_LEN 2
#define PPP_LCP_HDRLEN 4
+/* The filter instructions generated by libpcap are constructed
+ * assuming a four-byte PPP header on each packet, where the last
+ * 2 bytes are the protocol field defined in the RFC and the first
+ * byte of the first 2 bytes indicates the direction.
+ * The second byte is currently unused, but we still need to initialize
+ * it to prevent crafted BPF programs from reading them which would
+ * cause reading of uninitialized data.
+ */
+#define PPP_FILTER_OUTBOUND_TAG 0x0100
+#define PPP_FILTER_INBOUND_TAG 0x0000
+
/*
* An instance of /dev/ppp can be associated with either a ppp
* interface unit or a ppp channel. In both cases, file->private_data
@@ -1762,10 +1773,10 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
if (proto < 0x8000) {
#ifdef CONFIG_PPP_FILTER
- /* check if we should pass this packet */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
- *(u8 *)skb_push(skb, 2) = 1;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_OUTBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_OUTBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
@@ -2482,14 +2493,13 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
/* network protocol frame - give it to the kernel */
#ifdef CONFIG_PPP_FILTER
- /* check if the packet passes the pass and active filters */
- /* the filter instructions are constructed assuming
- a four-byte PPP header on each packet */
if (ppp->pass_filter || ppp->active_filter) {
if (skb_unclone(skb, GFP_ATOMIC))
goto err;
-
- *(u8 *)skb_push(skb, 2) = 0;
+ /* Check if the packet passes the pass and active filters.
+ * See comment for PPP_FILTER_INBOUND_TAG above.
+ */
+ *(__be16 *)skb_push(skb, 2) = htons(PPP_FILTER_INBOUND_TAG);
if (ppp->pass_filter &&
bpf_prog_run(ppp->pass_filter, skb) == 0) {
if (ppp->debug & 1)
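Illustrative aside, not part of the patch: a userspace sketch of the four-byte pseudo header that libpcap-generated PPP filters expect, using the tag values introduced above; the helper and buffer layout are assumptions for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

#define PPP_FILTER_OUTBOUND_TAG 0x0100
#define PPP_FILTER_INBOUND_TAG  0x0000

/* Build the four bytes a libpcap-generated PPP filter expects to see:
 * two direction-tag bytes followed by the two-byte protocol field.
 */
static void build_pseudo_header(uint8_t *buf, int outbound, uint16_t proto)
{
	uint16_t tag = htons(outbound ? PPP_FILTER_OUTBOUND_TAG
				      : PPP_FILTER_INBOUND_TAG);
	uint16_t proto_be = htons(proto);

	memcpy(buf, &tag, 2);          /* direction tag, second byte always 0 */
	memcpy(buf + 2, &proto_be, 2); /* PPP protocol field */
}

int main(void)
{
	uint8_t hdr[4];

	build_pseudo_header(hdr, 1, 0x0021 /* IPv4 */);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;
}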
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
index fc9e23927b3b..7d60a714ca53 100644
--- a/drivers/net/pse-pd/pd692x0.c
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -1047,7 +1047,7 @@ static int pd692x0_pi_get_pw_limit(struct pse_controller_dev *pcdev,
if (ret < 0)
return ret;
- return pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
+ return pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
}
static int pd692x0_pi_set_pw_limit(struct pse_controller_dev *pcdev,
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 4f2a54afc4d0..4602e26eb8c8 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -319,7 +319,7 @@ static int pse_pi_get_current_limit(struct regulator_dev *rdev)
goto out;
mW = ret;
- ret = pse_pi_get_voltage(rdev);
+ ret = _pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
ret = -ERANGE;
@@ -356,7 +356,7 @@ static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
id = rdev_get_id(rdev);
mutex_lock(&pcdev->lock);
- ret = pse_pi_get_voltage(rdev);
+ ret = _pse_pi_get_voltage(rdev);
if (!ret) {
dev_err(pcdev->dev, "Voltage null\n");
ret = -ERANGE;
diff --git a/drivers/net/team/team_core.c b/drivers/net/team/team_core.c
index dc7cbd6a9798..f4019815f473 100644
--- a/drivers/net/team/team_core.c
+++ b/drivers/net/team/team_core.c
@@ -2639,7 +2639,9 @@ int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
ctx.data.u32_val = nla_get_u32(attr_data);
break;
case TEAM_OPTION_TYPE_STRING:
- if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
+ if (nla_len(attr_data) > TEAM_STRING_MAX_LEN ||
+ !memchr(nla_data(attr_data), '\0',
+ nla_len(attr_data))) {
err = -EINVAL;
goto team_put;
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 28624cca91f8..acf96f262488 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -574,18 +574,14 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
return ret;
}
-static inline bool tun_capable(struct tun_struct *tun)
+static inline bool tun_not_capable(struct tun_struct *tun)
{
const struct cred *cred = current_cred();
struct net *net = dev_net(tun->dev);
- if (ns_capable(net->user_ns, CAP_NET_ADMIN))
- return 1;
- if (uid_valid(tun->owner) && uid_eq(cred->euid, tun->owner))
- return 1;
- if (gid_valid(tun->group) && in_egroup_p(tun->group))
- return 1;
- return 0;
+ return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
+ (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
+ !ns_capable(net->user_ns, CAP_NET_ADMIN);
}
static void tun_set_real_num_queues(struct tun_struct *tun)
@@ -2782,7 +2778,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
!!(tun->flags & IFF_MULTI_QUEUE))
return -EINVAL;
- if (!tun_capable(tun))
+ if (tun_not_capable(tun))
return -EPERM;
err = security_tun_dev_open(tun->security);
if (err < 0)
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index 46af78caf457..0bfa37c14059 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -179,9 +179,7 @@ static int genelink_bind(struct usbnet *dev, struct usb_interface *intf)
{
dev->hard_mtu = GL_RCV_BUF_SIZE;
dev->net->hard_header_len += 4;
- dev->in = usb_rcvbulkpipe(dev->udev, dev->driver_info->in);
- dev->out = usb_sndbulkpipe(dev->udev, dev->driver_info->out);
- return 0;
+ return usbnet_get_endpoints(dev, intf);
}
static const struct driver_info genelink_info = {
diff --git a/drivers/net/vmxnet3/vmxnet3_xdp.c b/drivers/net/vmxnet3/vmxnet3_xdp.c
index 1341374a4588..616ecc38d172 100644
--- a/drivers/net/vmxnet3/vmxnet3_xdp.c
+++ b/drivers/net/vmxnet3/vmxnet3_xdp.c
@@ -28,7 +28,7 @@ vmxnet3_xdp_get_tq(struct vmxnet3_adapter *adapter)
if (likely(cpu < tq_number))
tq = &adapter->tx_queue[cpu];
else
- tq = &adapter->tx_queue[reciprocal_scale(cpu, tq_number)];
+ tq = &adapter->tx_queue[cpu % tq_number];
return tq;
}
@@ -124,6 +124,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
u32 buf_size;
u32 dw2;
+ spin_lock_irq(&tq->tx_lock);
dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;
dw2 |= xdpf->len;
ctx.sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
@@ -134,6 +135,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
if (vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) == 0) {
tq->stats.tx_ring_full++;
+ spin_unlock_irq(&tq->tx_lock);
return -ENOSPC;
}
@@ -142,8 +144,10 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
xdpf->data, buf_size,
DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
+ if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr)) {
+ spin_unlock_irq(&tq->tx_lock);
return -EFAULT;
+ }
tbi->map_type |= VMXNET3_MAP_SINGLE;
} else { /* XDP buffer from page pool */
page = virt_to_page(xdpf->data);
@@ -182,6 +186,7 @@ vmxnet3_xdp_xmit_frame(struct vmxnet3_adapter *adapter,
dma_wmb();
gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
VMXNET3_TXD_GEN);
+ spin_unlock_irq(&tq->tx_lock);
/* No need to handle the case when tx_num_deferred doesn't reach
* threshold. Backend driver at hypervisor side will poll and reset
@@ -225,6 +230,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
{
struct vmxnet3_adapter *adapter = netdev_priv(dev);
struct vmxnet3_tx_queue *tq;
+ struct netdev_queue *nq;
int i;
if (unlikely(test_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)))
@@ -236,6 +242,9 @@ vmxnet3_xdp_xmit(struct net_device *dev,
if (tq->stopped)
return -ENETDOWN;
+ nq = netdev_get_tx_queue(adapter->netdev, tq->qid);
+
+ __netif_tx_lock(nq, smp_processor_id());
for (i = 0; i < n; i++) {
if (vmxnet3_xdp_xmit_frame(adapter, frames[i], tq, true)) {
tq->stats.xdp_xmit_err++;
@@ -243,6 +252,7 @@ vmxnet3_xdp_xmit(struct net_device *dev,
}
}
tq->stats.xdp_xmit += i;
+ __netif_tx_unlock(nq);
return i;
}
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 05c10acb2a57..92516189e792 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -2898,8 +2898,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;
- if (vxlan->cfg.flags & VXLAN_F_VNIFILTER)
- vxlan_vnigroup_init(vxlan);
+ if (vxlan->cfg.flags & VXLAN_F_VNIFILTER) {
+ err = vxlan_vnigroup_init(vxlan);
+ if (err)
+ return err;
+ }
err = gro_cells_init(&vxlan->gro_cells, dev);
if (err)
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 4dd6cdf84571..abb510d235a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -4851,6 +4851,22 @@ static struct ath12k_reg_rule
return reg_rule_ptr;
}
+static u8 ath12k_wmi_ignore_num_extra_rules(struct ath12k_wmi_reg_rule_ext_params *rule,
+ u32 num_reg_rules)
+{
+ u8 num_invalid_5ghz_rules = 0;
+ u32 count, start_freq;
+
+ for (count = 0; count < num_reg_rules; count++) {
+ start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
+
+ if (start_freq >= ATH12K_MIN_6G_FREQ)
+ num_invalid_5ghz_rules++;
+ }
+
+ return num_invalid_5ghz_rules;
+}
+
static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
struct sk_buff *skb,
struct ath12k_reg_info *reg_info)
@@ -4861,6 +4877,7 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
u32 num_2g_reg_rules, num_5g_reg_rules;
u32 num_6g_reg_rules_ap[WMI_REG_CURRENT_MAX_AP_TYPE];
u32 num_6g_reg_rules_cl[WMI_REG_CURRENT_MAX_AP_TYPE][WMI_REG_MAX_CLIENT_TYPE];
+ u8 num_invalid_5ghz_ext_rules;
u32 total_reg_rules = 0;
int ret, i, j;
@@ -4954,20 +4971,6 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
memcpy(reg_info->alpha2, &ev->alpha2, REG_ALPHA2_LEN);
- /* FIXME: Currently FW includes 6G reg rule also in 5G rule
- * list for country US.
- * Having same 6G reg rule in 5G and 6G rules list causes
- * intersect check to be true, and same rules will be shown
- * multiple times in iw cmd. So added hack below to avoid
- * parsing 6G rule from 5G reg rule list, and this can be
- * removed later, after FW updates to remove 6G reg rule
- * from 5G rules list.
- */
- if (memcmp(reg_info->alpha2, "US", 2) == 0) {
- reg_info->num_5g_reg_rules = REG_US_5G_NUM_REG_RULES;
- num_5g_reg_rules = reg_info->num_5g_reg_rules;
- }
-
reg_info->dfs_region = le32_to_cpu(ev->dfs_region);
reg_info->phybitmap = le32_to_cpu(ev->phybitmap);
reg_info->num_phy = le32_to_cpu(ev->num_phy);
@@ -5070,8 +5073,29 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
+ ext_wmi_reg_rule += num_2g_reg_rules;
+
+ /* For a few countries, firmware might include a 6 GHz reg rule in
+ * the 5 GHz rule list in addition to the separate 6 GHz rule.
+ * Having the same 6 GHz reg rule in both the 5 GHz and 6 GHz rule
+ * lists causes the intersect check to be true, and the same rules
+ * will be shown multiple times in the iw cmd.
+ * Hence, avoid parsing 6 GHz rules from the 5 GHz reg rule list.
+ */
+ num_invalid_5ghz_ext_rules = ath12k_wmi_ignore_num_extra_rules(ext_wmi_reg_rule,
+ num_5g_reg_rules);
+
+ if (num_invalid_5ghz_ext_rules) {
+ ath12k_dbg(ab, ATH12K_DBG_WMI,
+ "CC: %s 5 GHz reg rules number %d from fw, %d number of invalid 5 GHz rules",
+ reg_info->alpha2, reg_info->num_5g_reg_rules,
+ num_invalid_5ghz_ext_rules);
+
+ num_5g_reg_rules = num_5g_reg_rules - num_invalid_5ghz_ext_rules;
+ reg_info->num_5g_reg_rules = num_5g_reg_rules;
+ }
+
if (num_5g_reg_rules) {
- ext_wmi_reg_rule += num_2g_reg_rules;
reg_info->reg_rules_5g_ptr =
create_ext_reg_rules_from_wmi(num_5g_reg_rules,
ext_wmi_reg_rule);
@@ -5083,7 +5107,12 @@ static int ath12k_pull_reg_chan_list_ext_update_ev(struct ath12k_base *ab,
}
}
- ext_wmi_reg_rule += num_5g_reg_rules;
+ /* We have adjusted the number of 5 GHz reg rules above, but the
+ * ext_wmi_reg_rule pointer still needs to be advanced past those rules.
+ *
+ * NOTE: num_invalid_5ghz_ext_rules will be 0 in all other cases.
+ */
+ ext_wmi_reg_rule += (num_5g_reg_rules + num_invalid_5ghz_ext_rules);
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
reg_info->reg_rules_6g_ap_ptr[i] =
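Illustrative aside, not part of the patch: a standalone sketch of the trimming done by ath12k_wmi_ignore_num_extra_rules(); rules in the 5 GHz list whose start frequency already lies in the 6 GHz band are counted and dropped from the 5 GHz total. The 5925 MHz boundary and the struct layout are assumptions standing in for ATH12K_MIN_6G_FREQ and the firmware rule format.

#include <stdio.h>

#define MIN_6G_FREQ_MHZ 5925 /* assumed boundary for the example */

struct reg_rule { unsigned int start_freq_mhz; };

static unsigned int count_invalid_5ghz(const struct reg_rule *rules, unsigned int n)
{
	unsigned int i, invalid = 0;

	for (i = 0; i < n; i++)
		if (rules[i].start_freq_mhz >= MIN_6G_FREQ_MHZ)
			invalid++;
	return invalid;
}

int main(void)
{
	struct reg_rule rules[] = { {5170}, {5490}, {5925}, {6425} };
	unsigned int n = sizeof(rules) / sizeof(rules[0]);
	unsigned int invalid = count_invalid_5ghz(rules, n);

	printf("5 GHz rules kept: %u, dropped: %u\n", n - invalid, invalid);
	return 0;
}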
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index b6a197389277..45fe699ce8a5 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -4073,7 +4073,6 @@ struct ath12k_wmi_eht_rate_set_params {
#define MAX_REG_RULES 10
#define REG_ALPHA2_LEN 2
#define MAX_6G_REG_RULES 5
-#define REG_US_5G_NUM_REG_RULES 4
enum wmi_start_event_param {
WMI_VDEV_START_RESP_EVENT = 0,
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 60eb95fc19a5..6bc107476a2a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1172,6 +1172,7 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
mmc_pm_flag_t sdio_flags;
+ bool cap_power_off;
int ret = 0;
func = container_of(dev, struct sdio_func, dev);
@@ -1179,19 +1180,23 @@ static int brcmf_ops_sdio_suspend(struct device *dev)
if (func->num != 1)
return 0;
+ cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
bus_if = dev_get_drvdata(dev);
sdiodev = bus_if->bus_priv.sdio;
- if (sdiodev->wowl_enabled) {
+ if (sdiodev->wowl_enabled || !cap_power_off) {
brcmf_sdiod_freezer_on(sdiodev);
brcmf_sdio_wd_timer(sdiodev->bus, 0);
sdio_flags = MMC_PM_KEEP_POWER;
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
- enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
- else
- sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+
+ if (sdiodev->wowl_enabled) {
+ if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ enable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
+ else
+ sdio_flags |= MMC_PM_WAKE_SDIO_IRQ;
+ }
if (sdio_set_host_pm_flags(sdiodev->func1, sdio_flags))
brcmf_err("Failed to set pm_flags %x\n", sdio_flags);
@@ -1213,18 +1218,19 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct sdio_func *func = container_of(dev, struct sdio_func, dev);
int ret = 0;
+ bool cap_power_off = !!(func->card->host->caps & MMC_CAP_POWER_OFF_CARD);
brcmf_dbg(SDIO, "Enter: F%d\n", func->num);
if (func->num != 2)
return 0;
- if (!sdiodev->wowl_enabled) {
+ if (!sdiodev->wowl_enabled && cap_power_off) {
/* bus was powered off and device removed, probe again */
ret = brcmf_sdiod_probe(sdiodev);
if (ret)
brcmf_err("Failed to probe device on resume\n");
} else {
- if (sdiodev->settings->bus.sdio.oob_irq_supported)
+ if (sdiodev->wowl_enabled && sdiodev->settings->bus.sdio.oob_irq_supported)
disable_irq_wake(sdiodev->settings->bus.sdio.oob_irq_nr);
brcmf_sdiod_freezer_off(sdiodev);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index e4395b1f8c11..d2caa80e9412 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -2712,7 +2712,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4355_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID, WCC),
@@ -2723,7 +2723,7 @@ static const struct pci_device_id brcmf_pcie_devid_table[] = {
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_2G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_5G_DEVICE_ID, WCC),
BRCMF_PCIE_DEVICE(BRCM_PCIE_43602_RAW_DEVICE_ID, WCC),
- BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC),
+ BRCMF_PCIE_DEVICE(BRCM_PCIE_4364_DEVICE_ID, WCC_SEED),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_2G_DEVICE_ID, BCA),
BRCMF_PCIE_DEVICE(BRCM_PCIE_4365_5G_DEVICE_ID, BCA),
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index fb2ea38e89ac..6594216f873c 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -558,41 +558,71 @@ static void iwl_dump_prph(struct iwl_fw_runtime *fwrt,
}
/*
- * alloc_sgtable - allocates scallerlist table in the given size,
- * fills it with pages and returns it
+ * alloc_sgtable - allocates a (chained) scatterlist of the given size,
+ * fills it with pages and returns it
* @size: the size (in bytes) of the table
-*/
-static struct scatterlist *alloc_sgtable(int size)
+ */
+static struct scatterlist *alloc_sgtable(ssize_t size)
{
- int alloc_size, nents, i;
- struct page *new_page;
- struct scatterlist *iter;
- struct scatterlist *table;
+ struct scatterlist *result = NULL, *prev;
+ int nents, i, n_prev;
nents = DIV_ROUND_UP(size, PAGE_SIZE);
- table = kcalloc(nents, sizeof(*table), GFP_KERNEL);
- if (!table)
- return NULL;
- sg_init_table(table, nents);
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = alloc_page(GFP_KERNEL);
- if (!new_page) {
- /* release all previous allocated pages in the table */
- iter = table;
- for_each_sg(table, iter, sg_nents(table), i) {
- new_page = sg_page(iter);
- if (new_page)
- __free_page(new_page);
- }
- kfree(table);
+
+#define N_ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(*result))
+ /*
+ * We need an additional entry for table chaining,
+ * this ensures the loop can finish i.e. we can
+ * fit at least two entries per page (obviously,
+ * many more really fit.)
+ */
+ BUILD_BUG_ON(N_ENTRIES_PER_PAGE < 2);
+
+ while (nents > 0) {
+ struct scatterlist *new, *iter;
+ int n_fill, n_alloc;
+
+ if (nents <= N_ENTRIES_PER_PAGE) {
+ /* last needed table */
+ n_fill = nents;
+ n_alloc = nents;
+ nents = 0;
+ } else {
+ /* fill a page with entries */
+ n_alloc = N_ENTRIES_PER_PAGE;
+ /* reserve one for chaining */
+ n_fill = n_alloc - 1;
+ nents -= n_fill;
+ }
+
+ new = kcalloc(n_alloc, sizeof(*new), GFP_KERNEL);
+ if (!new) {
+ if (result)
+ _devcd_free_sgtable(result);
return NULL;
}
- alloc_size = min_t(int, size, PAGE_SIZE);
- size -= PAGE_SIZE;
- sg_set_page(iter, new_page, alloc_size, 0);
+ sg_init_table(new, n_alloc);
+
+ if (!result)
+ result = new;
+ else
+ sg_chain(prev, n_prev, new);
+ prev = new;
+ n_prev = n_alloc;
+
+ for_each_sg(new, iter, n_fill, i) {
+ struct page *new_page = alloc_page(GFP_KERNEL);
+
+ if (!new_page) {
+ _devcd_free_sgtable(result);
+ return NULL;
+ }
+
+ sg_set_page(iter, new_page, PAGE_SIZE, 0);
+ }
}
- return table;
+
+ return result;
}
static void iwl_fw_get_prph_len(struct iwl_fw_runtime *fwrt,
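
The rewritten alloc_sgtable() above builds the scatterlist one page of entries at a time and reserves the last slot of every full page for chaining to the next table. A minimal userspace sketch of that sizing logic follows; the 4 KiB page size and 32-byte entry size are illustrative assumptions, not values taken from the driver.

    /* Userspace sketch of the chained-scatterlist sizing in the hunk above. */
    #include <stdio.h>

    #define PAGE_SIZE        4096UL
    #define SG_ENTRY_SIZE    32UL                      /* hypothetical sizeof(struct scatterlist) */
    #define ENTRIES_PER_PAGE (PAGE_SIZE / SG_ENTRY_SIZE)

    static void plan_sg_tables(unsigned long size_bytes)
    {
        unsigned long nents = (size_bytes + PAGE_SIZE - 1) / PAGE_SIZE; /* DIV_ROUND_UP */
        unsigned long tables = 0, allocated_entries = 0;

        while (nents > 0) {
            unsigned long n_alloc, n_fill;

            if (nents <= ENTRIES_PER_PAGE) {
                n_alloc = n_fill = nents;   /* last table, no chain entry needed */
                nents = 0;
            } else {
                n_alloc = ENTRIES_PER_PAGE; /* one page worth of entries */
                n_fill = n_alloc - 1;       /* last slot chains to the next table */
                nents -= n_fill;
            }
            tables++;
            allocated_entries += n_alloc;
        }

        printf("%lu bytes -> %lu tables, %lu scatterlist entries\n",
               size_bytes, tables, allocated_entries);
    }

    int main(void)
    {
        plan_sg_tables(64UL << 10);  /* small dump: fits in a single table */
        plan_sg_tables(8UL << 20);   /* large dump: several chained tables */
        return 0;
    }
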
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index 8e0c85a1240d..c7b261c8ec96 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -540,6 +540,9 @@ bool iwl_fwrt_read_err_table(struct iwl_trans *trans, u32 base, u32 *err_id)
} err_info = {};
int ret;
+ if (err_id)
+ *err_id = 0;
+
if (!base)
return false;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index d3a65f33097c..352b6e73e08f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -1181,7 +1181,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
if (tlv_len != sizeof(*fseq_ver))
goto invalid_tlv_len;
- IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
+ IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
fseq_ver->version);
}
break;
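
The format-string change above prints the FSEQ version with an explicit precision because the 32-byte TLV field is not guaranteed to be NUL-terminated. A small standalone demo of the same idea; the buffer name and contents are only for illustration.

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* A fixed-size field filled to capacity, with no terminating NUL. */
        char version[32];
        memset(version, 'A', sizeof(version));

        /*
         * "%.32s" stops after at most 32 characters even when no NUL is
         * found, so the read never runs past the end of the buffer.
         * A plain "%s" would keep reading adjacent memory.
         */
        printf("TLV_FW_FSEQ_VERSION: %.32s\n", version);
        return 0;
    }
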
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 129b6bdf9ef9..82ca7f8b1bb2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -3092,8 +3092,14 @@ static void iwl_mvm_d3_disconnect_iter(void *data, u8 *mac,
ieee80211_resume_disconnect(vif);
}
-static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+enum rt_status {
+ FW_ALIVE,
+ FW_NEEDS_RESET,
+ FW_ERROR,
+};
+
+static enum rt_status iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
u32 err_id;
@@ -3101,29 +3107,35 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
- struct cfg80211_wowlan_wakeup wakeup = {
- .rfkill_release = true,
- };
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ IWL_WARN(mvm, "Rfkill was toggled during suspend\n");
+ if (vif) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+
+ return FW_NEEDS_RESET;
}
- return true;
+ return FW_ERROR;
}
/* check if we have lmac2 set and check for error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[1],
NULL))
- return true;
+ return FW_ERROR;
/* check for umac error */
if (iwl_fwrt_read_err_table(mvm->trans,
mvm->trans->dbg.umac_error_event_table,
NULL))
- return true;
+ return FW_ERROR;
- return false;
+ return FW_ALIVE;
}
/*
@@ -3492,6 +3504,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
bool d0i3_first = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_D0I3_END_FIRST);
bool resume_notif_based = iwl_mvm_d3_resume_notif_based(mvm);
+ enum rt_status rt_status;
bool keep = false;
mutex_lock(&mvm->mutex);
@@ -3515,14 +3528,19 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, vif)) {
- IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, vif);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
ret = 1;
goto err;
}
@@ -3679,6 +3697,7 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
.notif_expected =
IWL_D3_NOTIF_D3_END_NOTIF,
};
+ enum rt_status rt_status;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -3688,14 +3707,20 @@ int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
mvm->last_reset_or_resume_time_jiffies = jiffies;
iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
- if (iwl_mvm_check_rt_status(mvm, NULL)) {
- IWL_ERR(mvm, "FW Error occurred during suspend. Restarting.\n");
+ rt_status = iwl_mvm_check_rt_status(mvm, NULL);
+ if (rt_status != FW_ALIVE) {
set_bit(STATUS_FW_ERROR, &mvm->trans->status);
- iwl_mvm_dump_nic_error_log(mvm);
- iwl_dbg_tlv_time_point(&mvm->fwrt,
- IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
- iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
- false, 0);
+ if (rt_status == FW_ERROR) {
+ IWL_ERR(mvm,
+ "iwl_mvm_check_rt_status failed, device is gone during suspend\n");
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT,
+ NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt,
+ &iwl_dump_desc_assert,
+ false, 0);
+ }
mvm->trans->state = IWL_TRANS_NO_FW;
ret = -ENODEV;
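
The bool-to-enum conversion above lets the resume paths tell an rfkill-triggered wakeup (restart without an error dump) apart from a real firmware error. A stripped-down sketch of that pattern, reusing the enum names purely for illustration and with made-up inputs:

    #include <stdio.h>
    #include <stdbool.h>

    /* Tri-state result mirroring the patch's enum rt_status. */
    enum rt_status { FW_ALIVE, FW_NEEDS_RESET, FW_ERROR };

    /* Hypothetical check: rfkill during suspend needs a reset, a logged error table is an error. */
    static enum rt_status check_rt_status(bool rfkill_toggled, bool error_logged)
    {
        if (rfkill_toggled)
            return FW_NEEDS_RESET;
        if (error_logged)
            return FW_ERROR;
        return FW_ALIVE;
    }

    int main(void)
    {
        enum rt_status st = check_rt_status(true, false);

        if (st != FW_ALIVE) {
            if (st == FW_ERROR)
                puts("collect error log and debug dump"); /* only for real errors */
            puts("restart firmware");                      /* done in both cases */
        }
        return 0;
    }
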
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 83e3c1160362..55d035b896e9 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -1479,6 +1479,13 @@ static ssize_t iwl_dbgfs_fw_dbg_clear_write(struct iwl_mvm *mvm,
if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000)
return -EOPNOTSUPP;
+ /*
+ * If the firmware is not running, silently succeed since there is
+ * no data to clear.
+ */
+ if (!iwl_mvm_firmware_running(mvm))
+ return count;
+
mutex_lock(&mvm->mutex);
iwl_fw_dbg_clear_monitor_buf(&mvm->fwrt);
mutex_unlock(&mvm->mutex);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index 09fd8752046e..14ea89f931bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -995,7 +995,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
u32 rate_n_flags = phy_data->rate_n_flags;
- u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -1050,13 +1050,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
if (he_mu)
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG)
he->data6 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
rate_n_flags),
IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index 9216c43a35c4..ebfa88b38b71 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -1030,6 +1030,8 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
/* End TE, notify mac80211 */
mvmvif->time_event_data.id = SESSION_PROTECT_CONF_MAX_ID;
mvmvif->time_event_data.link_id = -1;
+ /* set the bit so the ROC cleanup will actually clean up */
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
iwl_mvm_roc_finished(mvm);
ieee80211_remain_on_channel_expired(mvm->hw);
} else if (le32_to_cpu(notif->start)) {
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 856b7e9f717d..45460f93d24a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -646,7 +646,8 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
unsigned int len);
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room);
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset);
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 1f483f15c238..401919f9fe88 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -188,7 +188,8 @@ static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len);
if (!sgt)
return -ENOMEM;
@@ -347,6 +348,7 @@ iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
return tfd;
out_err:
+ iwl_pcie_free_tso_pages(trans, skb, out_meta);
iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
return NULL;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 334ebd4c12fa..7b6071a59b69 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2025 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1855,6 +1855,7 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
* @cmd_meta: command meta to store the scatter list information for unmapping
* @hdr: output argument for TSO headers
* @hdr_room: requested length for TSO headers
+ * @offset: offset into the data from which mapping should start
*
* Allocate space for a scatter gather list and TSO headers and map the SKB
* using the scatter gather list. The SKB is unmapped again when the page is
@@ -1864,18 +1865,20 @@ dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
*/
struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_cmd_meta *cmd_meta,
- u8 **hdr, unsigned int hdr_room)
+ u8 **hdr, unsigned int hdr_room,
+ unsigned int offset)
{
struct sg_table *sgt;
+ unsigned int n_segments;
if (WARN_ON_ONCE(skb_has_frag_list(skb)))
return NULL;
+ n_segments = DIV_ROUND_UP(skb->len - offset, skb_shinfo(skb)->gso_size);
*hdr = iwl_pcie_get_page_hdr(trans,
hdr_room + __alignof__(struct sg_table) +
sizeof(struct sg_table) +
- (skb_shinfo(skb)->nr_frags + 1) *
- sizeof(struct scatterlist),
+ n_segments * sizeof(struct scatterlist),
skb);
if (!*hdr)
return NULL;
@@ -1883,11 +1886,11 @@ struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
sgt->sgl = (void *)(sgt + 1);
- sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+ sg_init_table(sgt->sgl, n_segments);
/* Only map the data, not the header (it is copied to the TSO page) */
- sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
- skb->data_len);
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, offset,
+ skb->len - offset);
if (WARN_ON_ONCE(sgt->orig_nents <= 0))
return NULL;
@@ -1939,7 +1942,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
+ snap_ip_tcp_hdrlen + hdr_len + iv_len);
if (!sgt)
return -ENOMEM;
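
iwl_pcie_prep_tso() now maps the skb from the given offset and sizes the scatterlist by GSO segments instead of "nr_frags + 1". The arithmetic is small enough to show standalone; the packet length, header offset and MSS below are invented example numbers.

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int skb_len = 65536;     /* hypothetical TSO skb length */
        unsigned int hdr_offset = 66;     /* headers already copied to the TSO page */
        unsigned int gso_size = 1448;     /* hypothetical MSS */

        /* One scatterlist slot is reserved per GSO segment of the payload. */
        unsigned int n_segments = DIV_ROUND_UP(skb_len - hdr_offset, gso_size);

        printf("payload bytes: %u, scatterlist entries reserved: %u\n",
               skb_len - hdr_offset, n_segments);
        return 0;
    }
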
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index d5a9360323d2..8755c5e6a65b 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -220,7 +220,7 @@ static int mbim_rx_verify_nth16(struct mhi_mbim_context *mbim, struct sk_buff *s
if (mbim->rx_seq + 1 != le16_to_cpu(nth16->wSequence) &&
(mbim->rx_seq || le16_to_cpu(nth16->wSequence)) &&
!(mbim->rx_seq == 0xffff && !le16_to_cpu(nth16->wSequence))) {
- net_err_ratelimited("sequence number glitch prev=%d curr=%d\n",
+ net_dbg_ratelimited("sequence number glitch prev=%d curr=%d\n",
mbim->rx_seq, le16_to_cpu(nth16->wSequence));
}
mbim->rx_seq = le16_to_cpu(nth16->wSequence);
diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
index 1de11b722f04..a060f69558e7 100644
--- a/drivers/nvme/host/apple.c
+++ b/drivers/nvme/host/apple.c
@@ -1011,25 +1011,37 @@ static void apple_nvme_reset_work(struct work_struct *work)
ret = apple_rtkit_shutdown(anv->rtk);
if (ret)
goto out;
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
}
- writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ /*
+ * Only do the soft-reset if the CPU is not running, which means either we
+ * or the previous stage shut it down cleanly.
+ */
+ if (!(readl(anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL) &
+ APPLE_ANS_COPROC_CPU_CONTROL_RUN)) {
- ret = reset_control_assert(anv->reset);
- if (ret)
- goto out;
+ ret = reset_control_assert(anv->reset);
+ if (ret)
+ goto out;
- ret = apple_rtkit_reinit(anv->rtk);
- if (ret)
- goto out;
+ ret = apple_rtkit_reinit(anv->rtk);
+ if (ret)
+ goto out;
- ret = reset_control_deassert(anv->reset);
- if (ret)
- goto out;
+ ret = reset_control_deassert(anv->reset);
+ if (ret)
+ goto out;
+
+ writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
+ anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+
+ ret = apple_rtkit_boot(anv->rtk);
+ } else {
+ ret = apple_rtkit_wake(anv->rtk);
+ }
- writel(APPLE_ANS_COPROC_CPU_CONTROL_RUN,
- anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
- ret = apple_rtkit_boot(anv->rtk);
if (ret) {
dev_err(anv->dev, "ANS did not boot");
goto out;
@@ -1516,6 +1528,7 @@ static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
return anv;
put_dev:
+ apple_nvme_detach_genpd(anv);
put_device(anv->dev);
return ERR_PTR(ret);
}
@@ -1549,6 +1562,7 @@ out_uninit_ctrl:
nvme_uninit_ctrl(&anv->ctrl);
out_put_ctrl:
nvme_put_ctrl(&anv->ctrl);
+ apple_nvme_detach_genpd(anv);
return ret;
}
@@ -1563,9 +1577,12 @@ static void apple_nvme_remove(struct platform_device *pdev)
apple_nvme_disable(anv, true);
nvme_uninit_ctrl(&anv->ctrl);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
+
apple_nvme_detach_genpd(anv);
}
@@ -1574,8 +1591,11 @@ static void apple_nvme_shutdown(struct platform_device *pdev)
struct apple_nvme *anv = platform_get_drvdata(pdev);
apple_nvme_disable(anv, true);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
apple_rtkit_shutdown(anv->rtk);
+
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
}
static int apple_nvme_resume(struct device *dev)
@@ -1592,10 +1612,11 @@ static int apple_nvme_suspend(struct device *dev)
apple_nvme_disable(anv, true);
- if (apple_rtkit_is_running(anv->rtk))
+ if (apple_rtkit_is_running(anv->rtk)) {
ret = apple_rtkit_shutdown(anv->rtk);
- writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ writel(0, anv->mmio_coproc + APPLE_ANS_COPROC_CPU_CONTROL);
+ }
return ret;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 40046770f1bf..f028913e2e62 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -564,8 +564,6 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_LIVE:
switch (old_state) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_RESETTING:
case NVME_CTRL_CONNECTING:
changed = true;
fallthrough;
@@ -1700,7 +1698,13 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count)
status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0,
&result);
- if (status < 0)
+
+ /*
+ * It's either a kernel error or the host observed a connection
+ * lost. In either case it's not possible communicate with the
+ * controller and thus enter the error code path.
+ */
+ if (status < 0 || status == NVME_SC_HOST_PATH_ERROR)
return status;
/*
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 094be164ffdc..b9929a5a7f4e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -785,49 +785,8 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
"NVME-FC{%d}: controller connectivity lost. Awaiting "
"Reconnect", ctrl->cnum);
- switch (nvme_ctrl_state(&ctrl->ctrl)) {
- case NVME_CTRL_NEW:
- case NVME_CTRL_LIVE:
- /*
- * Schedule a controller reset. The reset will terminate the
- * association and schedule the reconnect timer. Reconnects
- * will be attempted until either the ctlr_loss_tmo
- * (max_retries * connect_delay) expires or the remoteport's
- * dev_loss_tmo expires.
- */
- if (nvme_reset_ctrl(&ctrl->ctrl)) {
- dev_warn(ctrl->ctrl.device,
- "NVME-FC{%d}: Couldn't schedule reset.\n",
- ctrl->cnum);
- nvme_delete_ctrl(&ctrl->ctrl);
- }
- break;
-
- case NVME_CTRL_CONNECTING:
- /*
- * The association has already been terminated and the
- * controller is attempting reconnects. No need to do anything
- * futher. Reconnects will be attempted until either the
- * ctlr_loss_tmo (max_retries * connect_delay) expires or the
- * remoteport's dev_loss_tmo expires.
- */
- break;
-
- case NVME_CTRL_RESETTING:
- /*
- * Controller is already in the process of terminating the
- * association. No need to do anything further. The reconnect
- * step will kick in naturally after the association is
- * terminated.
- */
- break;
-
- case NVME_CTRL_DELETING:
- case NVME_CTRL_DELETING_NOIO:
- default:
- /* no action to take - let it delete */
- break;
- }
+ set_bit(ASSOC_FAILED, &ctrl->flags);
+ nvme_reset_ctrl(&ctrl->ctrl);
}
/**
@@ -2079,7 +2038,8 @@ done:
nvme_fc_complete_rq(rq);
check_error:
- if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING)
+ if (terminate_assoc &&
+ nvme_ctrl_state(&ctrl->ctrl) != NVME_CTRL_RESETTING)
queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
@@ -2533,6 +2493,8 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
+ enum nvme_ctrl_state state = nvme_ctrl_state(&ctrl->ctrl);
+
/*
* if an error (io timeout, etc) while (re)connecting, the remote
* port requested terminating of the association (disconnect_ls)
@@ -2540,9 +2502,8 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
* the controller. Abort any ios on the association and let the
* create_association error path resolve things.
*/
- if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ if (state == NVME_CTRL_CONNECTING) {
__nvme_fc_abort_outstanding_ios(ctrl, true);
- set_bit(ASSOC_FAILED, &ctrl->flags);
dev_warn(ctrl->ctrl.device,
"NVME-FC{%d}: transport error during (re)connect\n",
ctrl->cnum);
@@ -2550,7 +2511,7 @@ nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
}
/* Otherwise, only proceed if in LIVE state - e.g. on first error */
- if (ctrl->ctrl.state != NVME_CTRL_LIVE)
+ if (state != NVME_CTRL_LIVE)
return;
dev_warn(ctrl->ctrl.device,
@@ -3061,7 +3022,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
struct nvmefc_ls_rcv_op *disls = NULL;
unsigned long flags;
int ret;
- bool changed;
++ctrl->ctrl.nr_reconnects;
@@ -3172,12 +3132,13 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
if (ret)
goto out_term_aen_ops;
- changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE)) {
+ ret = -EIO;
+ goto out_term_aen_ops;
+ }
ctrl->ctrl.nr_reconnects = 0;
-
- if (changed)
- nvme_start_ctrl(&ctrl->ctrl);
+ nvme_start_ctrl(&ctrl->ctrl);
return 0; /* Success */
@@ -3578,8 +3539,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
spin_unlock_irqrestore(&rport->lock, flags);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
- !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
goto fail_ctrl;
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index e8930146847a..24e2c702da7a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -128,8 +128,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
if (!nvme_ctrl_sgl_supported(ctrl))
dev_warn_once(ctrl->device, "using unchecked data buffer\n");
if (has_metadata) {
- if (!supports_metadata)
- return -EINVAL;
+ if (!supports_metadata) {
+ ret = -EINVAL;
+ goto out;
+ }
if (!nvme_ctrl_meta_sgl_supported(ctrl))
dev_warn_once(ctrl->device,
"using unchecked metadata buffer\n");
@@ -139,8 +141,10 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
struct iov_iter iter;
/* fixedbufs is only for non-vectored io */
- if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC))
- return -EINVAL;
+ if (WARN_ON_ONCE(flags & NVME_IOCTL_VEC)) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = io_uring_cmd_import_fixed(ubuffer, bufflen,
rq_data_dir(req), &iter, ioucmd);
if (ret < 0)
@@ -283,8 +287,7 @@ static bool nvme_validate_passthru_nsid(struct nvme_ctrl *ctrl,
{
if (ns && nsid != ns->head->ns_id) {
dev_err(ctrl->device,
- "%s: nsid (%u) in cmd does not match nsid (%u)"
- "of namespace\n",
+ "%s: nsid (%u) in cmd does not match nsid (%u) of namespace\n",
current->comm, nsid, ns->head->ns_id);
return false;
}
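
Both fixes above replace early returns with "ret = -EINVAL; goto out;" so the reference taken earlier in nvme_map_user_request() is still dropped on failure. A generic userspace sketch of that single-exit cleanup pattern; malloc stands in for the held reference.

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    static int do_request(int has_metadata, int supports_metadata)
    {
        int ret = 0;
        void *resource = malloc(64);    /* acquired before the validity checks */

        if (!resource)
            return -ENOMEM;

        if (has_metadata && !supports_metadata) {
            ret = -EINVAL;              /* a bare "return -EINVAL" here would leak */
            goto out;
        }

        /* ... normal processing ... */

    out:
        free(resource);                 /* single unwind point releases the resource */
        return ret;
    }

    int main(void)
    {
        printf("ok case:    %d\n", do_request(0, 0));
        printf("error case: %d\n", do_request(1, 0));
        return 0;
    }
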
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 278bed4e35bb..640590b21728 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1983,6 +1983,18 @@ static void nvme_map_cmb(struct nvme_dev *dev)
return;
/*
+ * Controllers may support a CMB size larger than their BAR, for
+ * example, due to being behind a bridge. Reduce the CMB to the
+ * reported size of the BAR
+ */
+ size = min(size, bar_size - offset);
+
+ if (!IS_ALIGNED(size, memremap_compat_align()) ||
+ !IS_ALIGNED(pci_resource_start(pdev, bar),
+ memremap_compat_align()))
+ return;
+
+ /*
* Tell the controller about the host side address mapping the CMB,
* and enable CMB decoding for the NVMe 1.4+ scheme:
*/
@@ -1992,17 +2004,10 @@ static void nvme_map_cmb(struct nvme_dev *dev)
dev->bar + NVME_REG_CMBMSC);
}
- /*
- * Controllers may support a CMB size larger than their BAR,
- * for example, due to being behind a bridge. Reduce the CMB to
- * the reported size of the BAR
- */
- if (size > bar_size - offset)
- size = bar_size - offset;
-
if (pci_p2pdma_add_resource(pdev, bar, size, offset)) {
dev_warn(dev->ctrl.device,
"failed to register the CMB\n");
+ hi_lo_writeq(0, dev->bar + NVME_REG_CMBMSC);
return;
}
@@ -2153,14 +2158,6 @@ static int nvme_alloc_host_mem_multi(struct nvme_dev *dev, u64 preferred,
return 0;
out_free_bufs:
- while (--i >= 0) {
- size_t size = le32_to_cpu(descs[i].size) * NVME_CTRL_PAGE_SIZE;
-
- dma_free_attrs(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr),
- DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
- }
-
kfree(bufs);
out_free_descs:
dma_free_coherent(dev->dev, descs_size, descs, descs_dma);
@@ -3147,7 +3144,9 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
* because of high power consumption (> 2 Watt) in s2idle
* sleep. Only some boards with Intel CPU are affected.
*/
- if (dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ if (dmi_match(DMI_BOARD_NAME, "DN50Z-140HC-YD") ||
+ dmi_match(DMI_BOARD_NAME, "GMxPXxx") ||
+ dmi_match(DMI_BOARD_NAME, "GXxMRXx") ||
dmi_match(DMI_BOARD_NAME, "PH4PG31") ||
dmi_match(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1") ||
dmi_match(DMI_BOARD_NAME, "PH6PG01_PH6PG71"))
@@ -3712,6 +3711,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
+ { PCI_DEVICE(0x1dbe, 0x5216), /* Acer/INNOGRIT FA100/5216 NVMe SSD */
+ .driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1dbe, 0x5236), /* ADATA XPG GAMMIX S70 */
.driver_data = NVME_QUIRK_BOGUS_NID, },
{ PCI_DEVICE(0x1e49, 0x0021), /* ZHITAI TiPro5000 NVMe SSD */
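
nvme_map_cmb() above now clamps the CMB to the BAR and refuses to map it unless both the size and the BAR start are suitably aligned. A tiny sketch of that alignment test; the 2 MiB constant is a stand-in, not the real memremap_compat_align() value on every architecture.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Same power-of-two trick as the kernel's IS_ALIGNED(): x & (align - 1) == 0. */
    #define IS_ALIGNED(x, a)    (((x) & ((uint64_t)(a) - 1)) == 0)

    /* Hypothetical stand-in for memremap_compat_align(). */
    #define COMPAT_ALIGN        (2UL * 1024 * 1024)

    static bool cmb_mappable(uint64_t bar_start, uint64_t size)
    {
        return IS_ALIGNED(size, COMPAT_ALIGN) && IS_ALIGNED(bar_start, COMPAT_ALIGN);
    }

    int main(void)
    {
        printf("aligned BAR:   %s\n", cmb_mappable(0xe0000000, 16UL << 20) ? "map" : "skip");
        printf("odd-sized CMB: %s\n", cmb_mappable(0xe0000000, (16UL << 20) + 4096) ? "map" : "skip");
        return 0;
    }
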
diff --git a/drivers/nvme/host/sysfs.c b/drivers/nvme/host/sysfs.c
index b68a9e5f1ea3..3a41b9ab0f13 100644
--- a/drivers/nvme/host/sysfs.c
+++ b/drivers/nvme/host/sysfs.c
@@ -792,7 +792,7 @@ static umode_t nvme_tls_attrs_are_visible(struct kobject *kobj,
return a->mode;
}
-const struct attribute_group nvme_tls_attrs_group = {
+static const struct attribute_group nvme_tls_attrs_group = {
.attrs = nvme_tls_attrs,
.is_visible = nvme_tls_attrs_are_visible,
};
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 841238f38fdd..327f3f2f5399 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -217,6 +217,19 @@ static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue)
return queue - queue->ctrl->queues;
}
+static inline bool nvme_tcp_recv_pdu_supported(enum nvme_tcp_pdu_type type)
+{
+ switch (type) {
+ case nvme_tcp_c2h_term:
+ case nvme_tcp_c2h_data:
+ case nvme_tcp_r2t:
+ case nvme_tcp_rsp:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* Check if the queue is TLS encrypted
*/
@@ -763,6 +776,40 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
return 0;
}
+static void nvme_tcp_handle_c2h_term(struct nvme_tcp_queue *queue,
+ struct nvme_tcp_term_pdu *pdu)
+{
+ u16 fes;
+ const char *msg;
+ u32 plen = le32_to_cpu(pdu->hdr.plen);
+
+ static const char * const msg_table[] = {
+ [NVME_TCP_FES_INVALID_PDU_HDR] = "Invalid PDU Header Field",
+ [NVME_TCP_FES_PDU_SEQ_ERR] = "PDU Sequence Error",
+ [NVME_TCP_FES_HDR_DIGEST_ERR] = "Header Digest Error",
+ [NVME_TCP_FES_DATA_OUT_OF_RANGE] = "Data Transfer Out Of Range",
+ [NVME_TCP_FES_DATA_LIMIT_EXCEEDED] = "Data Transfer Limit Exceeded",
+ [NVME_TCP_FES_UNSUPPORTED_PARAM] = "Unsupported Parameter",
+ };
+
+ if (plen < NVME_TCP_MIN_C2HTERM_PLEN ||
+ plen > NVME_TCP_MAX_C2HTERM_PLEN) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Received a malformed C2HTermReq PDU (plen = %u)\n",
+ plen);
+ return;
+ }
+
+ fes = le16_to_cpu(pdu->fes);
+ if (fes && fes < ARRAY_SIZE(msg_table))
+ msg = msg_table[fes];
+ else
+ msg = "Unknown";
+
+ dev_err(queue->ctrl->ctrl.device,
+ "Received C2HTermReq (FES = %s)\n", msg);
+}
+
static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
unsigned int *offset, size_t *len)
{
@@ -784,6 +831,25 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
return 0;
hdr = queue->pdu;
+ if (unlikely(hdr->hlen != sizeof(struct nvme_tcp_rsp_pdu))) {
+ if (!nvme_tcp_recv_pdu_supported(hdr->type))
+ goto unsupported_pdu;
+
+ dev_err(queue->ctrl->ctrl.device,
+ "pdu type %d has unexpected header length (%d)\n",
+ hdr->type, hdr->hlen);
+ return -EPROTO;
+ }
+
+ if (unlikely(hdr->type == nvme_tcp_c2h_term)) {
+ /*
+ * C2HTermReq never includes Header or Data digests.
+ * Skip the checks.
+ */
+ nvme_tcp_handle_c2h_term(queue, (void *)queue->pdu);
+ return -EINVAL;
+ }
+
if (queue->hdr_digest) {
ret = nvme_tcp_verify_hdgst(queue, queue->pdu, hdr->hlen);
if (unlikely(ret))
@@ -807,10 +873,13 @@ static int nvme_tcp_recv_pdu(struct nvme_tcp_queue *queue, struct sk_buff *skb,
nvme_tcp_init_recv_ctx(queue);
return nvme_tcp_handle_r2t(queue, (void *)queue->pdu);
default:
- dev_err(queue->ctrl->ctrl.device,
- "unsupported pdu type (%d)\n", hdr->type);
- return -EINVAL;
+ goto unsupported_pdu;
}
+
+unsupported_pdu:
+ dev_err(queue->ctrl->ctrl.device,
+ "unsupported pdu type (%d)\n", hdr->type);
+ return -EINVAL;
}
static inline void nvme_tcp_end_request(struct request *rq, u16 status)
@@ -1449,8 +1518,11 @@ static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue)
msg.msg_control = cbuf;
msg.msg_controllen = sizeof(cbuf);
}
+ msg.msg_flags = MSG_WAITALL;
ret = kernel_recvmsg(queue->sock, &msg, &iov, 1,
iov.iov_len, msg.msg_flags);
+ if (ret >= 0 && ret < sizeof(*icresp))
+ ret = -ECONNRESET;
if (ret < 0) {
pr_warn("queue %d: failed to receive icresp, error %d\n",
nvme_tcp_queue_id(queue), ret);
@@ -1565,7 +1637,7 @@ static bool nvme_tcp_poll_queue(struct nvme_tcp_queue *queue)
ctrl->io_queues[HCTX_TYPE_POLL];
}
-/**
+/*
* Track the number of queues assigned to each cpu using a global per-cpu
* counter and select the least used cpu from the mq_map. Our goal is to spread
* different controllers I/O threads across different cpu cores.
@@ -2653,6 +2725,7 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ int ret;
if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
return 0;
@@ -2660,9 +2733,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
set_bit(NVME_TCP_Q_POLLING, &queue->flags);
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
- nvme_tcp_try_recv(queue);
+ ret = nvme_tcp_try_recv(queue);
clear_bit(NVME_TCP_Q_POLLING, &queue->flags);
- return queue->nr_cqe;
+ return ret < 0 ? ret : queue->nr_cqe;
}
static int nvme_tcp_get_address(struct nvme_ctrl *ctrl, char *buf, int size)
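
The new C2HTermReq handler above decodes the Fatal Error Status with a designated-initializer lookup table and an "Unknown" fallback for out-of-range codes. A compact userspace version of that lookup; only a subset of the FES codes is filled in here.

    #include <stdio.h>
    #include <stdint.h>

    #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

    static const char * const fes_msg[] = {
        [0x01] = "Invalid PDU Header Field",
        [0x02] = "PDU Sequence Error",
        [0x03] = "Header Digest Error",
        [0x04] = "Data Transfer Out Of Range",
    };

    static const char *fes_to_msg(uint16_t fes)
    {
        /*
         * Reject 0 and anything past the table; designated initializers
         * leave unlisted slots NULL, so also fall back on a hole.
         */
        if (fes && fes < ARRAY_SIZE(fes_msg) && fes_msg[fes])
            return fes_msg[fes];
        return "Unknown";
    }

    int main(void)
    {
        printf("fes=0x02 -> %s\n", fes_to_msg(0x02));
        printf("fes=0x7f -> %s\n", fes_to_msg(0x7f));
        return 0;
    }
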
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e670dc185a96..acc138bbf8f2 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1068,6 +1068,7 @@ static void nvme_execute_identify_ns_nvm(struct nvmet_req *req)
goto out;
}
status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));
+ kfree(id);
out:
nvmet_req_complete(req, status);
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index cdc4a09a6e8a..2e741696f371 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -606,6 +606,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_dev_put;
}
+ if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
+ goto out_pr_exit;
+
nvmet_ns_changed(subsys, ns->nsid);
ns->enabled = true;
xa_set_mark(&subsys->namespaces, ns->nsid, NVMET_NS_ENABLED);
@@ -613,6 +616,9 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
out_unlock:
mutex_unlock(&subsys->lock);
return ret;
+out_pr_exit:
+ if (ns->pr.enable)
+ nvmet_pr_exit_ns(ns);
out_dev_put:
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
@@ -638,6 +644,19 @@ void nvmet_ns_disable(struct nvmet_ns *ns)
mutex_unlock(&subsys->lock);
+ /*
+ * Now that we removed the namespaces from the lookup list, we
+ * can kill the per_cpu ref and wait for any remaining references
+ * to be dropped, as well as a RCU grace period for anyone only
+ * using the namespace under rcu_read_lock(). Note that we can't
+ * use call_rcu here as we need to ensure the namespaces have
+ * been fully destroyed before unloading the module.
+ */
+ percpu_ref_kill(&ns->ref);
+ synchronize_rcu();
+ wait_for_completion(&ns->disable_done);
+ percpu_ref_exit(&ns->ref);
+
if (ns->pr.enable)
nvmet_pr_exit_ns(ns);
@@ -660,22 +679,6 @@ void nvmet_ns_free(struct nvmet_ns *ns)
if (ns->nsid == subsys->max_nsid)
subsys->max_nsid = nvmet_max_nsid(subsys);
- mutex_unlock(&subsys->lock);
-
- /*
- * Now that we removed the namespaces from the lookup list, we
- * can kill the per_cpu ref and wait for any remaining references
- * to be dropped, as well as a RCU grace period for anyone only
- * using the namepace under rcu_read_lock(). Note that we can't
- * use call_rcu here as we need to ensure the namespaces have
- * been fully destroyed before unloading the module.
- */
- percpu_ref_kill(&ns->ref);
- synchronize_rcu();
- wait_for_completion(&ns->disable_done);
- percpu_ref_exit(&ns->ref);
-
- mutex_lock(&subsys->lock);
subsys->nr_namespaces--;
mutex_unlock(&subsys->lock);
@@ -705,9 +708,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
ns->nsid = nsid;
ns->subsys = subsys;
- if (percpu_ref_init(&ns->ref, nvmet_destroy_namespace, 0, GFP_KERNEL))
- goto out_free;
-
if (ns->nsid > subsys->max_nsid)
subsys->max_nsid = nsid;
@@ -730,8 +730,6 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
return ns;
out_exit:
subsys->max_nsid = nvmet_max_nsid(subsys);
- percpu_ref_exit(&ns->ref);
-out_free:
kfree(ns);
out_unlock:
mutex_unlock(&subsys->lock);
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index a7ff05b3be29..eb406c90c167 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -287,7 +287,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
args.subsysnqn = d->subsysnqn;
args.hostnqn = d->hostnqn;
args.hostid = &d->hostid;
- args.kato = c->kato;
+ args.kato = le32_to_cpu(c->kato);
ctrl = nvmet_alloc_ctrl(&args);
if (!ctrl)
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index c1f574fe3280..83be0657e6df 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -272,7 +272,7 @@ static void nvmet_bdev_execute_rw(struct nvmet_req *req)
iter_flags = SG_MITER_FROM_SG;
}
- if (req->cmd->rw.control & NVME_RW_LR)
+ if (req->cmd->rw.control & cpu_to_le16(NVME_RW_LR))
opf |= REQ_FAILFAST_DEV;
if (is_pci_p2pdma_page(sg_page(req->sg)))
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index b540216c0c9a..fcf4f460dc9a 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -589,7 +589,7 @@ struct nvmet_alloc_ctrl_args {
const struct nvmet_fabrics_ops *ops;
struct device *p2p_client;
u32 kato;
- u32 result;
+ __le32 result;
u16 error_loc;
u16 status;
};
@@ -647,7 +647,6 @@ void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
struct nvmet_host *host);
void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
u8 event_info, u8 log_page);
-bool nvmet_subsys_nsid_exists(struct nvmet_subsys *subsys, u32 nsid);
#define NVMET_MIN_QUEUE_SIZE 16
#define NVMET_MAX_QUEUE_SIZE 1024
@@ -784,37 +783,37 @@ u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
static inline bool nvmet_cc_en(u32 cc)
{
- return (cc >> NVME_CC_EN_SHIFT) & 0x1;
+ return (cc & NVME_CC_ENABLE) >> NVME_CC_EN_SHIFT;
}
static inline u8 nvmet_cc_css(u32 cc)
{
- return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
+ return (cc & NVME_CC_CSS_MASK) >> NVME_CC_CSS_SHIFT;
}
static inline u8 nvmet_cc_mps(u32 cc)
{
- return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
+ return (cc & NVME_CC_MPS_MASK) >> NVME_CC_MPS_SHIFT;
}
static inline u8 nvmet_cc_ams(u32 cc)
{
- return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
+ return (cc & NVME_CC_AMS_MASK) >> NVME_CC_AMS_SHIFT;
}
static inline u8 nvmet_cc_shn(u32 cc)
{
- return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
+ return (cc & NVME_CC_SHN_MASK) >> NVME_CC_SHN_SHIFT;
}
static inline u8 nvmet_cc_iosqes(u32 cc)
{
- return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
+ return (cc & NVME_CC_IOSQES_MASK) >> NVME_CC_IOSQES_SHIFT;
}
static inline u8 nvmet_cc_iocqes(u32 cc)
{
- return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
+ return (cc & NVME_CC_IOCQES_MASK) >> NVME_CC_IOCQES_SHIFT;
}
/* Convert a 32-bit number to a 16-bit 0's based number */
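
The nvmet_cc_*() helpers above now mask first and shift second, so each field is documented by a single named mask. A standalone illustration of the same style; the shift and mask values are defined locally for the demo rather than pulled from include/linux/nvme.h.

    #include <stdio.h>
    #include <stdint.h>

    #define CC_EN_SHIFT      0
    #define CC_EN_MASK       (0x1u << CC_EN_SHIFT)
    #define CC_MPS_SHIFT     7
    #define CC_MPS_MASK      (0xfu << CC_MPS_SHIFT)
    #define CC_IOSQES_SHIFT  16
    #define CC_IOSQES_MASK   (0xfu << CC_IOSQES_SHIFT)

    /* Mask first, then shift: the mask names exactly the bits that belong to the field. */
    static inline int cc_mps(uint32_t cc)    { return (cc & CC_MPS_MASK) >> CC_MPS_SHIFT; }
    static inline int cc_iosqes(uint32_t cc) { return (cc & CC_IOSQES_MASK) >> CC_IOSQES_SHIFT; }

    int main(void)
    {
        uint32_t cc = (6u << CC_IOSQES_SHIFT) | (0u << CC_MPS_SHIFT) | 1u;

        printf("EN=%d MPS=%d IOSQES=%d\n",
               (cc & CC_EN_MASK) ? 1 : 0, cc_mps(cc), cc_iosqes(cc));
        return 0;
    }
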
diff --git a/drivers/nvme/target/pci-epf.c b/drivers/nvme/target/pci-epf.c
index ac30b42cc622..565d2bd36dcd 100644
--- a/drivers/nvme/target/pci-epf.c
+++ b/drivers/nvme/target/pci-epf.c
@@ -46,7 +46,7 @@ static DEFINE_MUTEX(nvmet_pci_epf_ports_mutex);
/*
* BAR CC register and SQ polling intervals.
*/
-#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(5)
+#define NVMET_PCI_EPF_CC_POLL_INTERVAL msecs_to_jiffies(10)
#define NVMET_PCI_EPF_SQ_POLL_INTERVAL msecs_to_jiffies(5)
#define NVMET_PCI_EPF_SQ_POLL_IDLE msecs_to_jiffies(5000)
@@ -1694,6 +1694,7 @@ static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
struct nvmet_pci_epf_ctrl *ctrl =
container_of(work, struct nvmet_pci_epf_ctrl, poll_sqs.work);
struct nvmet_pci_epf_queue *sq;
+ unsigned long limit = jiffies;
unsigned long last = 0;
int i, nr_sqs;
@@ -1708,6 +1709,16 @@ static void nvmet_pci_epf_poll_sqs_work(struct work_struct *work)
nr_sqs++;
}
+ /*
+ * If we have been running for a while, reschedule to let other
+ * tasks run and to avoid RCU stalls.
+ */
+ if (time_is_before_jiffies(limit + secs_to_jiffies(1))) {
+ cond_resched();
+ limit = jiffies;
+ continue;
+ }
+
if (nr_sqs) {
last = jiffies;
continue;
@@ -1822,14 +1833,14 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
if (ctrl->io_sqes < sizeof(struct nvme_command)) {
dev_err(ctrl->dev, "Unsupported I/O SQES %zu (need %zu)\n",
ctrl->io_sqes, sizeof(struct nvme_command));
- return -EINVAL;
+ goto err;
}
ctrl->io_cqes = 1UL << nvmet_cc_iocqes(ctrl->cc);
if (ctrl->io_cqes < sizeof(struct nvme_completion)) {
dev_err(ctrl->dev, "Unsupported I/O CQES %zu (need %zu)\n",
ctrl->io_sqes, sizeof(struct nvme_completion));
- return -EINVAL;
+ goto err;
}
/* Create the admin queue. */
@@ -1844,7 +1855,7 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
qsize, pci_addr, 0);
if (status != NVME_SC_SUCCESS) {
dev_err(ctrl->dev, "Failed to create admin completion queue\n");
- return -EINVAL;
+ goto err;
}
qsize = aqa & 0x00000fff;
@@ -1854,17 +1865,22 @@ static int nvmet_pci_epf_enable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
if (status != NVME_SC_SUCCESS) {
dev_err(ctrl->dev, "Failed to create admin submission queue\n");
nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
- return -EINVAL;
+ goto err;
}
ctrl->sq_ab = NVMET_PCI_EPF_SQ_AB;
ctrl->irq_vector_threshold = NVMET_PCI_EPF_IV_THRESHOLD;
ctrl->enabled = true;
+ ctrl->csts = NVME_CSTS_RDY;
/* Start polling the controller SQs. */
schedule_delayed_work(&ctrl->poll_sqs, 0);
return 0;
+
+err:
+ ctrl->csts = 0;
+ return -EINVAL;
}
static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
@@ -1889,6 +1905,8 @@ static void nvmet_pci_epf_disable_ctrl(struct nvmet_pci_epf_ctrl *ctrl)
/* Delete the admin queue last. */
nvmet_pci_epf_delete_sq(ctrl->tctrl, 0);
nvmet_pci_epf_delete_cq(ctrl->tctrl, 0);
+
+ ctrl->csts &= ~NVME_CSTS_RDY;
}
static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
@@ -1903,19 +1921,19 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
old_cc = ctrl->cc;
new_cc = nvmet_pci_epf_bar_read32(ctrl, NVME_REG_CC);
+ if (new_cc == old_cc)
+ goto reschedule_work;
+
ctrl->cc = new_cc;
if (nvmet_cc_en(new_cc) && !nvmet_cc_en(old_cc)) {
ret = nvmet_pci_epf_enable_ctrl(ctrl);
if (ret)
- return;
- ctrl->csts |= NVME_CSTS_RDY;
+ goto reschedule_work;
}
- if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc)) {
+ if (!nvmet_cc_en(new_cc) && nvmet_cc_en(old_cc))
nvmet_pci_epf_disable_ctrl(ctrl);
- ctrl->csts &= ~NVME_CSTS_RDY;
- }
if (nvmet_cc_shn(new_cc) && !nvmet_cc_shn(old_cc)) {
nvmet_pci_epf_disable_ctrl(ctrl);
@@ -1928,6 +1946,7 @@ static void nvmet_pci_epf_poll_cc_work(struct work_struct *work)
nvmet_update_cc(ctrl->tctrl, ctrl->cc);
nvmet_pci_epf_bar_write32(ctrl, NVME_REG_CSTS, ctrl->csts);
+reschedule_work:
schedule_delayed_work(&ctrl->poll_cc, NVMET_PCI_EPF_CC_POLL_INTERVAL);
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 1afd93026f9b..2a4536ef6184 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -996,6 +996,27 @@ out_err:
nvmet_req_complete(&cmd->req, status);
}
+static bool nvmet_rdma_recv_not_live(struct nvmet_rdma_queue *queue,
+ struct nvmet_rdma_rsp *rsp)
+{
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&queue->state_lock, flags);
+ /*
+ * recheck queue state is not live to prevent a race condition
+ * with RDMA_CM_EVENT_ESTABLISHED handler.
+ */
+ if (queue->state == NVMET_RDMA_Q_LIVE)
+ ret = false;
+ else if (queue->state == NVMET_RDMA_Q_CONNECTING)
+ list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
+ else
+ nvmet_rdma_put_rsp(rsp);
+ spin_unlock_irqrestore(&queue->state_lock, flags);
+ return ret;
+}
+
static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
struct nvmet_rdma_cmd *cmd =
@@ -1038,17 +1059,9 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
rsp->n_rdma = 0;
rsp->invalidate_rkey = 0;
- if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) {
- unsigned long flags;
-
- spin_lock_irqsave(&queue->state_lock, flags);
- if (queue->state == NVMET_RDMA_Q_CONNECTING)
- list_add_tail(&rsp->wait_list, &queue->rsp_wait_list);
- else
- nvmet_rdma_put_rsp(rsp);
- spin_unlock_irqrestore(&queue->state_lock, flags);
+ if (unlikely(queue->state != NVMET_RDMA_Q_LIVE) &&
+ nvmet_rdma_recv_not_live(queue, rsp))
return;
- }
nvmet_rdma_handle_command(queue, rsp);
}
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 7c51c2a8c109..4f9cac8a5abe 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -571,10 +571,16 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
struct nvmet_tcp_cmd *cmd =
container_of(req, struct nvmet_tcp_cmd, req);
struct nvmet_tcp_queue *queue = cmd->queue;
+ enum nvmet_tcp_recv_state queue_state;
+ struct nvmet_tcp_cmd *queue_cmd;
struct nvme_sgl_desc *sgl;
u32 len;
- if (unlikely(cmd == queue->cmd)) {
+ /* Pairs with store_release in nvmet_prepare_receive_pdu() */
+ queue_state = smp_load_acquire(&queue->rcv_state);
+ queue_cmd = READ_ONCE(queue->cmd);
+
+ if (unlikely(cmd == queue_cmd)) {
sgl = &cmd->req.cmd->common.dptr.sgl;
len = le32_to_cpu(sgl->length);
@@ -583,7 +589,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
* Avoid using helpers, this might happen before
* nvmet_req_init is completed.
*/
- if (queue->rcv_state == NVMET_TCP_RECV_PDU &&
+ if (queue_state == NVMET_TCP_RECV_PDU &&
len && len <= cmd->req.port->inline_data_size &&
nvme_is_write(cmd->req.cmd))
return;
@@ -847,8 +853,9 @@ static void nvmet_prepare_receive_pdu(struct nvmet_tcp_queue *queue)
{
queue->offset = 0;
queue->left = sizeof(struct nvme_tcp_hdr);
- queue->cmd = NULL;
- queue->rcv_state = NVMET_TCP_RECV_PDU;
+ WRITE_ONCE(queue->cmd, NULL);
+ /* Ensure rcv_state is visible only after queue->cmd is set */
+ smp_store_release(&queue->rcv_state, NVMET_TCP_RECV_PDU);
}
static void nvmet_tcp_free_crypto(struct nvmet_tcp_queue *queue)
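
The nvmet-tcp change above pairs smp_store_release() on rcv_state with smp_load_acquire() in nvmet_tcp_queue_response(), so a reader that observes the new receive state also observes queue->cmd already cleared. A userspace analogue using C11 atomics (compile with -pthread); the names mirror the driver but the code is only a sketch of the ordering guarantee.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <pthread.h>

    enum { RECV_ERR = 0, RECV_PDU = 1 };

    static int dummy;
    static _Atomic(void *) cmd = (void *)&dummy;   /* non-NULL until published */
    static atomic_int rcv_state;                   /* starts at RECV_ERR */

    static void *writer(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&cmd, NULL, memory_order_relaxed);
        /* release: the cmd write above is visible to any acquire reader of rcv_state */
        atomic_store_explicit(&rcv_state, RECV_PDU, memory_order_release);
        return NULL;
    }

    static void *reader(void *arg)
    {
        (void)arg;
        int state = atomic_load_explicit(&rcv_state, memory_order_acquire);
        void *c = atomic_load_explicit(&cmd, memory_order_relaxed);

        /* If RECV_PDU is seen, the cleared cmd must be seen too. */
        if (state == RECV_PDU)
            puts(c == NULL ? "saw new state and cleared cmd" : "ordering violated (impossible)");
        else
            puts("reader ran before the writer published");
        return NULL;
    }

    int main(void)
    {
        pthread_t w, r;

        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
    }
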
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 125833e5ce52..d177a2b9edaf 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -16,6 +16,8 @@
#include <linux/string.h>
#include <linux/dma-direct.h> /* for bus_dma_region */
+#include <kunit/visibility.h>
+
/* Uncomment me to enable of_dump_addr() debugging output */
// #define DEBUG
@@ -183,7 +185,7 @@ static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
#endif /* CONFIG_PCI */
-static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
+VISIBLE_IF_KUNIT int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
{
if (overflows_type(start, r->start))
return -EOVERFLOW;
@@ -197,6 +199,7 @@ static int __of_address_resource_bounds(struct resource *r, u64 start, u64 size)
return 0;
}
+EXPORT_SYMBOL_IF_KUNIT(__of_address_resource_bounds);
/*
* of_pci_range_to_resource - Create a resource from an of_pci_range
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index f3e1193c8ded..1bdc7ceef3c5 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -208,4 +208,8 @@ static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int n
static void __maybe_unused of_dump_addr(const char *s, const __be32 *addr, int na) { }
#endif
+#if IS_ENABLED(CONFIG_KUNIT)
+int __of_address_resource_bounds(struct resource *r, u64 start, u64 size);
+#endif
+
#endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 75e819f66a56..ee2e31522d7e 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -415,12 +415,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam
prop = of_get_flat_dt_prop(node, "alignment", &len);
if (prop) {
- if (len != dt_root_size_cells * sizeof(__be32)) {
+ if (len != dt_root_addr_cells * sizeof(__be32)) {
pr_err("invalid alignment property in '%s' node.\n",
uname);
return -EINVAL;
}
- align = dt_mem_next_cell(dt_root_size_cells, &prop);
+ align = dt_mem_next_cell(dt_root_addr_cells, &prop);
}
nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
diff --git a/drivers/of/of_test.c b/drivers/of/of_test.c
index b0557ded838f..8bba5a72c9c7 100644
--- a/drivers/of/of_test.c
+++ b/drivers/of/of_test.c
@@ -2,6 +2,7 @@
/*
* KUnit tests for OF APIs
*/
+#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -54,8 +55,124 @@ static struct kunit_suite of_dtb_suite = {
.init = of_dtb_test_init,
};
+struct of_address_resource_bounds_case {
+ u64 start;
+ u64 size;
+ int ret;
+
+ u64 res_start;
+ u64 res_end;
+};
+
+static void of_address_resource_bounds_case_desc(const struct of_address_resource_bounds_case *p,
+ char *name)
+{
+ snprintf(name, KUNIT_PARAM_DESC_SIZE, "start=0x%016llx,size=0x%016llx", p->start, p->size);
+}
+
+static const struct of_address_resource_bounds_case of_address_resource_bounds_cases[] = {
+ {
+ .start = 0,
+ .size = 0,
+ .ret = 0,
+ .res_start = 0,
+ .res_end = -1,
+ },
+ {
+ .start = 0,
+ .size = 0x1000,
+ .ret = 0,
+ .res_start = 0,
+ .res_end = 0xfff,
+ },
+ {
+ .start = 0x1000,
+ .size = 0,
+ .ret = 0,
+ .res_start = 0x1000,
+ .res_end = 0xfff,
+ },
+ {
+ .start = 0x1000,
+ .size = 0x1000,
+ .ret = 0,
+ .res_start = 0x1000,
+ .res_end = 0x1fff,
+ },
+ {
+ .start = 1,
+ .size = RESOURCE_SIZE_MAX,
+ .ret = 0,
+ .res_start = 1,
+ .res_end = RESOURCE_SIZE_MAX,
+ },
+ {
+ .start = RESOURCE_SIZE_MAX,
+ .size = 1,
+ .ret = 0,
+ .res_start = RESOURCE_SIZE_MAX,
+ .res_end = RESOURCE_SIZE_MAX,
+ },
+ {
+ .start = 2,
+ .size = RESOURCE_SIZE_MAX,
+ .ret = -EOVERFLOW,
+ },
+ {
+ .start = RESOURCE_SIZE_MAX,
+ .size = 2,
+ .ret = -EOVERFLOW,
+ },
+ {
+ .start = ULL(0x100000000),
+ .size = 1,
+ .ret = sizeof(resource_size_t) > sizeof(u32) ? 0 : -EOVERFLOW,
+ .res_start = ULL(0x100000000),
+ .res_end = ULL(0x100000000),
+ },
+ {
+ .start = 0x1000,
+ .size = 0xffffffff,
+ .ret = sizeof(resource_size_t) > sizeof(u32) ? 0 : -EOVERFLOW,
+ .res_start = 0x1000,
+ .res_end = ULL(0x100000ffe),
+ },
+};
+
+KUNIT_ARRAY_PARAM(of_address_resource_bounds,
+ of_address_resource_bounds_cases, of_address_resource_bounds_case_desc);
+
+static void of_address_resource_bounds(struct kunit *test)
+{
+ const struct of_address_resource_bounds_case *param = test->param_value;
+ struct resource r; /* Intentionally uninitialized */
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_OF_ADDRESS))
+ kunit_skip(test, "CONFIG_OF_ADDRESS not enabled\n");
+
+ ret = __of_address_resource_bounds(&r, param->start, param->size);
+ KUNIT_EXPECT_EQ(test, param->ret, ret);
+ if (ret == 0) {
+ KUNIT_EXPECT_EQ(test, (resource_size_t)param->res_start, r.start);
+ KUNIT_EXPECT_EQ(test, (resource_size_t)param->res_end, r.end);
+ KUNIT_EXPECT_EQ(test, param->size, resource_size(&r));
+ }
+}
+
+static struct kunit_case of_address_test_cases[] = {
+ KUNIT_CASE_PARAM(of_address_resource_bounds, of_address_resource_bounds_gen_params),
+ {}
+};
+
+static struct kunit_suite of_address_suite = {
+ .name = "of_address",
+ .test_cases = of_address_test_cases,
+};
+
kunit_test_suites(
- &of_dtb_suite,
+ &of_dtb_suite, &of_address_suite,
);
MODULE_DESCRIPTION("KUnit tests for OF APIs");
+MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING");
MODULE_LICENSE("GPL");
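
The new KUnit suite above table-drives __of_address_resource_bounds() with start/size pairs that do or do not fit in resource_size_t. Below is a loose userspace analogue of such a table-driven overflow check, using an artificial 32-bit resource type so the overflow cases are easy to trigger; it is a sketch, not the kernel helper itself.

    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define ARRAY_SIZE(a)   (sizeof(a) / sizeof((a)[0]))

    typedef uint32_t res_size_t;            /* artificial 32-bit resource type */
    struct res { res_size_t start, end; };

    /* Roughly the rule the cases above encode: size 0 means end = start - 1. */
    static int resource_bounds(struct res *r, uint64_t start, uint64_t size)
    {
        if (start > UINT32_MAX)
            return -EOVERFLOW;
        if (size && start + size - 1 > UINT32_MAX)
            return -EOVERFLOW;
        r->start = (res_size_t)start;
        r->end = (res_size_t)(start + size - 1);
        return 0;
    }

    static const struct { uint64_t start, size; int ret; } cases[] = {
        { 0x1000,            0x1000,      0 },
        { 0x1000,            0xffffffff, -EOVERFLOW },
        { 0x100000000ULL,    1,          -EOVERFLOW },
    };

    int main(void)
    {
        for (size_t i = 0; i < ARRAY_SIZE(cases); i++) {
            struct res r;
            int ret = resource_bounds(&r, cases[i].start, cases[i].size);

            printf("case %zu: expected %d, got %d%s\n", i, cases[i].ret, ret,
                   ret == cases[i].ret ? "" : "  <-- MISMATCH");
        }
        return 0;
    }
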
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index e0bc90597dca..da3e7edcf49d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -108,9 +108,6 @@ void pci_save_aspm_l1ss_state(struct pci_dev *pdev)
pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL2, cap++);
pci_read_config_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1, cap++);
- if (parent->state_saved)
- return;
-
/*
* Save parent's L1 substate configuration so we have it for
* pci_restore_aspm_l1ss_state(pdev) to restore.
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index b6536ed599c3..246744d8d268 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -339,13 +339,14 @@ out:
return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
-static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+static __always_inline void pci_read_bases(struct pci_dev *dev,
+ unsigned int howmany, int rom)
{
u32 rombar, stdbars[PCI_STD_NUM_BARS];
unsigned int pos, reg;
u16 orig_cmd;
- BUILD_BUG_ON(howmany > PCI_STD_NUM_BARS);
+ BUILD_BUG_ON(statically_true(howmany > PCI_STD_NUM_BARS));
if (dev->non_compliant_bars)
return;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index b84ff7bade82..82b21e34c545 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -5522,7 +5522,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x443, quirk_intel_qat_vf_cap);
* AMD Matisse USB 3.0 Host Controller 0x149c
* Intel 82579LM Gigabit Ethernet Controller 0x1502
* Intel 82579V Gigabit Ethernet Controller 0x1503
- *
+ * Mediatek MT7922 802.11ax PCI Express Wireless Network Adapter
*/
static void quirk_no_flr(struct pci_dev *dev)
{
@@ -5534,6 +5534,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x149c, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x7901, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1502, quirk_no_flr);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x1503, quirk_no_flr);
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_MEDIATEK, 0x0616, quirk_no_flr);
/* FLR may cause the SolidRun SNET DPU (rev 0x1) to hang */
static void quirk_no_flr_snet(struct pci_dev *dev)
diff --git a/drivers/pci/tph.c b/drivers/pci/tph.c
index 1e604fbbda65..07de59ca2ebf 100644
--- a/drivers/pci/tph.c
+++ b/drivers/pci/tph.c
@@ -360,7 +360,7 @@ int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag)
return err;
}
- set_ctrl_reg_req_en(pdev, pdev->tph_mode);
+ set_ctrl_reg_req_en(pdev, pdev->tph_req_type);
pci_dbg(pdev, "set steering tag: %s table, index=%d, tag=%#04x\n",
(loc == PCI_TPH_LOC_MSIX) ? "MSI-X" : "ST", index, tag);
diff --git a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
index 45004f598e4d..e4c0a82d16d9 100644
--- a/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
+++ b/drivers/phy/freescale/phy-fsl-samsung-hdmi.c
@@ -325,7 +325,7 @@ to_fsl_samsung_hdmi_phy(struct clk_hw *hw)
return container_of(hw, struct fsl_samsung_hdmi_phy, hw);
}
-static void
+static int
fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
const struct phy_config *cfg)
{
@@ -341,6 +341,9 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
break;
}
+ if (unlikely(div == 4))
+ return -EINVAL;
+
writeb(FIELD_PREP(REG12_CK_DIV_MASK, div), phy->regs + PHY_REG(12));
/*
@@ -364,6 +367,8 @@ fsl_samsung_hdmi_phy_configure_pll_lock_det(struct fsl_samsung_hdmi_phy *phy,
FIELD_PREP(REG14_RP_CODE_MASK, 2) |
FIELD_PREP(REG14_TG_CODE_HIGH_MASK, fld_tg_code >> 8),
phy->regs + PHY_REG(14));
+
+ return 0;
}
static unsigned long fsl_samsung_hdmi_phy_find_pms(unsigned long fout, u8 *p, u16 *m, u8 *s)
@@ -466,7 +471,11 @@ static int fsl_samsung_hdmi_phy_configure(struct fsl_samsung_hdmi_phy *phy,
writeb(REG21_SEL_TX_CK_INV | FIELD_PREP(REG21_PMS_S_MASK,
cfg->pll_div_regs[2] >> 4), phy->regs + PHY_REG(21));
- fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+ ret = fsl_samsung_hdmi_phy_configure_pll_lock_det(phy, cfg);
+ if (ret) {
+ dev_err(phy->dev, "pixclock too large\n");
+ return ret;
+ }
writeb(REG33_FIX_DA | REG33_MODE_SET_DONE, phy->regs + PHY_REG(33));
diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig
index 2f7a05f21dc5..dcb8e1628632 100644
--- a/drivers/phy/rockchip/Kconfig
+++ b/drivers/phy/rockchip/Kconfig
@@ -125,6 +125,7 @@ config PHY_ROCKCHIP_USBDP
depends on ARCH_ROCKCHIP && OF
depends on TYPEC
select GENERIC_PHY
+ select USB_COMMON
help
Enable this to support the Rockchip USB3.0/DP combo PHY with
Samsung IP block. This is required for USB3 support on RK3588.
diff --git a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
index a1532ef8bbe9..8c3ce57f8915 100644
--- a/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-naneng-combphy.c
@@ -324,7 +324,10 @@ static int rockchip_combphy_parse_dt(struct device *dev, struct rockchip_combphy
priv->ext_refclk = device_property_present(dev, "rockchip,ext-refclk");
- priv->phy_rst = devm_reset_control_get(dev, "phy");
+ priv->phy_rst = devm_reset_control_get_exclusive(dev, "phy");
+ /* fallback to old behaviour */
+ if (PTR_ERR(priv->phy_rst) == -ENOENT)
+ priv->phy_rst = devm_reset_control_array_get_exclusive(dev);
if (IS_ERR(priv->phy_rst))
return dev_err_probe(dev, PTR_ERR(priv->phy_rst), "failed to get phy reset\n");
diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c
index c421b495eb0f..46b8f6987c62 100644
--- a/drivers/phy/samsung/phy-exynos5-usbdrd.c
+++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c
@@ -488,9 +488,9 @@ exynos5_usbdrd_pipe3_set_refclk(struct phy_usb_instance *inst)
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
/* FSEL settings corresponding to reference clock */
- reg &= ~PHYCLKRST_FSEL_PIPE_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_PIPE_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
switch (phy_drd->extrefclk) {
case EXYNOS5_FSEL_50MHZ:
reg |= (PHYCLKRST_MPLL_MULTIPLIER_50M_REF |
@@ -532,9 +532,9 @@ exynos5_usbdrd_utmi_set_refclk(struct phy_usb_instance *inst)
reg &= ~PHYCLKRST_REFCLKSEL_MASK;
reg |= PHYCLKRST_REFCLKSEL_EXT_REFCLK;
- reg &= ~PHYCLKRST_FSEL_UTMI_MASK |
- PHYCLKRST_MPLL_MULTIPLIER_MASK |
- PHYCLKRST_SSC_REFCLKSEL_MASK;
+ reg &= ~(PHYCLKRST_FSEL_UTMI_MASK |
+ PHYCLKRST_MPLL_MULTIPLIER_MASK |
+ PHYCLKRST_SSC_REFCLKSEL_MASK);
reg |= PHYCLKRST_FSEL(phy_drd->extrefclk);
return reg;
@@ -1296,14 +1296,17 @@ static int exynos5_usbdrd_gs101_phy_exit(struct phy *phy)
struct exynos5_usbdrd_phy *phy_drd = to_usbdrd_phy(inst);
int ret;
+ if (inst->phy_cfg->id == EXYNOS5_DRDPHY_UTMI) {
+ ret = exynos850_usbdrd_phy_exit(phy);
+ if (ret)
+ return ret;
+ }
+
+ exynos5_usbdrd_phy_isol(inst, true);
+
if (inst->phy_cfg->id != EXYNOS5_DRDPHY_UTMI)
return 0;
- ret = exynos850_usbdrd_phy_exit(phy);
- if (ret)
- return ret;
-
- exynos5_usbdrd_phy_isol(inst, true);
return regulator_bulk_disable(phy_drd->drv_data->n_regulators,
phy_drd->regulators);
}
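
The Exynos fix above is a pure operator-precedence correction: '~' binds tighter than '|', so only the first mask was being inverted. A short demonstration with stand-in mask values (the real PHYCLKRST_* constants differ):

    #include <stdio.h>
    #include <stdint.h>

    #define FSEL_MASK   0x000000e0u
    #define MPLL_MASK   0x001f0000u
    #define SSC_MASK    0x60000000u

    int main(void)
    {
        uint32_t buggy = 0xffffffffu, fixed = 0xffffffffu;

        /*
         * '~' applies only to FSEL_MASK; the MPLL and SSC bits stay set in
         * the right-hand operand, so they are never cleared from the register.
         */
        buggy &= ~FSEL_MASK | MPLL_MASK | SSC_MASK;

        /* Intended behaviour: clear all three fields. */
        fixed &= ~(FSEL_MASK | MPLL_MASK | SSC_MASK);

        printf("buggy: 0x%08x (MPLL/SSC bits survive)\n", buggy);
        printf("fixed: 0x%08x\n", fixed);
        return 0;
    }
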
diff --git a/drivers/phy/st/phy-stm32-combophy.c b/drivers/phy/st/phy-stm32-combophy.c
index 49e9fa90a681..607b4d607eb5 100644
--- a/drivers/phy/st/phy-stm32-combophy.c
+++ b/drivers/phy/st/phy-stm32-combophy.c
@@ -111,6 +111,7 @@ static const struct clk_impedance imp_lookup[] = {
{ 4204000, { 511000, 609000, 706000, 802000 } },
{ 3999000, { 571000, 648000, 726000, 803000 } }
};
+#define DEFAULT_IMP_INDEX 3 /* Default impedance is 50 Ohm */
static int stm32_impedance_tune(struct stm32_combophy *combophy)
{
@@ -119,10 +120,9 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
u8 imp_of, vswing_of;
u32 max_imp = imp_lookup[0].microohm;
u32 min_imp = imp_lookup[imp_size - 1].microohm;
- u32 max_vswing = imp_lookup[imp_size - 1].vswing[vswing_size - 1];
+ u32 max_vswing;
u32 min_vswing = imp_lookup[0].vswing[0];
u32 val;
- u32 regval;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-micro-ohms", &val)) {
if (val < min_imp || val > max_imp) {
@@ -130,45 +130,43 @@ static int stm32_impedance_tune(struct stm32_combophy *combophy)
return -EINVAL;
}
- regval = 0;
- for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++) {
- if (imp_lookup[imp_of].microohm <= val) {
- regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of);
+ for (imp_of = 0; imp_of < ARRAY_SIZE(imp_lookup); imp_of++)
+ if (imp_lookup[imp_of].microohm <= val)
break;
- }
- }
+
+ if (WARN_ON(imp_of == ARRAY_SIZE(imp_lookup)))
+ return -EINVAL;
dev_dbg(combophy->dev, "Set %u micro-ohms output impedance\n",
imp_lookup[imp_of].microohm);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_OHM,
- regval);
- } else {
- regmap_read(combophy->regmap, SYSCFG_PCIEPRGCR, &val);
- imp_of = FIELD_GET(STM32MP25_PCIEPRG_IMPCTRL_OHM, val);
- }
+ FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_OHM, imp_of));
+ } else
+ imp_of = DEFAULT_IMP_INDEX;
if (!of_property_read_u32(combophy->dev->of_node, "st,output-vswing-microvolt", &val)) {
+ max_vswing = imp_lookup[imp_of].vswing[vswing_size - 1];
+
if (val < min_vswing || val > max_vswing) {
dev_err(combophy->dev, "Invalid value %u for output vswing\n", val);
return -EINVAL;
}
- regval = 0;
- for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++) {
- if (imp_lookup[imp_of].vswing[vswing_of] >= val) {
- regval = FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of);
+ for (vswing_of = 0; vswing_of < ARRAY_SIZE(imp_lookup[imp_of].vswing); vswing_of++)
+ if (imp_lookup[imp_of].vswing[vswing_of] >= val)
break;
- }
- }
+
+ if (WARN_ON(vswing_of == ARRAY_SIZE(imp_lookup[imp_of].vswing)))
+ return -EINVAL;
dev_dbg(combophy->dev, "Set %u microvolt swing\n",
imp_lookup[imp_of].vswing[vswing_of]);
regmap_update_bits(combophy->regmap, SYSCFG_PCIEPRGCR,
STM32MP25_PCIEPRG_IMPCTRL_VSWING,
- regval);
+ FIELD_PREP(STM32MP25_PCIEPRG_IMPCTRL_VSWING, vswing_of));
}
return 0;
diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c
index 0f60d5d1c167..fae6242aa730 100644
--- a/drivers/phy/tegra/xusb-tegra186.c
+++ b/drivers/phy/tegra/xusb-tegra186.c
@@ -928,6 +928,7 @@ static int tegra186_utmi_phy_init(struct phy *phy)
unsigned int index = lane->index;
struct device *dev = padctl->dev;
int err;
+ u32 reg;
port = tegra_xusb_find_usb2_port(padctl, index);
if (!port) {
@@ -935,6 +936,16 @@ static int tegra186_utmi_phy_init(struct phy *phy)
return -ENODEV;
}
+ if (port->mode == USB_DR_MODE_OTG ||
+ port->mode == USB_DR_MODE_PERIPHERAL) {
+ /* reset VBUS&ID OVERRIDE */
+ reg = padctl_readl(padctl, USB2_VBUS_ID);
+ reg &= ~VBUS_OVERRIDE;
+ reg &= ~ID_OVERRIDE(~0);
+ reg |= ID_OVERRIDE_FLOATING;
+ padctl_writel(padctl, reg, USB2_VBUS_ID);
+ }
+
if (port->supply && port->mode == USB_DR_MODE_HOST) {
err = regulator_enable(port->supply);
if (err) {
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index e0ca59ae3153..ff5d5e29629f 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -424,6 +424,12 @@ static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv)
return 0;
}
+static const struct regmap_config phy_gmii_sel_regmap_cfg = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+};
+
static int phy_gmii_sel_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -468,7 +474,14 @@ static int phy_gmii_sel_probe(struct platform_device *pdev)
priv->regmap = syscon_node_to_regmap(node->parent);
if (IS_ERR(priv->regmap)) {
- priv->regmap = device_node_to_regmap(node);
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return dev_err_probe(dev, PTR_ERR(base),
+ "failed to get base memory resource\n");
+
+ priv->regmap = regmap_init_mmio(dev, base, &phy_gmii_sel_regmap_cfg);
if (IS_ERR(priv->regmap))
return dev_err_probe(dev, PTR_ERR(priv->regmap),
"Failed to get syscon\n");
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 0b13d7f17b32..42547f64453e 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -89,12 +89,12 @@ static void pinconf_generic_dump_one(struct pinctrl_dev *pctldev,
seq_puts(s, items[i].display);
/* Print unit if available */
if (items[i].has_arg) {
- seq_printf(s, " (0x%x",
- pinconf_to_config_argument(config));
+ u32 val = pinconf_to_config_argument(config);
+
if (items[i].format)
- seq_printf(s, " %s)", items[i].format);
+ seq_printf(s, " (%u %s)", val, items[i].format);
else
- seq_puts(s, ")");
+ seq_printf(s, " (0x%x)", val);
}
}
}
diff --git a/drivers/pinctrl/pinctrl-cy8c95x0.c b/drivers/pinctrl/pinctrl-cy8c95x0.c
index 0d6c2027d4c1..d73004b4a45e 100644
--- a/drivers/pinctrl/pinctrl-cy8c95x0.c
+++ b/drivers/pinctrl/pinctrl-cy8c95x0.c
@@ -42,7 +42,7 @@
#define CY8C95X0_PORTSEL 0x18
/* Port settings, write PORTSEL first */
#define CY8C95X0_INTMASK 0x19
-#define CY8C95X0_PWMSEL 0x1A
+#define CY8C95X0_SELPWM 0x1A
#define CY8C95X0_INVERT 0x1B
#define CY8C95X0_DIRECTION 0x1C
/* Drive mode register change state on writing '1' */
@@ -328,14 +328,14 @@ static int cypress_get_pin_mask(struct cy8c95x0_pinctrl *chip, unsigned int pin)
static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
{
/*
- * Only 12 registers are present per port (see Table 6 in the
- * datasheet).
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
*/
- if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) < 12)
- return true;
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -344,8 +344,11 @@ static bool cy8c95x0_readable_register(struct device *dev, unsigned int reg)
static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
{
- if (reg >= CY8C95X0_VIRTUAL)
- return true;
+ /*
+ * Only 12 registers are present per port (see Table 6 in the datasheet).
+ */
+ if (reg >= CY8C95X0_VIRTUAL && (reg % MUXED_STRIDE) >= 12)
+ return false;
switch (reg) {
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
@@ -353,6 +356,7 @@ static bool cy8c95x0_writeable_register(struct device *dev, unsigned int reg)
case CY8C95X0_DEVID:
return false;
case 0x24 ... 0x27:
+ case 0x31 ... 0x3f:
return false;
default:
return true;
@@ -365,8 +369,8 @@ static bool cy8c95x0_volatile_register(struct device *dev, unsigned int reg)
case CY8C95X0_INPUT_(0) ... CY8C95X0_INPUT_(7):
case CY8C95X0_INTSTATUS_(0) ... CY8C95X0_INTSTATUS_(7):
case CY8C95X0_INTMASK:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
- case CY8C95X0_PWMSEL:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
case CY8C95X0_DRV_PD:
@@ -395,7 +399,7 @@ static bool cy8c95x0_muxed_register(unsigned int reg)
{
switch (reg) {
case CY8C95X0_INTMASK:
- case CY8C95X0_PWMSEL:
+ case CY8C95X0_SELPWM:
case CY8C95X0_INVERT:
case CY8C95X0_DIRECTION:
case CY8C95X0_DRV_PU:
@@ -466,7 +470,11 @@ static const struct regmap_config cy8c9520_i2c_regmap = {
.max_register = 0, /* Updated at runtime */
.num_reg_defaults_raw = 0, /* Updated at runtime */
.use_single_read = true, /* Workaround for regcache bug */
+#if IS_ENABLED(CONFIG_DEBUG_PINCTRL)
+ .disable_locking = false,
+#else
.disable_locking = true,
+#endif
};
static inline int cy8c95x0_regmap_update_bits_base(struct cy8c95x0_pinctrl *chip,
@@ -789,7 +797,7 @@ static int cy8c95x0_gpio_get_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DIRECTION;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT:
reg = CY8C95X0_OUTPUT;
@@ -868,7 +876,7 @@ static int cy8c95x0_gpio_set_pincfg(struct cy8c95x0_pinctrl *chip,
reg = CY8C95X0_DRV_PP_FAST;
break;
case PIN_CONFIG_MODE_PWM:
- reg = CY8C95X0_PWMSEL;
+ reg = CY8C95X0_SELPWM;
break;
case PIN_CONFIG_OUTPUT_ENABLE:
return cy8c95x0_pinmux_direction(chip, off, !arg);
@@ -1153,7 +1161,7 @@ static void cy8c95x0_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *
bitmap_zero(mask, MAX_LINE);
__set_bit(pin, mask);
- if (cy8c95x0_read_regs_mask(chip, CY8C95X0_PWMSEL, pwm, mask)) {
+ if (cy8c95x0_read_regs_mask(chip, CY8C95X0_SELPWM, pwm, mask)) {
seq_puts(s, "not available");
return;
}
@@ -1198,7 +1206,7 @@ static int cy8c95x0_set_mode(struct cy8c95x0_pinctrl *chip, unsigned int off, bo
u8 port = cypress_get_port(chip, off);
u8 bit = cypress_get_pin_mask(chip, off);
- return cy8c95x0_regmap_write_bits(chip, CY8C95X0_PWMSEL, port, bit, mode ? bit : 0);
+ return cy8c95x0_regmap_write_bits(chip, CY8C95X0_SELPWM, port, bit, mode ? bit : 0);
}
static int cy8c95x0_pinmux_mode(struct cy8c95x0_pinctrl *chip,
@@ -1347,7 +1355,7 @@ static int cy8c95x0_irq_setup(struct cy8c95x0_pinctrl *chip, int irq)
ret = devm_request_threaded_irq(chip->dev, irq,
NULL, cy8c95x0_irq_handler,
- IRQF_ONESHOT | IRQF_SHARED | IRQF_TRIGGER_HIGH,
+ IRQF_ONESHOT | IRQF_SHARED,
dev_name(chip->dev), chip);
if (ret) {
dev_err(chip->dev, "failed to request irq %d\n", irq);
@@ -1438,15 +1446,15 @@ static int cy8c95x0_probe(struct i2c_client *client)
switch (chip->tpin) {
case 20:
strscpy(chip->name, cy8c95x0_id[0].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 3 * MUXED_STRIDE - 1;
break;
case 40:
strscpy(chip->name, cy8c95x0_id[1].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 6 * MUXED_STRIDE - 1;
break;
case 60:
strscpy(chip->name, cy8c95x0_id[2].name);
- regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE;
+ regmap_range_conf.range_max = CY8C95X0_VIRTUAL + 8 * MUXED_STRIDE - 1;
break;
default:
return -ENODEV;
diff --git a/drivers/platform/cznic/Kconfig b/drivers/platform/cznic/Kconfig
index 49c383eb6785..13e37b49d9d0 100644
--- a/drivers/platform/cznic/Kconfig
+++ b/drivers/platform/cznic/Kconfig
@@ -6,6 +6,7 @@
menuconfig CZNIC_PLATFORMS
bool "Platform support for CZ.NIC's Turris hardware"
+ depends on ARCH_MVEBU || COMPILE_TEST
help
Say Y here to be able to choose driver support for CZ.NIC's Turris
devices. This option alone does not add any kernel code.
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index 764cc1fe90ae..a2cb2d5544f5 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -452,6 +452,7 @@ static int amd_pmf_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
mutex_init(&dev->update_mutex);
+ mutex_init(&dev->cb_mutex);
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
@@ -477,6 +478,7 @@ static void amd_pmf_remove(struct platform_device *pdev)
amd_pmf_dbgfs_unregister(dev);
mutex_destroy(&dev->lock);
mutex_destroy(&dev->update_mutex);
+ mutex_destroy(&dev->cb_mutex);
kfree(dev->buf);
}
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index 41b2b91b8fdc..e6bdee68ccf3 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -106,9 +106,12 @@ struct cookie_header {
#define PMF_TA_IF_VERSION_MAJOR 1
#define TA_PMF_ACTION_MAX 32
#define TA_PMF_UNDO_MAX 8
-#define TA_OUTPUT_RESERVED_MEM 906
+#define TA_OUTPUT_RESERVED_MEM 922
#define MAX_OPERATION_PARAMS 4
+#define TA_ERROR_CRYPTO_INVALID_PARAM 0x20002
+#define TA_ERROR_CRYPTO_BIN_TOO_LARGE 0x2000d
+
#define PMF_IF_V1 1
#define PMF_IF_V2 2
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index e6cf0b22dac3..d3083383f11f 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -297,12 +297,14 @@ int amd_pmf_get_pprof_modes(struct amd_pmf_dev *pmf)
switch (pmf->current_profile) {
case PLATFORM_PROFILE_PERFORMANCE:
+ case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
mode = POWER_MODE_PERFORMANCE;
break;
case PLATFORM_PROFILE_BALANCED:
mode = POWER_MODE_BALANCED_POWER;
break;
case PLATFORM_PROFILE_LOW_POWER:
+ case PLATFORM_PROFILE_QUIET:
mode = POWER_MODE_POWER_SAVER;
break;
default:
@@ -387,6 +389,14 @@ static int amd_pmf_profile_set(struct device *dev,
return 0;
}
+static int amd_pmf_hidden_choices(void *drvdata, unsigned long *choices)
+{
+ set_bit(PLATFORM_PROFILE_QUIET, choices);
+ set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+
+ return 0;
+}
+
static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
{
set_bit(PLATFORM_PROFILE_LOW_POWER, choices);
@@ -398,6 +408,7 @@ static int amd_pmf_profile_probe(void *drvdata, unsigned long *choices)
static const struct platform_profile_ops amd_pmf_profile_ops = {
.probe = amd_pmf_profile_probe,
+ .hidden_choices = amd_pmf_hidden_choices,
.profile_get = amd_pmf_profile_get,
.profile_set = amd_pmf_profile_set,
};
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 8c88769ea1d8..ceaff1ebb7b9 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -27,8 +27,11 @@ module_param(pb_side_load, bool, 0444);
MODULE_PARM_DESC(pb_side_load, "Sideload policy binaries debug policy failures");
#endif
-static const uuid_t amd_pmf_ta_uuid = UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d,
- 0xb1, 0x2d, 0xc5, 0x29, 0xb1, 0x3d, 0x85, 0x43);
+static const uuid_t amd_pmf_ta_uuid[] = { UUID_INIT(0xd9b39bf2, 0x66bd, 0x4154, 0xaf, 0xb8, 0x8a,
+ 0xcc, 0x2b, 0x2b, 0x60, 0xd6),
+ UUID_INIT(0x6fd93b77, 0x3fb8, 0x524d, 0xb1, 0x2d, 0xc5,
+ 0x29, 0xb1, 0x3d, 0x85, 0x43),
+ };
static const char *amd_pmf_uevent_as_str(unsigned int state)
{
@@ -321,9 +324,9 @@ static int amd_pmf_start_policy_engine(struct amd_pmf_dev *dev)
*/
schedule_delayed_work(&dev->pb_work, msecs_to_jiffies(pb_actions_ms * 3));
} else {
- dev_err(dev->dev, "ta invoke cmd init failed err: %x\n", res);
+ dev_dbg(dev->dev, "ta invoke cmd init failed err: %x\n", res);
dev->smart_pc_enabled = false;
- return -EIO;
+ return res;
}
return 0;
@@ -390,12 +393,12 @@ static int amd_pmf_amdtee_ta_match(struct tee_ioctl_version_data *ver, const voi
return ver->impl_id == TEE_IMPL_ID_AMDTEE;
}
-static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id)
+static int amd_pmf_ta_open_session(struct tee_context *ctx, u32 *id, const uuid_t *uuid)
{
struct tee_ioctl_open_session_arg sess_arg = {};
int rc;
- export_uuid(sess_arg.uuid, &amd_pmf_ta_uuid);
+ export_uuid(sess_arg.uuid, uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
@@ -434,7 +437,7 @@ static int amd_pmf_register_input_device(struct amd_pmf_dev *dev)
return 0;
}
-static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
+static int amd_pmf_tee_init(struct amd_pmf_dev *dev, const uuid_t *uuid)
{
u32 size;
int ret;
@@ -445,7 +448,7 @@ static int amd_pmf_tee_init(struct amd_pmf_dev *dev)
return PTR_ERR(dev->tee_ctx);
}
- ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id);
+ ret = amd_pmf_ta_open_session(dev->tee_ctx, &dev->session_id, uuid);
if (ret) {
dev_err(dev->dev, "Failed to open TA session (%d)\n", ret);
ret = -EINVAL;
@@ -489,7 +492,8 @@ static void amd_pmf_tee_deinit(struct amd_pmf_dev *dev)
int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
{
- int ret;
+ bool status;
+ int ret, i;
ret = apmf_check_smart_pc(dev);
if (ret) {
@@ -502,10 +506,6 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
return -ENODEV;
}
- ret = amd_pmf_tee_init(dev);
- if (ret)
- return ret;
-
INIT_DELAYED_WORK(&dev->pb_work, amd_pmf_invoke_cmd);
ret = amd_pmf_set_dram_addr(dev, true);
@@ -534,8 +534,30 @@ int amd_pmf_init_smart_pc(struct amd_pmf_dev *dev)
goto error;
}
- ret = amd_pmf_start_policy_engine(dev);
- if (ret)
+ for (i = 0; i < ARRAY_SIZE(amd_pmf_ta_uuid); i++) {
+ ret = amd_pmf_tee_init(dev, &amd_pmf_ta_uuid[i]);
+ if (ret)
+ return ret;
+
+ ret = amd_pmf_start_policy_engine(dev);
+ switch (ret) {
+ case TA_PMF_TYPE_SUCCESS:
+ status = true;
+ break;
+ case TA_ERROR_CRYPTO_INVALID_PARAM:
+ case TA_ERROR_CRYPTO_BIN_TOO_LARGE:
+ amd_pmf_tee_deinit(dev);
+ status = false;
+ break;
+ default:
+ goto error;
+ }
+
+ if (status)
+ break;
+ }
+
+ if (!status && !pb_side_load)
goto error;
if (pb_side_load)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index dfb5d4b8c046..30bd366d7b58 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -1121,7 +1121,7 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
/* Create platform_profile structure and register */
priv->dytc->ppdev = devm_platform_profile_register(&priv->platform_device->dev,
- "ideapad-laptop", &priv->dytc,
+ "ideapad-laptop", priv->dytc,
&dytc_profile_ops);
if (IS_ERR(priv->dytc->ppdev)) {
err = PTR_ERR(priv->dytc->ppdev);
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 927a2993f616..88a1a9ff2f34 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -139,6 +139,13 @@ static const struct dmi_system_id button_array_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 3"),
},
},
+ {
+ .ident = "Microsoft Surface Go 4",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go 4"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h
index 5c3c0dfa1bf8..f369fb0d3d82 100644
--- a/drivers/platform/x86/intel/ifs/ifs.h
+++ b/drivers/platform/x86/intel/ifs/ifs.h
@@ -23,12 +23,14 @@
* IFS Image
* ---------
*
- * Intel provides a firmware file containing the scan tests via
- * github [#f1]_. Similar to microcode there is a separate file for each
+ * Intel provides firmware files containing the scan tests via the webpage [#f1]_.
+ * Look under "In-Field Scan Test Images Download" section towards the
+ * end of the page. Similar to microcode, there are separate files for each
* family-model-stepping. IFS Images are not applicable for some test types.
* Wherever applicable the sysfs directory would provide a "current_batch" file
* (see below) for loading the image.
*
+ * .. [#f1] https://intel.com/InFieldScan
*
* IFS Image Loading
* -----------------
@@ -125,9 +127,6 @@
* 2) Hardware allows for some number of cores to be tested in parallel.
* The driver does not make use of this, it only tests one core at a time.
*
- * .. [#f1] https://github.com/intel/TBD
- *
- *
* Structural Based Functional Test at Field (SBAF):
* -------------------------------------------------
*
diff --git a/drivers/platform/x86/intel/int3472/discrete.c b/drivers/platform/x86/intel/int3472/discrete.c
index 31015ebe20d8..092252eb95a8 100644
--- a/drivers/platform/x86/intel/int3472/discrete.c
+++ b/drivers/platform/x86/intel/int3472/discrete.c
@@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */
#include <linux/acpi.h>
+#include <linux/array_size.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
@@ -55,7 +56,7 @@ static void skl_int3472_log_sensor_module_name(struct int3472_discrete_device *i
static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
char *path = agpio->resource_source.string_ptr;
struct acpi_device *adev;
@@ -70,14 +71,14 @@ static int skl_int3472_fill_gpiod_lookup(struct gpiod_lookup *table_entry,
if (!adev)
return -ENODEV;
- *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, polarity);
+ *table_entry = GPIO_LOOKUP(acpi_dev_name(adev), agpio->pin_table[0], func, gpio_flags);
return 0;
}
static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
int ret;
@@ -87,7 +88,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
}
ret = skl_int3472_fill_gpiod_lookup(&int3472->gpios.table[int3472->n_sensor_gpios],
- agpio, func, polarity);
+ agpio, func, gpio_flags);
if (ret)
return ret;
@@ -100,7 +101,7 @@ static int skl_int3472_map_gpio_to_sensor(struct int3472_discrete_device *int347
static struct gpio_desc *
skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
struct acpi_resource_gpio *agpio,
- const char *func, u32 polarity)
+ const char *func, unsigned long gpio_flags)
{
struct gpio_desc *desc;
int ret;
@@ -111,7 +112,7 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return ERR_PTR(-ENOMEM);
lookup->dev_id = dev_name(int3472->dev);
- ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, polarity);
+ ret = skl_int3472_fill_gpiod_lookup(&lookup->table[0], agpio, func, gpio_flags);
if (ret)
return ERR_PTR(ret);
@@ -122,32 +123,76 @@ skl_int3472_gpiod_get_from_temp_lookup(struct int3472_discrete_device *int3472,
return desc;
}
-static void int3472_get_func_and_polarity(u8 type, const char **func, u32 *polarity)
+/**
+ * struct int3472_gpio_map - Map GPIOs to whatever is expected by the
+ * sensor driver (as in DT bindings)
+ * @hid: The ACPI HID of the device without the instance number e.g. INT347E
+ * @type_from: The GPIO type from ACPI ?SDT
+ * @type_to: The assigned GPIO type, typically same as @type_from
+ * @func: The function, e.g. "enable"
+ * @polarity_low: use GPIO_ACTIVE_LOW if true,
+ *                GPIO_ACTIVE_HIGH otherwise
+ */
+struct int3472_gpio_map {
+ const char *hid;
+ u8 type_from;
+ u8 type_to;
+ bool polarity_low;
+ const char *func;
+};
+
+static const struct int3472_gpio_map int3472_gpio_map[] = {
+ { "INT347E", INT3472_GPIO_TYPE_RESET, INT3472_GPIO_TYPE_RESET, false, "enable" },
+};
+
+static void int3472_get_func_and_polarity(struct acpi_device *adev, u8 *type,
+ const char **func, unsigned long *gpio_flags)
{
- switch (type) {
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(int3472_gpio_map); i++) {
+ /*
+ * Map the firmware-provided GPIO to whatever a driver expects
+ * (as in DT bindings). First check if the type matches with the
+ * GPIO map, then further check that the device _HID matches.
+ */
+ if (*type != int3472_gpio_map[i].type_from)
+ continue;
+
+ if (!acpi_dev_hid_uid_match(adev, int3472_gpio_map[i].hid, NULL))
+ continue;
+
+ *type = int3472_gpio_map[i].type_to;
+ *gpio_flags = int3472_gpio_map[i].polarity_low ?
+ GPIO_ACTIVE_LOW : GPIO_ACTIVE_HIGH;
+ *func = int3472_gpio_map[i].func;
+ return;
+ }
+
+ switch (*type) {
case INT3472_GPIO_TYPE_RESET:
*func = "reset";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_POWERDOWN:
*func = "powerdown";
- *polarity = GPIO_ACTIVE_LOW;
+ *gpio_flags = GPIO_ACTIVE_LOW;
break;
case INT3472_GPIO_TYPE_CLK_ENABLE:
*func = "clk-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_PRIVACY_LED:
*func = "privacy-led";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
case INT3472_GPIO_TYPE_POWER_ENABLE:
*func = "power-enable";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
default:
*func = "unknown";
- *polarity = GPIO_ACTIVE_HIGH;
+ *gpio_flags = GPIO_ACTIVE_HIGH;
break;
}
}
@@ -194,7 +239,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
struct gpio_desc *gpio;
const char *err_msg;
const char *func;
- u32 polarity;
+ unsigned long gpio_flags;
int ret;
if (!acpi_gpio_get_io_resource(ares, &agpio))
@@ -217,7 +262,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
type = FIELD_GET(INT3472_GPIO_DSM_TYPE, obj->integer.value);
- int3472_get_func_and_polarity(type, &func, &polarity);
+ int3472_get_func_and_polarity(int3472->sensor, &type, &func, &gpio_flags);
pin = FIELD_GET(INT3472_GPIO_DSM_PIN, obj->integer.value);
/* Pin field is not really used under Windows and wraps around at 8 bits */
@@ -227,16 +272,16 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
active_value = FIELD_GET(INT3472_GPIO_DSM_SENSOR_ON_VAL, obj->integer.value);
if (!active_value)
- polarity ^= GPIO_ACTIVE_LOW;
+ gpio_flags ^= GPIO_ACTIVE_LOW;
dev_dbg(int3472->dev, "%s %s pin %d active-%s\n", func,
agpio->resource_source.string_ptr, agpio->pin_table[0],
- str_high_low(polarity == GPIO_ACTIVE_HIGH));
+ str_high_low(gpio_flags == GPIO_ACTIVE_HIGH));
switch (type) {
case INT3472_GPIO_TYPE_RESET:
case INT3472_GPIO_TYPE_POWERDOWN:
- ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, polarity);
+ ret = skl_int3472_map_gpio_to_sensor(int3472, agpio, func, gpio_flags);
if (ret)
err_msg = "Failed to map GPIO pin to sensor\n";
@@ -244,7 +289,7 @@ static int skl_int3472_handle_gpio_resources(struct acpi_resource *ares,
case INT3472_GPIO_TYPE_CLK_ENABLE:
case INT3472_GPIO_TYPE_PRIVACY_LED:
case INT3472_GPIO_TYPE_POWER_ENABLE:
- gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, polarity);
+ gpio = skl_int3472_gpiod_get_from_temp_lookup(int3472, agpio, func, gpio_flags);
if (IS_ERR(gpio)) {
ret = PTR_ERR(gpio);
err_msg = "Failed to get GPIO\n";
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index 10f04b944117..1ee0fb5f8250 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -626,8 +626,8 @@ static u32 convert_ltr_scale(u32 val)
static int pmc_core_ltr_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- u64 decoded_snoop_ltr, decoded_non_snoop_ltr;
- u32 ltr_raw_data, scale, val;
+ u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
+ u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
unsigned int i, index, ltr_index = 0;
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index 8272f1dd0fbc..db3c031d1757 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -404,6 +404,11 @@ static const struct intel_vsec_platform_info oobmsm_info = {
.caps = VSEC_CAP_TELEMETRY | VSEC_CAP_SDSI | VSEC_CAP_TPMI,
};
+/* DMR OOBMSM info */
+static const struct intel_vsec_platform_info dmr_oobmsm_info = {
+ .caps = VSEC_CAP_TELEMETRY | VSEC_CAP_TPMI,
+};
+
/* TGL info */
static const struct intel_vsec_platform_info tgl_info = {
.caps = VSEC_CAP_TELEMETRY,
@@ -420,6 +425,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_M 0x7d0d
#define PCI_DEVICE_ID_INTEL_VSEC_MTL_S 0xad0d
#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM 0x09a7
+#define PCI_DEVICE_ID_INTEL_VSEC_OOBMSM_DMR 0x09a1
#define PCI_DEVICE_ID_INTEL_VSEC_RPL 0xa77d
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
@@ -430,6 +436,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_M, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_MTL_S, &mtl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM, &oobmsm_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_OOBMSM_DMR, &dmr_oobmsm_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_RPL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 1fcb0f99695a..1cc91173e012 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -7885,6 +7885,7 @@ static struct ibm_struct volume_driver_data = {
#define FAN_NS_CTRL_STATUS BIT(2) /* Bit which determines control is enabled or not */
#define FAN_NS_CTRL BIT(4) /* Bit which determines control is by host or EC */
+#define FAN_CLOCK_TPM (22500*60) /* Ticks per minute for a 22.5 kHz clock */
enum { /* Fan control constants */
fan_status_offset = 0x2f, /* EC register 0x2f */
@@ -7940,6 +7941,7 @@ static int fan_watchdog_maxinterval;
static bool fan_with_ns_addr;
static bool ecfw_with_fan_dec_rpm;
+static bool fan_speed_in_tpr;
static struct mutex fan_mutex;
@@ -8142,8 +8144,11 @@ static int fan_get_speed(unsigned int *speed)
!acpi_ec_read(fan_rpm_offset + 1, &hi)))
return -EIO;
- if (likely(speed))
+ if (likely(speed)) {
*speed = (hi << 8) | lo;
+ if (fan_speed_in_tpr && *speed != 0)
+ *speed = FAN_CLOCK_TPM / *speed;
+ }
break;
case TPACPI_FAN_RD_TPEC_NS:
if (!acpi_ec_read(fan_rpm_status_ns, &lo))
@@ -8176,8 +8181,11 @@ static int fan2_get_speed(unsigned int *speed)
if (rc)
return -EIO;
- if (likely(speed))
+ if (likely(speed)) {
*speed = (hi << 8) | lo;
+ if (fan_speed_in_tpr && *speed != 0)
+ *speed = FAN_CLOCK_TPM / *speed;
+ }
break;
case TPACPI_FAN_RD_TPEC_NS:
@@ -8788,6 +8796,7 @@ static const struct attribute_group fan_driver_attr_group = {
#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */
#define TPACPI_FAN_NS 0x0010 /* For EC with non-Standard register addresses */
#define TPACPI_FAN_DECRPM 0x0020 /* For ECFW's with RPM in register as decimal */
+#define TPACPI_FAN_TPR 0x0040 /* Fan speed is in Ticks Per Revolution */
static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1),
@@ -8817,6 +8826,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = {
TPACPI_Q_LNV3('R', '0', 'V', TPACPI_FAN_NS), /* 11e Gen5 KL-Y */
TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */
TPACPI_Q_LNV3('R', '0', 'Q', TPACPI_FAN_DECRPM),/* L480 */
+ TPACPI_Q_LNV('8', 'F', TPACPI_FAN_TPR), /* ThinkPad x120e */
};
static int __init fan_init(struct ibm_init_struct *iibm)
@@ -8887,6 +8897,8 @@ static int __init fan_init(struct ibm_init_struct *iibm)
if (quirks & TPACPI_FAN_Q1)
fan_quirk1_setup();
+ if (quirks & TPACPI_FAN_TPR)
+ fan_speed_in_tpr = true;
/* Try and probe the 2nd fan */
tp_features.second_fan = 1; /* needed for get_speed to work */
res = fan2_get_speed(&speed);
@@ -9960,6 +9972,7 @@ static const struct tpacpi_quirk battery_quirk_table[] __initconst = {
* Individual addressing is broken on models that expose the
* primary battery as BAT1.
*/
+ TPACPI_Q_LNV('G', '8', true), /* ThinkPad X131e */
TPACPI_Q_LNV('8', 'F', true), /* Thinkpad X120e */
TPACPI_Q_LNV('J', '7', true), /* B5400 */
TPACPI_Q_LNV('J', 'I', true), /* Thinkpad 11e */
@@ -10319,6 +10332,10 @@ static struct ibm_struct proxsensor_driver_data = {
#define DYTC_MODE_PSC_BALANCE 5 /* Default mode aka balanced */
#define DYTC_MODE_PSC_PERFORM 7 /* High power mode aka performance */
+#define DYTC_MODE_PSCV9_LOWPOWER 1 /* Low power mode */
+#define DYTC_MODE_PSCV9_BALANCE 3 /* Default mode aka balanced */
+#define DYTC_MODE_PSCV9_PERFORM 4 /* High power mode aka performance */
+
#define DYTC_ERR_MASK 0xF /* Bits 0-3 in cmd result are the error result */
#define DYTC_ERR_SUCCESS 1 /* CMD completed successful */
@@ -10339,6 +10356,10 @@ static int dytc_capabilities;
static bool dytc_mmc_get_available;
static int profile_force;
+static int platform_psc_profile_lowpower = DYTC_MODE_PSC_LOWPOWER;
+static int platform_psc_profile_balanced = DYTC_MODE_PSC_BALANCE;
+static int platform_psc_profile_performance = DYTC_MODE_PSC_PERFORM;
+
static int convert_dytc_to_profile(int funcmode, int dytcmode,
enum platform_profile_option *profile)
{
@@ -10360,19 +10381,15 @@ static int convert_dytc_to_profile(int funcmode, int dytcmode,
}
return 0;
case DYTC_FUNCTION_PSC:
- switch (dytcmode) {
- case DYTC_MODE_PSC_LOWPOWER:
+ if (dytcmode == platform_psc_profile_lowpower)
*profile = PLATFORM_PROFILE_LOW_POWER;
- break;
- case DYTC_MODE_PSC_BALANCE:
+ else if (dytcmode == platform_psc_profile_balanced)
*profile = PLATFORM_PROFILE_BALANCED;
- break;
- case DYTC_MODE_PSC_PERFORM:
+ else if (dytcmode == platform_psc_profile_performance)
*profile = PLATFORM_PROFILE_PERFORMANCE;
- break;
- default: /* Unknown mode */
+ else
return -EINVAL;
- }
+
return 0;
case DYTC_FUNCTION_AMT:
/* For now return balanced. It's the closest we have to 'auto' */
@@ -10393,19 +10410,19 @@ static int convert_profile_to_dytc(enum platform_profile_option profile, int *pe
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_LOWPOWER;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_LOWPOWER;
+ *perfmode = platform_psc_profile_lowpower;
break;
case PLATFORM_PROFILE_BALANCED:
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_BALANCE;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_BALANCE;
+ *perfmode = platform_psc_profile_balanced;
break;
case PLATFORM_PROFILE_PERFORMANCE:
if (dytc_capabilities & BIT(DYTC_FC_MMC))
*perfmode = DYTC_MODE_MMC_PERFORM;
else if (dytc_capabilities & BIT(DYTC_FC_PSC))
- *perfmode = DYTC_MODE_PSC_PERFORM;
+ *perfmode = platform_psc_profile_performance;
break;
default: /* Unknown profile */
return -EOPNOTSUPP;
@@ -10599,6 +10616,7 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
if (output & BIT(DYTC_QUERY_ENABLE_BIT))
dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
+ dbg_printk(TPACPI_DBG_INIT, "DYTC version %d\n", dytc_version);
/* Check DYTC is enabled and supports mode setting */
if (dytc_version < 5)
return -ENODEV;
@@ -10637,6 +10655,11 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
}
} else if (dytc_capabilities & BIT(DYTC_FC_PSC)) { /* PSC MODE */
pr_debug("PSC is supported\n");
+ if (dytc_version >= 9) { /* update profiles for DYTC 9 and up */
+ platform_psc_profile_lowpower = DYTC_MODE_PSCV9_LOWPOWER;
+ platform_psc_profile_balanced = DYTC_MODE_PSCV9_BALANCE;
+ platform_psc_profile_performance = DYTC_MODE_PSCV9_PERFORM;
+ }
} else {
dbg_printk(TPACPI_DBG_INIT, "No DYTC support available\n");
return -ENODEV;
@@ -10646,8 +10669,8 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
"DYTC version %d: thermal mode available\n", dytc_version);
/* Create platform_profile structure and register */
- tpacpi_pprof = devm_platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi",
- NULL, &dytc_profile_ops);
+ tpacpi_pprof = platform_profile_register(&tpacpi_pdev->dev, "thinkpad-acpi-profile",
+ NULL, &dytc_profile_ops);
/*
* If for some reason platform_profiles aren't enabled
* don't quit terminally.
@@ -10665,8 +10688,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm)
return 0;
}
+static void dytc_profile_exit(void)
+{
+ if (!IS_ERR_OR_NULL(tpacpi_pprof))
+ platform_profile_remove(tpacpi_pprof);
+}
+
static struct ibm_struct dytc_profile_driver_data = {
.name = "dytc-profile",
+ .exit = dytc_profile_exit,
};
/*************************************************************************
diff --git a/drivers/power/supply/axp20x_battery.c b/drivers/power/supply/axp20x_battery.c
index fa27195f074e..3c3158f31a48 100644
--- a/drivers/power/supply/axp20x_battery.c
+++ b/drivers/power/supply/axp20x_battery.c
@@ -466,10 +466,9 @@ static int axp717_battery_get_prop(struct power_supply *psy,
/*
* If a fault is detected it must also be cleared; if the
- * condition persists it should reappear (This is an
- * assumption, it's actually not documented). A restart was
- * not sufficient to clear the bit in testing despite the
- * register listed as POR.
+ * condition persists it should reappear. A restart was not
+ * sufficient to clear the bit in testing despite the register
+ * being listed as POR.
*/
case POWER_SUPPLY_PROP_HEALTH:
ret = regmap_read(axp20x_batt->regmap, AXP717_PMU_FAULT,
@@ -480,26 +479,26 @@ static int axp717_battery_get_prop(struct power_supply *psy,
switch (reg & AXP717_BATT_PMU_FAULT_MASK) {
case AXP717_BATT_UVLO_2_5V:
val->intval = POWER_SUPPLY_HEALTH_DEAD;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_UVLO_2_5V,
- AXP717_BATT_UVLO_2_5V);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UVLO_2_5V,
+ AXP717_BATT_UVLO_2_5V);
return 0;
case AXP717_BATT_OVER_TEMP:
val->intval = POWER_SUPPLY_HEALTH_HOT;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_OVER_TEMP,
- AXP717_BATT_OVER_TEMP);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_OVER_TEMP,
+ AXP717_BATT_OVER_TEMP);
return 0;
case AXP717_BATT_UNDER_TEMP:
val->intval = POWER_SUPPLY_HEALTH_COLD;
- regmap_update_bits(axp20x_batt->regmap,
- AXP717_PMU_FAULT,
- AXP717_BATT_UNDER_TEMP,
- AXP717_BATT_UNDER_TEMP);
+ regmap_write_bits(axp20x_batt->regmap,
+ AXP717_PMU_FAULT,
+ AXP717_BATT_UNDER_TEMP,
+ AXP717_BATT_UNDER_TEMP);
return 0;
default:
diff --git a/drivers/power/supply/da9150-fg.c b/drivers/power/supply/da9150-fg.c
index 652c1f213af1..4f28ef1bba1a 100644
--- a/drivers/power/supply/da9150-fg.c
+++ b/drivers/power/supply/da9150-fg.c
@@ -247,9 +247,9 @@ static int da9150_fg_current_avg(struct da9150_fg *fg,
DA9150_QIF_SD_GAIN_SIZE);
da9150_fg_read_sync_end(fg);
- div = (u64) (sd_gain * shunt_val * 65536ULL);
+ div = 65536ULL * sd_gain * shunt_val;
do_div(div, 1000000);
- res = (u64) (iavg * 1000000ULL);
+ res = 1000000ULL * iavg;
do_div(res, div);
val->intval = (int) res;
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index d0bb52a7a036..76c340b38015 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -1592,11 +1592,11 @@ __power_supply_register(struct device *parent,
if (rc)
goto register_thermal_failed;
- scoped_guard(rwsem_read, &psy->extensions_sem) {
- rc = power_supply_create_triggers(psy);
- if (rc)
- goto create_triggers_failed;
+ rc = power_supply_create_triggers(psy);
+ if (rc)
+ goto create_triggers_failed;
+ scoped_guard(rwsem_read, &psy->extensions_sem) {
rc = power_supply_add_hwmon_sysfs(psy);
if (rc)
goto add_hwmon_sysfs_failed;
diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c
index 52c32dcbf7d8..4112a0097338 100644
--- a/drivers/powercap/powercap_sys.c
+++ b/drivers/powercap/powercap_sys.c
@@ -627,8 +627,7 @@ struct powercap_control_type *powercap_register_control_type(
dev_set_name(&control_type->dev, "%s", name);
result = device_register(&control_type->dev);
if (result) {
- if (control_type->allocated)
- kfree(control_type);
+ put_device(&control_type->dev);
return ERR_PTR(result);
}
idr_init(&control_type->idr);
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index cd94bf3bfaf2..b3f340ed3163 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -31,4 +31,20 @@ config PPS_GENERATOR_PARPORT
utilizes STROBE pin of a parallel port to send PPS signals. It uses
parport abstraction layer and hrtimers to precisely control the signal.
+config PPS_GENERATOR_TIO
+ tristate "TIO PPS signal generator"
+ depends on X86 && CPU_SUP_INTEL
+ help
+ If you say yes here you get support for a PPS TIO signal generator
+ which generates a pulse at a prescribed time based on the system clock.
+ It uses time translation and hrtimers to precisely generate a pulse.
+ This hardware is present on 2019 and newer Intel CPUs. However, this
+ driver is not useful without adding highly specialized hardware outside
+ the Linux system to observe these pulses.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pps_gen_tio.
+
+ If unsure, say N.
+
endif # PPS_GENERATOR
diff --git a/drivers/pps/generators/Makefile b/drivers/pps/generators/Makefile
index dc1aa5a4688b..e109920e8a2d 100644
--- a/drivers/pps/generators/Makefile
+++ b/drivers/pps/generators/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_PPS_GENERATOR) := pps_gen_core.o
obj-$(CONFIG_PPS_GENERATOR_DUMMY) += pps_gen-dummy.o
obj-$(CONFIG_PPS_GENERATOR_PARPORT) += pps_gen_parport.o
+obj-$(CONFIG_PPS_GENERATOR_TIO) += pps_gen_tio.o
ccflags-$(CONFIG_PPS_DEBUG) := -DDEBUG
diff --git a/drivers/pps/generators/pps_gen-dummy.c b/drivers/pps/generators/pps_gen-dummy.c
index b284c200cbe5..55de4aecf35e 100644
--- a/drivers/pps/generators/pps_gen-dummy.c
+++ b/drivers/pps/generators/pps_gen-dummy.c
@@ -61,7 +61,7 @@ static int pps_gen_dummy_enable(struct pps_gen_device *pps_gen, bool enable)
* The PPS info struct
*/
-static struct pps_gen_source_info pps_gen_dummy_info = {
+static const struct pps_gen_source_info pps_gen_dummy_info = {
.use_system_clock = true,
.get_time = pps_gen_dummy_get_time,
.enable = pps_gen_dummy_enable,
diff --git a/drivers/pps/generators/pps_gen.c b/drivers/pps/generators/pps_gen.c
index ca592f1736f4..5b8bb454913c 100644
--- a/drivers/pps/generators/pps_gen.c
+++ b/drivers/pps/generators/pps_gen.c
@@ -66,7 +66,7 @@ static long pps_gen_cdev_ioctl(struct file *file,
if (ret)
return -EFAULT;
- ret = pps_gen->info.enable(pps_gen, status);
+ ret = pps_gen->info->enable(pps_gen, status);
if (ret)
return ret;
pps_gen->enabled = status;
@@ -76,7 +76,7 @@ static long pps_gen_cdev_ioctl(struct file *file,
case PPS_GEN_USESYSTEMCLOCK:
dev_dbg(pps_gen->dev, "PPS_GEN_USESYSTEMCLOCK\n");
- ret = put_user(pps_gen->info.use_system_clock, uiuarg);
+ ret = put_user(pps_gen->info->use_system_clock, uiuarg);
if (ret)
return -EFAULT;
@@ -175,7 +175,7 @@ static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
devt = MKDEV(MAJOR(pps_gen_devt), pps_gen->id);
cdev_init(&pps_gen->cdev, &pps_gen_cdev_fops);
- pps_gen->cdev.owner = pps_gen->info.owner;
+ pps_gen->cdev.owner = pps_gen->info->owner;
err = cdev_add(&pps_gen->cdev, devt, 1);
if (err) {
@@ -183,8 +183,8 @@ static int pps_gen_register_cdev(struct pps_gen_device *pps_gen)
MAJOR(pps_gen_devt), pps_gen->id);
goto free_ida;
}
- pps_gen->dev = device_create(pps_gen_class, pps_gen->info.parent, devt,
- pps_gen, "pps-gen%d", pps_gen->id);
+ pps_gen->dev = device_create(pps_gen_class, pps_gen->info->parent, devt,
+ pps_gen, "pps-gen%d", pps_gen->id);
if (IS_ERR(pps_gen->dev)) {
err = PTR_ERR(pps_gen->dev);
goto del_cdev;
@@ -225,7 +225,7 @@ static void pps_gen_unregister_cdev(struct pps_gen_device *pps_gen)
* Return: the PPS generator device in case of success, and ERR_PTR(errno)
* otherwise.
*/
-struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
+struct pps_gen_device *pps_gen_register_source(const struct pps_gen_source_info *info)
{
struct pps_gen_device *pps_gen;
int err;
@@ -235,7 +235,7 @@ struct pps_gen_device *pps_gen_register_source(struct pps_gen_source_info *info)
err = -ENOMEM;
goto pps_gen_register_source_exit;
}
- pps_gen->info = *info;
+ pps_gen->info = info;
pps_gen->enabled = false;
init_waitqueue_head(&pps_gen->queue);
diff --git a/drivers/pps/generators/pps_gen_tio.c b/drivers/pps/generators/pps_gen_tio.c
new file mode 100644
index 000000000000..6c46b46c66cd
--- /dev/null
+++ b/drivers/pps/generators/pps_gen_tio.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel PPS signal Generator Driver
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/container_of.h>
+#include <linux/device.h>
+#include <linux/hrtimer.h>
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pps_gen_kernel.h>
+#include <linux/timekeeping.h>
+#include <linux/types.h>
+
+#include <asm/cpu_device_id.h>
+
+#define TIOCTL 0x00
+#define TIOCOMPV 0x10
+#define TIOEC 0x30
+
+/* Control Register */
+#define TIOCTL_EN BIT(0)
+#define TIOCTL_DIR BIT(1)
+#define TIOCTL_EP GENMASK(3, 2)
+#define TIOCTL_EP_RISING_EDGE FIELD_PREP(TIOCTL_EP, 0)
+#define TIOCTL_EP_FALLING_EDGE FIELD_PREP(TIOCTL_EP, 1)
+#define TIOCTL_EP_TOGGLE_EDGE FIELD_PREP(TIOCTL_EP, 2)
+
+/* Safety time to set hrtimer early */
+#define SAFE_TIME_NS (10 * NSEC_PER_MSEC)
+
+#define MAGIC_CONST (NSEC_PER_SEC - SAFE_TIME_NS)
+#define ART_HW_DELAY_CYCLES 2
+
+struct pps_tio {
+ struct pps_gen_source_info gen_info;
+ struct pps_gen_device *pps_gen;
+ struct hrtimer timer;
+ void __iomem *base;
+ u32 prev_count;
+ spinlock_t lock;
+ struct device *dev;
+};
+
+static inline u32 pps_tio_read(u32 offset, struct pps_tio *tio)
+{
+ return readl(tio->base + offset);
+}
+
+static inline void pps_ctl_write(u32 value, struct pps_tio *tio)
+{
+ writel(value, tio->base + TIOCTL);
+}
+
+/*
+ * For the COMPV register, it's safer to write
+ * the higher 32 bits followed by the lower 32 bits
+ */
+static inline void pps_compv_write(u64 value, struct pps_tio *tio)
+{
+ hi_lo_writeq(value, tio->base + TIOCOMPV);
+}
+
+static inline ktime_t first_event(struct pps_tio *tio)
+{
+ return ktime_set(ktime_get_real_seconds() + 1, MAGIC_CONST);
+}
+
+static u32 pps_tio_disable(struct pps_tio *tio)
+{
+ u32 ctrl;
+
+ ctrl = pps_tio_read(TIOCTL, tio);
+ pps_compv_write(0, tio);
+
+ ctrl &= ~TIOCTL_EN;
+ pps_ctl_write(ctrl, tio);
+ tio->pps_gen->enabled = false;
+ tio->prev_count = 0;
+ return ctrl;
+}
+
+static void pps_tio_enable(struct pps_tio *tio)
+{
+ u32 ctrl;
+
+ ctrl = pps_tio_read(TIOCTL, tio);
+ ctrl |= TIOCTL_EN;
+ pps_ctl_write(ctrl, tio);
+ tio->pps_gen->enabled = true;
+}
+
+static void pps_tio_direction_output(struct pps_tio *tio)
+{
+ u32 ctrl;
+
+ ctrl = pps_tio_disable(tio);
+
+ /*
+ * Before enabling the device, make sure that the
+ * 'compare' value is invalid
+ */
+ pps_compv_write(0, tio);
+
+ ctrl &= ~(TIOCTL_DIR | TIOCTL_EP);
+ ctrl |= TIOCTL_EP_TOGGLE_EDGE;
+ pps_ctl_write(ctrl, tio);
+ pps_tio_enable(tio);
+}
+
+static bool pps_generate_next_pulse(ktime_t expires, struct pps_tio *tio)
+{
+ u64 art;
+
+ if (!ktime_real_to_base_clock(expires, CSID_X86_ART, &art)) {
+ pps_tio_disable(tio);
+ return false;
+ }
+
+ pps_compv_write(art - ART_HW_DELAY_CYCLES, tio);
+ return true;
+}
+
+static enum hrtimer_restart hrtimer_callback(struct hrtimer *timer)
+{
+ ktime_t expires, now;
+ u32 event_count;
+ struct pps_tio *tio = container_of(timer, struct pps_tio, timer);
+
+ guard(spinlock)(&tio->lock);
+
+ /*
+ * Check if any event is missed.
+ * If an event is missed, TIO will be disabled.
+ */
+ event_count = pps_tio_read(TIOEC, tio);
+ if (tio->prev_count && tio->prev_count == event_count)
+ goto err;
+ tio->prev_count = event_count;
+
+ expires = hrtimer_get_expires(timer);
+
+ now = ktime_get_real();
+ if (now - expires >= SAFE_TIME_NS)
+ goto err;
+
+ tio->pps_gen->enabled = pps_generate_next_pulse(expires + SAFE_TIME_NS, tio);
+ if (!tio->pps_gen->enabled)
+ return HRTIMER_NORESTART;
+
+ hrtimer_forward(timer, now, NSEC_PER_SEC / 2);
+ return HRTIMER_RESTART;
+
+err:
+ dev_err(tio->dev, "Event missed, Disabling Timed I/O");
+ pps_tio_disable(tio);
+ pps_gen_event(tio->pps_gen, PPS_GEN_EVENT_MISSEDPULSE, NULL);
+ return HRTIMER_NORESTART;
+}
+
+static int pps_tio_gen_enable(struct pps_gen_device *pps_gen, bool enable)
+{
+ struct pps_tio *tio = container_of(pps_gen->info, struct pps_tio, gen_info);
+
+ if (!timekeeping_clocksource_has_base(CSID_X86_ART)) {
+ dev_err_once(tio->dev, "PPS cannot be used as clock is not related to ART");
+ return -ENODEV;
+ }
+
+ guard(spinlock_irqsave)(&tio->lock);
+ if (enable && !pps_gen->enabled) {
+ pps_tio_direction_output(tio);
+ hrtimer_start(&tio->timer, first_event(tio), HRTIMER_MODE_ABS);
+ } else if (!enable && pps_gen->enabled) {
+ hrtimer_cancel(&tio->timer);
+ pps_tio_disable(tio);
+ }
+
+ return 0;
+}
+
+static int pps_tio_get_time(struct pps_gen_device *pps_gen,
+ struct timespec64 *time)
+{
+ struct system_time_snapshot snap;
+
+ ktime_get_snapshot(&snap);
+ *time = ktime_to_timespec64(snap.real);
+
+ return 0;
+}
+
+static int pps_gen_tio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pps_tio *tio;
+
+ if (!(cpu_feature_enabled(X86_FEATURE_TSC_KNOWN_FREQ) &&
+ cpu_feature_enabled(X86_FEATURE_ART))) {
+ dev_warn(dev, "TSC/ART is not enabled");
+ return -ENODEV;
+ }
+
+ tio = devm_kzalloc(dev, sizeof(*tio), GFP_KERNEL);
+ if (!tio)
+ return -ENOMEM;
+
+ tio->gen_info.use_system_clock = true;
+ tio->gen_info.enable = pps_tio_gen_enable;
+ tio->gen_info.get_time = pps_tio_get_time;
+ tio->gen_info.owner = THIS_MODULE;
+
+ tio->pps_gen = pps_gen_register_source(&tio->gen_info);
+ if (IS_ERR(tio->pps_gen))
+ return PTR_ERR(tio->pps_gen);
+
+ tio->dev = dev;
+ tio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tio->base))
+ return PTR_ERR(tio->base);
+
+ pps_tio_disable(tio);
+ hrtimer_init(&tio->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tio->timer.function = hrtimer_callback;
+ spin_lock_init(&tio->lock);
+ platform_set_drvdata(pdev, &tio);
+
+ return 0;
+}
+
+static void pps_gen_tio_remove(struct platform_device *pdev)
+{
+ struct pps_tio *tio = platform_get_drvdata(pdev);
+
+ hrtimer_cancel(&tio->timer);
+ pps_tio_disable(tio);
+ pps_gen_unregister_source(tio->pps_gen);
+}
+
+static const struct acpi_device_id intel_pmc_tio_acpi_match[] = {
+ { "INTC1021" },
+ { "INTC1022" },
+ { "INTC1023" },
+ { "INTC1024" },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, intel_pmc_tio_acpi_match);
+
+static struct platform_driver pps_gen_tio_driver = {
+ .probe = pps_gen_tio_probe,
+ .remove = pps_gen_tio_remove,
+ .driver = {
+ .name = "intel-pps-gen-tio",
+ .acpi_match_table = intel_pmc_tio_acpi_match,
+ },
+};
+module_platform_driver(pps_gen_tio_driver);
+
+MODULE_AUTHOR("Christopher Hall <christopher.s.hall@intel.com>");
+MODULE_AUTHOR("Lakshmi Sowjanya D <lakshmi.sowjanya.d@intel.com>");
+MODULE_AUTHOR("Pandith N <pandith.n@intel.com>");
+MODULE_AUTHOR("Thejesh Reddy T R <thejesh.reddy.t.r@intel.com>");
+MODULE_AUTHOR("Subramanian Mohan <subramanian.mohan@intel.com>");
+MODULE_DESCRIPTION("Intel PMC Time-Aware IO Generator Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pps/generators/sysfs.c b/drivers/pps/generators/sysfs.c
index faf8b1c6d202..6d6bc0006fea 100644
--- a/drivers/pps/generators/sysfs.c
+++ b/drivers/pps/generators/sysfs.c
@@ -19,7 +19,7 @@ static ssize_t system_show(struct device *dev, struct device_attribute *attr,
{
struct pps_gen_device *pps_gen = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%d\n", pps_gen->info.use_system_clock);
+ return sysfs_emit(buf, "%d\n", pps_gen->info->use_system_clock);
}
static DEVICE_ATTR_RO(system);
@@ -30,7 +30,7 @@ static ssize_t time_show(struct device *dev, struct device_attribute *attr,
struct timespec64 time;
int ret;
- ret = pps_gen->info.get_time(pps_gen, &time);
+ ret = pps_gen->info->get_time(pps_gen, &time);
if (ret)
return ret;
@@ -49,7 +49,7 @@ static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- ret = pps_gen->info.enable(pps_gen, status);
+ ret = pps_gen->info->enable(pps_gen, status);
if (ret)
return ret;
pps_gen->enabled = status;
diff --git a/drivers/ptp/ptp_vmclock.c b/drivers/ptp/ptp_vmclock.c
index 0a2cfc8ad3c5..b3a83b03d9c1 100644
--- a/drivers/ptp/ptp_vmclock.c
+++ b/drivers/ptp/ptp_vmclock.c
@@ -414,16 +414,16 @@ static ssize_t vmclock_miscdev_read(struct file *fp, char __user *buf,
}
static const struct file_operations vmclock_miscdev_fops = {
+ .owner = THIS_MODULE,
.mmap = vmclock_miscdev_mmap,
.read = vmclock_miscdev_read,
};
/* module operations */
-static void vmclock_remove(struct platform_device *pdev)
+static void vmclock_remove(void *data)
{
- struct device *dev = &pdev->dev;
- struct vmclock_state *st = dev_get_drvdata(dev);
+ struct vmclock_state *st = data;
if (st->ptp_clock)
ptp_clock_unregister(st->ptp_clock);
@@ -506,14 +506,13 @@ static int vmclock_probe(struct platform_device *pdev)
if (ret) {
dev_info(dev, "Failed to obtain physical address: %d\n", ret);
- goto out;
+ return ret;
}
if (resource_size(&st->res) < VMCLOCK_MIN_SIZE) {
dev_info(dev, "Region too small (0x%llx)\n",
resource_size(&st->res));
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
st->clk = devm_memremap(dev, st->res.start, resource_size(&st->res),
MEMREMAP_WB | MEMREMAP_DEC);
@@ -521,31 +520,34 @@ static int vmclock_probe(struct platform_device *pdev)
ret = PTR_ERR(st->clk);
dev_info(dev, "failed to map shared memory\n");
st->clk = NULL;
- goto out;
+ return ret;
}
if (le32_to_cpu(st->clk->magic) != VMCLOCK_MAGIC ||
le32_to_cpu(st->clk->size) > resource_size(&st->res) ||
le16_to_cpu(st->clk->version) != 1) {
dev_info(dev, "vmclock magic fields invalid\n");
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
ret = ida_alloc(&vmclock_ida, GFP_KERNEL);
if (ret < 0)
- goto out;
+ return ret;
st->index = ret;
ret = devm_add_action_or_reset(&pdev->dev, vmclock_put_idx, st);
if (ret)
- goto out;
+ return ret;
st->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "vmclock%d", st->index);
- if (!st->name) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!st->name)
+ return -ENOMEM;
+
+ st->miscdev.minor = MISC_DYNAMIC_MINOR;
+
+ ret = devm_add_action_or_reset(&pdev->dev, vmclock_remove, st);
+ if (ret)
+ return ret;
/*
* If the structure is big enough, it can be mapped to userspace.
@@ -554,13 +556,12 @@ static int vmclock_probe(struct platform_device *pdev)
* cross that bridge if/when we come to it.
*/
if (le32_to_cpu(st->clk->size) >= PAGE_SIZE) {
- st->miscdev.minor = MISC_DYNAMIC_MINOR;
st->miscdev.fops = &vmclock_miscdev_fops;
st->miscdev.name = st->name;
ret = misc_register(&st->miscdev);
if (ret)
- goto out;
+ return ret;
}
/* If there is valid clock information, register a PTP clock */
@@ -570,16 +571,14 @@ static int vmclock_probe(struct platform_device *pdev)
if (IS_ERR(st->ptp_clock)) {
ret = PTR_ERR(st->ptp_clock);
st->ptp_clock = NULL;
- vmclock_remove(pdev);
- goto out;
+ return ret;
}
}
if (!st->miscdev.minor && !st->ptp_clock) {
/* Neither miscdev nor PTP registered */
dev_info(dev, "vmclock: Neither miscdev nor PTP available; not registering\n");
- ret = -ENODEV;
- goto out;
+ return -ENODEV;
}
dev_info(dev, "%s: registered %s%s%s\n", st->name,
@@ -587,10 +586,7 @@ static int vmclock_probe(struct platform_device *pdev)
(st->miscdev.minor && st->ptp_clock) ? ", " : "",
st->ptp_clock ? "PTP" : "");
- dev_set_drvdata(dev, st);
-
- out:
- return ret;
+ return 0;
}
static const struct acpi_device_id vmclock_acpi_ids[] = {
@@ -601,7 +597,6 @@ MODULE_DEVICE_TABLE(acpi, vmclock_acpi_ids);
static struct platform_driver vmclock_platform_driver = {
.probe = vmclock_probe,
- .remove = vmclock_remove,
.driver = {
.name = "vmclock",
.acpi_match_table = vmclock_acpi_ids,
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 27afbb9d544b..cbf531d0ba68 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1742,7 +1742,8 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
err = rio_add_net(net);
if (err) {
rmcd_debug(RDEV, "failed to register net, err=%d", err);
- kfree(net);
+ put_device(&net->dev);
+ mport->net = NULL;
goto cleanup;
}
}
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index fdcf742b2adb..c12941f71e2c 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -871,7 +871,10 @@ static struct rio_net *rio_scan_alloc_net(struct rio_mport *mport,
dev_set_name(&net->dev, "rnet_%d", net->id);
net->dev.parent = &mport->dev;
net->dev.release = rio_scan_release_dev;
- rio_add_net(net);
+ if (rio_add_net(net)) {
+ put_device(&net->dev);
+ net = NULL;
+ }
}
return net;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 89578b91c468..4ddf0efead68 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -5774,43 +5774,36 @@ regulator_register(struct device *dev,
goto clean;
}
- if (config->init_data) {
- /*
- * Providing of_match means the framework is expected to parse
- * DT to get the init_data. This would conflict with provided
- * init_data, if set. Warn if it happens.
- */
- if (regulator_desc->of_match)
- dev_warn(dev, "Using provided init data - OF match ignored\n");
+ /*
+ * DT may override the config->init_data provided if the platform
+ * needs to do so. If so, config->init_data is completely ignored.
+ */
+ init_data = regulator_of_get_init_data(dev, regulator_desc, config,
+ &rdev->dev.of_node);
+ /*
+ * Sometimes not all resources are probed already so we need to take
+ * that into account. This happens most the time if the ena_gpiod comes
+ * from a gpio extender or something else.
+ */
+ if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto clean;
+ }
+
+ /*
+ * We need to keep track of any GPIO descriptor coming from the
+ * device tree until we have handled it over to the core. If the
+ * config that was passed in to this function DOES NOT contain
+ * a descriptor, and the config after this call DOES contain
+ * a descriptor, we definitely got one from parsing the device
+ * tree.
+ */
+ if (!cfg->ena_gpiod && config->ena_gpiod)
+ dangling_of_gpiod = true;
+ if (!init_data) {
init_data = config->init_data;
rdev->dev.of_node = of_node_get(config->of_node);
-
- } else {
- init_data = regulator_of_get_init_data(dev, regulator_desc,
- config,
- &rdev->dev.of_node);
-
- /*
- * Sometimes not all resources are probed already so we need to
- * take that into account. This happens most the time if the
- * ena_gpiod comes from a gpio extender or something else.
- */
- if (PTR_ERR(init_data) == -EPROBE_DEFER) {
- ret = -EPROBE_DEFER;
- goto clean;
- }
-
- /*
- * We need to keep track of any GPIO descriptor coming from the
- * device tree until we have handled it over to the core. If the
- * config that was passed in to this function DOES NOT contain a
- * descriptor, and the config after this call DOES contain a
- * descriptor, we definitely got one from parsing the device
- * tree.
- */
- if (!cfg->ena_gpiod && config->ena_gpiod)
- dangling_of_gpiod = true;
}
ww_mutex_init(&rdev->mutex, &regulator_ww_class);
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 4a0b3f19bd8e..4f01b1929240 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -695,7 +695,8 @@ static int info_update(void)
if (time_after(jiffies, chp_info_expires)) {
/* Data is too old, update. */
rc = sclp_chp_read_info(&chp_info);
- chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL ;
+ if (!rc)
+ chp_info_expires = jiffies + CHP_INFO_UPDATE_INTERVAL;
}
mutex_unlock(&info_lock);
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index e36e3ea165d3..2f34761e6413 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -588,6 +588,15 @@ out:
return ret;
}
+static void ism_dev_release(struct device *dev)
+{
+ struct ism_dev *ism;
+
+ ism = container_of(dev, struct ism_dev, dev);
+
+ kfree(ism);
+}
+
static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct ism_dev *ism;
@@ -601,6 +610,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
dev_set_drvdata(&pdev->dev, ism);
ism->pdev = pdev;
ism->dev.parent = &pdev->dev;
+ ism->dev.release = ism_dev_release;
device_initialize(&ism->dev);
dev_set_name(&ism->dev, dev_name(&pdev->dev));
ret = device_add(&ism->dev);
@@ -637,7 +647,7 @@ err:
device_del(&ism->dev);
err_dev:
dev_set_drvdata(&pdev->dev, NULL);
- kfree(ism);
+ put_device(&ism->dev);
return ret;
}
@@ -682,7 +692,7 @@ static void ism_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
device_del(&ism->dev);
dev_set_drvdata(&pdev->dev, NULL);
- kfree(ism);
+ put_device(&ism->dev);
}
static struct pci_driver ism_driver = {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index a3adaec5504e..20328d695ef9 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -7050,14 +7050,16 @@ int qeth_open(struct net_device *dev)
card->data.state = CH_STATE_UP;
netif_tx_start_all_queues(dev);
- local_bh_disable();
qeth_for_each_output_queue(card, queue, i) {
netif_napi_add_tx(dev, &queue->napi, qeth_tx_poll);
napi_enable(&queue->napi);
- napi_schedule(&queue->napi);
}
-
napi_enable(&card->napi);
+
+ local_bh_disable();
+ qeth_for_each_output_queue(card, queue, i) {
+ napi_schedule(&queue->napi);
+ }
napi_schedule(&card->napi);
/* kick-start the NAPI softirq: */
local_bh_enable();
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 1fd2da0264e3..47d74f881948 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -2867,7 +2867,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
cpu_to_le32(upper_32_bits(dma_handle)),
cpu_to_le32(lower_32_bits(dma_handle)),
- cpu_to_le32(sg_dma_len(sg_next(s))));
+ cpu_to_le32(sg_dma_len(s)));
remseg--;
}
dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index d776f13cd160..f1cfe0bb89b2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -872,13 +872,18 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
case 0x1a: /* start stop unit in progress */
case 0x1b: /* sanitize in progress */
case 0x1d: /* configuration in progress */
- case 0x24: /* depopulation in progress */
- case 0x25: /* depopulation restore in progress */
action = ACTION_DELAYED_RETRY;
break;
case 0x0a: /* ALUA state transition */
action = ACTION_DELAYED_REPREP;
break;
+ /*
+ * Depopulation might take many hours,
+ * thus it is not worthwhile to retry.
+ */
+ case 0x24: /* depopulation in progress */
+ case 0x25: /* depopulation restore in progress */
+ fallthrough;
default:
action = ACTION_FAIL;
break;
@@ -1664,13 +1669,6 @@ static blk_status_t scsi_prepare_cmd(struct request *req)
if (in_flight)
__set_bit(SCMD_STATE_INFLIGHT, &cmd->state);
- /*
- * Only clear the driver-private command data if the LLD does not supply
- * a function to initialize that data.
- */
- if (!shost->hostt->init_cmd_priv)
- memset(cmd + 1, 0, shost->hostt->cmd_size);
-
cmd->prot_op = SCSI_PROT_NORMAL;
if (blk_rq_bytes(req))
cmd->sc_data_direction = rq_dma_dir(req);
@@ -1837,6 +1835,13 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
if (!scsi_host_queue_ready(q, shost, sdev, cmd))
goto out_dec_target_busy;
+ /*
+ * Only clear the driver-private command data if the LLD does not supply
+ * a function to initialize that data.
+ */
+ if (shost->hostt->cmd_size && !shost->hostt->init_cmd_priv)
+ memset(cmd + 1, 0, shost->hostt->cmd_size);
+
if (!(req->rq_flags & RQF_DONTPREP)) {
ret = scsi_prepare_cmd(req);
if (ret != BLK_STS_OK)
diff --git a/drivers/scsi/scsi_lib_test.c b/drivers/scsi/scsi_lib_test.c
index 99834426a100..ae8af0e0047a 100644
--- a/drivers/scsi/scsi_lib_test.c
+++ b/drivers/scsi/scsi_lib_test.c
@@ -67,6 +67,13 @@ static void scsi_lib_test_multiple_sense(struct kunit *test)
};
int i;
+ /* Success */
+ sc.result = 0;
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, &failures));
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
+ /* Command failed but caller did not pass in a failures array */
+ scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
+ KUNIT_EXPECT_EQ(test, 0, scsi_check_passthrough(&sc, NULL));
/* Match end of array */
scsi_build_sense(&sc, 0, ILLEGAL_REQUEST, 0x91, 0x36);
KUNIT_EXPECT_EQ(test, -EAGAIN, scsi_check_passthrough(&sc, &failures));
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 087fcbfc9aaa..96d7e1a9a7c7 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -246,7 +246,7 @@ static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev,
}
ret = sbitmap_init_node(&sdev->budget_map,
scsi_device_max_queue_depth(sdev),
- new_shift, GFP_KERNEL,
+ new_shift, GFP_NOIO,
sdev->request_queue->node, false, true);
if (!ret)
sbitmap_resize(&sdev->budget_map, depth);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 5a101ac06c47..a8614e54544e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1800,6 +1800,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
length = scsi_bufflen(scmnd);
payload = (struct vmbus_packet_mpb_array *)&cmd_request->mpb;
+ payload->range.len = 0;
payload_sz = 0;
if (scsi_sg_count(scmnd)) {
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index e7aa9bd4b44b..6f01d944f9c6 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -148,8 +148,9 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
}
ret = ctrl->xfer_msg(ctrl, txn);
-
- if (!ret && need_tid && !txn->msg->comp) {
+ if (ret == -ETIMEDOUT) {
+ slim_free_txn_tid(ctrl, txn);
+ } else if (!ret && need_tid && !txn->msg->comp) {
unsigned long ms = txn->rl + HZ;
time_left = wait_for_completion_timeout(txn->comp,
diff --git a/drivers/soc/loongson/loongson2_guts.c b/drivers/soc/loongson/loongson2_guts.c
index ae42e3a9127f..16913c3ef65c 100644
--- a/drivers/soc/loongson/loongson2_guts.c
+++ b/drivers/soc/loongson/loongson2_guts.c
@@ -114,8 +114,11 @@ static int loongson2_guts_probe(struct platform_device *pdev)
if (of_property_read_string(root, "model", &machine))
of_property_read_string_index(root, "compatible", 0, &machine);
of_node_put(root);
- if (machine)
+ if (machine) {
soc_dev_attr.machine = devm_kstrdup(dev, machine, GFP_KERNEL);
+ if (!soc_dev_attr.machine)
+ return -ENOMEM;
+ }
svr = loongson2_guts_get_svr();
soc_die = loongson2_soc_die_match(svr, loongson2_soc_die);
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 4783ab1adb8d..a3e88ced328a 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -365,7 +365,7 @@ static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
- seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
+ seq_printf(p, "%8s", dev_name(entry->smp2p->dev));
}
static struct irq_chip smp2p_irq_chip = {
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ea8a31032927..2cfc14be8697 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -55,6 +55,9 @@ config SPI_MEM
This extension is meant to simplify interaction with SPI memories
by providing a high-level interface to send memory-like commands.
+config SPI_OFFLOAD
+ bool
+
comment "SPI Master Controller Drivers"
config SPI_AIROHA_SNFI
@@ -1317,4 +1320,16 @@ endif # SPI_SLAVE
config SPI_DYNAMIC
def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
+if SPI_OFFLOAD
+
+comment "SPI Offload triggers"
+
+config SPI_OFFLOAD_TRIGGER_PWM
+ tristate "SPI offload trigger using PWM"
+ depends on PWM
+ help
+ Generic SPI offload trigger implemented using PWM output.
+
+endif # SPI_OFFLOAD
+
endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 9db7554c1864..0068d170bc99 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -10,6 +10,7 @@ ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
obj-$(CONFIG_SPI_MASTER) += spi.o
obj-$(CONFIG_SPI_MEM) += spi-mem.o
obj-$(CONFIG_SPI_MUX) += spi-mux.o
+obj-$(CONFIG_SPI_OFFLOAD) += spi-offload.o
obj-$(CONFIG_SPI_SPIDEV) += spidev.o
obj-$(CONFIG_SPI_LOOPBACK_TEST) += spi-loopback-test.o
@@ -163,3 +164,6 @@ obj-$(CONFIG_SPI_AMD) += spi-amd.o
# SPI slave protocol handlers
obj-$(CONFIG_SPI_SLAVE_TIME) += spi-slave-time.o
obj-$(CONFIG_SPI_SLAVE_SYSTEM_CONTROL) += spi-slave-system-control.o
+
+# SPI offload triggers
+obj-$(CONFIG_SPI_OFFLOAD_TRIGGER_PWM) += spi-offload-trigger-pwm.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index abdc49d9d940..d8c9be64d006 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -235,8 +235,8 @@
/**
* struct atmel_qspi_pcal - Pad Calibration Clock Division
* @pclk_rate: peripheral clock rate.
- * @pclkdiv: calibration clock division. The clock applied to the calibration
- * cell is divided by pclkdiv + 1.
+ * @pclk_div: calibration clock division. The clock applied to the calibration
+ * cell is divided by pclk_div + 1.
*/
struct atmel_qspi_pcal {
u32 pclk_rate;
diff --git a/drivers/spi/spi-offload-trigger-pwm.c b/drivers/spi/spi-offload-trigger-pwm.c
new file mode 100644
index 000000000000..b26d4437c589
--- /dev/null
+++ b/drivers/spi/spi-offload-trigger-pwm.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ *
+ * Generic PWM trigger for SPI offload.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/types.h>
+
+struct spi_offload_trigger_pwm_state {
+ struct device *dev;
+ struct pwm_device *pwm;
+};
+
+static bool spi_offload_trigger_pwm_match(struct spi_offload_trigger *trigger,
+ enum spi_offload_trigger_type type,
+ u64 *args, u32 nargs)
+{
+ if (nargs)
+ return false;
+
+ return type == SPI_OFFLOAD_TRIGGER_PERIODIC;
+}
+
+static int spi_offload_trigger_pwm_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+ int ret;
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ ret = pwm_round_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0)
+ return ret;
+
+ periodic->frequency_hz = DIV_ROUND_UP_ULL(NSEC_PER_SEC, wf.period_length_ns);
+
+ return 0;
+}
+
+static int spi_offload_trigger_pwm_enable(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct spi_offload_trigger_periodic *periodic = &config->periodic;
+ struct pwm_waveform wf = { };
+
+ if (config->type != SPI_OFFLOAD_TRIGGER_PERIODIC)
+ return -EINVAL;
+
+ if (!periodic->frequency_hz)
+ return -EINVAL;
+
+ wf.period_length_ns = DIV_ROUND_UP_ULL(NSEC_PER_SEC, periodic->frequency_hz);
+ /* REVISIT: 50% duty-cycle for now - may add config parameter later */
+ wf.duty_length_ns = wf.period_length_ns / 2;
+
+ return pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+}
+
+static void spi_offload_trigger_pwm_disable(struct spi_offload_trigger *trigger)
+{
+ struct spi_offload_trigger_pwm_state *st = spi_offload_trigger_get_priv(trigger);
+ struct pwm_waveform wf;
+ int ret;
+
+ ret = pwm_get_waveform_might_sleep(st->pwm, &wf);
+ if (ret < 0) {
+ dev_err(st->dev, "failed to get waveform: %d\n", ret);
+ return;
+ }
+
+ wf.duty_length_ns = 0;
+
+ ret = pwm_set_waveform_might_sleep(st->pwm, &wf, false);
+ if (ret < 0)
+ dev_err(st->dev, "failed to disable PWM: %d\n", ret);
+}
+
+static const struct spi_offload_trigger_ops spi_offload_trigger_pwm_ops = {
+ .match = spi_offload_trigger_pwm_match,
+ .validate = spi_offload_trigger_pwm_validate,
+ .enable = spi_offload_trigger_pwm_enable,
+ .disable = spi_offload_trigger_pwm_disable,
+};
+
+static void spi_offload_trigger_pwm_release(void *data)
+{
+ pwm_disable(data);
+}
+
+static int spi_offload_trigger_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_offload_trigger_info info = {
+ .fwnode = dev_fwnode(dev),
+ .ops = &spi_offload_trigger_pwm_ops,
+ };
+ struct spi_offload_trigger_pwm_state *st;
+ struct pwm_state state;
+ int ret;
+
+ st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
+ if (!st)
+ return -ENOMEM;
+
+ info.priv = st;
+ st->dev = dev;
+
+ st->pwm = devm_pwm_get(dev, NULL);
+ if (IS_ERR(st->pwm))
+ return dev_err_probe(dev, PTR_ERR(st->pwm), "failed to get PWM\n");
+
+ /* init with duty_cycle = 0, output enabled to ensure trigger off */
+ pwm_init_state(st->pwm, &state);
+ state.enabled = true;
+
+ ret = pwm_apply_might_sleep(st->pwm, &state);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to apply PWM state\n");
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_pwm_release, st->pwm);
+ if (ret)
+ return ret;
+
+ return devm_spi_offload_trigger_register(dev, &info);
+}
+
+static const struct of_device_id spi_offload_trigger_pwm_of_match_table[] = {
+ { .compatible = "pwm-trigger" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_offload_trigger_pwm_of_match_table);
+
+static struct platform_driver spi_offload_trigger_pwm_driver = {
+ .driver = {
+ .name = "pwm-trigger",
+ .of_match_table = spi_offload_trigger_pwm_of_match_table,
+ },
+ .probe = spi_offload_trigger_pwm_probe,
+};
+module_platform_driver(spi_offload_trigger_pwm_driver);
+
+MODULE_AUTHOR("David Lechner <dlechner@baylibre.com>");
+MODULE_DESCRIPTION("Generic PWM trigger");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-offload.c b/drivers/spi/spi-offload.c
new file mode 100644
index 000000000000..df5e963d5ee2
--- /dev/null
+++ b/drivers/spi/spi-offload.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2024 Analog Devices Inc.
+ * Copyright (C) 2024 BayLibre, SAS
+ */
+
+/*
+ * SPI Offloading support.
+ *
+ * Some SPI controllers support offloading of SPI transfers. Essentially, this
+ * is the ability for a SPI controller to perform SPI transfers with minimal
+ * or even no CPU intervention, e.g. via a specialized SPI controller with a
+ * hardware trigger or via a conventional SPI controller using a non-Linux MCU
+ * processor core to offload the work.
+ */
+
+#define DEFAULT_SYMBOL_NAMESPACE "SPI_OFFLOAD"
+
+#include <linux/cleanup.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/export.h>
+#include <linux/kref.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/property.h>
+#include <linux/spi/offload/consumer.h>
+#include <linux/spi/offload/provider.h>
+#include <linux/spi/offload/types.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+struct spi_controller_and_offload {
+ struct spi_controller *controller;
+ struct spi_offload *offload;
+};
+
+struct spi_offload_trigger {
+ struct list_head list;
+ struct kref ref;
+ struct fwnode_handle *fwnode;
+ /* synchronizes calling ops and driver registration */
+ struct mutex lock;
+ /*
+ * If the provider goes away while the consumer still has a reference,
+ * ops and priv will be set to NULL and all calls will fail with -ENODEV.
+ */
+ const struct spi_offload_trigger_ops *ops;
+ void *priv;
+};
+
+static LIST_HEAD(spi_offload_triggers);
+static DEFINE_MUTEX(spi_offload_triggers_lock);
+
+/**
+ * devm_spi_offload_alloc() - Allocate offload instance
+ * @dev: Device for devm purposes and assigned to &struct spi_offload.provider_dev
+ * @priv_size: Size of private data to allocate
+ *
+ * Offload providers should use this to allocate offload instances.
+ *
+ * Return: Pointer to new offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_alloc(struct device *dev,
+ size_t priv_size)
+{
+ struct spi_offload *offload;
+ void *priv;
+
+ offload = devm_kzalloc(dev, sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return ERR_PTR(-ENOMEM);
+
+ priv = devm_kzalloc(dev, priv_size, GFP_KERNEL);
+ if (!priv)
+ return ERR_PTR(-ENOMEM);
+
+ offload->provider_dev = dev;
+ offload->priv = priv;
+
+ return offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_alloc);
+
+static void spi_offload_put(void *data)
+{
+ struct spi_controller_and_offload *resource = data;
+
+ resource->controller->put_offload(resource->offload);
+ kfree(resource);
+}
+
+/**
+ * devm_spi_offload_get() - Get an offload instance
+ * @dev: Device for devm purposes
+ * @spi: SPI device to use for the transfers
+ * @config: Offload configuration
+ *
+ * Peripheral drivers call this function to get an offload instance that meets
+ * the requirements specified in @config. If no suitable offload instance is
+ * available, -ENODEV is returned.
+ *
+ * Return: Offload instance or error on failure.
+ */
+struct spi_offload *devm_spi_offload_get(struct device *dev,
+ struct spi_device *spi,
+ const struct spi_offload_config *config)
+{
+ struct spi_controller_and_offload *resource;
+ int ret;
+
+ if (!spi || !config)
+ return ERR_PTR(-EINVAL);
+
+ if (!spi->controller->get_offload)
+ return ERR_PTR(-ENODEV);
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource)
+ return ERR_PTR(-ENOMEM);
+
+ resource->controller = spi->controller;
+ resource->offload = spi->controller->get_offload(spi, config);
+ if (IS_ERR(resource->offload)) {
+ struct spi_offload *offload = resource->offload;
+
+ kfree(resource);
+ return offload;
+ }
+
+ ret = devm_add_action_or_reset(dev, spi_offload_put, resource);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return resource->offload;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_get);
+
+static void spi_offload_trigger_free(struct kref *ref)
+{
+ struct spi_offload_trigger *trigger =
+ container_of(ref, struct spi_offload_trigger, ref);
+
+ mutex_destroy(&trigger->lock);
+ fwnode_handle_put(trigger->fwnode);
+ kfree(trigger);
+}
+
+static void spi_offload_trigger_put(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &trigger->lock)
+ if (trigger->ops && trigger->ops->release)
+ trigger->ops->release(trigger);
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+static struct spi_offload_trigger
+*spi_offload_trigger_get(enum spi_offload_trigger_type type,
+ struct fwnode_reference_args *args)
+{
+ struct spi_offload_trigger *trigger;
+ bool match = false;
+ int ret;
+
+ guard(mutex)(&spi_offload_triggers_lock);
+
+ list_for_each_entry(trigger, &spi_offload_triggers, list) {
+ if (trigger->fwnode != args->fwnode)
+ continue;
+
+ match = trigger->ops->match(trigger, type, args->args, args->nargs);
+ if (match)
+ break;
+ }
+
+ if (!match)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return ERR_PTR(-ENODEV);
+
+ if (trigger->ops->request) {
+ ret = trigger->ops->request(trigger, type, args->args, args->nargs);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ kref_get(&trigger->ref);
+
+ return trigger;
+}
+
+/**
+ * devm_spi_offload_trigger_get() - Get an offload trigger instance
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance connected to a trigger.
+ * @type: Trigger type to get.
+ *
+ * Return: Offload trigger instance or error on failure.
+ */
+struct spi_offload_trigger
+*devm_spi_offload_trigger_get(struct device *dev,
+ struct spi_offload *offload,
+ enum spi_offload_trigger_type type)
+{
+ struct spi_offload_trigger *trigger;
+ struct fwnode_reference_args args;
+ int ret;
+
+ ret = fwnode_property_get_reference_args(dev_fwnode(offload->provider_dev),
+ "trigger-sources",
+ "#trigger-source-cells", 0, 0,
+ &args);
+ if (ret)
+ return ERR_PTR(ret);
+
+ trigger = spi_offload_trigger_get(type, &args);
+ fwnode_handle_put(args.fwnode);
+ if (IS_ERR(trigger))
+ return trigger;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_trigger_put, trigger);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return trigger;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_get);
+
+/**
+ * spi_offload_trigger_validate - Validate the requested trigger
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to validate
+ *
+ * On success, @config may be modified to reflect what the hardware can do.
+ * For example, the frequency of a periodic trigger may be adjusted to the
+ * nearest supported value.
+ *
+ * Callers will likely need to do additional validation of the modified trigger
+ * parameters.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int spi_offload_trigger_validate(struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (!trigger->ops->validate)
+ return -EOPNOTSUPP;
+
+ return trigger->ops->validate(trigger, config);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_validate);
+
+/**
+ * spi_offload_trigger_enable - enables trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ * @config: Trigger config to use when enabling the trigger
+ *
+ * There must be a message prepared for this offload instance (i.e.
+ * spi_optimize_message() was called with the same offload assigned to the
+ * message). This will also reserve the bus for exclusive use by the offload
+ * instance until the trigger is disabled. Any other attempts to send a
+ * transfer or lock the bus will fail with -EBUSY during this time.
+ *
+ * Calls must be balanced with spi_offload_trigger_disable().
+ *
+ * Context: can sleep
+ * Return: 0 on success, else a negative error code.
+ */
+int spi_offload_trigger_enable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger,
+ struct spi_offload_trigger_config *config)
+{
+ int ret;
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return -ENODEV;
+
+ if (offload->ops && offload->ops->trigger_enable) {
+ ret = offload->ops->trigger_enable(offload);
+ if (ret)
+ return ret;
+ }
+
+ if (trigger->ops->enable) {
+ ret = trigger->ops->enable(trigger, config);
+ if (ret) {
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_enable);
+
+/**
+ * spi_offload_trigger_disable - disables hardware trigger for offload
+ * @offload: Offload instance
+ * @trigger: Offload trigger instance
+ *
+ * Disables the hardware trigger for the offload instance
+ * and releases the bus for use by other clients.
+ *
+ * Context: can sleep
+ */
+void spi_offload_trigger_disable(struct spi_offload *offload,
+ struct spi_offload_trigger *trigger)
+{
+ if (offload->ops && offload->ops->trigger_disable)
+ offload->ops->trigger_disable(offload);
+
+ guard(mutex)(&trigger->lock);
+
+ if (!trigger->ops)
+ return;
+
+ if (trigger->ops->disable)
+ trigger->ops->disable(trigger);
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_disable);
+
+static void spi_offload_release_dma_chan(void *chan)
+{
+ dma_release_channel(chan);
+}
+
+/**
+ * devm_spi_offload_tx_stream_request_dma_chan - Get the DMA channel info for the TX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will provide data to transfers that use the
+ * %SPI_OFFLOAD_XFER_TX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_tx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->tx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->tx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_tx_stream_request_dma_chan);
+
+/**
+ * devm_spi_offload_rx_stream_request_dma_chan - Get the DMA channel info for the RX stream
+ * @dev: Device for devm purposes.
+ * @offload: Offload instance
+ *
+ * This is the DMA channel that will receive data from transfers that use the
+ * %SPI_OFFLOAD_XFER_RX_STREAM offload flag.
+ *
+ * Return: Pointer to DMA channel info, or negative error code
+ */
+struct dma_chan
+*devm_spi_offload_rx_stream_request_dma_chan(struct device *dev,
+ struct spi_offload *offload)
+{
+ struct dma_chan *chan;
+ int ret;
+
+ if (!offload->ops || !offload->ops->rx_stream_request_dma_chan)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ chan = offload->ops->rx_stream_request_dma_chan(offload);
+ if (IS_ERR(chan))
+ return chan;
+
+ ret = devm_add_action_or_reset(dev, spi_offload_release_dma_chan, chan);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return chan;
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_rx_stream_request_dma_chan);
+
+/* Triggers providers */
+
+static void spi_offload_trigger_unregister(void *data)
+{
+ struct spi_offload_trigger *trigger = data;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_del(&trigger->list);
+
+ scoped_guard(mutex, &trigger->lock) {
+ trigger->priv = NULL;
+ trigger->ops = NULL;
+ }
+
+ kref_put(&trigger->ref, spi_offload_trigger_free);
+}
+
+/**
+ * devm_spi_offload_trigger_register() - Allocate and register an offload trigger
+ * @dev: Device for devm purposes.
+ * @info: Provider-specific trigger info.
+ *
+ * Return: 0 on success, else a negative error code.
+ */
+int devm_spi_offload_trigger_register(struct device *dev,
+ struct spi_offload_trigger_info *info)
+{
+ struct spi_offload_trigger *trigger;
+
+ if (!info->fwnode || !info->ops)
+ return -EINVAL;
+
+ trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
+ if (!trigger)
+ return -ENOMEM;
+
+ kref_init(&trigger->ref);
+ mutex_init(&trigger->lock);
+ trigger->fwnode = fwnode_handle_get(info->fwnode);
+ trigger->ops = info->ops;
+ trigger->priv = info->priv;
+
+ scoped_guard(mutex, &spi_offload_triggers_lock)
+ list_add_tail(&trigger->list, &spi_offload_triggers);
+
+ return devm_add_action_or_reset(dev, spi_offload_trigger_unregister, trigger);
+}
+EXPORT_SYMBOL_GPL(devm_spi_offload_trigger_register);
+
+/**
+ * spi_offload_trigger_get_priv() - Get the private data for the trigger
+ *
+ * @trigger: Offload trigger instance.
+ *
+ * Return: Private data for the trigger.
+ */
+void *spi_offload_trigger_get_priv(struct spi_offload_trigger *trigger)
+{
+ return trigger->priv;
+}
+EXPORT_SYMBOL_GPL(spi_offload_trigger_get_priv);
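/*
 * Illustrative sketch (not part of the patch): roughly how a peripheral
 * driver would consume the API added above, following the kernel-doc in
 * spi-offload.c. The struct spi_offload_config field and SPI_OFFLOAD_CAP_*
 * flag names are assumptions; the offload types header is not shown in
 * this diff.
 */
#include <linux/dmaengine.h>
#include <linux/spi/offload/consumer.h>
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/units.h>

static int example_setup_offload(struct spi_device *spi)
{
	const struct spi_offload_config config = {
		/* assumed field/flag names, see offload/types.h */
		.capability_flags = SPI_OFFLOAD_CAP_TRIGGER |
				    SPI_OFFLOAD_CAP_RX_STREAM_DMA,
	};
	struct spi_offload_trigger_config trig_conf = {
		.type = SPI_OFFLOAD_TRIGGER_PERIODIC,
		.periodic = { .frequency_hz = 1 * HZ_PER_MHZ },
	};
	struct spi_offload_trigger *trigger;
	struct spi_offload *offload;
	struct dma_chan *rx_dma;
	int ret;

	/* Fails with -ENODEV if the controller has no suitable offload. */
	offload = devm_spi_offload_get(&spi->dev, spi, &config);
	if (IS_ERR(offload))
		return PTR_ERR(offload);

	/* Resolves the provider's "trigger-sources" phandle reference. */
	trigger = devm_spi_offload_trigger_get(&spi->dev, offload,
					       SPI_OFFLOAD_TRIGGER_PERIODIC);
	if (IS_ERR(trigger))
		return PTR_ERR(trigger);

	/* May round frequency_hz to what the PWM can actually produce. */
	ret = spi_offload_trigger_validate(trigger, &trig_conf);
	if (ret)
		return ret;

	/* DMA channel that carries SPI_OFFLOAD_XFER_RX_STREAM data. */
	rx_dma = devm_spi_offload_rx_stream_request_dma_chan(&spi->dev,
							     offload);
	if (IS_ERR(rx_dma))
		return PTR_ERR(rx_dma);

	/*
	 * Starts hardware-triggered transfers of a message previously
	 * optimized with this offload; calls must be balanced with
	 * spi_offload_trigger_disable().
	 */
	return spi_offload_trigger_enable(offload, trigger, &trig_conf);
}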
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 5f9cac41baff..06711a62fa3d 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -399,7 +399,7 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
lpss_ssp_select_cs(spi, config);
mask = LPSS_CS_CONTROL_CS_HIGH;
- __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? mask : 0);
+ __lpss_ssp_update_priv(drv_data, config->reg_cs_ctrl, mask, enable ? 0 : mask);
if (config->cs_clk_stays_gated) {
/*
* Changing CS alone when dynamic clock gating is on won't
diff --git a/drivers/spi/spi-sn-f-ospi.c b/drivers/spi/spi-sn-f-ospi.c
index 6ad4b729897e..c4969f66a0ba 100644
--- a/drivers/spi/spi-sn-f-ospi.c
+++ b/drivers/spi/spi-sn-f-ospi.c
@@ -116,6 +116,9 @@ struct f_ospi {
static u32 f_ospi_get_dummy_cycle(const struct spi_mem_op *op)
{
+ if (!op->dummy.nbytes)
+ return 0;
+
return (op->dummy.nbytes * 8) / op->dummy.buswidth;
}
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7a4647717d4..10c365e9100a 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -31,6 +31,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
+#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <uapi/linux/sched/types.h>
@@ -4158,6 +4159,15 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (_spi_xfer_word_delay_update(xfer, spi))
return -EINVAL;
+
+ /* Make sure controller supports required offload features. */
+ if (xfer->offload_flags) {
+ if (!message->offload)
+ return -EINVAL;
+
+ if (xfer->offload_flags & ~message->offload->xfer_flags)
+ return -EINVAL;
+ }
}
message->status = -EINPROGRESS;
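/*
 * Illustrative sketch (not part of the patch): how the offload_flags check
 * added to __spi_validate() above comes into play. A message is bound to an
 * offload instance and optimized up front; transfers whose offload_flags are
 * not advertised by the controller's offload are rejected at that point.
 */
#include <linux/spi/offload/types.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

static int example_prepare_offload_msg(struct spi_device *spi,
					struct spi_offload *offload,
					struct spi_message *msg,
					struct spi_transfer *xfer)
{
	memset(xfer, 0, sizeof(*xfer));
	xfer->bits_per_word = 16;
	xfer->len = 2;
	/* Received data is routed to the offload RX DMA stream, not rx_buf. */
	xfer->offload_flags = SPI_OFFLOAD_XFER_RX_STREAM;

	spi_message_init_with_transfers(msg, xfer, 1);
	msg->offload = offload;

	/* Validation, including the new offload_flags check, happens here. */
	return spi_optimize_message(spi, msg);
}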
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index 3318997a7009..cee51f64bc4b 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -16,16 +16,4 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16240
- tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16240 programmable
- impact Sensor and recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16240.
-
endmenu
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 094cc9be35bd..acac7bc9b9c0 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -4,4 +4,3 @@
#
obj-$(CONFIG_ADIS16203) += adis16203.o
-obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16240.c b/drivers/staging/iio/accel/adis16240.c
deleted file mode 100644
index 3be3eaf5d9d4..000000000000
--- a/drivers/staging/iio/accel/adis16240.c
+++ /dev/null
@@ -1,443 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * ADIS16240 Programmable Impact Sensor and Recorder driver
- *
- * Copyright 2010 Analog Devices Inc.
- */
-
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16240_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16240_FLASH_CNT 0x00
-
-/* Output, power supply */
-#define ADIS16240_SUPPLY_OUT 0x02
-
-/* Output, x-axis accelerometer */
-#define ADIS16240_XACCL_OUT 0x04
-
-/* Output, y-axis accelerometer */
-#define ADIS16240_YACCL_OUT 0x06
-
-/* Output, z-axis accelerometer */
-#define ADIS16240_ZACCL_OUT 0x08
-
-/* Output, auxiliary ADC input */
-#define ADIS16240_AUX_ADC 0x0A
-
-/* Output, temperature */
-#define ADIS16240_TEMP_OUT 0x0C
-
-/* Output, x-axis acceleration peak */
-#define ADIS16240_XPEAK_OUT 0x0E
-
-/* Output, y-axis acceleration peak */
-#define ADIS16240_YPEAK_OUT 0x10
-
-/* Output, z-axis acceleration peak */
-#define ADIS16240_ZPEAK_OUT 0x12
-
-/* Output, sum-of-squares acceleration peak */
-#define ADIS16240_XYZPEAK_OUT 0x14
-
-/* Output, Capture Buffer 1, X and Y acceleration */
-#define ADIS16240_CAPT_BUF1 0x16
-
-/* Output, Capture Buffer 2, Z acceleration */
-#define ADIS16240_CAPT_BUF2 0x18
-
-/* Diagnostic, error flags */
-#define ADIS16240_DIAG_STAT 0x1A
-
-/* Diagnostic, event counter */
-#define ADIS16240_EVNT_CNTR 0x1C
-
-/* Diagnostic, check sum value from firmware test */
-#define ADIS16240_CHK_SUM 0x1E
-
-/* Calibration, x-axis acceleration offset adjustment */
-#define ADIS16240_XACCL_OFF 0x20
-
-/* Calibration, y-axis acceleration offset adjustment */
-#define ADIS16240_YACCL_OFF 0x22
-
-/* Calibration, z-axis acceleration offset adjustment */
-#define ADIS16240_ZACCL_OFF 0x24
-
-/* Clock, hour and minute */
-#define ADIS16240_CLK_TIME 0x2E
-
-/* Clock, month and day */
-#define ADIS16240_CLK_DATE 0x30
-
-/* Clock, year */
-#define ADIS16240_CLK_YEAR 0x32
-
-/* Wake-up setting, hour and minute */
-#define ADIS16240_WAKE_TIME 0x34
-
-/* Wake-up setting, month and day */
-#define ADIS16240_WAKE_DATE 0x36
-
-/* Alarm 1 amplitude threshold */
-#define ADIS16240_ALM_MAG1 0x38
-
-/* Alarm 2 amplitude threshold */
-#define ADIS16240_ALM_MAG2 0x3A
-
-/* Alarm control */
-#define ADIS16240_ALM_CTRL 0x3C
-
-/* Capture, external trigger control */
-#define ADIS16240_XTRIG_CTRL 0x3E
-
-/* Capture, address pointer */
-#define ADIS16240_CAPT_PNTR 0x40
-
-/* Capture, configuration and control */
-#define ADIS16240_CAPT_CTRL 0x42
-
-/* General-purpose digital input/output control */
-#define ADIS16240_GPIO_CTRL 0x44
-
-/* Miscellaneous control */
-#define ADIS16240_MSC_CTRL 0x46
-
-/* Internal sample period (rate) control */
-#define ADIS16240_SMPL_PRD 0x48
-
-/* System command */
-#define ADIS16240_GLOB_CMD 0x4A
-
-/* MSC_CTRL */
-
-/* Enables sum-of-squares output (XYZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
-
-/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
-#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
-
-/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
-#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
-
-/* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
-
-/* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
-
-/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
-
-/* DIAG_STAT */
-
-/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
-
-/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
-
-/* Capture buffer full: 1 = capture buffer is full */
-#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
-
-/* Flash test, checksum flag: 1 = mismatch, 0 = match */
-#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
-
-/* Power-on, self-test flag: 1 = failure, 0 = pass */
-#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
-
-/* Power-on self-test: 1 = in-progress, 0 = complete */
-#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
-
-/* SPI communications failure */
-#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
-
-/* Flash update failure */
-#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
-
-/* Power supply above 3.625 V */
-#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
-
- /* Power supply below 2.225 V */
-#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-
-#define ADIS16240_GLOB_CMD_RESUME BIT(8)
-#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
-
-#define ADIS16240_ERROR_ACTIVE BIT(14)
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-enum adis16240_scan {
- ADIS16240_SCAN_ACC_X,
- ADIS16240_SCAN_ACC_Y,
- ADIS16240_SCAN_ACC_Z,
- ADIS16240_SCAN_SUPPLY,
- ADIS16240_SCAN_AUX_ADC,
- ADIS16240_SCAN_TEMP,
-};
-
-static ssize_t adis16240_spi_read_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf,
- unsigned int bits)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis *st = iio_priv(indio_dev);
- int ret;
- s16 val = 0;
- unsigned int shift = 16 - bits;
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
-
- ret = adis_read_reg_16(st,
- this_attr->address, (u16 *)&val);
- if (ret)
- return ret;
-
- if (val & ADIS16240_ERROR_ACTIVE)
- adis_check_status(st);
-
- val = (s16)(val << shift) >> shift;
- return sprintf(buf, "%d\n", val);
-}
-
-static ssize_t adis16240_read_12bit_signed(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return adis16240_spi_read_signed(dev, attr, buf, 12);
-}
-
-static IIO_DEVICE_ATTR(in_accel_xyz_squared_peak_raw, 0444,
- adis16240_read_12bit_signed, NULL,
- ADIS16240_XYZPEAK_OUT);
-
-static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("4096");
-
-static const u8 adis16240_addresses[][2] = {
- [ADIS16240_SCAN_ACC_X] = { ADIS16240_XACCL_OFF, ADIS16240_XPEAK_OUT },
- [ADIS16240_SCAN_ACC_Y] = { ADIS16240_YACCL_OFF, ADIS16240_YPEAK_OUT },
- [ADIS16240_SCAN_ACC_Z] = { ADIS16240_ZACCL_OFF, ADIS16240_ZPEAK_OUT },
-};
-
-static int adis16240_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- u8 addr;
- s16 val16;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16240_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 4;
- *val2 = 880000; /* 4.88 mV */
- return IIO_VAL_INT_PLUS_MICRO;
- }
- return -EINVAL;
- case IIO_TEMP:
- *val = 244; /* 0.244 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_PEAK_SCALE:
- *val = 0;
- *val2 = IIO_G_TO_M_S_2(51400); /* 51.4 mg */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / 244 - 0x133; /* 25 C = 0x133 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- addr = adis16240_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret)
- return ret;
- *val = sign_extend32(val16, 9);
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_PEAK:
- addr = adis16240_addresses[chan->scan_index][1];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret)
- return ret;
- *val = sign_extend32(val16, 9);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16240_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- addr = adis16240_addresses[chan->scan_index][0];
- return adis_write_reg_16(st, addr, val & GENMASK(9, 0));
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16240_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16240_SUPPLY_OUT, ADIS16240_SCAN_SUPPLY, 0, 10),
- ADIS_AUX_ADC_CHAN(ADIS16240_AUX_ADC, ADIS16240_SCAN_AUX_ADC, 0, 10),
- ADIS_ACCEL_CHAN(X, ADIS16240_XACCL_OUT, ADIS16240_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Y, ADIS16240_YACCL_OUT, ADIS16240_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_ACCEL_CHAN(Z, ADIS16240_ZACCL_OUT, ADIS16240_SCAN_ACC_Z,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 10),
- ADIS_TEMP_CHAN(ADIS16240_TEMP_OUT, ADIS16240_SCAN_TEMP, 0, 10),
- IIO_CHAN_SOFT_TIMESTAMP(6)
-};
-
-static struct attribute *adis16240_attributes[] = {
- &iio_dev_attr_in_accel_xyz_squared_peak_raw.dev_attr.attr,
- &iio_const_attr_sampling_frequency_available.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16240_attribute_group = {
- .attrs = adis16240_attributes,
-};
-
-static const struct iio_info adis16240_info = {
- .attrs = &adis16240_attribute_group,
- .read_raw = adis16240_read_raw,
- .write_raw = adis16240_write_raw,
- .update_scan_mode = adis_update_scan_mode,
-};
-
-static const char * const adis16240_status_error_msgs[] = {
- [ADIS16240_DIAG_STAT_PWRON_FAIL_BIT] = "Power on, self-test failed",
- [ADIS16240_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16240_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16240_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16240_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.225V",
-};
-
-static const struct adis_timeout adis16240_timeouts = {
- .reset_ms = ADIS16240_STARTUP_DELAY,
- .sw_reset_ms = ADIS16240_STARTUP_DELAY,
- .self_test_ms = ADIS16240_STARTUP_DELAY,
-};
-
-static const struct adis_data adis16240_data = {
- .write_delay = 35,
- .read_delay = 35,
- .msc_ctrl_reg = ADIS16240_MSC_CTRL,
- .glob_cmd_reg = ADIS16240_GLOB_CMD,
- .diag_stat_reg = ADIS16240_DIAG_STAT,
-
- .self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
- .self_test_reg = ADIS16240_MSC_CTRL,
- .self_test_no_autoclear = true,
- .timeouts = &adis16240_timeouts,
-
- .status_error_msgs = adis16240_status_error_msgs,
- .status_error_mask = BIT(ADIS16240_DIAG_STAT_PWRON_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16240_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16240_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16240_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->info = &adis16240_info;
- indio_dev->channels = adis16240_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16240_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- spi->mode = SPI_MODE_3;
- ret = spi_setup(spi);
- if (ret) {
- dev_err(&spi->dev, "spi_setup failed!\n");
- return ret;
- }
-
- ret = adis_init(st, indio_dev, spi, &adis16240_data);
- if (ret)
- return ret;
- ret = devm_adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = __adis_initial_startup(st);
- if (ret)
- return ret;
-
- return devm_iio_device_register(&spi->dev, indio_dev);
-}
-
-static const struct of_device_id adis16240_of_match[] = {
- { .compatible = "adi,adis16240" },
- { },
-};
-MODULE_DEVICE_TABLE(of, adis16240_of_match);
-
-static struct spi_driver adis16240_driver = {
- .driver = {
- .name = "adis16240",
- .of_match_table = adis16240_of_match,
- },
- .probe = adis16240_probe,
-};
-module_spi_driver(adis16240_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16240");
-MODULE_IMPORT_NS("IIO_ADISLIB");
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 140ee4f9c137..db42810c7664 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -74,8 +74,6 @@
/**
* struct ad9832_state - driver instance specific data
* @spi: spi_device
- * @avdd: supply regulator for the analog section
- * @dvdd: supply regulator for the digital section
* @mclk: external master clock
* @ctrl_fp: cached frequency/phase control word
* @ctrl_ss: cached sync/selsrc control word
@@ -94,8 +92,6 @@
struct ad9832_state {
struct spi_device *spi;
- struct regulator *avdd;
- struct regulator *dvdd;
struct clk *mclk;
unsigned short ctrl_fp;
unsigned short ctrl_ss;
@@ -297,11 +293,6 @@ static const struct iio_info ad9832_info = {
.attrs = &ad9832_attribute_group,
};
-static void ad9832_reg_disable(void *reg)
-{
- regulator_disable(reg);
-}
-
static int ad9832_probe(struct spi_device *spi)
{
struct ad9832_platform_data *pdata = dev_get_platdata(&spi->dev);
@@ -320,33 +311,13 @@ static int ad9832_probe(struct spi_device *spi)
st = iio_priv(indio_dev);
- st->avdd = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(st->avdd))
- return PTR_ERR(st->avdd);
-
- ret = regulator_enable(st->avdd);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad9832_reg_disable, st->avdd);
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
if (ret)
- return ret;
-
- st->dvdd = devm_regulator_get(&spi->dev, "dvdd");
- if (IS_ERR(st->dvdd))
- return PTR_ERR(st->dvdd);
+ return dev_err_probe(&spi->dev, ret, "failed to enable specified AVDD voltage\n");
- ret = regulator_enable(st->dvdd);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified DVDD supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad9832_reg_disable, st->dvdd);
+ ret = devm_regulator_get_enable(&spi->dev, "dvdd");
if (ret)
- return ret;
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified DVDD supply\n");
st->mclk = devm_clk_get_enabled(&spi->dev, "mclk");
if (IS_ERR(st->mclk))
diff --git a/drivers/staging/iio/frequency/ad9834.c b/drivers/staging/iio/frequency/ad9834.c
index 6e99e008c5f4..50413da2aa65 100644
--- a/drivers/staging/iio/frequency/ad9834.c
+++ b/drivers/staging/iio/frequency/ad9834.c
@@ -387,33 +387,15 @@ static const struct iio_info ad9833_info = {
.attrs = &ad9833_attribute_group,
};
-static void ad9834_disable_reg(void *data)
-{
- struct regulator *reg = data;
-
- regulator_disable(reg);
-}
-
static int ad9834_probe(struct spi_device *spi)
{
struct ad9834_state *st;
struct iio_dev *indio_dev;
- struct regulator *reg;
int ret;
- reg = devm_regulator_get(&spi->dev, "avdd");
- if (IS_ERR(reg))
- return PTR_ERR(reg);
-
- ret = regulator_enable(reg);
- if (ret) {
- dev_err(&spi->dev, "Failed to enable specified AVDD supply\n");
- return ret;
- }
-
- ret = devm_add_action_or_reset(&spi->dev, ad9834_disable_reg, reg);
+ ret = devm_regulator_get_enable(&spi->dev, "avdd");
if (ret)
- return ret;
+ return dev_err_probe(&spi->dev, ret, "Failed to enable specified AVDD supply\n");
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev) {
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index c42cbde8a31b..210648a0092e 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -117,9 +117,9 @@ static ssize_t target_stat_tgt_status_show(struct config_item *item,
char *page)
{
if (to_stat_tgt_dev(item)->export_count)
- return snprintf(page, PAGE_SIZE, "activated");
+ return snprintf(page, PAGE_SIZE, "activated\n");
else
- return snprintf(page, PAGE_SIZE, "deactivated");
+ return snprintf(page, PAGE_SIZE, "deactivated\n");
}
static ssize_t target_stat_tgt_non_access_lus_show(struct config_item *item,
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index 322a543b8c27..d0f397c90242 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -80,7 +80,6 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_supp *supp = &optee->supp;
struct optee_supp_req *req;
- bool interruptable;
u32 ret;
/*
@@ -111,36 +110,18 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/*
* Wait for supplicant to process and return result, once we've
* returned from wait_for_completion(&req->c) successfully we have
- * exclusive access again.
+ * exclusive access again. Allow the wait to be killable so that it
+ * doesn't turn into an indefinite sleep if the supplicant hangs for
+ * some reason.
*/
- while (wait_for_completion_interruptible(&req->c)) {
+ if (wait_for_completion_killable(&req->c)) {
mutex_lock(&supp->mutex);
- interruptable = !supp->ctx;
- if (interruptable) {
- /*
- * There's no supplicant available and since the
- * supp->mutex currently is held none can
- * become available until the mutex released
- * again.
- *
- * Interrupting an RPC to supplicant is only
- * allowed as a way of slightly improving the user
- * experience in case the supplicant hasn't been
- * started yet. During normal operation the supplicant
- * will serve all requests in a timely manner and
- * interrupting then wouldn't make sense.
- */
- if (req->in_queue) {
- list_del(&req->link);
- req->in_queue = false;
- }
+ if (req->in_queue) {
+ list_del(&req->link);
+ req->in_queue = false;
}
mutex_unlock(&supp->mutex);
-
- if (interruptable) {
- req->ret = TEEC_ERROR_COMMUNICATION;
- break;
- }
+ req->ret = TEEC_ERROR_COMMUNICATION;
}
ret = req->ret;
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 280071be30b1..6b7ab1814c12 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -57,8 +57,6 @@ struct time_in_idle {
* @max_level: maximum cooling level. One less than total number of valid
* cpufreq frequencies.
* @em: Reference on the Energy Model of the device
- * @cdev: thermal_cooling_device pointer to keep track of the
- * registered cooling device.
* @policy: cpufreq policy.
* @cooling_ops: cpufreq callbacks to thermal cooling device ops
* @idle_time: idle time stats
diff --git a/drivers/thermal/gov_power_allocator.c b/drivers/thermal/gov_power_allocator.c
index 3b644de3292e..0d9f636c80f4 100644
--- a/drivers/thermal/gov_power_allocator.c
+++ b/drivers/thermal/gov_power_allocator.c
@@ -370,7 +370,7 @@ static void divvy_up_power(struct power_actor *power, int num_actors,
for (i = 0; i < num_actors; i++) {
struct power_actor *pa = &power[i];
- u64 req_range = (u64)pa->req_power * power_range;
+ u64 req_range = (u64)pa->weighted_req_power * power_range;
pa->granted_power = DIV_ROUND_CLOSEST_ULL(req_range,
total_req_power);
@@ -641,6 +641,22 @@ clean_state:
return ret;
}
+static void power_allocator_update_weight(struct power_allocator_params *params)
+{
+ const struct thermal_trip_desc *td;
+ struct thermal_instance *instance;
+
+ if (!params->trip_max)
+ return;
+
+ td = trip_to_trip_desc(params->trip_max);
+
+ params->total_weight = 0;
+ list_for_each_entry(instance, &td->thermal_instances, trip_node)
+ if (power_actor_is_valid(instance))
+ params->total_weight += instance->weight;
+}
+
static void power_allocator_update_tz(struct thermal_zone_device *tz,
enum thermal_notify_event reason)
{
@@ -656,16 +672,12 @@ static void power_allocator_update_tz(struct thermal_zone_device *tz,
if (power_actor_is_valid(instance))
num_actors++;
- if (num_actors == params->num_actors)
- return;
+ if (num_actors != params->num_actors)
+ allocate_actors_buffer(params, num_actors);
- allocate_actors_buffer(params, num_actors);
- break;
+ fallthrough;
case THERMAL_INSTANCE_WEIGHT_CHANGED:
- params->total_weight = 0;
- list_for_each_entry(instance, &td->thermal_instances, trip_node)
- if (power_actor_is_valid(instance))
- params->total_weight += instance->weight;
+ power_allocator_update_weight(params);
break;
default:
break;
@@ -731,6 +743,8 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
tz->governor_data = params;
+ power_allocator_update_weight(params);
+
return 0;
free_params:
diff --git a/drivers/thermal/thermal_of.c b/drivers/thermal/thermal_of.c
index 5ab4ce4daaeb..5401f03d6b6c 100644
--- a/drivers/thermal/thermal_of.c
+++ b/drivers/thermal/thermal_of.c
@@ -274,6 +274,34 @@ static bool thermal_of_get_cooling_spec(struct device_node *map_np, int index,
return true;
}
+static bool thermal_of_cm_lookup(struct device_node *cm_np,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ struct cooling_spec *c)
+{
+ for_each_child_of_node_scoped(cm_np, child) {
+ struct device_node *tr_np;
+ int count, i;
+
+ tr_np = of_parse_phandle(child, "trip", 0);
+ if (tr_np != trip->priv)
+ continue;
+
+ /* The trip has been found, look up the cdev. */
+ count = of_count_phandle_with_args(child, "cooling-device",
+ "#cooling-cells");
+ if (count <= 0)
+ pr_err("Add a cooling_device property with at least one device\n");
+
+ for (i = 0; i < count; i++) {
+ if (thermal_of_get_cooling_spec(child, i, cdev, c))
+ return true;
+ }
+ }
+
+ return false;
+}
+
static bool thermal_of_should_bind(struct thermal_zone_device *tz,
const struct thermal_trip *trip,
struct thermal_cooling_device *cdev,
@@ -293,27 +321,7 @@ static bool thermal_of_should_bind(struct thermal_zone_device *tz,
goto out;
/* Look up the trip and the cdev in the cooling maps. */
- for_each_child_of_node_scoped(cm_np, child) {
- struct device_node *tr_np;
- int count, i;
-
- tr_np = of_parse_phandle(child, "trip", 0);
- if (tr_np != trip->priv)
- continue;
-
- /* The trip has been found, look up the cdev. */
- count = of_count_phandle_with_args(child, "cooling-device", "#cooling-cells");
- if (count <= 0)
- pr_err("Add a cooling_device property with at least one device\n");
-
- for (i = 0; i < count; i++) {
- result = thermal_of_get_cooling_spec(child, i, cdev, c);
- if (result)
- break;
- }
-
- break;
- }
+ result = thermal_of_cm_lookup(cm_np, trip, cdev, c);
of_node_put(cm_np);
out:
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index df08f13052ff..8bb1a01fef2a 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -798,7 +798,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
nonseekable_open(inode, filp);
/* We refuse fsnotify events on ptmx, since it's a shared resource */
- filp->f_mode |= FMODE_NONOTIFY;
+ file_set_fsnotify_mode(filp, FMODE_NONOTIFY);
retval = tty_alloc_file(filp);
if (retval)
diff --git a/drivers/tty/serial/8250/8250.h b/drivers/tty/serial/8250/8250.h
index 11e05aa014e5..b861585ca02a 100644
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -374,6 +374,7 @@ static inline int is_omap1510_8250(struct uart_8250_port *pt)
#ifdef CONFIG_SERIAL_8250_DMA
extern int serial8250_tx_dma(struct uart_8250_port *);
+extern void serial8250_tx_dma_flush(struct uart_8250_port *);
extern int serial8250_rx_dma(struct uart_8250_port *);
extern void serial8250_rx_dma_flush(struct uart_8250_port *);
extern int serial8250_request_dma(struct uart_8250_port *);
@@ -406,6 +407,7 @@ static inline int serial8250_tx_dma(struct uart_8250_port *p)
{
return -1;
}
+static inline void serial8250_tx_dma_flush(struct uart_8250_port *p) { }
static inline int serial8250_rx_dma(struct uart_8250_port *p)
{
return -1;
diff --git a/drivers/tty/serial/8250/8250_dma.c b/drivers/tty/serial/8250/8250_dma.c
index d215c494ee24..f245a84f4a50 100644
--- a/drivers/tty/serial/8250/8250_dma.c
+++ b/drivers/tty/serial/8250/8250_dma.c
@@ -149,6 +149,22 @@ err:
return ret;
}
+void serial8250_tx_dma_flush(struct uart_8250_port *p)
+{
+ struct uart_8250_dma *dma = p->dma;
+
+ if (!dma->tx_running)
+ return;
+
+ /*
+ * kfifo_reset() has been called by the serial core, avoid
+ * advancing and underflowing in __dma_tx_complete().
+ */
+ dma->tx_size = 0;
+
+ dmaengine_terminate_async(dma->txchan);
+}
+
int serial8250_rx_dma(struct uart_8250_port *p)
{
struct uart_8250_dma *dma = p->dma;
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 64aed7efc569..11c860ea80f6 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -110,7 +110,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
spin_lock_init(&port->lock);
if (resource_type(&resource) == IORESOURCE_IO) {
- port->iotype = UPIO_PORT;
port->iobase = resource.start;
} else {
port->mapbase = resource.start;
diff --git a/drivers/tty/serial/8250/8250_platform.c b/drivers/tty/serial/8250/8250_platform.c
index 8bdc1879d952..c0343bfb8064 100644
--- a/drivers/tty/serial/8250/8250_platform.c
+++ b/drivers/tty/serial/8250/8250_platform.c
@@ -112,7 +112,6 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct uart_8250_port uart = { };
struct resource *regs;
- unsigned char iotype;
int ret, line;
regs = platform_get_mem_or_io(pdev, 0);
@@ -122,13 +121,11 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
switch (resource_type(regs)) {
case IORESOURCE_IO:
uart.port.iobase = regs->start;
- iotype = UPIO_PORT;
break;
case IORESOURCE_MEM:
uart.port.mapbase = regs->start;
uart.port.mapsize = resource_size(regs);
uart.port.flags = UPF_IOREMAP;
- iotype = UPIO_MEM;
break;
default:
return -EINVAL;
@@ -147,12 +144,6 @@ static int serial8250_probe_acpi(struct platform_device *pdev)
if (ret)
return ret;
- /*
- * The previous call may not set iotype correctly when reg-io-width
- * property is absent and it doesn't support IO port resource.
- */
- uart.port.iotype = iotype;
-
line = serial8250_register_8250_port(&uart);
if (line < 0)
return line;
diff --git a/drivers/tty/serial/8250/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index 7c06ae79d8e2..7a837fdf9df1 100644
--- a/drivers/tty/serial/8250/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
@@ -436,7 +436,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct uart_8250_port uart, *port;
int ret, flags = dev_id->driver_data;
- unsigned char iotype;
long line;
if (flags & UNKNOWN_DEV) {
@@ -448,14 +447,11 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&uart, 0, sizeof(uart));
if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
uart.port.iobase = pnp_port_start(dev, 2);
- iotype = UPIO_PORT;
} else if (pnp_port_valid(dev, 0)) {
uart.port.iobase = pnp_port_start(dev, 0);
- iotype = UPIO_PORT;
} else if (pnp_mem_valid(dev, 0)) {
uart.port.mapbase = pnp_mem_start(dev, 0);
uart.port.mapsize = pnp_mem_len(dev, 0);
- iotype = UPIO_MEM;
uart.port.flags = UPF_IOREMAP;
} else
return -ENODEV;
@@ -471,12 +467,6 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
if (ret)
return ret;
- /*
- * The previous call may not set iotype correctly when reg-io-width
- * property is absent and it doesn't support IO port resource.
- */
- uart.port.iotype = iotype;
-
if (flags & CIR_PORT) {
uart.port.flags |= UPF_FIXED_PORT | UPF_FIXED_TYPE;
uart.port.type = PORT_8250_CIR;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index d7976a21cca9..442967a6cd52 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2555,6 +2555,14 @@ static void serial8250_shutdown(struct uart_port *port)
serial8250_do_shutdown(port);
}
+static void serial8250_flush_buffer(struct uart_port *port)
+{
+ struct uart_8250_port *up = up_to_u8250p(port);
+
+ if (up->dma)
+ serial8250_tx_dma_flush(up);
+}
+
static unsigned int serial8250_do_get_divisor(struct uart_port *port,
unsigned int baud,
unsigned int *frac)
@@ -3244,6 +3252,7 @@ static const struct uart_ops serial8250_pops = {
.break_ctl = serial8250_break_ctl,
.startup = serial8250_startup,
.shutdown = serial8250_shutdown,
+ .flush_buffer = serial8250_flush_buffer,
.set_termios = serial8250_set_termios,
.set_ldisc = serial8250_set_ldisc,
.pm = serial8250_pm,
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 7b51cdc274fd..560f45ed19ae 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -1561,7 +1561,7 @@ int sc16is7xx_probe(struct device *dev, const struct sc16is7xx_devtype *devtype,
/* Always ask for fixed clock rate from a property. */
device_property_read_u32(dev, "clock-frequency", &uartclk);
- s->polling = !!irq;
+ s->polling = (irq <= 0);
if (s->polling)
dev_dbg(dev,
"No interrupt pin definition, falling back to polling mode\n");
diff --git a/drivers/tty/serial/serial_port.c b/drivers/tty/serial/serial_port.c
index d35f1d24156c..2fc48cd63f6c 100644
--- a/drivers/tty/serial/serial_port.c
+++ b/drivers/tty/serial/serial_port.c
@@ -173,6 +173,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
* The caller is responsible to initialize the following fields of the @port
* ->dev (must be valid)
* ->flags
+ * ->iobase
* ->mapbase
* ->mapsize
* ->regshift (if @use_defaults is false)
@@ -214,7 +215,7 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
/* Read the registers I/O access type (default: MMIO 8-bit) */
ret = device_property_read_u32(dev, "reg-io-width", &value);
if (ret) {
- port->iotype = UPIO_MEM;
+ port->iotype = port->iobase ? UPIO_PORT : UPIO_MEM;
} else {
switch (value) {
case 1:
@@ -227,15 +228,16 @@ static int __uart_read_properties(struct uart_port *port, bool use_defaults)
port->iotype = device_is_big_endian(dev) ? UPIO_MEM32BE : UPIO_MEM32;
break;
default:
- if (!use_defaults) {
- dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
- return -EINVAL;
- }
port->iotype = UPIO_UNKNOWN;
break;
}
}
+ if (!use_defaults && port->iotype == UPIO_UNKNOWN) {
+ dev_err(dev, "Unsupported reg-io-width (%u)\n", value);
+ return -EINVAL;
+ }
+
/* Read the address mapping base offset (default: no offset) */
ret = device_property_read_u32(dev, "reg-offset", &value);
if (ret)
diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c
index 8d4ad0a3f2cf..252186124669 100644
--- a/drivers/ufs/core/ufs_bsg.c
+++ b/drivers/ufs/core/ufs_bsg.c
@@ -194,10 +194,12 @@ out:
ufshcd_rpm_put_sync(hba);
kfree(buff);
bsg_reply->result = ret;
- job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply);
/* complete the job here only if no error */
- if (ret == 0)
+ if (ret == 0) {
+ job->reply_len = rpmb ? sizeof(struct ufs_rpmb_reply) :
+ sizeof(struct ufs_bsg_reply);
bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len);
+ }
return ret;
}
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index cd404ade48dc..464f13da259a 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -266,7 +266,7 @@ static bool ufshcd_has_pending_tasks(struct ufs_hba *hba)
static bool ufshcd_is_ufs_dev_busy(struct ufs_hba *hba)
{
- return hba->outstanding_reqs || ufshcd_has_pending_tasks(hba);
+ return scsi_host_busy(hba->host) || ufshcd_has_pending_tasks(hba);
}
static const struct ufs_dev_quirk ufs_fixups[] = {
@@ -628,8 +628,8 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
const struct scsi_device *sdev_ufs = hba->ufs_device_wlun;
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
- dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
- hba->outstanding_reqs, hba->outstanding_tasks);
+ dev_err(hba->dev, "%d outstanding reqs, tasks=0x%lx\n",
+ scsi_host_busy(hba->host), hba->outstanding_tasks);
dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n",
hba->saved_err, hba->saved_uic_err);
dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
@@ -2120,8 +2120,6 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
- spin_lock_init(&hba->clk_gating.lock);
-
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(
"ufs_clk_gating_%d", WQ_MEM_RECLAIM | WQ_HIGHPRI,
hba->host->host_no);
@@ -3106,8 +3104,13 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case UPIU_TRANSACTION_QUERY_RSP: {
u8 response = lrbp->ucd_rsp_ptr->header.response;
- if (response == 0)
+ if (response == 0) {
err = ufshcd_copy_query_response(hba, lrbp);
+ } else {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response in Query RSP: %x\n",
+ __func__, response);
+ }
break;
}
case UPIU_TRANSACTION_REJECT_UPIU:
@@ -5976,24 +5979,6 @@ out:
__func__, err);
}
-static void ufshcd_temp_exception_event_handler(struct ufs_hba *hba, u16 status)
-{
- u32 value;
-
- if (ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
- QUERY_ATTR_IDN_CASE_ROUGH_TEMP, 0, 0, &value))
- return;
-
- dev_info(hba->dev, "exception Tcase %d\n", value - 80);
-
- ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
-
- /*
- * A placeholder for the platform vendors to add whatever additional
- * steps required
- */
-}
-
static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
{
u8 index;
@@ -6214,7 +6199,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
ufshcd_bkops_exception_event_handler(hba);
if (status & hba->ee_drv_mask & MASK_EE_URGENT_TEMP)
- ufshcd_temp_exception_event_handler(hba, status);
+ ufs_hwmon_notify_event(hba, status & MASK_EE_URGENT_TEMP);
ufs_debugfs_exception_event(hba, status);
}
@@ -8897,7 +8882,7 @@ static enum scsi_timeout_action ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
dev_info(hba->dev, "%s() finished; outstanding_tasks = %#lx.\n",
__func__, hba->outstanding_tasks);
- return hba->outstanding_reqs ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
+ return scsi_host_busy(hba->host) ? SCSI_EH_RESET_TIMER : SCSI_EH_DONE;
}
static const struct attribute_group *ufshcd_driver_groups[] = {
@@ -9160,7 +9145,7 @@ out:
if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
clk_disable_unprepare(clki->clk);
}
- } else if (!ret && on) {
+ } else if (!ret && on && hba->clk_gating.is_initialized) {
scoped_guard(spinlock_irqsave, &hba->clk_gating.lock)
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
@@ -10247,16 +10232,6 @@ EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
#endif /* CONFIG_PM_SLEEP */
/**
- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
- * @hba: pointer to Host Bus Adapter (HBA)
- */
-void ufshcd_dealloc_host(struct ufs_hba *hba)
-{
- scsi_host_put(hba->host);
-}
-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
-
-/**
* ufshcd_set_dma_mask - Set dma mask based on the controller
* addressing capability
* @hba: per adapter instance
@@ -10275,11 +10250,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
}
/**
+ * ufshcd_devres_release - devres cleanup handler, invoked during release of
+ * hba->dev
+ * @host: pointer to SCSI host
+ */
+static void ufshcd_devres_release(void *host)
+{
+ scsi_host_put(host);
+}
+
+/**
* ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
* @dev: pointer to device handle
* @hba_handle: driver private handle
*
* Return: 0 on success, non-zero value on failure.
+ *
+ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
+ * keeps track of its allocations using devres and deallocates everything on
+ * device removal automatically.
*/
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
@@ -10301,6 +10290,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
err = -ENOMEM;
goto out_error;
}
+
+ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
+ host);
+ if (err)
+ return dev_err_probe(dev, err,
+ "failed to add ufshcd dealloc action\n");
+
host->nr_maps = HCTX_TYPE_POLL + 1;
hba = shost_priv(host);
hba->host = host;
@@ -10429,6 +10425,27 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->irq = irq;
hba->vps = &ufs_hba_vps;
+ /*
+ * Initialize clk_gating.lock early since it is being used in
+ * ufshcd_setup_clocks()
+ */
+ spin_lock_init(&hba->clk_gating.lock);
+
+ /*
+ * Set the default power management level for runtime and system PM.
+ * Host controller drivers can override them in their
+ * 'ufs_hba_variant_ops::init' callback.
+ *
+ * Default power saving mode is to keep UFS link in Hibern8 state
+ * and UFS device in sleep state.
+ */
+ hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+ hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+ UFS_SLEEP_PWR_MODE,
+ UIC_LINK_HIBERN8_STATE);
+
err = ufshcd_hba_init(hba);
if (err)
goto out_error;
@@ -10542,21 +10559,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
goto out_disable;
}
- /*
- * Set the default power management level for runtime and system PM if
- * not set by the host controller drivers.
- * Default power saving mode is to keep UFS link in Hibern8 state
- * and UFS device in sleep state.
- */
- if (!hba->rpm_lvl)
- hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
- if (!hba->spm_lvl)
- hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
- UFS_SLEEP_PWR_MODE,
- UIC_LINK_HIBERN8_STATE);
-
INIT_DELAYED_WORK(&hba->rpm_dev_flush_recheck_work, ufshcd_rpm_dev_flush_recheck_work);
INIT_DELAYED_WORK(&hba->ufs_rtc_update_work, ufshcd_rtc_work);
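The ufshcd_alloc_host() change above drops the exported ufshcd_dealloc_host() in favour of a devres action registered right next to the allocation, which is why the later error paths in ufshcd-pci.c and ufshcd-pltfrm.c can simply return. A rough userspace analogue of that "register the cleanup where you allocate" idea (hypothetical helpers, not the devm_* API):

    #include <stdio.h>
    #include <stdlib.h>

    typedef void (*cleanup_fn)(void *arg);

    static struct { cleanup_fn fn; void *arg; } actions[8];
    static int n_actions;

    /* Analogue of devm_add_action_or_reset(): register, or clean up on failure. */
    static int add_action(cleanup_fn fn, void *arg)
    {
            if (n_actions == 8) {
                    fn(arg);          /* could not register: release immediately */
                    return -1;
            }
            actions[n_actions].fn  = fn;
            actions[n_actions].arg = arg;
            n_actions++;
            return 0;
    }

    /* Analogue of device teardown: run the cleanups in reverse order. */
    static void run_cleanups(void)
    {
            while (n_actions > 0) {
                    n_actions--;
                    actions[n_actions].fn(actions[n_actions].arg);
            }
    }

    static void release_host(void *host)
    {
            printf("releasing host %p\n", host);
            free(host);
    }

    int main(void)
    {
            void *host = malloc(64);

            if (!host || add_action(release_host, host))
                    return 1;

            /* Error paths after this point just return; teardown frees the host. */
            run_cleanups();
            return 0;
    }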
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index ea39c5d5b8cf..9cfcaad23cf9 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -562,7 +562,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
pm_runtime_forbid(&pdev->dev);
pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
}
/**
@@ -605,7 +604,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err = ufshcd_init(hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
- ufshcd_dealloc_host(hba);
return err;
}
diff --git a/drivers/ufs/host/ufshcd-pltfrm.c b/drivers/ufs/host/ufshcd-pltfrm.c
index 505572d4fa87..ffe5d1d2b215 100644
--- a/drivers/ufs/host/ufshcd-pltfrm.c
+++ b/drivers/ufs/host/ufshcd-pltfrm.c
@@ -465,21 +465,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
struct device *dev = &pdev->dev;
mmio_base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(mmio_base)) {
- err = PTR_ERR(mmio_base);
- goto out;
- }
+ if (IS_ERR(mmio_base))
+ return PTR_ERR(mmio_base);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto out;
- }
+ if (irq < 0)
+ return irq;
err = ufshcd_alloc_host(dev, &hba);
if (err) {
dev_err(dev, "Allocation failed\n");
- goto out;
+ return err;
}
hba->vops = vops;
@@ -488,13 +484,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
if (err) {
dev_err(dev, "%s: clock parse failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_parse_regulator_info(hba);
if (err) {
dev_err(dev, "%s: regulator init failed %d\n",
__func__, err);
- goto dealloc_host;
+ return err;
}
ufshcd_init_lanes_per_dir(hba);
@@ -502,25 +498,20 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
err = ufshcd_parse_operating_points(hba);
if (err) {
dev_err(dev, "%s: OPP parse failed %d\n", __func__, err);
- goto dealloc_host;
+ return err;
}
err = ufshcd_init(hba, mmio_base, irq);
if (err) {
dev_err_probe(dev, err, "Initialization failed with error %d\n",
err);
- goto dealloc_host;
+ return err;
}
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
return 0;
-
-dealloc_host:
- ufshcd_dealloc_host(hba);
-out:
- return err;
}
EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
@@ -534,7 +525,6 @@ void ufshcd_pltfrm_remove(struct platform_device *pdev)
pm_runtime_get_sync(&pdev->dev);
ufshcd_remove(hba);
- ufshcd_dealloc_host(hba);
pm_runtime_disable(&pdev->dev);
pm_runtime_put_noidle(&pdev->dev);
}
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 0dd85d2635b9..47d06af33747 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1131,7 +1131,10 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
struct cxacru_data *instance;
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct usb_host_endpoint *cmd_ep = usb_dev->ep_in[CXACRU_EP_CMD];
- struct usb_endpoint_descriptor *in, *out;
+ static const u8 ep_addrs[] = {
+ CXACRU_EP_CMD + USB_DIR_IN,
+ CXACRU_EP_CMD + USB_DIR_OUT,
+ 0};
int ret;
/* instance init */
@@ -1179,13 +1182,11 @@ static int cxacru_bind(struct usbatm_data *usbatm_instance,
}
if (usb_endpoint_xfer_int(&cmd_ep->desc))
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- NULL, NULL, &in, &out);
+ ret = usb_check_int_endpoints(intf, ep_addrs);
else
- ret = usb_find_common_endpoints(intf->cur_altsetting,
- &in, &out, NULL, NULL);
+ ret = usb_check_bulk_endpoints(intf, ep_addrs);
- if (ret) {
+ if (!ret) {
usb_err(usbatm_instance, "cxacru_bind: interface has incorrect endpoints\n");
ret = -ENODEV;
goto fail;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 6b37d1c47fce..c2ecfa3c8349 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -371,7 +371,7 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
static void acm_ctrl_irq(struct urb *urb)
{
struct acm *acm = urb->context;
- struct usb_cdc_notification *dr = urb->transfer_buffer;
+ struct usb_cdc_notification *dr;
unsigned int current_size = urb->actual_length;
unsigned int expected_size, copy_size, alloc_size;
int retval;
@@ -398,14 +398,25 @@ static void acm_ctrl_irq(struct urb *urb)
usb_mark_last_busy(acm->dev);
- if (acm->nb_index)
+ if (acm->nb_index == 0) {
+ /*
+ * The first chunk of a message must contain at least the
+ * notification header with the length field, otherwise we
+ * can't get an expected_size.
+ */
+ if (current_size < sizeof(struct usb_cdc_notification)) {
+ dev_dbg(&acm->control->dev, "urb too short\n");
+ goto exit;
+ }
+ dr = urb->transfer_buffer;
+ } else {
dr = (struct usb_cdc_notification *)acm->notification_buffer;
-
+ }
/* size = notification-header + (optional) data */
expected_size = sizeof(struct usb_cdc_notification) +
le16_to_cpu(dr->wLength);
- if (current_size < expected_size) {
+ if (acm->nb_index != 0 || current_size < expected_size) {
/* notification is transmitted fragmented, reassemble */
if (acm->nb_size < expected_size) {
u8 *new_buffer;
@@ -1727,13 +1738,16 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
- { USB_DEVICE(0x045b, 0x023c), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x023c), /* Renesas R-Car H3 USB Download mode */
+ .driver_info = DISABLE_ECHO, /* Don't echo banner */
+ },
+ { USB_DEVICE(0x045b, 0x0247), /* Renesas R-Car D3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
- { USB_DEVICE(0x045b, 0x0248), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x0248), /* Renesas R-Car M3-N USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
- { USB_DEVICE(0x045b, 0x024D), /* Renesas USB Download mode */
+ { USB_DEVICE(0x045b, 0x024D), /* Renesas R-Car E3 USB Download mode */
.driver_info = DISABLE_ECHO, /* Don't echo banner */
},
{ USB_DEVICE(0x0e8d, 0x0003), /* FIREFLY, MediaTek Inc; andrey.arapov@gmail.com */
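The acm_ctrl_irq() rework above is a "validate the header before trusting the length it carries" fix: expected_size is derived from dr->wLength, so the first fragment must contain at least a full notification header before that field may be read. The same pattern in a self-contained sketch (plain C, invented struct, not the usb_cdc_notification layout):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct notif_header {          /* invented: fixed header plus a length field */
            uint8_t  request_type;
            uint8_t  notification;
            uint16_t value;
            uint16_t index;
            uint16_t length;       /* length of the optional data that follows */
    };

    /* Returns the total expected size, or 0 if the fragment is too short to trust. */
    static size_t expected_message_size(const uint8_t *buf, size_t len)
    {
            struct notif_header hdr;

            if (len < sizeof(hdr)) /* header incomplete: cannot read hdr.length yet */
                    return 0;

            memcpy(&hdr, buf, sizeof(hdr));
            return sizeof(hdr) + hdr.length;
    }

    int main(void)
    {
            uint8_t short_frag[4] = { 0 };
            uint8_t full_frag[8]  = { 0, 0, 0, 0, 0, 0, 3, 0 }; /* length = 3 on LE hosts */

            printf("short fragment -> %zu\n",
                   expected_message_size(short_frag, sizeof(short_frag)));
            printf("full header    -> %zu\n",
                   expected_message_size(full_frag, sizeof(full_frag)));
            return 0;
    }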
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index c3f839637cb5..dcba4281ea48 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1849,6 +1849,17 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
hdev = interface_to_usbdev(intf);
/*
+ * The USB 2.0 spec prohibits hubs from having more than one
+ * configuration or interface, and we rely on this prohibition.
+ * Refuse to accept a device that violates it.
+ */
+ if (hdev->descriptor.bNumConfigurations > 1 ||
+ hdev->actconfig->desc.bNumInterfaces > 1) {
+ dev_err(&intf->dev, "Invalid hub with more than one config or interface\n");
+ return -EINVAL;
+ }
+
+ /*
* Set default autosuspend delay as 0 to speedup bus suspend,
* based on the below considerations:
*
@@ -4698,7 +4709,6 @@ void usb_ep0_reinit(struct usb_device *udev)
EXPORT_SYMBOL_GPL(usb_ep0_reinit);
#define usb_sndaddr0pipe() (PIPE_CONTROL << 30)
-#define usb_rcvaddr0pipe() ((PIPE_CONTROL << 30) | USB_DIR_IN)
static int hub_set_address(struct usb_device *udev, int devnum)
{
@@ -4804,7 +4814,7 @@ static int get_bMaxPacketSize0(struct usb_device *udev,
for (i = 0; i < GET_MAXPACKET0_TRIES; ++i) {
/* Start with invalid values in case the transfer fails */
buf->bDescriptorType = buf->bMaxPacketSize0 = 0;
- rc = usb_control_msg(udev, usb_rcvaddr0pipe(),
+ rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
USB_DT_DEVICE << 8, 0,
buf, size,
@@ -6056,6 +6066,36 @@ void usb_hub_cleanup(void)
} /* usb_hub_cleanup() */
/**
+ * hub_hc_release_resources - clear resources used by host controller
+ * @udev: pointer to device being released
+ *
+ * Context: task context, might sleep
+ *
+ * Releases the host controller resources in the correct order before any
+ * operation is performed on a resuming USB device. The host controller
+ * resources allocated for devices in the tree should be released starting
+ * from the last USB device in the tree toward the root hub. This function is
+ * used only while resuming a device that requires reinitialization, that is,
+ * when the flag udev->reset_resume is set.
+ *
+ * This call is synchronous, and may not be used in an interrupt context.
+ */
+static void hub_hc_release_resources(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ int i;
+
+ /* Release resources for all children before this device */
+ for (i = 0; i < udev->maxchild; i++)
+ if (hub->ports[i]->child)
+ hub_hc_release_resources(hub->ports[i]->child);
+
+ if (hcd->driver->reset_device)
+ hcd->driver->reset_device(hcd, udev);
+}
+
+/**
* usb_reset_and_verify_device - perform a USB port reset to reinitialize a device
* @udev: device to reset (not in SUSPENDED or NOTATTACHED state)
*
@@ -6119,6 +6159,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
bos = udev->bos;
udev->bos = NULL;
+ if (udev->reset_resume)
+ hub_hc_release_resources(udev);
+
mutex_lock(hcd->address0_mutex);
for (i = 0; i < PORT_INIT_TRIES; ++i) {
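hub_hc_release_resources() above frees host-controller state child-first, walking from the leaves of the device tree back toward the root hub before usb_reset_and_verify_device() reinitializes anything. The essential shape is a post-order traversal; a small generic sketch (plain C, made-up node type, not the usb_hub structures):

    #include <stdio.h>

    #define MAX_CHILDREN 4

    struct node {
            const char  *name;
            struct node *child[MAX_CHILDREN];
    };

    /* Release the children first, then the node itself (post-order). */
    static void release_resources(struct node *n)
    {
            for (int i = 0; i < MAX_CHILDREN; i++)
                    if (n->child[i])
                            release_resources(n->child[i]);

            printf("releasing %s\n", n->name);  /* reset_device() stand-in */
    }

    int main(void)
    {
            struct node leaf = { .name = "device" };
            struct node hub  = { .name = "hub",      .child = { &leaf } };
            struct node root = { .name = "root hub", .child = { &hub } };

            release_resources(&root);  /* prints: device, hub, root hub */
            return 0;
    }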
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 67732c791c93..8efbacc5bc34 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -341,6 +341,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* Prolific Single-LUN Mass Storage Card Reader */
+ { USB_DEVICE(0x067b, 0x2731), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_NO_LPM },
+
/* Saitek Cyborg Gold Joystick */
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -435,6 +439,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0c45, 0x7056), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Sony Xperia XZ1 Compact (lilac) smartphone in fastboot mode */
+ { USB_DEVICE(0x0fce, 0x0dde), .driver_info = USB_QUIRK_NO_LPM },
+
/* Action Semiconductor flash disk */
{ USB_DEVICE(0x10d6, 0x2200), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -525,6 +532,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Teclast disk */
+ { USB_DEVICE(0x1f75, 0x0917), .driver_info = USB_QUIRK_NO_LPM },
+
/* Hauppauge HVR-950q */
{ USB_DEVICE(0x2040, 0x7200), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e7bf9cc635be..bd4c788f03bc 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4615,6 +4615,7 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
spin_lock_irqsave(&hsotg->lock, flags);
hsotg->driver = NULL;
+ hsotg->gadget.dev.of_node = NULL;
hsotg->gadget.speed = USB_SPEED_UNKNOWN;
hsotg->enabled = 0;
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index dfa1b5fe48dc..66a08b527165 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -131,11 +131,24 @@ void dwc3_enable_susphy(struct dwc3 *dwc, bool enable)
}
}
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy)
{
+ unsigned int hw_mode;
u32 reg;
reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+
+ /*
+ * For DRD controllers, GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY should be cleared during mode switching,
+ * and they can be set after core initialization.
+ */
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+ if (hw_mode == DWC3_GHWPARAMS0_MODE_DRD && !ignore_susphy) {
+ if (DWC3_GCTL_PRTCAP(reg) != mode)
+ dwc3_enable_susphy(dwc, false);
+ }
+
reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
reg |= DWC3_GCTL_PRTCAPDIR(mode);
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
@@ -216,7 +229,7 @@ static void __dwc3_set_mode(struct work_struct *work)
spin_lock_irqsave(&dwc->lock, flags);
- dwc3_set_prtcap(dwc, desired_dr_role);
+ dwc3_set_prtcap(dwc, desired_dr_role, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -658,16 +671,7 @@ static int dwc3_ss_phy_setup(struct dwc3 *dwc, int index)
*/
reg &= ~DWC3_GUSB3PIPECTL_UX_EXIT_PX;
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB3PIPECTL_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB3PIPECTL.SUSPENDENABLE must be
- * cleared after power-on reset, and it can be set after core
- * initialization.
- */
+ /* Ensure the GUSB3PIPECTL.SUSPENDENABLE is cleared prior to phy init. */
reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
if (dwc->u2ss_inp3_quirk)
@@ -747,15 +751,7 @@ static int dwc3_hs_phy_setup(struct dwc3 *dwc, int index)
break;
}
- /*
- * Above DWC_usb3.0 1.94a, it is recommended to set
- * DWC3_GUSB2PHYCFG_SUSPHY to '0' during coreConsultant configuration.
- * So default value will be '0' when the core is reset. Application
- * needs to set it to '1' after the core initialization is completed.
- *
- * Similarly for DRD controllers, GUSB2PHYCFG.SUSPHY must be cleared
- * after power-on reset, and it can be set after core initialization.
- */
+ /* Ensure the GUSB2PHYCFG.SUSPHY is cleared prior to phy init. */
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
if (dwc->dis_enblslpm_quirk)
@@ -830,6 +826,25 @@ static int dwc3_phy_init(struct dwc3 *dwc)
goto err_exit_usb3_phy;
}
+ /*
+ * Above DWC_usb3.0 1.94a, it is recommended to set
+ * DWC3_GUSB3PIPECTL_SUSPHY and DWC3_GUSB2PHYCFG_SUSPHY to '0' during
+ * coreConsultant configuration. So default value will be '0' when the
+ * core is reset. Application needs to set it to '1' after the core
+ * initialization is completed.
+ *
+ * Certain phys require the P0 power state during initialization. Make sure
+ * GUSB3PIPECTL.SUSPENDENABLE and GUSB2PHYCFG.SUSPHY are clear prior to phy
+ * init so that the P0 state is maintained.
+ *
+ * After phy initialization, some phy operations can only be executed
+ * while in lower P states. Ensure GUSB3PIPECTL.SUSPENDENABLE and
+ * GUSB2PHYCFG.SUSPHY are set soon after initialization to avoid
+ * blocking phy ops.
+ */
+ if (!DWC3_VER_IS_WITHIN(DWC3, ANY, 194A))
+ dwc3_enable_susphy(dwc, true);
+
return 0;
err_exit_usb3_phy:
@@ -1588,7 +1603,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, false);
@@ -1600,7 +1615,7 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
return dev_err_probe(dev, ret, "failed to initialize gadget\n");
break;
case USB_DR_MODE_HOST:
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, false);
if (dwc->usb2_phy)
otg_set_vbus(dwc->usb2_phy->otg, true);
@@ -1645,7 +1660,7 @@ static void dwc3_core_exit_mode(struct dwc3 *dwc)
}
/* de-assert DRVVBUS for HOST and OTG mode */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
}
static void dwc3_get_software_properties(struct dwc3 *dwc)
@@ -1835,8 +1850,6 @@ static void dwc3_get_properties(struct dwc3 *dwc)
dwc->tx_thr_num_pkt_prd = tx_thr_num_pkt_prd;
dwc->tx_max_burst_prd = tx_max_burst_prd;
- dwc->imod_interval = 0;
-
dwc->tx_fifo_resize_max_num = tx_fifo_resize_max_num;
}
@@ -1854,21 +1867,19 @@ static void dwc3_check_params(struct dwc3 *dwc)
unsigned int hwparam_gen =
DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3);
- /* Check for proper value of imod_interval */
- if (dwc->imod_interval && !dwc3_has_imod(dwc)) {
- dev_warn(dwc->dev, "Interrupt moderation not supported\n");
- dwc->imod_interval = 0;
- }
-
/*
+ * Enable IMOD for all controllers that support it.
+ *
+ * In particular, DWC_usb3 v3.00a must enable this feature for
+ * the following reason:
+ *
* Workaround for STAR 9000961433 which affects only version
* 3.00a of the DWC_usb3 core. This prevents the controller
* interrupt from being masked while handling events. IMOD
* allows us to work around this issue. Enable it for the
* affected version.
*/
- if (!dwc->imod_interval &&
- DWC3_VER_IS(DWC3, 300A))
+ if (dwc3_has_imod(dwc))
dwc->imod_interval = 1;
/* Check the maximum_speed parameter */
@@ -2457,7 +2468,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE, true);
dwc3_gadget_resume(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
@@ -2465,7 +2476,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST, true);
break;
}
/* Restore GUSB2PHYCFG bits that were modified in suspend */
@@ -2494,7 +2505,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (ret)
return ret;
- dwc3_set_prtcap(dwc, dwc->current_dr_role);
+ dwc3_set_prtcap(dwc, dwc->current_dr_role, true);
dwc3_otg_init(dwc);
if (dwc->current_otg_role == DWC3_OTG_ROLE_HOST) {
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index ac7c730f81ac..aaa39e663f60 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -717,6 +717,7 @@ struct dwc3_event_buffer {
/**
* struct dwc3_ep - device side endpoint representation
* @endpoint: usb endpoint
+ * @nostream_work: work for handling bulk NoStream
* @cancelled_list: list of cancelled requests for this endpoint
* @pending_list: list of pending requests for this endpoint
* @started_list: list of started requests on this endpoint
@@ -1557,7 +1558,7 @@ struct dwc3_gadget_ep_cmd_params {
#define DWC3_HAS_OTG BIT(3)
/* prototypes */
-void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode);
+void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode, bool ignore_susphy);
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index d76ae676783c..7977860932b1 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -173,7 +173,7 @@ void dwc3_otg_init(struct dwc3 *dwc)
* block "Initialize GCTL for OTG operation".
*/
/* GCTL.PrtCapDir=2'b11 */
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* GUSB2PHYCFG0.SusPHY=0 */
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
@@ -556,7 +556,7 @@ int dwc3_drd_init(struct dwc3 *dwc)
dwc3_drd_update(dwc);
} else {
- dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG);
+ dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_OTG, true);
/* use OTG block to get ID event */
irq = dwc3_otg_get_irq(dwc);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index d27af65eb08a..89a4dc8ebf94 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2629,10 +2629,38 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
{
u32 reg;
u32 timeout = 2000;
+ u32 saved_config = 0;
if (pm_runtime_suspended(dwc->dev))
return 0;
+ /*
+ * When operating in USB 2.0 speeds (HS/FS), ensure that
+ * GUSB2PHYCFG.ENBLSLPM and GUSB2PHYCFG.SUSPHY are cleared before starting
+ * or stopping the controller. This resolves timeout issues that occur
+ * during frequent role switches between host and device modes.
+ *
+ * Save and clear these settings, then restore them after completing the
+ * controller start or stop sequence.
+ *
+ * This solution was discovered through experimentation as it is not
+ * mentioned in the dwc3 programming guide. It has been tested on
+ * Exynos platforms.
+ */
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
+ saved_config |= DWC3_GUSB2PHYCFG_SUSPHY;
+ reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
+ }
+
+ if (reg & DWC3_GUSB2PHYCFG_ENBLSLPM) {
+ saved_config |= DWC3_GUSB2PHYCFG_ENBLSLPM;
+ reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
+ }
+
+ if (saved_config)
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
if (DWC3_VER_IS_WITHIN(DWC3, ANY, 187A)) {
@@ -2660,6 +2688,12 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on)
reg &= DWC3_DSTS_DEVCTRLHLT;
} while (--timeout && !(!is_on ^ !reg));
+ if (saved_config) {
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ reg |= saved_config;
+ dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+ }
+
if (!timeout)
return -ETIMEDOUT;
@@ -4467,14 +4501,18 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(0),
DWC3_GEVNTSIZ_SIZE(evt->length));
+ evt->flags &= ~DWC3_EVENT_PENDING;
+ /*
+ * Add an explicit write memory barrier to make sure that the update of
+ * clearing DWC3_EVENT_PENDING is observed in dwc3_check_event_buf()
+ */
+ wmb();
+
if (dwc->imod_interval) {
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), DWC3_GEVNTCOUNT_EHB);
dwc3_writel(dwc->regs, DWC3_DEV_IMOD(0), dwc->imod_interval);
}
- /* Keep the clearing of DWC3_EVENT_PENDING at the end */
- evt->flags &= ~DWC3_EVENT_PENDING;
-
return ret;
}
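The dwc3_gadget_run_stop() hunk above saves whichever of SUSPHY/ENBLSLPM were set, clears them for the duration of the start/stop sequence, and afterwards restores only the bits it actually cleared. That save/clear/restore idiom is easy to get wrong (restoring bits that were never set); a compact sketch with a plain variable standing in for the MMIO register, using invented bit positions:

    #include <stdint.h>
    #include <stdio.h>

    #define CFG_SUSPHY    (1u << 6)   /* illustrative bits, not the real layout */
    #define CFG_ENBLSLPM  (1u << 8)

    static uint32_t phy_cfg = CFG_SUSPHY;   /* pretend MMIO register */

    static void run_stop_sequence(void)
    {
            uint32_t saved = 0;
            uint32_t reg = phy_cfg;

            if (reg & CFG_SUSPHY)   { saved |= CFG_SUSPHY;   reg &= ~CFG_SUSPHY; }
            if (reg & CFG_ENBLSLPM) { saved |= CFG_ENBLSLPM; reg &= ~CFG_ENBLSLPM; }

            if (saved)
                    phy_cfg = reg;     /* write back only if something was cleared */

            printf("start/stop runs with cfg=%#x\n", (unsigned)phy_cfg);

            if (saved)
                    phy_cfg |= saved;  /* restore exactly what was cleared */
    }

    int main(void)
    {
            run_stop_sequence();
            /* SUSPHY is back, ENBLSLPM stays clear because it was never set. */
            printf("restored cfg=%#x\n", (unsigned)phy_cfg);
            return 0;
    }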
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index bdda8c74602d..869ad99afb48 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1050,10 +1050,11 @@ static int set_config(struct usb_composite_dev *cdev,
else
usb_gadget_set_remote_wakeup(gadget, 0);
done:
- if (power <= USB_SELF_POWER_VBUS_MAX_DRAW)
- usb_gadget_set_selfpowered(gadget);
- else
+ if (power > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ (c && !(c->bmAttributes & USB_CONFIG_ATT_SELFPOWER)))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
@@ -2615,7 +2616,10 @@ void composite_suspend(struct usb_gadget *gadget)
cdev->suspended = 1;
- usb_gadget_set_selfpowered(gadget);
+ if (cdev->config &&
+ cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER)
+ usb_gadget_set_selfpowered(gadget);
+
usb_gadget_vbus_draw(gadget, 2);
}
@@ -2649,8 +2653,11 @@ void composite_resume(struct usb_gadget *gadget)
else
maxpower = min(maxpower, 900U);
- if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW)
+ if (maxpower > USB_SELF_POWER_VBUS_MAX_DRAW ||
+ !(cdev->config->bmAttributes & USB_CONFIG_ATT_SELFPOWER))
usb_gadget_clear_selfpowered(gadget);
+ else
+ usb_gadget_set_selfpowered(gadget);
usb_gadget_vbus_draw(gadget, maxpower);
} else {
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 837fcdfa3840..da82598fcef8 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -283,7 +283,7 @@ f_midi_complete(struct usb_ep *ep, struct usb_request *req)
/* Our transmit completed. See if there's more to go.
* f_midi_transmit eats req, don't queue it again. */
req->length = 0;
- f_midi_transmit(midi);
+ queue_work(system_highpri_wq, &midi->work);
return;
}
break;
@@ -907,6 +907,15 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
status = -ENODEV;
+ /*
+ * Reset wMaxPacketSize to the maximum packet size of a FS bulk transfer
+ * before the endpoint claim. This ensures that wMaxPacketSize does not
+ * exceed the limit during bind retries, where the dwc3 TX/RX FIFOs are
+ * configured with a maxpacket size of 512 bytes for IN/OUT endpoints that
+ * support HS speed only.
+ */
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(64);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(64);
+
/* allocate instance-specific endpoints */
midi->in_ep = usb_ep_autoconfig(cdev->gadget, &bulk_in_desc);
if (!midi->in_ep)
@@ -1000,11 +1009,11 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
}
/* configure the endpoint descriptors ... */
- ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
- ms_out_desc.bNumEmbMIDIJack = midi->in_ports;
+ ms_out_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
+ ms_out_desc.bNumEmbMIDIJack = midi->out_ports;
- ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->out_ports);
- ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
+ ms_in_desc.bLength = USB_DT_MS_ENDPOINT_SIZE(midi->in_ports);
+ ms_in_desc.bNumEmbMIDIJack = midi->in_ports;
/* ... and add them to the list */
endpoint_descriptor_index = i;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 09e2838917e2..f58590bf5e02 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -1052,8 +1052,8 @@ void gether_suspend(struct gether *link)
* There is a transfer in progress. So we trigger a remote
* wakeup to inform the host.
*/
- ether_wakeup_host(dev->port_usb);
- return;
+ if (!ether_wakeup_host(dev->port_usb))
+ return;
}
spin_lock_irqsave(&dev->lock, flags);
link->is_suspend = true;
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 79e223713d8b..fb77b0b21790 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -818,7 +818,7 @@ int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
return -EINVAL;
/* Allocate a kthread for asynchronous hw submit handler. */
- video->kworker = kthread_create_worker(0, "UVCG");
+ video->kworker = kthread_run_worker(0, "UVCG");
if (IS_ERR(video->kworker)) {
uvcg_err(&video->uvc->func, "failed to create UVCG kworker\n");
return PTR_ERR(video->kworker);
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index a6f46364be65..4b3d5075621a 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -1543,8 +1543,8 @@ void usb_del_gadget(struct usb_gadget *gadget)
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
sysfs_remove_link(&udc->dev.kobj, "gadget");
- flush_work(&gadget->work);
device_del(&gadget->dev);
+ flush_work(&gadget->work);
ida_free(&gadget_id_numbers, gadget->id_number);
cancel_work_sync(&udc->vbus_work);
device_unregister(&udc->dev);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fce5c41d9f29..89b304cf6d03 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -310,7 +310,7 @@ struct renesas_usb3_request {
struct list_head queue;
};
-#define USB3_EP_NAME_SIZE 8
+#define USB3_EP_NAME_SIZE 16
struct renesas_usb3_ep {
struct usb_ep ep;
struct renesas_usb3 *usb3;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 1f9c1b1435d8..0404489c2f6a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -958,6 +958,15 @@ static void quirk_usb_disable_ehci(struct pci_dev *pdev)
* booting from USB disk or using a usb keyboard
*/
hcc_params = readl(base + EHCI_HCC_PARAMS);
+
+ /* LS7A EHCI controller doesn't have extended capabilities, the
+ * EECP (EHCI Extended Capabilities Pointer) field of HCCPARAMS
+ * register should be 0x0 but it reads as 0xa0. So clear it to
+ * avoid error messages on boot.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_LOONGSON && pdev->device == 0x7a14)
+ hcc_params &= ~(0xffL << 8);
+
offset = (hcc_params >> 8) & 0xff;
while (offset && --count) {
pci_read_config_dword(pdev, offset, &cap);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9693464c0520..69c278b64084 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/unaligned.h>
#include <linux/bitfield.h>
+#include <linux/pci.h>
#include "xhci.h"
#include "xhci-trace.h"
@@ -770,9 +771,16 @@ static int xhci_exit_test_mode(struct xhci_hcd *xhci)
enum usb_link_tunnel_mode xhci_port_is_tunneled(struct xhci_hcd *xhci,
struct xhci_port *port)
{
+ struct usb_hcd *hcd;
void __iomem *base;
u32 offset;
+ /* Don't try to probe this capability for non-Intel hosts */
+ hcd = xhci_to_hcd(xhci);
+ if (!dev_is_pci(hcd->self.controller) ||
+ to_pci_dev(hcd->self.controller)->vendor != PCI_VENDOR_ID_INTEL)
+ return USB_LINK_UNKNOWN;
+
base = &xhci->cap_regs->hc_capbase;
offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_INTEL_SPR_SHADOW);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 92703efda1f7..fdf0c1008225 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2437,7 +2437,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* and our use of dma addresses in the trb_address_map radix tree needs
* TRB_SEGMENT_SIZE alignment, so we pick the greater alignment need.
*/
- if (xhci->quirks & XHCI_ZHAOXIN_TRB_FETCH)
+ if (xhci->quirks & XHCI_TRB_OVERFETCH)
+ /* Buggy HC prefetches beyond segment bounds - allocate dummy space at the end */
xhci->segment_pool = dma_pool_create("xHCI ring segments", dev,
TRB_SEGMENT_SIZE * 2, TRB_SEGMENT_SIZE * 2, xhci->page_size * 2);
else
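The XHCI_TRB_OVERFETCH path above doubles the ring-segment allocation so a controller that prefetches past the end of a segment still reads memory the driver owns. Generically, that is just "allocate slack when the hardware is known to over-read"; a trivial sketch with arbitrary sizes (not the real TRB_SEGMENT_SIZE or dma_pool API):

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SEG_SIZE 256   /* arbitrary segment size for illustration */

    /* Allocate a segment, padding it when the hardware is known to over-fetch. */
    static void *alloc_segment(bool overfetch_quirk, size_t *alloc_size)
    {
            *alloc_size = overfetch_quirk ? 2 * SEG_SIZE : SEG_SIZE;
            return calloc(1, *alloc_size);
    }

    int main(void)
    {
            size_t size;
            void *seg = alloc_segment(true, &size);

            if (!seg)
                    return 1;

            printf("usable bytes: %d, allocated: %zu\n", SEG_SIZE, size);
            free(seg);
            return 0;
    }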
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2d1e205c14c6..54460d11f7ee 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -38,6 +38,8 @@
#define PCI_DEVICE_ID_ETRON_EJ168 0x7023
#define PCI_DEVICE_ID_ETRON_EJ188 0x7052
+#define PCI_DEVICE_ID_VIA_VL805 0x3483
+
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI 0x9cb1
@@ -418,8 +420,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == 0x3432)
xhci->quirks |= XHCI_BROKEN_STREAMS;
- if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483)
+ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == PCI_DEVICE_ID_VIA_VL805) {
xhci->quirks |= XHCI_LPM_SUPPORT;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
+ }
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) {
@@ -467,11 +471,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->device == 0x9202) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->device == 0x9203)
- xhci->quirks |= XHCI_ZHAOXIN_TRB_FETCH;
+ xhci->quirks |= XHCI_TRB_OVERFETCH;
}
if (pdev->vendor == PCI_VENDOR_ID_CDNS &&
@@ -653,8 +657,8 @@ put_runtime_pm:
}
EXPORT_SYMBOL_NS_GPL(xhci_pci_common_probe, "xhci");
-static const struct pci_device_id pci_ids_reject[] = {
- /* handled by xhci-pci-renesas */
+/* handled by xhci-pci-renesas if enabled */
+static const struct pci_device_id pci_ids_renesas[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0014) },
{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, 0x0015) },
{ /* end: all zeroes */ }
@@ -662,7 +666,8 @@ static const struct pci_device_id pci_ids_reject[] = {
static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
- if (pci_match_id(pci_ids_reject, dev))
+ if (IS_ENABLED(CONFIG_USB_XHCI_PCI_RENESAS) &&
+ pci_match_id(pci_ids_renesas, dev))
return -ENODEV;
return xhci_pci_common_probe(dev, id);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 45653114ccd7..1a90ebc8a30e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -780,8 +780,12 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
struct xhci_segment *seg;
ring = xhci->cmd_ring;
- xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ /* erase all TRBs before the link */
memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ /* clear link cycle bit */
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
xhci_initialize_ring_info(ring);
/*
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8c164340a2c3..779b01dee068 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1632,7 +1632,7 @@ struct xhci_hcd {
#define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42)
#define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43)
#define XHCI_RESET_TO_DEFAULT BIT_ULL(44)
-#define XHCI_ZHAOXIN_TRB_FETCH BIT_ULL(45)
+#define XHCI_TRB_OVERFETCH BIT_ULL(45)
#define XHCI_ZHAOXIN_HOST BIT_ULL(46)
#define XHCI_WRITE_64_HI_LO BIT_ULL(47)
#define XHCI_CDNS_SCTX_QUIRK BIT_ULL(48)
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index 6c3ececf9137..8423be59ec0f 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -212,7 +212,7 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop)
if (of_property_read_u32(node, "clock-frequency", &clk_rate))
clk_rate = 0;
- needs_clk = of_property_read_bool(node, "clocks");
+ needs_clk = of_property_present(node, "clocks");
}
nop->gpiod_reset = devm_gpiod_get_optional(dev, "reset",
GPIOD_ASIS);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 935fc496fe94..4b35ef216125 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -312,8 +312,10 @@ static int usbhsc_clk_get(struct device *dev, struct usbhs_priv *priv)
priv->clks[1] = of_clk_get(dev_of_node(dev), 1);
if (PTR_ERR(priv->clks[1]) == -ENOENT)
priv->clks[1] = NULL;
- else if (IS_ERR(priv->clks[1]))
+ else if (IS_ERR(priv->clks[1])) {
+ clk_put(priv->clks[0]);
return PTR_ERR(priv->clks[1]);
+ }
return 0;
}
@@ -779,6 +781,8 @@ static void usbhs_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "usb remove\n");
+ flush_delayed_work(&priv->notify_hotplug_work);
+
/* power off */
if (!usbhs_get_dparam(priv, runtime_pwctrl))
usbhsc_power_ctrl(priv, 0);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 105132ae87ac..e8e5723f5412 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1094,7 +1094,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
goto usbhs_mod_gadget_probe_err_gpriv;
}
- gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
+ gpriv->transceiver = devm_usb_get_phy(dev, USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
!IS_ERR(gpriv->transceiver) ? "" : "no ");
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index c58a12c147f4..30482d4cf826 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -387,8 +387,11 @@ usb_role_switch_register(struct device *parent,
dev_set_name(&sw->dev, "%s-role-switch",
desc->name ? desc->name : dev_name(parent));
+ sw->registered = true;
+
ret = device_register(&sw->dev);
if (ret) {
+ sw->registered = false;
put_device(&sw->dev);
return ERR_PTR(ret);
}
@@ -399,8 +402,6 @@ usb_role_switch_register(struct device *parent,
dev_warn(&sw->dev, "failed to add component\n");
}
- sw->registered = true;
-
/* TODO: Symlinks for the host port and the device controller. */
return sw;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1e2ae0c6c41c..58bd54e8c483 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -619,15 +619,6 @@ static void option_instat_callback(struct urb *urb);
/* Luat Air72*U series based on UNISOC UIS8910 uses UNISOC's vendor ID */
#define LUAT_PRODUCT_AIR720U 0x4e00
-/* MeiG Smart Technology products */
-#define MEIGSMART_VENDOR_ID 0x2dee
-/* MeiG Smart SRM815/SRM825L based on Qualcomm 315 */
-#define MEIGSMART_PRODUCT_SRM825L 0x4d22
-/* MeiG Smart SLM320 based on UNISOC UIS8910 */
-#define MEIGSMART_PRODUCT_SLM320 0x4d41
-/* MeiG Smart SLM770A based on ASR1803 */
-#define MEIGSMART_PRODUCT_SLM770A 0x4d57
-
/* Device flags */
/* Highest interface number which can be used with NCTRL() and RSVD() */
@@ -1367,15 +1358,15 @@ static const struct usb_device_id option_ids[] = {
.driver_info = NCTRL(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990A (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990A (MBIM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990A (RNDIS) */
.driver_info = NCTRL(2) | RSVD(3) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990A (ECM) */
.driver_info = NCTRL(0) | RSVD(1) },
- { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990 (PCIe) */
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1075, 0xff), /* Telit FN990A (PCIe) */
.driver_info = RSVD(0) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1080, 0xff), /* Telit FE990 (rmnet) */
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
@@ -1403,6 +1394,22 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(0) | NCTRL(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10c8, 0xff), /* Telit FE910C04 (rmnet) */
.driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x60) }, /* Telit FN990B (rmnet) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d0, 0x30),
+ .driver_info = NCTRL(5) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x60) }, /* Telit FN990B (MBIM) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d1, 0x30),
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x60) }, /* Telit FN990B (RNDIS) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d2, 0x30),
+ .driver_info = NCTRL(6) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x60) }, /* Telit FN990B (ECM) */
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x40) },
+ { USB_DEVICE_INTERFACE_PROTOCOL(TELIT_VENDOR_ID, 0x10d3, 0x30),
+ .driver_info = NCTRL(6) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2347,6 +2354,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) }, /* Fibocom FM650-CN (NCM mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) }, /* Fibocom FM650-CN (RNDIS mode) */
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) }, /* Fibocom FM650-CN (MBIM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d41, 0xff, 0, 0) }, /* MeiG Smart SLM320 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d57, 0xff, 0, 0) }, /* MeiG Smart SLM770A */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0, 0) }, /* MeiG Smart SRM815 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x02) }, /* MeiG Smart SLM828 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0x10, 0x03) }, /* MeiG Smart SLM828 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x30) }, /* MeiG Smart SRM815 and SRM825L */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x40) }, /* MeiG Smart SRM825L */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x2dee, 0x4d22, 0xff, 0xff, 0x60) }, /* MeiG Smart SRM825L */
{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
@@ -2403,12 +2418,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, TOZED_PRODUCT_LT70C, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(UNISOC_VENDOR_ID, LUAT_PRODUCT_AIR720U, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM320, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SLM770A, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0, 0) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x30) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x40) },
- { USB_DEVICE_AND_INTERFACE_INFO(MEIGSMART_VENDOR_ID, MEIGSMART_PRODUCT_SRM825L, 0xff, 0xff, 0x60) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0530, 0xff), /* TCL IK512 MBIM */
.driver_info = NCTRL(1) },
{ USB_DEVICE_INTERFACE_CLASS(0x1bbb, 0x0640, 0xff), /* TCL IK512 ECM */
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 64f6dd0dc660..88c50b984e8a 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -334,6 +334,11 @@ static int rt1711h_probe(struct i2c_client *client)
{
int ret;
struct rt1711h_chip *chip;
+ const u16 alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED |
+ TCPC_ALERT_TX_FAILED | TCPC_ALERT_RX_HARD_RST |
+ TCPC_ALERT_RX_STATUS | TCPC_ALERT_POWER_STATUS |
+ TCPC_ALERT_CC_STATUS | TCPC_ALERT_RX_BUF_OVF |
+ TCPC_ALERT_FAULT;
chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -382,6 +387,12 @@ static int rt1711h_probe(struct i2c_client *client)
dev_name(chip->dev), chip);
if (ret < 0)
return ret;
+
+ /* Enable alert interrupts */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, alert_mask);
+ if (ret < 0)
+ return ret;
+
enable_irq_wake(client->irq);
return 0;
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 47be450d2be3..6bf1a22c785a 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -5591,8 +5591,7 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
port->pps_data.active, 0);
tcpm_set_charge(port, false);
- tcpm_set_state(port, hard_reset_state(port),
- port->timings.ps_src_off_time);
+ tcpm_set_state(port, ERROR_RECOVERY, port->timings.ps_src_off_time);
break;
case PR_SWAP_SNK_SRC_SOURCE_ON:
tcpm_enable_auto_vbus_discharge(port, true);
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index fcf499cc9458..2a2915b0a645 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -25,7 +25,7 @@
* difficult to estimate the time it takes for the system to process the command
* before it is actually passed to the PPM.
*/
-#define UCSI_TIMEOUT_MS 5000
+#define UCSI_TIMEOUT_MS 10000
/*
* UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
@@ -1346,7 +1346,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
mutex_lock(&ucsi->ppm_lock);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
@@ -1364,7 +1364,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
tmo = jiffies + msecs_to_jiffies(UCSI_TIMEOUT_MS);
do {
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret < 0)
goto out;
if (cci & UCSI_CCI_COMMAND_COMPLETE)
@@ -1393,7 +1393,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
/* Give the PPM time to process a reset before reading CCI */
msleep(20);
- ret = ucsi->ops->read_cci(ucsi, &cci);
+ ret = ucsi->ops->poll_cci(ucsi, &cci);
if (ret)
goto out;
@@ -1825,11 +1825,11 @@ static int ucsi_init(struct ucsi *ucsi)
err_unregister:
for (con = connector; con->port; con++) {
+ if (con->wq)
+ destroy_workqueue(con->wq);
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
- if (con->wq)
- destroy_workqueue(con->wq);
usb_power_delivery_unregister_capabilities(con->port_sink_caps);
con->port_sink_caps = NULL;
@@ -1929,8 +1929,8 @@ struct ucsi *ucsi_create(struct device *dev, const struct ucsi_operations *ops)
struct ucsi *ucsi;
if (!ops ||
- !ops->read_version || !ops->read_cci || !ops->read_message_in ||
- !ops->sync_control || !ops->async_control)
+ !ops->read_version || !ops->read_cci || !ops->poll_cci ||
+ !ops->read_message_in || !ops->sync_control || !ops->async_control)
return ERR_PTR(-EINVAL);
ucsi = kzalloc(sizeof(*ucsi), GFP_KERNEL);
@@ -2013,10 +2013,6 @@ void ucsi_unregister(struct ucsi *ucsi)
for (i = 0; i < ucsi->cap.num_connectors; i++) {
cancel_work_sync(&ucsi->connector[i].work);
- ucsi_unregister_partner(&ucsi->connector[i]);
- ucsi_unregister_altmodes(&ucsi->connector[i],
- UCSI_RECIPIENT_CON);
- ucsi_unregister_port_psy(&ucsi->connector[i]);
if (ucsi->connector[i].wq) {
struct ucsi_work *uwork;
@@ -2032,6 +2028,11 @@ void ucsi_unregister(struct ucsi *ucsi)
destroy_workqueue(ucsi->connector[i].wq);
}
+ ucsi_unregister_partner(&ucsi->connector[i]);
+ ucsi_unregister_altmodes(&ucsi->connector[i],
+ UCSI_RECIPIENT_CON);
+ ucsi_unregister_port_psy(&ucsi->connector[i]);
+
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_sink_caps);
ucsi->connector[i].port_sink_caps = NULL;
usb_power_delivery_unregister_capabilities(ucsi->connector[i].port_source_caps);
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index 82735eb34f0e..28780acc4af2 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -62,6 +62,7 @@ struct dentry;
* struct ucsi_operations - UCSI I/O operations
* @read_version: Read implemented UCSI version
* @read_cci: Read CCI register
+ * @poll_cci: Read CCI register while polling with notifications disabled
* @read_message_in: Read message data from UCSI
* @sync_control: Blocking control operation
* @async_control: Non-blocking control operation
@@ -76,6 +77,7 @@ struct dentry;
struct ucsi_operations {
int (*read_version)(struct ucsi *ucsi, u16 *version);
int (*read_cci)(struct ucsi *ucsi, u32 *cci);
+ int (*poll_cci)(struct ucsi *ucsi, u32 *cci);
int (*read_message_in)(struct ucsi *ucsi, void *val, size_t val_len);
int (*sync_control)(struct ucsi *ucsi, u64 command);
int (*async_control)(struct ucsi *ucsi, u64 command);
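
For glue drivers that need nothing special while the core polls with notifications disabled, the new callback can simply alias the existing CCI read, as the CCG, glink, STM32G0 and Yoga C630 drivers below do; only ucsi_acpi splits the two so the extra DSM read happens on the polling path alone. A minimal sketch of the ops wiring for a hypothetical MMIO-based glue driver (foo_ucsi_*, FOO_UCSI_CCI and the register layout are assumptions, not part of this series):

	/* Hypothetical glue driver: reuse the normal CCI read for polling. */
	static int foo_ucsi_read_cci(struct ucsi *ucsi, u32 *cci)
	{
		struct foo_ucsi *foo = ucsi_get_drvdata(ucsi);

		*cci = readl(foo->regs + FOO_UCSI_CCI);	/* assumed MMIO layout */
		return 0;
	}

	static const struct ucsi_operations foo_ucsi_ops = {
		.read_version	 = foo_ucsi_read_version,
		.read_cci	 = foo_ucsi_read_cci,
		.poll_cci	 = foo_ucsi_read_cci,	/* no extra work while polling */
		.read_message_in = foo_ucsi_read_message_in,
		.sync_control	 = ucsi_sync_control_common,
		.async_control	 = foo_ucsi_async_control,
	};
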
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 5c5515551963..ac1ebb5d9527 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -59,19 +59,24 @@ static int ucsi_acpi_read_version(struct ucsi *ucsi, u16 *version)
static int ucsi_acpi_read_cci(struct ucsi *ucsi, u32 *cci)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
- int ret;
-
- if (UCSI_COMMAND(ua->cmd) == UCSI_PPM_RESET) {
- ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
- if (ret)
- return ret;
- }
memcpy(cci, ua->base + UCSI_CCI, sizeof(*cci));
return 0;
}
+static int ucsi_acpi_poll_cci(struct ucsi *ucsi, u32 *cci)
+{
+ struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
+ int ret;
+
+ ret = ucsi_acpi_dsm(ua, UCSI_DSM_FUNC_READ);
+ if (ret)
+ return ret;
+
+ return ucsi_acpi_read_cci(ucsi, cci);
+}
+
static int ucsi_acpi_read_message_in(struct ucsi *ucsi, void *val, size_t val_len)
{
struct ucsi_acpi *ua = ucsi_get_drvdata(ucsi);
@@ -94,6 +99,7 @@ static int ucsi_acpi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_acpi_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_acpi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_acpi_async_control
@@ -142,6 +148,7 @@ static int ucsi_gram_sync_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations ucsi_gram_ops = {
.read_version = ucsi_acpi_read_version,
.read_cci = ucsi_acpi_read_cci,
+ .poll_cci = ucsi_acpi_poll_cci,
.read_message_in = ucsi_gram_read_message_in,
.sync_control = ucsi_gram_sync_control,
.async_control = ucsi_acpi_async_control
diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
index 740171f24ef9..4b1668733a4b 100644
--- a/drivers/usb/typec/ucsi/ucsi_ccg.c
+++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
@@ -664,6 +664,7 @@ err_put:
static const struct ucsi_operations ucsi_ccg_ops = {
.read_version = ucsi_ccg_read_version,
.read_cci = ucsi_ccg_read_cci,
+ .poll_cci = ucsi_ccg_read_cci,
.read_message_in = ucsi_ccg_read_message_in,
.sync_control = ucsi_ccg_sync_control,
.async_control = ucsi_ccg_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_glink.c b/drivers/usb/typec/ucsi/ucsi_glink.c
index fed39d458090..8af79101a2fc 100644
--- a/drivers/usb/typec/ucsi/ucsi_glink.c
+++ b/drivers/usb/typec/ucsi/ucsi_glink.c
@@ -206,6 +206,7 @@ static void pmic_glink_ucsi_connector_status(struct ucsi_connector *con)
static const struct ucsi_operations pmic_glink_ucsi_ops = {
.read_version = pmic_glink_ucsi_read_version,
.read_cci = pmic_glink_ucsi_read_cci,
+ .poll_cci = pmic_glink_ucsi_read_cci,
.read_message_in = pmic_glink_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = pmic_glink_ucsi_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_stm32g0.c b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
index 6923fad31d79..57ef7d83a412 100644
--- a/drivers/usb/typec/ucsi/ucsi_stm32g0.c
+++ b/drivers/usb/typec/ucsi/ucsi_stm32g0.c
@@ -424,6 +424,7 @@ static irqreturn_t ucsi_stm32g0_irq_handler(int irq, void *data)
static const struct ucsi_operations ucsi_stm32g0_ops = {
.read_version = ucsi_stm32g0_read_version,
.read_cci = ucsi_stm32g0_read_cci,
+ .poll_cci = ucsi_stm32g0_read_cci,
.read_message_in = ucsi_stm32g0_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = ucsi_stm32g0_async_control,
diff --git a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
index 4cae85c0dc12..d33e3f2dd1d8 100644
--- a/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
+++ b/drivers/usb/typec/ucsi/ucsi_yoga_c630.c
@@ -74,6 +74,7 @@ static int yoga_c630_ucsi_async_control(struct ucsi *ucsi, u64 command)
static const struct ucsi_operations yoga_c630_ucsi_ops = {
.read_version = yoga_c630_ucsi_read_version,
.read_cci = yoga_c630_ucsi_read_cci,
+ .poll_cci = yoga_c630_ucsi_read_cci,
.read_message_in = yoga_c630_ucsi_read_message_in,
.sync_control = ucsi_sync_control_common,
.async_control = yoga_c630_ucsi_async_control,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9ac25d08f473..63612faeab72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -666,7 +666,7 @@ static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
worker, name);
- if (!vtsk)
+ if (IS_ERR(vtsk))
goto free_worker;
mutex_init(&worker->mutex);
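
The vhost fix above reflects that vhost_task_create() reports failure with an ERR_PTR()-encoded pointer rather than NULL, so a plain NULL check silently accepts an error pointer. A minimal sketch of the general <linux/err.h> idiom, with an illustrative widget type and callee (not the vhost API itself):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct widget { int id; };		/* illustrative */

	/* Callees in this style encode errno values in the returned pointer. */
	static struct widget *widget_create(void)
	{
		struct widget *w = kzalloc(sizeof(*w), GFP_KERNEL);

		if (!w)
			return ERR_PTR(-ENOMEM);
		return w;
	}

	static int widget_client(void)
	{
		struct widget *w = widget_create();

		/* A NULL check here would let ERR_PTR(-ENOMEM) slip through. */
		if (IS_ERR(w))
			return PTR_ERR(w);

		kfree(w);
		return 0;
	}
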
diff --git a/drivers/virt/acrn/hsm.c b/drivers/virt/acrn/hsm.c
index c24036c4e51e..e4e196abdaac 100644
--- a/drivers/virt/acrn/hsm.c
+++ b/drivers/virt/acrn/hsm.c
@@ -49,7 +49,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
switch (cmd & PMCMD_TYPE_MASK) {
case ACRN_PMCMD_GET_PX_CNT:
case ACRN_PMCMD_GET_CX_CNT:
- pm_info = kmalloc(sizeof(u64), GFP_KERNEL);
+ pm_info = kzalloc(sizeof(u64), GFP_KERNEL);
if (!pm_info)
return -ENOMEM;
@@ -64,7 +64,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(pm_info);
break;
case ACRN_PMCMD_GET_PX_DATA:
- px_data = kmalloc(sizeof(*px_data), GFP_KERNEL);
+ px_data = kzalloc(sizeof(*px_data), GFP_KERNEL);
if (!px_data)
return -ENOMEM;
@@ -79,7 +79,7 @@ static int pmcmd_ioctl(u64 cmd, void __user *uptr)
kfree(px_data);
break;
case ACRN_PMCMD_GET_CX_DATA:
- cx_data = kmalloc(sizeof(*cx_data), GFP_KERNEL);
+ cx_data = kzalloc(sizeof(*cx_data), GFP_KERNEL);
if (!cx_data)
return -ENOMEM;
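
The kmalloc()-to-kzalloc() conversions above close an information leak: these buffers are copied back to user space even when the hypercall fills only part of them, so any unwritten bytes must not carry stale heap contents. A minimal sketch of the pattern, with an illustrative structure layout and helper name:

	#include <linux/slab.h>
	#include <linux/uaccess.h>

	struct px_info { u64 header; u64 payload[8]; };	/* illustrative layout */

	static int report_to_user(void __user *uptr, u64 header)
	{
		struct px_info *info;
		int ret = 0;

		/* Zero-initialise so unwritten fields cannot leak old heap data. */
		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		info->header = header;		/* payload[] may stay untouched */

		if (copy_to_user(uptr, info, sizeof(*info)))
			ret = -EFAULT;

		kfree(info);
		return ret;
	}
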
diff --git a/drivers/virt/coco/sev-guest/sev-guest.c b/drivers/virt/coco/sev-guest/sev-guest.c
index 264b6523fe52..70fbc9a3e703 100644
--- a/drivers/virt/coco/sev-guest/sev-guest.c
+++ b/drivers/virt/coco/sev-guest/sev-guest.c
@@ -38,12 +38,6 @@ struct snp_guest_dev {
struct miscdevice misc;
struct snp_msg_desc *msg_desc;
-
- union {
- struct snp_report_req report;
- struct snp_derived_key_req derived_key;
- struct snp_ext_report_req ext_report;
- } req;
};
/*
@@ -71,7 +65,7 @@ struct snp_req_resp {
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_report_req *report_req = &snp_dev->req.report;
+ struct snp_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
@@ -80,6 +74,10 @@ static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_io
if (!arg->req_data || !arg->resp_data)
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -116,7 +114,7 @@ e_free:
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
- struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
+ struct snp_derived_key_req *derived_key_req __free(kfree) = NULL;
struct snp_derived_key_resp derived_key_resp = {0};
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_guest_req req = {};
@@ -136,6 +134,10 @@ static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_reque
if (sizeof(buf) < resp_len)
return -ENOMEM;
+ derived_key_req = kzalloc(sizeof(*derived_key_req), GFP_KERNEL_ACCOUNT);
+ if (!derived_key_req)
+ return -ENOMEM;
+
if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
sizeof(*derived_key_req)))
return -EFAULT;
@@ -168,16 +170,21 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
struct snp_req_resp *io)
{
- struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
+ struct snp_ext_report_req *report_req __free(kfree) = NULL;
struct snp_msg_desc *mdesc = snp_dev->msg_desc;
struct snp_report_resp *report_resp;
struct snp_guest_req req = {};
int ret, npages = 0, resp_len;
sockptr_t certs_address;
+ struct page *page;
if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
return -EINVAL;
+ report_req = kzalloc(sizeof(*report_req), GFP_KERNEL_ACCOUNT);
+ if (!report_req)
+ return -ENOMEM;
+
if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
return -EFAULT;
@@ -203,8 +210,20 @@ static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_reques
* the host. If host does not supply any certs in it, then copy
* zeros to indicate that certificate data was not provided.
*/
- memset(mdesc->certs_data, 0, report_req->certs_len);
npages = report_req->certs_len >> PAGE_SHIFT;
+ page = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO,
+ get_order(report_req->certs_len));
+ if (!page)
+ return -ENOMEM;
+
+ req.certs_data = page_address(page);
+ ret = set_memory_decrypted((unsigned long)req.certs_data, npages);
+ if (ret) {
+ pr_err("failed to mark page shared, ret=%d\n", ret);
+ __free_pages(page, get_order(report_req->certs_len));
+ return -EFAULT;
+ }
+
cmd:
/*
* The intermediate response buffer is used while decrypting the
@@ -213,10 +232,12 @@ cmd:
*/
resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
- if (!report_resp)
- return -ENOMEM;
+ if (!report_resp) {
+ ret = -ENOMEM;
+ goto e_free_data;
+ }
- mdesc->input.data_npages = npages;
+ req.input.data_npages = npages;
req.msg_version = arg->msg_version;
req.msg_type = SNP_MSG_REPORT_REQ;
@@ -231,7 +252,7 @@ cmd:
/* If certs length is invalid then copy the returned length */
if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
- report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;
+ report_req->certs_len = req.input.data_npages << PAGE_SHIFT;
if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
ret = -EFAULT;
@@ -240,7 +261,7 @@ cmd:
if (ret)
goto e_free;
- if (npages && copy_to_sockptr(certs_address, mdesc->certs_data, report_req->certs_len)) {
+ if (npages && copy_to_sockptr(certs_address, req.certs_data, report_req->certs_len)) {
ret = -EFAULT;
goto e_free;
}
@@ -250,6 +271,13 @@ cmd:
e_free:
kfree(report_resp);
+e_free_data:
+ if (npages) {
+ if (set_memory_encrypted((unsigned long)req.certs_data, npages))
+ WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
+ else
+ __free_pages(page, get_order(report_req->certs_len));
+ }
return ret;
}
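
The sev-guest hunks above replace the long-lived per-device request union with per-call allocations annotated with __free(kfree), the scoped-cleanup helper from <linux/cleanup.h>, so each buffer is freed automatically on every return path. A minimal sketch of the pattern under an illustrative request type and ioctl handler (not the driver's actual messages):

	#include <linux/cleanup.h>
	#include <linux/slab.h>
	#include <linux/uaccess.h>

	struct demo_req { u8 msg[64]; };	/* illustrative request layout */

	static int demo_handle_ioctl(void __user *uptr)
	{
		/* Freed automatically when req goes out of scope, on any return. */
		struct demo_req *req __free(kfree) = kzalloc(sizeof(*req),
							     GFP_KERNEL_ACCOUNT);
		if (!req)
			return -ENOMEM;

		if (copy_from_user(req, uptr, sizeof(*req)))
			return -EFAULT;	/* no explicit kfree() needed here */

		/* ... issue the guest request using req ... */
		return 0;
	}
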
diff --git a/drivers/virt/vboxguest/Kconfig b/drivers/virt/vboxguest/Kconfig
index 11b153e7454e..eaba28c95e73 100644
--- a/drivers/virt/vboxguest/Kconfig
+++ b/drivers/virt/vboxguest/Kconfig
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config VBOXGUEST
tristate "Virtual Box Guest integration support"
- depends on (ARM64 || X86) && PCI && INPUT
+ depends on (ARM64 || X86 || COMPILE_TEST) && PCI && INPUT
+ depends on HAS_IOPORT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index a337edcf8faf..1f65795cf5d7 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -74,19 +74,21 @@ static inline phys_addr_t xen_dma_to_phys(struct device *dev,
return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
}
+static inline bool range_requires_alignment(phys_addr_t p, size_t size)
+{
+ phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
+ phys_addr_t bus_addr = pfn_to_bfn(XEN_PFN_DOWN(p)) << XEN_PAGE_SHIFT;
+
+ return IS_ALIGNED(p, algn) && !IS_ALIGNED(bus_addr, algn);
+}
+
static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
unsigned long next_bfn, xen_pfn = XEN_PFN_DOWN(p);
unsigned int i, nr_pages = XEN_PFN_UP(xen_offset_in_page(p) + size);
- phys_addr_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);
next_bfn = pfn_to_bfn(xen_pfn);
- /* If buffer is physically aligned, ensure DMA alignment. */
- if (IS_ALIGNED(p, algn) &&
- !IS_ALIGNED((phys_addr_t)next_bfn << XEN_PAGE_SHIFT, algn))
- return 1;
-
for (i = 1; i < nr_pages; i++)
if (pfn_to_bfn(++xen_pfn) != ++next_bfn)
return 1;
@@ -111,7 +113,7 @@ static struct io_tlb_pool *xen_swiotlb_find_pool(struct device *dev,
}
#ifdef CONFIG_X86
-int xen_swiotlb_fixup(void *buf, unsigned long nslabs)
+int __init xen_swiotlb_fixup(void *buf, unsigned long nslabs)
{
int rc;
unsigned int order = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT);
@@ -156,7 +158,8 @@ xen_swiotlb_alloc_coherent(struct device *dev, size_t size,
*dma_handle = xen_phys_to_dma(dev, phys);
if (*dma_handle + size - 1 > dma_mask ||
- range_straddles_page_boundary(phys, size)) {
+ range_straddles_page_boundary(phys, size) ||
+ range_requires_alignment(phys, size)) {
if (xen_create_contiguous_region(phys, order, fls64(dma_mask),
dma_handle) != 0)
goto out_free_pages;
@@ -182,7 +185,8 @@ xen_swiotlb_free_coherent(struct device *dev, size_t size, void *vaddr,
size = ALIGN(size, XEN_PAGE_SIZE);
if (WARN_ON_ONCE(dma_handle + size - 1 > dev->coherent_dma_mask) ||
- WARN_ON_ONCE(range_straddles_page_boundary(phys, size)))
+ WARN_ON_ONCE(range_straddles_page_boundary(phys, size) ||
+ range_requires_alignment(phys, size)))
return;
if (TestClearPageXenRemapped(virt_to_page(vaddr)))
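
The new range_requires_alignment() helper flags buffers that are naturally aligned in physical address space but not at their machine (bus) address, so coherent allocations and frees treat them like a page-boundary straddle and go through xen_create_contiguous_region(). A stand-alone arithmetic check that mirrors the helper under assumed example values (4 KiB pages, a 16 KiB buffer, and a made-up bus address; this is not kernel code):

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	static int get_order(size_t size)	/* smallest order covering size */
	{
		int order = 0;

		while ((1ULL << (order + PAGE_SHIFT)) < size)
			order++;
		return order;
	}

	int main(void)
	{
		uint64_t p = 0x40000;		/* 16 KiB-aligned physical address */
		uint64_t bus = 0x13000;		/* assumed bus address of its frame */
		size_t size = 16 * 1024;
		uint64_t algn = 1ULL << (get_order(size) + PAGE_SHIFT);

		/* Aligned physically but not on the bus -> needs a contiguous region. */
		printf("needs contiguous region: %d\n",
		       IS_ALIGNED(p, algn) && !IS_ALIGNED(bus, algn));
		return 0;
	}
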