Diffstat (limited to 'drivers')
-rw-r--r-- drivers/Kconfig | 4
-rw-r--r-- drivers/Makefile | 1
-rw-r--r-- drivers/acpi/arm64/iort.c | 6
-rw-r--r-- drivers/acpi/ec.c | 4
-rw-r--r-- drivers/acpi/nfit/core.c | 14
-rw-r--r-- drivers/acpi/nfit/nfit.h | 13
-rw-r--r-- drivers/acpi/numa/srat.c | 41
-rw-r--r-- drivers/ata/ahci.c | 29
-rw-r--r-- drivers/ata/ahci.h | 1
-rw-r--r-- drivers/ata/ahci_imx.c | 2
-rw-r--r-- drivers/ata/libata-pmp.c | 1
-rw-r--r-- drivers/base/memory.c | 130
-rw-r--r-- drivers/block/loop.c | 49
-rw-r--r-- drivers/block/rbd.c | 215
-rw-r--r-- drivers/block/xen-blkfront.c | 17
-rw-r--r-- drivers/char/hw_random/omap3-rom-rng.c | 4
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 18
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 2
-rw-r--r-- drivers/char/ipmi/kcs_bmc_aspeed.c | 151
-rw-r--r-- drivers/cpuidle/cpuidle-haltpoll.c | 4
-rw-r--r-- drivers/crypto/chelsio/chcr_ktls.c | 1
-rw-r--r-- drivers/crypto/hisilicon/Kconfig | 2
-rw-r--r-- drivers/crypto/marvell/octeontx/otx_cptvf_algs.c | 8
-rw-r--r-- drivers/dax/bus.c | 4
-rw-r--r-- drivers/dax/super.c | 28
-rw-r--r-- drivers/dma-buf/Kconfig | 11
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 1
-rw-r--r-- drivers/firmware/edd.c | 6
-rw-r--r-- drivers/firmware/efi/libstub/arm64-stub.c | 8
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 20
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 27
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 6
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 13
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 53
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 18
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 40
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dc_features.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/include/dal_asic_id.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 34
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 28
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 39
-rw-r--r-- drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 1
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 3
-rw-r--r-- drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 62
-rw-r--r-- drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 18
-rw-r--r-- drivers/gpu/drm/amd/powerplay/renoir_ppt.h | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 24
-rw-r--r-- drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 14
-rw-r--r-- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 33
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 8
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 37
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp.c | 29
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 3
-rw-r--r-- drivers/gpu/drm/i915/display/intel_ddi.c | 11
-rw-r--r-- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 14
-rw-r--r-- drivers/gpu/drm/i915/gt/intel_ggtt.c | 37
-rw-r--r-- drivers/gpu/drm/i915/gvt/cmd_parser.c | 16
-rw-r--r-- drivers/gpu/drm/i915/gvt/display.c | 6
-rw-r--r-- drivers/gpu/drm/i915/gvt/handlers.c | 8
-rw-r--r-- drivers/gpu/drm/i915/gvt/scheduler.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_memcpy.c | 5
-rw-r--r-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/dac.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/hw.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/base507c.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/core507d.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/corec37d.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/curs507a.c | 21
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/cursc37a.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/disp.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/ovly827e.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv50/wndw.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/device.h | 21
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/timer.h | 35
-rw-r--r-- drivers/gpu/drm/nouveau/include/nvif/user.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_debugfs.c | 20
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 63
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_svm.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/Kbuild | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/device.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/timer.c | 56
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/userc361.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c | 26
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 11
-rw-r--r-- drivers/gpu/drm/rockchip/analogix_dp-rockchip.c | 36
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_vm.c | 63
-rw-r--r-- drivers/gpu/drm/vboxvideo/vbox_drv.c | 4
-rw-r--r-- drivers/gpu/drm/vc4/vc4_hdmi.c | 20
-rw-r--r-- drivers/gpu/drm/virtio/virtgpu_object.c | 14
-rw-r--r-- drivers/gpu/drm/xen/xen_drm_front.c | 2
-rw-r--r-- drivers/hv/hv_balloon.c | 25
-rw-r--r-- drivers/hwmon/dell-smm-hwmon.c | 4
-rw-r--r-- drivers/ide/ide-scan-pci.c | 8
-rw-r--r-- drivers/iio/Kconfig | 1
-rw-r--r-- drivers/iio/Makefile | 1
-rw-r--r-- drivers/iio/accel/cros_ec_accel_legacy.c | 8
-rw-r--r-- drivers/iio/adc/Kconfig | 10
-rw-r--r-- drivers/iio/adc/Makefile | 1
-rw-r--r-- drivers/iio/adc/rn5t618-adc.c | 256
-rw-r--r-- drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c | 3
-rw-r--r-- drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c | 13
-rw-r--r-- drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 375
-rw-r--r-- drivers/iio/industrialio-core.c | 8
-rw-r--r-- drivers/iio/light/Kconfig | 10
-rw-r--r-- drivers/iio/light/Makefile | 1
-rw-r--r-- drivers/iio/light/cros_ec_light_prox.c | 15
-rw-r--r-- drivers/iio/light/iqs621-als.c | 617
-rw-r--r-- drivers/iio/position/Kconfig | 19
-rw-r--r-- drivers/iio/position/Makefile | 7
-rw-r--r-- drivers/iio/position/iqs624-pos.c | 284
-rw-r--r-- drivers/iio/pressure/cros_ec_baro.c | 14
-rw-r--r-- drivers/iio/temperature/Kconfig | 10
-rw-r--r-- drivers/iio/temperature/Makefile | 1
-rw-r--r-- drivers/iio/temperature/iqs620at-temp.c | 97
-rw-r--r-- drivers/input/keyboard/Kconfig | 10
-rw-r--r-- drivers/input/keyboard/Makefile | 1
-rw-r--r-- drivers/input/keyboard/iqs62x-keys.c | 335
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 11
-rw-r--r-- drivers/input/touchscreen/elants_i2c.c | 1
-rw-r--r-- drivers/input/touchscreen/goodix.c | 608
-rw-r--r-- drivers/input/touchscreen/of_touchscreen.c | 35
-rw-r--r-- drivers/iommu/Kconfig | 21
-rw-r--r-- drivers/iommu/amd_iommu_types.h | 2
-rw-r--r-- drivers/iommu/arm-smmu-v3.c | 214
-rw-r--r-- drivers/iommu/arm-smmu.c | 55
-rw-r--r-- drivers/iommu/intel-iommu.c | 3
-rw-r--r-- drivers/iommu/intel-svm.c | 9
-rw-r--r-- drivers/iommu/iommu.c | 46
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 7
-rw-r--r-- drivers/iommu/mtk_iommu.c | 13
-rw-r--r-- drivers/iommu/mtk_iommu_v1.c | 14
-rw-r--r-- drivers/iommu/omap-iommu.c | 10
-rw-r--r-- drivers/iommu/omap-iopgtable.h | 3
-rw-r--r-- drivers/iommu/qcom_iommu.c | 63
-rw-r--r-- drivers/iommu/tegra-gart.c | 2
-rw-r--r-- drivers/iommu/virtio-iommu.c | 42
-rw-r--r-- drivers/leds/Kconfig | 11
-rw-r--r-- drivers/leds/Makefile | 99
-rw-r--r-- drivers/leds/led-class.c | 2
-rw-r--r-- drivers/leds/leds-bd2802.c | 2
-rw-r--r-- drivers/leds/leds-ip30.c | 86
-rw-r--r-- drivers/leds/leds-is31fl32xx.c | 2
-rw-r--r-- drivers/leds/leds-lm3532.c | 2
-rw-r--r-- drivers/leds/leds-lm3697.c | 2
-rw-r--r-- drivers/leds/leds-ns2.c | 99
-rw-r--r-- drivers/leds/leds-pwm.c | 55
-rw-r--r-- drivers/md/dm-linear.c | 18
-rw-r--r-- drivers/md/dm-log-writes.c | 17
-rw-r--r-- drivers/md/dm-stripe.c | 23
-rw-r--r-- drivers/md/dm.c | 32
-rw-r--r-- drivers/mfd/Kconfig | 23
-rw-r--r-- drivers/mfd/Makefile | 1
-rw-r--r-- drivers/mfd/aat2870-core.c | 2
-rw-r--r-- drivers/mfd/cros_ec_dev.c | 2
-rw-r--r-- drivers/mfd/da9062-core.c | 44
-rw-r--r-- drivers/mfd/dln2.c | 30
-rw-r--r-- drivers/mfd/intel-lpss-pci.c | 31
-rw-r--r-- drivers/mfd/iqs62x.c | 1063
-rw-r--r-- drivers/mfd/omap-usb-host.c | 2
-rw-r--r-- drivers/mfd/omap-usb-tll.c | 4
-rw-r--r-- drivers/mfd/qcom-pm8xxx.c | 2
-rw-r--r-- drivers/mfd/rk808.c | 139
-rw-r--r-- drivers/mfd/rn5t618.c | 109
-rw-r--r-- drivers/mfd/sprd-sc27xx-spi.c | 52
-rw-r--r-- drivers/misc/lkdtm/bugs.c | 75
-rw-r--r-- drivers/misc/lkdtm/core.c | 3
-rw-r--r-- drivers/misc/lkdtm/lkdtm.h | 3
-rw-r--r-- drivers/misc/mic/Kconfig | 4
-rw-r--r-- drivers/mtd/ubi/fastmap-wl.c | 15
-rw-r--r-- drivers/mtd/ubi/ubi-media.h | 2
-rw-r--r-- drivers/mtd/ubi/wl.c | 3
-rw-r--r-- drivers/net/caif/Kconfig | 4
-rw-r--r-- drivers/net/can/slcan.c | 4
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 9
-rw-r--r-- drivers/net/dsa/mt7530.c | 3
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 3
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 5
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c | 23
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h | 1
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 2
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 31
-rw-r--r-- drivers/net/ethernet/realtek/r8169_main.c | 29
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 11
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 17
-rw-r--r-- drivers/net/macsec.c | 3
-rw-r--r-- drivers/net/phy/at803x.c | 4
-rw-r--r-- drivers/net/phy/micrel.c | 7
-rw-r--r-- drivers/net/tun.c | 10
-rw-r--r-- drivers/net/usb/pegasus.c | 38
-rw-r--r-- drivers/net/wimax/i2400m/driver.c | 7
-rw-r--r-- drivers/nvdimm/bus.c | 6
-rw-r--r-- drivers/nvdimm/dimm.c | 2
-rw-r--r-- drivers/nvdimm/dimm_devs.c | 95
-rw-r--r-- drivers/nvdimm/e820.c | 18
-rw-r--r-- drivers/nvdimm/label.h | 2
-rw-r--r-- drivers/nvdimm/namespace_devs.c | 28
-rw-r--r-- drivers/nvdimm/nd.h | 7
-rw-r--r-- drivers/nvdimm/of_pmem.c | 4
-rw-r--r-- drivers/nvdimm/pfn.h | 12
-rw-r--r-- drivers/nvdimm/pfn_devs.c | 40
-rw-r--r-- drivers/nvdimm/pmem.c | 101
-rw-r--r-- drivers/nvdimm/region_devs.c | 130
-rw-r--r-- drivers/nvme/host/core.c | 34
-rw-r--r-- drivers/nvme/host/fc.c | 14
-rw-r--r-- drivers/nvme/host/multipath.c | 4
-rw-r--r-- drivers/nvme/host/rdma.c | 2
-rw-r--r-- drivers/nvme/host/tcp.c | 18
-rw-r--r-- drivers/nvme/target/configfs.c | 10
-rw-r--r-- drivers/nvme/target/fc.c | 2
-rw-r--r-- drivers/nvme/target/fcloop.c | 77
-rw-r--r-- drivers/nvme/target/rdma.c | 205
-rw-r--r-- drivers/parisc/eisa.c | 8
-rw-r--r-- drivers/pci/ats.c | 4
-rw-r--r-- drivers/pcmcia/cs_internal.h | 2
-rw-r--r-- drivers/pcmcia/omap_cf.c | 2
-rw-r--r-- drivers/pcmcia/rsrc_nonstatic.c | 6
-rw-r--r-- drivers/pcmcia/sa1100_simpad.c | 6
-rw-r--r-- drivers/pcmcia/soc_common.h | 2
-rw-r--r-- drivers/pcmcia/yenta_socket.c | 10
-rw-r--r-- drivers/platform/chrome/Kconfig | 27
-rw-r--r-- drivers/platform/chrome/Makefile | 5
-rw-r--r-- drivers/platform/chrome/chromeos_laptop.c | 2
-rw-r--r-- drivers/platform/chrome/cros_ec.c | 32
-rw-r--r-- drivers/platform/chrome/cros_ec_chardev.c | 4
-rw-r--r-- drivers/platform/chrome/cros_ec_lightbar.c | 50
-rw-r--r-- drivers/platform/chrome/cros_ec_proto.c | 9
-rw-r--r-- drivers/platform/chrome/cros_ec_rpmsg.c | 16
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub.c | 111
-rw-r--r-- drivers/platform/chrome/cros_ec_sensorhub_ring.c | 1046
-rw-r--r-- drivers/platform/chrome/cros_ec_spi.c | 6
-rw-r--r-- drivers/platform/chrome/cros_ec_sysfs.c | 36
-rw-r--r-- drivers/platform/chrome/cros_ec_typec.c | 357
-rw-r--r-- drivers/platform/chrome/cros_ec_vbc.c | 4
-rw-r--r-- drivers/platform/chrome/cros_usbpd_notify.c | 306
-rw-r--r-- drivers/platform/chrome/wilco_ec/event.c | 4
-rw-r--r-- drivers/platform/chrome/wilco_ec/properties.c | 3
-rw-r--r-- drivers/platform/chrome/wilco_ec/sysfs.c | 4
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 4
-rw-r--r-- drivers/platform/x86/dell-rbtn.c | 4
-rw-r--r-- drivers/platform/x86/dell-rbtn.h | 2
-rw-r--r-- drivers/platform/x86/dell-smbios-base.c | 4
-rw-r--r-- drivers/platform/x86/dell-smbios-smm.c | 2
-rw-r--r-- drivers/platform/x86/dell-smbios.h | 2
-rw-r--r-- drivers/platform/x86/dell-smo8800.c | 2
-rw-r--r-- drivers/platform/x86/dell-wmi.c | 4
-rw-r--r-- drivers/power/supply/Kconfig | 2
-rw-r--r-- drivers/power/supply/bq2415x_charger.c | 4
-rw-r--r-- drivers/power/supply/bq27xxx_battery.c | 2
-rw-r--r-- drivers/power/supply/cros_usbpd-charger.c | 50
-rw-r--r-- drivers/power/supply/isp1704_charger.c | 2
-rw-r--r-- drivers/power/supply/rx51_battery.c | 4
-rw-r--r-- drivers/ps3/sys-manager-core.c | 2
-rw-r--r-- drivers/pwm/Kconfig | 58
-rw-r--r-- drivers/pwm/core.c | 135
-rw-r--r-- drivers/pwm/pwm-bcm2835.c | 1
-rw-r--r-- drivers/pwm/pwm-imx-tpm.c | 2
-rw-r--r-- drivers/pwm/pwm-imx27.c | 32
-rw-r--r-- drivers/pwm/pwm-jz4740.c | 162
-rw-r--r-- drivers/pwm/pwm-meson.c | 4
-rw-r--r-- drivers/pwm/pwm-mxs.c | 1
-rw-r--r-- drivers/pwm/pwm-omap-dmtimer.c | 219
-rw-r--r-- drivers/pwm/pwm-pca9685.c | 97
-rw-r--r-- drivers/pwm/pwm-rcar.c | 10
-rw-r--r-- drivers/pwm/pwm-renesas-tpu.c | 11
-rw-r--r-- drivers/pwm/pwm-sun4i.c | 13
-rw-r--r-- drivers/pwm/pwm-tegra.c | 6
-rw-r--r-- drivers/rtc/Kconfig | 10
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-rc5t619.c | 444
-rw-r--r-- drivers/s390/block/dcssblk.c | 20
-rw-r--r-- drivers/s390/cio/device.c | 13
-rw-r--r-- drivers/s390/cio/qdio.h | 1
-rw-r--r-- drivers/s390/cio/qdio_debug.c | 16
-rw-r--r-- drivers/s390/cio/qdio_debug.h | 3
-rw-r--r-- drivers/s390/cio/qdio_main.c | 63
-rw-r--r-- drivers/s390/cio/qdio_setup.c | 10
-rw-r--r-- drivers/s390/cio/qdio_thinint.c | 28
-rw-r--r-- drivers/s390/cio/vfio_ccw_drv.c | 5
-rw-r--r-- drivers/s390/net/qeth_core.h | 5
-rw-r--r-- drivers/s390/net/qeth_core_main.c | 65
-rw-r--r-- drivers/s390/scsi/zfcp_erp.c | 10
-rw-r--r-- drivers/s390/scsi/zfcp_fsf.c | 23
-rw-r--r-- drivers/s390/scsi/zfcp_qdio.c | 51
-rw-r--r-- drivers/scsi/aacraid/commsup.c | 7
-rw-r--r-- drivers/scsi/aic7xxx/aic7xxx_core.c | 23
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc.h | 13
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 8
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_hwi.c | 103
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 34
-rw-r--r-- drivers/scsi/constants.c | 2
-rw-r--r-- drivers/scsi/libfc/fc_rport.c | 10
-rw-r--r-- drivers/scsi/lpfc/lpfc.h | 25
-rw-r--r-- drivers/scsi/lpfc/lpfc_attr.c | 73
-rw-r--r-- drivers/scsi/lpfc/lpfc_crtn.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.c | 333
-rw-r--r-- drivers/scsi/lpfc/lpfc_debugfs.h | 3
-rw-r--r-- drivers/scsi/lpfc/lpfc_hw.h | 20
-rw-r--r-- drivers/scsi/lpfc/lpfc_init.c | 106
-rw-r--r-- drivers/scsi/lpfc/lpfc_mbox.c | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvme.c | 149
-rw-r--r-- drivers/scsi/lpfc/lpfc_nvmet.c | 62
-rw-r--r-- drivers/scsi/lpfc/lpfc_scsi.c | 90
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.c | 47
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli.h | 2
-rw-r--r-- drivers/scsi/lpfc/lpfc_sli4.h | 19
-rw-r--r-- drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8
-rw-r--r-- drivers/scsi/qla2xxx/qla_nvme.c | 1
-rw-r--r-- drivers/scsi/scsi_transport_iscsi.c | 4
-rw-r--r-- drivers/scsi/sr.c | 4
-rw-r--r-- drivers/scsi/ufs/ufs-mediatek.c | 13
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 87
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 15
-rw-r--r-- drivers/soc/Kconfig | 1
-rw-r--r-- drivers/soc/Makefile | 1
-rw-r--r-- drivers/soc/kendryte/Kconfig | 14
-rw-r--r-- drivers/soc/kendryte/Makefile | 3
-rw-r--r-- drivers/soc/kendryte/k210-sysctl.c | 248
-rw-r--r-- drivers/staging/gasket/gasket_core.c | 2
-rw-r--r-- drivers/target/target_core_xcopy.c | 187
-rw-r--r-- drivers/target/target_core_xcopy.h | 9
-rw-r--r-- drivers/thermal/Kconfig | 42
-rw-r--r-- drivers/thermal/Makefile | 3
-rw-r--r-- drivers/thermal/cpufreq_cooling.c | 5
-rw-r--r-- drivers/thermal/imx8mm_thermal.c | 236
-rw-r--r-- drivers/thermal/imx_sc_thermal.c | 148
-rw-r--r-- drivers/thermal/imx_thermal.c | 16
-rw-r--r-- drivers/thermal/intel/int340x_thermal/int3400_thermal.c | 2
-rw-r--r-- drivers/thermal/intel/int340x_thermal/processor_thermal_device.c | 5
-rw-r--r-- drivers/thermal/of-thermal.c | 62
-rw-r--r-- drivers/thermal/qcom/tsens-8960.c | 4
-rw-r--r-- drivers/thermal/qcom/tsens-common.c | 194
-rw-r--r-- drivers/thermal/qcom/tsens-v0_1.c | 6
-rw-r--r-- drivers/thermal/qcom/tsens-v1.c | 6
-rw-r--r-- drivers/thermal/qcom/tsens-v2.c | 24
-rw-r--r-- drivers/thermal/qcom/tsens.c | 65
-rw-r--r-- drivers/thermal/qcom/tsens.h | 105
-rw-r--r-- drivers/thermal/qoriq_thermal.c | 40
-rw-r--r-- drivers/thermal/rcar_gen3_thermal.c | 31
-rw-r--r-- drivers/thermal/rcar_thermal.c | 53
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 4
-rw-r--r-- drivers/thermal/sprd_thermal.c | 552
-rw-r--r-- drivers/thermal/st/stm_thermal.c | 3
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-bandgap.c | 44
-rw-r--r-- drivers/thermal/ti-soc-thermal/ti-bandgap.h | 4
-rw-r--r-- drivers/vdpa/Kconfig | 37
-rw-r--r-- drivers/vdpa/Makefile | 4
-rw-r--r-- drivers/vdpa/ifcvf/Makefile | 3
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.c | 389
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_base.h | 118
-rw-r--r-- drivers/vdpa/ifcvf/ifcvf_main.c | 435
-rw-r--r-- drivers/vdpa/vdpa.c | 180
-rw-r--r-- drivers/vdpa/vdpa_sim/Makefile | 2
-rw-r--r-- drivers/vdpa/vdpa_sim/vdpa_sim.c | 629
-rw-r--r-- drivers/vhost/Kconfig | 45
-rw-r--r-- drivers/vhost/Kconfig.vringh | 6
-rw-r--r-- drivers/vhost/Makefile | 6
-rw-r--r-- drivers/vhost/iotlb.c | 177
-rw-r--r-- drivers/vhost/net.c | 5
-rw-r--r-- drivers/vhost/scsi.c | 2
-rw-r--r-- drivers/vhost/vdpa.c | 883
-rw-r--r-- drivers/vhost/vhost.c | 233
-rw-r--r-- drivers/vhost/vhost.h | 45
-rw-r--r-- drivers/vhost/vringh.c | 421
-rw-r--r-- drivers/vhost/vsock.c | 2
-rw-r--r-- drivers/video/backlight/corgi_lcd.c | 68
-rw-r--r-- drivers/video/backlight/pwm_bl.c | 19
-rw-r--r-- drivers/video/fbdev/core/fbcon.c | 3
-rw-r--r-- drivers/virtio/Kconfig | 14
-rw-r--r-- drivers/virtio/Makefile | 1
-rw-r--r-- drivers/virtio/virtio_balloon.c | 180
-rw-r--r-- drivers/virtio/virtio_vdpa.c | 396
-rw-r--r-- drivers/watchdog/Kconfig | 8
-rw-r--r-- drivers/watchdog/Makefile | 1
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 37
-rw-r--r-- drivers/watchdog/imx7ulp_wdt.c | 1
-rw-r--r-- drivers/watchdog/imx_sc_wdt.c | 2
-rw-r--r-- drivers/watchdog/npcm_wdt.c | 19
-rw-r--r-- drivers/watchdog/orion_wdt.c | 2
-rw-r--r-- drivers/watchdog/pm8916_wdt.c | 25
-rw-r--r-- drivers/watchdog/qcom-wdt.c | 34
-rw-r--r-- drivers/watchdog/rti_wdt.c | 255
-rw-r--r-- drivers/watchdog/watchdog_core.c | 12
-rw-r--r-- drivers/watchdog/watchdog_dev.c | 1
-rw-r--r-- drivers/watchdog/wm831x_wdt.c | 27
-rw-r--r-- drivers/watchdog/ziirave_wdt.c | 2
-rw-r--r-- drivers/xen/events/events_2l.c | 16
-rw-r--r-- drivers/xen/events/events_base.c | 93
-rw-r--r-- drivers/xen/events/events_fifo.c | 22
-rw-r--r-- drivers/xen/events/events_internal.h | 30
-rw-r--r-- drivers/xen/evtchn.c | 13
-rw-r--r-- drivers/xen/gntdev-common.h | 3
-rw-r--r-- drivers/xen/gntdev.c | 2
-rw-r--r-- drivers/xen/pvcalls-back.c | 5
-rw-r--r-- drivers/xen/pvcalls-front.c | 15
-rw-r--r-- drivers/xen/xen-pciback/xenbus.c | 7
-rw-r--r-- drivers/xen/xen-scsiback.c | 3
-rw-r--r-- drivers/xen/xenbus/xenbus_client.c | 6
429 files changed, 16577 insertions, 3880 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 74920bda3fda..dcecc9f6e33f 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,10 @@ source "drivers/virt/Kconfig"
source "drivers/virtio/Kconfig"
+source "drivers/vdpa/Kconfig"
+
+source "drivers/vhost/Kconfig"
+
source "drivers/hv/Kconfig"
source "drivers/xen/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 7646549a1e93..c0cd1b9075e3 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_DMADEVICES) += dma/
obj-y += soc/
obj-$(CONFIG_VIRTIO) += virtio/
+obj-$(CONFIG_VDPA) += vdpa/
obj-$(CONFIG_XEN) += xen/
# regulators early, since some subsystems rely on them to initialize
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index ed3d2d1a7ae9..7d04424189df 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1015,6 +1015,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
return ops;
if (dev_is_pci(dev)) {
+ struct iommu_fwspec *fwspec;
struct pci_bus *bus = to_pci_dev(dev)->bus;
struct iort_pci_alias_info info = { .dev = dev };
@@ -1027,8 +1028,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
err = pci_for_each_dma_alias(to_pci_dev(dev),
iort_pci_iommu_init, &info);
- if (!err && iort_pci_rc_supports_ats(node))
- dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
+ fwspec = dev_iommu_fwspec_get(dev);
+ if (fwspec && iort_pci_rc_supports_ats(node))
+ fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
} else {
int i = 0;
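
The hunk above replaces a direct dev->iommu_fwspec dereference with the dev_iommu_fwspec_get() accessor, which also guards the case where no fwspec was ever attached. A minimal sketch of the pattern, not part of the commit (set_rc_ats is a hypothetical helper):

	static void set_rc_ats(struct device *dev)
	{
		struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

		if (fwspec)	/* NULL if no IOMMU was bound to the device */
			fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS;
	}
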
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 4816df520f72..b4c0152e92aa 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1589,8 +1589,8 @@ static int acpi_ec_add(struct acpi_device *device)
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
- if ((boot_ec && boot_ec->handle == device->handle) ||
- !strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+ if (boot_ec && (boot_ec->handle == device->handle ||
+ !strcmp(acpi_device_hid(device), ACPI_ECDT_HID))) {
/* Fast path: this device corresponds to the boot EC. */
ec = boot_ec;
} else {
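
The regrouped condition matters when boot_ec is NULL: under the old grouping, an ECDT-declared device could still take the fast path and assign a NULL boot_ec to ec. A standalone illustration of the two groupings, not from the commit (helper names hypothetical):

	#include <stdbool.h>

	/* old: the HID match alone selects the fast path, even if boot_ec == NULL */
	static bool fast_path_old(const void *boot_ec, bool same_handle, bool ecdt_hid)
	{
		return (boot_ec && same_handle) || ecdt_hid;
	}

	/* new: boot_ec must exist before either sub-test can select the fast path */
	static bool fast_path_new(const void *boot_ec, bool same_handle, bool ecdt_hid)
	{
		return boot_ec && (same_handle || ecdt_hid);
	}
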
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index a3320f93616d..fa4500f9cfd1 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -360,7 +360,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle)
static u8 nfit_dsm_revid(unsigned family, unsigned func)
{
- static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = {
+ static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = {
[NVDIMM_FAMILY_INTEL] = {
[NVDIMM_INTEL_GET_MODES] = 2,
[NVDIMM_INTEL_GET_FWINFO] = 2,
@@ -386,7 +386,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func)
if (family > NVDIMM_FAMILY_MAX)
return 0;
- if (func > 31)
+ if (func > NVDIMM_CMD_MAX)
return 0;
id = revid_table[family][func];
if (id == 0)
@@ -492,7 +492,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* Check for a valid command. For ND_CMD_CALL, we also have to
* make sure that the DSM function is supported.
*/
- if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
+ if (cmd == ND_CMD_CALL &&
+ (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask)))
return -ENOTTY;
else if (!test_bit(cmd, &cmd_mask))
return -ENOTTY;
@@ -2026,8 +2027,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
continue;
}
- if (nfit_mem->bdw && nfit_mem->memdev_pmem)
+ if (nfit_mem->bdw && nfit_mem->memdev_pmem) {
set_bit(NDD_ALIASING, &flags);
+ set_bit(NDD_LABELING, &flags);
+ }
/* collate flags across all memdevs for this dimm */
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -3492,7 +3495,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
if (nvdimm && cmd == ND_CMD_CALL &&
call_pkg->nd_family == NVDIMM_FAMILY_INTEL) {
func = call_pkg->nd_command;
- if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
+ if (func > NVDIMM_CMD_MAX ||
+ (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK)
return -EOPNOTSUPP;
}
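
The recurring fix in this file is bounds-checking the caller-supplied DSM function number before it is used as a bit or table index: only bits 0..31 of the single-word dsm_mask are ever populated, and each revid_table row now has exactly NVDIMM_CMD_MAX+1 entries. A condensed sketch of the check, not from the commit (validate_func is hypothetical):

	static int validate_func(unsigned int func, unsigned long *dsm_mask)
	{
		if (func > NVDIMM_CMD_MAX)	/* would index past the mask/table */
			return -ENOTTY;
		if (!test_bit(func, dsm_mask))	/* function not advertised */
			return -ENOTTY;
		return 0;
	}
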
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 24241941181c..f5525f8bb770 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -34,6 +34,7 @@
| ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED)
#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV
+#define NVDIMM_CMD_MAX 31
#define NVDIMM_STANDARD_CMDMASK \
(1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \
@@ -144,32 +145,32 @@ struct nfit_spa {
unsigned long ars_state;
u32 clear_err_unit;
u32 max_ars;
- struct acpi_nfit_system_address spa[0];
+ struct acpi_nfit_system_address spa[];
};
struct nfit_dcr {
struct list_head list;
- struct acpi_nfit_control_region dcr[0];
+ struct acpi_nfit_control_region dcr[];
};
struct nfit_bdw {
struct list_head list;
- struct acpi_nfit_data_region bdw[0];
+ struct acpi_nfit_data_region bdw[];
};
struct nfit_idt {
struct list_head list;
- struct acpi_nfit_interleave idt[0];
+ struct acpi_nfit_interleave idt[];
};
struct nfit_flush {
struct list_head list;
- struct acpi_nfit_flush_address flush[0];
+ struct acpi_nfit_flush_address flush[];
};
struct nfit_memdev {
struct list_head list;
- struct acpi_nfit_memory_map memdev[0];
+ struct acpi_nfit_memory_map memdev[];
};
enum nfit_mem_flags {
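
The nfit.h hunks convert the old GCC zero-length-array idiom (member[0]) to a C99 flexible array member (member[]); layout and allocation are unchanged, but the compiler can now diagnose misuse such as taking sizeof of the array. The allocation pattern stays the usual one, sketched here with hypothetical names:

	struct example {
		struct list_head list;
		struct acpi_nfit_system_address spa[];	/* flexible array member */
	};

	/* space for the header plus one trailing element, exactly as before */
	struct example *e = kzalloc(sizeof(*e) + sizeof(e->spa[0]), GFP_KERNEL);
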
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index eadbf90e65d1..47b4969d9b93 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -72,47 +72,6 @@ int acpi_map_pxm_to_node(int pxm)
}
EXPORT_SYMBOL(acpi_map_pxm_to_node);
-/**
- * acpi_map_pxm_to_online_node - Map proximity ID to online node
- * @pxm: ACPI proximity ID
- *
- * This is similar to acpi_map_pxm_to_node(), but always returns an online
- * node. When the mapped node from a given proximity ID is offline, it
- * looks up the node distance table and returns the nearest online node.
- *
- * ACPI device drivers, which are called after the NUMA initialization has
- * completed in the kernel, can call this interface to obtain their device
- * NUMA topology from ACPI tables. Such drivers do not have to deal with
- * offline nodes. A node may be offline when a device proximity ID is
- * unique, SRAT memory entry does not exist, or NUMA is disabled, ex.
- * "numa=off" on x86.
- */
-int acpi_map_pxm_to_online_node(int pxm)
-{
- int node, min_node;
-
- node = acpi_map_pxm_to_node(pxm);
-
- if (node == NUMA_NO_NODE)
- node = 0;
-
- min_node = node;
- if (!node_online(node)) {
- int min_dist = INT_MAX, dist, n;
-
- for_each_online_node(n) {
- dist = node_distance(node, n);
- if (dist < min_dist) {
- min_dist = dist;
- min_node = n;
- }
- }
- }
-
- return min_node;
-}
-EXPORT_SYMBOL(acpi_map_pxm_to_online_node);
-
static void __init
acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index ad0185c8dcee..0101b65250cb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -410,6 +410,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
{ PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
+ { PCI_VDEVICE(INTEL, 0x02d7), board_ahci_mobile }, /* Comet Lake PCH RAID */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1495,7 +1496,7 @@ static irqreturn_t ahci_thunderx_irq_handler(int irq, void *dev_instance)
static void ahci_remap_check(struct pci_dev *pdev, int bar,
struct ahci_host_priv *hpriv)
{
- int i, count = 0;
+ int i;
u32 cap;
/*
@@ -1516,13 +1517,14 @@ static void ahci_remap_check(struct pci_dev *pdev, int bar,
continue;
/* We've found a remapped device */
- count++;
+ hpriv->remapped_nvme++;
}
- if (!count)
+ if (!hpriv->remapped_nvme)
return;
- dev_warn(&pdev->dev, "Found %d remapped NVMe devices.\n", count);
+ dev_warn(&pdev->dev, "Found %u remapped NVMe devices.\n",
+ hpriv->remapped_nvme);
dev_warn(&pdev->dev,
"Switch your BIOS from RAID to AHCI mode to use them.\n");
@@ -1642,6 +1644,18 @@ static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hp
}
}
+static ssize_t remapped_nvme_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+
+ return sprintf(buf, "%u\n", hpriv->remapped_nvme);
+}
+
+static DEVICE_ATTR_RO(remapped_nvme);
+
static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int board_id = ent->driver_data;
@@ -1745,6 +1759,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* detect remapped nvme devices */
ahci_remap_check(pdev, ahci_pci_bar, hpriv);
+ sysfs_add_file_to_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr,
+ NULL);
+
/* must set flag prior to save config in order to take effect */
if (ahci_broken_devslp(pdev))
hpriv->flags |= AHCI_HFLAG_NO_DEVSLP;
@@ -1896,6 +1914,9 @@ static void ahci_shutdown_one(struct pci_dev *pdev)
static void ahci_remove_one(struct pci_dev *pdev)
{
+ sysfs_remove_file_from_group(&pdev->dev.kobj,
+ &dev_attr_remapped_nvme.attr,
+ NULL);
pm_runtime_get_noresume(&pdev->dev);
ata_pci_remove_one(pdev);
}
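
DEVICE_ATTR_RO(remapped_nvme) generates dev_attr_remapped_nvme bound to remapped_nvme_show() with mode 0444; note that the remove path drops the sysfs file before ata_pci_remove_one(), so a concurrent read cannot reach a stale ata_host. The generic shape of the pattern, sketched with a placeholder name:

	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		return sprintf(buf, "%u\n", 42u);	/* report the cached value */
	}
	static DEVICE_ATTR_RO(foo);	/* read-only attribute, yields dev_attr_foo */
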
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 3dbf398c92ea..d991dd46e89c 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -336,6 +336,7 @@ struct ahci_host_priv {
u32 em_loc; /* enclosure management location */
u32 em_buf_sz; /* EM buffer size in byte */
u32 em_msg_type; /* EM message type */
+ u32 remapped_nvme; /* NVMe remapped device count */
bool got_runtime_pm; /* Did we do pm_runtime_get? */
struct clk *clks[AHCI_MAX_CLKS]; /* Optional */
struct reset_control *rsts; /* Optional */
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 948d2c6557f3..388baf528fa8 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -782,7 +782,7 @@ static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
struct ata_host *host = dev_get_drvdata(ap->dev);
struct ahci_host_priv *hpriv = host->private_data;
struct imx_ahci_priv *imxpriv = hpriv->plat_data;
- int ret = -EIO;
+ int ret;
if (imxpriv->type == AHCI_IMX53)
ret = ahci_pmp_retry_srst_ops.softreset(link, class, deadline);
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 3ff14071617c..79f2aeeb482a 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -763,6 +763,7 @@ static int sata_pmp_eh_recover_pmp(struct ata_port *ap,
if (dev->flags & ATA_DFLAG_DETACH) {
detach = 1;
+ rc = -ENODEV;
goto fail;
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 4086718f6876..dbec3a05590a 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -27,6 +27,24 @@
#define MEMORY_CLASS_NAME "memory"
+static const char *const online_type_to_str[] = {
+ [MMOP_OFFLINE] = "offline",
+ [MMOP_ONLINE] = "online",
+ [MMOP_ONLINE_KERNEL] = "online_kernel",
+ [MMOP_ONLINE_MOVABLE] = "online_movable",
+};
+
+int memhp_online_type_from_str(const char *str)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
+ if (sysfs_streq(str, online_type_to_str[i]))
+ return i;
+ }
+ return -EINVAL;
+}
+
#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
static int sections_per_block;
@@ -145,45 +163,6 @@ int memory_notify(unsigned long val, void *v)
}
/*
- * The probe routines leave the pages uninitialized, just as the bootmem code
- * does. Make sure we do not access them, but instead use only information from
- * within sections.
- */
-static bool pages_correctly_probed(unsigned long start_pfn)
-{
- unsigned long section_nr = pfn_to_section_nr(start_pfn);
- unsigned long section_nr_end = section_nr + sections_per_block;
- unsigned long pfn = start_pfn;
-
- /*
- * memmap between sections is not contiguous except with
- * SPARSEMEM_VMEMMAP. We lookup the page once per section
- * and assume memmap is contiguous within each section
- */
- for (; section_nr < section_nr_end; section_nr++) {
- if (WARN_ON_ONCE(!pfn_valid(pfn)))
- return false;
-
- if (!present_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) not present\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- } else if (!valid_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- } else if (online_section_nr(section_nr)) {
- pr_warn("section %ld pfn[%lx, %lx) is already online\n",
- section_nr, pfn, pfn + PAGES_PER_SECTION);
- return false;
- }
- pfn += PAGES_PER_SECTION;
- }
-
- return true;
-}
-
-/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
@@ -199,9 +178,6 @@ memory_block_action(unsigned long start_section_nr, unsigned long action,
switch (action) {
case MEM_ONLINE:
- if (!pages_correctly_probed(start_pfn))
- return -EBUSY;
-
ret = online_pages(start_pfn, nr_pages, online_type, nid);
break;
case MEM_OFFLINE:
@@ -245,17 +221,14 @@ static int memory_subsys_online(struct device *dev)
return 0;
/*
- * If we are called from state_store(), online_type will be
- * set >= 0 Otherwise we were called from the device online
- * attribute and need to set the online_type.
+ * When called via device_online() without configuring the online_type,
+ * we want to default to MMOP_ONLINE.
*/
- if (mem->online_type < 0)
- mem->online_type = MMOP_ONLINE_KEEP;
+ if (mem->online_type == MMOP_OFFLINE)
+ mem->online_type = MMOP_ONLINE;
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
-
- /* clear online_type */
- mem->online_type = -1;
+ mem->online_type = MMOP_OFFLINE;
return ret;
}
@@ -267,40 +240,27 @@ static int memory_subsys_offline(struct device *dev)
if (mem->state == MEM_OFFLINE)
return 0;
- /* Can't offline block with non-present sections */
- if (mem->section_count != sections_per_block)
- return -EINVAL;
-
return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ const int online_type = memhp_online_type_from_str(buf);
struct memory_block *mem = to_memory_block(dev);
- int ret, online_type;
+ int ret;
+
+ if (online_type < 0)
+ return -EINVAL;
ret = lock_device_hotplug_sysfs();
if (ret)
return ret;
- if (sysfs_streq(buf, "online_kernel"))
- online_type = MMOP_ONLINE_KERNEL;
- else if (sysfs_streq(buf, "online_movable"))
- online_type = MMOP_ONLINE_MOVABLE;
- else if (sysfs_streq(buf, "online"))
- online_type = MMOP_ONLINE_KEEP;
- else if (sysfs_streq(buf, "offline"))
- online_type = MMOP_OFFLINE;
- else {
- ret = -EINVAL;
- goto err;
- }
-
switch (online_type) {
case MMOP_ONLINE_KERNEL:
case MMOP_ONLINE_MOVABLE:
- case MMOP_ONLINE_KEEP:
+ case MMOP_ONLINE:
/* mem->online_type is protected by device_hotplug_lock */
mem->online_type = online_type;
ret = device_online(&mem->dev);
@@ -312,7 +272,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
ret = -EINVAL; /* should never happen */
}
-err:
unlock_device_hotplug();
if (ret < 0)
@@ -380,7 +339,8 @@ static ssize_t valid_zones_show(struct device *dev,
}
nid = mem->nid;
- default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages);
+ default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
+ nr_pages);
strcat(buf, default_zone->name);
print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL,
@@ -418,23 +378,20 @@ static DEVICE_ATTR_RO(block_size_bytes);
static ssize_t auto_online_blocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- if (memhp_auto_online)
- return sprintf(buf, "online\n");
- else
- return sprintf(buf, "offline\n");
+ return sprintf(buf, "%s\n",
+ online_type_to_str[memhp_default_online_type]);
}
static ssize_t auto_online_blocks_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
- if (sysfs_streq(buf, "online"))
- memhp_auto_online = true;
- else if (sysfs_streq(buf, "offline"))
- memhp_auto_online = false;
- else
+ const int online_type = memhp_online_type_from_str(buf);
+
+ if (online_type < 0)
return -EINVAL;
+ memhp_default_online_type = online_type;
return count;
}
@@ -627,7 +584,7 @@ static int init_memory_block(struct memory_block **memory,
static int add_memory_block(unsigned long base_section_nr)
{
- int ret, section_count = 0;
+ int section_count = 0;
struct memory_block *mem;
unsigned long nr;
@@ -638,12 +595,8 @@ static int add_memory_block(unsigned long base_section_nr)
if (section_count == 0)
return 0;
- ret = init_memory_block(&mem, base_memory_block_id(base_section_nr),
- MEM_ONLINE);
- if (ret)
- return ret;
- mem->section_count = section_count;
- return 0;
+ return init_memory_block(&mem, base_memory_block_id(base_section_nr),
+ MEM_ONLINE);
}
static void unregister_memory(struct memory_block *memory)
@@ -679,7 +632,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
ret = init_memory_block(&mem, block_id, MEM_OFFLINE);
if (ret)
break;
- mem->section_count = sections_per_block;
}
if (ret) {
end_block_id = block_id;
@@ -688,7 +640,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size)
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- mem->section_count = 0;
unregister_memory(mem);
}
}
@@ -717,7 +668,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size)
mem = find_memory_block_by_id(block_id);
if (WARN_ON_ONCE(!mem))
continue;
- mem->section_count = 0;
unregister_memory_block_under_nodes(mem);
unregister_memory(mem);
}
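
memhp_online_type_from_str() becomes the single parser for both state_store() and auto_online_blocks_store(), and the same table drives the show path, so the strings and the MMOP_* values cannot drift apart. sysfs_streq() rather than strcmp() is what tolerates the trailing newline that echo appends; a minimal usage sketch, not from the commit:

	/* "online\n" written via `echo online > .../state` still matches */
	int type = memhp_online_type_from_str("online\n");	/* -> MMOP_ONLINE */

	if (type < 0)		/* unrecognized string */
		return -EINVAL;
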
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index a42c49e04954..da693e6a834e 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -429,11 +429,12 @@ static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
* information.
*/
struct file *file = lo->lo_backing_file;
+ struct request_queue *q = lo->lo_queue;
int ret;
mode |= FALLOC_FL_KEEP_SIZE;
- if ((!file->f_op->fallocate) || lo->lo_encrypt_key_size) {
+ if (!blk_queue_discard(q)) {
ret = -EOPNOTSUPP;
goto out;
}
@@ -463,7 +464,7 @@ static void lo_complete_rq(struct request *rq)
if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
req_op(rq) != REQ_OP_READ) {
if (cmd->ret < 0)
- ret = BLK_STS_IOERR;
+ ret = errno_to_blk_status(cmd->ret);
goto end_io;
}
@@ -868,27 +869,46 @@ static void loop_config_discard(struct loop_device *lo)
struct request_queue *q = lo->lo_queue;
/*
+ * If the backing device is a block device, mirror its zeroing
+ * capability. Set the discard sectors to the block device's zeroing
+ * capabilities because loop discards result in blkdev_issue_zeroout(),
+ * not blkdev_issue_discard(). This maintains consistent behavior with
+ * file-backed loop devices: discarded regions read back as zero.
+ */
+ if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+ struct request_queue *backingq;
+
+ backingq = bdev_get_queue(inode->i_bdev);
+ blk_queue_max_discard_sectors(q,
+ backingq->limits.max_write_zeroes_sectors);
+
+ blk_queue_max_write_zeroes_sectors(q,
+ backingq->limits.max_write_zeroes_sectors);
+
+ /*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
- if ((!file->f_op->fallocate) ||
- lo->lo_encrypt_key_size) {
+ } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
q->limits.discard_granularity = 0;
q->limits.discard_alignment = 0;
blk_queue_max_discard_sectors(q, 0);
blk_queue_max_write_zeroes_sectors(q, 0);
- blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
- return;
- }
- q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = 0;
+ } else {
+ q->limits.discard_granularity = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
+
+ blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+ blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
+ }
- blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
- blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
- blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ if (q->limits.max_write_zeroes_sectors)
+ blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
}
static void loop_unprepare_queue(struct loop_device *lo)
@@ -1955,7 +1975,10 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
failed:
/* complete non-aio request */
if (!cmd->use_aio || ret) {
- cmd->ret = ret ? -EIO : 0;
+ if (ret == -EOPNOTSUPP)
+ cmd->ret = ret;
+ else
+ cmd->ret = ret ? -EIO : 0;
blk_mq_complete_request(rq);
}
}
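
For a loop device backed by a raw block device, a discard is serviced with blkdev_issue_zeroout() on the backing device, so the hunk mirrors the backing queue's max_write_zeroes_sectors for both limits; a backing device that cannot write zeroes ends up with a limit of 0 and QUEUE_FLAG_DISCARD cleared. The resulting three-way decision, condensed as a sketch with hypothetical variable names:

	if (backed_by_bdev && !encrypted)
		limit = backing_q->limits.max_write_zeroes_sectors; /* mirror */
	else if (!file->f_op->fallocate || encrypted)
		limit = 0;				/* no discard support */
	else
		limit = UINT_MAX >> 9;			/* fallocate punch hole */
	/* QUEUE_FLAG_DISCARD then follows: set iff limit != 0 */
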
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6343402c09e6..1e0a6b19ae0d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -337,10 +337,7 @@ struct rbd_img_request {
u64 snap_id; /* for reads */
struct ceph_snap_context *snapc; /* for writes */
};
- union {
- struct request *rq; /* block request */
- struct rbd_obj_request *obj_request; /* obj req initiator */
- };
+ struct rbd_obj_request *obj_request; /* obj req initiator */
struct list_head lock_item;
struct list_head object_extents; /* obj_req.ex structs */
@@ -349,7 +346,6 @@ struct rbd_img_request {
struct pending_result pending;
struct work_struct work;
int work_result;
- struct kref kref;
};
#define for_each_obj_request(ireq, oreq) \
@@ -1320,15 +1316,6 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
-static void rbd_img_request_destroy(struct kref *kref);
-static void rbd_img_request_put(struct rbd_img_request *img_request)
-{
- rbd_assert(img_request != NULL);
- dout("%s: img %p (was %d)\n", __func__, img_request,
- kref_read(&img_request->kref));
- kref_put(&img_request->kref, rbd_img_request_destroy);
-}
-
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
struct rbd_obj_request *obj_request)
{
@@ -1366,18 +1353,10 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req)
static void img_request_layered_set(struct rbd_img_request *img_request)
{
set_bit(IMG_REQ_LAYERED, &img_request->flags);
- smp_mb();
-}
-
-static void img_request_layered_clear(struct rbd_img_request *img_request)
-{
- clear_bit(IMG_REQ_LAYERED, &img_request->flags);
- smp_mb();
}
static bool img_request_layered_test(struct rbd_img_request *img_request)
{
- smp_mb();
return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
@@ -1619,10 +1598,8 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
if (!rbd_dev->parent_spec)
return false;
- down_read(&rbd_dev->header_rwsem);
if (rbd_dev->parent_overlap)
counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
@@ -1630,63 +1607,54 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
return counter > 0;
}
-/*
- * Caller is responsible for filling in the list of object requests
- * that comprises the image request, and the Linux request pointer
- * (if there is one).
- */
-static struct rbd_img_request *rbd_img_request_create(
- struct rbd_device *rbd_dev,
- enum obj_operation_type op_type,
- struct ceph_snap_context *snapc)
+static void rbd_img_request_init(struct rbd_img_request *img_request,
+ struct rbd_device *rbd_dev,
+ enum obj_operation_type op_type)
{
- struct rbd_img_request *img_request;
-
- img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
- if (!img_request)
- return NULL;
+ memset(img_request, 0, sizeof(*img_request));
img_request->rbd_dev = rbd_dev;
img_request->op_type = op_type;
- if (!rbd_img_is_write(img_request))
- img_request->snap_id = rbd_dev->spec->snap_id;
- else
- img_request->snapc = snapc;
-
- if (rbd_dev_parent_get(rbd_dev))
- img_request_layered_set(img_request);
INIT_LIST_HEAD(&img_request->lock_item);
INIT_LIST_HEAD(&img_request->object_extents);
mutex_init(&img_request->state_mutex);
- kref_init(&img_request->kref);
+}
+
+static void rbd_img_capture_header(struct rbd_img_request *img_req)
+{
+ struct rbd_device *rbd_dev = img_req->rbd_dev;
+
+ lockdep_assert_held(&rbd_dev->header_rwsem);
- return img_request;
+ if (rbd_img_is_write(img_req))
+ img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
+ else
+ img_req->snap_id = rbd_dev->spec->snap_id;
+
+ if (rbd_dev_parent_get(rbd_dev))
+ img_request_layered_set(img_req);
}
-static void rbd_img_request_destroy(struct kref *kref)
+static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
- struct rbd_img_request *img_request;
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
- img_request = container_of(kref, struct rbd_img_request, kref);
-
dout("%s: img %p\n", __func__, img_request);
WARN_ON(!list_empty(&img_request->lock_item));
for_each_obj_request_safe(img_request, obj_request, next_obj_request)
rbd_img_obj_request_del(img_request, obj_request);
- if (img_request_layered_test(img_request)) {
- img_request_layered_clear(img_request);
+ if (img_request_layered_test(img_request))
rbd_dev_parent_put(img_request->rbd_dev);
- }
if (rbd_img_is_write(img_request))
ceph_put_snap_context(img_request->snapc);
- kmem_cache_free(rbd_img_request_cache, img_request);
+ if (test_bit(IMG_REQ_CHILD, &img_request->flags))
+ kmem_cache_free(rbd_img_request_cache, img_request);
}
#define BITS_PER_OBJ 2
@@ -2849,17 +2817,22 @@ static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
struct rbd_img_request *img_req = obj_req->img_request;
+ struct rbd_device *parent = img_req->rbd_dev->parent;
struct rbd_img_request *child_img_req;
int ret;
- child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
- OBJ_OP_READ, NULL);
+ child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
if (!child_img_req)
return -ENOMEM;
+ rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
child_img_req->obj_request = obj_req;
+ down_read(&parent->header_rwsem);
+ rbd_img_capture_header(child_img_req);
+ up_read(&parent->header_rwsem);
+
dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
obj_req);
@@ -2888,7 +2861,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
obj_req->copyup_bvecs);
}
if (ret) {
- rbd_img_request_put(child_img_req);
+ rbd_img_request_destroy(child_img_req);
return ret;
}
@@ -3647,15 +3620,15 @@ again:
if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
struct rbd_obj_request *obj_req = img_req->obj_request;
- rbd_img_request_put(img_req);
+ rbd_img_request_destroy(img_req);
if (__rbd_obj_handle_request(obj_req, &result)) {
img_req = obj_req->img_request;
goto again;
}
} else {
- struct request *rq = img_req->rq;
+ struct request *rq = blk_mq_rq_from_pdu(img_req);
- rbd_img_request_put(img_req);
+ rbd_img_request_destroy(img_req);
blk_mq_end_request(rq, errno_to_blk_status(result));
}
}
@@ -4707,84 +4680,36 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
static void rbd_queue_workfn(struct work_struct *work)
{
- struct request *rq = blk_mq_rq_from_pdu(work);
- struct rbd_device *rbd_dev = rq->q->queuedata;
- struct rbd_img_request *img_request;
- struct ceph_snap_context *snapc = NULL;
+ struct rbd_img_request *img_request =
+ container_of(work, struct rbd_img_request, work);
+ struct rbd_device *rbd_dev = img_request->rbd_dev;
+ enum obj_operation_type op_type = img_request->op_type;
+ struct request *rq = blk_mq_rq_from_pdu(img_request);
u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
u64 length = blk_rq_bytes(rq);
- enum obj_operation_type op_type;
u64 mapping_size;
int result;
- switch (req_op(rq)) {
- case REQ_OP_DISCARD:
- op_type = OBJ_OP_DISCARD;
- break;
- case REQ_OP_WRITE_ZEROES:
- op_type = OBJ_OP_ZEROOUT;
- break;
- case REQ_OP_WRITE:
- op_type = OBJ_OP_WRITE;
- break;
- case REQ_OP_READ:
- op_type = OBJ_OP_READ;
- break;
- default:
- dout("%s: non-fs request type %d\n", __func__, req_op(rq));
- result = -EIO;
- goto err;
- }
-
/* Ignore/skip any zero-length requests */
-
if (!length) {
dout("%s: zero-length request\n", __func__);
result = 0;
- goto err_rq;
- }
-
- if (op_type != OBJ_OP_READ) {
- if (rbd_is_ro(rbd_dev)) {
- rbd_warn(rbd_dev, "%s on read-only mapping",
- obj_op_name(op_type));
- result = -EIO;
- goto err;
- }
- rbd_assert(!rbd_is_snap(rbd_dev));
- }
-
- if (offset && length > U64_MAX - offset + 1) {
- rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
- length);
- result = -EINVAL;
- goto err_rq; /* Shouldn't happen */
+ goto err_img_request;
}
blk_mq_start_request(rq);
down_read(&rbd_dev->header_rwsem);
mapping_size = rbd_dev->mapping.size;
- if (op_type != OBJ_OP_READ) {
- snapc = rbd_dev->header.snapc;
- ceph_get_snap_context(snapc);
- }
+ rbd_img_capture_header(img_request);
up_read(&rbd_dev->header_rwsem);
if (offset + length > mapping_size) {
rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
length, mapping_size);
result = -EIO;
- goto err_rq;
- }
-
- img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
- if (!img_request) {
- result = -ENOMEM;
- goto err_rq;
+ goto err_img_request;
}
- img_request->rq = rq;
- snapc = NULL; /* img_request consumes a ref */
dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
img_request, obj_op_name(op_type), offset, length);
@@ -4801,23 +4726,51 @@ static void rbd_queue_workfn(struct work_struct *work)
return;
err_img_request:
- rbd_img_request_put(img_request);
-err_rq:
+ rbd_img_request_destroy(img_request);
if (result)
rbd_warn(rbd_dev, "%s %llx at %llx result %d",
obj_op_name(op_type), length, offset, result);
- ceph_put_snap_context(snapc);
-err:
blk_mq_end_request(rq, errno_to_blk_status(result));
}
static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- struct request *rq = bd->rq;
- struct work_struct *work = blk_mq_rq_to_pdu(rq);
+ struct rbd_device *rbd_dev = hctx->queue->queuedata;
+ struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
+ enum obj_operation_type op_type;
- queue_work(rbd_wq, work);
+ switch (req_op(bd->rq)) {
+ case REQ_OP_DISCARD:
+ op_type = OBJ_OP_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ op_type = OBJ_OP_ZEROOUT;
+ break;
+ case REQ_OP_WRITE:
+ op_type = OBJ_OP_WRITE;
+ break;
+ case REQ_OP_READ:
+ op_type = OBJ_OP_READ;
+ break;
+ default:
+ rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
+ return BLK_STS_IOERR;
+ }
+
+ rbd_img_request_init(img_req, rbd_dev, op_type);
+
+ if (rbd_img_is_write(img_req)) {
+ if (rbd_is_ro(rbd_dev)) {
+ rbd_warn(rbd_dev, "%s on read-only mapping",
+ obj_op_name(img_req->op_type));
+ return BLK_STS_IOERR;
+ }
+ rbd_assert(!rbd_is_snap(rbd_dev));
+ }
+
+ INIT_WORK(&img_req->work, rbd_queue_workfn);
+ queue_work(rbd_wq, &img_req->work);
return BLK_STS_OK;
}
@@ -4984,18 +4937,8 @@ out:
return ret;
}
-static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct work_struct *work = blk_mq_rq_to_pdu(rq);
-
- INIT_WORK(work, rbd_queue_workfn);
- return 0;
-}
-
static const struct blk_mq_ops rbd_mq_ops = {
.queue_rq = rbd_queue_rq,
- .init_request = rbd_init_request,
};
static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -5027,8 +4970,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- rbd_dev->tag_set.nr_hw_queues = 1;
- rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
+ rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
+ rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
if (err)
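
The rbd conversion drops the separately allocated, kref-counted image request: tag_set.cmd_size now reserves a struct rbd_img_request behind every struct request, so blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() translate between the two in O(1) and blk-mq owns the lifetime (only child requests are still taken from the slab cache). A minimal sketch of the pdu pairing, assuming cmd_size is set as above:

	/* request -> driver payload: the pdu sits right behind the request */
	struct rbd_img_request *img_req = blk_mq_rq_to_pdu(rq);

	/* driver payload -> request, e.g. when completing the I/O */
	struct request *rq2 = blk_mq_rq_from_pdu(img_req);
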
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 915cf5b6388c..3b889ea950c2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -47,6 +47,7 @@
#include <linux/bitmap.h>
#include <linux/list.h>
#include <linux/workqueue.h>
+#include <linux/sched/mm.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -2189,10 +2190,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
- unsigned int psegs, grants;
+ unsigned int psegs, grants, memflags;
int err, i;
struct blkfront_info *info = rinfo->dev_info;
+ memflags = memalloc_noio_save();
+
if (info->max_indirect_segments == 0) {
if (!HAS_EXTRA_REQ)
grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
@@ -2224,7 +2227,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
BUG_ON(!list_empty(&rinfo->indirect_pages));
for (i = 0; i < num; i++) {
- struct page *indirect_page = alloc_page(GFP_NOIO);
+ struct page *indirect_page = alloc_page(GFP_KERNEL);
if (!indirect_page)
goto out_of_memory;
list_add(&indirect_page->lru, &rinfo->indirect_pages);
@@ -2235,15 +2238,15 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
rinfo->shadow[i].grants_used =
kvcalloc(grants,
sizeof(rinfo->shadow[i].grants_used[0]),
- GFP_NOIO);
+ GFP_KERNEL);
rinfo->shadow[i].sg = kvcalloc(psegs,
sizeof(rinfo->shadow[i].sg[0]),
- GFP_NOIO);
+ GFP_KERNEL);
if (info->max_indirect_segments)
rinfo->shadow[i].indirect_grants =
kvcalloc(INDIRECT_GREFS(grants),
sizeof(rinfo->shadow[i].indirect_grants[0]),
- GFP_NOIO);
+ GFP_KERNEL);
if ((rinfo->shadow[i].grants_used == NULL) ||
(rinfo->shadow[i].sg == NULL) ||
(info->max_indirect_segments &&
@@ -2252,6 +2255,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
sg_init_table(rinfo->shadow[i].sg, psegs);
}
+ memalloc_noio_restore(memflags);
return 0;
@@ -2271,6 +2275,9 @@ out_of_memory:
__free_page(indirect_page);
}
}
+
+ memalloc_noio_restore(memflags);
+
return -ENOMEM;
}
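
Instead of tagging every allocation GFP_NOIO, the function now brackets itself with memalloc_noio_save()/restore(): within that window the allocator strips __GFP_IO from every allocation made by the task, including kvcalloc()'s vmalloc fallback, which a bare GFP_NOIO argument cannot reach. The general shape of the scope, sketched:

	unsigned int memflags;

	memflags = memalloc_noio_save();	/* all allocations in scope become NOIO */
	buf = kvcalloc(n, size, GFP_KERNEL);	/* GFP_KERNEL is demoted to NOIO here */
	/* ... set up the ring ... */
	memalloc_noio_restore(memflags);	/* must run on every exit path */
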
diff --git a/drivers/char/hw_random/omap3-rom-rng.c b/drivers/char/hw_random/omap3-rom-rng.c
index a431c5cbe2be..e0d77fa048fb 100644
--- a/drivers/char/hw_random/omap3-rom-rng.c
+++ b/drivers/char/hw_random/omap3-rom-rng.c
@@ -4,7 +4,7 @@
* Copyright (C) 2009 Nokia Corporation
* Author: Juha Yrjola <juha.yrjola@solidboot.com>
*
- * Copyright (C) 2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2013 Pali Rohár <pali@kernel.org>
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -178,5 +178,5 @@ module_platform_driver(omap3_rom_rng_driver);
MODULE_ALIAS("platform:omap3-rom-rng");
MODULE_AUTHOR("Juha Yrjola");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index cad9563f8f48..c48d8f086382 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -618,6 +618,8 @@ static DEFINE_MUTEX(ipmidriver_mutex);
static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);
+#define ipmi_interfaces_mutex_held() \
+ lockdep_is_held(&ipmi_interfaces_mutex)
static struct srcu_struct ipmi_interfaces_srcu;
/*
@@ -1321,7 +1323,8 @@ static void _ipmi_destroy_user(struct ipmi_user *user)
* synchronize_srcu()) then free everything in that list.
*/
mutex_lock(&intf->cmd_rcvrs_mutex);
- list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
if (rcvr->user == user) {
list_del_rcu(&rcvr->link);
rcvr->next = rcvrs;
@@ -1599,7 +1602,8 @@ static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
{
struct cmd_rcvr *rcvr;
- list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
&& (rcvr->chans & (1 << chan)))
return rcvr;
@@ -1614,7 +1618,8 @@ static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
{
struct cmd_rcvr *rcvr;
- list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+ list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+ lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
&& (rcvr->chans & chans))
return 0;
@@ -3188,8 +3193,8 @@ static void __get_guid(struct ipmi_smi *intf)
if (rv)
/* Send failed, no GUID available. */
bmc->dyn_guid_set = 0;
-
- wait_event(intf->waitq, bmc->dyn_guid_set != 2);
+ else
+ wait_event(intf->waitq, bmc->dyn_guid_set != 2);
/* dyn_guid_set makes the guid data available. */
smp_rmb();
@@ -3450,7 +3455,8 @@ int ipmi_add_smi(struct module *owner,
/* Look for a hole in the numbers. */
i = 0;
link = &ipmi_interfaces;
- list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+ list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
+ ipmi_interfaces_mutex_held()) {
if (tintf->intf_num != i) {
link = &tintf->link;
break;
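
list_for_each_entry_rcu() in recent kernels takes an optional fourth argument: a lockdep expression naming the lock that makes iteration safe outside an RCU read-side section. These hunks supply it for paths that hold the relevant mutex instead of rcu_read_lock(), avoiding false RCU-lockdep splats. The shape of the annotation:

	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		/* safe: writers are excluded by the mutex, not by RCU */
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
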
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 8ac390c2b514..b7145f370d3b 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -313,6 +313,7 @@ static int start_send(struct ssif_info *ssif_info,
static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
unsigned long *flags)
+ __acquires(&ssif_info->lock)
{
spin_lock_irqsave(&ssif_info->lock, *flags);
return flags;
@@ -320,6 +321,7 @@ static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
unsigned long *flags)
+ __releases(&ssif_info->lock)
{
spin_unlock_irqrestore(&ssif_info->lock, *flags);
}
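
__acquires() and __releases() are sparse context annotations: they record that the lock state changes across the function boundary, so helpers that take a lock in one function and drop it in another don't trigger context-imbalance warnings. A hedged sketch of the same split-lock-helper shape (struct bar is invented):

    #include <linux/spinlock.h>

    struct bar {
            spinlock_t lock;
    };

    static void bar_lock(struct bar *b, unsigned long *flags)
            __acquires(&b->lock)
    {
            spin_lock_irqsave(&b->lock, *flags);
    }

    static void bar_unlock(struct bar *b, unsigned long *flags)
            __releases(&b->lock)
    {
            spin_unlock_irqrestore(&b->lock, *flags);
    }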
diff --git a/drivers/char/ipmi/kcs_bmc_aspeed.c b/drivers/char/ipmi/kcs_bmc_aspeed.c
index 3c955946e647..a140203c079b 100644
--- a/drivers/char/ipmi/kcs_bmc_aspeed.c
+++ b/drivers/char/ipmi/kcs_bmc_aspeed.c
@@ -12,6 +12,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
@@ -233,58 +234,154 @@ static const struct kcs_ioreg ast_kcs_bmc_ioregs[KCS_CHANNEL_MAX] = {
{ .idr = LPC_IDR4, .odr = LPC_ODR4, .str = LPC_STR4 },
};
-static int aspeed_kcs_probe(struct platform_device *pdev)
+static struct kcs_bmc *aspeed_kcs_probe_of_v1(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct aspeed_kcs_bmc *priv;
- struct kcs_bmc *kcs_bmc;
- u32 chan, addr;
+ struct device_node *np;
+ struct kcs_bmc *kcs;
+ u32 channel;
+ u32 slave;
int rc;
- rc = of_property_read_u32(dev->of_node, "kcs_chan", &chan);
- if ((rc != 0) || (chan == 0 || chan > KCS_CHANNEL_MAX)) {
- dev_err(dev, "no valid 'kcs_chan' configured\n");
- return -ENODEV;
+ np = pdev->dev.of_node;
+
+ rc = of_property_read_u32(np, "kcs_chan", &channel);
+ if ((rc != 0) || (channel == 0 || channel > KCS_CHANNEL_MAX)) {
+ dev_err(&pdev->dev, "no valid 'kcs_chan' configured\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
+ if (!kcs)
+ return ERR_PTR(-ENOMEM);
+
+ priv = kcs_bmc_priv(kcs);
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
+ if (IS_ERR(priv->map)) {
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return ERR_PTR(-ENODEV);
}
- rc = of_property_read_u32(dev->of_node, "kcs_addr", &addr);
+ rc = of_property_read_u32(np, "kcs_addr", &slave);
if (rc) {
- dev_err(dev, "no valid 'kcs_addr' configured\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "no valid 'kcs_addr' configured\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ kcs->ioreg = ast_kcs_bmc_ioregs[channel - 1];
+ aspeed_kcs_set_address(kcs, slave);
+
+ return kcs;
+}
+
+static int aspeed_kcs_calculate_channel(const struct kcs_ioreg *regs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ast_kcs_bmc_ioregs); i++) {
+ if (!memcmp(&ast_kcs_bmc_ioregs[i], regs, sizeof(*regs)))
+ return i + 1;
}
- kcs_bmc = kcs_bmc_alloc(dev, sizeof(*priv), chan);
- if (!kcs_bmc)
- return -ENOMEM;
+ return -EINVAL;
+}
+
+static struct kcs_bmc *aspeed_kcs_probe_of_v2(struct platform_device *pdev)
+{
+ struct aspeed_kcs_bmc *priv;
+ struct device_node *np;
+ struct kcs_ioreg ioreg;
+ struct kcs_bmc *kcs;
+ const __be32 *reg;
+ int channel;
+ u32 slave;
+ int rc;
+
+ np = pdev->dev.of_node;
+
+ /* Don't translate addresses, we want offsets for the regmaps */
+ reg = of_get_address(np, 0, NULL, NULL);
+ if (!reg)
+ return ERR_PTR(-EINVAL);
+ ioreg.idr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 1, NULL, NULL);
+ if (!reg)
+ return ERR_PTR(-EINVAL);
+ ioreg.odr = be32_to_cpup(reg);
+
+ reg = of_get_address(np, 2, NULL, NULL);
+ if (!reg)
+ return ERR_PTR(-EINVAL);
+ ioreg.str = be32_to_cpup(reg);
- priv = kcs_bmc_priv(kcs_bmc);
- priv->map = syscon_node_to_regmap(dev->parent->of_node);
+ channel = aspeed_kcs_calculate_channel(&ioreg);
+ if (channel < 0)
+ return ERR_PTR(channel);
+
+ kcs = kcs_bmc_alloc(&pdev->dev, sizeof(struct aspeed_kcs_bmc), channel);
+ if (!kcs)
+ return ERR_PTR(-ENOMEM);
+
+ kcs->ioreg = ioreg;
+
+ priv = kcs_bmc_priv(kcs);
+ priv->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
if (IS_ERR(priv->map)) {
- dev_err(dev, "Couldn't get regmap\n");
- return -ENODEV;
+ dev_err(&pdev->dev, "Couldn't get regmap\n");
+ return ERR_PTR(-ENODEV);
}
- kcs_bmc->ioreg = ast_kcs_bmc_ioregs[chan - 1];
+ rc = of_property_read_u32(np, "aspeed,lpc-io-reg", &slave);
+ if (rc)
+ return ERR_PTR(rc);
+
+ aspeed_kcs_set_address(kcs, slave);
+
+ return kcs;
+}
+
+static int aspeed_kcs_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct kcs_bmc *kcs_bmc;
+ struct device_node *np;
+ int rc;
+
+ np = pdev->dev.of_node;
+ if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc") ||
+ of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc"))
+ kcs_bmc = aspeed_kcs_probe_of_v1(pdev);
+ else if (of_device_is_compatible(np, "aspeed,ast2400-kcs-bmc-v2") ||
+ of_device_is_compatible(np, "aspeed,ast2500-kcs-bmc-v2"))
+ kcs_bmc = aspeed_kcs_probe_of_v2(pdev);
+ else
+ return -EINVAL;
+
+ if (IS_ERR(kcs_bmc))
+ return PTR_ERR(kcs_bmc);
+
kcs_bmc->io_inputb = aspeed_kcs_inb;
kcs_bmc->io_outputb = aspeed_kcs_outb;
- dev_set_drvdata(dev, kcs_bmc);
-
- aspeed_kcs_set_address(kcs_bmc, addr);
- aspeed_kcs_enable_channel(kcs_bmc, true);
rc = aspeed_kcs_config_irq(kcs_bmc, pdev);
if (rc)
return rc;
+ dev_set_drvdata(dev, kcs_bmc);
+
+ aspeed_kcs_enable_channel(kcs_bmc, true);
+
rc = misc_register(&kcs_bmc->miscdev);
if (rc) {
dev_err(dev, "Unable to register device\n");
return rc;
}
- pr_info("channel=%u addr=0x%x idr=0x%x odr=0x%x str=0x%x\n",
- chan, addr,
- kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr, kcs_bmc->ioreg.str);
+ dev_dbg(&pdev->dev,
+ "Probed KCS device %d (IDR=0x%x, ODR=0x%x, STR=0x%x)\n",
+ kcs_bmc->channel, kcs_bmc->ioreg.idr, kcs_bmc->ioreg.odr,
+ kcs_bmc->ioreg.str);
return 0;
}
@@ -301,6 +398,8 @@ static int aspeed_kcs_remove(struct platform_device *pdev)
static const struct of_device_id ast_kcs_bmc_match[] = {
{ .compatible = "aspeed,ast2400-kcs-bmc" },
{ .compatible = "aspeed,ast2500-kcs-bmc" },
+ { .compatible = "aspeed,ast2400-kcs-bmc-v2" },
+ { .compatible = "aspeed,ast2500-kcs-bmc-v2" },
{ }
};
MODULE_DEVICE_TABLE(of, ast_kcs_bmc_match);
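
In the v2 probe path above, of_get_address() is used instead of a translating helper precisely because the raw 'reg' cells are wanted as offsets into the parent syscon regmap, not CPU addresses. A minimal sketch of that lookup (example_reg_offset is a hypothetical helper, not part of the driver):

    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Fetch the untranslated reg offset at @index, or -EINVAL. */
    static int example_reg_offset(struct device_node *np, int index,
                                  u32 *offset)
    {
            const __be32 *reg;

            /* NULL size/flags: only the address cells are of interest */
            reg = of_get_address(np, index, NULL, NULL);
            if (!reg)
                    return -EINVAL;

            *offset = be32_to_cpup(reg);
            return 0;
    }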
diff --git a/drivers/cpuidle/cpuidle-haltpoll.c b/drivers/cpuidle/cpuidle-haltpoll.c
index db124bc1ca2c..fcc53215bac8 100644
--- a/drivers/cpuidle/cpuidle-haltpoll.c
+++ b/drivers/cpuidle/cpuidle-haltpoll.c
@@ -94,7 +94,7 @@ static void haltpoll_uninit(void)
haltpoll_cpuidle_devices = NULL;
}
-static bool haltpool_want(void)
+static bool haltpoll_want(void)
{
return kvm_para_has_hint(KVM_HINTS_REALTIME) || force;
}
@@ -110,7 +110,7 @@ static int __init haltpoll_init(void)
cpuidle_poll_state_init(drv);
- if (!kvm_para_available() || !haltpool_want())
+ if (!kvm_para_available() || !haltpoll_want())
return -ENODEV;
ret = cpuidle_register_driver(drv);
diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c
index 73658b71d4a3..cd1769ecdc1c 100644
--- a/drivers/crypto/chelsio/chcr_ktls.c
+++ b/drivers/crypto/chelsio/chcr_ktls.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2020 Chelsio Communications. All rights reserved. */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
+#include <linux/highmem.h>
#include "chcr_ktls.h"
#include "clip_tbl.h"
diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig
index 095850d01dcc..f09c6cf7823e 100644
--- a/drivers/crypto/hisilicon/Kconfig
+++ b/drivers/crypto/hisilicon/Kconfig
@@ -27,6 +27,7 @@ config CRYPTO_DEV_HISI_SEC2
select CRYPTO_SHA256
select CRYPTO_SHA512
depends on PCI && PCI_MSI
+ depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
help
Support for HiSilicon SEC Engine of version 2 in crypto subsystem.
@@ -58,6 +59,7 @@ config CRYPTO_DEV_HISI_ZIP
config CRYPTO_DEV_HISI_HPRE
tristate "Support for HISI HPRE accelerator"
depends on PCI && PCI_MSI
+ depends on UACCE || UACCE=n
depends on ARM64 || (COMPILE_TEST && 64BIT)
select CRYPTO_DEV_HISI_QM
select CRYPTO_DH
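
The added "depends on UACCE || UACCE=n" line is the standard Kconfig idiom for an optional dependency on a tristate symbol: the driver may be built when UACCE is enabled or disabled entirely, but if UACCE is built as a module (m) the driver is limited to m as well, so built-in code can never end up calling into modular UACCE code.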
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 946fb62949b2..06202bcffb33 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -1161,13 +1161,13 @@ static inline u32 create_aead_null_output_list(struct aead_request *req,
inputlen);
if (status != inputlen) {
status = -EINVAL;
- goto error;
+ goto error_free;
}
status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr,
inputlen);
if (status != inputlen) {
status = -EINVAL;
- goto error;
+ goto error_free;
}
kfree(ptr);
}
@@ -1209,8 +1209,10 @@ static inline u32 create_aead_null_output_list(struct aead_request *req,
req_info->outcnt = argcnt;
return 0;
-error:
+
+error_free:
kfree(ptr);
+error:
return status;
}
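
The relabeling above restores the usual kernel cleanup-label ordering: jumps to error_free release the temporary buffer, while the bare error label is reached only after the buffer has already been freed (or was never owned). A generic sketch of the shape, with hypothetical do_step_* helpers standing in for the driver's real work:

    #include <linux/slab.h>

    static int do_step_one(void *buf, size_t len) { return 0; } /* hypothetical */
    static int do_step_two(void *buf, size_t len) { return 0; } /* hypothetical */

    static int example_op(size_t len)
    {
            void *buf;
            int ret;

            buf = kmalloc(len, GFP_KERNEL);
            if (!buf)
                    return -ENOMEM;         /* nothing to unwind yet */

            ret = do_step_one(buf, len);
            if (ret)
                    goto error_free;        /* buf is live: free it */

            ret = do_step_two(buf, len);
            if (ret)
                    goto error_free;

            kfree(buf);
            return 0;

    error_free:
            kfree(buf);
            return ret;
    }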
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 46e46047a1f7..df238c8b6ef2 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -421,8 +421,10 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id,
* device outside of mmap of the resulting character device.
*/
dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC);
- if (!dax_dev)
+ if (IS_ERR(dax_dev)) {
+ rc = PTR_ERR(dax_dev);
goto err;
+ }
/* a device_dax instance is dead while the driver is not attached */
kill_dax(dax_dev);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 0aa4b6bc5101..8e32345be0f7 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -344,6 +344,23 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
}
EXPORT_SYMBOL_GPL(dax_copy_to_iter);
+int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ if (!dax_alive(dax_dev))
+ return -ENXIO;
+ /*
+ * No caller wants to zero more than one page at a time as of now.
+ * Once such callers exist, this check can be removed after the
+ * device mapper code has been updated to split ranges across targets.
+ */
+ if (nr_pages != 1)
+ return -EIO;
+
+ return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages);
+}
+EXPORT_SYMBOL_GPL(dax_zero_page_range);
+
#ifdef CONFIG_ARCH_HAS_PMEM_API
void arch_wb_cache_pmem(void *addr, size_t size);
void dax_flush(struct dax_device *dax_dev, void *addr, size_t size)
@@ -551,9 +568,16 @@ struct dax_device *alloc_dax(void *private, const char *__host,
dev_t devt;
int minor;
+ if (ops && !ops->zero_page_range) {
+ pr_debug("%s: error: device does not provide dax"
+ " operation zero_page_range()\n",
+ __host ? __host : "Unknown");
+ return ERR_PTR(-EINVAL);
+ }
+
host = kstrdup(__host, GFP_KERNEL);
if (__host && !host)
- return NULL;
+ return ERR_PTR(-ENOMEM);
minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL);
if (minor < 0)
@@ -576,7 +600,7 @@ struct dax_device *alloc_dax(void *private, const char *__host,
ida_simple_remove(&dax_minor_ida, minor);
err_minor:
kfree(host);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(alloc_dax);
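
With this change alloc_dax() no longer returns NULL on failure: it returns ERR_PTR(-EINVAL) when the supplied ops lack zero_page_range(), and ERR_PTR(-ENOMEM) on allocation failure, so callers must move from NULL checks to IS_ERR()/PTR_ERR() (the dax/bus.c hunk earlier in this series does exactly that). A hedged sketch of the calling convention; struct my_dev and its fields are invented for illustration:

    #include <linux/dax.h>
    #include <linux/err.h>

    struct my_dev {                         /* hypothetical driver state */
            const char *name;
            struct dax_operations dax_ops;  /* must now implement zero_page_range */
            struct dax_device *dax_dev;
    };

    static int example_attach(struct my_dev *mdev)
    {
            struct dax_device *dax_dev;

            dax_dev = alloc_dax(mdev, mdev->name, &mdev->dax_ops, 0);
            if (IS_ERR(dax_dev))
                    return PTR_ERR(dax_dev);        /* -EINVAL or -ENOMEM */

            mdev->dax_dev = dax_dev;
            return 0;
    }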
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index ef73b678419c..9626673f1d83 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -43,11 +43,12 @@ config DMABUF_MOVE_NOTIFY
bool "Move notify between drivers (EXPERIMENTAL)"
default n
help
- Don''t pin buffers if the dynamic DMA-buf interface is available on both the
- exporter as well as the importer. This fixes a security problem where
- userspace is able to pin unrestricted amounts of memory through DMA-buf.
- But marked experimental because we don''t jet have a consistent execution
- context and memory management between drivers.
+ Don't pin buffers if the dynamic DMA-buf interface is available on
+ both the exporter as well as the importer. This fixes a security
+ problem where userspace is able to pin unrestricted amounts of memory
+ through DMA-buf.
+ This is marked experimental because we don't yet have a consistent
+ execution context and memory management between drivers.
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 265303a396ca..f6a2f42ffc51 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1493,7 +1493,6 @@ static int tegra_dma_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, i);
if (irq < 0) {
ret = irq;
- dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
goto err_pm_disable;
}
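
The dropped dev_err() was redundant: platform_get_irq() prints its own error message when no interrupt is found, so callers only need to propagate the negative return code. The resulting idiom, sketched:

    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            int irq;

            /* platform_get_irq() already logs on failure; just propagate */
            irq = platform_get_irq(pdev, 0);
            if (irq < 0)
                    return irq;

            /* ... devm_request_irq(&pdev->dev, irq, ...) etc. ... */
            return 0;
    }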
diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
index 29906e39ab4b..14d0970a7198 100644
--- a/drivers/firmware/edd.c
+++ b/drivers/firmware/edd.c
@@ -341,7 +341,7 @@ edd_show_legacy_max_cylinder(struct edd_device *edev, char *buf)
if (!info || !buf)
return -EINVAL;
- p += snprintf(p, left, "%u\n", info->legacy_max_cylinder);
+ p += scnprintf(p, left, "%u\n", info->legacy_max_cylinder);
return (p - buf);
}
@@ -356,7 +356,7 @@ edd_show_legacy_max_head(struct edd_device *edev, char *buf)
if (!info || !buf)
return -EINVAL;
- p += snprintf(p, left, "%u\n", info->legacy_max_head);
+ p += scnprintf(p, left, "%u\n", info->legacy_max_head);
return (p - buf);
}
@@ -371,7 +371,7 @@ edd_show_legacy_sectors_per_track(struct edd_device *edev, char *buf)
if (!info || !buf)
return -EINVAL;
- p += snprintf(p, left, "%u\n", info->legacy_sectors_per_track);
+ p += scnprintf(p, left, "%u\n", info->legacy_sectors_per_track);
return (p - buf);
}
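
The substitution matters because snprintf() returns the length the output *would* have had, which can exceed the buffer and push the p pointer past its end, while scnprintf() returns the number of bytes actually written (excluding the trailing NUL). A minimal sketch of the bounded accumulation pattern used by these sysfs show routines:

    #include <linux/kernel.h>
    #include <linux/mm.h>   /* PAGE_SIZE */

    static ssize_t example_show(char *buf)
    {
            char *p = buf;
            size_t left = PAGE_SIZE;
            int n;

            /* scnprintf() never claims more than 'left', so p stays bounded */
            n = scnprintf(p, left, "value=%u\n", 42);
            p += n;
            left -= n;

            p += scnprintf(p, left, "other=%u\n", 7);

            return p - buf;
    }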
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index db0c1a9c1699..fc9f8ab533a7 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -75,14 +75,12 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
- * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a
- * displacement in the interval [0, MIN_KIMG_ALIGN) that
- * doesn't violate this kernel's de-facto alignment
+ * Produce a displacement in the interval [0, MIN_KIMG_ALIGN)
+ * that doesn't violate this kernel's de-facto alignment
* constraints.
*/
u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1);
- u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ?
- (phys_seed >> 32) & mask : TEXT_OFFSET;
+ u32 offset = (phys_seed >> 32) & mask;
/*
* With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index faa3e7102156..559dc24ef436 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2340,8 +2340,6 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
- amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
@@ -3356,6 +3354,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
+ amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
+ amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+
amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index f197f1be0969..abe94a55ecad 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -89,7 +89,8 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
- if (adev->powerplay.pp_funcs->enable_bapm)
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
mutex_unlock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index be50867ea644..deaa26808841 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -818,7 +818,7 @@ static int psp_ras_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_ras_ucode_size ||
!psp->adev->psp.ta_ras_start_addr) {
- dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n");
+ dev_info(psp->adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -902,7 +902,7 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_hdcp_ucode_size ||
!psp->adev->psp.ta_hdcp_start_addr) {
- dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n");
+ dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
@@ -1048,7 +1048,7 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (!psp->adev->psp.ta_dtm_ucode_size ||
!psp->adev->psp.ta_dtm_start_addr) {
- dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n");
+ dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 3c32a94d2424..ab379b44679c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1424,12 +1424,22 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
{
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ struct amdgpu_device *remote_adev = NULL;
+ struct amdgpu_device *adev = ras->adev;
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, false);
+
+ /* Build list of devices to query RAS related errors */
+ if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
+ device_list_handle = &hive->device_list;
+ } else {
+ list_add_tail(&adev->gmc.xgmi.head, &device_list);
+ device_list_handle = &device_list;
+ }
- /*
- * Query and print non zero error counter per IP block for
- * awareness before recovering GPU.
- */
- amdgpu_ras_log_on_err_counter(ras->adev);
+ list_for_each_entry(remote_adev, device_list_handle, gmc.xgmi.head) {
+ amdgpu_ras_log_on_err_counter(remote_adev);
+ }
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
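
The recovery worker now logs error counters for every node it can see: the whole XGMI hive list when the device is part of a hive, or a stack-local single-entry list containing just this adapter. One subtlety worth flagging in a sketch is that a stack list head must be initialized (LIST_HEAD()/INIT_LIST_HEAD()) before list_add_tail() dereferences it; the sketch below does so explicitly and uses invented names throughout:

    #include <linux/list.h>

    struct node {                   /* stand-in for the per-device list head */
            struct list_head head;
    };

    static void log_all(struct list_head *devs)
    {
            struct node *n;

            list_for_each_entry(n, devs, head)
                    ;               /* query/log one node's counters here */
    }

    static void recover(struct node *self, struct list_head *hive_list)
    {
            LIST_HEAD(local);       /* stack list head, properly initialized */
            struct list_head *handle;

            if (hive_list) {
                    handle = hive_list;
            } else {
                    list_add_tail(&self->head, &local);
                    handle = &local;
            }
            log_all(handle);
    }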
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index c8f2aa1db13b..d78059fd2c72 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -279,7 +279,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2_nv12[] =
#define DEFAULT_SH_MEM_CONFIG \
((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
- (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
+ (SH_MEM_ALIGNMENT_MODE_DWORD << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
(SH_MEM_RETRY_MODE_ALL << SH_MEM_CONFIG__RETRY_MODE__SHIFT) | \
(3 << SH_MEM_CONFIG__INITIAL_INST_PREFETCH__SHIFT))
@@ -1113,7 +1113,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
@@ -4104,6 +4104,12 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
/* It is disabled by HW by default */
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ /* 0 - Disable some blocks' MGCG */
+ WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000);
+ WREG32_SOC15(GC, 0, mmCGTT_WD_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_VGT_CLK_CTRL, 0xff000000);
+ WREG32_SOC15(GC, 0, mmCGTT_IA_CLK_CTRL, 0xff000000);
+
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4143,19 +4149,20 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
if (def != data)
WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
- /* 2 - disable MGLS in RLC */
+ /* 2 - disable MGLS in CP */
+ data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
+ if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
+ data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
+ WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
+ }
+
+ /* 3 - disable MGLS in RLC */
data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL);
if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
WREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL, data);
}
- /* 3 - disable MGLS in CP */
- data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL);
- if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
- data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
- WREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL, data);
- }
}
}
@@ -4266,7 +4273,7 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG /CGLS for GFX 3D Only === */
gfx_v10_0_update_3d_clock_gating(adev, enable);
/* === MGCG + MGLS === */
- gfx_v10_0_update_medium_grain_clock_gating(adev, enable);
+ /* gfx_v10_0_update_medium_grain_clock_gating(adev, enable); */
}
if (adev->cg_flags &
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 37c8231f1407..e6b113ed2f40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1217,6 +1217,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.mec_fw_write_wait = true;
break;
default:
+ adev->gfx.me_fw_write_wait = true;
+ adev->gfx.mec_fw_write_wait = true;
break;
}
}
@@ -1946,7 +1948,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
return r;
}
- memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size);
+ memset(hpd, 0, mec_hpd_size);
amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index cceb46faf212..dce945ef21a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -710,14 +710,16 @@ static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev,
sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT);
if (sec_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, SEC %d\n", i,
vml2_mems[i], sec_count);
err_data->ce_count += sec_count;
}
ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT);
if (ded_count) {
- DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i,
+ dev_info(adev->dev,
+ "Instance[%d]: SubBlock %s, DED %d\n", i,
vml2_mems[i], ded_count);
err_data->ue_count += ded_count;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index 0d413fabd015..c0e3efcb09bf 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1539,8 +1539,11 @@ static const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = {
{ SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 },
};
-static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
- uint32_t value, uint32_t *sec_count, uint32_t *ded_count)
+static int mmhub_v9_4_get_ras_error_count(struct amdgpu_device *adev,
+ const struct soc15_reg_entry *reg,
+ uint32_t value,
+ uint32_t *sec_count,
+ uint32_t *ded_count)
{
uint32_t i;
uint32_t sec_cnt, ded_cnt;
@@ -1553,7 +1556,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].sec_count_mask) >>
mmhub_v9_4_ras_fields[i].sec_count_shift;
if (sec_cnt) {
- DRM_INFO("MMHUB SubBlock %s, SEC %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, SEC %d\n",
mmhub_v9_4_ras_fields[i].name,
sec_cnt);
*sec_count += sec_cnt;
@@ -1563,7 +1566,7 @@ static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
mmhub_v9_4_ras_fields[i].ded_count_mask) >>
mmhub_v9_4_ras_fields[i].ded_count_shift;
if (ded_cnt) {
- DRM_INFO("MMHUB SubBlock %s, DED %d\n",
+ dev_info(adev->dev, "MMHUB SubBlock %s, DED %d\n",
mmhub_v9_4_ras_fields[i].name,
ded_cnt);
*ded_count += ded_cnt;
@@ -1588,7 +1591,7 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
reg_value =
RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
if (reg_value)
- mmhub_v9_4_get_ras_error_count(&mmhub_v9_4_edc_cnt_regs[i],
+ mmhub_v9_4_get_ras_error_count(adev, &mmhub_v9_4_edc_cnt_regs[i],
reg_value, &sec_count, &ded_count);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index d5386f15c4a5..05bc6d96ec52 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -1112,9 +1112,9 @@ kfd_gtt_out:
return 0;
kfd_gtt_no_free_chunk:
- pr_debug("Allocation failed with mem_obj = %p\n", mem_obj);
+ pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
- kfree(mem_obj);
+ kfree(*mem_obj);
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index d3674d805a0a..f7c5cdc10a70 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3639,6 +3639,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
case DRM_FORMAT_NV12:
plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
break;
+ case DRM_FORMAT_P010:
+ plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
+ break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
@@ -4720,10 +4723,10 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
static int
amdgpu_dm_connector_late_register(struct drm_connector *connector)
{
+#if defined(CONFIG_DEBUG_FS)
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
-#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
#endif
@@ -5535,6 +5538,8 @@ static int get_plane_formats(const struct drm_plane *plane,
if (plane_cap && plane_cap->pixel_format_support.nv12)
formats[num_formats++] = DRM_FORMAT_NV12;
+ if (plane_cap && plane_cap->pixel_format_support.p010)
+ formats[num_formats++] = DRM_FORMAT_P010;
break;
case DRM_PLANE_TYPE_OVERLAY:
@@ -5587,12 +5592,15 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
}
if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
- plane_cap && plane_cap->pixel_format_support.nv12) {
+ plane_cap &&
+ (plane_cap->pixel_format_support.nv12 ||
+ plane_cap->pixel_format_support.p010)) {
/* This only affects YUV formats. */
drm_plane_create_color_properties(
plane,
BIT(DRM_COLOR_YCBCR_BT601) |
- BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_BT709) |
+ BIT(DRM_COLOR_YCBCR_BT2020),
BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE),
DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
@@ -5921,7 +5929,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
adev->mode_info.underscan_vborder_property,
0);
- drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
+ if (!aconnector->mst_port)
+ drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
/* This defaults to the max in the range, but we want 8bpc for non-edp. */
aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
@@ -5940,8 +5949,9 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
&aconnector->base.base,
dm->ddev->mode_config.hdr_output_metadata_property, 0);
- drm_connector_attach_vrr_capable_property(
- &aconnector->base);
+ if (!aconnector->mst_port)
+ drm_connector_attach_vrr_capable_property(&aconnector->base);
+
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
@@ -6264,12 +6274,6 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y <= -amdgpu_crtc->max_cursor_height)
return 0;
- if (crtc->primary->state) {
- /* avivo cursor are offset into the total surface */
- x += crtc->primary->state->src_x >> 16;
- y += crtc->primary->state->src_y >> 16;
- }
-
if (x < 0) {
xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
x = 0;
@@ -6279,6 +6283,7 @@ static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
y = 0;
}
position->enable = true;
+ position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 5b70ed3cdb88..78e1c11d4ae5 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -192,10 +192,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
&hdcp_work->srm_version);
display->adjust.disable = 0;
- if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
+ hdcp_w->link.adjust.hdcp1.disable = 0;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
- else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1)
+ } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
+ hdcp_w->link.adjust.hdcp1.disable = 1;
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1;
+ }
schedule_delayed_work(&hdcp_w->property_validate_dwork,
msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS));
@@ -263,7 +266,7 @@ static void event_callback(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
- cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+ cancel_delayed_work(&hdcp_work->callback_dwork);
mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK,
&hdcp_work->output);
@@ -344,6 +347,8 @@ static void event_watchdog_timer(struct work_struct *work)
mutex_lock(&hdcp_work->mutex);
+ cancel_delayed_work(&hdcp_work->watchdog_timer_dwork);
+
mod_hdcp_process_event(&hdcp_work->hdcp,
MOD_HDCP_EVENT_WATCHDOG_TIMEOUT,
&hdcp_work->output);
@@ -414,7 +419,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.mst_supported = config->mst_supported;
display->adjust.disable = 1;
- link->adjust.auth_delay = 2;
+ link->adjust.auth_delay = 3;
+ link->adjust.hdcp1.disable = 0;
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index e8208df420d9..fabbe78d5aef 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -410,6 +410,14 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
drm_connector_attach_encoder(&aconnector->base,
&aconnector->mst_encoder->base);
+ connector->max_bpc_property = master->base.max_bpc_property;
+ if (connector->max_bpc_property)
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+ connector->vrr_capable_property = master->base.vrr_capable_property;
+ if (connector->vrr_capable_property)
+ drm_connector_attach_vrr_capable_property(connector);
+
drm_object_attach_property(
&connector->base,
dev->mode_config.path_property,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index ab267ddd4abe..24c5765890fa 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -643,7 +643,7 @@ static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) {
- if (clock_table->FClocks[i].Freq != 0) {
+ if (clock_table->FClocks[i].Freq != 0 && clock_table->FClocks[i].Vol != 0) {
j = i;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 2ffb22177df9..8489f1e56892 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -283,6 +283,8 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
int i = 0;
bool ret = false;
+ stream->adjust = *adjust;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -1360,6 +1362,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
+static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *pipe;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe->plane_state)
+ continue;
+
+ /* Must set to false to start with, due to OR in update function */
+ pipe->plane_state->status.is_flip_pending = false;
+ dc->hwss.update_pending_status(pipe);
+ if (pipe->plane_state->status.is_flip_pending)
+ return true;
+ }
+ return false;
+}
+
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -1370,6 +1392,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
+ if (is_flip_pending_in_pipes(dc, context))
+ return true;
+
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
@@ -1703,6 +1728,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->coeff_reduction_factor)
update_flags->bits.coeff_reduction_change = 1;
+ if (u->gamut_remap_matrix)
+ update_flags->bits.gamut_remap_change = 1;
+
if (u->gamma) {
enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
@@ -1728,7 +1756,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change
- || update_flags->bits.gamma_change) {
+ || update_flags->bits.gamma_change
+ || update_flags->bits.gamut_remap_change) {
type = UPDATE_TYPE_FULL;
elevate_update_type(&overall_type, type);
}
@@ -1832,8 +1861,9 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
- } else if (dc->wm_optimized_required)
- dc->optimized_required = true;
+ }
+
+ dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -1973,6 +2003,10 @@ static void copy_surface_update_to_plane(
if (srf_update->coeff_reduction_factor)
surface->coeff_reduction_factor =
*srf_update->coeff_reduction_factor;
+
+ if (srf_update->gamut_remap_matrix)
+ surface->gamut_remap_matrix =
+ *srf_update->gamut_remap_matrix;
}
static void copy_stream_update_to_stream(struct dc *dc,
@@ -2431,7 +2465,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
enum surface_update_type update_type;
struct dc_state *context;
struct dc_context *dc_ctx = dc->ctx;
- int i;
+ int i, j;
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -2469,6 +2503,17 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_surface_update_to_plane(surface, &srf_updates[i]);
+ if (update_type >= UPDATE_TYPE_MED) {
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx =
+ &context->res_ctx.pipe_ctx[j];
+
+ if (pipe_ctx->plane_state != surface)
+ continue;
+
+ resource_build_scaling_params(pipe_ctx);
+ }
+ }
}
copy_stream_update_to_stream(dc, context, stream, stream_update);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 75c7ce4c7581..f4bcc71b2920 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1077,6 +1077,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
* on certain displays, such as the Sharp 4k
*/
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
+ pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left;
pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index d3ceb39e428e..1935cf6601eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -726,6 +726,7 @@ union surface_update_flags {
uint32_t output_tf_change:1;
uint32_t pixel_format_change:1;
uint32_t plane_size_change:1;
+ uint32_t gamut_remap_change:1;
/* Full updates */
uint32_t new_plane:1;
@@ -760,6 +761,7 @@ struct dc_plane_state {
struct dc_csc_transform input_csc_color_matrix;
struct fixed31_32 coeff_reduction_factor;
struct fixed31_32 hdr_mult;
+ struct colorspace_transform gamut_remap_matrix;
// TODO: No longer used, remove
struct dc_hdr_static_metadata hdr_static_ctx;
@@ -839,6 +841,7 @@ struct dc_surface_update {
const struct dc_transfer_func *func_shaper;
const struct dc_3dlut *lut3d_func;
const struct dc_transfer_func *blend_tf;
+ const struct colorspace_transform *gamut_remap_matrix;
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 25c50bcab9e9..a8dc3082e3e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -385,6 +385,8 @@ struct dc_cursor_position {
*/
bool enable;
+ /* Translate cursor x/y by the source rectangle for each plane. */
+ bool translate_by_source;
};
struct dc_cursor_mi_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0976e378659f..c279982947e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -2685,6 +2685,23 @@ void dce110_set_cursor_position(struct pipe_ctx *pipe_ctx)
.mirror = pipe_ctx->plane_state->horizontal_mirror
};
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some use cases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ pos_cpy.x += pipe_ctx->plane_state->src_rect.x;
+ pos_cpy.y += pipe_ctx->plane_state->src_rect.y;
+ }
+
if (pipe_ctx->plane_state->address.type
== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
pos_cpy.enable = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9cc3314966bd..b0357546471b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2004,6 +2004,12 @@ void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
adjust.temperature_matrix[i] =
pipe_ctx->stream->gamut_remap_matrix.matrix[i];
+ } else if (pipe_ctx->plane_state &&
+ pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) {
+ adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
+ for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
+ adjust.temperature_matrix[i] =
+ pipe_ctx->plane_state->gamut_remap_matrix.matrix[i];
}
pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
@@ -3015,12 +3021,50 @@ void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
int x_pos = pos_cpy.x;
int y_pos = pos_cpy.y;
- // translate cursor from stream space to plane space
+ /**
+ * DC cursor is stream space, HW cursor is plane space and drawn
+ * as part of the framebuffer.
+ *
+ * Cursor position can't be negative, but hotspot can be used to
+ * shift cursor out of the plane bounds. Hotspot must be smaller
+ * than the cursor size.
+ */
+
+ /**
+ * Translate cursor from stream space to plane space.
+ *
+ * If the cursor is scaled then we need to scale the position
+ * to be in the approximately correct place. We can't do anything
+ * about the actual size being incorrect, that's a limitation of
+ * the hardware.
+ */
x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
pipe_ctx->plane_state->dst_rect.width;
y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
pipe_ctx->plane_state->dst_rect.height;
+ /**
+ * If the cursor's source viewport is clipped then we need to
+ * translate the cursor to appear in the correct position on
+ * the screen.
+ *
+ * This translation isn't affected by scaling so it needs to be
+ * done *after* we adjust the position for the scale factor.
+ *
+ * This is only done by opt-in for now since there are still
+ * some usecases like tiled display that might enable the
+ * cursor on both streams while expecting dc to clip it.
+ */
+ if (pos_cpy.translate_by_source) {
+ x_pos += pipe_ctx->plane_state->src_rect.x;
+ y_pos += pipe_ctx->plane_state->src_rect.y;
+ }
+
+ /**
+ * If the position is negative then we need to add to the hotspot
+ * to shift the cursor outside the plane.
+ */
+
if (x_pos < 0) {
pos_cpy.x_hotspot -= x_pos;
x_pos = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 63acb8ff7462..17d96ec6acd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -343,6 +343,23 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab
}
/**
+ * optc1_set_timing_double_buffer() - DRR double buffering control
+ *
+ * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN,
+ * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers.
+ *
+ * Options: any time, start of frame, dp start of frame (range timing)
+ */
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode);
+}
+
+/**
* unblank_crtc
* Call ASIC Control Object to UnBlank CRTC.
*/
@@ -1353,6 +1370,7 @@ void optc1_clear_optc_underflow(struct timing_generator *optc)
void optc1_tg_init(struct timing_generator *optc)
{
optc1_set_blank_data_double_buffer(optc, true);
+ optc1_set_timing_double_buffer(optc, true);
optc1_clear_optc_underflow(optc);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index f277656d5464..9a459a8fe8a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -185,6 +185,7 @@ struct dcn_optc_registers {
SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\
SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\
SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\
@@ -643,6 +644,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc);
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable);
+void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable);
+
bool optc1_get_otg_active_size(struct timing_generator *optc,
uint32_t *otg_active_width,
uint32_t *otg_active_height);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 261bdc3a8218..07265ca7d28c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -552,7 +552,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -584,7 +585,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_pplib_clock_request = false,
.disable_pplib_wm_range = false,
.pplib_wm_report_mode = WM_REPORT_DEFAULT,
- .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
+ .pipe_split_policy = MPC_SPLIT_DYNAMIC,
.force_single_disp_pipe_split = true,
.disable_dcc = DCC_ENABLE,
.voltage_align_fclk = true,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index 233318260da4..22f421e82733 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1373,6 +1373,7 @@ static void dcn20_update_dchubp_dpp(
}
if (pipe_ctx->update_flags.bits.viewport ||
+ (context == dc->current_state && plane_state->update_flags.bits.position_change) ||
(context == dc->current_state && plane_state->update_flags.bits.scaling_change) ||
(context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index a67395208991..5cdbba0cd873 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1012,7 +1012,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -3342,7 +3343,7 @@ void dcn20_cap_soc_clocks(
void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb,
struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states)
{
- struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES];
+ struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES];
int i;
int num_calculated_states = 0;
int min_dcfclk = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 51b5910cd05f..b25484aa8222 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -300,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
- .num_states = 9
+ .num_states = 8
};
#ifndef MAX
@@ -838,7 +838,8 @@ static const struct dc_plane_cap plane_cap = {
.pixel_format_support = {
.argb8888 = true,
.nv12 = true,
- .fp16 = true
+ .fp16 = true,
+ .p010 = true
},
.max_upscale_factor = {
@@ -1376,21 +1377,8 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
unsigned int i, j, k;
int closest_clk_lvl;
- // diags does not retrieve proper values from SMU
- // cap states to 5 and make state 5 the max state
- if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) {
- dcn2_1_soc.num_states = 5;
-
- dcn2_1_soc.clock_limits[5].state = 5;
- dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0;
- dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0;
- dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0;
- dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0;
- dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0;
- dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0;
- dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0;
- dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0;
- } else {
+ // Default clock levels are used for diags, which may lead to overclocking.
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) {
dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
dcn2_1_soc.num_chans = bw_params->num_channels;
@@ -1403,16 +1391,16 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
/*
- * Other levels: find cloest DCN clocks that fit the given clock limit using dcfclk
- * as indicater
+ * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk
+ * as indicator
*/
closest_clk_lvl = -1;
/* index currently being filled */
k = 1;
for (i = 1; i < clk_table->num_entries; i++) {
- /* loop backwards, skip duplicate state, +1 because SMU has precision issue */
- for (j = dcn2_1_soc.num_states - 2; j >= k; j--) {
+ /* loop backwards, skip duplicate state */
+ for (j = dcn2_1_soc.num_states - 1; j >= k; j--) {
if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
closest_clk_lvl = j;
break;
@@ -1437,13 +1425,13 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
k++;
}
}
-
- /* duplicate last level */
- dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1];
- dcn2_1_soc.clock_limits[k].state = k;
- dcn2_1_soc.num_states = k + 1;
+ dcn2_1_soc.num_states = k;
}
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1];
+ dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states;
+
dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
index ea4cde952f4f..2a1983324629 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h
@@ -29,7 +29,7 @@
#define DC__PRESENT 1
#define DC__PRESENT__1 1
#define DC__NUM_DPP 4
-#define DC__VOLTAGE_STATES 7
+#define DC__VOLTAGE_STATES 9
#define DC__NUM_DPP__4 1
#define DC__NUM_DPP__0_PRESENT 1
#define DC__NUM_DPP__1_PRESENT 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index dfd3be452766..687010c17324 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -22,11 +22,12 @@
* Authors: AMD
*
*/
+
+#include "dc_features.h"
+
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
-#define MAX_CLOCK_LIMIT_STATES 9
-
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
typedef struct _vcs_dpi_ip_params_st ip_params_st;
@@ -68,7 +69,7 @@ struct _vcs_dpi_voltage_scaling_st {
};
struct _vcs_dpi_soc_bounding_box_st {
- struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 8a87d0ed90ae..2359e88d6029 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -136,6 +136,7 @@
#define RAVEN2_A0 0x81
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
+#define RENOIR_A0 0x91
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
@@ -171,8 +172,6 @@ enum {
#define ASICREV_IS_NAVI10_P(eChipRev) (eChipRev < NV_NAVI12_P_A0)
#define ASICREV_IS_NAVI12_P(eChipRev) ((eChipRev >= NV_NAVI12_P_A0) && (eChipRev < NV_NAVI14_M_A0))
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
-#define RENOIR_A0 0x91
-#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
@@ -183,6 +182,9 @@ enum {
#define DEVICE_ID_TEMASH_9839 0x9839
#define DEVICE_ID_TEMASH_983D 0x983D
+/* RENOIR */
+#define DEVICE_ID_RENOIR_1636 0x1636
+
/* Asic Family IDs for different asic family. */
#define FAMILY_CI 120 /* Sea Islands: Hawaii (P), Bonaire (M) */
#define FAMILY_KV 125 /* Fusion => Kaveri: Spectre, Spooky; Kabini: Kalindi */
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index 4e542826cd26..c33454a9e0b4 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -734,6 +734,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
{
struct core_freesync *core_freesync = NULL;
unsigned long long nominal_field_rate_in_uhz = 0;
+ unsigned long long rounded_nominal_in_uhz = 0;
unsigned int refresh_range = 0;
unsigned long long min_refresh_in_uhz = 0;
unsigned long long max_refresh_in_uhz = 0;
@@ -750,17 +751,20 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
- // Don't allow min > max
- if (min_refresh_in_uhz > max_refresh_in_uhz)
- min_refresh_in_uhz = max_refresh_in_uhz;
-
// Full range may be larger than current video timing, so cap at nominal
if (max_refresh_in_uhz > nominal_field_rate_in_uhz)
max_refresh_in_uhz = nominal_field_rate_in_uhz;
// Full range may be larger than current video timing, so cap at nominal
- if (min_refresh_in_uhz > nominal_field_rate_in_uhz)
- min_refresh_in_uhz = nominal_field_rate_in_uhz;
+ if (min_refresh_in_uhz > max_refresh_in_uhz)
+ min_refresh_in_uhz = max_refresh_in_uhz;
+
+ // If a monitor reports a max refresh of exactly 2x the min, enforce it on the nominal rate
+ rounded_nominal_in_uhz =
+ div_u64(nominal_field_rate_in_uhz + 50000, 100000) * 100000;
+ if (in_config->max_refresh_in_uhz == (2 * in_config->min_refresh_in_uhz) &&
+ in_config->max_refresh_in_uhz == rounded_nominal_in_uhz)
+ min_refresh_in_uhz = div_u64(nominal_field_rate_in_uhz, 2);
if (!vrr_settings_require_update(core_freesync,
in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz,
@@ -792,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
refresh_range = in_out_vrr->max_refresh_in_uhz -
in_out_vrr->min_refresh_in_uhz;
- in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
- 2 * in_out_vrr->min_duration_in_us;
- if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
- in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
-
in_out_vrr->supported = true;
}
@@ -804,9 +803,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
in_out_vrr->btr.btr_enabled = in_config->btr;
- if (in_out_vrr->max_refresh_in_uhz <
- 2 * in_out_vrr->min_refresh_in_uhz)
+ if (in_out_vrr->max_refresh_in_uhz < (2 * in_out_vrr->min_refresh_in_uhz))
in_out_vrr->btr.btr_enabled = false;
+ else {
+ in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us -
+ 2 * in_out_vrr->min_duration_in_us;
+ if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN)
+ in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN;
+ }
in_out_vrr->btr.btr_active = false;
in_out_vrr->btr.inserted_duration_in_us = 0;
@@ -1008,8 +1012,8 @@ unsigned long long mod_freesync_calc_nominal_field_rate(
unsigned int total = stream->timing.h_total * stream->timing.v_total;
/* Calculate nominal field rate for stream, rounded up to nearest integer */
- nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10;
- nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL;
+ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz;
+ nominal_field_rate_in_uhz *= 100000000ULL;
nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total);
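
The rewritten calculation stays in integer micro-Hertz end to end: pix_clk_100hz * 100000000 / (h_total * v_total), avoiding the old intermediate divide-by-10 that could truncate pixel clocks not aligned to 1 kHz. A sketch with a worked example (the numbers are an illustrative CEA 1080p60 timing):

    #include <linux/math64.h>

    /* Nominal field rate in uHz, as in the updated helper above. */
    static unsigned long long nominal_uhz(unsigned int pix_clk_100hz,
                                          unsigned int h_total,
                                          unsigned int v_total)
    {
            unsigned long long rate = pix_clk_100hz;
            unsigned int total = h_total * v_total;

            rate *= 100000000ULL;   /* 100 Hz units -> uHz numerator */
            return div_u64(rate, total);
    }

    /* Example: 148.5 MHz (1485000), 2200 x 1125 => 60000000 uHz = 60 Hz */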
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index e9fbd94f8635..cc1d3f470b99 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -328,8 +328,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
/* add display to connection */
hdcp->connection.link = *link;
*display_container = *display;
- status = mod_hdcp_add_display_to_topology(hdcp, display_container);
-
+ status = mod_hdcp_add_display_to_topology(hdcp, display->index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
@@ -375,7 +374,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
status = mod_hdcp_remove_display_from_topology(hdcp, index);
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- memset(display, 0, sizeof(struct mod_hdcp_display));
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
/* request authentication when connection is not reset */
if (current_state(hdcp) != HDCP_UNINITIALIZED)
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index 60ff1a0028ac..5cb4546be0ef 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* psp functions */
enum mod_hdcp_status mod_hdcp_add_display_to_topology(
- struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
+ struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
@@ -503,6 +503,11 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
+static inline uint8_t is_display_added(struct mod_hdcp_display *display)
+{
+ return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
+}
+
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -510,23 +515,34 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
if (is_display_active(&hdcp->displays[i]))
active_count++;
return active_count;
+}
+
+static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
+{
+ uint8_t added_count = 0;
+ uint8_t i;
+
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_added(&hdcp->displays[i]))
+ added_count++;
+ return added_count;
}
-static inline struct mod_hdcp_display *get_first_active_display(
+static inline struct mod_hdcp_display *get_first_added_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_active(&hdcp->displays[i])) {
+ if (is_display_added(&hdcp->displays[i])) {
display = &hdcp->displays[i];
break;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index f244b72e74e0..37c8c05497d6 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 549c113abcf7..491c00f48026 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 836e47954938..c2929815c3ee 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -54,7 +54,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- if (!display || !is_display_active(display))
+ if (!display || !is_display_added(display))
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -73,21 +73,25 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
return MOD_HDCP_STATUS_SUCCESS;
- }
-
-enum mod_hdcp_status mod_hdcp_add_display_to_topology(
- struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
+}
+
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp,
+ uint8_t index)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
struct mod_hdcp_link *link = &hdcp->connection.link;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
+ if (!display || is_display_added(display))
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
@@ -109,11 +113,10 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology(
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
- }
+ display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
return MOD_HDCP_STATUS_SUCCESS;
@@ -123,7 +126,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
@@ -176,7 +179,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
if (is_display_encryption_enabled(
&hdcp->displays[i])) {
hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE;
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_HDCP1_DISABLED_TRACE(hdcp,
hdcp->displays[i].index);
}
@@ -228,7 +231,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -298,7 +301,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].adjust.disable)
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->displays[i].adjust.disable)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -360,7 +364,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@@ -419,7 +423,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
if (is_display_encryption_enabled(
&hdcp->displays[i])) {
hdcp->displays[i].state =
- MOD_HDCP_DISPLAY_ACTIVE;
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
HDCP_HDCP2_DISABLED_TRACE(hdcp,
hdcp->displays[i].index);
}
@@ -658,7 +662,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_active_display(hdcp);
+ struct mod_hdcp_display *display = get_first_added_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -743,7 +747,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->displays[i].adjust.disable)
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
+ hdcp->displays[i].adjust.disable)
continue;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index eae9309cfb24..c088602bc1a0 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -117,6 +117,7 @@ enum mod_hdcp_operation_mode {
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
+ MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
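The new state deliberately sits between ACTIVE and ENCRYPTION_ENABLED so that every ">=" helper in hdcp.h keeps meaning "this state or any later one". A minimal sketch of the ordering contract, with shortened names:

enum state { INACTIVE = 0, ACTIVE, ACTIVE_AND_ADDED, ENCRYPTION_ENABLED };

static int is_active(enum state s) { return s >= ACTIVE; }
static int is_added(enum state s) { return s >= ACTIVE_AND_ADDED; }

/* An ENCRYPTION_ENABLED display counts as both active and added, while a
 * display whose DTM add failed stays merely ACTIVE and is skipped by the
 * get_first_added_display()/get_added_display_count() paths above. */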
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index c195575366a3..2a12614a12c2 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -1452,7 +1452,8 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr)
return -EINVAL;
- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
+ if (!(hwmgr->not_vf && amdgpu_dpm) ||
+ !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
mutex_lock(&hwmgr->smu_lock);
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index c6d3bef15320..1ef0923f7190 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -35,6 +35,7 @@
#include "arcturus_ppt.h"
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
+#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "amdgpu_xgmi.h"
#include <linux/i2c.h>
@@ -793,8 +794,21 @@ static int arcturus_force_clk_levels(struct smu_context *smu,
struct arcturus_dpm_table *dpm_table;
struct arcturus_single_dpm_table *single_dpm_table;
uint32_t soft_min_level, soft_max_level;
+ uint32_t smu_version;
int ret = 0;
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ if (smu_version >= 0x361200) {
+ pr_err("Forcing clock level is not supported with "
+ "54.18 and onwards SMU firmwares\n");
+ return -EOPNOTSUPP;
+ }
+
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
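The 0x361200 cutoff encodes the 54.18 firmware named in the message, assuming the usual major/minor/debug byte packing reported by smu_get_smc_version():

/* Sketch: decoding an assumed 0x00MMmmdd smu_version layout. */
static void decode_smu_version(unsigned int v)
{
	unsigned int major = (v >> 16) & 0xff;  /* 0x36 -> 54 */
	unsigned int minor = (v >> 8) & 0xff;   /* 0x12 -> 18 */
	unsigned int debug = v & 0xff;          /* 0x00 -> 0  */

	(void)major; (void)minor; (void)debug;  /* 0x361200 == 54.18.0 */
}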
@@ -1511,6 +1525,38 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
return 0;
}
+static int arcturus_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ if (smu_version >= 0x361200) {
+ pr_err("Forcing clock level is not supported with "
+ "54.18 and onwards SMU firmwares\n");
+ return -EOPNOTSUPP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return smu_v11_0_set_performance_level(smu, level);
+}
+
static void arcturus_dump_pptable(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -2210,6 +2256,18 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control)
i2c_del_adapter(control);
}
+static bool arcturus_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return !!(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK);
+}
+
static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
{
PPTable_t *pptable = smu->smu_table.driver_pptable;
@@ -2272,7 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.get_profiling_clk_mask = arcturus_get_profiling_clk_mask,
.get_power_profile_mode = arcturus_get_power_profile_mode,
.set_power_profile_mode = arcturus_set_power_profile_mode,
- .set_performance_level = smu_v11_0_set_performance_level,
+ .set_performance_level = arcturus_set_performance_level,
/* debug (internal used) */
.dump_pptable = arcturus_dump_pptable,
.get_power_limit = arcturus_get_power_limit,
@@ -2321,7 +2379,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+ .baco_is_support = arcturus_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
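The same strap check is duplicated below for navi10 and vega20; only the per-ASIC NBIO headers (nbio_7_4 vs nbio_2_3) keep it from being shared, since mmRCC_BIF_STRAP0 resolves to a different offset in each. If the register lookup were made ASIC-agnostic, each _is_baco_supported() would reduce to this shape (smu_v11_0_px_strap_set is a hypothetical name; the calls are the real ones used above):

static bool smu_v11_0_px_strap_set(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val;

	if (!smu_v11_0_baco_is_support(smu))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	return !!(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK);
}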
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 9c60b38ab53a..15030284b444 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -28,13 +28,15 @@
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
+#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
#include "smu_v11_0_ppsmc.h"
-#include "nbio/nbio_7_4_sh_mask.h"
+#include "nbio/nbio_2_3_offset.h"
+#include "nbio/nbio_2_3_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
@@ -1985,6 +1987,18 @@ static int navi10_setup_od_limits(struct smu_context *smu) {
return 0;
}
+static bool navi10_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return !!(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK);
+}
+
static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
OverDriveTable_t *od_table, *boot_od_table;
int ret = 0;
@@ -2361,7 +2375,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+ .baco_is_support = navi10_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 7bf52ecba01d..ff73a735b888 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -239,6 +239,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
DpmClocks_t *clk_table = smu->smu_table.clocks_table;
SmuMetrics_t metrics;
+ bool cur_value_match_level = false;
if (!clk_table || clk_type >= SMU_CLK_COUNT)
return -EINVAL;
@@ -297,8 +298,13 @@ static int renoir_print_clk_levels(struct smu_context *smu,
GET_DPM_CUR_FREQ(clk_table, clk_type, i, value);
size += sprintf(buf + size, "%d: %uMhz %s\n", i, value,
cur_value == value ? "*" : "");
+ if (cur_value == value)
+ cur_value_match_level = true;
}
+ if (!cur_value_match_level)
+ size += sprintf(buf + size, " %uMhz *\n", cur_value);
+
return size;
}
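With the fallback, the clock table printed to sysfs always carries a current-frequency marker, even when the firmware reports a value that matches no DPM level. Given the two format strings above, the output would look like this (frequencies are illustrative):

0: 200Mhz
1: 400Mhz
2: 800Mhz
 667Mhz *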
@@ -887,6 +893,17 @@ static int renoir_read_sensor(struct smu_context *smu,
return ret;
}
+static bool renoir_is_dpm_running(struct smu_context *smu)
+{
+ /*
+ * Until now, the pmfw hasn't exported the SMU feature mask
+ * interface to APU SKUs, so just force all features on at this
+ * early initialization stage.
+ */
+ return true;
+}
+
static const struct pptable_funcs renoir_ppt_funcs = {
.get_smu_msg_index = renoir_get_smu_msg_index,
.get_smu_clk_index = renoir_get_smu_clk_index,
@@ -927,6 +944,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.mode2_reset = smu_v12_0_mode2_reset,
.set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range,
.set_driver_table_location = smu_v12_0_set_driver_table_location,
+ .is_dpm_running = renoir_is_dpm_running,
};
void renoir_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
index 2a390ddd37dd..89cd6da118a3 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.h
@@ -37,7 +37,7 @@ extern void renoir_set_ppt_funcs(struct smu_context *smu);
freq = table->SocClocks[dpm_level].Freq; \
break; \
case SMU_MCLK: \
- freq = table->MemClocks[dpm_level].Freq; \
+ freq = table->FClocks[dpm_level].Freq; \
break; \
case SMU_DCEFCLK: \
freq = table->DcfClocks[dpm_level].Freq; \
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index d19e1d0d56c0..541c932a6005 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -42,8 +42,6 @@
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
-#include "asic_reg/nbio/nbio_7_4_offset.h"
-#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"
@@ -1662,9 +1660,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
- struct amdgpu_device *adev = smu->adev;
struct smu_baco_context *smu_baco = &smu->smu_baco;
- uint32_t val;
bool baco_support;
mutex_lock(&smu_baco->mutex);
@@ -1679,11 +1675,7 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
!smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
return false;
- val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
- if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
- return true;
-
- return false;
+ return true;
}
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
@@ -1700,11 +1692,9 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
-
struct smu_baco_context *smu_baco = &smu->smu_baco;
struct amdgpu_device *adev = smu->adev;
struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
- uint32_t bif_doorbell_intr_cntl;
uint32_t data;
int ret = 0;
@@ -1713,14 +1703,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
mutex_lock(&smu_baco->mutex);
- bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
-
if (state == SMU_BACO_STATE_ENTER) {
- bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
- BIF_DOORBELL_INT_CNTL,
- DOORBELL_INTERRUPT_DISABLE, 1);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
-
if (!ras || !ras->supported) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
data |= 0x80000000;
@@ -1735,11 +1718,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (ret)
goto out;
- bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
- BIF_DOORBELL_INT_CNTL,
- DOORBELL_INTERRUPT_DISABLE, 0);
- WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);
-
/* clear vbios scratch 6 and 7 for coming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 49ff3756bd9f..3f1044326dcb 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -35,6 +35,7 @@
#include "vega20_ppt.h"
#include "vega20_pptable.h"
#include "vega20_ppsmc.h"
+#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
@@ -3174,6 +3175,17 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
return ret;
}
+static bool vega20_is_baco_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t val;
+
+ if (!smu_v11_0_baco_is_support(smu))
+ return false;
+
+ val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
+ return !!(val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK);
+}
+
static const struct pptable_funcs vega20_ppt_funcs = {
.tables_init = vega20_tables_init,
@@ -3262,7 +3274,7 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.register_irq_handler = smu_v11_0_register_irq_handler,
.set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme,
.get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc,
- .baco_is_support= smu_v11_0_baco_is_support,
+ .baco_is_support = vega20_is_baco_supported,
.baco_get_state = smu_v11_0_baco_get_state,
.baco_set_state = smu_v11_0_baco_set_state,
.baco_enter = smu_v11_0_baco_enter,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 9ded2cef57dd..76736fb8ed94 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1652,8 +1652,7 @@ static ssize_t analogix_dpaux_transfer(struct drm_dp_aux *aux,
}
struct analogix_dp_device *
-analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
- struct analogix_dp_plat_data *plat_data)
+analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data)
{
struct platform_device *pdev = to_platform_device(dev);
struct analogix_dp_device *dp;
@@ -1756,22 +1755,30 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
irq_flags, "analogix-dp", dp);
if (ret) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto err_disable_pm_runtime;
+ return ERR_PTR(ret);
}
disable_irq(dp->irq);
+ return dp;
+}
+EXPORT_SYMBOL_GPL(analogix_dp_probe);
+
+int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
+{
+ int ret;
+
dp->drm_dev = drm_dev;
dp->encoder = dp->plat_data->encoder;
dp->aux.name = "DP-AUX";
dp->aux.transfer = analogix_dpaux_transfer;
- dp->aux.dev = &pdev->dev;
+ dp->aux.dev = dp->dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret)
- return ERR_PTR(ret);
+ return ret;
- pm_runtime_enable(dev);
+ pm_runtime_enable(dp->dev);
ret = analogix_dp_create_bridge(drm_dev, dp);
if (ret) {
@@ -1779,13 +1786,12 @@ analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}
- return dp;
+ return 0;
err_disable_pm_runtime:
+ pm_runtime_disable(dp->dev);
- pm_runtime_disable(dev);
-
- return ERR_PTR(ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(analogix_dp_bind);
@@ -1802,10 +1808,15 @@ void analogix_dp_unbind(struct analogix_dp_device *dp)
drm_dp_aux_unregister(&dp->aux);
pm_runtime_disable(dp->dev);
- clk_disable_unprepare(dp->clock);
}
EXPORT_SYMBOL_GPL(analogix_dp_unbind);
+void analogix_dp_remove(struct analogix_dp_device *dp)
+{
+ clk_disable_unprepare(dp->clock);
+}
+EXPORT_SYMBOL_GPL(analogix_dp_remove);
+
#ifdef CONFIG_PM
int analogix_dp_suspend(struct analogix_dp_device *dp)
{
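Splitting bind into probe/bind (and teardown into unbind/remove) moves resource acquisition to platform-driver probe time, before any DRM device exists. The resulting call order, as the exynos conversion below uses it (a sketch of the sequence, not new API):

/* platform probe:   dp = analogix_dp_probe(dev, &plat_data);  clocks + IRQ      */
/* component bind:   analogix_dp_bind(dp, drm_dev);            AUX, bridge, runpm */
/* component unbind: analogix_dp_unbind(dp);                   AUX + runpm undone */
/* platform remove:  analogix_dp_remove(dp);                   clock teardown     */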
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index bc6e208949e8..8981abe8b7c9 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -45,7 +45,6 @@
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
-#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
@@ -367,11 +366,6 @@ next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
enum drm_mm_insert_mode mode)
{
- /* Searching is slow; check if we ran out of time/patience */
- cond_resched();
- if (fatal_signal_pending(current))
- return NULL;
-
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -563,7 +557,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
return 0;
}
- return signal_pending(current) ? -ERESTARTSYS : -ENOSPC;
+ return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 1de2cde2277c..282774e469ac 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -962,27 +962,40 @@ int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
unsigned count;
struct scatterlist *sg;
struct page *page;
- u32 len, index;
+ u32 page_len, page_index;
dma_addr_t addr;
+ u32 dma_len, dma_index;
- index = 0;
+ /*
+ * Scatterlist elements contain both pages and DMA addresses, but
+ * one should not assume a 1:1 relation between them. The sg->length is
+ * the size of the physical memory chunk described by the sg->page,
+ * while sg_dma_len(sg) is the size of the DMA (IO virtual) chunk
+ * described by the sg_dma_address(sg).
+ */
+ page_index = 0;
+ dma_index = 0;
for_each_sg(sgt->sgl, sg, sgt->nents, count) {
- len = sg_dma_len(sg);
+ page_len = sg->length;
page = sg_page(sg);
+ dma_len = sg_dma_len(sg);
addr = sg_dma_address(sg);
- while (len > 0) {
- if (WARN_ON(index >= max_entries))
+ while (pages && page_len > 0) {
+ if (WARN_ON(page_index >= max_entries))
return -1;
- if (pages)
- pages[index] = page;
- if (addrs)
- addrs[index] = addr;
-
+ pages[page_index] = page;
page++;
+ page_len -= PAGE_SIZE;
+ page_index++;
+ }
+ while (addrs && dma_len > 0) {
+ if (WARN_ON(dma_index >= max_entries))
+ return -1;
+ addrs[dma_index] = addr;
addr += PAGE_SIZE;
- len -= PAGE_SIZE;
- index++;
+ dma_len -= PAGE_SIZE;
+ dma_index++;
}
}
return 0;
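The split loops matter once an IOMMU coalesces the mapping, because the physical and DMA sides of the table then have different shapes. A hypothetical four-page table makes that concrete:

/* Four discontiguous pages, IOMMU-coalesced into one IOVA range:
 *
 *   element   sg->length    sg_dma_len(sg)
 *      0      PAGE_SIZE     4 * PAGE_SIZE   (covers the whole IOVA range)
 *      1..3   PAGE_SIZE     0               (folded into element 0)
 *
 * The old lock-step loop walked both arrays by sg_dma_len(), so element 0
 * emitted four supposedly contiguous pages and elements 1..3 emitted none;
 * the independent loops fill four correct page entries and four IOVA
 * entries. */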
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index d23d3502ca91..5ee090691390 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -159,15 +159,8 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
int ret;
- dp->dev = dev;
dp->drm_dev = drm_dev;
- dp->plat_data.dev_type = EXYNOS_DP;
- dp->plat_data.power_on_start = exynos_dp_poweron;
- dp->plat_data.power_off = exynos_dp_poweroff;
- dp->plat_data.attach = exynos_dp_bridge_attach;
- dp->plat_data.get_modes = exynos_dp_get_modes;
-
if (!dp->plat_data.panel && !dp->ptn_bridge) {
ret = exynos_dp_dt_parse_panel(dp);
if (ret)
@@ -185,13 +178,11 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
dp->plat_data.encoder = encoder;
- dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
- if (IS_ERR(dp->adp)) {
+ ret = analogix_dp_bind(dp->adp, dp->drm_dev);
+ if (ret)
dp->encoder.funcs->destroy(&dp->encoder);
- return PTR_ERR(dp->adp);
- }
- return 0;
+ return ret;
}
static void exynos_dp_unbind(struct device *dev, struct device *master,
@@ -222,6 +213,7 @@ static int exynos_dp_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ dp->dev = dev;
/*
* We just use the drvdata until driver run into component
* add function, and then we would set drvdata to null, so
@@ -247,16 +239,29 @@ static int exynos_dp_probe(struct platform_device *pdev)
/* The remote port can be either a panel or a bridge */
dp->plat_data.panel = panel;
+ dp->plat_data.dev_type = EXYNOS_DP;
+ dp->plat_data.power_on_start = exynos_dp_poweron;
+ dp->plat_data.power_off = exynos_dp_poweroff;
+ dp->plat_data.attach = exynos_dp_bridge_attach;
+ dp->plat_data.get_modes = exynos_dp_get_modes;
dp->plat_data.skip_connector = !!bridge;
+
dp->ptn_bridge = bridge;
out:
+ dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+ if (IS_ERR(dp->adp))
+ return PTR_ERR(dp->adp);
+
return component_add(&pdev->dev, &exynos_dp_ops);
}
static int exynos_dp_remove(struct platform_device *pdev)
{
+ struct exynos_dp_device *dp = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &exynos_dp_ops);
+ analogix_dp_remove(dp->adp);
return 0;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 9f887a86e555..6cd1f6253814 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,9 +28,6 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
-subdir-ccflags-y += \
- $(call as-instr,movntdqa (%eax)$(comma)%xmm0,-DCONFIG_AS_MOVNTDQA)
-
subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 73d0f4648c06..2c617c98db3a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -947,7 +947,8 @@ static const struct cnl_ddi_buf_trans *
ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
- if (type == INTEL_OUTPUT_DP && rate > 270000) {
+ if (type != INTEL_OUTPUT_HDMI && type != INTEL_OUTPUT_EDP &&
+ rate > 270000) {
*n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
return ehl_combo_phy_ddi_translations_hbr2_hbr3;
}
@@ -959,7 +960,7 @@ static const struct cnl_ddi_buf_trans *
tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
int *n_entries)
{
- if (type != INTEL_OUTPUT_DP) {
+ if (type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_EDP) {
return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
} else if (rate > 270000) {
*n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
@@ -1869,7 +1870,11 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
return;
dig_port = enc_to_dig_port(encoder);
- intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain);
+
+ if (!intel_phy_is_tc(dev_priv, phy) ||
+ dig_port->tc_mode != TC_PORT_TBT_ALT)
+ intel_display_power_get(dev_priv,
+ dig_port->ddi_io_power_domain);
/*
* AUX power is only needed for (e)DP mode, and for HDMI mode on TC
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 36d069504836..b7440f06c5e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -896,11 +896,13 @@ static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
static void reloc_gpu_flush(struct reloc_cache *cache)
{
- GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
+ struct drm_i915_gem_object *obj = cache->rq->batch->obj;
+
+ GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
- i915_gem_object_unpin_map(cache->rq->batch->obj);
+ __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1477,10 +1479,8 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
* can read from this userspace address.
*/
offset = gen8_canonical_addr(offset & ~UPDATE);
- if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {
- remain = -EFAULT;
- goto out;
- }
+ __put_user(offset,
+ &urelocs[r - stack].presumed_offset);
}
} while (r++, --count);
urelocs += ARRAY_SIZE(stack);
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index aed498a0d032..4c5a209cb669 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -191,10 +191,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
enum i915_cache_level level,
u32 flags)
{
- struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- struct sgt_iter sgt_iter;
- gen8_pte_t __iomem *gtt_entries;
const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
+ gen8_pte_t __iomem *gte;
+ gen8_pte_t __iomem *end;
+ struct sgt_iter iter;
dma_addr_t addr;
/*
@@ -202,10 +203,17 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
* not to allow the user to override access to a read only page.
*/
- gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
- gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(gtt_entries++, pte_encode | addr);
+ gte = (gen8_pte_t __iomem *)ggtt->gsm;
+ gte += vma->node.start / I915_GTT_PAGE_SIZE;
+ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
+ for_each_sgt_daddr(addr, iter, vma->pages)
+ gen8_set_pte(gte++, pte_encode | addr);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ gen8_set_pte(gte++, vm->scratch[0].encode);
/*
* We want to flush the TLBs only after we're certain all the PTE
@@ -241,13 +249,22 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
u32 flags)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
- gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
- unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
+ gen6_pte_t __iomem *gte;
+ gen6_pte_t __iomem *end;
struct sgt_iter iter;
dma_addr_t addr;
+ gte = (gen6_pte_t __iomem *)ggtt->gsm;
+ gte += vma->node.start / I915_GTT_PAGE_SIZE;
+ end = gte + vma->node.size / I915_GTT_PAGE_SIZE;
+
for_each_sgt_daddr(addr, iter, vma->pages)
- iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
+ iowrite32(vm->pte_encode(addr, level, flags), gte++);
+ GEM_BUG_ON(gte > end);
+
+ /* Fill the allocated but "unused" space beyond the end of the buffer */
+ while (gte < end)
+ iowrite32(vm->scratch[0].encode, gte++);
/*
* We want to flush the TLBs only after we're certain all the PTE
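Padding the tail with scratch PTEs matters because a node can own more PTE slots than it has backing pages (alignment or guard padding). A sketch of the invariant the new code establishes, for a node of N slots backed by M <= N pages:

/* after the copy loop:   gte - start == M   (PTEs written from the sgt)
 * node size:             end - start == N   (slots owned by the node)
 * the fill loop points slots M..N-1 at vm->scratch[0], so stray reads hit
 * a harmless scratch page instead of a stale translation left by the
 * previous owner of that GGTT range. */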
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 9e065ad0658f..a3cc080a46c6 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -164,6 +164,7 @@ struct decode_info {
#define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
#define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
#define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
+#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03)
#define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
@@ -967,18 +968,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- u32 valid_len = CMD_LEN(1);
-
- /*
- * Official intel docs are somewhat sloppy , check the definition of
- * MI_LOAD_REGISTER_IMM.
- */
- #define MAX_VALID_LEN 127
- if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) {
- gvt_err("len is not valid: len=%u valid_len=%u\n",
- cmd_len, valid_len);
- return -EFAULT;
- }
for (i = 1; i < cmd_len; i += 2) {
if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
@@ -2485,6 +2474,9 @@ static const struct cmd_info cmd_info[] = {
{"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
ADDR_FIX_1(1), 8, NULL},
+ {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS,
+ F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL},
+
{"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
{"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6e5c9885d9fe..a83df2f84eb9 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -221,7 +221,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -241,7 +241,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -261,7 +261,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 0182e2a5acff..2faf50e1b051 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -462,11 +462,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-/* ascendingly sorted */
+/* sorted in ascending order */
static i915_reg_t force_nonpriv_white_list[] = {
+ _MMIO(0xd80),
GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec)
GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248)
- PS_INVOCATION_COUNT,//_MMIO(0x2348)
+ CL_PRIMITIVES_COUNT, //_MMIO(0x2340)
+ PS_INVOCATION_COUNT, //_MMIO(0x2348)
+ PS_DEPTH_COUNT, //_MMIO(0x2350)
GEN8_CS_CHICKEN1,//_MMIO(0x2580)
_MMIO(0x2690),
_MMIO(0x2694),
@@ -491,6 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
_MMIO(0xe18c),
_MMIO(0xe48c),
_MMIO(0xe5f4),
+ _MMIO(0x64844),
};
/* a simple bsearch */
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1c95bf8cbed0..cb11c3184085 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -296,8 +296,8 @@ shadow_context_descriptor_update(struct intel_context *ce,
* Update bits 0-11 of the context descriptor which includes flags
* like GEN8_CTX_* cached in desc_template
*/
- desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
- desc |= workload->ctx_desc.addressing_mode <<
+ desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
+ desc |= (u64)workload->ctx_desc.addressing_mode <<
GEN8_CTX_ADDRESSING_MODE_SHIFT;
ce->lrc_desc = desc;
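The ull suffix and cast matter because both 0x3 and addressing_mode are 32-bit: with a large enough GEN8_CTX_ADDRESSING_MODE_SHIFT the intermediate is truncated (or hits undefined behaviour at bit 31) before it ever widens to the 64-bit descriptor. A self-contained illustration with an assumed shift of 32:

#include <stdint.h>

#define SHIFT 32 /* assumed value, purely for illustration */

static uint64_t clear_field_wrong(uint64_t desc)
{
	return desc & ~(0x3 << SHIFT);    /* int shifted by 32: UB/truncated */
}

static uint64_t clear_field_right(uint64_t desc)
{
	return desc & ~(0x3ull << SHIFT); /* mask built in 64 bits, as above */
}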
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index fdd550405fd3..7b3b83bd5ab8 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -35,7 +35,6 @@
static DEFINE_STATIC_KEY_FALSE(has_movntdqa);
-#ifdef CONFIG_AS_MOVNTDQA
static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len)
{
kernel_fpu_begin();
@@ -93,10 +92,6 @@ static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len)
kernel_fpu_end();
}
-#else
-static void __memcpy_ntdqa(void *dst, const void *src, unsigned long len) {}
-static void __memcpy_ntdqu(void *dst, const void *src, unsigned long len) {}
-#endif
/**
* i915_memcpy_from_wc: perform an accelerated *aligned* read from WC
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6650f478b226..47b989834af1 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
if (config->platform.iommu) {
iommu_dev = &pdev->dev;
- if (!iommu_dev->iommu_fwspec)
+ if (!dev_iommu_fwspec_get(iommu_dev))
iommu_dev = iommu_dev->parent;
aspace = msm_gem_address_space_create(iommu_dev,
diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c
index e8eef88a8382..ffdd447d8706 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/dac.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c
@@ -35,7 +35,8 @@
#include <subdev/bios/gpio.h>
#include <subdev/gpio.h>
-#include <subdev/timer.h>
+
+#include <nvif/timer.h>
int nv04_dac_output_offset(struct drm_encoder *encoder)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 3fdfafa8b0ad..b674d68ef28a 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -26,6 +26,7 @@
#include "hw.h"
#include <subdev/bios/pll.h>
+#include <nvif/timer.h>
#define CHIPSET_NFORCE 0x01a0
#define CHIPSET_NFORCE2 0x01f0
diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
index 00a85f1e1a4a..ee782151d332 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c
@@ -23,6 +23,7 @@
#include <nvif/cl507c.h>
#include <nvif/event.h>
+#include <nvif/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_fourcc.h>
diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
index e7fcfa6e6467..c5152c39c684 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c
@@ -23,6 +23,7 @@
#include "head.h"
#include <nvif/cl507d.h>
+#include <nvif/timer.h>
#include "nouveau_bo.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
index 3b36dc8d36b2..c03cb987856b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c
@@ -24,6 +24,8 @@
#include <nouveau_bo.h>
+#include <nvif/timer.h>
+
void
corec37d_wndw_owner(struct nv50_core *core)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 397143b639c6..8c5cf096f69b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -24,21 +24,36 @@
#include "head.h"
#include <nvif/cl507a.h>
+#include <nvif/timer.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
+bool
+curs507a_space(struct nv50_wndw *wndw)
+{
+ nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2,
+ if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4)
+ return true;
+ );
+ WARN_ON(1);
+ return false;
+}
+
static void
curs507a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
+ if (curs507a_space(wndw))
+ nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000);
}
static void
curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
- asyw->point.x);
+ if (curs507a_space(wndw)) {
+ nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 |
+ asyw->point.x);
+ }
}
const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
index 23fb29d41efe..96dff4f09f57 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c
@@ -25,14 +25,17 @@
static void
cursc37a_update(struct nv50_wndw *wndw, u32 *interlock)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
+ if (curs507a_space(wndw))
+ nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001);
}
static void
cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
- nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
- asyw->point.x);
+ if (curs507a_space(wndw)) {
+ nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 |
+ asyw->point.x);
+ }
}
static const struct nv50_wimm_func
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 4d1c58468dbc..6be9df1820c5 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -45,6 +45,7 @@
#include <nvif/cl5070.h>
#include <nvif/cl507d.h>
#include <nvif/event.h>
+#include <nvif/timer.h>
#include "nouveau_drv.h"
#include "nouveau_dma.h"
diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
index 2e68fc736fe1..4f7ce57f2036 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c
@@ -24,6 +24,8 @@
#include <nouveau_bo.h>
+#include <nvif/timer.h>
+
static void
ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
{
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
index caf397475918..a7412b9d3a98 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h
@@ -97,6 +97,7 @@ struct nv50_wimm_func {
};
extern const struct nv50_wimm_func curs507a;
+bool curs507a_space(struct nv50_wndw *);
int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32,
struct nv50_wndw **);
diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h
index 25d969dcf67d..c2a572c67a76 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/device.h
@@ -23,27 +23,6 @@ int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32,
void nvif_device_fini(struct nvif_device *);
u64 nvif_device_time(struct nvif_device *);
-/* Delay based on GPU time (ie. PTIMER).
- *
- * Will return -ETIMEDOUT unless the loop was terminated with 'break',
- * where it will return the number of nanoseconds taken instead.
- */
-#define nvif_nsec(d,n,cond...) ({ \
- struct nvif_device *_device = (d); \
- u64 _nsecs = (n), _time0 = nvif_device_time(_device); \
- s64 _taken = 0; \
- \
- do { \
- cond \
- } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\
- \
- if (_taken >= _nsecs) \
- _taken = -ETIMEDOUT; \
- _taken; \
-})
-#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
-#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
-
/*XXX*/
#include <subdev/bios.h>
#include <subdev/fb.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvif/timer.h b/drivers/gpu/drm/nouveau/include/nvif/timer.h
new file mode 100644
index 000000000000..57587a985c4b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/include/nvif/timer.h
@@ -0,0 +1,35 @@
+#ifndef __NVIF_TIMER_H__
+#define __NVIF_TIMER_H__
+#include <nvif/os.h>
+
+struct nvif_timer_wait {
+ struct nvif_device *device;
+ u64 limit;
+ u64 time0;
+ u64 time1;
+ int reads;
+};
+
+void nvif_timer_wait_init(struct nvif_device *, u64 nsec,
+ struct nvif_timer_wait *);
+s64 nvif_timer_wait_test(struct nvif_timer_wait *);
+
+/* Delay based on GPU time (ie. PTIMER).
+ *
+ * Returns -ETIMEDOUT unless the loop was terminated with 'break', in
+ * which case it returns the number of nanoseconds taken instead.
+ */
+#define nvif_nsec(d,n,cond...) ({ \
+ struct nvif_timer_wait _wait; \
+ s64 _taken = 0; \
+ \
+ nvif_timer_wait_init((d), (n), &_wait); \
+ do { \
+ cond \
+ } while ((_taken = nvif_timer_wait_test(&_wait)) >= 0); \
+ \
+ _taken; \
+})
+#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond)
+#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond)
+#endif
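curs507a.c below shows the macro in action; a condensed usage sketch (the device and object handles, and the register polled, are assumed):

static bool wait_for_free_slots(struct nvif_device *device,
				struct nvif_object *object)
{
	/* Poll a hypothetical status register for up to 2ms. 'break' makes
	 * nvif_msec() return the nanoseconds taken; otherwise -ETIMEDOUT. */
	s64 taken = nvif_msec(device, 2,
		if (nvif_rd32(object, 0x0008) >= 4)
			break;
	);

	return taken >= 0;
}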
diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h
index 03c11826b693..6825574d93c2 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/user.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/user.h
@@ -10,6 +10,7 @@ struct nvif_user {
struct nvif_user_func {
void (*doorbell)(struct nvif_user *, u32 token);
+ u64 (*time)(struct nvif_user *);
};
int nvif_user_init(struct nvif_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 2b4b21b02e40..c40f127de3d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1494,8 +1494,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
ret = nvif_object_map_handle(&mem->mem.object,
&args, argc,
&handle, &length);
- if (ret != 1)
- return ret ? ret : -EINVAL;
+ if (ret != 1) {
+ if (WARN_ON(ret == 0))
+ return -EINVAL;
+ if (ret == -ENOSPC)
+ return -EAGAIN;
+ return ret;
+ }
reg->bus.base = 0;
reg->bus.offset = handle;
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 7dfbbbc1beea..15a3d40edf02 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -222,22 +222,18 @@ nouveau_drm_debugfs_init(struct drm_minor *minor)
{
struct nouveau_drm *drm = nouveau_drm(minor->dev);
struct dentry *dentry;
- int i, ret;
+ int i;
for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) {
- dentry = debugfs_create_file(nouveau_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
- minor->debugfs_root, minor->dev,
- nouveau_debugfs_files[i].fops);
- if (!dentry)
- return -ENOMEM;
+ debugfs_create_file(nouveau_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root, minor->dev,
+ nouveau_debugfs_files[i].fops);
}
- ret = drm_debugfs_create_files(nouveau_debugfs_list,
- NOUVEAU_DEBUGFS_ENTRIES,
- minor->debugfs_root, minor);
- if (ret)
- return ret;
+ drm_debugfs_create_files(nouveau_debugfs_list,
+ NOUVEAU_DEBUGFS_ENTRIES,
+ minor->debugfs_root, minor);
/* Set the size of the vbios since we know it, and it's confusing to
* userspace if it wants to seek() but the file has a length of 0
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 6b1629c14dd7..ca4087f5a15b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev)
kfree(drm);
}
+/*
+ * On some Intel PCIe bridge controllers, a D0 -> D3hot -> D3cold -> D0
+ * sequence causes Nvidia GPUs to not reappear. Skipping the intermediate
+ * D3hot step seems to make it work again. This is probably because putting
+ * the GPU into D3hot first violates an expectation of the AML code that is
+ * invoked afterwards.
+ *
+ * This leads to various manifestations of this issue:
+ * - AML code execution to power on the GPU hits an infinite loop (as the
+ * code waits on device memory to change).
+ * - kernel crashes, as all PCI reads return -1, which most code isn't able
+ * to handle well enough.
+ *
+ * In all cases dmesg will contain at least one line like this:
+ * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3'
+ * followed by a lot of nouveau timeouts.
+ *
+ * Deeper down, the \_SB.PCI0.PEG0.PG00._OFF code writes bit 0x80 to the
+ * undocumented PCI config space register 0x248 of the Intel PCIe bridge
+ * controller (0x1901) in order to change the state of the PCIe link between
+ * the PCIe port and the GPU. There are alternative code paths using other
+ * registers, which seem to work fine (executed pre Windows 8):
+ * - 0xbc bit 0x20 (publicly available documentation claims 'reserved')
+ * - 0xb0 bit 0x10 (link disable)
+ * Changing the conditions inside the firmware by poking the relevant
+ * addresses does resolve the issue, but those addresses appear to be
+ * ACPI-private memory rather than device-accessible memory, so there is
+ * no portable way of changing the conditions.
+ * On an XPS 9560 that means bits [0,3] on \CPEX need to be cleared.
+ *
+ * The only systems where this behavior can be seen are hybrid graphics laptops
+ * with a secondary Nvidia Maxwell, Pascal or Turing GPU. It's unclear whether
+ * this issue only occurs in combination with the listed Intel PCIe bridge
+ * controllers and the mentioned GPUs, or with other devices as well.
+ *
+ * Documentation on the PCIe bridge controller can be found in the
+ * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2"
+ * Section "12 PCI Express* Controller (x16) Registers"
+ */
+
+static void quirk_broken_nv_runpm(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct pci_dev *bridge = pci_upstream_bridge(pdev);
+
+ if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL)
+ return;
+
+ switch (bridge->device) {
+ case 0x1901:
+ drm->old_pm_cap = pdev->pm_cap;
+ pdev->pm_cap = 0;
+ NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
+ break;
+ }
+}
+
static int nouveau_drm_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
@@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
if (ret)
goto fail_drm_dev_init;
+ quirk_broken_nv_runpm(pdev);
return 0;
fail_drm_dev_init:
@@ -734,7 +793,11 @@ static void
nouveau_drm_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ /* revert our workaround */
+ if (drm->old_pm_cap)
+ pdev->pm_cap = drm->old_pm_cap;
nouveau_drm_device_remove(dev);
pci_disable_device(pdev);
}
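Zeroing pdev->pm_cap works because the PCI core keys all D-state transitions off that capability offset; with it cleared, attempts to leave D0 are refused and the problematic D3hot step never happens. A simplified sketch of the assumed core behaviour (not code from this patch):

/* assumed shape of the PCI core's state-setting path */
static int set_power_state_sketch(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return state == PCI_D0 ? 0 : -EIO; /* no PM cap: stay in D0 */

	/* ... normal PMCSR read/modify/write path ... */
	return 0;
}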
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c2c332fbde97..2a6519737800 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -140,6 +140,8 @@ struct nouveau_drm {
struct list_head clients;
+ u8 old_pm_cap;
+
struct {
struct agp_bridge_data *bridge;
u32 base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index e3797b2d4d17..645fedd77e21 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -171,6 +171,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
mm = get_task_mm(current);
down_read(&mm->mmap_sem);
+ if (!cli->svm.svmm) {
+ up_read(&mm->mmap_sem);
+ return -EINVAL;
+ }
+
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
@@ -179,6 +184,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data,
if (!vma)
break;
+ addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
@@ -656,9 +662,6 @@ nouveau_svm_fault(struct nvif_notify *notify)
limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
if (start < svmm->unmanaged.limit)
limit = min_t(u64, limit, svmm->unmanaged.start);
- else
- if (limit > svmm->unmanaged.start)
- start = max_t(u64, start, svmm->unmanaged.limit);
SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);
mm = svmm->notifier.mm;
diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild
index 50d583d63807..f194d354c1f5 100644
--- a/drivers/gpu/drm/nouveau/nvif/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvif/Kbuild
@@ -8,6 +8,7 @@ nvif-y += nvif/fifo.o
nvif-y += nvif/mem.o
nvif-y += nvif/mmu.o
nvif-y += nvif/notify.o
+nvif-y += nvif/timer.o
nvif-y += nvif/vmm.o
# Usermode classes
diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c
index 1ec101ba3b42..0e92db44bbc8 100644
--- a/drivers/gpu/drm/nouveau/nvif/device.c
+++ b/drivers/gpu/drm/nouveau/nvif/device.c
@@ -27,11 +27,15 @@
u64
nvif_device_time(struct nvif_device *device)
{
- struct nv_device_time_v0 args = {};
- int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
- &args, sizeof(args));
- WARN_ON_ONCE(ret != 0);
- return args.time;
+ if (!device->user.func) {
+ struct nv_device_time_v0 args = {};
+ int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME,
+ &args, sizeof(args));
+ WARN_ON_ONCE(ret != 0);
+ return args.time;
+ }
+
+ return device->user.func->time(&device->user);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvif/timer.c b/drivers/gpu/drm/nouveau/nvif/timer.c
new file mode 100644
index 000000000000..602c1a258d10
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvif/timer.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright 2020 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <nvif/timer.h>
+#include <nvif/device.h>
+
+s64
+nvif_timer_wait_test(struct nvif_timer_wait *wait)
+{
+ u64 time = nvif_device_time(wait->device);
+
+ if (wait->reads == 0) {
+ wait->time0 = time;
+ wait->time1 = time;
+ }
+
+ if (wait->time1 == time) {
+ if (WARN_ON(wait->reads++ == 16))
+ return -ETIMEDOUT;
+ } else {
+ wait->time1 = time;
+ wait->reads = 1;
+ }
+
+ if (wait->time1 - wait->time0 > wait->limit)
+ return -ETIMEDOUT;
+
+ return wait->time1 - wait->time0;
+}
+
+void
+nvif_timer_wait_init(struct nvif_device *device, u64 nsec,
+ struct nvif_timer_wait *wait)
+{
+ wait->device = device;
+ wait->limit = nsec;
+ wait->reads = 0;
+}
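
The helper above fails in two ways: when elapsed time exceeds the caller's limit, or when sixteen consecutive reads return the same timestamp and the timer looks stuck. A standalone userspace sketch of the same logic (the demo_* names are illustrative, not part of the nvif API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct demo_wait { uint64_t time0, time1, limit; int reads; };

static uint64_t demo_now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static int64_t demo_wait_test(struct demo_wait *w)
{
	uint64_t t = demo_now_ns();

	if (w->reads == 0)
		w->time0 = w->time1 = t;

	if (w->time1 == t) {
		if (w->reads++ == 16)
			return -ETIMEDOUT;	/* timer looks stuck */
	} else {
		w->time1 = t;
		w->reads = 1;
	}

	if (w->time1 - w->time0 > w->limit)
		return -ETIMEDOUT;		/* limit exceeded */

	return w->time1 - w->time0;
}

int main(void)
{
	struct demo_wait w = { .limit = 1000000 };	/* 1 ms */

	while (demo_wait_test(&w) >= 0)
		;	/* busy-poll until the limit trips */
	puts("timed out after ~1ms, as expected");
	return 0;
}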
diff --git a/drivers/gpu/drm/nouveau/nvif/userc361.c b/drivers/gpu/drm/nouveau/nvif/userc361.c
index 19f9958e7e01..1116f871b272 100644
--- a/drivers/gpu/drm/nouveau/nvif/userc361.c
+++ b/drivers/gpu/drm/nouveau/nvif/userc361.c
@@ -21,6 +21,19 @@
*/
#include <nvif/user.h>
+static u64
+nvif_userc361_time(struct nvif_user *user)
+{
+ u32 hi, lo;
+
+ do {
+ hi = nvif_rd32(&user->object, 0x084);
+ lo = nvif_rd32(&user->object, 0x080);
+ } while (hi != nvif_rd32(&user->object, 0x084));
+
+ return ((u64)hi << 32 | lo);
+}
+
static void
nvif_userc361_doorbell(struct nvif_user *user, u32 token)
{
@@ -30,4 +43,5 @@ nvif_userc361_doorbell(struct nvif_user *user, u32 token)
const struct nvif_user_func
nvif_userc361 = {
.doorbell = nvif_userc361_doorbell,
+ .time = nvif_userc361_time,
};
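
The hi/lo/hi sequence above guards against a torn read of a 64-bit counter split across two 32-bit registers: if the high word changes between the reads, the pair is fetched again. A standalone sketch of the technique, with volatile variables standing in for the MMIO words:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t reg_hi, reg_lo;	/* stand-ins for the two MMIO words */

static uint64_t read_counter64(void)
{
	uint32_t hi, lo;

	do {
		hi = reg_hi;
		lo = reg_lo;
	} while (hi != reg_hi);	/* retry if the high word rolled over mid-read */

	return (uint64_t)hi << 32 | lo;
}

int main(void)
{
	reg_hi = 0x1;
	reg_lo = 0xffffffff;
	printf("%#llx\n", (unsigned long long)read_counter64());
	return 0;
}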
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index dd8f85b8b3a7..f2f5636efac4 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1981,8 +1981,34 @@ gf100_gr_init_(struct nvkm_gr *base)
{
struct gf100_gr *gr = gf100_gr(base);
struct nvkm_subdev *subdev = &base->engine.subdev;
+ struct nvkm_device *device = subdev->device;
+ bool reset = device->chipset == 0x137 || device->chipset == 0x138;
u32 ret;
+ /* On certain GP107/GP108 boards, we trigger a weird issue where
+ * GR will stop responding to PRI accesses after we've asked the
+ * SEC2 RTOS to boot the GR falcons. This happens far more
+ * frequently when cold-booting a board (i.e. returning from D3).
+ *
+ * The root cause for this is not known and has proven difficult
+ * to isolate, with many avenues being dead-ends.
+ *
+ * A workaround was discovered by Karol, whereby putting GR into
+ * reset for an extended period right before initialisation
+ * prevents the problem from occurring.
+ *
+ * XXX: As RM does not require any such workaround, this is more
+ * of a hack than a true fix.
+ */
+ reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset);
+ if (reset) {
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00000000);
+ nvkm_rd32(device, 0x000200);
+ msleep(50);
+ nvkm_mask(device, 0x000200, 0x00001000, 0x00001000);
+ nvkm_rd32(device, 0x000200);
+ }
+
nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);
ret = nvkm_falcon_get(&gr->fecs.falcon, subdev);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 0ce81b1f36af..3ad828eaefe1 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -361,7 +361,6 @@ static int panel_dpi_probe(struct device *dev,
struct panel_desc *desc;
unsigned int bus_flags;
struct videomode vm;
- const char *mapping;
int ret;
np = dev->of_node;
@@ -386,16 +385,6 @@ static int panel_dpi_probe(struct device *dev,
of_property_read_u32(np, "width-mm", &desc->size.width);
of_property_read_u32(np, "height-mm", &desc->size.height);
- of_property_read_string(np, "data-mapping", &mapping);
- if (!strcmp(mapping, "rgb24"))
- desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
- else if (!strcmp(mapping, "rgb565"))
- desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
- else if (!strcmp(mapping, "bgr666"))
- desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
- else if (!strcmp(mapping, "lvds666"))
- desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
-
/* Extract bus_flags from display_timing */
bus_flags = 0;
vm.flags = timing->flags;
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index f38f5e113c6b..ce98c08aa8b4 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -325,15 +325,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
void *data)
{
struct rockchip_dp_device *dp = dev_get_drvdata(dev);
- const struct rockchip_dp_chip_data *dp_data;
struct drm_device *drm_dev = data;
int ret;
- dp_data = of_device_get_match_data(dev);
- if (!dp_data)
- return -ENODEV;
-
- dp->data = dp_data;
dp->drm_dev = drm_dev;
ret = rockchip_dp_drm_create_encoder(dp);
@@ -344,16 +338,9 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
dp->plat_data.encoder = &dp->encoder;
- dp->plat_data.dev_type = dp->data->chip_type;
- dp->plat_data.power_on_start = rockchip_dp_poweron_start;
- dp->plat_data.power_off = rockchip_dp_powerdown;
- dp->plat_data.get_modes = rockchip_dp_get_modes;
-
- dp->adp = analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
- if (IS_ERR(dp->adp)) {
- ret = PTR_ERR(dp->adp);
+ ret = analogix_dp_bind(dp->adp, drm_dev);
+ if (ret)
goto err_cleanup_encoder;
- }
return 0;
err_cleanup_encoder:
@@ -368,8 +355,6 @@ static void rockchip_dp_unbind(struct device *dev, struct device *master,
analogix_dp_unbind(dp->adp);
dp->encoder.funcs->destroy(&dp->encoder);
-
- dp->adp = ERR_PTR(-ENODEV);
}
static const struct component_ops rockchip_dp_component_ops = {
@@ -380,10 +365,15 @@ static const struct component_ops rockchip_dp_component_ops = {
static int rockchip_dp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct rockchip_dp_chip_data *dp_data;
struct drm_panel *panel = NULL;
struct rockchip_dp_device *dp;
int ret;
+ dp_data = of_device_get_match_data(dev);
+ if (!dp_data)
+ return -ENODEV;
+
ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
if (ret < 0)
return ret;
@@ -394,7 +384,12 @@ static int rockchip_dp_probe(struct platform_device *pdev)
dp->dev = dev;
dp->adp = ERR_PTR(-ENODEV);
+ dp->data = dp_data;
dp->plat_data.panel = panel;
+ dp->plat_data.dev_type = dp->data->chip_type;
+ dp->plat_data.power_on_start = rockchip_dp_poweron_start;
+ dp->plat_data.power_off = rockchip_dp_powerdown;
+ dp->plat_data.get_modes = rockchip_dp_get_modes;
ret = rockchip_dp_of_probe(dp);
if (ret < 0)
@@ -402,12 +397,19 @@ static int rockchip_dp_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dp);
+ dp->adp = analogix_dp_probe(dev, &dp->plat_data);
+ if (IS_ERR(dp->adp))
+ return PTR_ERR(dp->adp);
+
return component_add(dev, &rockchip_dp_component_ops);
}
static int rockchip_dp_remove(struct platform_device *pdev)
{
+ struct rockchip_dp_device *dp = platform_get_drvdata(pdev);
+
component_del(&pdev->dev, &rockchip_dp_component_ops);
+ analogix_dp_remove(dp->adp);
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 6ee3b96f0d13..0ad30b112982 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -442,66 +442,6 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
}
EXPORT_SYMBOL(ttm_bo_vm_fault);
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-/**
- * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting?
- * @prot: The page protection value
- *
- * Return: true if @prot is write-protecting. false otherwise.
- */
-static bool ttm_pgprot_is_wrprotecting(pgprot_t prot)
-{
- /*
- * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic
- * way. Unfortunately there is no generic pgprot_wrprotect.
- */
- return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) ==
- pgprot_val(prot);
-}
-
-static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size)
-{
- struct vm_area_struct *vma = vmf->vma;
- pgprot_t prot;
- struct ttm_buffer_object *bo = vma->vm_private_data;
- vm_fault_t ret;
- pgoff_t fault_page_size = 0;
- bool write = vmf->flags & FAULT_FLAG_WRITE;
-
- switch (pe_size) {
- case PE_SIZE_PMD:
- fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT;
- break;
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
- case PE_SIZE_PUD:
- fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT;
- break;
-#endif
- default:
- WARN_ON_ONCE(1);
- return VM_FAULT_FALLBACK;
- }
-
- /* Fallback on write dirty-tracking or COW */
- if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot))
- return VM_FAULT_FALLBACK;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- prot = vm_get_page_prot(vma->vm_flags);
- ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
- dma_resv_unlock(bo->base.resv);
-
- return ret;
-}
-#endif
-
void ttm_bo_vm_open(struct vm_area_struct *vma)
{
struct ttm_buffer_object *bo = vma->vm_private_data;
@@ -604,9 +544,6 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,
.access = ttm_bo_vm_access,
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- .huge_fault = ttm_bo_vm_huge_fault,
-#endif
};
static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 8512d970a09f..ac8f75db2ecd 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -41,6 +41,10 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
return -ENODEV;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb");
+ if (ret)
+ return ret;
+
vbox = kzalloc(sizeof(*vbox), GFP_KERNEL);
if (!vbox)
return -ENOMEM;
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index cea18dc15f77..340719238753 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -681,11 +681,23 @@ static enum drm_mode_status
vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc,
const struct drm_display_mode *mode)
{
- /* HSM clock must be 108% of the pixel clock. Additionally,
- * the AXI clock needs to be at least 25% of pixel clock, but
- * HSM ends up being the limiting factor.
+ /*
+ * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must
+ * be faster than pixel clock, infinitesimally faster, tested in
+ * simulation. Otherwise, exact value is unimportant for HDMI
+ * operation." This conflicts with bcm2835's vc4 documentation, which
+ * states HSM's clock has to be at least 108% of the pixel clock.
+ *
+ * Real life tests reveal that vc4's firmware statement holds up, and
+ * users are able to use pixel clocks closer to HSM's, namely for
+ * 1920x1200@60Hz. So it was decided to leave a 1% margin between
+ * both clocks, which for RPi0-3 implies a maximum pixel clock of
+ * 162MHz.
+ *
+ * Additionally, the AXI clock needs to be at least 25% of
+ * pixel clock, but HSM ends up being the limiting factor.
*/
- if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100))
+ if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100))
return MODE_CLOCK_HIGH;
return MODE_OK;
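
A quick arithmetic check of the relaxed limit above (sketch only; HSM_CLOCK_FREQ is assumed to be the 163682864 Hz value used on RPi0-3, and mode->clock is in kHz):

#include <stdio.h>

#define HSM_CLOCK_FREQ 163682864	/* Hz, assumed RPi0-3 value */

int main(void)
{
	long max_khz = HSM_CLOCK_FREQ / (1000 * 101 / 100);

	printf("max pixel clock: %ld kHz (~%ld MHz)\n",
	       max_khz, max_khz / 1000);	/* ~162 MHz */
	return 0;
}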
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 2bfb13d1932e..d9039bb7c5e3 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -123,15 +123,17 @@ bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size)
{
- struct virtio_gpu_object *bo;
+ struct virtio_gpu_object_shmem *shmem;
+ struct drm_gem_shmem_object *dshmem;
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
- if (!bo)
+ shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
+ if (!shmem)
return NULL;
- bo->base.base.funcs = &virtio_gpu_shmem_funcs;
- bo->base.map_cached = true;
- return &bo->base.base;
+ dshmem = &shmem->base.base;
+ dshmem->base.funcs = &virtio_gpu_shmem_funcs;
+ dshmem->map_cached = true;
+ return &dshmem->base;
}
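
The rewrite above allocates the larger driver-specific object yet still returns a pointer to the GEM base embedded inside it; callers recover the outer struct later via container_of(). A standalone sketch of the embedding pattern with illustrative types:

#include <stddef.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_obj { int id; };
struct outer_obj { struct base_obj base; int extra; };

/* allocate the outer object, hand back the embedded base */
static struct base_obj *create_object(void)
{
	struct outer_obj *outer = calloc(1, sizeof(*outer));

	return outer ? &outer->base : NULL;
}

int main(void)
{
	struct base_obj *bo = create_object();
	struct outer_obj *outer;

	if (!bo)
		return 1;

	outer = container_of(bo, struct outer_obj, base);
	outer->extra = 42;	/* driver-private state lives in the outer struct */
	free(outer);
	return 0;
}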
static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
index 4be49c1aef51..374142018171 100644
--- a/drivers/gpu/drm/xen/xen_drm_front.c
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -401,7 +401,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
obj = xen_drm_front_gem_create(dev, args->size);
if (IS_ERR_OR_NULL(obj)) {
- ret = PTR_ERR(obj);
+ ret = PTR_ERR_OR_ZERO(obj);
goto fail;
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index a02ce43d778d..32e3bc0aa665 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -533,7 +533,6 @@ struct hv_dynmem_device {
* State to synchronize hot-add.
*/
struct completion ol_waitevent;
- bool ha_waiting;
/*
* This thread handles hot-add
* requests from the host as well as notifying
@@ -634,10 +633,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val,
switch (val) {
case MEM_ONLINE:
case MEM_CANCEL_ONLINE:
- if (dm_device.ha_waiting) {
- dm_device.ha_waiting = false;
- complete(&dm_device.ol_waitevent);
- }
+ complete(&dm_device.ol_waitevent);
break;
case MEM_OFFLINE:
@@ -726,8 +722,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
has->covered_end_pfn += processed_pfn;
spin_unlock_irqrestore(&dm_device.ha_lock, flags);
- init_completion(&dm_device.ol_waitevent);
- dm_device.ha_waiting = !memhp_auto_online;
+ reinit_completion(&dm_device.ol_waitevent);
nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn));
ret = add_memory(nid, PFN_PHYS((start_pfn)),
@@ -753,15 +748,14 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
}
/*
- * Wait for the memory block to be onlined when memory onlining
- * is done outside of kernel (memhp_auto_online). Since the hot
- * add has succeeded, it is ok to proceed even if the pages in
- * the hot added region have not been "onlined" within the
- * allowed time.
+ * Wait for memory to get onlined. If the kernel onlined the
+ * memory when adding it, this will return directly. Otherwise,
+ * it will wait for user space to online the memory. This helps
+ * to avoid adding memory faster than it is getting onlined. As
+ * adding succeeded, it is ok to proceed even if the memory was
+ * not onlined in time.
*/
- if (dm_device.ha_waiting)
- wait_for_completion_timeout(&dm_device.ol_waitevent,
- 5*HZ);
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ);
post_status(&dm_device);
}
}
@@ -1706,6 +1700,7 @@ static int balloon_probe(struct hv_device *dev,
#ifdef CONFIG_MEMORY_HOTPLUG
set_online_page_callback(&hv_online_page);
+ init_completion(&dm_device.ol_waitevent);
register_memory_notifier(&hv_memory_nb);
#endif
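
The hunks above drop the ha_waiting flag in favour of the completion itself: initialised once at probe and re-armed before each hot-add cycle, so the memory notifier can call complete() unconditionally. A minimal kernel-style sketch of the pattern (not a standalone program):

#include <linux/completion.h>

static struct completion ol_waitevent;

static void probe_once(void)
{
	init_completion(&ol_waitevent);	/* once, before the notifier can fire */
}

static void hot_add_cycle(void)
{
	reinit_completion(&ol_waitevent);	/* re-arm for this cycle */
	/* ... add_memory() ... */
	wait_for_completion_timeout(&ol_waitevent, 5 * HZ);
}

static void memory_notifier_event(void)
{
	complete(&ol_waitevent);	/* always safe, no flag needed */
}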
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index d4c83009d625..ab719d372b0d 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -7,7 +7,7 @@
* Hwmon integration:
* Copyright (C) 2011 Jean Delvare <jdelvare@suse.de>
* Copyright (C) 2013, 2014 Guenter Roeck <linux@roeck-us.net>
- * Copyright (C) 2014, 2015 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014, 2015 Pali Rohár <pali@kernel.org>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -86,7 +86,7 @@ static unsigned int auto_fan;
#define I8K_HWMON_HAVE_FAN3 (1 << 12)
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop SMM BIOS hwmon driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("i8k");
diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c
index acf874800ca4..b0411a1827a3 100644
--- a/drivers/ide/ide-scan-pci.c
+++ b/drivers/ide/ide-scan-pci.c
@@ -89,8 +89,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
static int __init ide_scan_pcibus(void)
{
struct pci_dev *dev = NULL;
- struct pci_driver *d;
- struct list_head *l, *n;
+ struct pci_driver *d, *tmp;
pre_init = 0;
for_each_pci_dev(dev)
@@ -101,9 +100,8 @@ static int __init ide_scan_pcibus(void)
* are post init.
*/
- list_for_each_safe(l, n, &ide_pci_drivers) {
- list_del(l);
- d = list_entry(l, struct pci_driver, node);
+ list_for_each_entry_safe(d, tmp, &ide_pci_drivers, node) {
+ list_del(&d->node);
if (__pci_register_driver(d, d->driver.owner,
d->driver.mod_name))
printk(KERN_ERR "%s: failed to register %s driver\n",
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 5bd51853b15e..d5c073a8aa3e 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -88,6 +88,7 @@ source "drivers/iio/orientation/Kconfig"
if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
+source "drivers/iio/position/Kconfig"
source "drivers/iio/potentiometer/Kconfig"
source "drivers/iio/potentiostat/Kconfig"
source "drivers/iio/pressure/Kconfig"
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index bff682ad1cfb..1712011c0f4a 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -31,6 +31,7 @@ obj-y += light/
obj-y += magnetometer/
obj-y += multiplexer/
obj-y += orientation/
+obj-y += position/
obj-y += potentiometer/
obj-y += potentiostat/
obj-y += pressure/
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 68e847c6255e..2532b9ad3384 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -170,7 +170,8 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture, NULL);
if (ret)
return ret;
@@ -190,11 +191,6 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev)
state->sign[CROS_EC_SENSOR_Z] = -1;
}
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index f4da821c4022..12bb8b7ca1ff 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -795,6 +795,16 @@ config RCAR_GYRO_ADC
To compile this driver as a module, choose M here: the
module will be called rcar-gyroadc.
+config RN5T618_ADC
+ tristate "ADC for the RN5T618/RC5T619 family of chips"
+ depends on MFD_RN5T618
+ help
+ Say yes here to build support for the integrated ADC inside the
+ RN5T618/619 series PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called rn5t618-adc.
+
config ROCKCHIP_SARADC
tristate "Rockchip SARADC driver"
depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST)
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 8462455b4228..637807861112 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -75,6 +75,7 @@ obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o
obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o
obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o
obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o
+obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o
obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o
obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o
obj-$(CONFIG_SPEAR_ADC) += spear_adc.o
diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c
new file mode 100644
index 000000000000..f21027e4e26a
--- /dev/null
+++ b/drivers/iio/adc/rn5t618-adc.c
@@ -0,0 +1,256 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ADC driver for the RICOH RN5T618 power management chip family
+ *
+ * Copyright (C) 2019 Andreas Kemnade
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mfd/rn5t618.h>
+#include <linux/platform_device.h>
+#include <linux/completion.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/slab.h>
+
+#define RN5T618_ADC_CONVERSION_TIMEOUT (msecs_to_jiffies(500))
+#define RN5T618_REFERENCE_VOLT 2500
+
+/* mask for selecting channels for single conversion */
+#define RN5T618_ADCCNT3_CHANNEL_MASK 0x7
+/* average 4-time conversion mode */
+#define RN5T618_ADCCNT3_AVG BIT(3)
+/* set for starting a single conversion, gets cleared by hw when done */
+#define RN5T618_ADCCNT3_GODONE BIT(4)
+/*
+ * automatic conversion: period is in ADCCNT2, selected channels are
+ * in ADCCNT1
+ */
+#define RN5T618_ADCCNT3_AUTO BIT(5)
+#define RN5T618_ADCEND_IRQ BIT(0)
+
+struct rn5t618_adc_data {
+ struct device *dev;
+ struct rn5t618 *rn5t618;
+ struct completion conv_completion;
+ int irq;
+};
+
+struct rn5t618_channel_ratios {
+ u16 numerator;
+ u16 denominator;
+};
+
+enum rn5t618_channels {
+ LIMMON = 0,
+ VBAT,
+ VADP,
+ VUSB,
+ VSYS,
+ VTHM,
+ AIN1,
+ AIN0
+};
+
+static const struct rn5t618_channel_ratios rn5t618_ratios[8] = {
+ [LIMMON] = {50, 32}, /* measured across 20mOhm, amplified by 32 */
+ [VBAT] = {2, 1},
+ [VADP] = {3, 1},
+ [VUSB] = {3, 1},
+ [VSYS] = {3, 1},
+ [VTHM] = {1, 1},
+ [AIN1] = {1, 1},
+ [AIN0] = {1, 1},
+};
+
+static int rn5t618_read_adc_reg(struct rn5t618 *rn5t618, int reg, u16 *val)
+{
+ u8 data[2];
+ int ret;
+
+ ret = regmap_bulk_read(rn5t618->regmap, reg, data, sizeof(data));
+ if (ret < 0)
+ return ret;
+
+ *val = (data[0] << 4) | (data[1] & 0xF);
+
+ return 0;
+}
+
+static irqreturn_t rn5t618_adc_irq(int irq, void *data)
+{
+ struct rn5t618_adc_data *adc = data;
+ unsigned int r = 0;
+ int ret;
+
+ /* clear low & high threshold irqs */
+ regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC1, 0);
+ regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC2, 0);
+
+ ret = regmap_read(adc->rn5t618->regmap, RN5T618_IR_ADC3, &r);
+ if (ret < 0)
+ dev_err(adc->dev, "failed to read IRQ status: %d\n", ret);
+
+ regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC3, 0);
+
+ if (r & RN5T618_ADCEND_IRQ)
+ complete(&adc->conv_completion);
+
+ return IRQ_HANDLED;
+}
+
+static int rn5t618_adc_read(struct iio_dev *iio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct rn5t618_adc_data *adc = iio_priv(iio_dev);
+ u16 raw;
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_SCALE) {
+ *val = RN5T618_REFERENCE_VOLT *
+ rn5t618_ratios[chan->channel].numerator;
+ *val2 = rn5t618_ratios[chan->channel].denominator * 4095;
+
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ /* select channel */
+ ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_CHANNEL_MASK,
+ chan->channel);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(adc->rn5t618->regmap, RN5T618_EN_ADCIR3,
+ RN5T618_ADCEND_IRQ);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_AVG,
+ mask == IIO_CHAN_INFO_AVERAGE_RAW ?
+ RN5T618_ADCCNT3_AVG : 0);
+ if (ret < 0)
+ return ret;
+
+ init_completion(&adc->conv_completion);
+ /* single conversion */
+ ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3,
+ RN5T618_ADCCNT3_GODONE,
+ RN5T618_ADCCNT3_GODONE);
+ if (ret < 0)
+ return ret;
+
+ ret = wait_for_completion_timeout(&adc->conv_completion,
+ RN5T618_ADC_CONVERSION_TIMEOUT);
+ if (ret == 0) {
+ dev_warn(adc->dev, "timeout waiting for adc result\n");
+ return -ETIMEDOUT;
+ }
+
+ ret = rn5t618_read_adc_reg(adc->rn5t618,
+ RN5T618_ILIMDATAH + 2 * chan->channel,
+ &raw);
+ if (ret < 0)
+ return ret;
+
+ *val = raw;
+
+ return IIO_VAL_INT;
+}
+
+static const struct iio_info rn5t618_adc_iio_info = {
+ .read_raw = &rn5t618_adc_read,
+};
+
+#define RN5T618_ADC_CHANNEL(_channel, _type, _name) { \
+ .type = _type, \
+ .channel = _channel, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \
+ BIT(IIO_CHAN_INFO_SCALE), \
+ .datasheet_name = _name, \
+ .indexed = 1, \
+}
+
+static const struct iio_chan_spec rn5t618_adc_iio_channels[] = {
+ RN5T618_ADC_CHANNEL(LIMMON, IIO_CURRENT, "LIMMON"),
+ RN5T618_ADC_CHANNEL(VBAT, IIO_VOLTAGE, "VBAT"),
+ RN5T618_ADC_CHANNEL(VADP, IIO_VOLTAGE, "VADP"),
+ RN5T618_ADC_CHANNEL(VUSB, IIO_VOLTAGE, "VUSB"),
+ RN5T618_ADC_CHANNEL(VSYS, IIO_VOLTAGE, "VSYS"),
+ RN5T618_ADC_CHANNEL(VTHM, IIO_VOLTAGE, "VTHM"),
+ RN5T618_ADC_CHANNEL(AIN1, IIO_VOLTAGE, "AIN1"),
+ RN5T618_ADC_CHANNEL(AIN0, IIO_VOLTAGE, "AIN0")
+};
+
+static int rn5t618_adc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct iio_dev *iio_dev;
+ struct rn5t618_adc_data *adc;
+ struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+
+ iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!iio_dev) {
+ dev_err(&pdev->dev, "failed allocating iio device\n");
+ return -ENOMEM;
+ }
+
+ adc = iio_priv(iio_dev);
+ adc->dev = &pdev->dev;
+ adc->rn5t618 = rn5t618;
+
+ if (rn5t618->irq_data)
+ adc->irq = regmap_irq_get_virq(rn5t618->irq_data,
+ RN5T618_IRQ_ADC);
+
+ if (adc->irq <= 0) {
+ dev_err(&pdev->dev, "get virq failed\n");
+ return -EINVAL;
+ }
+
+ init_completion(&adc->conv_completion);
+
+ iio_dev->name = dev_name(&pdev->dev);
+ iio_dev->dev.parent = &pdev->dev;
+ iio_dev->info = &rn5t618_adc_iio_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+ iio_dev->channels = rn5t618_adc_iio_channels;
+ iio_dev->num_channels = ARRAY_SIZE(rn5t618_adc_iio_channels);
+
+ /* stop any auto-conversion */
+ ret = regmap_write(rn5t618->regmap, RN5T618_ADCCNT3, 0);
+ if (ret < 0)
+ return ret;
+
+ platform_set_drvdata(pdev, iio_dev);
+
+ ret = devm_request_threaded_irq(adc->dev, adc->irq, NULL,
+ rn5t618_adc_irq,
+ IRQF_ONESHOT, dev_name(adc->dev),
+ adc);
+ if (ret < 0) {
+ dev_err(adc->dev, "request irq %d failed: %d\n", adc->irq, ret);
+ return ret;
+ }
+
+ return devm_iio_device_register(adc->dev, iio_dev);
+}
+
+static struct platform_driver rn5t618_adc_driver = {
+ .driver = {
+ .name = "rn5t618-adc",
+ },
+ .probe = rn5t618_adc_probe,
+};
+
+module_platform_driver(rn5t618_adc_driver);
+MODULE_ALIAS("platform:rn5t618-adc");
+MODULE_DESCRIPTION("RICOH RN5T618 ADC driver");
+MODULE_LICENSE("GPL");
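
Given the scale exposed above (2500 mV reference, 12-bit range, per-channel divider ratios), a raw reading converts to millivolts as raw * 2500 * numerator / (denominator * 4095). A standalone check for the VBAT channel:

#include <stdio.h>

int main(void)
{
	int raw = 4095, num = 2, den = 1;	/* VBAT: {2, 1} divider */
	long mv = (long)raw * 2500 * num / (den * 4095);

	printf("VBAT full scale = %ld mV\n", mv);	/* 5000 */
	return 0;
}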
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
index 1dcc2a16ab2d..af801e203623 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c
@@ -97,7 +97,7 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, false);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL, NULL);
if (ret)
return ret;
@@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_lid_angle_ids);
static struct platform_driver cros_ec_lid_angle_platform_driver = {
.driver = {
.name = DRV_NAME,
- .pm = &cros_ec_sensors_pm_ops,
},
.probe = cros_ec_lid_angle_probe,
.id_table = cros_ec_lid_angle_ids,
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
index 576e45faafaf..a66941fdb385 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c
@@ -230,10 +230,14 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture,
+ cros_ec_sensors_push_data);
if (ret)
return ret;
+ iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
+
indio_dev->info = &ec_sensors_info;
state = iio_priv(indio_dev);
for (channel = state->channels, i = CROS_EC_SENSOR_X;
@@ -245,7 +249,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
BIT(IIO_CHAN_INFO_CALIBSCALE);
channel->info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_FREQUENCY) |
BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
@@ -292,11 +295,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev)
else
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
@@ -317,7 +315,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids);
static struct platform_driver cros_ec_sensors_platform_driver = {
.driver = {
.name = "cros-ec-sensors",
- .pm = &cros_ec_sensors_pm_ops,
},
.probe = cros_ec_sensors_probe,
.id_table = cros_ec_sensors_ids,
diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
index d3a3626c7cd8..c831915ca7e5 100644
--- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
+++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c
@@ -11,7 +11,9 @@
#include <linux/iio/common/cros_ec_sensors_core.h>
#include <linux/iio/iio.h>
#include <linux/iio/kfifo_buf.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -20,6 +22,12 @@
#include <linux/platform_data/cros_ec_sensorhub.h>
#include <linux/platform_device.h>
+/*
+ * Hard coded to the fifo size of the first device to support the sensor
+ * fifo: the EC has a 2048 byte fifo and triggers an interrupt when the
+ * fifo is 2/3 full.
+ */
+#define CROS_EC_FIFO_SIZE (2048 * 2 / 3)
+
static char *cros_ec_loc[] = {
[MOTIONSENSE_LOC_BASE] = "base",
[MOTIONSENSE_LOC_LID] = "lid",
@@ -53,8 +61,15 @@ static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev,
static void get_default_min_max_freq(enum motionsensor_type type,
u32 *min_freq,
- u32 *max_freq)
+ u32 *max_freq,
+ u32 *max_fifo_events)
{
+ /*
+ * We don't know the fifo size; default to the size used by older
+ * hardware.
+ */
+ *max_fifo_events = CROS_EC_FIFO_SIZE;
+
switch (type) {
case MOTIONSENSE_TYPE_ACCEL:
case MOTIONSENSE_TYPE_GYRO:
@@ -82,9 +97,155 @@ static void get_default_min_max_freq(enum motionsensor_type type,
}
}
+static int cros_ec_sensor_set_ec_rate(struct cros_ec_sensors_core_state *st,
+ int rate)
+{
+ int ret;
+
+ if (rate > U16_MAX)
+ rate = U16_MAX;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = rate;
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ mutex_unlock(&st->cmd_lock);
+ return ret;
+}
+
+static ssize_t cros_ec_sensor_set_report_latency(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int integer, fract, ret;
+ int latency;
+
+ ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract);
+ if (ret)
+ return ret;
+
+ /* EC rate is in ms. */
+ latency = integer * 1000 + fract / 1000;
+ ret = cros_ec_sensor_set_ec_rate(st, latency);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static ssize_t cros_ec_sensor_get_report_latency(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ int latency, ret;
+
+ mutex_lock(&st->cmd_lock);
+ st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
+ st->param.ec_rate.data = EC_MOTION_SENSE_NO_VALUE;
+
+ ret = cros_ec_motion_send_host_cmd(st, 0);
+ latency = st->resp->ec_rate.ret;
+ mutex_unlock(&st->cmd_lock);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%d.%06u\n",
+ latency / 1000,
+ (latency % 1000) * 1000);
+}
+
+static IIO_DEVICE_ATTR(hwfifo_timeout, 0644,
+ cros_ec_sensor_get_report_latency,
+ cros_ec_sensor_set_report_latency, 0);
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->fifo_max_event_count);
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
+
+const struct attribute *cros_ec_sensor_fifo_attributes[] = {
+ &iio_dev_attr_hwfifo_timeout.dev_attr.attr,
+ &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
+ NULL,
+};
+EXPORT_SYMBOL_GPL(cros_ec_sensor_fifo_attributes);
+
+int cros_ec_sensors_push_data(struct iio_dev *indio_dev,
+ s16 *data,
+ s64 timestamp)
+{
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ s16 *out;
+ s64 delta;
+ unsigned int i;
+
+ /*
+ * Ignore samples if the buffer is not enabled: this happens when
+ * the ODR is already set but the buffer is not enabled yet.
+ */
+ if (!iio_buffer_enabled(indio_dev))
+ return 0;
+
+ out = (s16 *)st->samples;
+ for_each_set_bit(i,
+ indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ *out = data[i];
+ out++;
+ }
+
+ if (iio_device_get_clock(indio_dev) != CLOCK_BOOTTIME)
+ delta = iio_get_time_ns(indio_dev) - cros_ec_get_time_ns();
+ else
+ delta = 0;
+
+ iio_push_to_buffers_with_timestamp(indio_dev, st->samples,
+ timestamp + delta);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data);
+
+static void cros_ec_sensors_core_clean(void *arg)
+{
+ struct platform_device *pdev = (struct platform_device *)arg;
+ struct cros_ec_sensorhub *sensor_hub =
+ dev_get_drvdata(pdev->dev.parent);
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
+ u8 sensor_num = st->param.info.sensor_num;
+
+ cros_ec_sensorhub_unregister_push_data(sensor_hub, sensor_num);
+}
+
+/**
+ * cros_ec_sensors_core_init() - basic initialization of the core structure
+ * @pdev: platform device created for the sensors
+ * @indio_dev: iio device structure of the device
+ * @physical_device: true if the device refers to a physical device
+ * @trigger_capture: function pointer to call when the buffer is triggered,
+ * for backward compatibility.
+ * @push_data: function to call when cros_ec_sensorhub receives
+ * a sample for that sensor.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_sensors_core_init(struct platform_device *pdev,
struct iio_dev *indio_dev,
- bool physical_device)
+ bool physical_device,
+ cros_ec_sensors_capture_t trigger_capture,
+ cros_ec_sensorhub_push_data_cb_t push_data)
{
struct device *dev = &pdev->dev;
struct cros_ec_sensors_core_state *state = iio_priv(indio_dev);
@@ -92,6 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
struct cros_ec_dev *ec = sensor_hub->ec;
struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev);
u32 ver_mask;
+ int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 };
int ret, i;
platform_set_drvdata(pdev, indio_dev);
@@ -123,8 +285,6 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
indio_dev->name = pdev->name;
if (physical_device) {
- indio_dev->modes = INDIO_DIRECT_MODE;
-
state->param.cmd = MOTIONSENSE_CMD_INFO;
state->param.info.sensor_num = sensor_platform->sensor_num;
ret = cros_ec_motion_send_host_cmd(state, 0);
@@ -142,16 +302,63 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
state->calib[i].scale = MOTION_SENSE_DEFAULT_SCALE;
/* 0 is a correct value used to stop the device */
- state->frequencies[0] = 0;
if (state->msg->version < 3) {
get_default_min_max_freq(state->resp->info.type,
- &state->frequencies[1],
- &state->frequencies[2]);
+ &frequencies[1],
+ &frequencies[2],
+ &state->fifo_max_event_count);
} else {
- state->frequencies[1] =
- state->resp->info_3.min_frequency;
- state->frequencies[2] =
- state->resp->info_3.max_frequency;
+ frequencies[1] = state->resp->info_3.min_frequency;
+ frequencies[2] = state->resp->info_3.max_frequency;
+ state->fifo_max_event_count =
+ state->resp->info_3.fifo_max_event_count;
+ }
+ for (i = 0; i < ARRAY_SIZE(frequencies); i++) {
+ state->frequencies[2 * i] = frequencies[i] / 1000;
+ state->frequencies[2 * i + 1] =
+ (frequencies[i] % 1000) * 1000;
+ }
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ /*
+ * Create a software buffer, fed by the EC FIFO.
+ * We cannot use a trigger here, as events are generated
+ * as soon as sample_frequency is set.
+ */
+ struct iio_buffer *buffer;
+
+ buffer = devm_iio_kfifo_allocate(dev);
+ if (!buffer)
+ return -ENOMEM;
+
+ iio_device_attach_buffer(indio_dev, buffer);
+ indio_dev->modes = INDIO_BUFFER_SOFTWARE;
+
+ ret = cros_ec_sensorhub_register_push_data(
+ sensor_hub, sensor_platform->sensor_num,
+ indio_dev, push_data);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(
+ dev, cros_ec_sensors_core_clean, pdev);
+ if (ret)
+ return ret;
+
+ /* Timestamps coming from the FIFO are in ns since boot. */
+ ret = iio_device_set_clock(indio_dev, CLOCK_BOOTTIME);
+ if (ret)
+ return ret;
+ } else {
+ /*
+ * The only way to get samples into the buffer is to set a
+ * software trigger (systrig, hrtimer).
+ */
+ ret = devm_iio_triggered_buffer_setup(
+ dev, indio_dev, NULL, trigger_capture,
+ NULL);
+ if (ret)
+ return ret;
}
}
@@ -159,6 +366,16 @@ int cros_ec_sensors_core_init(struct platform_device *pdev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init);
+/**
+ * cros_ec_motion_send_host_cmd() - send motion sense host command
+ * @state: pointer to state information for device
+ * @opt_length: optional length to reduce the response size, useful on the data
+ * path. Otherwise, the maximal allowed response size is used
+ *
+ * When called, the sub-command is assumed to be set in param->cmd.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state,
u16 opt_length)
{
@@ -421,6 +638,14 @@ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc);
+/**
+ * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol
+ * @indio_dev: pointer to IIO device
+ * @scan_mask: bitmap of the sensor indices to scan
+ * @data: location to store data
+ *
+ * Return: 0 on success, -errno on failure.
+ */
int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
unsigned long scan_mask, s16 *data)
{
@@ -445,6 +670,18 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd);
+/**
+ * cros_ec_sensors_capture() - the trigger handler function
+ * @irq: the interrupt number.
+ * @p: a pointer to the poll function.
+ *
+ * On a trigger event occurring, if the pollfunc is attached then this
+ * handler is called as a threaded interrupt (and hence may sleep). It
+ * is responsible for grabbing data from the device and pushing it into
+ * the associated buffer.
+ *
+ * Return: IRQ_HANDLED
+ */
irqreturn_t cros_ec_sensors_capture(int irq, void *p)
{
struct iio_poll_func *pf = p;
@@ -480,26 +717,24 @@ done:
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_capture);
+/**
+ * cros_ec_sensors_core_read() - function to request a value from the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: will contain one element making up the returned value
+ * @val2: will contain another element making up the returned value
+ * @mask: specifies which value is requested
+ *
+ * Return: the type of value returned by the device
+ */
int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int *val, int *val2, long mask)
{
- int ret;
+ int ret, frequency;
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data =
- EC_MOTION_SENSE_NO_VALUE;
-
- ret = cros_ec_motion_send_host_cmd(st, 0);
- if (ret)
- break;
-
- *val = st->resp->ec_rate.ret;
- ret = IIO_VAL_INT;
- break;
- case IIO_CHAN_INFO_FREQUENCY:
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
st->param.sensor_odr.data =
EC_MOTION_SENSE_NO_VALUE;
@@ -508,8 +743,10 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
if (ret)
break;
- *val = st->resp->sensor_odr.ret;
- ret = IIO_VAL_INT;
+ frequency = st->resp->sensor_odr.ret;
+ *val = frequency / 1000;
+ *val2 = (frequency % 1000) * 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
break;
default:
ret = -EINVAL;
@@ -520,6 +757,17 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read);
+/**
+ * cros_ec_sensors_core_read_avail() - get available values
+ * @indio_dev: pointer to state information for device
+ * @chan: channel specification structure table
+ * @vals: list of available values
+ * @type: type of data returned
+ * @length: number of data returned in the array
+ * @mask: specifies which value is requested
+ *
+ * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST
+ */
int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
const int **vals,
@@ -533,7 +781,7 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SAMP_FREQ:
*length = ARRAY_SIZE(state->frequencies);
*vals = (const int *)&state->frequencies;
- *type = IIO_VAL_INT;
+ *type = IIO_VAL_INT_PLUS_MICRO;
return IIO_AVAIL_LIST;
}
@@ -541,31 +789,33 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail);
+/**
+ * cros_ec_sensors_core_write() - function to write a value to the sensor
+ * @st: pointer to state information for device
+ * @chan: channel specification structure table
+ * @val: first part of value to write
+ * @val2: second part of value to write
+ * @mask: specifies which values to write
+ *
+ * Return: the type of value returned by the device
+ */
int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
{
- int ret;
+ int ret, frequency;
switch (mask) {
- case IIO_CHAN_INFO_FREQUENCY:
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ frequency = val * 1000 + val2 / 1000;
st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR;
- st->param.sensor_odr.data = val;
+ st->param.sensor_odr.data = frequency;
/* Always roundup, so caller gets at least what it asks for. */
st->param.sensor_odr.roundup = 1;
ret = cros_ec_motion_send_host_cmd(st, 0);
break;
- case IIO_CHAN_INFO_SAMP_FREQ:
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = val;
-
- ret = cros_ec_motion_send_host_cmd(st, 0);
- if (ret)
- break;
- st->curr_sampl_freq = val;
- break;
default:
ret = -EINVAL;
break;
@@ -574,52 +824,5 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st,
}
EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write);
-static int __maybe_unused cros_ec_sensors_prepare(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
-
- if (st->curr_sampl_freq == 0)
- return 0;
-
- /*
- * If the sensors are sampled at high frequency, we will not be able to
- * sleep. Set sampling to a long period if necessary.
- */
- if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
- mutex_lock(&st->cmd_lock);
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY;
- cros_ec_motion_send_host_cmd(st, 0);
- mutex_unlock(&st->cmd_lock);
- }
- return 0;
-}
-
-static void __maybe_unused cros_ec_sensors_complete(struct device *dev)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct cros_ec_sensors_core_state *st = iio_priv(indio_dev);
-
- if (st->curr_sampl_freq == 0)
- return;
-
- if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) {
- mutex_lock(&st->cmd_lock);
- st->param.cmd = MOTIONSENSE_CMD_EC_RATE;
- st->param.ec_rate.data = st->curr_sampl_freq;
- cros_ec_motion_send_host_cmd(st, 0);
- mutex_unlock(&st->cmd_lock);
- }
-}
-
-const struct dev_pm_ops cros_ec_sensors_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .prepare = cros_ec_sensors_prepare,
- .complete = cros_ec_sensors_complete
-#endif
-};
-EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops);
-
MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions");
MODULE_LICENSE("GPL v2");
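
The conversions above move the ODR interface from the EC's milli-Hz units to IIO's INT_PLUS_MICRO representation. A standalone check of the split, and of its inverse used by the write path:

#include <stdio.h>

int main(void)
{
	int mhz = 12500;			/* 12.5 Hz reported in milli-Hz */
	int val = mhz / 1000;			/* integer part: 12 */
	int val2 = (mhz % 1000) * 1000;		/* micro part: 500000 */

	printf("sysfs shows %d.%06d Hz\n", val, val2);
	printf("write path recovers %d mHz\n", val * 1000 + val2 / 1000);
	return 0;
}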
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index eac63c1bb8da..2352c426bfb5 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -189,7 +189,12 @@ ssize_t iio_read_const_attr(struct device *dev,
}
EXPORT_SYMBOL(iio_read_const_attr);
-static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
+/**
+ * iio_device_set_clock() - Set current timestamping clock for the device
+ * @indio_dev: IIO device structure containing the device
+ * @clock_id: timestamping clock posix identifier to set.
+ */
+int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
int ret;
const struct iio_event_interface *ev_int = indio_dev->event_interface;
@@ -207,6 +212,7 @@ static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
return 0;
}
+EXPORT_SYMBOL(iio_device_set_clock);
/**
* iio_get_time_ns() - utility function to get a time stamp for events etc
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 74970f18a93b..b27719cefcf9 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -194,6 +194,16 @@ config GP2AP020A00F
To compile this driver as a module, choose M here: the
module will be called gp2ap020a00f.
+config IQS621_ALS
+ tristate "Azoteq IQS621/622 ambient light sensors"
+ depends on MFD_IQS62X || COMPILE_TEST
+ help
+ Say Y here if you want to build support for the Azoteq IQS621
+ and IQS622 ambient light sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iqs621-als.
+
config SENSORS_ISL29018
tristate "Intersil 29018 light and proximity sensor"
depends on I2C
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 5c1ebaf578fb..d1c8aa30b9a8 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_GP2AP002) += gp2ap002.o
obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o
obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o
+obj-$(CONFIG_IQS621_ALS) += iqs621-als.o
obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o
obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o
obj-$(CONFIG_ISL29125) += isl29125.o
diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c
index 7a838e2956f4..2198b50909ed 100644
--- a/drivers/iio/light/cros_ec_light_prox.c
+++ b/drivers/iio/light/cros_ec_light_prox.c
@@ -177,10 +177,14 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture,
+ cros_ec_sensors_push_data);
if (ret)
return ret;
+ iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
+
indio_dev->info = &cros_ec_light_prox_info;
state = iio_priv(indio_dev);
state->core.type = state->core.resp->info.type;
@@ -189,8 +193,7 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
/* Common part */
channel->info_mask_shared_by_all =
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_FREQUENCY);
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
@@ -236,11 +239,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev)
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
@@ -258,7 +256,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids);
static struct platform_driver cros_ec_light_prox_platform_driver = {
.driver = {
.name = "cros-ec-light-prox",
- .pm = &cros_ec_sensors_pm_ops,
},
.probe = cros_ec_light_prox_probe,
.id_table = cros_ec_light_prox_ids,
diff --git a/drivers/iio/light/iqs621-als.c b/drivers/iio/light/iqs621-als.c
new file mode 100644
index 000000000000..b2988a782bd0
--- /dev/null
+++ b/drivers/iio/light/iqs621-als.c
@@ -0,0 +1,617 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS621/622 Ambient Light Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IQS621_ALS_FLAGS_LIGHT BIT(7)
+#define IQS621_ALS_FLAGS_RANGE GENMASK(3, 0)
+
+#define IQS621_ALS_UI_OUT 0x17
+
+#define IQS621_ALS_THRESH_DARK 0x80
+#define IQS621_ALS_THRESH_LIGHT 0x81
+
+#define IQS622_IR_RANGE 0x15
+#define IQS622_IR_FLAGS 0x16
+#define IQS622_IR_FLAGS_TOUCH BIT(1)
+#define IQS622_IR_FLAGS_PROX BIT(0)
+
+#define IQS622_IR_UI_OUT 0x17
+
+#define IQS622_IR_THRESH_PROX 0x91
+#define IQS622_IR_THRESH_TOUCH 0x92
+
+struct iqs621_als_private {
+ struct iqs62x_core *iqs62x;
+ struct notifier_block notifier;
+ struct mutex lock;
+ bool light_en;
+ bool range_en;
+ bool prox_en;
+ u8 als_flags;
+ u8 ir_flags_mask;
+ u8 ir_flags;
+ u8 thresh_light;
+ u8 thresh_dark;
+ u8 thresh_prox;
+};
+
+static int iqs621_als_init(struct iqs621_als_private *iqs621_als)
+{
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ unsigned int event_mask = 0;
+ int ret;
+
+ switch (iqs621_als->ir_flags_mask) {
+ case IQS622_IR_FLAGS_TOUCH:
+ ret = regmap_write(iqs62x->regmap, IQS622_IR_THRESH_TOUCH,
+ iqs621_als->thresh_prox);
+ break;
+
+ case IQS622_IR_FLAGS_PROX:
+ ret = regmap_write(iqs62x->regmap, IQS622_IR_THRESH_PROX,
+ iqs621_als->thresh_prox);
+ break;
+
+ default:
+ ret = regmap_write(iqs62x->regmap, IQS621_ALS_THRESH_LIGHT,
+ iqs621_als->thresh_light);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(iqs62x->regmap, IQS621_ALS_THRESH_DARK,
+ iqs621_als->thresh_dark);
+ }
+
+ if (ret)
+ return ret;
+
+ if (iqs621_als->light_en || iqs621_als->range_en)
+ event_mask |= iqs62x->dev_desc->als_mask;
+
+ if (iqs621_als->prox_en)
+ event_mask |= iqs62x->dev_desc->ir_mask;
+
+ return regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ event_mask, 0);
+}
+
+static int iqs621_als_notifier(struct notifier_block *notifier,
+ unsigned long event_flags, void *context)
+{
+ struct iqs62x_event_data *event_data = context;
+ struct iqs621_als_private *iqs621_als;
+ struct iio_dev *indio_dev;
+ bool light_new, light_old;
+ bool prox_new, prox_old;
+ u8 range_new, range_old;
+ s64 timestamp;
+ int ret;
+
+ iqs621_als = container_of(notifier, struct iqs621_als_private,
+ notifier);
+ indio_dev = iio_priv_to_dev(iqs621_als);
+ timestamp = iio_get_time_ns(indio_dev);
+
+ mutex_lock(&iqs621_als->lock);
+
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ ret = iqs621_als_init(iqs621_als);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to re-initialize device: %d\n", ret);
+ ret = NOTIFY_BAD;
+ } else {
+ ret = NOTIFY_OK;
+ }
+
+ goto err_mutex;
+ }
+
+ if (!iqs621_als->light_en && !iqs621_als->range_en &&
+ !iqs621_als->prox_en) {
+ ret = NOTIFY_DONE;
+ goto err_mutex;
+ }
+
+ /* IQS621 only */
+ light_new = event_data->als_flags & IQS621_ALS_FLAGS_LIGHT;
+ light_old = iqs621_als->als_flags & IQS621_ALS_FLAGS_LIGHT;
+
+ if (iqs621_als->light_en && light_new && !light_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if (iqs621_als->light_en && !light_new && light_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ /* IQS621 and IQS622 */
+ range_new = event_data->als_flags & IQS621_ALS_FLAGS_RANGE;
+ range_old = iqs621_als->als_flags & IQS621_ALS_FLAGS_RANGE;
+
+ if (iqs621_als->range_en && (range_new > range_old))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if (iqs621_als->range_en && (range_new < range_old))
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ /* IQS622 only */
+ prox_new = event_data->ir_flags & iqs621_als->ir_flags_mask;
+ prox_old = iqs621_als->ir_flags & iqs621_als->ir_flags_mask;
+
+ if (iqs621_als->prox_en && prox_new && !prox_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ timestamp);
+ else if (iqs621_als->prox_en && !prox_new && prox_old)
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ timestamp);
+
+ iqs621_als->als_flags = event_data->als_flags;
+ iqs621_als->ir_flags = event_data->ir_flags;
+ ret = NOTIFY_OK;
+
+err_mutex:
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static void iqs621_als_notifier_unregister(void *context)
+{
+ struct iqs621_als_private *iqs621_als = context;
+ struct iio_dev *indio_dev = iio_priv_to_dev(iqs621_als);
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(&iqs621_als->iqs62x->nh,
+ &iqs621_als->notifier);
+ if (ret)
+ dev_err(indio_dev->dev.parent,
+ "Failed to unregister notifier: %d\n", ret);
+}
+
+static int iqs621_als_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ int ret;
+ __le16 val_buf;
+
+ switch (chan->type) {
+ case IIO_INTENSITY:
+ ret = regmap_read(iqs62x->regmap, chan->address, val);
+ if (ret)
+ return ret;
+
+ *val &= IQS621_ALS_FLAGS_RANGE;
+ return IIO_VAL_INT;
+
+ case IIO_PROXIMITY:
+ case IIO_LIGHT:
+ ret = regmap_raw_read(iqs62x->regmap, chan->address, &val_buf,
+ sizeof(val_buf));
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(val_buf);
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iqs621_als_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&iqs621_als->lock);
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = iqs621_als->light_en;
+ break;
+
+ case IIO_INTENSITY:
+ ret = iqs621_als->range_en;
+ break;
+
+ case IIO_PROXIMITY:
+ ret = iqs621_als->prox_en;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static int iqs621_als_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&iqs621_als->lock);
+
+ ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->als_flags, &val);
+ if (ret)
+ goto err_mutex;
+ iqs621_als->als_flags = val;
+
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ iqs62x->dev_desc->als_mask,
+ iqs621_als->range_en || state ? 0 :
+ 0xFF);
+ if (!ret)
+ iqs621_als->light_en = state;
+ break;
+
+ case IIO_INTENSITY:
+ ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ iqs62x->dev_desc->als_mask,
+ iqs621_als->light_en || state ? 0 :
+ 0xFF);
+ if (!ret)
+ iqs621_als->range_en = state;
+ break;
+
+ case IIO_PROXIMITY:
+ ret = regmap_read(iqs62x->regmap, IQS622_IR_FLAGS, &val);
+ if (ret)
+ goto err_mutex;
+ iqs621_als->ir_flags = val;
+
+ ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ iqs62x->dev_desc->ir_mask,
+ state ? 0 : 0xFF);
+ if (!ret)
+ iqs621_als->prox_en = state;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+err_mutex:
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static int iqs621_als_read_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int *val, int *val2)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ int ret = IIO_VAL_INT;
+
+ mutex_lock(&iqs621_als->lock);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ *val = iqs621_als->thresh_light * 16;
+ break;
+
+ case IIO_EV_DIR_FALLING:
+ *val = iqs621_als->thresh_dark * 4;
+ break;
+
+ case IIO_EV_DIR_EITHER:
+ if (iqs621_als->ir_flags_mask == IQS622_IR_FLAGS_TOUCH)
+ *val = iqs621_als->thresh_prox * 4;
+ else
+ *val = iqs621_als->thresh_prox;
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static int iqs621_als_write_event_value(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ enum iio_event_info info,
+ int val, int val2)
+{
+ struct iqs621_als_private *iqs621_als = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs621_als->iqs62x;
+ unsigned int thresh_reg, thresh_val;
+ u8 ir_flags_mask, *thresh_cache;
+ int ret = -EINVAL;
+
+ mutex_lock(&iqs621_als->lock);
+
+ switch (dir) {
+ case IIO_EV_DIR_RISING:
+ thresh_reg = IQS621_ALS_THRESH_LIGHT;
+ thresh_val = val / 16;
+
+ thresh_cache = &iqs621_als->thresh_light;
+ ir_flags_mask = 0;
+ break;
+
+ case IIO_EV_DIR_FALLING:
+ thresh_reg = IQS621_ALS_THRESH_DARK;
+ thresh_val = val / 4;
+
+ thresh_cache = &iqs621_als->thresh_dark;
+ ir_flags_mask = 0;
+ break;
+
+ case IIO_EV_DIR_EITHER:
+ /*
+ * The IQS622 supports two detection thresholds, both measured
+ * in the same arbitrary units reported by read_raw: proximity
+ * (0 through 255 in steps of 1), and touch (0 through 1020 in
+ * steps of 4).
+ *
+ * Based on the single detection threshold chosen by the user,
+ * select the hardware threshold that gives the best trade-off
+ * between range and resolution.
+ *
+ * By default, the close-range (but coarse) touch threshold is
+ * chosen during probe.
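+	 *
+	 * For example, a requested threshold of 600 selects the touch
+	 * threshold and is written as 600 / 4 = 150, whereas a value of
+	 * 200 selects the proximity threshold and is written verbatim.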
+ */
+ switch (val) {
+ case 0 ... 255:
+ thresh_reg = IQS622_IR_THRESH_PROX;
+ thresh_val = val;
+
+ ir_flags_mask = IQS622_IR_FLAGS_PROX;
+ break;
+
+ case 256 ... 1020:
+ thresh_reg = IQS622_IR_THRESH_TOUCH;
+ thresh_val = val / 4;
+
+ ir_flags_mask = IQS622_IR_FLAGS_TOUCH;
+ break;
+
+ default:
+ goto err_mutex;
+ }
+
+ thresh_cache = &iqs621_als->thresh_prox;
+ break;
+
+ default:
+ goto err_mutex;
+ }
+
+ if (thresh_val > 0xFF)
+ goto err_mutex;
+
+ ret = regmap_write(iqs62x->regmap, thresh_reg, thresh_val);
+ if (ret)
+ goto err_mutex;
+
+ *thresh_cache = thresh_val;
+ iqs621_als->ir_flags_mask = ir_flags_mask;
+
+err_mutex:
+ mutex_unlock(&iqs621_als->lock);
+
+ return ret;
+}
+
+static const struct iio_info iqs621_als_info = {
+ .read_raw = &iqs621_als_read_raw,
+ .read_event_config = iqs621_als_read_event_config,
+ .write_event_config = iqs621_als_write_event_config,
+ .read_event_value = iqs621_als_read_event_value,
+ .write_event_value = iqs621_als_write_event_value,
+};
+
+static const struct iio_event_spec iqs621_als_range_events[] = {
+ {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_event_spec iqs621_als_light_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_RISING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_FALLING,
+ .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_chan_spec iqs621_als_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .address = IQS621_ALS_FLAGS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = iqs621_als_range_events,
+ .num_event_specs = ARRAY_SIZE(iqs621_als_range_events),
+ },
+ {
+ .type = IIO_LIGHT,
+ .address = IQS621_ALS_UI_OUT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .event_spec = iqs621_als_light_events,
+ .num_event_specs = ARRAY_SIZE(iqs621_als_light_events),
+ },
+};
+
+static const struct iio_event_spec iqs622_als_prox_events[] = {
+ {
+ .type = IIO_EV_TYPE_THRESH,
+ .dir = IIO_EV_DIR_EITHER,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE) |
+ BIT(IIO_EV_INFO_VALUE),
+ },
+};
+
+static const struct iio_chan_spec iqs622_als_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .address = IQS622_ALS_FLAGS,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = iqs621_als_range_events,
+ .num_event_specs = ARRAY_SIZE(iqs621_als_range_events),
+ .modified = true,
+ },
+ {
+ .type = IIO_INTENSITY,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .address = IQS622_IR_RANGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .modified = true,
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .address = IQS622_IR_UI_OUT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_spec = iqs622_als_prox_events,
+ .num_event_specs = ARRAY_SIZE(iqs622_als_prox_events),
+ },
+};
+
+static int iqs621_als_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iqs621_als_private *iqs621_als;
+ struct iio_dev *indio_dev;
+ unsigned int val;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*iqs621_als));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iqs621_als = iio_priv(indio_dev);
+ iqs621_als->iqs62x = iqs62x;
+
+ if (iqs62x->dev_desc->prod_num == IQS622_PROD_NUM) {
+ ret = regmap_read(iqs62x->regmap, IQS622_IR_THRESH_TOUCH,
+ &val);
+ if (ret)
+ return ret;
+ iqs621_als->thresh_prox = val;
+ iqs621_als->ir_flags_mask = IQS622_IR_FLAGS_TOUCH;
+
+ indio_dev->channels = iqs622_als_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs622_als_channels);
+ } else {
+ ret = regmap_read(iqs62x->regmap, IQS621_ALS_THRESH_LIGHT,
+ &val);
+ if (ret)
+ return ret;
+ iqs621_als->thresh_light = val;
+
+ ret = regmap_read(iqs62x->regmap, IQS621_ALS_THRESH_DARK,
+ &val);
+ if (ret)
+ return ret;
+ iqs621_als->thresh_dark = val;
+
+ indio_dev->channels = iqs621_als_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs621_als_channels);
+ }
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->name = iqs62x->dev_desc->dev_name;
+ indio_dev->info = &iqs621_als_info;
+
+ mutex_init(&iqs621_als->lock);
+
+ iqs621_als->notifier.notifier_call = iqs621_als_notifier;
+ ret = blocking_notifier_chain_register(&iqs621_als->iqs62x->nh,
+ &iqs621_als->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ iqs621_als_notifier_unregister,
+ iqs621_als);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver iqs621_als_platform_driver = {
+ .driver = {
+ .name = "iqs621-als",
+ },
+ .probe = iqs621_als_probe,
+};
+module_platform_driver(iqs621_als_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS621/622 Ambient Light Sensors");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs621-als");
diff --git a/drivers/iio/position/Kconfig b/drivers/iio/position/Kconfig
new file mode 100644
index 000000000000..eda67f008c5b
--- /dev/null
+++ b/drivers/iio/position/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Linear and angular position sensors
+#
+# When adding new entries keep the list in alphabetical order
+
+menu "Linear and angular position sensors"
+
+config IQS624_POS
+ tristate "Azoteq IQS624/625 angular position sensors"
+ depends on MFD_IQS62X || COMPILE_TEST
+ help
+ Say Y here if you want to build support for the Azoteq IQS624
+ and IQS625 angular position sensors.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iqs624-pos.
+
+endmenu
diff --git a/drivers/iio/position/Makefile b/drivers/iio/position/Makefile
new file mode 100644
index 000000000000..3cbe7a734352
--- /dev/null
+++ b/drivers/iio/position/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for IIO linear and angular position sensors
+#
+
+# When adding new entries keep the list in alphabetical order
+
+obj-$(CONFIG_IQS624_POS) += iqs624-pos.o
diff --git a/drivers/iio/position/iqs624-pos.c b/drivers/iio/position/iqs624-pos.c
new file mode 100644
index 000000000000..77096c31c2ba
--- /dev/null
+++ b/drivers/iio/position/iqs624-pos.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS624/625 Angular Position Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/iio/events.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IQS624_POS_DEG_OUT 0x16
+
+#define IQS624_POS_SCALE1 (314159 / 180)
+#define IQS624_POS_SCALE2 100000
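+
+/*
+ * The scale fraction SCALE1 / SCALE2 is 1745 / 100000 = 0.01745, i.e.
+ * roughly pi / 180, so multiplying a raw angle in degrees by the
+ * reported scale yields radians (e.g. 90 degrees -> ~1.571 rad). On
+ * the IQS625, the numerator is additionally multiplied by the interval
+ * divider read from IQS624_INTERVAL_DIV.
+ */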
+
+struct iqs624_pos_private {
+ struct iqs62x_core *iqs62x;
+ struct notifier_block notifier;
+ struct mutex lock;
+ bool angle_en;
+ u16 angle;
+};
+
+static int iqs624_pos_angle_en(struct iqs62x_core *iqs62x, bool angle_en)
+{
+ unsigned int event_mask = IQS624_HALL_UI_WHL_EVENT;
+
+ /*
+ * The IQS625 reports angular position in the form of coarse intervals,
+ * so only interval change events are unmasked. Conversely, the IQS624
+ * reports angular position down to one degree of resolution, so wheel
+ * movement events are unmasked instead.
+ */
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM)
+ event_mask = IQS624_HALL_UI_INT_EVENT;
+
+ return regmap_update_bits(iqs62x->regmap, IQS624_HALL_UI, event_mask,
+ angle_en ? 0 : 0xFF);
+}
+
+static int iqs624_pos_notifier(struct notifier_block *notifier,
+ unsigned long event_flags, void *context)
+{
+ struct iqs62x_event_data *event_data = context;
+ struct iqs624_pos_private *iqs624_pos;
+ struct iqs62x_core *iqs62x;
+ struct iio_dev *indio_dev;
+ u16 angle = event_data->ui_data;
+ s64 timestamp;
+ int ret;
+
+ iqs624_pos = container_of(notifier, struct iqs624_pos_private,
+ notifier);
+ indio_dev = iio_priv_to_dev(iqs624_pos);
+ timestamp = iio_get_time_ns(indio_dev);
+
+ iqs62x = iqs624_pos->iqs62x;
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM)
+ angle = event_data->interval;
+
+ mutex_lock(&iqs624_pos->lock);
+
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ ret = iqs624_pos_angle_en(iqs62x, iqs624_pos->angle_en);
+ if (ret) {
+ dev_err(indio_dev->dev.parent,
+ "Failed to re-initialize device: %d\n", ret);
+ ret = NOTIFY_BAD;
+ } else {
+ ret = NOTIFY_OK;
+ }
+ } else if (iqs624_pos->angle_en && (angle != iqs624_pos->angle)) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_ANGL, 0,
+ IIO_EV_TYPE_CHANGE,
+ IIO_EV_DIR_NONE),
+ timestamp);
+
+ iqs624_pos->angle = angle;
+ ret = NOTIFY_OK;
+ } else {
+ ret = NOTIFY_DONE;
+ }
+
+ mutex_unlock(&iqs624_pos->lock);
+
+ return ret;
+}
+
+static void iqs624_pos_notifier_unregister(void *context)
+{
+ struct iqs624_pos_private *iqs624_pos = context;
+ struct iio_dev *indio_dev = iio_priv_to_dev(iqs624_pos);
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(&iqs624_pos->iqs62x->nh,
+ &iqs624_pos->notifier);
+ if (ret)
+ dev_err(indio_dev->dev.parent,
+ "Failed to unregister notifier: %d\n", ret);
+}
+
+static int iqs624_pos_angle_get(struct iqs62x_core *iqs62x, unsigned int *val)
+{
+ int ret;
+ __le16 val_buf;
+
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM)
+ return regmap_read(iqs62x->regmap, iqs62x->dev_desc->interval,
+ val);
+
+ ret = regmap_raw_read(iqs62x->regmap, IQS624_POS_DEG_OUT, &val_buf,
+ sizeof(val_buf));
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(val_buf);
+
+ return 0;
+}
+
+static int iqs624_pos_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs624_pos->iqs62x;
+ unsigned int scale = 1;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = iqs624_pos_angle_get(iqs62x, val);
+ if (ret)
+ return ret;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM) {
+ ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV,
+ &scale);
+ if (ret)
+ return ret;
+ }
+
+ *val = scale * IQS624_POS_SCALE1;
+ *val2 = IQS624_POS_SCALE2;
+ return IIO_VAL_FRACTIONAL;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int iqs624_pos_read_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir)
+{
+ struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&iqs624_pos->lock);
+ ret = iqs624_pos->angle_en;
+ mutex_unlock(&iqs624_pos->lock);
+
+ return ret;
+}
+
+static int iqs624_pos_write_event_config(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ int state)
+{
+ struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev);
+ struct iqs62x_core *iqs62x = iqs624_pos->iqs62x;
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&iqs624_pos->lock);
+
+ ret = iqs624_pos_angle_get(iqs62x, &val);
+ if (ret)
+ goto err_mutex;
+
+ ret = iqs624_pos_angle_en(iqs62x, state);
+ if (ret)
+ goto err_mutex;
+
+ iqs624_pos->angle = val;
+ iqs624_pos->angle_en = state;
+
+err_mutex:
+ mutex_unlock(&iqs624_pos->lock);
+
+ return ret;
+}
+
+static const struct iio_info iqs624_pos_info = {
+ .read_raw = &iqs624_pos_read_raw,
+ .read_event_config = iqs624_pos_read_event_config,
+ .write_event_config = iqs624_pos_write_event_config,
+};
+
+static const struct iio_event_spec iqs624_pos_events[] = {
+ {
+ .type = IIO_EV_TYPE_CHANGE,
+ .dir = IIO_EV_DIR_NONE,
+ .mask_separate = BIT(IIO_EV_INFO_ENABLE),
+ },
+};
+
+static const struct iio_chan_spec iqs624_pos_channels[] = {
+ {
+ .type = IIO_ANGL,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .event_spec = iqs624_pos_events,
+ .num_event_specs = ARRAY_SIZE(iqs624_pos_events),
+ },
+};
+
+static int iqs624_pos_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iqs624_pos_private *iqs624_pos;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*iqs624_pos));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iqs624_pos = iio_priv(indio_dev);
+ iqs624_pos->iqs62x = iqs62x;
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->channels = iqs624_pos_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs624_pos_channels);
+ indio_dev->name = iqs62x->dev_desc->dev_name;
+ indio_dev->info = &iqs624_pos_info;
+
+ mutex_init(&iqs624_pos->lock);
+
+ iqs624_pos->notifier.notifier_call = iqs624_pos_notifier;
+ ret = blocking_notifier_chain_register(&iqs624_pos->iqs62x->nh,
+ &iqs624_pos->notifier);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(&pdev->dev,
+ iqs624_pos_notifier_unregister,
+ iqs624_pos);
+ if (ret)
+ return ret;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver iqs624_pos_platform_driver = {
+ .driver = {
+ .name = "iqs624-pos",
+ },
+ .probe = iqs624_pos_probe,
+};
+module_platform_driver(iqs624_pos_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS624/625 Angular Position Sensors");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs624-pos");
diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c
index b521bebd551c..c079b8960082 100644
--- a/drivers/iio/pressure/cros_ec_baro.c
+++ b/drivers/iio/pressure/cros_ec_baro.c
@@ -134,10 +134,14 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
if (!indio_dev)
return -ENOMEM;
- ret = cros_ec_sensors_core_init(pdev, indio_dev, true);
+ ret = cros_ec_sensors_core_init(pdev, indio_dev, true,
+ cros_ec_sensors_capture,
+ cros_ec_sensors_push_data);
if (ret)
return ret;
+ iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes);
+
indio_dev->info = &cros_ec_baro_info;
state = iio_priv(indio_dev);
state->core.type = state->core.resp->info.type;
@@ -147,8 +151,7 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
channel->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
channel->info_mask_shared_by_all =
BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_SAMP_FREQ) |
- BIT(IIO_CHAN_INFO_FREQUENCY);
+ BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->info_mask_shared_by_all_available =
BIT(IIO_CHAN_INFO_SAMP_FREQ);
channel->scan_type.realbits = CROS_EC_SENSOR_BITS;
@@ -182,11 +185,6 @@ static int cros_ec_baro_probe(struct platform_device *pdev)
state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd;
- ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
- cros_ec_sensors_capture, NULL);
- if (ret)
- return ret;
-
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
index e1ccb4003015..f1f2a1499c9e 100644
--- a/drivers/iio/temperature/Kconfig
+++ b/drivers/iio/temperature/Kconfig
@@ -4,6 +4,16 @@
#
menu "Temperature sensors"
+config IQS620AT_TEMP
+ tristate "Azoteq IQS620AT temperature sensor"
+ depends on MFD_IQS62X || COMPILE_TEST
+ help
+ Say Y here if you want to build support for the Azoteq IQS620AT
+ temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called iqs620at-temp.
+
config LTC2983
tristate "Analog Devices Multi-Sensor Digital Temperature Measurement System"
depends on SPI
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
index d6b850b0cf63..90c113115422 100644
--- a/drivers/iio/temperature/Makefile
+++ b/drivers/iio/temperature/Makefile
@@ -3,6 +3,7 @@
# Makefile for industrial I/O temperature drivers
#
+obj-$(CONFIG_IQS620AT_TEMP) += iqs620at-temp.o
obj-$(CONFIG_LTC2983) += ltc2983.o
obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o
obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o
diff --git a/drivers/iio/temperature/iqs620at-temp.c b/drivers/iio/temperature/iqs620at-temp.c
new file mode 100644
index 000000000000..3fd52b3eb030
--- /dev/null
+++ b/drivers/iio/temperature/iqs620at-temp.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS620AT Temperature Sensor
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define IQS620_TEMP_UI_OUT 0x1A
+
+#define IQS620_TEMP_SCALE 1000
+#define IQS620_TEMP_OFFSET (-100)
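+
+/*
+ * Per the IIO sysfs convention, user space computes the processed
+ * value as (raw + offset) * scale; a raw reading of 125 therefore
+ * yields (125 - 100) * 1000 = 25000 milli-degrees Celsius (25 C).
+ */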
+
+static int iqs620_temp_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct iqs62x_core *iqs62x = iio_device_get_drvdata(indio_dev);
+ int ret;
+ __le16 val_buf;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = regmap_raw_read(iqs62x->regmap, IQS620_TEMP_UI_OUT,
+ &val_buf, sizeof(val_buf));
+ if (ret)
+ return ret;
+
+ *val = le16_to_cpu(val_buf);
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = IQS620_TEMP_SCALE;
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_OFFSET:
+ *val = IQS620_TEMP_OFFSET;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info iqs620_temp_info = {
+ .read_raw = &iqs620_temp_read_raw,
+};
+
+static const struct iio_chan_spec iqs620_temp_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET),
+ },
+};
+
+static int iqs620_temp_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, 0);
+ if (!indio_dev)
+ return -ENOMEM;
+
+ iio_device_set_drvdata(indio_dev, iqs62x);
+
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->channels = iqs620_temp_channels;
+ indio_dev->num_channels = ARRAY_SIZE(iqs620_temp_channels);
+ indio_dev->name = iqs62x->dev_desc->dev_name;
+ indio_dev->info = &iqs620_temp_info;
+
+ return devm_iio_device_register(&pdev->dev, indio_dev);
+}
+
+static struct platform_driver iqs620_temp_platform_driver = {
+ .driver = {
+ .name = "iqs620at-temp",
+ },
+ .probe = iqs620_temp_probe,
+};
+module_platform_driver(iqs620_temp_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS620AT Temperature Sensor");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs620at-temp");
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 4706ff09f0e8..28de965a08d5 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -663,6 +663,16 @@ config KEYBOARD_IPAQ_MICRO
To compile this driver as a module, choose M here: the
module will be called ipaq-micro-keys.
+config KEYBOARD_IQS62X
+ tristate "Azoteq IQS620A/621/622/624/625 keys and switches"
+ depends on MFD_IQS62X
+ help
+ Say Y here to enable key and switch support for the Azoteq IQS620A,
+ IQS621, IQS622, IQS624 and IQS625 multi-function sensors.
+
+ To compile this driver as a module, choose M here: the module will
+ be called iqs62x-keys.
+
config KEYBOARD_OMAP
tristate "TI OMAP keypad support"
depends on ARCH_OMAP1
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index f5b17524adf2..1d689fdd5c00 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o
+obj-$(CONFIG_KEYBOARD_IQS62X) += iqs62x-keys.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o
obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o
diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c
new file mode 100644
index 000000000000..93446b21f98f
--- /dev/null
+++ b/drivers/input/keyboard/iqs62x-keys.c
@@ -0,0 +1,335 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS620A/621/622/624/625 Keys and Switches
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/kernel.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+enum {
+ IQS62X_SW_HALL_N,
+ IQS62X_SW_HALL_S,
+};
+
+static const char * const iqs62x_switch_names[] = {
+ [IQS62X_SW_HALL_N] = "hall-switch-north",
+ [IQS62X_SW_HALL_S] = "hall-switch-south",
+};
+
+struct iqs62x_switch_desc {
+ enum iqs62x_event_flag flag;
+ unsigned int code;
+ bool enabled;
+};
+
+struct iqs62x_keys_private {
+ struct iqs62x_core *iqs62x;
+ struct input_dev *input;
+ struct notifier_block notifier;
+ struct iqs62x_switch_desc switches[ARRAY_SIZE(iqs62x_switch_names)];
+ unsigned int keycode[IQS62X_NUM_KEYS];
+ unsigned int keycodemax;
+ u8 interval;
+};
+
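+/*
+ * A hypothetical firmware node consumed by this driver could look as
+ * follows (devicetree syntax; the chosen codes are illustrative only):
+ *
+ *	linux,keycodes = <KEY_MUTE KEY_PLAYPAUSE>;
+ *
+ *	hall-switch-north {
+ *		linux,code = <SW_LID>;
+ *		azoteq,use-prox;
+ *	};
+ */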
+static int iqs62x_keys_parse_prop(struct platform_device *pdev,
+ struct iqs62x_keys_private *iqs62x_keys)
+{
+ struct fwnode_handle *child;
+ unsigned int val;
+ int ret, i;
+
+ ret = device_property_count_u32(&pdev->dev, "linux,keycodes");
+ if (ret > IQS62X_NUM_KEYS) {
+ dev_err(&pdev->dev, "Too many keycodes present\n");
+ return -EINVAL;
+ } else if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to count keycodes: %d\n", ret);
+ return ret;
+ }
+ iqs62x_keys->keycodemax = ret;
+
+ ret = device_property_read_u32_array(&pdev->dev, "linux,keycodes",
+ iqs62x_keys->keycode,
+ iqs62x_keys->keycodemax);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read keycodes: %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) {
+ child = device_get_named_child_node(&pdev->dev,
+ iqs62x_switch_names[i]);
+ if (!child)
+ continue;
+
+ ret = fwnode_property_read_u32(child, "linux,code", &val);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read switch code: %d\n",
+ ret);
+ return ret;
+ }
+ iqs62x_keys->switches[i].code = val;
+ iqs62x_keys->switches[i].enabled = true;
+
+ if (fwnode_property_present(child, "azoteq,use-prox"))
+ iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
+ IQS62X_EVENT_HALL_N_P :
+ IQS62X_EVENT_HALL_S_P);
+ else
+ iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ?
+ IQS62X_EVENT_HALL_N_T :
+ IQS62X_EVENT_HALL_S_T);
+ }
+
+ return 0;
+}
+
+static int iqs62x_keys_init(struct iqs62x_keys_private *iqs62x_keys)
+{
+ struct iqs62x_core *iqs62x = iqs62x_keys->iqs62x;
+ enum iqs62x_event_flag flag;
+ unsigned int event_reg, val;
+ unsigned int event_mask = 0;
+ int ret, i;
+
+ switch (iqs62x->dev_desc->prod_num) {
+ case IQS620_PROD_NUM:
+ case IQS621_PROD_NUM:
+ case IQS622_PROD_NUM:
+ event_reg = IQS620_GLBL_EVENT_MASK;
+
+ /*
+		 * Discrete button, hysteresis and SAR UI flags represent keys
+ * and are unmasked if mapped to a valid keycode.
+ */
+ for (i = 0; i < iqs62x_keys->keycodemax; i++) {
+ if (iqs62x_keys->keycode[i] == KEY_RESERVED)
+ continue;
+
+ if (iqs62x_events[i].reg == IQS62X_EVENT_PROX)
+ event_mask |= iqs62x->dev_desc->prox_mask;
+ else if (iqs62x_events[i].reg == IQS62X_EVENT_HYST)
+ event_mask |= (iqs62x->dev_desc->hyst_mask |
+ iqs62x->dev_desc->sar_mask);
+ }
+
+ ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->hall_flags,
+ &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Hall UI flags represent switches and are unmasked if their
+ * corresponding child nodes are present.
+ */
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) {
+ if (!(iqs62x_keys->switches[i].enabled))
+ continue;
+
+ flag = iqs62x_keys->switches[i].flag;
+
+ if (iqs62x_events[flag].reg != IQS62X_EVENT_HALL)
+ continue;
+
+ event_mask |= iqs62x->dev_desc->hall_mask;
+
+ input_report_switch(iqs62x_keys->input,
+ iqs62x_keys->switches[i].code,
+ (val & iqs62x_events[flag].mask) ==
+ iqs62x_events[flag].val);
+ }
+
+ input_sync(iqs62x_keys->input);
+ break;
+
+ case IQS624_PROD_NUM:
+ event_reg = IQS624_HALL_UI;
+
+ /*
+ * Interval change events represent keys and are unmasked if
+ * either wheel movement flag is mapped to a valid keycode.
+ */
+ if (iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_UP] != KEY_RESERVED)
+ event_mask |= IQS624_HALL_UI_INT_EVENT;
+
+ if (iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_DN] != KEY_RESERVED)
+ event_mask |= IQS624_HALL_UI_INT_EVENT;
+
+ ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->interval,
+ &val);
+ if (ret)
+ return ret;
+
+ iqs62x_keys->interval = val;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return regmap_update_bits(iqs62x->regmap, event_reg, event_mask, 0);
+}
+
+static int iqs62x_keys_notifier(struct notifier_block *notifier,
+ unsigned long event_flags, void *context)
+{
+ struct iqs62x_event_data *event_data = context;
+ struct iqs62x_keys_private *iqs62x_keys;
+ int ret, i;
+
+ iqs62x_keys = container_of(notifier, struct iqs62x_keys_private,
+ notifier);
+
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ ret = iqs62x_keys_init(iqs62x_keys);
+ if (ret) {
+ dev_err(iqs62x_keys->input->dev.parent,
+ "Failed to re-initialize device: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_OK;
+ }
+
+ for (i = 0; i < iqs62x_keys->keycodemax; i++) {
+ if (iqs62x_events[i].reg == IQS62X_EVENT_WHEEL &&
+ event_data->interval == iqs62x_keys->interval)
+ continue;
+
+ input_report_key(iqs62x_keys->input, iqs62x_keys->keycode[i],
+ event_flags & BIT(i));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++)
+ if (iqs62x_keys->switches[i].enabled)
+ input_report_switch(iqs62x_keys->input,
+ iqs62x_keys->switches[i].code,
+ event_flags &
+ BIT(iqs62x_keys->switches[i].flag));
+
+ input_sync(iqs62x_keys->input);
+
+ if (event_data->interval == iqs62x_keys->interval)
+ return NOTIFY_OK;
+
+ /*
+	 * Each frame contains at most one wheel event (up or down); if one
+	 * is present, a complementary release cycle is emulated.
+ */
+ if (event_flags & BIT(IQS62X_EVENT_WHEEL_UP)) {
+ input_report_key(iqs62x_keys->input,
+ iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_UP],
+ 0);
+ input_sync(iqs62x_keys->input);
+ } else if (event_flags & BIT(IQS62X_EVENT_WHEEL_DN)) {
+ input_report_key(iqs62x_keys->input,
+ iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_DN],
+ 0);
+ input_sync(iqs62x_keys->input);
+ }
+
+ iqs62x_keys->interval = event_data->interval;
+
+ return NOTIFY_OK;
+}
+
+static int iqs62x_keys_probe(struct platform_device *pdev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent);
+ struct iqs62x_keys_private *iqs62x_keys;
+ struct input_dev *input;
+ int ret, i;
+
+ iqs62x_keys = devm_kzalloc(&pdev->dev, sizeof(*iqs62x_keys),
+ GFP_KERNEL);
+ if (!iqs62x_keys)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, iqs62x_keys);
+
+ ret = iqs62x_keys_parse_prop(pdev, iqs62x_keys);
+ if (ret)
+ return ret;
+
+ input = devm_input_allocate_device(&pdev->dev);
+ if (!input)
+ return -ENOMEM;
+
+ input->keycodemax = iqs62x_keys->keycodemax;
+ input->keycode = iqs62x_keys->keycode;
+ input->keycodesize = sizeof(*iqs62x_keys->keycode);
+
+ input->name = iqs62x->dev_desc->dev_name;
+ input->id.bustype = BUS_I2C;
+
+ for (i = 0; i < iqs62x_keys->keycodemax; i++)
+ if (iqs62x_keys->keycode[i] != KEY_RESERVED)
+ input_set_capability(input, EV_KEY,
+ iqs62x_keys->keycode[i]);
+
+ for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++)
+ if (iqs62x_keys->switches[i].enabled)
+ input_set_capability(input, EV_SW,
+ iqs62x_keys->switches[i].code);
+
+ iqs62x_keys->iqs62x = iqs62x;
+ iqs62x_keys->input = input;
+
+ ret = iqs62x_keys_init(iqs62x_keys);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device: %d\n", ret);
+ return ret;
+ }
+
+ ret = input_register_device(iqs62x_keys->input);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register device: %d\n", ret);
+ return ret;
+ }
+
+ iqs62x_keys->notifier.notifier_call = iqs62x_keys_notifier;
+ ret = blocking_notifier_chain_register(&iqs62x_keys->iqs62x->nh,
+ &iqs62x_keys->notifier);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret);
+
+ return ret;
+}
+
+static int iqs62x_keys_remove(struct platform_device *pdev)
+{
+ struct iqs62x_keys_private *iqs62x_keys = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = blocking_notifier_chain_unregister(&iqs62x_keys->iqs62x->nh,
+ &iqs62x_keys->notifier);
+ if (ret)
+ dev_err(&pdev->dev, "Failed to unregister notifier: %d\n", ret);
+
+ return ret;
+}
+
+static struct platform_driver iqs62x_keys_platform_driver = {
+ .driver = {
+ .name = "iqs62x-keys",
+ },
+ .probe = iqs62x_keys_probe,
+ .remove = iqs62x_keys_remove,
+};
+module_platform_driver(iqs62x_keys_platform_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Keys and Switches");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iqs62x-keys");
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index dc974c288e88..08e919dbeb5d 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -530,6 +530,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
},
},
+ {
+ /*
+ * Acer Aspire 5738z
+		 * Touchpad stops working in mux mode when disabled and
+		 * re-enabled with the touchpad enable/disable toggle hotkey
+ */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"),
+ },
+ },
{ }
};
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 491179967b29..14c577c16b16 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1309,6 +1309,7 @@ static int elants_i2c_probe(struct i2c_client *client,
input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0);
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
+ input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
error = input_register_device(ts->input);
if (error) {
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index 0403102e807e..02c75ea385e0 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -29,33 +29,6 @@
#include <linux/of.h>
#include <asm/unaligned.h>
-struct goodix_ts_data;
-
-struct goodix_chip_data {
- u16 config_addr;
- int config_len;
- int (*check_config)(struct goodix_ts_data *, const struct firmware *);
-};
-
-struct goodix_ts_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- const struct goodix_chip_data *chip;
- struct touchscreen_properties prop;
- unsigned int max_touch_num;
- unsigned int int_trigger_type;
- struct regulator *avdd28;
- struct regulator *vddio;
- struct gpio_desc *gpiod_int;
- struct gpio_desc *gpiod_rst;
- u16 id;
- u16 version;
- const char *cfg_name;
- struct completion firmware_loading_complete;
- unsigned long irq_flags;
- unsigned int contact_size;
-};
-
#define GOODIX_GPIO_INT_NAME "irq"
#define GOODIX_GPIO_RST_NAME "reset"
@@ -65,10 +38,13 @@ struct goodix_ts_data {
#define GOODIX_CONTACT_SIZE 8
#define GOODIX_MAX_CONTACT_SIZE 9
#define GOODIX_MAX_CONTACTS 10
+#define GOODIX_MAX_KEYS 7
-#define GOODIX_CONFIG_MAX_LENGTH 240
+#define GOODIX_CONFIG_MIN_LENGTH 186
#define GOODIX_CONFIG_911_LENGTH 186
#define GOODIX_CONFIG_967_LENGTH 228
+#define GOODIX_CONFIG_GT9X_LENGTH 240
+#define GOODIX_CONFIG_MAX_LENGTH 240
/* Register defines */
#define GOODIX_REG_COMMAND 0x8040
@@ -80,39 +56,118 @@ struct goodix_ts_data {
#define GOODIX_REG_ID 0x8140
#define GOODIX_BUFFER_STATUS_READY BIT(7)
+#define GOODIX_HAVE_KEY BIT(4)
#define GOODIX_BUFFER_STATUS_TIMEOUT 20
#define RESOLUTION_LOC 1
#define MAX_CONTACTS_LOC 5
#define TRIGGER_LOC 6
+/* Our special handling for GPIO accesses through ACPI is x86 specific */
+#if defined CONFIG_X86 && defined CONFIG_ACPI
+#define ACPI_GPIO_SUPPORT
+#endif
+
+struct goodix_ts_data;
+
+enum goodix_irq_pin_access_method {
+ IRQ_PIN_ACCESS_NONE,
+ IRQ_PIN_ACCESS_GPIO,
+ IRQ_PIN_ACCESS_ACPI_GPIO,
+ IRQ_PIN_ACCESS_ACPI_METHOD,
+};
+
+struct goodix_chip_data {
+ u16 config_addr;
+ int config_len;
+ int (*check_config)(struct goodix_ts_data *ts, const u8 *cfg, int len);
+ void (*calc_config_checksum)(struct goodix_ts_data *ts);
+};
+
+struct goodix_chip_id {
+ const char *id;
+ const struct goodix_chip_data *data;
+};
+
+#define GOODIX_ID_MAX_LEN 4
+
+struct goodix_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+ const struct goodix_chip_data *chip;
+ struct touchscreen_properties prop;
+ unsigned int max_touch_num;
+ unsigned int int_trigger_type;
+ struct regulator *avdd28;
+ struct regulator *vddio;
+ struct gpio_desc *gpiod_int;
+ struct gpio_desc *gpiod_rst;
+ int gpio_count;
+ int gpio_int_idx;
+ char id[GOODIX_ID_MAX_LEN + 1];
+ u16 version;
+ const char *cfg_name;
+ bool reset_controller_at_probe;
+ bool load_cfg_from_disk;
+ struct completion firmware_loading_complete;
+ unsigned long irq_flags;
+ enum goodix_irq_pin_access_method irq_pin_access_method;
+ unsigned int contact_size;
+ u8 config[GOODIX_CONFIG_MAX_LENGTH];
+ unsigned short keymap[GOODIX_MAX_KEYS];
+};
+
static int goodix_check_cfg_8(struct goodix_ts_data *ts,
- const struct firmware *cfg);
+ const u8 *cfg, int len);
static int goodix_check_cfg_16(struct goodix_ts_data *ts,
- const struct firmware *cfg);
+ const u8 *cfg, int len);
+static void goodix_calc_cfg_checksum_8(struct goodix_ts_data *ts);
+static void goodix_calc_cfg_checksum_16(struct goodix_ts_data *ts);
static const struct goodix_chip_data gt1x_chip_data = {
.config_addr = GOODIX_GT1X_REG_CONFIG_DATA,
- .config_len = GOODIX_CONFIG_MAX_LENGTH,
+ .config_len = GOODIX_CONFIG_GT9X_LENGTH,
.check_config = goodix_check_cfg_16,
+ .calc_config_checksum = goodix_calc_cfg_checksum_16,
};
static const struct goodix_chip_data gt911_chip_data = {
.config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
.config_len = GOODIX_CONFIG_911_LENGTH,
.check_config = goodix_check_cfg_8,
+ .calc_config_checksum = goodix_calc_cfg_checksum_8,
};
static const struct goodix_chip_data gt967_chip_data = {
.config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
.config_len = GOODIX_CONFIG_967_LENGTH,
.check_config = goodix_check_cfg_8,
+ .calc_config_checksum = goodix_calc_cfg_checksum_8,
};
static const struct goodix_chip_data gt9x_chip_data = {
.config_addr = GOODIX_GT9X_REG_CONFIG_DATA,
- .config_len = GOODIX_CONFIG_MAX_LENGTH,
+ .config_len = GOODIX_CONFIG_GT9X_LENGTH,
.check_config = goodix_check_cfg_8,
+ .calc_config_checksum = goodix_calc_cfg_checksum_8,
+};
+
+static const struct goodix_chip_id goodix_chip_ids[] = {
+ { .id = "1151", .data = &gt1x_chip_data },
+ { .id = "5663", .data = &gt1x_chip_data },
+ { .id = "5688", .data = &gt1x_chip_data },
+ { .id = "917S", .data = &gt1x_chip_data },
+
+ { .id = "911", .data = &gt911_chip_data },
+ { .id = "9271", .data = &gt911_chip_data },
+ { .id = "9110", .data = &gt911_chip_data },
+ { .id = "927", .data = &gt911_chip_data },
+ { .id = "928", .data = &gt911_chip_data },
+
+ { .id = "912", .data = &gt967_chip_data },
+ { .id = "9147", .data = &gt967_chip_data },
+ { .id = "967", .data = &gt967_chip_data },
+ { }
};
static const unsigned long goodix_irq_flags[] = {
@@ -168,6 +223,22 @@ static const struct dmi_system_id nine_bytes_report[] = {
{}
};
+/*
+ * These tablets have their x coordinate inverted
+ */
+static const struct dmi_system_id inverted_x_screen[] = {
+#if defined(CONFIG_DMI) && defined(CONFIG_X86)
+ {
+ .ident = "Cube I15-TC",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Cube"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "I15-TC")
+ },
+ },
+#endif
+ {}
+};
+
/**
* goodix_i2c_read - read data from a register of the i2c slave device.
*
@@ -235,28 +306,16 @@ static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value)
return goodix_i2c_write(client, reg, &value, sizeof(value));
}
-static const struct goodix_chip_data *goodix_get_chip_data(u16 id)
+static const struct goodix_chip_data *goodix_get_chip_data(const char *id)
{
- switch (id) {
- case 1151:
- case 5663:
- case 5688:
- return &gt1x_chip_data;
-
- case 911:
- case 9271:
- case 9110:
- case 927:
- case 928:
- return &gt911_chip_data;
-
- case 912:
- case 967:
- return &gt967_chip_data;
+ unsigned int i;
- default:
- return &gt9x_chip_data;
+ for (i = 0; goodix_chip_ids[i].id; i++) {
+ if (!strcmp(goodix_chip_ids[i].id, id))
+ return goodix_chip_ids[i].data;
}
+
+ return &gt9x_chip_data;
}
static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
@@ -264,6 +323,13 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
unsigned long max_timeout;
int touch_num;
int error;
+ u16 addr = GOODIX_READ_COOR_ADDR;
+ /*
+	 * We are going to read a 1-byte header,
+	 * ts->contact_size * max(1, touch_num) bytes of coordinates
+	 * and a 1-byte footer that contains the touch-key code.
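+	 * With the default 8-byte contacts that is a 10-byte initial
+	 * transfer (1 + 8 + 1).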
+ */
+ const int header_contact_keycode_size = 1 + ts->contact_size + 1;
/*
* The 'buffer status' bit, which indicates that the data is valid, is
@@ -272,8 +338,8 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
*/
max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT);
do {
- error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR,
- data, ts->contact_size + 1);
+ error = goodix_i2c_read(ts->client, addr, data,
+ header_contact_keycode_size);
if (error) {
dev_err(&ts->client->dev, "I2C transfer error: %d\n",
error);
@@ -286,11 +352,10 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
return -EPROTO;
if (touch_num > 1) {
- data += 1 + ts->contact_size;
+ addr += header_contact_keycode_size;
+ data += header_contact_keycode_size;
error = goodix_i2c_read(ts->client,
- GOODIX_READ_COOR_ADDR +
- 1 + ts->contact_size,
- data,
+ addr, data,
ts->contact_size *
(touch_num - 1));
if (error)
@@ -307,7 +372,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data)
* The Goodix panel will send spurious interrupts after a
* 'finger up' event, which will always cause a timeout.
*/
- return 0;
+ return -ENOMSG;
}
static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data)
@@ -340,6 +405,25 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w);
}
+static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data)
+{
+ int touch_num;
+ u8 key_value;
+ int i;
+
+ if (data[0] & GOODIX_HAVE_KEY) {
+ touch_num = data[0] & 0x0f;
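+		/*
+		 * The key byte follows the last contact record, e.g.
+		 * data[1 + 8 * 2] with 8-byte contacts and two active
+		 * touches.
+		 */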
+ key_value = data[1 + ts->contact_size * touch_num];
+ for (i = 0; i < GOODIX_MAX_KEYS; i++)
+ if (key_value & BIT(i))
+ input_report_key(ts->input_dev,
+ ts->keymap[i], 1);
+ } else {
+ for (i = 0; i < GOODIX_MAX_KEYS; i++)
+ input_report_key(ts->input_dev, ts->keymap[i], 0);
+ }
+}
+
/**
* goodix_process_events - Process incoming events
*
@@ -350,7 +434,7 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data)
*/
static void goodix_process_events(struct goodix_ts_data *ts)
{
- u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
+ u8 point_data[2 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS];
int touch_num;
int i;
@@ -358,11 +442,7 @@ static void goodix_process_events(struct goodix_ts_data *ts)
if (touch_num < 0)
return;
- /*
- * Bit 4 of the first byte reports the status of the capacitive
- * Windows/Home button.
- */
- input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4));
+ goodix_ts_report_key(ts, point_data);
for (i = 0; i < touch_num; i++)
if (ts->contact_size == 9)
@@ -406,22 +486,21 @@ static int goodix_request_irq(struct goodix_ts_data *ts)
ts->irq_flags, ts->client->name, ts);
}
-static int goodix_check_cfg_8(struct goodix_ts_data *ts,
- const struct firmware *cfg)
+static int goodix_check_cfg_8(struct goodix_ts_data *ts, const u8 *cfg, int len)
{
- int i, raw_cfg_len = cfg->size - 2;
+ int i, raw_cfg_len = len - 2;
u8 check_sum = 0;
for (i = 0; i < raw_cfg_len; i++)
- check_sum += cfg->data[i];
+ check_sum += cfg[i];
check_sum = (~check_sum) + 1;
- if (check_sum != cfg->data[raw_cfg_len]) {
+ if (check_sum != cfg[raw_cfg_len]) {
dev_err(&ts->client->dev,
"The checksum of the config fw is not correct");
return -EINVAL;
}
- if (cfg->data[raw_cfg_len + 1] != 1) {
+ if (cfg[raw_cfg_len + 1] != 1) {
dev_err(&ts->client->dev,
"Config fw must have Config_Fresh register set");
return -EINVAL;
@@ -430,22 +509,35 @@ static int goodix_check_cfg_8(struct goodix_ts_data *ts,
return 0;
}
-static int goodix_check_cfg_16(struct goodix_ts_data *ts,
- const struct firmware *cfg)
+static void goodix_calc_cfg_checksum_8(struct goodix_ts_data *ts)
{
- int i, raw_cfg_len = cfg->size - 3;
+ int i, raw_cfg_len = ts->chip->config_len - 2;
+ u8 check_sum = 0;
+
+ for (i = 0; i < raw_cfg_len; i++)
+ check_sum += ts->config[i];
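+	/*
+	 * Two's complement: the config bytes plus the checksum sum to
+	 * zero modulo 256 (e.g. bytes summing to 0x2f yield 0xd1).
+	 */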
+ check_sum = (~check_sum) + 1;
+
+ ts->config[raw_cfg_len] = check_sum;
+ ts->config[raw_cfg_len + 1] = 1; /* Set "config_fresh" bit */
+}
+
+static int goodix_check_cfg_16(struct goodix_ts_data *ts, const u8 *cfg,
+ int len)
+{
+ int i, raw_cfg_len = len - 3;
u16 check_sum = 0;
for (i = 0; i < raw_cfg_len; i += 2)
- check_sum += get_unaligned_be16(&cfg->data[i]);
+ check_sum += get_unaligned_be16(&cfg[i]);
check_sum = (~check_sum) + 1;
- if (check_sum != get_unaligned_be16(&cfg->data[raw_cfg_len])) {
+ if (check_sum != get_unaligned_be16(&cfg[raw_cfg_len])) {
dev_err(&ts->client->dev,
"The checksum of the config fw is not correct");
return -EINVAL;
}
- if (cfg->data[raw_cfg_len + 2] != 1) {
+ if (cfg[raw_cfg_len + 2] != 1) {
dev_err(&ts->client->dev,
"Config fw must have Config_Fresh register set");
return -EINVAL;
@@ -454,22 +546,35 @@ static int goodix_check_cfg_16(struct goodix_ts_data *ts,
return 0;
}
+static void goodix_calc_cfg_checksum_16(struct goodix_ts_data *ts)
+{
+ int i, raw_cfg_len = ts->chip->config_len - 3;
+ u16 check_sum = 0;
+
+ for (i = 0; i < raw_cfg_len; i += 2)
+ check_sum += get_unaligned_be16(&ts->config[i]);
+ check_sum = (~check_sum) + 1;
+
+ put_unaligned_be16(check_sum, &ts->config[raw_cfg_len]);
+ ts->config[raw_cfg_len + 2] = 1; /* Set "config_fresh" bit */
+}
+
/**
* goodix_check_cfg - Checks if config fw is valid
*
* @ts: goodix_ts_data pointer
* @cfg: firmware config data
*/
-static int goodix_check_cfg(struct goodix_ts_data *ts,
- const struct firmware *cfg)
+static int goodix_check_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len)
{
- if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) {
+ if (len < GOODIX_CONFIG_MIN_LENGTH ||
+ len > GOODIX_CONFIG_MAX_LENGTH) {
dev_err(&ts->client->dev,
"The length of the config fw is not correct");
return -EINVAL;
}
- return ts->chip->check_config(ts, cfg);
+ return ts->chip->check_config(ts, cfg, len);
}
/**
@@ -478,17 +583,15 @@ static int goodix_check_cfg(struct goodix_ts_data *ts,
* @ts: goodix_ts_data pointer
* @cfg: config firmware to write to device
*/
-static int goodix_send_cfg(struct goodix_ts_data *ts,
- const struct firmware *cfg)
+static int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len)
{
int error;
- error = goodix_check_cfg(ts, cfg);
+ error = goodix_check_cfg(ts, cfg, len);
if (error)
return error;
- error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg->data,
- cfg->size);
+ error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg, len);
if (error) {
dev_err(&ts->client->dev, "Failed to write config data: %d",
error);
@@ -502,17 +605,93 @@ static int goodix_send_cfg(struct goodix_ts_data *ts,
return 0;
}
+#ifdef ACPI_GPIO_SUPPORT
+static int goodix_pin_acpi_direction_input(struct goodix_ts_data *ts)
+{
+ acpi_handle handle = ACPI_HANDLE(&ts->client->dev);
+ acpi_status status;
+
+ status = acpi_evaluate_object(handle, "INTI", NULL, NULL);
+ return ACPI_SUCCESS(status) ? 0 : -EIO;
+}
+
+static int goodix_pin_acpi_output_method(struct goodix_ts_data *ts, int value)
+{
+ acpi_handle handle = ACPI_HANDLE(&ts->client->dev);
+ acpi_status status;
+
+ status = acpi_execute_simple_method(handle, "INTO", value);
+ return ACPI_SUCCESS(status) ? 0 : -EIO;
+}
+#else
+static int goodix_pin_acpi_direction_input(struct goodix_ts_data *ts)
+{
+ dev_err(&ts->client->dev,
+ "%s called on device without ACPI support\n", __func__);
+ return -EINVAL;
+}
+
+static int goodix_pin_acpi_output_method(struct goodix_ts_data *ts, int value)
+{
+ dev_err(&ts->client->dev,
+ "%s called on device without ACPI support\n", __func__);
+ return -EINVAL;
+}
+#endif
+
+static int goodix_irq_direction_output(struct goodix_ts_data *ts, int value)
+{
+ switch (ts->irq_pin_access_method) {
+ case IRQ_PIN_ACCESS_NONE:
+ dev_err(&ts->client->dev,
+ "%s called without an irq_pin_access_method set\n",
+ __func__);
+ return -EINVAL;
+ case IRQ_PIN_ACCESS_GPIO:
+ return gpiod_direction_output(ts->gpiod_int, value);
+ case IRQ_PIN_ACCESS_ACPI_GPIO:
+ /*
+		 * The IRQ pin triggers on a falling edge, so it gets marked
+		 * as active-low; use output_raw to avoid the value inversion.
+ */
+ return gpiod_direction_output_raw(ts->gpiod_int, value);
+ case IRQ_PIN_ACCESS_ACPI_METHOD:
+ return goodix_pin_acpi_output_method(ts, value);
+ }
+
+ return -EINVAL; /* Never reached */
+}
+
+static int goodix_irq_direction_input(struct goodix_ts_data *ts)
+{
+ switch (ts->irq_pin_access_method) {
+ case IRQ_PIN_ACCESS_NONE:
+ dev_err(&ts->client->dev,
+ "%s called without an irq_pin_access_method set\n",
+ __func__);
+ return -EINVAL;
+ case IRQ_PIN_ACCESS_GPIO:
+ return gpiod_direction_input(ts->gpiod_int);
+ case IRQ_PIN_ACCESS_ACPI_GPIO:
+ return gpiod_direction_input(ts->gpiod_int);
+ case IRQ_PIN_ACCESS_ACPI_METHOD:
+ return goodix_pin_acpi_direction_input(ts);
+ }
+
+ return -EINVAL; /* Never reached */
+}
+
static int goodix_int_sync(struct goodix_ts_data *ts)
{
int error;
- error = gpiod_direction_output(ts->gpiod_int, 0);
+ error = goodix_irq_direction_output(ts, 0);
if (error)
return error;
msleep(50); /* T5: 50ms */
- error = gpiod_direction_input(ts->gpiod_int);
+ error = goodix_irq_direction_input(ts);
if (error)
return error;
@@ -536,7 +715,7 @@ static int goodix_reset(struct goodix_ts_data *ts)
msleep(20); /* T2: > 10ms */
/* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */
- error = gpiod_direction_output(ts->gpiod_int, ts->client->addr == 0x14);
+ error = goodix_irq_direction_output(ts, ts->client->addr == 0x14);
if (error)
return error;
@@ -560,6 +739,124 @@ static int goodix_reset(struct goodix_ts_data *ts)
return 0;
}
+#ifdef ACPI_GPIO_SUPPORT
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+
+static const struct x86_cpu_id baytrail_cpu_ids[] = {
+ { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, },
+ {}
+};
+
+static inline bool is_byt(void)
+{
+ const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids);
+
+ return !!id;
+}
+
+static const struct acpi_gpio_params first_gpio = { 0, 0, false };
+static const struct acpi_gpio_params second_gpio = { 1, 0, false };
+
+static const struct acpi_gpio_mapping acpi_goodix_int_first_gpios[] = {
+ { GOODIX_GPIO_INT_NAME "-gpios", &first_gpio, 1 },
+ { GOODIX_GPIO_RST_NAME "-gpios", &second_gpio, 1 },
+ { },
+};
+
+static const struct acpi_gpio_mapping acpi_goodix_int_last_gpios[] = {
+ { GOODIX_GPIO_RST_NAME "-gpios", &first_gpio, 1 },
+ { GOODIX_GPIO_INT_NAME "-gpios", &second_gpio, 1 },
+ { },
+};
+
+static const struct acpi_gpio_mapping acpi_goodix_reset_only_gpios[] = {
+ { GOODIX_GPIO_RST_NAME "-gpios", &first_gpio, 1 },
+ { },
+};
+
+static int goodix_resource(struct acpi_resource *ares, void *data)
+{
+ struct goodix_ts_data *ts = data;
+ struct device *dev = &ts->client->dev;
+ struct acpi_resource_gpio *gpio;
+
+ switch (ares->type) {
+ case ACPI_RESOURCE_TYPE_GPIO:
+ gpio = &ares->data.gpio;
+ if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) {
+ if (ts->gpio_int_idx == -1) {
+ ts->gpio_int_idx = ts->gpio_count;
+ } else {
+				dev_err(dev, "More than one GpioInt resource, ignoring ACPI GPIO resources\n");
+ ts->gpio_int_idx = -2;
+ }
+ }
+ ts->gpio_count++;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * This function gets called in case we fail to get the irq GPIO directly
+ * because the ACPI tables lack GPIO-name to ACPI _CRS index mappings
+ * (no _DSD UUID daffd814-6eba-4d8c-8a91-bc9bbf4aa301 data).
+ * In that case we add our own mapping and then goodix_get_gpio_config()
+ * retries to get the GPIOs based on the added mapping.
+ */
+static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+{
+ const struct acpi_gpio_mapping *gpio_mapping = NULL;
+ struct device *dev = &ts->client->dev;
+ LIST_HEAD(resources);
+ int ret;
+
+ ts->gpio_count = 0;
+ ts->gpio_int_idx = -1;
+ ret = acpi_dev_get_resources(ACPI_COMPANION(dev), &resources,
+ goodix_resource, ts);
+ if (ret < 0) {
+ dev_err(dev, "Error getting ACPI resources: %d\n", ret);
+ return ret;
+ }
+
+ acpi_dev_free_resource_list(&resources);
+
+ if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) {
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ gpio_mapping = acpi_goodix_int_first_gpios;
+ } else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) {
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ gpio_mapping = acpi_goodix_int_last_gpios;
+ } else if (ts->gpio_count == 1 && ts->gpio_int_idx == -1 &&
+ acpi_has_method(ACPI_HANDLE(dev), "INTI") &&
+ acpi_has_method(ACPI_HANDLE(dev), "INTO")) {
+ dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n");
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD;
+ gpio_mapping = acpi_goodix_reset_only_gpios;
+ } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) {
+ dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n");
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO;
+ gpio_mapping = acpi_goodix_int_last_gpios;
+ } else {
+ dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n",
+ ts->gpio_count, ts->gpio_int_idx);
+ return -EINVAL;
+ }
+
+ return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping);
+}
+#else
+static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_X86 && CONFIG_ACPI */
+
/**
* goodix_get_gpio_config - Get GPIO config from ACPI/DT
*
@@ -570,6 +867,7 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
int error;
struct device *dev;
struct gpio_desc *gpiod;
+ bool added_acpi_mappings = false;
if (!ts->client)
return -EINVAL;
@@ -593,6 +891,7 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
return error;
}
+retry_get_irq_gpio:
/* Get the interrupt GPIO pin number */
gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN);
if (IS_ERR(gpiod)) {
@@ -602,6 +901,11 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
GOODIX_GPIO_INT_NAME, error);
return error;
}
+ if (!gpiod && has_acpi_companion(dev) && !added_acpi_mappings) {
+ added_acpi_mappings = true;
+ if (goodix_add_acpi_gpio_mappings(ts) == 0)
+ goto retry_get_irq_gpio;
+ }
ts->gpiod_int = gpiod;
@@ -617,6 +921,31 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
ts->gpiod_rst = gpiod;
+ switch (ts->irq_pin_access_method) {
+ case IRQ_PIN_ACCESS_ACPI_GPIO:
+ /*
+ * We end up here if goodix_add_acpi_gpio_mappings() has
+ * called devm_acpi_dev_add_driver_gpios() because the ACPI
+ * tables did not contain name to index mappings.
+	 * Check that we successfully got both GPIOs after adding our own
+	 * acpi_gpio_mapping; if we did not, reset irq_pin_access_method
+	 * to IRQ_PIN_ACCESS_NONE.
+ */
+ if (!ts->gpiod_int || !ts->gpiod_rst)
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
+ break;
+ case IRQ_PIN_ACCESS_ACPI_METHOD:
+ if (!ts->gpiod_rst)
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE;
+ break;
+ default:
+ if (ts->gpiod_int && ts->gpiod_rst) {
+ ts->reset_controller_at_probe = true;
+ ts->load_cfg_from_disk = true;
+ ts->irq_pin_access_method = IRQ_PIN_ACCESS_GPIO;
+ }
+ }
+
return 0;
}
@@ -629,12 +958,11 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts)
*/
static void goodix_read_config(struct goodix_ts_data *ts)
{
- u8 config[GOODIX_CONFIG_MAX_LENGTH];
int x_max, y_max;
int error;
error = goodix_i2c_read(ts->client, ts->chip->config_addr,
- config, ts->chip->config_len);
+ ts->config, ts->chip->config_len);
if (error) {
dev_warn(&ts->client->dev, "Error reading config: %d\n",
error);
@@ -643,15 +971,17 @@ static void goodix_read_config(struct goodix_ts_data *ts)
return;
}
- ts->int_trigger_type = config[TRIGGER_LOC] & 0x03;
- ts->max_touch_num = config[MAX_CONTACTS_LOC] & 0x0f;
+ ts->int_trigger_type = ts->config[TRIGGER_LOC] & 0x03;
+ ts->max_touch_num = ts->config[MAX_CONTACTS_LOC] & 0x0f;
- x_max = get_unaligned_le16(&config[RESOLUTION_LOC]);
- y_max = get_unaligned_le16(&config[RESOLUTION_LOC + 2]);
+ x_max = get_unaligned_le16(&ts->config[RESOLUTION_LOC]);
+ y_max = get_unaligned_le16(&ts->config[RESOLUTION_LOC + 2]);
if (x_max && y_max) {
input_abs_set_max(ts->input_dev, ABS_MT_POSITION_X, x_max - 1);
input_abs_set_max(ts->input_dev, ABS_MT_POSITION_Y, y_max - 1);
}
+
+ ts->chip->calc_config_checksum(ts);
}
/**
@@ -663,7 +993,7 @@ static int goodix_read_version(struct goodix_ts_data *ts)
{
int error;
u8 buf[6];
- char id_str[5];
+ char id_str[GOODIX_ID_MAX_LEN + 1];
error = goodix_i2c_read(ts->client, GOODIX_REG_ID, buf, sizeof(buf));
if (error) {
@@ -671,14 +1001,13 @@ static int goodix_read_version(struct goodix_ts_data *ts)
return error;
}
- memcpy(id_str, buf, 4);
- id_str[4] = 0;
- if (kstrtou16(id_str, 10, &ts->id))
- ts->id = 0x1001;
+ memcpy(id_str, buf, GOODIX_ID_MAX_LEN);
+ id_str[GOODIX_ID_MAX_LEN] = 0;
+ strscpy(ts->id, id_str, GOODIX_ID_MAX_LEN + 1);
ts->version = get_unaligned_le16(&buf[4]);
- dev_info(&ts->client->dev, "ID %d, version: %04x\n", ts->id,
+ dev_info(&ts->client->dev, "ID %s, version: %04x\n", ts->id,
ts->version);
return 0;
@@ -722,6 +1051,7 @@ static int goodix_i2c_test(struct i2c_client *client)
static int goodix_configure_dev(struct goodix_ts_data *ts)
{
int error;
+ int i;
ts->int_trigger_type = GOODIX_INT_TRIGGER;
ts->max_touch_num = GOODIX_MAX_CONTACTS;
@@ -736,11 +1066,23 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
ts->input_dev->phys = "input/ts";
ts->input_dev->id.bustype = BUS_I2C;
ts->input_dev->id.vendor = 0x0416;
- ts->input_dev->id.product = ts->id;
+ if (kstrtou16(ts->id, 10, &ts->input_dev->id.product))
+ ts->input_dev->id.product = 0x1001;
ts->input_dev->id.version = ts->version;
+ ts->input_dev->keycode = ts->keymap;
+ ts->input_dev->keycodesize = sizeof(ts->keymap[0]);
+ ts->input_dev->keycodemax = GOODIX_MAX_KEYS;
+
/* Capacitive Windows/Home button on some devices */
- input_set_capability(ts->input_dev, EV_KEY, KEY_LEFTMETA);
+ for (i = 0; i < GOODIX_MAX_KEYS; ++i) {
+ if (i == 0)
+ ts->keymap[i] = KEY_LEFTMETA;
+ else
+ ts->keymap[i] = KEY_F1 + (i - 1);
+
+ input_set_capability(ts->input_dev, EV_KEY, ts->keymap[i]);
+ }
input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_X);
input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_Y);
@@ -780,6 +1122,12 @@ static int goodix_configure_dev(struct goodix_ts_data *ts)
"Non-standard 9-bytes report format quirk\n");
}
+ if (dmi_check_system(inverted_x_screen)) {
+ ts->prop.invert_x = true;
+ dev_dbg(&ts->client->dev,
+ "Applying 'inverted x screen' quirk\n");
+ }
+
error = input_mt_init_slots(ts->input_dev, ts->max_touch_num,
INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
if (error) {
@@ -820,7 +1168,7 @@ static void goodix_config_cb(const struct firmware *cfg, void *ctx)
if (cfg) {
/* send device configuration to the firmware */
- error = goodix_send_cfg(ts, cfg);
+ error = goodix_send_cfg(ts, cfg->data, cfg->size);
if (error)
goto err_release_cfg;
}
@@ -889,7 +1237,8 @@ static int goodix_ts_probe(struct i2c_client *client,
if (error)
return error;
- if (ts->gpiod_int && ts->gpiod_rst) {
+reset:
+ if (ts->reset_controller_at_probe) {
/* reset the controller */
error = goodix_reset(ts);
if (error) {
@@ -900,6 +1249,12 @@ static int goodix_ts_probe(struct i2c_client *client,
error = goodix_i2c_test(client);
if (error) {
+ if (!ts->reset_controller_at_probe &&
+ ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) {
+ /* Retry after a controller reset */
+ ts->reset_controller_at_probe = true;
+ goto reset;
+ }
dev_err(&client->dev, "I2C communication failure: %d\n", error);
return error;
}
@@ -912,10 +1267,10 @@ static int goodix_ts_probe(struct i2c_client *client,
ts->chip = goodix_get_chip_data(ts->id);
- if (ts->gpiod_int && ts->gpiod_rst) {
+ if (ts->load_cfg_from_disk) {
/* update device config */
ts->cfg_name = devm_kasprintf(&client->dev, GFP_KERNEL,
- "goodix_%d_cfg.bin", ts->id);
+ "goodix_%s_cfg.bin", ts->id);
if (!ts->cfg_name)
return -ENOMEM;
@@ -943,7 +1298,7 @@ static int goodix_ts_remove(struct i2c_client *client)
{
struct goodix_ts_data *ts = i2c_get_clientdata(client);
- if (ts->gpiod_int && ts->gpiod_rst)
+ if (ts->load_cfg_from_disk)
wait_for_completion(&ts->firmware_loading_complete);
return 0;
@@ -955,19 +1310,20 @@ static int __maybe_unused goodix_suspend(struct device *dev)
struct goodix_ts_data *ts = i2c_get_clientdata(client);
int error;
+ if (ts->load_cfg_from_disk)
+ wait_for_completion(&ts->firmware_loading_complete);
+
/* We need gpio pins to suspend/resume */
- if (!ts->gpiod_int || !ts->gpiod_rst) {
+ if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
disable_irq(client->irq);
return 0;
}
- wait_for_completion(&ts->firmware_loading_complete);
-
/* Free IRQ as IRQ pin is used as output in the suspend sequence */
goodix_free_irq(ts);
/* Output LOW on the INT pin for 5 ms */
- error = gpiod_direction_output(ts->gpiod_int, 0);
+ error = goodix_irq_direction_output(ts, 0);
if (error) {
goodix_request_irq(ts);
return error;
@@ -979,7 +1335,7 @@ static int __maybe_unused goodix_suspend(struct device *dev)
GOODIX_CMD_SCREEN_OFF);
if (error) {
dev_err(&ts->client->dev, "Screen off command failed\n");
- gpiod_direction_input(ts->gpiod_int);
+ goodix_irq_direction_input(ts);
goodix_request_irq(ts);
return -EAGAIN;
}
@@ -997,9 +1353,10 @@ static int __maybe_unused goodix_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct goodix_ts_data *ts = i2c_get_clientdata(client);
+ u8 config_ver;
int error;
- if (!ts->gpiod_int || !ts->gpiod_rst) {
+ if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) {
enable_irq(client->irq);
return 0;
}
@@ -1008,7 +1365,7 @@ static int __maybe_unused goodix_resume(struct device *dev)
* Exit sleep mode by outputting HIGH level to INT pin
* for 2ms~5ms.
*/
- error = gpiod_direction_output(ts->gpiod_int, 1);
+ error = goodix_irq_direction_output(ts, 1);
if (error)
return error;
@@ -1018,6 +1375,27 @@ static int __maybe_unused goodix_resume(struct device *dev)
if (error)
return error;
+ error = goodix_i2c_read(ts->client, ts->chip->config_addr,
+ &config_ver, 1);
+ if (error)
+ dev_warn(dev, "Error reading config version: %d, resetting controller\n",
+ error);
+ else if (config_ver != ts->config[0])
+ dev_info(dev, "Config version mismatch %d != %d, resetting controller\n",
+ config_ver, ts->config[0]);
+
+ if (error != 0 || config_ver != ts->config[0]) {
+ error = goodix_reset(ts);
+ if (error) {
+ dev_err(dev, "Controller reset failed.\n");
+ return error;
+ }
+
+ error = goodix_send_cfg(ts, ts->config, ts->chip->config_len);
+ if (error)
+ return error;
+ }
+
error = goodix_request_irq(ts);
if (error)
return error;
@@ -1050,6 +1428,8 @@ static const struct of_device_id goodix_of_match[] = {
{ .compatible = "goodix,gt911" },
{ .compatible = "goodix,gt9110" },
{ .compatible = "goodix,gt912" },
+ { .compatible = "goodix,gt9147" },
+ { .compatible = "goodix,gt917s" },
{ .compatible = "goodix,gt927" },
{ .compatible = "goodix,gt9271" },
{ .compatible = "goodix,gt928" },
diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
index e16ec4c7043a..97342e14b4f1 100644
--- a/drivers/input/touchscreen/of_touchscreen.c
+++ b/drivers/input/touchscreen/of_touchscreen.c
@@ -66,7 +66,7 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
{
struct device *dev = input->dev.parent;
struct input_absinfo *absinfo;
- unsigned int axis;
+ unsigned int axis, axis_x, axis_y;
unsigned int minimum, maximum, fuzz;
bool data_present;
@@ -74,33 +74,34 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
if (!input->absinfo)
return;
- axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
+ axis_x = multitouch ? ABS_MT_POSITION_X : ABS_X;
+ axis_y = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
+
data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x",
- input_abs_get_min(input, axis),
+ input_abs_get_min(input, axis_x),
&minimum) |
touchscreen_get_prop_u32(dev, "touchscreen-size-x",
input_abs_get_max(input,
- axis) + 1,
+ axis_x) + 1,
&maximum) |
touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
- input_abs_get_fuzz(input, axis),
+ input_abs_get_fuzz(input, axis_x),
&fuzz);
if (data_present)
- touchscreen_set_params(input, axis, minimum, maximum - 1, fuzz);
+ touchscreen_set_params(input, axis_x, minimum, maximum - 1, fuzz);
- axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y;
data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y",
- input_abs_get_min(input, axis),
+ input_abs_get_min(input, axis_y),
&minimum) |
touchscreen_get_prop_u32(dev, "touchscreen-size-y",
input_abs_get_max(input,
- axis) + 1,
+ axis_y) + 1,
&maximum) |
touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
- input_abs_get_fuzz(input, axis),
+ input_abs_get_fuzz(input, axis_y),
&fuzz);
if (data_present)
- touchscreen_set_params(input, axis, minimum, maximum - 1, fuzz);
+ touchscreen_set_params(input, axis_y, minimum, maximum - 1, fuzz);
axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE;
data_present = touchscreen_get_prop_u32(dev,
@@ -117,15 +118,13 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
if (!prop)
return;
- axis = multitouch ? ABS_MT_POSITION_X : ABS_X;
-
- prop->max_x = input_abs_get_max(input, axis);
- prop->max_y = input_abs_get_max(input, axis + 1);
+ prop->max_x = input_abs_get_max(input, axis_x);
+ prop->max_y = input_abs_get_max(input, axis_y);
prop->invert_x =
device_property_read_bool(dev, "touchscreen-inverted-x");
if (prop->invert_x) {
- absinfo = &input->absinfo[axis];
+ absinfo = &input->absinfo[axis_x];
absinfo->maximum -= absinfo->minimum;
absinfo->minimum = 0;
}
@@ -133,7 +132,7 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
prop->invert_y =
device_property_read_bool(dev, "touchscreen-inverted-y");
if (prop->invert_y) {
- absinfo = &input->absinfo[axis + 1];
+ absinfo = &input->absinfo[axis_y];
absinfo->maximum -= absinfo->minimum;
absinfo->minimum = 0;
}
@@ -141,7 +140,7 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
prop->swap_x_y =
device_property_read_bool(dev, "touchscreen-swapped-x-y");
if (prop->swap_x_y)
- swap(input->absinfo[axis], input->absinfo[axis + 1]);
+ swap(input->absinfo[axis_x], input->absinfo[axis_y]);
}
EXPORT_SYMBOL(touchscreen_parse_properties);
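
With axis_x and axis_y cached up front, the parser no longer assumes the Y axis code is always axis + 1, and the invert/swap properties it fills in drive the usual coordinate transform at report time. A standalone sketch of that transform, in the spirit of touchscreen_report_pos() (stand-in types, local names):

#include <stdbool.h>
#include <stdio.h>

struct ts_props {
        unsigned int max_x, max_y;
        bool invert_x, invert_y, swap_x_y;
};

static void apply_props(const struct ts_props *p, unsigned int *x,
                        unsigned int *y)
{
        if (p->invert_x)
                *x = p->max_x - *x;
        if (p->invert_y)
                *y = p->max_y - *y;
        if (p->swap_x_y) {
                unsigned int tmp = *x;

                *x = *y;
                *y = tmp;
        }
}

int main(void)
{
        struct ts_props p = { .max_x = 799, .max_y = 479, .invert_x = true };
        unsigned int x = 100, y = 200;

        apply_props(&p, &x, &y);
        printf("x=%u y=%u\n", x, y);    /* x inverted to 699 */
        return 0;
}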
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index d2fade984999..58b4a4dbfc78 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -188,6 +188,7 @@ config INTEL_IOMMU
select NEED_DMA_MAP_STATE
select DMAR_TABLE
select SWIOTLB
+ select IOASID
help
DMA remapping (DMAR) devices support enables independent address
translations for Direct Memory Access (DMA) from devices.
@@ -273,7 +274,7 @@ config IRQ_REMAP
# OMAP IOMMU support
config OMAP_IOMMU
bool "OMAP IOMMU Support"
- depends on ARM && MMU
+ depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on ARCH_OMAP2PLUS || COMPILE_TEST
select IOMMU_API
---help---
@@ -291,7 +292,7 @@ config OMAP_IOMMU_DEBUG
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC))
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -325,7 +326,7 @@ config TEGRA_IOMMU_SMMU
config EXYNOS_IOMMU
bool "Exynos IOMMU Support"
- depends on ARCH_EXYNOS && MMU
+ depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC))
depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes
select IOMMU_API
select ARM_DMA_USE_IOMMU
@@ -361,7 +362,7 @@ config IPMMU_VMSA
config SPAPR_TCE_IOMMU
bool "sPAPR TCE IOMMU Support"
- depends on PPC_POWERNV || PPC_PSERIES
+ depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST)
select IOMMU_API
help
Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -370,7 +371,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support
config ARM_SMMU
tristate "ARM Ltd. System MMU (SMMU) Support"
- depends on (ARM64 || ARM) && MMU
+ depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU
select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE
select ARM_DMA_USE_IOMMU if ARM
@@ -440,7 +441,7 @@ config S390_IOMMU
config S390_CCW_IOMMU
bool "S390 CCW IOMMU Support"
- depends on S390 && CCW
+ depends on S390 && CCW || COMPILE_TEST
select IOMMU_API
help
Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -448,7 +449,7 @@ config S390_CCW_IOMMU
config S390_AP_IOMMU
bool "S390 AP IOMMU Support"
- depends on S390 && ZCRYPT
+ depends on S390 && ZCRYPT || COMPILE_TEST
select IOMMU_API
help
Enables bits of IOMMU API required by VFIO. The iommu_ops
@@ -456,7 +457,7 @@ config S390_AP_IOMMU
config MTK_IOMMU
bool "MTK IOMMU Support"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || COMPILE_TEST
depends on ARCH_MEDIATEK || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
@@ -506,8 +507,8 @@ config HYPERV_IOMMU
guests to run with x2APIC mode enabled.
config VIRTIO_IOMMU
- bool "Virtio IOMMU driver"
- depends on VIRTIO=y
+ tristate "Virtio IOMMU driver"
+ depends on VIRTIO
depends on ARM64
select IOMMU_API
select INTERVAL_TREE
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index f8d01d6b00da..ca8c4522045b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -348,7 +348,7 @@
#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
-#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL)
#define DTE_GCR3_INDEX_A 0
#define DTE_GCR3_INDEX_B 1
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index aa3ac2a03807..82508730feb7 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -69,6 +69,9 @@
#define IDR1_SSIDSIZE GENMASK(10, 6)
#define IDR1_SIDSIZE GENMASK(5, 0)
+#define ARM_SMMU_IDR3 0xc
+#define IDR3_RIL (1 << 10)
+
#define ARM_SMMU_IDR5 0x14
#define IDR5_STALL_MAX GENMASK(31, 16)
#define IDR5_GRAN64K (1 << 6)
@@ -346,9 +349,14 @@
#define CMDQ_CFGI_1_LEAF (1UL << 0)
#define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0)
+#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12)
+#define CMDQ_TLBI_RANGE_NUM_MAX 31
+#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20)
#define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32)
#define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF (1UL << 0)
+#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8)
+#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10)
#define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12)
@@ -473,9 +481,13 @@ struct arm_smmu_cmdq_ent {
#define CMDQ_OP_TLBI_S2_IPA 0x2a
#define CMDQ_OP_TLBI_NSNH_ALL 0x30
struct {
+ u8 num;
+ u8 scale;
u16 asid;
u16 vmid;
bool leaf;
+ u8 ttl;
+ u8 tg;
u64 addr;
} tlbi;
@@ -548,6 +560,11 @@ struct arm_smmu_cmdq {
atomic_t lock;
};
+struct arm_smmu_cmdq_batch {
+ u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
+ int num;
+};
+
struct arm_smmu_evtq {
struct arm_smmu_queue q;
u32 max_stalls;
@@ -627,6 +644,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_HYP (1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE (1 << 13)
#define ARM_SMMU_FEAT_VAX (1 << 14)
+#define ARM_SMMU_FEAT_RANGE_INV (1 << 15)
u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
@@ -895,14 +913,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31);
break;
case CMDQ_OP_TLBI_NH_VA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
break;
case CMDQ_OP_TLBI_S2_IPA:
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num);
+ cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale);
cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid);
cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl);
+ cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg);
cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
break;
case CMDQ_OP_TLBI_NH_ASID:
@@ -1482,6 +1508,24 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu)
return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true);
}
+static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ if (cmds->num == CMDQ_BATCH_ENTRIES) {
+ arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false);
+ cmds->num = 0;
+ }
+ arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd);
+ cmds->num++;
+}
+
+static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu,
+ struct arm_smmu_cmdq_batch *cmds)
+{
+ return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true);
+}
+
/* Context descriptor manipulation functions */
static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
int ssid, bool leaf)
@@ -1489,6 +1533,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
size_t i;
unsigned long flags;
struct arm_smmu_master *master;
+ struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_device *smmu = smmu_domain->smmu;
struct arm_smmu_cmdq_ent cmd = {
.opcode = CMDQ_OP_CFGI_CD,
@@ -1502,12 +1547,12 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain,
list_for_each_entry(master, &smmu_domain->devices, domain_head) {
for (i = 0; i < master->num_sids; i++) {
cmd.cfgi.sid = master->sids[i];
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
}
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- arm_smmu_cmdq_issue_sync(smmu);
+ arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu,
@@ -1531,6 +1576,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst,
u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) |
CTXDESC_L1_DESC_V;
+ /* See comment in arm_smmu_write_ctx_desc() */
WRITE_ONCE(*dst, cpu_to_le64(val));
}
@@ -1726,7 +1772,8 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
- *dst = cpu_to_le64(val);
+ /* See comment in arm_smmu_write_ctx_desc() */
+ WRITE_ONCE(*dst, cpu_to_le64(val));
}
static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
@@ -2132,17 +2179,16 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
cmd->atc.size = log2_span;
}
-static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
- struct arm_smmu_cmdq_ent *cmd)
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
{
int i;
+ struct arm_smmu_cmdq_ent cmd;
- if (!master->ats_enabled)
- return 0;
+ arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
for (i = 0; i < master->num_sids; i++) {
- cmd->atc.sid = master->sids[i];
- arm_smmu_cmdq_issue_cmd(master->smmu, cmd);
+ cmd.atc.sid = master->sids[i];
+ arm_smmu_cmdq_issue_cmd(master->smmu, &cmd);
}
return arm_smmu_cmdq_issue_sync(master->smmu);
@@ -2151,10 +2197,11 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
int ssid, unsigned long iova, size_t size)
{
- int ret = 0;
+ int i;
unsigned long flags;
struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_master *master;
+ struct arm_smmu_cmdq_batch cmds = {};
if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
return 0;
@@ -2179,11 +2226,18 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
- list_for_each_entry(master, &smmu_domain->devices, domain_head)
- ret |= arm_smmu_atc_inv_master(master, &cmd);
+ list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+ if (!master->ats_enabled)
+ continue;
+
+ for (i = 0; i < master->num_sids; i++) {
+ cmd.atc.sid = master->sids[i];
+ arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
+ }
+ }
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
- return ret ? -ETIMEDOUT : 0;
+ return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
}
/* IO_PGTABLE API */
@@ -2218,10 +2272,10 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
size_t granule, bool leaf,
struct arm_smmu_domain *smmu_domain)
{
- u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long start = iova, end = iova + size;
- int i = 0;
+ unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+ size_t inv_range = granule;
+ struct arm_smmu_cmdq_batch cmds = {};
struct arm_smmu_cmdq_ent cmd = {
.tlbi = {
.leaf = leaf,
@@ -2239,19 +2293,50 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
+ if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+ /* Get the leaf page size */
+ tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+
+ /* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ cmd.tlbi.tg = (tg - 10) / 2;
+
+ /* Determine what level the granule is at */
+ cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+
+ num_pages = size >> tg;
+ }
+
while (iova < end) {
- if (i == CMDQ_BATCH_ENTRIES) {
- arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false);
- i = 0;
+ if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
+ /*
+ * On each iteration of the loop, the range is 5 bits
+ * worth of the aligned size remaining.
+ * The range in pages is:
+ *
+ * range = (num_pages & (0x1f << __ffs(num_pages)))
+ */
+ unsigned long scale, num;
+
+ /* Determine the power of 2 multiple number of pages */
+ scale = __ffs(num_pages);
+ cmd.tlbi.scale = scale;
+
+ /* Determine how many chunks of 2^scale size we have */
+ num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+ cmd.tlbi.num = num - 1;
+
+ /* range is num * 2^scale * pgsize */
+ inv_range = num << (scale + tg);
+
+ /* Clear out the lower order bits for the next iteration */
+ num_pages -= num << scale;
}
cmd.tlbi.addr = iova;
- arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd);
- iova += granule;
- i++;
+ arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+ iova += inv_range;
}
-
- arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true);
+ arm_smmu_cmdq_batch_submit(smmu, &cmds);
/*
* Unfortunately, this can't be leaf-only since we may have
@@ -2611,7 +2696,6 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master)
static void arm_smmu_disable_ats(struct arm_smmu_master *master)
{
- struct arm_smmu_cmdq_ent cmd;
struct arm_smmu_domain *smmu_domain = master->domain;
if (!master->ats_enabled)
@@ -2623,11 +2707,57 @@ static void arm_smmu_disable_ats(struct arm_smmu_master *master)
* ATC invalidation via the SMMU.
*/
wmb();
- arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd);
- arm_smmu_atc_inv_master(master, &cmd);
+ arm_smmu_atc_inv_master(master);
atomic_dec(&smmu_domain->nr_ats_masters);
}
+static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
+{
+ int ret;
+ int features;
+ int num_pasids;
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(master->dev))
+ return -ENODEV;
+
+ pdev = to_pci_dev(master->dev);
+
+ features = pci_pasid_features(pdev);
+ if (features < 0)
+ return features;
+
+ num_pasids = pci_max_pasids(pdev);
+ if (num_pasids <= 0)
+ return num_pasids;
+
+ ret = pci_enable_pasid(pdev, features);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable PASID\n");
+ return ret;
+ }
+
+ master->ssid_bits = min_t(u8, ilog2(num_pasids),
+ master->smmu->ssid_bits);
+ return 0;
+}
+
+static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
+{
+ struct pci_dev *pdev;
+
+ if (!dev_is_pci(master->dev))
+ return;
+
+ pdev = to_pci_dev(master->dev);
+
+ if (!pdev->pasid_enabled)
+ return;
+
+ master->ssid_bits = 0;
+ pci_disable_pasid(pdev);
+}
+
static void arm_smmu_detach_dev(struct arm_smmu_master *master)
{
unsigned long flags;
@@ -2659,7 +2789,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
if (!fwspec)
return -ENOENT;
- master = fwspec->iommu_priv;
+ master = dev_iommu_priv_get(dev);
smmu = master->smmu;
arm_smmu_detach_dev(master);
@@ -2795,7 +2925,7 @@ static int arm_smmu_add_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return -ENODEV;
- if (WARN_ON_ONCE(fwspec->iommu_priv))
+ if (WARN_ON_ONCE(dev_iommu_priv_get(dev)))
return -EBUSY;
smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
@@ -2810,7 +2940,7 @@ static int arm_smmu_add_device(struct device *dev)
master->smmu = smmu;
master->sids = fwspec->ids;
master->num_sids = fwspec->num_ids;
- fwspec->iommu_priv = master;
+ dev_iommu_priv_set(dev, master);
/* Check the SIDs are in range of the SMMU and our stream table */
for (i = 0; i < master->num_sids; i++) {
@@ -2831,13 +2961,23 @@ static int arm_smmu_add_device(struct device *dev)
master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits);
+ /*
+ * Note that PASID must be enabled before, and disabled after ATS:
+ * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register
+ *
+ * Behavior is undefined if this bit is Set and the value of the PASID
+ * Enable, Execute Requested Enable, or Privileged Mode Requested bits
+ * are changed.
+ */
+ arm_smmu_enable_pasid(master);
+
if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB))
master->ssid_bits = min_t(u8, master->ssid_bits,
CTXDESC_LINEAR_CDMAX);
ret = iommu_device_link(&smmu->iommu, dev);
if (ret)
- goto err_free_master;
+ goto err_disable_pasid;
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group)) {
@@ -2850,9 +2990,11 @@ static int arm_smmu_add_device(struct device *dev)
err_unlink:
iommu_device_unlink(&smmu->iommu, dev);
+err_disable_pasid:
+ arm_smmu_disable_pasid(master);
err_free_master:
kfree(master);
- fwspec->iommu_priv = NULL;
+ dev_iommu_priv_set(dev, NULL);
return ret;
}
@@ -2865,11 +3007,12 @@ static void arm_smmu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
- master = fwspec->iommu_priv;
+ master = dev_iommu_priv_get(dev);
smmu = master->smmu;
arm_smmu_detach_dev(master);
iommu_group_remove_device(dev);
iommu_device_unlink(&smmu->iommu, dev);
+ arm_smmu_disable_pasid(master);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -3700,6 +3843,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
if (smmu->sid_bits <= STRTAB_SPLIT)
smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+ /* IDR3 */
+ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3);
+ if (FIELD_GET(IDR3_RIL, reg))
+ smmu->features |= ARM_SMMU_FEAT_RANGE_INV;
+
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
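
When the SMMU reports ARM_SMMU_FEAT_RANGE_INV, the loop above splits the page count into runs that each cover num * 2^scale leaf pages, peeling the lowest-order run off on every pass (the NUM field encodes num - 1). A standalone userspace sketch of that decomposition, assuming 4K leaf pages; all names are local to the example:

#include <stdio.h>

#define RANGE_NUM_MAX 31UL      /* 5-bit NUM field, encodes num - 1 */

int main(void)
{
        unsigned long num_pages = 0x2345;   /* pages left to invalidate */
        unsigned long tg = 12;              /* log2 of the 4K leaf page */
        unsigned long iova = 0;

        while (num_pages) {
                /* Lowest-order run: a power-of-two multiple of pages */
                unsigned long scale = __builtin_ctzl(num_pages);
                unsigned long num = (num_pages >> scale) & RANGE_NUM_MAX;
                unsigned long range = num << (scale + tg);  /* bytes */

                printf("TLBI: iova=%#lx scale=%lu num_field=%lu range=%#lx\n",
                       iova, scale, num - 1, range);

                iova += range;
                num_pages -= num << scale;
        }
        return 0;
}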
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 16c4b87af42b..a6a5796e9c41 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -98,12 +98,10 @@ struct arm_smmu_master_cfg {
s16 smendx[];
};
#define INVALID_SMENDX -1
-#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
-#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu)
-#define fwspec_smendx(fw, i) \
- (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
-#define for_each_cfg_sme(fw, i, idx) \
- for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
+#define cfg_smendx(cfg, fw, i) \
+ (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i])
+#define for_each_cfg_sme(cfg, fw, i, idx) \
+ for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i)
static bool using_legacy_binding, using_generic_binding;
@@ -1061,7 +1059,7 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
static int arm_smmu_master_alloc_smes(struct device *dev)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = cfg->smmu;
struct arm_smmu_smr *smrs = smmu->smrs;
struct iommu_group *group;
@@ -1069,7 +1067,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
mutex_lock(&smmu->stream_map_mutex);
/* Figure out a viable stream map entry allocation */
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]);
u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]);
@@ -1100,7 +1098,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev)
iommu_group_put(group);
/* It worked! Now, poke the actual hardware */
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
arm_smmu_write_sme(smmu, idx);
smmu->s2crs[idx].group = group;
}
@@ -1117,14 +1115,14 @@ out_err:
return ret;
}
-static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
+static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg,
+ struct iommu_fwspec *fwspec)
{
- struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
- struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
+ struct arm_smmu_device *smmu = cfg->smmu;
int i, idx;
mutex_lock(&smmu->stream_map_mutex);
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
if (arm_smmu_free_sme(smmu, idx))
arm_smmu_write_sme(smmu, idx);
cfg->smendx[i] = INVALID_SMENDX;
@@ -1133,6 +1131,7 @@ static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
+ struct arm_smmu_master_cfg *cfg,
struct iommu_fwspec *fwspec)
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -1146,7 +1145,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
else
type = S2CR_TYPE_TRANS;
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
continue;
@@ -1160,10 +1159,11 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- int ret;
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct arm_smmu_master_cfg *cfg;
struct arm_smmu_device *smmu;
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+ int ret;
if (!fwspec || fwspec->ops != &arm_smmu_ops) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1177,10 +1177,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* domains, just say no (but more politely than by dereferencing NULL).
* This should be at least a WARN_ON once that's sorted.
*/
- if (!fwspec->iommu_priv)
+ cfg = dev_iommu_priv_get(dev);
+ if (!cfg)
return -ENODEV;
- smmu = fwspec_smmu(fwspec);
+ smmu = cfg->smmu;
ret = arm_smmu_rpm_get(smmu);
if (ret < 0)
@@ -1204,7 +1205,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* Looks ok, so add the device to the domain */
- ret = arm_smmu_domain_add_master(smmu_domain, fwspec);
+ ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec);
/*
* Setup an autosuspend delay to avoid bouncing runpm state.
@@ -1383,7 +1384,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
static int arm_smmu_add_device(struct device *dev)
{
- struct arm_smmu_device *smmu;
+ struct arm_smmu_device *smmu = NULL;
struct arm_smmu_master_cfg *cfg;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
int i, ret;
@@ -1429,7 +1430,7 @@ static int arm_smmu_add_device(struct device *dev)
goto out_free;
cfg->smmu = smmu;
- fwspec->iommu_priv = cfg;
+ dev_iommu_priv_set(dev, cfg);
while (i--)
cfg->smendx[i] = INVALID_SMENDX;
@@ -1467,7 +1468,7 @@ static void arm_smmu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
- cfg = fwspec->iommu_priv;
+ cfg = dev_iommu_priv_get(dev);
smmu = cfg->smmu;
ret = arm_smmu_rpm_get(smmu);
@@ -1475,23 +1476,25 @@ static void arm_smmu_remove_device(struct device *dev)
return;
iommu_device_unlink(&smmu->iommu, dev);
- arm_smmu_master_free_smes(fwspec);
+ arm_smmu_master_free_smes(cfg, fwspec);
arm_smmu_rpm_put(smmu);
+ dev_iommu_priv_set(dev, NULL);
iommu_group_remove_device(dev);
- kfree(fwspec->iommu_priv);
+ kfree(cfg);
iommu_fwspec_free(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
+ struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
+ struct arm_smmu_device *smmu = cfg->smmu;
struct iommu_group *group = NULL;
int i, idx;
- for_each_cfg_sme(fwspec, i, idx) {
+ for_each_cfg_sme(cfg, fwspec, i, idx) {
if (group && smmu->s2crs[idx].group &&
group != smmu->s2crs[idx].group)
return ERR_PTR(-EINVAL);
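
These hunks move the driver-private master configuration from fwspec->iommu_priv to the generic dev_iommu_priv_get()/dev_iommu_priv_set() accessors, so lookups hang off the device itself. A plain-C sketch of that accessor pattern, with stand-in types and local helper names that only mirror the kernel interface:

#include <stdio.h>

struct dev_iommu {
        void *priv;             /* driver-private per-device data */
};

struct device {
        struct dev_iommu *iommu;
};

static void *dev_priv_get(struct device *dev)
{
        return dev->iommu ? dev->iommu->priv : NULL;
}

static void dev_priv_set(struct device *dev, void *priv)
{
        dev->iommu->priv = priv;
}

struct master_cfg {
        int smendx;
};

int main(void)
{
        struct dev_iommu di = { 0 };
        struct device dev = { .iommu = &di };
        struct master_cfg cfg = { .smendx = 5 };
        struct master_cfg *found;

        dev_priv_set(&dev, &cfg);
        found = dev_priv_get(&dev);
        printf("smendx=%d\n", found->smendx);
        return 0;
}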
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 4be549478691..ef0a5246700e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4501,7 +4501,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
struct dmar_atsr_unit *atsru;
struct acpi_dmar_atsr *tmp;
- list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
+ list_for_each_entry_rcu(atsru, &dmar_atsr_units, list,
+ dmar_rcu_check()) {
tmp = (struct acpi_dmar_atsr *)atsru->hdr;
if (atsr->segment != tmp->segment)
continue;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d7f2a5358900..2998418f0a38 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -531,7 +531,7 @@ struct page_req_dsc {
u64 priv_data[2];
};
-#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20)
static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
{
@@ -611,14 +611,15 @@ static irqreturn_t prq_event_thread(int irq, void *d)
* any faults on kernel addresses. */
if (!svm->mm)
goto bad_req;
- /* If the mm is already defunct, don't handle faults. */
- if (!mmget_not_zero(svm->mm))
- goto bad_req;
/* If address is not canonical, return invalid response */
if (!is_canonical_address(address))
goto bad_req;
+ /* If the mm is already defunct, don't handle faults. */
+ if (!mmget_not_zero(svm->mm))
+ goto bad_req;
+
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
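
The reordering above runs the canonical-address check before mmget_not_zero(), so a malformed page request is rejected without ever taking (and dropping) a reference on the mm. A standalone sketch of that validate-then-reference ordering, assuming a 48-bit virtual address model and an arithmetic right shift (stand-in types, not the driver code):

#include <stdbool.h>
#include <stdio.h>

struct mm { int users; };

static bool mmget_not_zero(struct mm *mm)
{
        if (mm->users == 0)
                return false;   /* mm already defunct */
        mm->users++;
        return true;
}

static void mmput(struct mm *mm)
{
        mm->users--;
}

/* Canonical for 48-bit VAs: bits 63:48 replicate bit 47 */
static bool is_canonical(unsigned long addr)
{
        return (unsigned long)(((long)(addr << 16)) >> 16) == addr;
}

static int handle_fault(struct mm *mm, unsigned long addr)
{
        if (!is_canonical(addr))
                return -1;      /* rejected before any refcount op */
        if (!mmget_not_zero(mm))
                return -1;

        printf("servicing fault at %#lx\n", addr);
        mmput(mm);
        return 0;
}

int main(void)
{
        struct mm mm = { .users = 1 };

        handle_fault(&mm, 0x7fff00001000UL);
        handle_fault(&mm, 0x1000000000000000UL); /* non-canonical: rejected */
        printf("users=%d\n", mm.users);
        return 0;
}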
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3e3528436e0b..2b471419e26c 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -152,9 +152,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
}
EXPORT_SYMBOL_GPL(iommu_device_unregister);
-static struct iommu_param *iommu_get_dev_param(struct device *dev)
+static struct dev_iommu *dev_iommu_get(struct device *dev)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
if (param)
return param;
@@ -164,14 +164,14 @@ static struct iommu_param *iommu_get_dev_param(struct device *dev)
return NULL;
mutex_init(&param->lock);
- dev->iommu_param = param;
+ dev->iommu = param;
return param;
}
-static void iommu_free_dev_param(struct device *dev)
+static void dev_iommu_free(struct device *dev)
{
- kfree(dev->iommu_param);
- dev->iommu_param = NULL;
+ kfree(dev->iommu);
+ dev->iommu = NULL;
}
int iommu_probe_device(struct device *dev)
@@ -183,7 +183,7 @@ int iommu_probe_device(struct device *dev)
if (!ops)
return -EINVAL;
- if (!iommu_get_dev_param(dev))
+ if (!dev_iommu_get(dev))
return -ENOMEM;
if (!try_module_get(ops->owner)) {
@@ -200,7 +200,7 @@ int iommu_probe_device(struct device *dev)
err_module_put:
module_put(ops->owner);
err_free_dev_param:
- iommu_free_dev_param(dev);
+ dev_iommu_free(dev);
return ret;
}
@@ -211,9 +211,9 @@ void iommu_release_device(struct device *dev)
if (dev->iommu_group)
ops->remove_device(dev);
- if (dev->iommu_param) {
+ if (dev->iommu) {
module_put(ops->owner);
- iommu_free_dev_param(dev);
+ dev_iommu_free(dev);
}
}
@@ -972,7 +972,7 @@ int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
void *data)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
int ret = 0;
if (!param)
@@ -1015,7 +1015,7 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
*/
int iommu_unregister_device_fault_handler(struct device *dev)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
int ret = 0;
if (!param)
@@ -1055,7 +1055,7 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
*/
int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
{
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
struct iommu_fault_event *evt_pending = NULL;
struct iommu_fault_param *fparam;
int ret = 0;
@@ -1104,7 +1104,7 @@ int iommu_page_response(struct device *dev,
int ret = -EINVAL;
struct iommu_fault_event *evt;
struct iommu_fault_page_request *prm;
- struct iommu_param *param = dev->iommu_param;
+ struct dev_iommu *param = dev->iommu;
struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
if (!domain || !domain->ops->page_response)
@@ -2405,7 +2405,11 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode,
if (fwspec)
return ops == fwspec->ops ? 0 : -EINVAL;
- fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!dev_iommu_get(dev))
+ return -ENOMEM;
+
+ /* Preallocate for the overwhelmingly common case of 1 ID */
+ fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
@@ -2432,15 +2436,15 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_free);
int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
{
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- size_t size;
- int i;
+ int i, new_num;
if (!fwspec)
return -EINVAL;
- size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]);
- if (size > sizeof(*fwspec)) {
- fwspec = krealloc(fwspec, size, GFP_KERNEL);
+ new_num = fwspec->num_ids + num_ids;
+ if (new_num > 1) {
+ fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num),
+ GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
@@ -2450,7 +2454,7 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
for (i = 0; i < num_ids; i++)
fwspec->ids[fwspec->num_ids + i] = ids[i];
- fwspec->num_ids += num_ids;
+ fwspec->num_ids = new_num;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
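
The fwspec hunks above switch to struct_size() for flexible-array sizing and preallocate room for a single ID, growing with krealloc() only when more are added. A userspace approximation of that allocate-then-grow pattern; the macro below is a local stand-in without the kernel version's overflow checking, and the realloc() error path is simplified:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fwspec_like {
        unsigned int num_ids;
        unsigned int ids[];     /* flexible array member */
};

/* Local stand-in for struct_size(): no overflow saturation */
#define STRUCT_SIZE(p, member, n) \
        (sizeof(*(p)) + sizeof((p)->member[0]) * (size_t)(n))

int main(void)
{
        struct fwspec_like *fw;
        unsigned int more[] = { 7, 8, 9 };
        unsigned int new_num;

        /* Preallocate for the common case of one ID */
        fw = calloc(1, STRUCT_SIZE(fw, ids, 1));
        if (!fw)
                return 1;
        fw->ids[0] = 42;
        fw->num_ids = 1;

        /* Grow the trailing array; krealloc() plays this role in the kernel */
        new_num = fw->num_ids + 3;
        fw = realloc(fw, STRUCT_SIZE(fw, ids, new_num));
        if (!fw)
                return 1;
        memcpy(&fw->ids[fw->num_ids], more, sizeof(more));
        fw->num_ids = new_num;

        printf("num_ids=%u last=%u\n", fw->num_ids, fw->ids[3]);
        free(fw);
        return 0;
}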
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ecb3f9464dd5..310cf09feea3 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -89,9 +89,7 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
-
- return fwspec ? fwspec->iommu_priv : NULL;
+ return dev_iommu_priv_get(dev);
}
#define TLB_LOOP_TIMEOUT 100 /* 100us */
@@ -727,14 +725,13 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
static int ipmmu_init_platform_device(struct device *dev,
struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct platform_device *ipmmu_pdev;
ipmmu_pdev = of_find_device_by_node(args->np);
if (!ipmmu_pdev)
return -ENODEV;
- fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev);
+ dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev));
return 0;
}
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 95945f467c03..5f4d6df59cf6 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -358,8 +358,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
if (!data)
return -ENODEV;
@@ -378,7 +378,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
if (!data)
return;
@@ -450,7 +450,7 @@ static int mtk_iommu_add_device(struct device *dev)
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an iommu client device */
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
iommu_device_link(&data->iommu, dev);
group = iommu_group_get_for_dev(dev);
@@ -469,7 +469,7 @@ static void mtk_iommu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
iommu_device_unlink(&data->iommu, dev);
iommu_group_remove_device(dev);
@@ -496,7 +496,6 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev)
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct platform_device *m4updev;
if (args->args_count != 1) {
@@ -505,13 +504,13 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
return -EINVAL;
}
- if (!fwspec->iommu_priv) {
+ if (!dev_iommu_priv_get(dev)) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- fwspec->iommu_priv = platform_get_drvdata(m4updev);
+ dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
}
return iommu_fwspec_add_ids(dev, args->args, 1);
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c
index e93b94ecac45..a31be05601c9 100644
--- a/drivers/iommu/mtk_iommu_v1.c
+++ b/drivers/iommu/mtk_iommu_v1.c
@@ -263,8 +263,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain)
static int mtk_iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
struct mtk_iommu_domain *dom = to_mtk_domain(domain);
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
int ret;
if (!data)
@@ -286,7 +286,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain,
static void mtk_iommu_detach_device(struct iommu_domain *domain,
struct device *dev)
{
- struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;
+ struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
if (!data)
return;
@@ -387,20 +387,20 @@ static int mtk_iommu_create_mapping(struct device *dev,
return -EINVAL;
}
- if (!fwspec->iommu_priv) {
+ if (!dev_iommu_priv_get(dev)) {
/* Get the m4u device */
m4updev = of_find_device_by_node(args->np);
if (WARN_ON(!m4updev))
return -EINVAL;
- fwspec->iommu_priv = platform_get_drvdata(m4updev);
+ dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
}
ret = iommu_fwspec_add_ids(dev, args->args, 1);
if (ret)
return ret;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
m4udev = data->dev;
mtk_mapping = m4udev->archdata.iommu;
if (!mtk_mapping) {
@@ -459,7 +459,7 @@ static int mtk_iommu_add_device(struct device *dev)
if (err)
return err;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
mtk_mapping = data->dev->archdata.iommu;
err = arm_iommu_attach_device(dev, mtk_mapping);
if (err) {
@@ -478,7 +478,7 @@ static void mtk_iommu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &mtk_iommu_ops)
return;
- data = fwspec->iommu_priv;
+ data = dev_iommu_priv_get(dev);
iommu_device_unlink(&data->iommu, dev);
iommu_group_remove_device(dev);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index be551cc34be4..887fefcb03b4 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -167,7 +167,7 @@ static int omap2_iommu_enable(struct omap_iommu *obj)
{
u32 l, pa;
- if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
+ if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K))
return -EINVAL;
pa = virt_to_phys(obj->iopgd);
@@ -434,7 +434,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
bytes = iopgsz_to_bytes(cr.cam & 3);
if ((start <= da) && (da < start + bytes)) {
- dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
+ dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
__func__, start, da, bytes);
iotlb_load_cr(obj, &cr);
iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
@@ -1352,11 +1352,11 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
omap_pgsz = bytes_to_iopgsz(bytes);
if (omap_pgsz < 0) {
- dev_err(dev, "invalid size to map: %d\n", bytes);
+ dev_err(dev, "invalid size to map: %zu\n", bytes);
return -EINVAL;
}
- dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes);
+ dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);
iotlb_init_entry(&e, da, pa, omap_pgsz);
@@ -1393,7 +1393,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
size_t bytes = 0;
int i;
- dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size);
+ dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
iommu = omap_domain->iommus;
for (i = 0; i < omap_domain->num_iommus; i++, iommu++) {
diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h
index 1a4adb59a859..51d74002cc30 100644
--- a/drivers/iommu/omap-iopgtable.h
+++ b/drivers/iommu/omap-iopgtable.h
@@ -63,7 +63,8 @@
*
* va to pa translation
*/
-static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
+static inline phys_addr_t omap_iommu_translate(unsigned long d, dma_addr_t va,
+ dma_addr_t mask)
{
return (d & mask) | (va & (~mask));
}
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 4328da0b0a9f..0e2a96467767 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -48,7 +48,7 @@ struct qcom_iommu_dev {
void __iomem *local_base;
u32 sec_id;
u8 num_ctxs;
- struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */
+ struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */
};
struct qcom_iommu_ctx {
@@ -74,16 +74,19 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
static const struct iommu_ops qcom_iommu_ops;
-static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec)
+static struct qcom_iommu_dev * to_iommu(struct device *dev)
{
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+
if (!fwspec || fwspec->ops != &qcom_iommu_ops)
return NULL;
- return fwspec->iommu_priv;
+
+ return dev_iommu_priv_get(dev);
}
-static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@@ -115,11 +118,14 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
- struct iommu_fwspec *fwspec = cookie;
+ struct iommu_fwspec *fwspec;
+ struct device *dev = cookie;
unsigned i;
+ fwspec = dev_iommu_fwspec_get(dev);
+
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -133,11 +139,14 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
- struct iommu_fwspec *fwspec = cookie;
+ struct device *dev = cookie;
+ struct iommu_fwspec *fwspec;
unsigned i;
+ fwspec = dev_iommu_fwspec_get(dev);
+
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@@ -147,13 +156,16 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
- struct iommu_fwspec *fwspec = cookie;
+ struct device *dev = cookie;
+ struct iommu_fwspec *fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+ fwspec = dev_iommu_fwspec_get(dev);
+
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@@ -222,9 +234,10 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
static int qcom_iommu_init_domain(struct iommu_domain *domain,
struct qcom_iommu_dev *qcom_iommu,
- struct iommu_fwspec *fwspec)
+ struct device *dev)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct io_pgtable_ops *pgtbl_ops;
struct io_pgtable_cfg pgtbl_cfg;
int i, ret = 0;
@@ -243,7 +256,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
- pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec);
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@@ -256,7 +269,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -363,8 +376,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
int ret;
@@ -375,7 +387,7 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
/* Ensure that the domain is finalized */
pm_runtime_get_sync(qcom_iommu->dev);
- ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec);
+ ret = qcom_iommu_init_domain(domain, qcom_iommu, dev);
pm_runtime_put_sync(qcom_iommu->dev);
if (ret < 0)
return ret;
@@ -397,9 +409,9 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev
static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec);
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+ struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
unsigned i;
if (WARN_ON(!qcom_domain->iommu))
@@ -407,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
@@ -514,7 +526,7 @@ static bool qcom_iommu_capable(enum iommu_cap cap)
static int qcom_iommu_add_device(struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
struct iommu_group *group;
struct device_link *link;
@@ -545,7 +557,7 @@ static int qcom_iommu_add_device(struct device *dev)
static void qcom_iommu_remove_device(struct device *dev)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev));
+ struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
if (!qcom_iommu)
return;
@@ -557,7 +569,6 @@ static void qcom_iommu_remove_device(struct device *dev)
static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct qcom_iommu_dev *qcom_iommu;
struct platform_device *iommu_pdev;
unsigned asid = args->args[0];
@@ -583,14 +594,14 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
WARN_ON(asid > qcom_iommu->num_ctxs))
return -EINVAL;
- if (!fwspec->iommu_priv) {
- fwspec->iommu_priv = qcom_iommu;
+ if (!dev_iommu_priv_get(dev)) {
+ dev_iommu_priv_set(dev, qcom_iommu);
} else {
/* make sure the device's iommus DT node isn't referring to
* multiple different iommu devices. Multiple context
* banks are ok, but multiple devices are not:
*/
- if (WARN_ON(qcom_iommu != fwspec->iommu_priv))
+ if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev)))
return -EINVAL;
}
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 3fb7ba72507d..db6559e8336f 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -247,7 +247,7 @@ static int gart_iommu_add_device(struct device *dev)
{
struct iommu_group *group;
- if (!dev->iommu_fwspec)
+ if (!dev_iommu_fwspec_get(dev))
return -ENODEV;
group = iommu_group_get_for_dev(dev);
diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index cce329d71fba..d5cac4f46ca5 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -466,7 +466,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
struct virtio_iommu_req_probe *probe;
struct virtio_iommu_probe_property *prop;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev = fwspec->iommu_priv;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
if (!fwspec->num_ids)
return -EINVAL;
@@ -607,24 +607,36 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
return &vdomain->domain;
}
-static int viommu_domain_finalise(struct viommu_dev *viommu,
+static int viommu_domain_finalise(struct viommu_endpoint *vdev,
struct iommu_domain *domain)
{
int ret;
+ unsigned long viommu_page_size;
+ struct viommu_dev *viommu = vdev->viommu;
struct viommu_domain *vdomain = to_viommu_domain(domain);
- vdomain->viommu = viommu;
- vdomain->map_flags = viommu->map_flags;
+ viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
+ if (viommu_page_size > PAGE_SIZE) {
+ dev_err(vdev->dev,
+ "granule 0x%lx larger than system page size 0x%lx\n",
+ viommu_page_size, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
+ viommu->last_domain, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+
+ vdomain->id = (unsigned int)ret;
domain->pgsize_bitmap = viommu->pgsize_bitmap;
domain->geometry = viommu->geometry;
- ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
- viommu->last_domain, GFP_KERNEL);
- if (ret >= 0)
- vdomain->id = (unsigned int)ret;
+ vdomain->map_flags = viommu->map_flags;
+ vdomain->viommu = viommu;
- return ret > 0 ? 0 : ret;
+ return 0;
}
static void viommu_domain_free(struct iommu_domain *domain)
@@ -648,7 +660,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
int ret = 0;
struct virtio_iommu_req_attach req;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev = fwspec->iommu_priv;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
struct viommu_domain *vdomain = to_viommu_domain(domain);
mutex_lock(&vdomain->mutex);
@@ -657,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Properly initialize the domain now that we know which viommu
* owns it.
*/
- ret = viommu_domain_finalise(vdev->viommu, domain);
+ ret = viommu_domain_finalise(vdev, domain);
} else if (vdomain->viommu != vdev->viommu) {
dev_err(dev, "cannot attach to foreign vIOMMU\n");
ret = -EXDEV;
@@ -807,8 +819,7 @@ static void viommu_iotlb_sync(struct iommu_domain *domain,
static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
struct iommu_resv_region *entry, *new_entry, *msi = NULL;
- struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
- struct viommu_endpoint *vdev = fwspec->iommu_priv;
+ struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
list_for_each_entry(entry, &vdev->resv_regions, list) {
@@ -876,7 +887,7 @@ static int viommu_add_device(struct device *dev)
vdev->dev = dev;
vdev->viommu = viommu;
INIT_LIST_HEAD(&vdev->resv_regions);
- fwspec->iommu_priv = vdev;
+ dev_iommu_priv_set(dev, vdev);
if (viommu->probe_size) {
/* Get additional information for this endpoint */
@@ -920,7 +931,7 @@ static void viommu_remove_device(struct device *dev)
if (!fwspec || fwspec->ops != &viommu_ops)
return;
- vdev = fwspec->iommu_priv;
+ vdev = dev_iommu_priv_get(dev);
iommu_group_remove_device(dev);
iommu_device_unlink(&vdev->viommu->iommu, dev);
@@ -1082,7 +1093,6 @@ static int viommu_probe(struct virtio_device *vdev)
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &viommu_ops) {
- pci_request_acs();
ret = bus_set_iommu(&pci_bus_type, &viommu_ops);
if (ret)
goto err_unregister;
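
viommu_domain_finalise() above now rejects a configuration whose smallest supported granule (the lowest set bit of the page-size bitmap) is larger than the host page size, since the host could not then map a single guest page. A standalone sketch of that check, with PAGE_SIZE as a local stand-in:

#include <stdio.h>

#define PAGE_SIZE 4096UL        /* local stand-in for the host page size */

static int check_granule(unsigned long pgsize_bitmap)
{
        unsigned long granule = 1UL << __builtin_ctzl(pgsize_bitmap);

        if (granule > PAGE_SIZE) {
                fprintf(stderr, "granule %#lx > page size %#lx\n",
                        granule, PAGE_SIZE);
                return -1;
        }
        return 0;
}

int main(void)
{
        printf("%d\n", check_granule(0x1000));      /* 4K granule: ok */
        printf("%d\n", check_granule(0x10000));     /* 64K-only: rejected */
        return 0;
}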
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index d82f1dea3711..c664d84e1667 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -846,6 +846,17 @@ config LEDS_TPS6105X
It is a single boost converter primarily for white LEDs and
audio amplifiers.
+config LEDS_IP30
+ tristate "LED support for SGI Octane machines"
+ depends on LEDS_CLASS
+ depends on SGI_MFD_IOC3
+ help
+ This option enables support for the Red and White LEDs of
+ SGI Octane machines.
+
+ To compile this driver as a module, choose M here: the module
+ will be called leds-ip30.
+
comment "LED Triggers"
source "drivers/leds/trigger/Kconfig"
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d7e1107753fb..45235d5fb218 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -6,91 +6,92 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o
obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o
obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o
-# LED Platform Drivers
+# LED Platform Drivers (keep this sorted, M-| sort)
obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o
obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o
+obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
+obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
obj-$(CONFIG_LEDS_APU) += leds-apu.o
obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o
-obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o
+obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o
obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o
obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
+obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
+obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
+obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
+obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o
-obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
+obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
+obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
+obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
+obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
+obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
+obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
+obj-$(CONFIG_LEDS_IP30) += leds-ip30.o
+obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
+obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
+obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
+obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
obj-$(CONFIG_LEDS_LM3532) += leds-lm3532.o
obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
+obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
+obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
+obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
obj-$(CONFIG_LEDS_LM3642) += leds-lm3642.o
-obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
-obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
-obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
-obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
-obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o
-obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o
-obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
-obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
-obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o
-obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
+obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
+obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
+obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP3952) += leds-lp3952.o
-obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o
+obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o
obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o
obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o
obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o
-obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
-obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o
-obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
-obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o
-obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
-obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
-obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
-obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
-obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
-obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
-obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
-obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
-obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
-obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
-obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
-obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o
obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o
-obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o
-obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
-obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
-obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
-obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o
obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o
obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
-obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o
-obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o
-obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
+obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o
obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o
-obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o
-obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
-obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o
-obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o
-obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
+obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o
obj-$(CONFIG_LEDS_MLXREG) += leds-mlxreg.o
-obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
-obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o
-obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o
+obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
+obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
+obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o
+obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
+obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
+obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o
+obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
+obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o
+obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o
+obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o
+obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
+obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o
+obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o
-obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o
+obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o
+obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o
+obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o
-obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o
-obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o
+obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o
obj-$(CONFIG_LEDS_TPS6105X) += leds-tps6105x.o
+obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
+obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
+obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
obj-$(CONFIG_LEDS_EL15203000) += leds-el15203000.o
+obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o
# LED Userspace Drivers
obj-$(CONFIG_LEDS_USER) += uleds.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 1fc40e8af75e..3363a6551a70 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -376,7 +376,7 @@ int led_classdev_register_ext(struct device *parent,
if (ret)
dev_warn(parent, "Led %s renamed to %s due to name collision",
- led_cdev->name, dev_name(led_cdev->dev));
+ proposed_name, dev_name(led_cdev->dev));
if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) {
ret = led_add_brightness_hw_changed(led_cdev);
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index bd61a823d0ca..8bbaef5a2986 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -660,7 +660,6 @@ static int bd2802_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct bd2802_led *led;
- struct bd2802_led_platform_data *pdata;
int ret, i;
led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL);
@@ -668,7 +667,6 @@ static int bd2802_probe(struct i2c_client *client,
return -ENOMEM;
led->client = client;
- pdata = led->pdata = dev_get_platdata(&client->dev);
i2c_set_clientdata(client, led);
/*
diff --git a/drivers/leds/leds-ip30.c b/drivers/leds/leds-ip30.c
new file mode 100644
index 000000000000..d4ec7361c616
--- /dev/null
+++ b/drivers/leds/leds-ip30.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LED Driver for SGI Octane machines
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/leds.h>
+
+#define IP30_LED_SYSTEM 0
+#define IP30_LED_FAULT 1
+
+struct ip30_led {
+ struct led_classdev cdev;
+ u32 __iomem *reg;
+};
+
+static void ip30led_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ip30_led *led = container_of(led_cdev, struct ip30_led, cdev);
+
+ writel(value, led->reg);
+}
+
+static int ip30led_create(struct platform_device *pdev, int num)
+{
+ struct resource *res;
+ struct ip30_led *data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, num);
+ if (!res)
+ return -EBUSY;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->reg))
+ return PTR_ERR(data->reg);
+
+ switch (num) {
+ case IP30_LED_SYSTEM:
+ data->cdev.name = "white:power";
+ break;
+ case IP30_LED_FAULT:
+ data->cdev.name = "red:fault";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data->cdev.brightness = readl(data->reg);
+ data->cdev.max_brightness = 1;
+ data->cdev.brightness_set = ip30led_set;
+
+ return devm_led_classdev_register(&pdev->dev, &data->cdev);
+}
+
+static int ip30led_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = ip30led_create(pdev, IP30_LED_SYSTEM);
+ if (ret < 0)
+ return ret;
+
+ return ip30led_create(pdev, IP30_LED_FAULT);
+}
+
+static struct platform_driver ip30led_driver = {
+ .probe = ip30led_probe,
+ .driver = {
+ .name = "ip30-leds",
+ },
+};
+
+module_platform_driver(ip30led_driver);
+
+MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>");
+MODULE_DESCRIPTION("SGI Octane LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ip30-leds");
diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c
index 6f29b8943913..cd768f991da1 100644
--- a/drivers/leds/leds-is31fl32xx.c
+++ b/drivers/leds/leds-is31fl32xx.c
@@ -44,7 +44,7 @@ struct is31fl32xx_priv {
const struct is31fl32xx_chipdef *cdef;
struct i2c_client *client;
unsigned int num_leds;
- struct is31fl32xx_led_data leds[0];
+ struct is31fl32xx_led_data leds[];
};
/**
diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c
index 188a57da981a..aa9bf8cda673 100644
--- a/drivers/leds/leds-lm3532.c
+++ b/drivers/leds/leds-lm3532.c
@@ -140,7 +140,7 @@ struct lm3532_led {
int ctrl_brt_pointer;
int num_leds;
int full_scale_current;
- int enabled:1;
+ unsigned int enabled:1;
u32 led_strings[LM3532_MAX_CONTROL_BANKS];
char label[LED_MAX_NAME_SIZE];
};
diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c
index b71711aff8a3..872d26f9706a 100644
--- a/drivers/leds/leds-lm3697.c
+++ b/drivers/leds/leds-lm3697.c
@@ -246,7 +246,7 @@ static int lm3697_probe_dt(struct lm3697 *priv)
led->num_leds = fwnode_property_count_u32(child, "led-sources");
if (led->num_leds > LM3697_MAX_LED_STRINGS) {
- dev_err(&priv->client->dev, "To many LED strings defined\n");
+ dev_err(&priv->client->dev, "Too many LED strings defined\n");
continue;
}
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 7c500dfdcfa3..538ca5755602 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -12,14 +12,38 @@
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/leds.h>
#include <linux/module.h>
-#include <linux/platform_data/leds-kirkwood-ns2.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include "leds.h"
+enum ns2_led_modes {
+ NS_V2_LED_OFF,
+ NS_V2_LED_ON,
+ NS_V2_LED_SATA,
+};
+
+struct ns2_led_modval {
+ enum ns2_led_modes mode;
+ int cmd_level;
+ int slow_level;
+};
+
+struct ns2_led {
+ const char *name;
+ const char *default_trigger;
+ struct gpio_desc *cmd;
+ struct gpio_desc *slow;
+ int num_modes;
+ struct ns2_led_modval *modval;
+};
+
+struct ns2_led_platform_data {
+ int num_leds;
+ struct ns2_led *leds;
+};
+
/*
* The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED
* modes are available: off, on and SATA activity blinking. The LED modes are
@@ -29,8 +53,8 @@
struct ns2_led_data {
struct led_classdev cdev;
- unsigned int cmd;
- unsigned int slow;
+ struct gpio_desc *cmd;
+ struct gpio_desc *slow;
bool can_sleep;
unsigned char sata; /* True when SATA mode active. */
rwlock_t rw_lock; /* Lock GPIOs. */
@@ -46,8 +70,8 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat,
int cmd_level;
int slow_level;
- cmd_level = gpio_get_value_cansleep(led_dat->cmd);
- slow_level = gpio_get_value_cansleep(led_dat->slow);
+ cmd_level = gpiod_get_value_cansleep(led_dat->cmd);
+ slow_level = gpiod_get_value_cansleep(led_dat->slow);
for (i = 0; i < led_dat->num_modes; i++) {
if (cmd_level == led_dat->modval[i].cmd_level &&
@@ -80,15 +104,15 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat,
write_lock_irqsave(&led_dat->rw_lock, flags);
if (!led_dat->can_sleep) {
- gpio_set_value(led_dat->cmd,
- led_dat->modval[i].cmd_level);
- gpio_set_value(led_dat->slow,
- led_dat->modval[i].slow_level);
+ gpiod_set_value(led_dat->cmd,
+ led_dat->modval[i].cmd_level);
+ gpiod_set_value(led_dat->slow,
+ led_dat->modval[i].slow_level);
goto exit_unlock;
}
- gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
- gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
+ gpiod_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level);
+ gpiod_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level);
exit_unlock:
write_unlock_irqrestore(&led_dat->rw_lock, flags);
@@ -176,26 +200,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
int ret;
enum ns2_led_modes mode;
- ret = devm_gpio_request_one(&pdev->dev, template->cmd,
- gpio_get_value_cansleep(template->cmd) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to setup command GPIO\n",
- template->name);
- return ret;
- }
-
- ret = devm_gpio_request_one(&pdev->dev, template->slow,
- gpio_get_value_cansleep(template->slow) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- template->name);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n",
- template->name);
- return ret;
- }
-
rwlock_init(&led_dat->rw_lock);
led_dat->cdev.name = template->name;
@@ -205,8 +209,8 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat,
led_dat->cdev.groups = ns2_led_groups;
led_dat->cmd = template->cmd;
led_dat->slow = template->slow;
- led_dat->can_sleep = gpio_cansleep(led_dat->cmd) |
- gpio_cansleep(led_dat->slow);
+	led_dat->can_sleep = gpiod_cansleep(led_dat->cmd) ||
+			     gpiod_cansleep(led_dat->slow);
if (led_dat->can_sleep)
led_dat->cdev.brightness_set_blocking = ns2_led_set_blocking;
else
@@ -261,17 +265,26 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata)
const char *string;
int i, num_modes;
struct ns2_led_modval *modval;
+ struct gpio_desc *gd;
- ret = of_get_named_gpio(child, "cmd-gpio", 0);
- if (ret < 0)
- goto err_node_put;
- led->cmd = ret;
- ret = of_get_named_gpio(child, "slow-gpio", 0);
- if (ret < 0)
- goto err_node_put;
- led->slow = ret;
ret = of_property_read_string(child, "label", &string);
led->name = (ret == 0) ? string : child->name;
+
+ gd = gpiod_get_from_of_node(child, "cmd-gpio", 0,
+ GPIOD_ASIS, led->name);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto err_node_put;
+ }
+ led->cmd = gd;
+ gd = gpiod_get_from_of_node(child, "slow-gpio", 0,
+ GPIOD_ASIS, led->name);
+ if (IS_ERR(gd)) {
+ ret = PTR_ERR(gd);
+ goto err_node_put;
+ }
+ led->slow = gd;
+
ret = of_property_read_string(child, "linux,default-trigger",
&string);
if (ret == 0)
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 8b6965a563e9..6c8a724aac51 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -16,60 +16,55 @@
#include <linux/leds.h>
#include <linux/err.h>
#include <linux/pwm.h>
-#include <linux/leds_pwm.h>
#include <linux/slab.h>
+struct led_pwm {
+ const char *name;
+ const char *default_trigger;
+ u8 active_low;
+ unsigned int max_brightness;
+};
+
+struct led_pwm_platform_data {
+ int num_leds;
+ struct led_pwm *leds;
+};
+
struct led_pwm_data {
struct led_classdev cdev;
struct pwm_device *pwm;
+ struct pwm_state pwmstate;
unsigned int active_low;
- unsigned int period;
- int duty;
};
struct led_pwm_priv {
int num_leds;
- struct led_pwm_data leds[0];
+ struct led_pwm_data leds[];
};
-static void __led_pwm_set(struct led_pwm_data *led_dat)
-{
- int new_duty = led_dat->duty;
-
- pwm_config(led_dat->pwm, new_duty, led_dat->period);
-
- if (new_duty == 0)
- pwm_disable(led_dat->pwm);
- else
- pwm_enable(led_dat->pwm);
-}
-
static int led_pwm_set(struct led_classdev *led_cdev,
enum led_brightness brightness)
{
struct led_pwm_data *led_dat =
container_of(led_cdev, struct led_pwm_data, cdev);
unsigned int max = led_dat->cdev.max_brightness;
- unsigned long long duty = led_dat->period;
+ unsigned long long duty = led_dat->pwmstate.period;
duty *= brightness;
do_div(duty, max);
if (led_dat->active_low)
- duty = led_dat->period - duty;
-
- led_dat->duty = duty;
-
- __led_pwm_set(led_dat);
+ duty = led_dat->pwmstate.period - duty;
- return 0;
+ led_dat->pwmstate.duty_cycle = duty;
+ led_dat->pwmstate.enabled = duty > 0;
+ return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate);
}
static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
struct led_pwm *led, struct fwnode_handle *fwnode)
{
struct led_pwm_data *led_data = &priv->leds[priv->num_leds];
- struct pwm_args pargs;
int ret;
led_data->active_low = led->active_low;
@@ -93,17 +88,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv,
led_data->cdev.brightness_set_blocking = led_pwm_set;
- /*
- * FIXME: pwm_apply_args() should be removed when switching to the
- * atomic PWM API.
- */
- pwm_apply_args(led_data->pwm);
-
- pwm_get_args(led_data->pwm, &pargs);
-
- led_data->period = pargs.period;
- if (!led_data->period && (led->pwm_period_ns > 0))
- led_data->period = led->pwm_period_ns;
+ pwm_init_state(led_data->pwm, &led_data->pwmstate);
ret = devm_led_classdev_register(dev, &led_data->cdev);
if (ret == 0) {
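The conversion above is a textbook move from the legacy pwm_config()/pwm_enable() pair to the atomic PWM API: pwm_init_state() seeds a cached pwm_state from the hardware state and the reference args (replacing the pwm_apply_args()/pwm_get_args() dance that was removed), and each brightness change mutates the cached state and commits it in one pwm_apply_state() call. A minimal sketch of the same pattern; my_pwm, level and max_level are illustrative names, not from the driver:

/* Sketch of the atomic PWM pattern; assumes st was filled by pwm_init_state(). */
static int example_set_level(struct pwm_device *my_pwm, struct pwm_state *st,
			     unsigned int level, unsigned int max_level)
{
	u64 duty = (u64)st->period * level;

	do_div(duty, max_level);		/* scale period by relative brightness */
	st->duty_cycle = duty;
	st->enabled = duty > 0;			/* disable outright instead of 0% duty */
	return pwm_apply_state(my_pwm, st);	/* one atomic commit */
}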
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 8d07fdf63a47..e1db43446327 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -201,10 +201,27 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
+static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ int ret;
+ struct linear_c *lc = ti->private;
+ struct block_device *bdev = lc->dev->bdev;
+ struct dax_device *dax_dev = lc->dev->dax_dev;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+
+ dev_sector = linear_map_sector(ti, sector);
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+ if (ret)
+ return ret;
+ return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
#else
#define linear_dax_direct_access NULL
#define linear_dax_copy_from_iter NULL
#define linear_dax_copy_to_iter NULL
+#define linear_dax_zero_page_range NULL
#endif
static struct target_type linear_target = {
@@ -226,6 +243,7 @@ static struct target_type linear_target = {
.direct_access = linear_dax_direct_access,
.dax_copy_from_iter = linear_dax_copy_from_iter,
.dax_copy_to_iter = linear_dax_copy_to_iter,
+ .dax_zero_page_range = linear_dax_zero_page_range,
};
int __init dm_linear_init(void)
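The three dm targets in this series implement ->dax_zero_page_range() with the same page-offset translation, distilled below into a single sketch. The helper name is illustrative, and bdev, dax_dev and the remap comment stand in for each target's own members and mapping logic:

/* Distilled sketch, not a real dm target callback. */
static int example_dax_zero_page_range(struct block_device *bdev,
				       struct dax_device *dax_dev,
				       pgoff_t pgoff, size_t nr_pages)
{
	/* DAX counts pages; the block layer counts 512-byte sectors. */
	sector_t sector = pgoff * PAGE_SECTORS;
	int ret;

	/* ...target-specific remapping of 'sector' goes here (linear, stripe)... */

	/* Fold the partition offset back into a dax_device-relative pgoff. */
	ret = bdev_dax_pgoff(bdev, sector, nr_pages << PAGE_SHIFT, &pgoff);
	if (ret)
		return ret;

	/* The size argument above is in bytes; the count here is in pages. */
	return dax_zero_page_range(dax_dev, pgoff, nr_pages);
}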
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 99721c76225d..8ea20b56b4d6 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -994,10 +994,26 @@ static size_t log_writes_dax_copy_to_iter(struct dm_target *ti,
return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
}
+static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ int ret;
+ struct log_writes_c *lc = ti->private;
+ sector_t sector = pgoff * PAGE_SECTORS;
+
+ ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT,
+ &pgoff);
+ if (ret)
+ return ret;
+	return dax_zero_page_range(lc->dev->dax_dev, pgoff, nr_pages);
+}
+
#else
#define log_writes_dax_direct_access NULL
#define log_writes_dax_copy_from_iter NULL
#define log_writes_dax_copy_to_iter NULL
+#define log_writes_dax_zero_page_range NULL
#endif
static struct target_type log_writes_target = {
@@ -1016,6 +1032,7 @@ static struct target_type log_writes_target = {
.direct_access = log_writes_dax_direct_access,
.dax_copy_from_iter = log_writes_dax_copy_from_iter,
.dax_copy_to_iter = log_writes_dax_copy_to_iter,
+ .dax_zero_page_range = log_writes_dax_zero_page_range,
};
static int __init dm_log_writes_init(void)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 63bbcc20f49a..fa813c0f993d 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -360,10 +360,32 @@ static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff,
return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i);
}
+static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ int ret;
+ sector_t dev_sector, sector = pgoff * PAGE_SECTORS;
+ struct stripe_c *sc = ti->private;
+ struct dax_device *dax_dev;
+ struct block_device *bdev;
+ uint32_t stripe;
+
+ stripe_map_sector(sc, sector, &stripe, &dev_sector);
+ dev_sector += sc->stripe[stripe].physical_start;
+ dax_dev = sc->stripe[stripe].dev->dax_dev;
+ bdev = sc->stripe[stripe].dev->bdev;
+
+ ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff);
+ if (ret)
+ return ret;
+ return dax_zero_page_range(dax_dev, pgoff, nr_pages);
+}
+
#else
#define stripe_dax_direct_access NULL
#define stripe_dax_copy_from_iter NULL
#define stripe_dax_copy_to_iter NULL
+#define stripe_dax_zero_page_range NULL
#endif
/*
@@ -486,6 +508,7 @@ static struct target_type stripe_target = {
.direct_access = stripe_dax_direct_access,
.dax_copy_from_iter = stripe_dax_copy_from_iter,
.dax_copy_to_iter = stripe_dax_copy_to_iter,
+ .dax_zero_page_range = stripe_dax_zero_page_range,
};
int __init dm_stripe_init(void)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 21c0207e3207..db9e46114653 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1199,6 +1199,35 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
return ret;
}
+static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ struct mapped_device *md = dax_get_private(dax_dev);
+ sector_t sector = pgoff * PAGE_SECTORS;
+ struct dm_target *ti;
+ int ret = -EIO;
+ int srcu_idx;
+
+ ti = dm_dax_get_live_target(md, sector, &srcu_idx);
+
+ if (!ti)
+ goto out;
+	if (WARN_ON(!ti->type->dax_zero_page_range)) {
+		/*
+		 * ->zero_page_range() is a mandatory dax operation. If we
+		 * are here, something is wrong. Fall through to the common
+		 * exit label: dm_dax_get_live_target() took the SRCU read
+		 * lock, and it must be dropped exactly once.
+		 */
+		goto out;
+	}
+ ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
+
+ out:
+ dm_put_live_table(md, srcu_idx);
+
+ return ret;
+}
+
/*
* A target may call dm_accept_partial_bio only from the map routine. It is
* allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET,
@@ -1969,7 +1998,7 @@ static struct mapped_device *alloc_dev(int minor)
if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
md->dax_dev = alloc_dax(md, md->disk->disk_name,
&dm_dax_ops, 0);
- if (!md->dax_dev)
+ if (IS_ERR(md->dax_dev))
goto bad;
}
@@ -3200,6 +3229,7 @@ static const struct dax_operations dm_dax_ops = {
.dax_supported = dm_dax_supported,
.copy_from_iter = dm_dax_copy_from_iter,
.copy_to_iter = dm_dax_copy_to_iter,
+ .zero_page_range = dm_dax_zero_page_range,
};
/*
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 2b203290e7b9..0a59249198d3 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -642,6 +642,19 @@ config MFD_IPAQ_MICRO
AT90LS8535 microcontroller flashed with a special iPAQ
firmware using the custom protocol implemented in this driver.
+config MFD_IQS62X
+ tristate "Azoteq IQS620A/621/622/624/625 core support"
+ depends on I2C
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build core support for the Azoteq IQS620A,
+ IQS621, IQS622, IQS624 and IQS625 multi-function sensors. Additional
+ options must be selected to enable device-specific functions.
+
+ To compile this driver as a module, choose M here: the module will
+ be called iqs62x.
+
config MFD_JANZ_CMODIO
tristate "Janz CMOD-IO PCI MODULbus Carrier Board"
select MFD_CORE
@@ -893,6 +906,7 @@ config MFD_CPCAP
tristate "Support for Motorola CPCAP"
depends on SPI
depends on OF || COMPILE_TEST
+ select MFD_CORE
select REGMAP_SPI
select REGMAP_IRQ
help
@@ -1058,6 +1072,7 @@ config MFD_RN5T618
depends on OF
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
help
Say yes here to add support for the Ricoh RN5T567,
RN5T618, RC5T619 PMIC.
@@ -1201,7 +1216,7 @@ config AB8500_CORE
chip. This connects to U8500 either on the SSP/SPI bus (deprecated
since hardware version v1.0) or the I2C bus via PRCMU. It also adds
the irq_chip parts for handling the Mixed Signal chip events.
- This chip embeds various other multimedia funtionalities as well.
+ This chip embeds various other multimedia functionalities as well.
config AB8500_DEBUG
bool "Enable debug info via debugfs"
@@ -1851,7 +1866,7 @@ config MFD_WM8994
has on board GPIO and regulator functionality which is
supported via the relevant subsystems. This driver provides
core support for the WM8994, in order to use the actual
- functionaltiy of the device other drivers must be enabled.
+ functionality of the device other drivers must be enabled.
config MFD_WM97xx
tristate "Wolfson Microelectronics WM97xx"
@@ -1864,7 +1879,7 @@ config MFD_WM97xx
designed for smartphone applications. As well as audio functionality
it has on board GPIO and a touchscreen functionality which is
supported via the relevant subsystems. This driver provides core
- support for the WM97xx, in order to use the actual functionaltiy of
+ support for the WM97xx, in order to use the actual functionality of
the device other drivers must be enabled.
config MFD_STW481X
@@ -1957,7 +1972,7 @@ config MFD_STPMIC1
Support for ST Microelectronics STPMIC1 PMIC. STPMIC1 has power on
key, watchdog and regulator functionalities which are supported via
the relevant subsystems. This driver provides core support for the
- STPMIC1. In order to use the actual functionaltiy of the device other
+ STPMIC1. In order to use the actual functionality of the device other
drivers must be enabled.
To compile this driver as a module, choose M here: the
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b83f172545e1..f935d10cbf0f 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -226,6 +226,7 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o
obj-$(CONFIG_MFD_AS3722) += as3722.o
obj-$(CONFIG_MFD_STW481X) += stw481x.o
obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o
+obj-$(CONFIG_MFD_IQS62X) += iqs62x.o
obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o
obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o
obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 78ee4b28fca2..a17cf759739d 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -221,7 +221,7 @@ static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf)
count += sprintf(buf, "aat2870 registers\n");
for (addr = 0; addr < AAT2870_REG_NUM; addr++) {
- count += sprintf(buf + count, "0x%02x: ", addr);
+ count += snprintf(buf + count, PAGE_SIZE - count, "0x%02x: ", addr);
if (count >= PAGE_SIZE - 1)
break;
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 39e611695053..32c2b912b58b 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -211,7 +211,7 @@ static int ec_device_probe(struct platform_device *pdev)
* explicitly added on platforms that don't have the PD notifier ACPI
* device entry defined.
*/
- if (IS_ENABLED(CONFIG_OF)) {
+ if (IS_ENABLED(CONFIG_OF) && ec->ec_dev->dev->of_node) {
if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) {
retval = mfd_add_hotplug_devices(ec->dev,
cros_usbpd_notify_cells,
diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c
index 419c73533401..fc30726e2e27 100644
--- a/drivers/mfd/da9062-core.c
+++ b/drivers/mfd/da9062-core.c
@@ -21,6 +21,9 @@
#define DA9062_REG_EVENT_B_OFFSET 1
#define DA9062_REG_EVENT_C_OFFSET 2
+#define DA9062_IRQ_LOW 0
+#define DA9062_IRQ_HIGH 1
+
static struct regmap_irq da9061_irqs[] = {
/* EVENT A */
[DA9061_IRQ_ONKEY] = {
@@ -369,6 +372,33 @@ static int da9062_get_device_type(struct da9062 *chip)
return ret;
}
+static int da9062_configure_irq_type(struct da9062 *chip, int irq, u32 *trigger)
+{
+ u32 irq_type = 0;
+ struct irq_data *irq_data = irq_get_irq_data(irq);
+
+ if (!irq_data) {
+ dev_err(chip->dev, "Invalid IRQ: %d\n", irq);
+ return -EINVAL;
+ }
+ *trigger = irqd_get_trigger_type(irq_data);
+
+ switch (*trigger) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ irq_type = DA9062_IRQ_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ irq_type = DA9062_IRQ_LOW;
+ break;
+ default:
+ dev_warn(chip->dev, "Unsupported IRQ type: %d\n", *trigger);
+ return -EINVAL;
+ }
+ return regmap_update_bits(chip->regmap, DA9062AA_CONFIG_A,
+ DA9062AA_IRQ_TYPE_MASK,
+ irq_type << DA9062AA_IRQ_TYPE_SHIFT);
+}
+
static const struct regmap_range da9061_aa_readable_ranges[] = {
regmap_reg_range(DA9062AA_PAGE_CON, DA9062AA_STATUS_B),
regmap_reg_range(DA9062AA_STATUS_D, DA9062AA_EVENT_C),
@@ -388,6 +418,7 @@ static const struct regmap_range da9061_aa_readable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK1_A, DA9062AA_VBUCK4_A),
regmap_reg_range(DA9062AA_VBUCK3_A, DA9062AA_VBUCK3_A),
regmap_reg_range(DA9062AA_VLDO1_A, DA9062AA_VLDO4_A),
+ regmap_reg_range(DA9062AA_CONFIG_A, DA9062AA_CONFIG_A),
regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B),
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
@@ -417,6 +448,7 @@ static const struct regmap_range da9061_aa_writeable_ranges[] = {
regmap_reg_range(DA9062AA_VBUCK1_A, DA9062AA_VBUCK4_A),
regmap_reg_range(DA9062AA_VBUCK3_A, DA9062AA_VBUCK3_A),
regmap_reg_range(DA9062AA_VLDO1_A, DA9062AA_VLDO4_A),
+ regmap_reg_range(DA9062AA_CONFIG_A, DA9062AA_CONFIG_A),
regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B),
regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B),
regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B),
@@ -596,6 +628,7 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
const struct regmap_irq_chip *irq_chip;
const struct regmap_config *config;
int cell_num;
+ u32 trigger_type = 0;
int ret;
chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL);
@@ -654,10 +687,15 @@ static int da9062_i2c_probe(struct i2c_client *i2c,
if (ret)
return ret;
+ ret = da9062_configure_irq_type(chip, i2c->irq, &trigger_type);
+ if (ret < 0) {
+ dev_err(chip->dev, "Failed to configure IRQ type\n");
+ return ret;
+ }
+
ret = regmap_add_irq_chip(chip->regmap, i2c->irq,
- IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED,
- -1, irq_chip,
- &chip->regmap_irq);
+ trigger_type | IRQF_SHARED | IRQF_ONESHOT,
+ -1, irq_chip, &chip->regmap_irq);
if (ret) {
dev_err(chip->dev, "Failed to request IRQ %d: %d\n",
i2c->irq, ret);
diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c
index 7841c11411d0..39276fa626d2 100644
--- a/drivers/mfd/dln2.c
+++ b/drivers/mfd/dln2.c
@@ -90,6 +90,11 @@ struct dln2_mod_rx_slots {
spinlock_t lock;
};
+enum dln2_endpoint {
+ DLN2_EP_OUT = 0,
+ DLN2_EP_IN = 1,
+};
+
struct dln2_dev {
struct usb_device *usb_dev;
struct usb_interface *interface;
@@ -640,35 +645,56 @@ static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp)
return 0;
}
+enum {
+ DLN2_ACPI_MATCH_GPIO = 0,
+ DLN2_ACPI_MATCH_I2C = 1,
+ DLN2_ACPI_MATCH_SPI = 2,
+};
+
static struct dln2_platform_data dln2_pdata_gpio = {
.handle = DLN2_HANDLE_GPIO,
};
+static struct mfd_cell_acpi_match dln2_acpi_match_gpio = {
+ .adr = DLN2_ACPI_MATCH_GPIO,
+};
+
/* Only one I2C port seems to be supported on current hardware */
static struct dln2_platform_data dln2_pdata_i2c = {
.handle = DLN2_HANDLE_I2C,
.port = 0,
};
+static struct mfd_cell_acpi_match dln2_acpi_match_i2c = {
+ .adr = DLN2_ACPI_MATCH_I2C,
+};
+
/* Only one SPI port supported */
static struct dln2_platform_data dln2_pdata_spi = {
.handle = DLN2_HANDLE_SPI,
.port = 0,
};
+static struct mfd_cell_acpi_match dln2_acpi_match_spi = {
+ .adr = DLN2_ACPI_MATCH_SPI,
+};
+
static const struct mfd_cell dln2_devs[] = {
{
.name = "dln2-gpio",
+ .acpi_match = &dln2_acpi_match_gpio,
.platform_data = &dln2_pdata_gpio,
.pdata_size = sizeof(struct dln2_platform_data),
},
{
.name = "dln2-i2c",
+ .acpi_match = &dln2_acpi_match_i2c,
.platform_data = &dln2_pdata_i2c,
.pdata_size = sizeof(struct dln2_platform_data),
},
{
.name = "dln2-spi",
+ .acpi_match = &dln2_acpi_match_spi,
.platform_data = &dln2_pdata_spi,
.pdata_size = sizeof(struct dln2_platform_data),
},
@@ -733,10 +759,10 @@ static int dln2_probe(struct usb_interface *interface,
hostif->desc.bNumEndpoints < 2)
return -ENODEV;
- epin = &hostif->endpoint[0].desc;
- epout = &hostif->endpoint[1].desc;
+ epout = &hostif->endpoint[DLN2_EP_OUT].desc;
if (!usb_endpoint_is_bulk_out(epout))
return -ENODEV;
+ epin = &hostif->endpoint[DLN2_EP_IN].desc;
if (!usb_endpoint_is_bulk_in(epin))
return -ENODEV;
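The reordered lookup above fixes which endpoint is treated as bulk-out versus bulk-in, but it still assumes a fixed position for each in the descriptor array. For comparison only, a sketch of the order-independent idiom the USB core offers; it is not what this patch does:

	/* Sketch: let the USB core locate the bulk pair regardless of order. */
	ret = usb_find_common_endpoints(hostif, &epin, &epout, NULL, NULL);
	if (ret)
		return -ENODEV;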
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
index c40a6c7d0cf8..7fc0c5d4edff 100644
--- a/drivers/mfd/intel-lpss-pci.c
+++ b/drivers/mfd/intel-lpss-pci.c
@@ -139,6 +139,11 @@ static const struct intel_lpss_platform_info cnl_i2c_info = {
.properties = spt_i2c_properties,
};
+static const struct intel_lpss_platform_info ehl_i2c_info = {
+ .clk_rate = 100000000,
+ .properties = bxt_i2c_properties,
+};
+
static const struct pci_device_id intel_lpss_pci_ids[] = {
/* CML-LP */
{ PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info },
@@ -231,15 +236,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0x4b2a), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4b2b), (kernel_ulong_t)&bxt_info },
{ PCI_VDEVICE(INTEL, 0x4b37), (kernel_ulong_t)&bxt_info },
- { PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b4c), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b4c), (kernel_ulong_t)&ehl_i2c_info },
{ PCI_VDEVICE(INTEL, 0x4b4d), (kernel_ulong_t)&bxt_uart_info },
- { PCI_VDEVICE(INTEL, 0x4b78), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&bxt_i2c_info },
- { PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&bxt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b78), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&ehl_i2c_info },
+ { PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&ehl_i2c_info },
/* JSL */
{ PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info },
{ PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info },
@@ -347,6 +352,16 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info },
{ PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info },
+ /* CML-V */
+ { PCI_VDEVICE(INTEL, 0xa3a7), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa3a8), (kernel_ulong_t)&spt_uart_info },
+ { PCI_VDEVICE(INTEL, 0xa3a9), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa3aa), (kernel_ulong_t)&spt_info },
+ { PCI_VDEVICE(INTEL, 0xa3e0), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e1), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info },
+ { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids);
diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c
new file mode 100644
index 000000000000..af764bc87d7c
--- /dev/null
+++ b/drivers/mfd/iqs62x.c
@@ -0,0 +1,1063 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors
+ *
+ * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com>
+ *
+ * These devices rely on application-specific register settings and calibration
+ * data developed in and exported from a suite of GUIs offered by the vendor. A
+ * separate tool converts the GUIs' ASCII-based output into a standard firmware
+ * file parsed by the driver.
+ *
+ * Link to datasheets and GUIs: https://www.azoteq.com/
+ *
+ * Link to conversion tool: https://github.com/jlabundy/iqs62x-h2bin.git
+ */
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/iqs62x.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+
+#define IQS62X_PROD_NUM 0x00
+
+#define IQS62X_SYS_FLAGS 0x10
+#define IQS62X_SYS_FLAGS_IN_ATI BIT(2)
+
+#define IQS620_HALL_FLAGS 0x16
+#define IQS621_HALL_FLAGS 0x19
+#define IQS622_HALL_FLAGS IQS621_HALL_FLAGS
+
+#define IQS624_INTERVAL_NUM 0x18
+#define IQS625_INTERVAL_NUM 0x12
+
+#define IQS622_PROX_SETTINGS_4 0x48
+#define IQS620_PROX_SETTINGS_4 0x50
+#define IQS620_PROX_SETTINGS_4_SAR_EN BIT(7)
+
+#define IQS621_ALS_CAL_DIV_LUX 0x82
+#define IQS621_ALS_CAL_DIV_IR 0x83
+
+#define IQS620_TEMP_CAL_MULT 0xC2
+#define IQS620_TEMP_CAL_DIV 0xC3
+#define IQS620_TEMP_CAL_OFFS 0xC4
+
+#define IQS62X_SYS_SETTINGS 0xD0
+#define IQS62X_SYS_SETTINGS_SOFT_RESET BIT(7)
+#define IQS62X_SYS_SETTINGS_ACK_RESET BIT(6)
+#define IQS62X_SYS_SETTINGS_EVENT_MODE BIT(5)
+#define IQS62X_SYS_SETTINGS_CLK_DIV BIT(4)
+#define IQS62X_SYS_SETTINGS_REDO_ATI BIT(1)
+
+#define IQS62X_PWR_SETTINGS 0xD2
+#define IQS62X_PWR_SETTINGS_DIS_AUTO BIT(5)
+#define IQS62X_PWR_SETTINGS_PWR_MODE_MASK (BIT(4) | BIT(3))
+#define IQS62X_PWR_SETTINGS_PWR_MODE_HALT (BIT(4) | BIT(3))
+#define IQS62X_PWR_SETTINGS_PWR_MODE_NORM 0
+
+#define IQS62X_OTP_CMD 0xF0
+#define IQS62X_OTP_CMD_FG3 0x13
+#define IQS62X_OTP_DATA 0xF1
+#define IQS62X_MAX_REG 0xFF
+
+#define IQS62X_HALL_CAL_MASK GENMASK(3, 0)
+
+#define IQS62X_FW_REC_TYPE_INFO 0
+#define IQS62X_FW_REC_TYPE_PROD 1
+#define IQS62X_FW_REC_TYPE_HALL 2
+#define IQS62X_FW_REC_TYPE_MASK 3
+#define IQS62X_FW_REC_TYPE_DATA 4
+
+#define IQS62X_ATI_POLL_SLEEP_US 10000
+#define IQS62X_ATI_POLL_TIMEOUT_US 500000
+#define IQS62X_ATI_STABLE_DELAY_MS 150
+
+struct iqs62x_fw_rec {
+ u8 type;
+ u8 addr;
+ u8 len;
+ u8 data;
+} __packed;
+
+struct iqs62x_fw_blk {
+ struct list_head list;
+ u8 addr;
+ u8 mask;
+ u8 len;
+ u8 data[];
+};
+
+struct iqs62x_info {
+ u8 prod_num;
+ u8 sw_num;
+ u8 hw_num;
+} __packed;
+
+static int iqs62x_dev_init(struct iqs62x_core *iqs62x)
+{
+ struct iqs62x_fw_blk *fw_blk;
+ unsigned int val;
+ int ret;
+ u8 clk_div = 1;
+
+ list_for_each_entry(fw_blk, &iqs62x->fw_blk_head, list) {
+ if (fw_blk->mask)
+ ret = regmap_update_bits(iqs62x->regmap, fw_blk->addr,
+ fw_blk->mask, *fw_blk->data);
+ else
+ ret = regmap_raw_write(iqs62x->regmap, fw_blk->addr,
+ fw_blk->data, fw_blk->len);
+ if (ret)
+ return ret;
+ }
+
+ switch (iqs62x->dev_desc->prod_num) {
+ case IQS620_PROD_NUM:
+ case IQS622_PROD_NUM:
+ ret = regmap_read(iqs62x->regmap,
+ iqs62x->dev_desc->prox_settings, &val);
+ if (ret)
+ return ret;
+
+ if (val & IQS620_PROX_SETTINGS_4_SAR_EN)
+ iqs62x->ui_sel = IQS62X_UI_SAR1;
+
+ /* fall through */
+
+ case IQS621_PROD_NUM:
+ ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK,
+ IQS620_GLBL_EVENT_MASK_PMU |
+ iqs62x->dev_desc->prox_mask |
+ iqs62x->dev_desc->sar_mask |
+ iqs62x->dev_desc->hall_mask |
+ iqs62x->dev_desc->hyst_mask |
+ iqs62x->dev_desc->temp_mask |
+ iqs62x->dev_desc->als_mask |
+ iqs62x->dev_desc->ir_mask);
+ if (ret)
+ return ret;
+ break;
+
+ default:
+ ret = regmap_write(iqs62x->regmap, IQS624_HALL_UI,
+ IQS624_HALL_UI_WHL_EVENT |
+ IQS624_HALL_UI_INT_EVENT |
+ IQS624_HALL_UI_AUTO_CAL);
+ if (ret)
+ return ret;
+
+ /*
+ * The IQS625 default interval divider is below the minimum
+	 * permissible value, and the datasheet mandates that it be
+	 * corrected during initialization (unless an updated value
+ * has already been provided by firmware).
+ *
+ * To protect against an unacceptably low user-entered value
+ * stored in the firmware, the same check is extended to the
+ * IQS624 as well.
+ */
+ ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV, &val);
+ if (ret)
+ return ret;
+
+ if (val >= iqs62x->dev_desc->interval_div)
+ break;
+
+ ret = regmap_write(iqs62x->regmap, IQS624_INTERVAL_DIV,
+ iqs62x->dev_desc->interval_div);
+ if (ret)
+ return ret;
+ }
+
+ ret = regmap_read(iqs62x->regmap, IQS62X_SYS_SETTINGS, &val);
+ if (ret)
+ return ret;
+
+ if (val & IQS62X_SYS_SETTINGS_CLK_DIV)
+ clk_div = iqs62x->dev_desc->clk_div;
+
+ ret = regmap_write(iqs62x->regmap, IQS62X_SYS_SETTINGS, val |
+ IQS62X_SYS_SETTINGS_ACK_RESET |
+ IQS62X_SYS_SETTINGS_EVENT_MODE |
+ IQS62X_SYS_SETTINGS_REDO_ATI);
+ if (ret)
+ return ret;
+
+ ret = regmap_read_poll_timeout(iqs62x->regmap, IQS62X_SYS_FLAGS, val,
+ !(val & IQS62X_SYS_FLAGS_IN_ATI),
+ IQS62X_ATI_POLL_SLEEP_US,
+ IQS62X_ATI_POLL_TIMEOUT_US * clk_div);
+ if (ret)
+ return ret;
+
+ msleep(IQS62X_ATI_STABLE_DELAY_MS * clk_div);
+
+ return 0;
+}
+
+static int iqs62x_firmware_parse(struct iqs62x_core *iqs62x,
+ const struct firmware *fw)
+{
+ struct i2c_client *client = iqs62x->client;
+ struct iqs62x_fw_rec *fw_rec;
+ struct iqs62x_fw_blk *fw_blk;
+ unsigned int val;
+ size_t pos = 0;
+ int ret = 0;
+ u8 mask, len, *data;
+ u8 hall_cal_index = 0;
+
+ while (pos < fw->size) {
+ if (pos + sizeof(*fw_rec) > fw->size) {
+ ret = -EINVAL;
+ break;
+ }
+ fw_rec = (struct iqs62x_fw_rec *)(fw->data + pos);
+ pos += sizeof(*fw_rec);
+
+ if (pos + fw_rec->len - 1 > fw->size) {
+ ret = -EINVAL;
+ break;
+ }
+ pos += fw_rec->len - 1;
+
+ switch (fw_rec->type) {
+ case IQS62X_FW_REC_TYPE_INFO:
+ continue;
+
+ case IQS62X_FW_REC_TYPE_PROD:
+ if (fw_rec->data == iqs62x->dev_desc->prod_num)
+ continue;
+
+ dev_err(&client->dev,
+ "Incompatible product number: 0x%02X\n",
+ fw_rec->data);
+ ret = -EINVAL;
+ break;
+
+ case IQS62X_FW_REC_TYPE_HALL:
+ if (!hall_cal_index) {
+ ret = regmap_write(iqs62x->regmap,
+ IQS62X_OTP_CMD,
+ IQS62X_OTP_CMD_FG3);
+ if (ret)
+ break;
+
+ ret = regmap_read(iqs62x->regmap,
+ IQS62X_OTP_DATA, &val);
+ if (ret)
+ break;
+
+ hall_cal_index = val & IQS62X_HALL_CAL_MASK;
+ if (!hall_cal_index) {
+ dev_err(&client->dev,
+ "Uncalibrated device\n");
+ ret = -ENODATA;
+ break;
+ }
+ }
+
+ if (hall_cal_index > fw_rec->len) {
+ ret = -EINVAL;
+ break;
+ }
+
+ mask = 0;
+ data = &fw_rec->data + hall_cal_index - 1;
+ len = sizeof(*data);
+ break;
+
+ case IQS62X_FW_REC_TYPE_MASK:
+ if (fw_rec->len < (sizeof(mask) + sizeof(*data))) {
+ ret = -EINVAL;
+ break;
+ }
+
+ mask = fw_rec->data;
+ data = &fw_rec->data + sizeof(mask);
+ len = sizeof(*data);
+ break;
+
+ case IQS62X_FW_REC_TYPE_DATA:
+ mask = 0;
+ data = &fw_rec->data;
+ len = fw_rec->len;
+ break;
+
+ default:
+ dev_err(&client->dev,
+ "Unrecognized record type: 0x%02X\n",
+ fw_rec->type);
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ break;
+
+ fw_blk = devm_kzalloc(&client->dev,
+ struct_size(fw_blk, data, len),
+ GFP_KERNEL);
+ if (!fw_blk) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ fw_blk->addr = fw_rec->addr;
+ fw_blk->mask = mask;
+ fw_blk->len = len;
+ memcpy(fw_blk->data, data, len);
+
+ list_add(&fw_blk->list, &iqs62x->fw_blk_head);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS] = {
+ [IQS62X_EVENT_PROX_CH0_T] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(4),
+ .val = BIT(4),
+ },
+ [IQS62X_EVENT_PROX_CH0_P] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(0),
+ .val = BIT(0),
+ },
+ [IQS62X_EVENT_PROX_CH1_T] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(5),
+ .val = BIT(5),
+ },
+ [IQS62X_EVENT_PROX_CH1_P] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(1),
+ .val = BIT(1),
+ },
+ [IQS62X_EVENT_PROX_CH2_T] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(6),
+ .val = BIT(6),
+ },
+ [IQS62X_EVENT_PROX_CH2_P] = {
+ .reg = IQS62X_EVENT_PROX,
+ .mask = BIT(2),
+ .val = BIT(2),
+ },
+ [IQS62X_EVENT_HYST_POS_T] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(6) | BIT(7),
+ .val = BIT(6),
+ },
+ [IQS62X_EVENT_HYST_POS_P] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(5) | BIT(7),
+ .val = BIT(5),
+ },
+ [IQS62X_EVENT_HYST_NEG_T] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(6) | BIT(7),
+ .val = BIT(6) | BIT(7),
+ },
+ [IQS62X_EVENT_HYST_NEG_P] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(5) | BIT(7),
+ .val = BIT(5) | BIT(7),
+ },
+ [IQS62X_EVENT_SAR1_ACT] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(4),
+ .val = BIT(4),
+ },
+ [IQS62X_EVENT_SAR1_QRD] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(2),
+ .val = BIT(2),
+ },
+ [IQS62X_EVENT_SAR1_MOVE] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(1),
+ .val = BIT(1),
+ },
+ [IQS62X_EVENT_SAR1_HALT] = {
+ .reg = IQS62X_EVENT_HYST,
+ .mask = BIT(0),
+ .val = BIT(0),
+ },
+ [IQS62X_EVENT_WHEEL_UP] = {
+ .reg = IQS62X_EVENT_WHEEL,
+ .mask = BIT(7) | BIT(6),
+ .val = BIT(7),
+ },
+ [IQS62X_EVENT_WHEEL_DN] = {
+ .reg = IQS62X_EVENT_WHEEL,
+ .mask = BIT(7) | BIT(6),
+ .val = BIT(7) | BIT(6),
+ },
+ [IQS62X_EVENT_HALL_N_T] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(2) | BIT(0),
+ .val = BIT(2),
+ },
+ [IQS62X_EVENT_HALL_N_P] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(1) | BIT(0),
+ .val = BIT(1),
+ },
+ [IQS62X_EVENT_HALL_S_T] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(2) | BIT(0),
+ .val = BIT(2) | BIT(0),
+ },
+ [IQS62X_EVENT_HALL_S_P] = {
+ .reg = IQS62X_EVENT_HALL,
+ .mask = BIT(1) | BIT(0),
+ .val = BIT(1) | BIT(0),
+ },
+ [IQS62X_EVENT_SYS_RESET] = {
+ .reg = IQS62X_EVENT_SYS,
+ .mask = BIT(7),
+ .val = BIT(7),
+ },
+};
+EXPORT_SYMBOL_GPL(iqs62x_events);
+
+static irqreturn_t iqs62x_irq(int irq, void *context)
+{
+ struct iqs62x_core *iqs62x = context;
+ struct i2c_client *client = iqs62x->client;
+ struct iqs62x_event_data event_data;
+ struct iqs62x_event_desc event_desc;
+ enum iqs62x_event_reg event_reg;
+ unsigned long event_flags = 0;
+ int ret, i, j;
+ u8 event_map[IQS62X_EVENT_SIZE];
+
+ /*
+ * The device asserts the RDY output to signal the beginning of a
+ * communication window, which is closed by an I2C stop condition.
+ * As such, all interrupt status is captured in a single read and
+ * broadcast to any interested sub-device drivers.
+ */
+ ret = regmap_raw_read(iqs62x->regmap, IQS62X_SYS_FLAGS, event_map,
+ sizeof(event_map));
+ if (ret) {
+ dev_err(&client->dev, "Failed to read device status: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < sizeof(event_map); i++) {
+ event_reg = iqs62x->dev_desc->event_regs[iqs62x->ui_sel][i];
+
+ switch (event_reg) {
+ case IQS62X_EVENT_UI_LO:
+ event_data.ui_data = get_unaligned_le16(&event_map[i]);
+
+ /* fall through */
+
+ case IQS62X_EVENT_UI_HI:
+ case IQS62X_EVENT_NONE:
+ continue;
+
+ case IQS62X_EVENT_ALS:
+ event_data.als_flags = event_map[i];
+ continue;
+
+ case IQS62X_EVENT_IR:
+ event_data.ir_flags = event_map[i];
+ continue;
+
+ case IQS62X_EVENT_INTER:
+ event_data.interval = event_map[i];
+ continue;
+
+ case IQS62X_EVENT_HYST:
+ event_map[i] <<= iqs62x->dev_desc->hyst_shift;
+
+ /* fall through */
+
+ case IQS62X_EVENT_WHEEL:
+ case IQS62X_EVENT_HALL:
+ case IQS62X_EVENT_PROX:
+ case IQS62X_EVENT_SYS:
+ break;
+ }
+
+ for (j = 0; j < IQS62X_NUM_EVENTS; j++) {
+ event_desc = iqs62x_events[j];
+
+ if (event_desc.reg != event_reg)
+ continue;
+
+ if ((event_map[i] & event_desc.mask) == event_desc.val)
+ event_flags |= BIT(j);
+ }
+ }
+
+ /*
+ * The device resets itself in response to the I2C master stalling
+ * communication past a fixed timeout. In this case, all registers
+ * are restored and any interested sub-device drivers are notified.
+ */
+ if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) {
+ dev_err(&client->dev, "Unexpected device reset\n");
+
+ ret = iqs62x_dev_init(iqs62x);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to re-initialize device: %d\n", ret);
+ return IRQ_NONE;
+ }
+ }
+
+ ret = blocking_notifier_call_chain(&iqs62x->nh, event_flags,
+ &event_data);
+ if (ret & NOTIFY_STOP_MASK)
+ return IRQ_NONE;
+
+ /*
+ * Once the communication window is closed, a small delay is added to
+ * ensure the device's RDY output has been deasserted by the time the
+ * interrupt handler returns.
+ */
+ usleep_range(50, 100);
+
+ return IRQ_HANDLED;
+}
+
+static void iqs62x_firmware_load(const struct firmware *fw, void *context)
+{
+ struct iqs62x_core *iqs62x = context;
+ struct i2c_client *client = iqs62x->client;
+ int ret;
+
+ if (fw) {
+ ret = iqs62x_firmware_parse(iqs62x, fw);
+ if (ret) {
+ dev_err(&client->dev, "Failed to parse firmware: %d\n",
+ ret);
+ goto err_out;
+ }
+ }
+
+ ret = iqs62x_dev_init(iqs62x);
+ if (ret) {
+ dev_err(&client->dev, "Failed to initialize device: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, iqs62x_irq, IRQF_ONESHOT,
+ client->name, iqs62x);
+ if (ret) {
+ dev_err(&client->dev, "Failed to request IRQ: %d\n", ret);
+ goto err_out;
+ }
+
+ ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE,
+ iqs62x->dev_desc->sub_devs,
+ iqs62x->dev_desc->num_sub_devs,
+ NULL, 0, NULL);
+ if (ret)
+ dev_err(&client->dev, "Failed to add sub-devices: %d\n", ret);
+
+err_out:
+ complete_all(&iqs62x->fw_done);
+}
+
+static const struct mfd_cell iqs620at_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs620a-keys",
+ },
+ {
+ .name = "iqs620a-pwm",
+ .of_compatible = "azoteq,iqs620a-pwm",
+ },
+ { .name = "iqs620at-temp", },
+};
+
+static const struct mfd_cell iqs620a_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs620a-keys",
+ },
+ {
+ .name = "iqs620a-pwm",
+ .of_compatible = "azoteq,iqs620a-pwm",
+ },
+};
+
+static const struct mfd_cell iqs621_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs621-keys",
+ },
+ { .name = "iqs621-als", },
+};
+
+static const struct mfd_cell iqs622_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs622-keys",
+ },
+ { .name = "iqs621-als", },
+};
+
+static const struct mfd_cell iqs624_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs624-keys",
+ },
+ { .name = "iqs624-pos", },
+};
+
+static const struct mfd_cell iqs625_sub_devs[] = {
+ {
+ .name = "iqs62x-keys",
+ .of_compatible = "azoteq,iqs625-keys",
+ },
+ { .name = "iqs624-pos", },
+};
+
+static const u8 iqs620at_cal_regs[] = {
+ IQS620_TEMP_CAL_MULT,
+ IQS620_TEMP_CAL_DIV,
+ IQS620_TEMP_CAL_OFFS,
+};
+
+static const u8 iqs621_cal_regs[] = {
+ IQS621_ALS_CAL_DIV_LUX,
+ IQS621_ALS_CAL_DIV_IR,
+};
+
+static const enum iqs62x_event_reg iqs620a_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HALL, /* 0x16 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ },
+ [IQS62X_UI_SAR1] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HALL, /* 0x16 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ },
+};
+
+static const enum iqs62x_event_reg iqs621_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_ALS, /* 0x16 */
+ IQS62X_EVENT_UI_LO, /* 0x17 */
+ IQS62X_EVENT_UI_HI, /* 0x18 */
+ IQS62X_EVENT_HALL, /* 0x19 */
+ },
+};
+
+static const enum iqs62x_event_reg iqs622_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_ALS, /* 0x14 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_IR, /* 0x16 */
+ IQS62X_EVENT_UI_LO, /* 0x17 */
+ IQS62X_EVENT_UI_HI, /* 0x18 */
+ IQS62X_EVENT_HALL, /* 0x19 */
+ },
+ [IQS62X_UI_SAR1] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_HYST, /* 0x13 */
+ IQS62X_EVENT_ALS, /* 0x14 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_IR, /* 0x16 */
+ IQS62X_EVENT_UI_LO, /* 0x17 */
+ IQS62X_EVENT_UI_HI, /* 0x18 */
+ IQS62X_EVENT_HALL, /* 0x19 */
+ },
+};
+
+static const enum iqs62x_event_reg iqs624_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_PROX, /* 0x12 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_WHEEL, /* 0x14 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_UI_LO, /* 0x16 */
+ IQS62X_EVENT_UI_HI, /* 0x17 */
+ IQS62X_EVENT_INTER, /* 0x18 */
+ IQS62X_EVENT_NONE,
+ },
+};
+
+static const enum iqs62x_event_reg iqs625_event_regs[][IQS62X_EVENT_SIZE] = {
+ [IQS62X_UI_PROX] = {
+ IQS62X_EVENT_SYS, /* 0x10 */
+ IQS62X_EVENT_PROX, /* 0x11 */
+ IQS62X_EVENT_INTER, /* 0x12 */
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ IQS62X_EVENT_NONE,
+ },
+};
+
+static const struct iqs62x_dev_desc iqs62x_devs[] = {
+ {
+ .dev_name = "iqs620at",
+ .sub_devs = iqs620at_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs620at_sub_devs),
+
+ .prod_num = IQS620_PROD_NUM,
+ .sw_num = 0x08,
+ .cal_regs = iqs620at_cal_regs,
+ .num_cal_regs = ARRAY_SIZE(iqs620at_cal_regs),
+
+ .prox_mask = BIT(0),
+ .sar_mask = BIT(1) | BIT(7),
+ .hall_mask = BIT(2),
+ .hyst_mask = BIT(3),
+ .temp_mask = BIT(4),
+
+ .prox_settings = IQS620_PROX_SETTINGS_4,
+ .hall_flags = IQS620_HALL_FLAGS,
+
+ .clk_div = 4,
+ .fw_name = "iqs620a.bin",
+ .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs620a",
+ .sub_devs = iqs620a_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs620a_sub_devs),
+
+ .prod_num = IQS620_PROD_NUM,
+ .sw_num = 0x08,
+
+ .prox_mask = BIT(0),
+ .sar_mask = BIT(1) | BIT(7),
+ .hall_mask = BIT(2),
+ .hyst_mask = BIT(3),
+ .temp_mask = BIT(4),
+
+ .prox_settings = IQS620_PROX_SETTINGS_4,
+ .hall_flags = IQS620_HALL_FLAGS,
+
+ .clk_div = 4,
+ .fw_name = "iqs620a.bin",
+ .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs621",
+ .sub_devs = iqs621_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs621_sub_devs),
+
+ .prod_num = IQS621_PROD_NUM,
+ .sw_num = 0x09,
+ .cal_regs = iqs621_cal_regs,
+ .num_cal_regs = ARRAY_SIZE(iqs621_cal_regs),
+
+ .prox_mask = BIT(0),
+ .hall_mask = BIT(1),
+ .als_mask = BIT(2),
+ .hyst_mask = BIT(3),
+ .temp_mask = BIT(4),
+
+ .als_flags = IQS621_ALS_FLAGS,
+ .hall_flags = IQS621_HALL_FLAGS,
+ .hyst_shift = 5,
+
+ .clk_div = 2,
+ .fw_name = "iqs621.bin",
+ .event_regs = &iqs621_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs622",
+ .sub_devs = iqs622_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs622_sub_devs),
+
+ .prod_num = IQS622_PROD_NUM,
+ .sw_num = 0x06,
+
+ .prox_mask = BIT(0),
+ .sar_mask = BIT(1),
+ .hall_mask = BIT(2),
+ .als_mask = BIT(3),
+ .ir_mask = BIT(4),
+
+ .prox_settings = IQS622_PROX_SETTINGS_4,
+ .als_flags = IQS622_ALS_FLAGS,
+ .hall_flags = IQS622_HALL_FLAGS,
+
+ .clk_div = 2,
+ .fw_name = "iqs622.bin",
+ .event_regs = &iqs622_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs624",
+ .sub_devs = iqs624_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs624_sub_devs),
+
+ .prod_num = IQS624_PROD_NUM,
+ .sw_num = 0x0B,
+
+ .interval = IQS624_INTERVAL_NUM,
+ .interval_div = 3,
+
+ .clk_div = 2,
+ .fw_name = "iqs624.bin",
+ .event_regs = &iqs624_event_regs[IQS62X_UI_PROX],
+ },
+ {
+ .dev_name = "iqs625",
+ .sub_devs = iqs625_sub_devs,
+ .num_sub_devs = ARRAY_SIZE(iqs625_sub_devs),
+
+ .prod_num = IQS625_PROD_NUM,
+ .sw_num = 0x0B,
+
+ .interval = IQS625_INTERVAL_NUM,
+ .interval_div = 10,
+
+ .clk_div = 2,
+ .fw_name = "iqs625.bin",
+ .event_regs = &iqs625_event_regs[IQS62X_UI_PROX],
+ },
+};
+
+static const struct regmap_config iqs62x_map_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = IQS62X_MAX_REG,
+};
+
+static int iqs62x_probe(struct i2c_client *client)
+{
+ struct iqs62x_core *iqs62x;
+ struct iqs62x_info info;
+ unsigned int val;
+ int ret, i, j;
+ u8 sw_num = 0;
+ const char *fw_name = NULL;
+
+ iqs62x = devm_kzalloc(&client->dev, sizeof(*iqs62x), GFP_KERNEL);
+ if (!iqs62x)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, iqs62x);
+ iqs62x->client = client;
+
+ BLOCKING_INIT_NOTIFIER_HEAD(&iqs62x->nh);
+ INIT_LIST_HEAD(&iqs62x->fw_blk_head);
+ init_completion(&iqs62x->fw_done);
+
+ iqs62x->regmap = devm_regmap_init_i2c(client, &iqs62x_map_config);
+ if (IS_ERR(iqs62x->regmap)) {
+ ret = PTR_ERR(iqs62x->regmap);
+ dev_err(&client->dev, "Failed to initialize register map: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = regmap_raw_read(iqs62x->regmap, IQS62X_PROD_NUM, &info,
+ sizeof(info));
+ if (ret)
+ return ret;
+
+ /*
+ * The following sequence validates the device's product and software
+ * numbers. It then determines if the device is factory-calibrated by
+ * checking for nonzero values in the device's designated calibration
+ * registers (if applicable). Depending on the device, the absence of
+ * calibration data indicates a reduced feature set or invalid device.
+ *
+	 * For devices offered in both calibrated and uncalibrated versions, the
+ * calibrated version (e.g. IQS620AT) appears first in the iqs62x_devs
+ * array. The uncalibrated version (e.g. IQS620A) appears next and has
+ * the same product and software numbers, but no calibration registers
+ * are specified.
+ */
+ for (i = 0; i < ARRAY_SIZE(iqs62x_devs); i++) {
+ if (info.prod_num != iqs62x_devs[i].prod_num)
+ continue;
+
+ iqs62x->dev_desc = &iqs62x_devs[i];
+
+ if (info.sw_num < iqs62x->dev_desc->sw_num)
+ continue;
+
+ sw_num = info.sw_num;
+
+ /*
+ * Read each of the device's designated calibration registers,
+ * if any, and exit from the inner loop early if any are equal
+ * to zero (indicating the device is uncalibrated). This could
+ * be acceptable depending on the device (e.g. IQS620A instead
+ * of IQS620AT).
+ */
+ for (j = 0; j < iqs62x->dev_desc->num_cal_regs; j++) {
+ ret = regmap_read(iqs62x->regmap,
+ iqs62x->dev_desc->cal_regs[j], &val);
+ if (ret)
+ return ret;
+
+ if (!val)
+ break;
+ }
+
+ /*
+ * If the number of nonzero values read from the device equals
+ * the number of designated calibration registers (which could
+ * be zero), exit from the outer loop early to signal that the
+ * device's product and software numbers match a known device,
+ * and the device is calibrated (if applicable).
+ */
+ if (j == iqs62x->dev_desc->num_cal_regs)
+ break;
+ }
+
+ if (!iqs62x->dev_desc) {
+ dev_err(&client->dev, "Unrecognized product number: 0x%02X\n",
+ info.prod_num);
+ return -EINVAL;
+ }
+
+ if (!sw_num) {
+ dev_err(&client->dev, "Unrecognized software number: 0x%02X\n",
+ info.sw_num);
+ return -EINVAL;
+ }
+
+ if (i == ARRAY_SIZE(iqs62x_devs)) {
+ dev_err(&client->dev, "Uncalibrated device\n");
+ return -ENODATA;
+ }
+
+ device_property_read_string(&client->dev, "firmware-name", &fw_name);
+
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ fw_name ? : iqs62x->dev_desc->fw_name,
+ &client->dev, GFP_KERNEL, iqs62x,
+ iqs62x_firmware_load);
+ if (ret)
+ dev_err(&client->dev, "Failed to request firmware: %d\n", ret);
+
+ return ret;
+}
+
+static int iqs62x_remove(struct i2c_client *client)
+{
+ struct iqs62x_core *iqs62x = i2c_get_clientdata(client);
+
+ wait_for_completion(&iqs62x->fw_done);
+
+ return 0;
+}
+
+static int __maybe_unused iqs62x_suspend(struct device *dev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(dev);
+ int ret;
+
+ wait_for_completion(&iqs62x->fw_done);
+
+ /*
+ * As per the datasheet, automatic mode switching must be disabled
+ * before the device is placed in or taken out of halt mode.
+ */
+ ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_DIS_AUTO, 0xFF);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_PWR_MODE_MASK,
+ IQS62X_PWR_SETTINGS_PWR_MODE_HALT);
+}
+
+static int __maybe_unused iqs62x_resume(struct device *dev)
+{
+ struct iqs62x_core *iqs62x = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_PWR_MODE_MASK,
+ IQS62X_PWR_SETTINGS_PWR_MODE_NORM);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS,
+ IQS62X_PWR_SETTINGS_DIS_AUTO, 0);
+}
+
+static SIMPLE_DEV_PM_OPS(iqs62x_pm, iqs62x_suspend, iqs62x_resume);
+
+static const struct of_device_id iqs62x_of_match[] = {
+ { .compatible = "azoteq,iqs620a" },
+ { .compatible = "azoteq,iqs621" },
+ { .compatible = "azoteq,iqs622" },
+ { .compatible = "azoteq,iqs624" },
+ { .compatible = "azoteq,iqs625" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, iqs62x_of_match);
+
+static struct i2c_driver iqs62x_i2c_driver = {
+ .driver = {
+ .name = "iqs62x",
+ .of_match_table = iqs62x_of_match,
+ .pm = &iqs62x_pm,
+ },
+ .probe_new = iqs62x_probe,
+ .remove = iqs62x_remove,
+};
+module_i2c_driver(iqs62x_i2c_driver);
+
+MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>");
+MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Multi-Function Sensors");
+MODULE_LICENSE("GPL");
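As an illustration (not part of the patch): the probe sequence above depends on the ordering of iqs62x_devs[] — the calibrated variant of a part is matched first, and an uncalibrated fallback with the same product number is accepted only after a calibration register reads zero. A minimal userspace sketch of that control flow follows; the table entries and register values are hypothetical, only the loop structure mirrors the driver.

/* Editorial sketch in plain C, not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct dev_desc {
	const char *name;
	unsigned char prod_num;
	const unsigned char *cal_regs;	/* NULL for uncalibrated variants */
	size_t num_cal_regs;
};

static const unsigned char iqs620at_cal_regs[] = { 0x42, 0x43 };

/* Calibrated variant first, uncalibrated fallback with the same
 * product number second, as in iqs62x_devs[]. */
static const struct dev_desc devs[] = {
	{ "iqs620at", 0x41, iqs620at_cal_regs, 2 },
	{ "iqs620a",  0x41, NULL, 0 },
};

/* Stand-in for regmap_read(); pretend the part is uncalibrated. */
static unsigned int read_reg(unsigned char reg)
{
	(void)reg;
	return 0;
}

int main(void)
{
	const size_t ndevs = sizeof(devs) / sizeof(devs[0]);
	unsigned char prod_num = 0x41;
	const struct dev_desc *match = NULL;
	size_t i, j;

	for (i = 0; i < ndevs; i++) {
		if (prod_num != devs[i].prod_num)
			continue;
		match = &devs[i];

		/* A zero in any calibration register rejects this entry. */
		for (j = 0; j < devs[i].num_cal_regs; j++)
			if (!read_reg(devs[i].cal_regs[j]))
				break;

		if (j == devs[i].num_cal_regs)
			break;	/* all nonzero (or none required): accept */
	}

	if (match && i < ndevs)
		printf("matched %s\n", match->name);	/* "iqs620a" here */
	else if (match)
		puts("product known but uncalibrated");
	return 0;
}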
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 4798d9f3f9d5..1f4f01b02d98 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -840,7 +840,7 @@ MODULE_DEVICE_TABLE(of, usbhs_omap_dt_ids);
static struct platform_driver usbhs_omap_driver = {
.driver = {
- .name = (char *)usbhs_driver_name,
+ .name = usbhs_driver_name,
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index 265f5e350e1c..4b7f73c317e8 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -99,7 +99,7 @@
struct usbtll_omap {
void __iomem *base;
int nch; /* num. of channels */
- struct clk *ch_clk[0]; /* must be the last member */
+ struct clk *ch_clk[]; /* must be the last member */
};
/*-------------------------------------------------------------------------*/
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, usbtll_omap_dt_ids);
static struct platform_driver usbtll_omap_driver = {
.driver = {
- .name = (char *)usbtll_driver_name,
+ .name = usbtll_driver_name,
.of_match_table = usbtll_omap_dt_ids,
},
.probe = usbtll_omap_probe,
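The ch_clk[0] to ch_clk[] conversion here (echoed below in qcom-pm8xxx, ubi-media.h, label.h and nd.h) replaces the GNU zero-length-array idiom with a C99 flexible array member, which must be the last member and lets compilers and bounds-checking logic reason about the trailing storage. A small self-contained sketch of how such a structure is allocated and sized, with a hypothetical layout standing in for struct usbtll_omap:

/* Editorial sketch: sizeof covers only the fixed header, so the
 * trailing flexible array is sized explicitly at allocation time. */
#include <stdio.h>
#include <stdlib.h>

struct usbtll_like {
	void *base;
	int nch;		/* number of channels */
	void *ch_clk[];		/* must be the last member */
};

int main(void)
{
	int nch = 3;
	struct usbtll_like *tll;

	tll = calloc(1, sizeof(*tll) + nch * sizeof(tll->ch_clk[0]));
	if (!tll)
		return 1;
	tll->nch = nch;
	printf("header %zu bytes + %d clock slots\n", sizeof(*tll), tll->nch);
	free(tll);
	return 0;
}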
diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c
index 29133326c6fd..acd172ddcbd6 100644
--- a/drivers/mfd/qcom-pm8xxx.c
+++ b/drivers/mfd/qcom-pm8xxx.c
@@ -76,7 +76,7 @@ struct pm_irq_chip {
unsigned int num_masters;
const struct pm_irq_data *pm_irq_data;
/* MUST BE AT THE END OF THIS STRUCT */
- u8 config[0];
+ u8 config[];
};
static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp,
diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c
index a69a6742ecdc..d109b9f14407 100644
--- a/drivers/mfd/rk808.c
+++ b/drivers/mfd/rk808.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
-#include <linux/syscore_ops.h>
struct rk808_reg_data {
int addr;
@@ -186,7 +185,6 @@ static const struct rk808_reg_data rk805_pre_init_reg[] = {
{RK805_BUCK4_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK,
RK805_BUCK4_ILMAX_3500MA},
{RK805_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_400MA},
- {RK805_GPIO_IO_POL_REG, SLP_SD_MSK, SLEEP_FUN},
{RK805_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP115C},
};
@@ -449,88 +447,60 @@ static const struct regmap_irq_chip rk818_irq_chip = {
static struct i2c_client *rk808_i2c_client;
-static void rk805_device_shutdown(void)
+static void rk808_pm_power_off(void)
{
int ret;
+ unsigned int reg, bit;
struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
- if (!rk808)
- return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK805_DEV_CTRL_REG,
- DEV_OFF, DEV_OFF);
- if (ret)
- dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
-}
-
-static void rk805_device_shutdown_prepare(void)
-{
- int ret;
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
-
- if (!rk808)
- return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK805_GPIO_IO_POL_REG,
- SLP_SD_MSK, SHUTDOWN_FUN);
- if (ret)
- dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
-}
-
-static void rk808_device_shutdown(void)
-{
- int ret;
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
-
- if (!rk808)
- return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK808_DEVCTRL_REG,
- DEV_OFF_RST, DEV_OFF_RST);
- if (ret)
- dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
-}
-
-static void rk818_device_shutdown(void)
-{
- int ret;
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
-
- if (!rk808)
+ switch (rk808->variant) {
+ case RK805_ID:
+ reg = RK805_DEV_CTRL_REG;
+ bit = DEV_OFF;
+ break;
+ case RK808_ID:
+ reg = RK808_DEVCTRL_REG;
+ bit = DEV_OFF_RST;
+ break;
+ case RK818_ID:
+ reg = RK818_DEVCTRL_REG;
+ bit = DEV_OFF;
+ break;
+ default:
return;
-
- ret = regmap_update_bits(rk808->regmap,
- RK818_DEVCTRL_REG,
- DEV_OFF, DEV_OFF);
+ }
+ ret = regmap_update_bits(rk808->regmap, reg, bit, bit);
if (ret)
dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n");
}
-static void rk8xx_syscore_shutdown(void)
+static void rk8xx_shutdown(struct i2c_client *client)
{
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+ struct rk808 *rk808 = i2c_get_clientdata(client);
int ret;
- if (system_state == SYSTEM_POWER_OFF &&
- (rk808->variant == RK809_ID || rk808->variant == RK817_ID)) {
+ switch (rk808->variant) {
+ case RK805_ID:
+ ret = regmap_update_bits(rk808->regmap,
+ RK805_GPIO_IO_POL_REG,
+ SLP_SD_MSK,
+ SHUTDOWN_FUN);
+ break;
+ case RK809_ID:
+ case RK817_ID:
ret = regmap_update_bits(rk808->regmap,
RK817_SYS_CFG(3),
RK817_SLPPIN_FUNC_MSK,
SLPPIN_DN_FUN);
- if (ret) {
- dev_warn(&rk808_i2c_client->dev,
- "Cannot switch to power down function\n");
- }
+ break;
+ default:
+ return;
}
+ if (ret)
+ dev_warn(&client->dev,
+ "Cannot switch to power down function\n");
}
-static struct syscore_ops rk808_syscore_ops = {
- .shutdown = rk8xx_syscore_shutdown,
-};
-
static const struct of_device_id rk808_of_match[] = {
{ .compatible = "rockchip,rk805" },
{ .compatible = "rockchip,rk808" },
@@ -550,7 +520,7 @@ static int rk808_probe(struct i2c_client *client,
const struct mfd_cell *cells;
int nr_pre_init_regs;
int nr_cells;
- int pm_off = 0, msb, lsb;
+ int msb, lsb;
unsigned char pmic_id_msb, pmic_id_lsb;
int ret;
int i;
@@ -594,8 +564,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg);
cells = rk805s;
nr_cells = ARRAY_SIZE(rk805s);
- rk808->pm_pwroff_fn = rk805_device_shutdown;
- rk808->pm_pwroff_prep_fn = rk805_device_shutdown_prepare;
break;
case RK808_ID:
rk808->regmap_cfg = &rk808_regmap_config;
@@ -604,7 +572,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg);
cells = rk808s;
nr_cells = ARRAY_SIZE(rk808s);
- rk808->pm_pwroff_fn = rk808_device_shutdown;
break;
case RK818_ID:
rk808->regmap_cfg = &rk818_regmap_config;
@@ -613,7 +580,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg);
cells = rk818s;
nr_cells = ARRAY_SIZE(rk818s);
- rk808->pm_pwroff_fn = rk818_device_shutdown;
break;
case RK809_ID:
case RK817_ID:
@@ -623,7 +589,6 @@ static int rk808_probe(struct i2c_client *client,
nr_pre_init_regs = ARRAY_SIZE(rk817_pre_init_reg);
cells = rk817s;
nr_cells = ARRAY_SIZE(rk817s);
- register_syscore_ops(&rk808_syscore_ops);
break;
default:
dev_err(&client->dev, "Unsupported RK8XX ID %lu\n",
@@ -674,17 +639,9 @@ static int rk808_probe(struct i2c_client *client,
goto err_irq;
}
- pm_off = of_property_read_bool(np,
- "rockchip,system-power-controller");
- if (pm_off && !pm_power_off) {
+ if (of_property_read_bool(np, "rockchip,system-power-controller")) {
rk808_i2c_client = client;
- pm_power_off = rk808->pm_pwroff_fn;
- }
-
- if (pm_off && !pm_power_off_prepare) {
- if (!rk808_i2c_client)
- rk808_i2c_client = client;
- pm_power_off_prepare = rk808->pm_pwroff_prep_fn;
+ pm_power_off = rk808_pm_power_off;
}
return 0;
@@ -704,25 +661,24 @@ static int rk808_remove(struct i2c_client *client)
* pm_power_off may point to a function from another module.
* Check if the pointer is set by us and only then overwrite it.
*/
- if (rk808->pm_pwroff_fn && pm_power_off == rk808->pm_pwroff_fn)
+ if (pm_power_off == rk808_pm_power_off)
pm_power_off = NULL;
- /**
- * As above, check if the pointer is set by us before overwrite.
- */
- if (rk808->pm_pwroff_prep_fn &&
- pm_power_off_prepare == rk808->pm_pwroff_prep_fn)
- pm_power_off_prepare = NULL;
-
return 0;
}
static int __maybe_unused rk8xx_suspend(struct device *dev)
{
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+ struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev));
int ret = 0;
switch (rk808->variant) {
+ case RK805_ID:
+ ret = regmap_update_bits(rk808->regmap,
+ RK805_GPIO_IO_POL_REG,
+ SLP_SD_MSK,
+ SLEEP_FUN);
+ break;
case RK809_ID:
case RK817_ID:
ret = regmap_update_bits(rk808->regmap,
@@ -739,7 +695,7 @@ static int __maybe_unused rk8xx_suspend(struct device *dev)
static int __maybe_unused rk8xx_resume(struct device *dev)
{
- struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client);
+ struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev));
int ret = 0;
switch (rk808->variant) {
@@ -766,6 +722,7 @@ static struct i2c_driver rk808_i2c_driver = {
},
.probe = rk808_probe,
.remove = rk808_remove,
+ .shutdown = rk8xx_shutdown,
};
module_i2c_driver(rk808_i2c_driver);
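The rk808 rework above folds four per-variant shutdown callbacks into a single pm_power_off handler plus an i2c .shutdown hook. Reduced to plain C, the hand-off pattern looks as follows; the names mimic the driver, but this is a sketch, not kernel code:

#include <stdio.h>

static void (*pm_power_off)(void);

static void rk808_pm_power_off(void)
{
	puts("write the DEV_OFF bit for the detected variant");
}

static void rk808_probe(int system_power_controller)
{
	if (system_power_controller)
		pm_power_off = rk808_pm_power_off;
}

static void rk808_remove(void)
{
	/* Another driver may have taken over the pointer since probe. */
	if (pm_power_off == rk808_pm_power_off)
		pm_power_off = NULL;
}

int main(void)
{
	rk808_probe(1);
	if (pm_power_off)
		pm_power_off();
	rk808_remove();
	return 0;
}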
diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c
index ead2e79036a9..232de50562f9 100644
--- a/drivers/mfd/rn5t618.c
+++ b/drivers/mfd/rn5t618.c
@@ -8,10 +8,13 @@
#include <linux/delay.h>
#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/mfd/rn5t618.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
@@ -20,6 +23,13 @@ static const struct mfd_cell rn5t618_cells[] = {
{ .name = "rn5t618-wdt" },
};
+static const struct mfd_cell rc5t619_cells[] = {
+ { .name = "rn5t618-adc" },
+ { .name = "rn5t618-regulator" },
+ { .name = "rc5t619-rtc" },
+ { .name = "rn5t618-wdt" },
+};
+
static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -32,6 +42,8 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg)
case RN5T618_IR_GPF:
case RN5T618_MON_IOIN:
case RN5T618_INTMON:
+ case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2:
+ case RN5T618_RTC_SECONDS ... RN5T618_RTC_YEAR:
return true;
default:
return false;
@@ -46,9 +58,56 @@ static const struct regmap_config rn5t618_regmap_config = {
.cache_type = REGCACHE_RBTREE,
};
+static const struct regmap_irq rc5t619_irqs[] = {
+ REGMAP_IRQ_REG(RN5T618_IRQ_SYS, 0, BIT(0)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_DCDC, 0, BIT(1)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_RTC, 0, BIT(2)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_ADC, 0, BIT(3)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_GPIO, 0, BIT(4)),
+ REGMAP_IRQ_REG(RN5T618_IRQ_CHG, 0, BIT(6)),
+};
+
+static const struct regmap_irq_chip rc5t619_irq_chip = {
+ .name = "rc5t619",
+ .irqs = rc5t619_irqs,
+ .num_irqs = ARRAY_SIZE(rc5t619_irqs),
+ .num_regs = 1,
+ .status_base = RN5T618_INTMON,
+ .mask_base = RN5T618_INTEN,
+ .mask_invert = true,
+};
+
static struct rn5t618 *rn5t618_pm_power_off;
static struct notifier_block rn5t618_restart_handler;
+static int rn5t618_irq_init(struct rn5t618 *rn5t618)
+{
+ const struct regmap_irq_chip *irq_chip = NULL;
+ int ret;
+
+ if (!rn5t618->irq)
+ return 0;
+
+ switch (rn5t618->variant) {
+ case RC5T619:
+ irq_chip = &rc5t619_irq_chip;
+ break;
+ default:
+ dev_err(rn5t618->dev, "Currently no IRQ support for variant %d\n",
+ (int)rn5t618->variant);
+ return -ENOENT;
+ }
+
+ ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap,
+ rn5t618->irq,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ 0, irq_chip, &rn5t618->irq_data);
+ if (ret)
+ dev_err(rn5t618->dev, "Failed to register IRQ chip\n");
+
+ return ret;
+}
+
static void rn5t618_trigger_poweroff_sequence(bool repower)
{
/* disable automatic repower-on */
@@ -87,8 +146,7 @@ static const struct of_device_id rn5t618_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rn5t618_of_match);
-static int rn5t618_i2c_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+static int rn5t618_i2c_probe(struct i2c_client *i2c)
{
const struct of_device_id *of_id;
struct rn5t618 *priv;
@@ -106,6 +164,8 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, priv);
priv->variant = (long)of_id->data;
+ priv->irq = i2c->irq;
+ priv->dev = &i2c->dev;
priv->regmap = devm_regmap_init_i2c(i2c, &rn5t618_regmap_config);
if (IS_ERR(priv->regmap)) {
@@ -114,8 +174,16 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
return ret;
}
- ret = devm_mfd_add_devices(&i2c->dev, -1, rn5t618_cells,
- ARRAY_SIZE(rn5t618_cells), NULL, 0, NULL);
+ if (priv->variant == RC5T619)
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+ rc5t619_cells,
+ ARRAY_SIZE(rc5t619_cells),
+ NULL, 0, NULL);
+ else
+ ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+ rn5t618_cells,
+ ARRAY_SIZE(rn5t618_cells),
+ NULL, 0, NULL);
if (ret) {
dev_err(&i2c->dev, "failed to add sub-devices: %d\n", ret);
return ret;
@@ -138,7 +206,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c,
return ret;
}
- return 0;
+ return rn5t618_irq_init(priv);
}
static int rn5t618_i2c_remove(struct i2c_client *i2c)
@@ -155,19 +223,38 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c)
return 0;
}
-static const struct i2c_device_id rn5t618_i2c_id[] = {
- { }
-};
-MODULE_DEVICE_TABLE(i2c, rn5t618_i2c_id);
+static int __maybe_unused rn5t618_i2c_suspend(struct device *dev)
+{
+ struct rn5t618 *priv = dev_get_drvdata(dev);
+
+ if (priv->irq)
+ disable_irq(priv->irq);
+
+ return 0;
+}
+
+static int __maybe_unused rn5t618_i2c_resume(struct device *dev)
+{
+ struct rn5t618 *priv = dev_get_drvdata(dev);
+
+ if (priv->irq)
+ enable_irq(priv->irq);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(rn5t618_i2c_dev_pm_ops,
+ rn5t618_i2c_suspend,
+ rn5t618_i2c_resume);
static struct i2c_driver rn5t618_i2c_driver = {
.driver = {
.name = "rn5t618",
.of_match_table = of_match_ptr(rn5t618_of_match),
+ .pm = &rn5t618_i2c_dev_pm_ops,
},
- .probe = rn5t618_i2c_probe,
+ .probe_new = rn5t618_i2c_probe,
.remove = rn5t618_i2c_remove,
- .id_table = rn5t618_i2c_id,
};
module_i2c_driver(rn5t618_i2c_driver);
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index c0529a1cd5ea..ebdf2f11ae28 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -10,6 +10,7 @@
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
+#include <uapi/linux/usb/charger.h>
#define SPRD_PMIC_INT_MASK_STATUS 0x0
#define SPRD_PMIC_INT_RAW_STATUS 0x4
@@ -17,6 +18,16 @@
#define SPRD_SC2731_IRQ_BASE 0x140
#define SPRD_SC2731_IRQ_NUMS 16
+#define SPRD_SC2731_CHG_DET 0xedc
+
+/* PMIC charger detection definition */
+#define SPRD_PMIC_CHG_DET_DELAY_US 200000
+#define SPRD_PMIC_CHG_DET_TIMEOUT 2000000
+#define SPRD_PMIC_CHG_DET_DONE BIT(11)
+#define SPRD_PMIC_SDP_TYPE BIT(7)
+#define SPRD_PMIC_DCP_TYPE BIT(6)
+#define SPRD_PMIC_CDP_TYPE BIT(5)
+#define SPRD_PMIC_CHG_TYPE_MASK GENMASK(7, 5)
struct sprd_pmic {
struct regmap *regmap;
@@ -24,12 +35,14 @@ struct sprd_pmic {
struct regmap_irq *irqs;
struct regmap_irq_chip irq_chip;
struct regmap_irq_chip_data *irq_data;
+ const struct sprd_pmic_data *pdata;
int irq;
};
struct sprd_pmic_data {
u32 irq_base;
u32 num_irqs;
+ u32 charger_det;
};
/*
@@ -40,8 +53,46 @@ struct sprd_pmic_data {
static const struct sprd_pmic_data sc2731_data = {
.irq_base = SPRD_SC2731_IRQ_BASE,
.num_irqs = SPRD_SC2731_IRQ_NUMS,
+ .charger_det = SPRD_SC2731_CHG_DET,
};
+enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct sprd_pmic *ddata = spi_get_drvdata(spi);
+ const struct sprd_pmic_data *pdata = ddata->pdata;
+ enum usb_charger_type type;
+ u32 val;
+ int ret;
+
+ ret = regmap_read_poll_timeout(ddata->regmap, pdata->charger_det, val,
+ (val & SPRD_PMIC_CHG_DET_DONE),
+ SPRD_PMIC_CHG_DET_DELAY_US,
+ SPRD_PMIC_CHG_DET_TIMEOUT);
+ if (ret) {
+ dev_err(&spi->dev, "failed to detect charger type\n");
+ return UNKNOWN_TYPE;
+ }
+
+ switch (val & SPRD_PMIC_CHG_TYPE_MASK) {
+ case SPRD_PMIC_CDP_TYPE:
+ type = CDP_TYPE;
+ break;
+ case SPRD_PMIC_DCP_TYPE:
+ type = DCP_TYPE;
+ break;
+ case SPRD_PMIC_SDP_TYPE:
+ type = SDP_TYPE;
+ break;
+ default:
+ type = UNKNOWN_TYPE;
+ break;
+ }
+
+ return type;
+}
+EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type);
+
static const struct mfd_cell sprd_pmic_devs[] = {
{
.name = "sc27xx-wdt",
@@ -181,6 +232,7 @@ static int sprd_pmic_probe(struct spi_device *spi)
spi_set_drvdata(spi, ddata);
ddata->dev = &spi->dev;
ddata->irq = spi->irq;
+ ddata->pdata = pdata;
ddata->irq_chip.name = dev_name(&spi->dev);
ddata->irq_chip.status_base =
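As an illustration only, here is a userspace mock of how a charger driver might consume the newly exported helper; the detection function is stubbed out and the enum values mirror <uapi/linux/usb/charger.h>:

#include <stdio.h>

enum usb_charger_type { UNKNOWN_TYPE, SDP_TYPE, DCP_TYPE, CDP_TYPE, ACA_TYPE };

/* Stub for the helper exported above. */
static enum usb_charger_type sprd_pmic_detect_charger_type(void *dev)
{
	(void)dev;
	return DCP_TYPE;	/* pretend a dedicated charger was found */
}

int main(void)
{
	switch (sprd_pmic_detect_charger_type(NULL)) {
	case SDP_TYPE:
		puts("standard downstream port");
		break;
	case CDP_TYPE:
		puts("charging downstream port");
		break;
	case DCP_TYPE:
		puts("dedicated charging port");
		break;
	default:
		puts("unknown charger");
		break;
	}
	return 0;
}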
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index cc92bc3ed820..886459e0ddd9 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -11,6 +11,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
#ifdef CONFIG_X86_32
#include <asm/desc.h>
@@ -175,6 +176,80 @@ void lkdtm_HUNG_TASK(void)
schedule();
}
+volatile unsigned int huge = INT_MAX - 2;
+volatile unsigned int ignored;
+
+void lkdtm_OVERFLOW_SIGNED(void)
+{
+ int value;
+
+ value = huge;
+ pr_info("Normal signed addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing signed addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+void lkdtm_OVERFLOW_UNSIGNED(void)
+{
+ unsigned int value;
+
+ value = huge;
+ pr_info("Normal unsigned addition ...\n");
+ value += 1;
+ ignored = value;
+
+ pr_info("Overflowing unsigned addition ...\n");
+ value += 4;
+ ignored = value;
+}
+
+/* Intentionally using old-style flex array definition of 1 byte. */
+struct array_bounds_flex_array {
+ int one;
+ int two;
+ char data[1];
+};
+
+struct array_bounds {
+ int one;
+ int two;
+ char data[8];
+ int three;
+};
+
+void lkdtm_ARRAY_BOUNDS(void)
+{
+ struct array_bounds_flex_array *not_checked;
+ struct array_bounds *checked;
+ volatile int i;
+
+ not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL);
+ checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL);
+
+ pr_info("Array access within bounds ...\n");
+ /* For both, touch all bytes in the actual member size. */
+ for (i = 0; i < sizeof(checked->data); i++)
+ checked->data[i] = 'A';
+ /*
+ * For the uninstrumented flex array member, also touch 1 byte
+ * beyond to verify it is correctly uninstrumented.
+ */
+ for (i = 0; i < sizeof(not_checked->data) + 1; i++)
+ not_checked->data[i] = 'A';
+
+ pr_info("Array access beyond bounds ...\n");
+ for (i = 0; i < sizeof(checked->data) + 1; i++)
+ checked->data[i] = 'B';
+
+ kfree(not_checked);
+ kfree(checked);
+}
+
void lkdtm_CORRUPT_LIST_ADD(void)
{
/*
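The OVERFLOW_* tests added above exercise the difference between unsigned wraparound, which is well defined, and signed overflow, which is undefined behaviour and is what UBSAN-style instrumentation traps. A standalone analogue, with the seeds adjusted so each addition actually crosses its limit; building with -fsanitize=signed-integer-overflow makes the signed case report:

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned int u = UINT_MAX - 2;	/* near the unsigned limit */
	int s = INT_MAX - 2;		/* near the signed limit */

	u += 4;			/* defined: wraps modulo 2^32, u == 1 */
	printf("unsigned after +4: %u\n", u);

	s += 4;			/* undefined behaviour: exceeds INT_MAX */
	printf("signed after +4: %d\n", s);
	return 0;
}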
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 5ce4ac8c06fc..a5e344df9166 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -130,6 +130,9 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
+ CRASHTYPE(OVERFLOW_SIGNED),
+ CRASHTYPE(OVERFLOW_UNSIGNED),
+ CRASHTYPE(ARRAY_BOUNDS),
CRASHTYPE(EXEC_DATA),
CRASHTYPE(EXEC_STACK),
CRASHTYPE(EXEC_KMALLOC),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 8d13d0176624..601a2156a0d4 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -22,6 +22,9 @@ void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
+void lkdtm_OVERFLOW_SIGNED(void);
+void lkdtm_OVERFLOW_UNSIGNED(void);
+void lkdtm_ARRAY_BOUNDS(void);
void lkdtm_CORRUPT_LIST_ADD(void);
void lkdtm_CORRUPT_LIST_DEL(void);
void lkdtm_CORRUPT_USER_DS(void);
diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig
index b6841ba6d922..8f201d019f5a 100644
--- a/drivers/misc/mic/Kconfig
+++ b/drivers/misc/mic/Kconfig
@@ -133,8 +133,4 @@ config VOP
OS and tools for MIC to use with this driver are available from
<http://software.intel.com/en-us/mic-developer>.
-if VOP
-source "drivers/vhost/Kconfig.vringh"
-endif
-
endmenu
diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
index 426820ab9afe..b486250923c5 100644
--- a/drivers/mtd/ubi/fastmap-wl.c
+++ b/drivers/mtd/ubi/fastmap-wl.c
@@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
return victim;
}
+static inline void return_unused_peb(struct ubi_device *ubi,
+ struct ubi_wl_entry *e)
+{
+ wl_tree_add(e, &ubi->free);
+ ubi->free_count++;
+}
+
/**
* return_unused_pool_pebs - returns unused PEBs to the free tree.
* @ubi: UBI device description object
@@ -52,8 +59,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
- wl_tree_add(e, &ubi->free);
- ubi->free_count++;
+ return_unused_peb(ubi, e);
}
}
@@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
return_unused_pool_pebs(ubi, &ubi->fm_pool);
return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+ if (ubi->fm_anchor) {
+ return_unused_peb(ubi, ubi->fm_anchor);
+ ubi->fm_anchor = NULL;
+ }
+
if (ubi->fm) {
for (i = 0; i < ubi->fm->used_blocks; i++)
kfree(ubi->fm->e[i]);
diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h
index b5fe8f82281b..386db0598e95 100644
--- a/drivers/mtd/ubi/ubi-media.h
+++ b/drivers/mtd/ubi/ubi-media.h
@@ -498,6 +498,6 @@ struct ubi_fm_volhdr {
struct ubi_fm_eba {
__be32 magic;
__be32 reserved_pebs;
- __be32 pnum[0];
+ __be32 pnum[];
} __packed;
#endif /* !__UBI_MEDIA_H__ */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 837d690a8c60..5146cce5fe32 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1875,7 +1875,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
- ubi_ensure_anchor_pebs(ubi);
+ if (!ubi->ro_mode && !ubi->fm_disabled)
+ ubi_ensure_anchor_pebs(ubi);
#endif
return 0;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index e74e2bb61236..9db0570c5beb 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -58,8 +58,4 @@ config CAIF_VIRTIO
---help---
The CAIF driver for CAIF over Virtio.
-if CAIF_VIRTIO
-source "drivers/vhost/Kconfig.vringh"
-endif
-
endif # CAIF_DRIVERS
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index 086dfb1b9d0b..91cdc0a2b1a7 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;
- cf.can_id = 0;
+ memset(&cf, 0, sizeof(cf));
switch (*cmd) {
case 'r':
@@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
else
return;
- *(u64 *) (&cf.data) = 0; /* clear payload */
-
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {
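The slc_bump() change replaces piecemeal clearing of the frame with a single memset(), which also zeroes padding bytes and any member a later change might add. A sketch with a stand-in layout (the real struct can_frame lives in <linux/can.h>):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in layout; padding precedes data[] in the real struct. */
struct frame {
	uint32_t can_id;
	uint8_t can_dlc;
	uint8_t data[8];
};

static void bump(struct frame *out)
{
	struct frame cf;

	memset(&cf, 0, sizeof(cf));	/* zeroes every byte, padding included */
	cf.can_id = 0x123;
	cf.can_dlc = 2;
	cf.data[0] = 0xde;
	cf.data[1] = 0xad;
	*out = cf;
}

int main(void)
{
	struct frame f;

	bump(&f);
	printf("id=%#x dlc=%u data[0]=%#x\n",
	       (unsigned)f.can_id, f.can_dlc, f.data[0]);
	return 0;
}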
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index affa5c6e135c..c7ac63f41918 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -480,7 +480,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;
- err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);
@@ -1079,6 +1079,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
+ struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -1146,7 +1147,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);
- bcm_sf2_identify_ports(priv, dn->child);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }
priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index ef57552db260..2d0d91db0ddb 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1403,6 +1403,9 @@ mt7530_setup(struct dsa_switch *ds)
continue;
phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
if (phy_node->parent == priv->dev->of_node->parent) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV)
diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
index 97901c114bfa..fbe9d88b13c7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
+++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c
@@ -491,7 +491,7 @@ get_ingress_preclass_record(struct aq_hw_s *hw,
rec->snap[1] = packed_record[8] & 0xFF;
rec->llc = (packed_record[8] >> 8) & 0xFF;
- rec->llc = packed_record[9] << 8;
+ rec->llc |= packed_record[9] << 8;
rec->mac_sa[0] = packed_record[10];
rec->mac_sa[0] |= packed_record[11] << 16;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9638d65d8261..517caedc0a87 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6874,7 +6874,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
/* In this option, the first PHY makes sure to pass the
* traffic through itself only.
- * Its not clear how to reset the link on the second phy
+ * It's not clear how to reset the link on the second
+ * phy.
*/
active_external_phy = EXT_PHY1;
break;
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h
index a04eccbc78e8..1e0ffe8f4152 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.h
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h
@@ -24,7 +24,7 @@ struct cavium_ptp {
struct ptp_clock *ptp_clock;
};
-#if IS_ENABLED(CONFIG_CAVIUM_PTP)
+#if IS_REACHABLE(CONFIG_CAVIUM_PTP)
struct cavium_ptp *cavium_ptp_get(void);
void cavium_ptp_put(struct cavium_ptp *ptp);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 75fde0d4d493..a70018f067aa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3132,7 +3132,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return ret;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- pi->xact_addr_filt = ret;
return 0;
}
@@ -6672,6 +6671,10 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->port[i]->reg_state == NETREG_REGISTERED)
cxgb_close(adapter->port[i]);
+ rtnl_lock();
+ cxgb4_mqprio_stop_offload(adapter);
+ rtnl_unlock();
+
if (is_uld(adapter)) {
detach_ulds(adapter);
t4_uld_clean_up(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
index ec3eb45ee3b4..e6af4906d674 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
@@ -301,6 +301,7 @@ static void cxgb4_mqprio_free_hw_resources(struct net_device *dev)
cxgb4_clear_msix_aff(eorxq->msix->vec,
eorxq->msix->aff_mask);
free_irq(eorxq->msix->vec, &eorxq->rspq);
+ cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx);
}
free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);
@@ -611,6 +612,28 @@ out:
return ret;
}
+void cxgb4_mqprio_stop_offload(struct adapter *adap)
+{
+ struct cxgb4_tc_port_mqprio *tc_port_mqprio;
+ struct net_device *dev;
+ u8 i;
+
+ if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio)
+ return;
+
+ for_each_port(adap, i) {
+ dev = adap->port[i];
+ if (!dev)
+ continue;
+
+ tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i];
+ if (!tc_port_mqprio->mqprio.qopt.num_tc)
+ continue;
+
+ cxgb4_mqprio_disable_offload(dev);
+ }
+}
+
int cxgb4_init_tc_mqprio(struct adapter *adap)
{
struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
index c532f1ef8451..ff8794132b22 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
@@ -38,6 +38,7 @@ struct cxgb4_tc_mqprio {
int cxgb4_setup_tc_mqprio(struct net_device *dev,
struct tc_mqprio_qopt_offload *mqprio);
+void cxgb4_mqprio_stop_offload(struct adapter *adap);
int cxgb4_init_tc_mqprio(struct adapter *adap);
void cxgb4_cleanup_tc_mqprio(struct adapter *adap);
#endif /* __CXGB4_TC_MQPRIO_H__ */
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 835b7816e372..87236206366f 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1731,7 +1731,7 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv)
if (rc)
goto cleanup_clk;
- /* RCLK is for RMII, typically used for NCSI. Optional because its not
+ /* RCLK is for RMII, typically used for NCSI. Optional because it's not
* necessary if it's the AST2400 MAC, or the MAC is configured for
* RGMII, or the controller is not an ASPEED-based controller.
*/
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 2f76908cae73..51117a5a6bbf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -150,14 +150,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
u8 prio = act->vlan.prio;
u16 vid = act->vlan.vid;
- return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
- act->id, vid,
- proto, prio, extack);
+ err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ act->id, vid,
+ proto, prio, extack);
+ if (err)
+ return err;
+ break;
}
case FLOW_ACTION_PRIORITY:
- return mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
- act->priority,
- extack);
+ err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei,
+ act->priority,
+ extack);
+ if (err)
+ return err;
+ break;
case FLOW_ACTION_MANGLE: {
enum flow_action_mangle_base htype = act->mangle.htype;
__be32 be_mask = (__force __be32) act->mangle.mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 9096ffd89e50..fbf714d027d8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -643,7 +643,7 @@ static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size,
{
int bs = fls64(burst) - 1;
- if (burst != (1 << bs)) {
+ if (burst != (BIT_ULL(bs))) {
NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two");
return -EINVAL;
}
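The BIT_ULL() fix above matters because a plain 1 << bs is an int shift, undefined once bs reaches the width of int, while the burst value is a u64. A userspace sketch of the corrected power-of-two check, with fls64 reimplemented naively for illustration:

#include <stdio.h>
#include <stdint.h>

static int fls64_(uint64_t x)	/* find last set bit, 1-based */
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	uint64_t burst = 1ULL << 40;
	int bs = fls64_(burst) - 1;

	/* bs == 40 here; "1 << bs" would be an undefined 32-bit shift,
	 * which is why the driver moved to BIT_ULL(bs). */
	if (burst == (1ULL << bs))
		puts("burst is a power of two");
	else
		puts("burst is not a power of two");
	return 0;
}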
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 1a5fc2ae351c..29810a1aa210 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -369,8 +369,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
- int rc = -EINVAL;
u16 rx_mode = 0;
+ int rc;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 1305522f72d6..40efe60eff8d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -282,7 +282,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
{
struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
- struct rmnet_endpoint *ep;
struct rmnet_port *port;
u16 mux_id;
@@ -297,19 +296,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- if (rmnet_get_endpoint(port, mux_id)) {
- NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
- return -EINVAL;
- }
- ep = rmnet_get_endpoint(port, priv->mux_id);
- if (!ep)
- return -ENODEV;
- hlist_del_init_rcu(&ep->hlnode);
- hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+ if (mux_id != priv->mux_id) {
+ struct rmnet_endpoint *ep;
+
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+ if (!ep)
+ return -ENODEV;
- ep->mux_id = mux_id;
- priv->mux_id = mux_id;
+ if (rmnet_get_endpoint(port, mux_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MUX ID already exists");
+ return -EINVAL;
+ }
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode,
+ &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
}
if (data[IFLA_RMNET_FLAGS]) {
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 55cb5730beb6..bf5bf05970a2 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -5441,9 +5441,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -5460,26 +5459,26 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Disallow toggling */
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ if (rtl_chip_supports_csum_v2(tp))
+ dev->hw_features |= NETIF_F_IPV6_CSUM;
+
+ dev->features |= dev->hw_features;
+
+ /* There have been a number of reports that using SG/TSO results in
+ * tx timeouts. However, for many users SG/TSO works fine.
+ * Therefore disable both features by default, but allow users to
+ * enable them. Use at your own risk!
+ */
if (rtl_chip_supports_csum_v2(tp)) {
- dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}
- /* RTL8168e-vl and one RTL8168c variant are known to have a
- * HW issue with TSO.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
- dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- }
-
- dev->features |= dev->hw_features;
-
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 542784300620..efc6ec1b8027 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -207,7 +207,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
reg++;
}
- while (reg <= perfect_addr_number) {
+ while (reg < perfect_addr_number) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
reg++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 0e4575f7bedb..ad4df9bddcf3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -577,8 +577,13 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
value |= XGMAC_VLAN_DOVLTC;
+ } else {
+ value &= ~XGMAC_VLAN_EDVLP;
+ value &= ~XGMAC_VLAN_ESVL;
+ value &= ~XGMAC_VLAN_DOVLTC;
}
+ value &= ~XGMAC_VLAN_VID;
writel(value, ioaddr + XGMAC_VLAN_TAG);
} else if (perfect_match) {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
@@ -589,13 +594,19 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
value = readl(ioaddr + XGMAC_VLAN_TAG);
+ value &= ~XGMAC_VLAN_VTHM;
value |= XGMAC_VLAN_ETV;
if (is_double) {
value |= XGMAC_VLAN_EDVLP;
value |= XGMAC_VLAN_ESVL;
value |= XGMAC_VLAN_DOVLTC;
+ } else {
+ value &= ~XGMAC_VLAN_EDVLP;
+ value &= ~XGMAC_VLAN_ESVL;
+ value &= ~XGMAC_VLAN_DOVLTC;
}
+ value &= ~XGMAC_VLAN_VID;
writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
} else {
u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2fb671e61ee8..e6898fd5223f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4566,9 +4566,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
return ret;
}
- ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+ }
- return ret;
+ return 0;
}
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
@@ -4581,9 +4585,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
is_double = true;
clear_bit(vid, priv->active_vlans);
- ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
- if (ret)
- return ret;
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ return ret;
+ }
return stmmac_vlan_update(priv, is_double);
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index da82d7f16a09..0d580d81d910 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -2594,6 +2594,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
return PTR_ERR(dev);
macsec = macsec_priv(dev);
+ if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE])
+ return -EINVAL;
+
offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
if (macsec->offload == offload)
return 0;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 481cf48c9b9e..31f731e6df72 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -425,8 +425,8 @@ static int at803x_parse_dt(struct phy_device *phydev)
*/
if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) ||
at803x_match_phy_id(phydev, ATH8035_PHY_ID)) {
- priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK;
- priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK;
+ priv->clk_25m_reg &= AT8035_CLK_OUT_MASK;
+ priv->clk_25m_mask &= AT8035_CLK_OUT_MASK;
}
}
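The at803x change flips &= ~AT8035_CLK_OUT_MASK to &= AT8035_CLK_OUT_MASK: the former clears the clock-output field, the latter isolates it, which is what the driver needs before programming the field. A two-line demonstration with a hypothetical mask value:

#include <stdio.h>

#define CLK_OUT_MASK 0x18u	/* hypothetical two-bit field */

int main(void)
{
	unsigned int reg = 0xff;

	printf("reg & ~MASK = %#x (field cleared)\n", reg & ~CLK_OUT_MASK);
	printf("reg &  MASK = %#x (field isolated)\n", reg & CLK_OUT_MASK);
	return 0;
}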
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2ec19e5540bf..05d20343b816 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/delay.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -952,6 +953,12 @@ static int kszphy_resume(struct phy_device *phydev)
genphy_resume(phydev);
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
ret = kszphy_config_reset(phydev);
if (ret)
return ret;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 228fe449dc6d..07476c6510f2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1678,8 +1678,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
}
err = tun_xdp_act(tun, xdp_prog, &xdp, act);
- if (err < 0)
- goto err_xdp;
+ if (err < 0) {
+ if (act == XDP_REDIRECT || act == XDP_TX)
+ put_page(alloc_frag->page);
+ goto out;
+ }
+
if (err == XDP_REDIRECT)
xdp_do_flush();
if (err != XDP_PASS)
@@ -1693,8 +1697,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
-err_xdp:
- put_page(alloc_frag->page);
out:
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 8783e2ab3ec0..0ef7e1f443e3 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -54,6 +54,7 @@ static const char driver_name[] = "pegasus";
#undef PEGASUS_WRITE_EEPROM
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
+#define CARRIER_CHECK_DELAY (2 * HZ)
static bool loopback;
static bool mii_mode;
@@ -1089,17 +1090,12 @@ static inline void setup_pegasus_II(pegasus_t *pegasus)
set_register(pegasus, Reg81, 2);
}
-
-static int pegasus_count;
-static struct workqueue_struct *pegasus_workqueue;
-#define CARRIER_CHECK_DELAY (2 * HZ)
-
static void check_carrier(struct work_struct *work)
{
pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work);
set_carrier(pegasus->net);
if (!(pegasus->flags & PEGASUS_UNPLUG)) {
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
}
}
@@ -1120,18 +1116,6 @@ static int pegasus_blacklisted(struct usb_device *udev)
return 0;
}
-/* we rely on probe() and remove() being serialized so we
- * don't need extra locking on pegasus_count.
- */
-static void pegasus_dec_workqueue(void)
-{
- pegasus_count--;
- if (pegasus_count == 0) {
- destroy_workqueue(pegasus_workqueue);
- pegasus_workqueue = NULL;
- }
-}
-
static int pegasus_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -1144,14 +1128,6 @@ static int pegasus_probe(struct usb_interface *intf,
if (pegasus_blacklisted(dev))
return -ENODEV;
- if (pegasus_count == 0) {
- pegasus_workqueue = alloc_workqueue("pegasus", WQ_MEM_RECLAIM,
- 0);
- if (!pegasus_workqueue)
- return -ENOMEM;
- }
- pegasus_count++;
-
net = alloc_etherdev(sizeof(struct pegasus));
if (!net)
goto out;
@@ -1209,7 +1185,7 @@ static int pegasus_probe(struct usb_interface *intf,
res = register_netdev(net);
if (res)
goto out3;
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
dev_info(&intf->dev, "%s, %s, %pM\n", net->name,
usb_dev_id[dev_index].name, net->dev_addr);
@@ -1222,7 +1198,6 @@ out2:
out1:
free_netdev(net);
out:
- pegasus_dec_workqueue();
return res;
}
@@ -1237,7 +1212,7 @@ static void pegasus_disconnect(struct usb_interface *intf)
}
pegasus->flags |= PEGASUS_UNPLUG;
- cancel_delayed_work(&pegasus->carrier_check);
+ cancel_delayed_work_sync(&pegasus->carrier_check);
unregister_netdev(pegasus->net);
unlink_all_urbs(pegasus);
free_all_urbs(pegasus);
@@ -1246,7 +1221,6 @@ static void pegasus_disconnect(struct usb_interface *intf)
pegasus->rx_skb = NULL;
}
free_netdev(pegasus->net);
- pegasus_dec_workqueue();
}
static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
@@ -1254,7 +1228,7 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message)
struct pegasus *pegasus = usb_get_intfdata(intf);
netif_device_detach(pegasus->net);
- cancel_delayed_work(&pegasus->carrier_check);
+ cancel_delayed_work_sync(&pegasus->carrier_check);
if (netif_running(pegasus->net)) {
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
@@ -1276,7 +1250,7 @@ static int pegasus_resume(struct usb_interface *intf)
pegasus->intr_urb->actual_length = 0;
intr_callback(pegasus->intr_urb);
}
- queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check,
+ queue_delayed_work(system_long_wq, &pegasus->carrier_check,
CARRIER_CHECK_DELAY);
return 0;
}
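The pegasus rework drops the driver-private workqueue in favour of the shared system_long_wq and switches teardown to cancel_delayed_work_sync(), closing the window where a still-running work item could re-arm itself after disconnect. A kernel-style sketch of the self-rearming pattern; the structure and names are hypothetical and this fragment is not buildable outside a kernel tree:

#include <linux/workqueue.h>

/* Hypothetical device structure standing in for struct pegasus. */
struct my_dev {
	struct delayed_work carrier_check;
	unsigned long flags;
#define MY_UNPLUG	0x1
};

static void check_carrier(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev,
					  carrier_check.work);

	/* ... poll the link state here ... */

	if (!(dev->flags & MY_UNPLUG))
		queue_delayed_work(system_long_wq, &dev->carrier_check,
				   2 * HZ);
}

static void my_probe(struct my_dev *dev)
{
	INIT_DELAYED_WORK(&dev->carrier_check, check_carrier);
	queue_delayed_work(system_long_wq, &dev->carrier_check, 2 * HZ);
}

static void my_disconnect(struct my_dev *dev)
{
	dev->flags |= MY_UNPLUG;
	/* _sync waits for a running instance, so it cannot re-arm */
	cancel_delayed_work_sync(&dev->carrier_check);
}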
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index f66c0f8f6f4a..ecb3fccca603 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -740,9 +740,6 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery);
static
int i2400m_bm_buf_alloc(struct i2400m *i2400m)
{
- int result;
-
- result = -ENOMEM;
i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
if (i2400m->bm_cmd_buf == NULL)
goto error_bm_cmd_kzalloc;
@@ -754,7 +751,7 @@ int i2400m_bm_buf_alloc(struct i2400m *i2400m)
error_bm_ack_buf_kzalloc:
kfree(i2400m->bm_cmd_buf);
error_bm_cmd_kzalloc:
- return result;
+ return -ENOMEM;
}
@@ -843,7 +840,7 @@ EXPORT_SYMBOL_GPL(i2400m_reset);
*/
int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
{
- int result = -ENODEV;
+ int result;
struct device *dev = i2400m_dev(i2400m);
struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
struct net_device *net_dev = i2400m->wimax_dev.net_dev;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index a8b515968569..09087c38fabd 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -1042,8 +1042,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
return -EFAULT;
}
- if (!desc || (desc->out_num + desc->in_num == 0) ||
- !test_bit(cmd, &cmd_mask))
+ if (!desc ||
+ (desc->out_num + desc->in_num == 0) ||
+ cmd > ND_CMD_CALL ||
+ !test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 64776ed15bb3..7d4ddc4d9322 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -99,7 +99,7 @@ static int nvdimm_probe(struct device *dev)
if (ndd->ns_current >= 0) {
rc = nd_label_reserve_dpa(ndd);
if (rc == 0)
- nvdimm_set_aliasing(dev);
+ nvdimm_set_labeling(dev);
}
nvdimm_bus_unlock(dev);
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 94ea6dba6b4f..b7b77e8d9027 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -32,7 +32,7 @@ int nvdimm_check_config_data(struct device *dev)
if (!nvdimm->cmd_mask ||
!test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
- if (test_bit(NDD_ALIASING, &nvdimm->flags))
+ if (test_bit(NDD_LABELING, &nvdimm->flags))
return -ENXIO;
else
return -ENOTTY;
@@ -173,11 +173,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
return rc;
}
-void nvdimm_set_aliasing(struct device *dev)
+void nvdimm_set_labeling(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- set_bit(NDD_ALIASING, &nvdimm->flags);
+ set_bit(NDD_LABELING, &nvdimm->flags);
}
void nvdimm_set_locked(struct device *dev)
@@ -312,8 +312,9 @@ static ssize_t flags_show(struct device *dev,
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- return sprintf(buf, "%s%s\n",
+ return sprintf(buf, "%s%s%s\n",
test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+ test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);
@@ -562,6 +563,21 @@ int nvdimm_security_freeze(struct nvdimm *nvdimm)
return rc;
}
+static unsigned long dpa_align(struct nd_region *nd_region)
+{
+ struct device *dev = &nd_region->dev;
+
+ if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
+ "bus lock required for capacity provision\n"))
+ return 0;
+ if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
+ % nd_region->ndr_mappings,
+ "invalid region align %#lx mappings: %d\n",
+ nd_region->align, nd_region->ndr_mappings))
+ return 0;
+ return nd_region->align / nd_region->ndr_mappings;
+}
+
int alias_dpa_busy(struct device *dev, void *data)
{
resource_size_t map_end, blk_start, new;
@@ -570,6 +586,7 @@ int alias_dpa_busy(struct device *dev, void *data)
struct nd_region *nd_region;
struct nvdimm_drvdata *ndd;
struct resource *res;
+ unsigned long align;
int i;
if (!is_memory(dev))
@@ -607,13 +624,21 @@ int alias_dpa_busy(struct device *dev, void *data)
* Find the free dpa from the end of the last pmem allocation to
* the end of the interleave-set mapping.
*/
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
+
for_each_dpa_resource(ndd, res) {
+ resource_size_t start, end;
+
if (strncmp(res->name, "pmem", 4) != 0)
continue;
- if ((res->start >= blk_start && res->start < map_end)
- || (res->end >= blk_start
- && res->end <= map_end)) {
- new = max(blk_start, min(map_end + 1, res->end + 1));
+
+ start = ALIGN_DOWN(res->start, align);
+ end = ALIGN(res->end + 1, align) - 1;
+ if ((start >= blk_start && start < map_end)
+ || (end >= blk_start && end <= map_end)) {
+ new = max(blk_start, min(map_end, end) + 1);
if (new != blk_start) {
blk_start = new;
goto retry;
@@ -653,6 +678,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
.res = NULL,
};
struct resource *res;
+ unsigned long align;
if (!ndd)
return 0;
@@ -660,10 +686,20 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
/* now account for busy blk allocations in unaliased dpa */
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
for_each_dpa_resource(ndd, res) {
+ resource_size_t start, end, size;
+
if (strncmp(res->name, "blk", 3) != 0)
continue;
- info.available -= resource_size(res);
+ start = ALIGN_DOWN(res->start, align);
+ end = ALIGN(res->end + 1, align) - 1;
+ size = end - start + 1;
+ if (size >= info.available)
+ return 0;
+ info.available -= size;
}
return info.available;
@@ -682,19 +718,31 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nvdimm_bus *nvdimm_bus;
resource_size_t max = 0;
struct resource *res;
+ unsigned long align;
/* if a dimm is disabled the available capacity is zero */
if (!ndd)
return 0;
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
+
nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
return 0;
for_each_dpa_resource(ndd, res) {
+ resource_size_t start, end;
+
if (strcmp(res->name, "pmem-reserve") != 0)
continue;
- if (resource_size(res) > max)
- max = resource_size(res);
+ /* trim free space relative to current alignment setting */
+ start = ALIGN(res->start, align);
+ end = ALIGN_DOWN(res->end + 1, align) - 1;
+ if (end < start)
+ continue;
+ if (end - start + 1 > max)
+ max = end - start + 1;
}
release_free_pmem(nvdimm_bus, nd_mapping);
return max;
@@ -722,24 +770,33 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
const char *reason;
+ unsigned long align;
if (!ndd)
return 0;
+ align = dpa_align(nd_region);
+ if (!align)
+ return 0;
+
map_start = nd_mapping->start;
map_end = map_start + nd_mapping->size - 1;
blk_start = max(map_start, map_end + 1 - *overlap);
for_each_dpa_resource(ndd, res) {
- if (res->start >= map_start && res->start < map_end) {
+ resource_size_t start, end;
+
+ start = ALIGN_DOWN(res->start, align);
+ end = ALIGN(res->end + 1, align) - 1;
+ if (start >= map_start && start < map_end) {
if (strncmp(res->name, "blk", 3) == 0)
blk_start = min(blk_start,
- max(map_start, res->start));
- else if (res->end > map_end) {
+ max(map_start, start));
+ else if (end > map_end) {
reason = "misaligned to iset";
goto err;
} else
- busy += resource_size(res);
- } else if (res->end >= map_start && res->end <= map_end) {
+ busy += end - start + 1;
+ } else if (end >= map_start && end <= map_end) {
if (strncmp(res->name, "blk", 3) == 0) {
/*
* If a BLK allocation overlaps the start of
@@ -748,8 +805,8 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
*/
blk_start = map_start;
} else
- busy += resource_size(res);
- } else if (map_start > res->start && map_start < res->end) {
+ busy += end - start + 1;
+ } else if (map_start > start && map_start < end) {
/* total eclipse of the mapping */
busy += nd_mapping->size;
blk_start = map_start;
@@ -759,7 +816,7 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
*overlap = map_end + 1 - blk_start;
available = blk_start - map_start;
if (busy < available)
- return available - busy;
+ return ALIGN_DOWN(available - busy, align);
return 0;
err:
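The dpa_align() plumbing above widens every busy resource to alignment boundaries with ALIGN_DOWN()/ALIGN() before it is charged against free space, so a partially covered alignment block is never reported as allocatable. A userspace illustration, with macros mirroring the kernel's power-of-two variants:

#include <stdio.h>
#include <stdint.h>

/* Power-of-two-only mirrors of the kernel's ALIGN()/ALIGN_DOWN(). */
#define ALIGN_UP(x, a)		(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t align = 1ULL << 21;		/* 2 MiB */
	uint64_t res_start = 0x200000 + 0x1000;	/* misaligned busy range */
	uint64_t res_end = 0x600000 - 1;

	uint64_t start = ALIGN_DOWN(res_start, align);
	uint64_t end = ALIGN_UP(res_end + 1, align) - 1;

	printf("busy [%#llx-%#llx] accounted as [%#llx-%#llx], %llu bytes\n",
	       (unsigned long long)res_start, (unsigned long long)res_end,
	       (unsigned long long)start, (unsigned long long)end,
	       (unsigned long long)(end - start + 1));
	return 0;
}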
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index e02f60ad6c99..4cd18be9d0e9 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -7,6 +7,7 @@
#include <linux/memory_hotplug.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
+#include <linux/numa.h>
static int e820_pmem_remove(struct platform_device *pdev)
{
@@ -16,27 +17,16 @@ static int e820_pmem_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_MEMORY_HOTPLUG
-static int e820_range_to_nid(resource_size_t addr)
-{
- return memory_add_physaddr_to_nid(addr);
-}
-#else
-static int e820_range_to_nid(resource_size_t addr)
-{
- return NUMA_NO_NODE;
-}
-#endif
-
static int e820_register_one(struct resource *res, void *data)
{
struct nd_region_desc ndr_desc;
struct nvdimm_bus *nvdimm_bus = data;
+ int nid = phys_to_target_node(res->start);
memset(&ndr_desc, 0, sizeof(ndr_desc));
ndr_desc.res = res;
- ndr_desc.numa_node = e820_range_to_nid(res->start);
- ndr_desc.target_node = ndr_desc.numa_node;
+ ndr_desc.numa_node = numa_map_to_online_node(nid);
+ ndr_desc.target_node = nid;
set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags);
if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc))
return -ENXIO;
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 4c7b775c2811..956b6d1bd8cc 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -62,7 +62,7 @@ struct nd_namespace_index {
__le16 major;
__le16 minor;
__le64 checksum;
- u8 free[0];
+ u8 free[];
};
/**
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 032dc61725ff..ae155e860fdc 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -10,6 +10,7 @@
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
+#include "pfn.h"
#include "nd.h"
static void namespace_io_release(struct device *dev)
@@ -541,6 +542,11 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
{
bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
+ unsigned long align;
+
+ align = nd_region->align / nd_region->ndr_mappings;
+ valid->start = ALIGN(valid->start, align);
+ valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;
if (valid->start >= valid->end)
goto invalid;
@@ -980,10 +986,10 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
return -ENXIO;
}
- div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder);
+ div_u64_rem(val, nd_region->align, &remainder);
if (remainder) {
dev_dbg(dev, "%llu is not %ldK aligned\n", val,
- (PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K);
+ nd_region->align / SZ_1K);
return -EINVAL;
}
@@ -1739,6 +1745,22 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
return ERR_PTR(-ENODEV);
}
+ /*
+ * Note, alignment validation for fsdax and devdax mode
+ * namespaces happens in nd_pfn_validate() where infoblock
+ * padding parameters can be applied.
+ */
+ if (pmem_should_map_pages(dev)) {
+ struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+ struct resource *res = &nsio->res;
+
+ if (!IS_ALIGNED(res->start | (res->end + 1),
+ memremap_compat_align())) {
+ dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ }
+
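The check above folds two alignment tests into one: for a power-of-two alignment, res->start | (res->end + 1) is aligned iff both operands are, since OR-ing can only set a low bit that one of them already had set. A quick demonstration with invented addresses:

#include <stdio.h>
#include <stdint.h>

#define IS_ALIGNED(x, a)	(((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	uint64_t align = 0x200000;		/* 2 MiB, power of two */
	uint64_t start = 0x100000000ULL;	/* aligned */
	uint64_t end   = 0x13fffffffULL;	/* end + 1 is aligned too */

	/* one test covers both endpoints */
	printf("%s\n", IS_ALIGNED(start | (end + 1), align) ? "ok" : "misaligned");

	end = 0x13ff0ffffULL;			/* end + 1 now misaligned */
	printf("%s\n", IS_ALIGNED(start | (end + 1), align) ? "ok" : "misaligned");
	return 0;
}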
if (is_namespace_pmem(&ndns->dev)) {
struct nd_namespace_pmem *nspm;
@@ -2521,7 +2543,7 @@ static int init_active_labels(struct nd_region *nd_region)
if (!ndd) {
if (test_bit(NDD_LOCKED, &nvdimm->flags))
/* fail, label data may be unreadable */;
- else if (test_bit(NDD_ALIASING, &nvdimm->flags))
+ else if (test_bit(NDD_LABELING, &nvdimm->flags))
/* fail, labels needed to disambiguate dpa */;
else
return 0;
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index c9f6a5b5253a..85dbb2a322b9 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -39,7 +39,7 @@ struct nd_region_data {
int ns_count;
int ns_active;
unsigned int hints_shift;
- void __iomem *flush_wpq[0];
+ void __iomem *flush_wpq[];
};
static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
@@ -146,6 +146,7 @@ struct nd_region {
struct device *btt_seed;
struct device *pfn_seed;
struct device *dax_seed;
+ unsigned long align;
u16 ndr_mappings;
u64 ndr_size;
u64 ndr_start;
@@ -156,7 +157,7 @@ struct nd_region {
struct nd_interleave_set *nd_set;
struct nd_percpu_lane __percpu *lane;
int (*flush)(struct nd_region *nd_region, struct bio *bio);
- struct nd_mapping mapping[0];
+ struct nd_mapping mapping[];
};
struct nd_blk_region {
@@ -252,7 +253,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
unsigned int len);
-void nvdimm_set_aliasing(struct device *dev);
+void nvdimm_set_labeling(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
int nvdimm_security_setup_events(struct device *dev);
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 8224d1431ea9..6826a274a1f1 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -62,8 +62,10 @@ static int of_pmem_region_probe(struct platform_device *pdev)
if (is_volatile)
region = nvdimm_volatile_region_create(bus, &ndr_desc);
- else
+ else {
+ set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags);
region = nvdimm_pmem_region_create(bus, &ndr_desc);
+ }
if (!region)
dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index acb19517f678..37cb1b8a2a39 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -24,6 +24,18 @@ struct nd_pfn_sb {
__le64 npfns;
__le32 mode;
/* minor-version-1 additions for section alignment */
+ /**
+ * @start_pad: Deprecated attribute to pad start-misaligned namespaces
+ *
+ * start_pad is deprecated because the original definition did
+ * not comprehend that dataoff is relative to the base address
+ * of the namespace not the start_pad adjusted base. The result
+ * is that the dax path is broken, but the block-I/O path is
+ * not. The kernel will no longer create namespaces using start
+ * padding, but it still supports block-I/O for legacy
+ * configurations, mainly to allow a backup, reconfigure, and
+ * restore flow that repairs dax operation.
+ */
__le32 start_pad;
__le32 end_trunc;
/* minor-version-2 record the base alignment of the mapping */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index b94f7a7e94b8..34db557dbad1 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -446,6 +446,7 @@ static bool nd_supported_alignment(unsigned long align)
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
{
u64 checksum, offset;
+ struct resource *res;
enum nd_pfn_mode mode;
struct nd_namespace_io *nsio;
unsigned long align, start_pad;
@@ -561,14 +562,14 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
nd_pfn->align, align, nd_pfn->mode,
mode);
- return -EINVAL;
+ return -EOPNOTSUPP;
}
}
if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
align, nvdimm_namespace_capacity(ndns));
- return -EINVAL;
+ return -EOPNOTSUPP;
}
/*
@@ -578,18 +579,31 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
* established.
*/
nsio = to_nd_namespace_io(&ndns->dev);
- if (offset >= resource_size(&nsio->res)) {
+ res = &nsio->res;
+ if (offset >= resource_size(res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
dev_name(&ndns->dev));
- return -EBUSY;
+ return -EOPNOTSUPP;
}
- if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
+ if ((align && !IS_ALIGNED(res->start + offset + start_pad, align))
|| !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
offset, align);
- return -ENXIO;
+ return -EOPNOTSUPP;
+ }
+
+ if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad),
+ memremap_compat_align())) {
+ dev_err(&nd_pfn->dev, "resource start misaligned\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc),
+ memremap_compat_align())) {
+ dev_err(&nd_pfn->dev, "resource end misaligned\n");
+ return -EOPNOTSUPP;
}
return 0;
@@ -750,7 +764,19 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
start = nsio->res.start;
size = resource_size(&nsio->res);
npfns = PHYS_PFN(size - SZ_8K);
- align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
+ align = max(nd_pfn->align, memremap_compat_align());
+
+ /*
+ * When @start is misaligned fail namespace creation. See
+ * the 'struct nd_pfn_sb' commentary on why ->start_pad is not
+ * an option.
+ */
+ if (!IS_ALIGNED(start, memremap_compat_align())) {
+ dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n",
+ dev_name(&ndns->dev), &start,
+ memremap_compat_align());
+ return -EINVAL;
+ }
end_trunc = start + size - ALIGN_DOWN(start + size, align);
if (nd_pfn->mode == PFN_MODE_PMEM) {
/*
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4ffc6f7ca131..2df6994acf83 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -136,9 +136,25 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
return BLK_STS_OK;
}
-static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, unsigned int op,
- sector_t sector)
+static blk_status_t pmem_do_read(struct pmem_device *pmem,
+ struct page *page, unsigned int page_off,
+ sector_t sector, unsigned int len)
+{
+ blk_status_t rc;
+ phys_addr_t pmem_off = sector * 512 + pmem->data_offset;
+ void *pmem_addr = pmem->virt_addr + pmem_off;
+
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
+ return BLK_STS_IOERR;
+
+ rc = read_pmem(page, page_off, pmem_addr, len);
+ flush_dcache_page(page);
+ return rc;
+}
+
+static blk_status_t pmem_do_write(struct pmem_device *pmem,
+ struct page *page, unsigned int page_off,
+ sector_t sector, unsigned int len)
{
blk_status_t rc = BLK_STS_OK;
bool bad_pmem = false;
@@ -148,34 +164,25 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
if (unlikely(is_bad_pmem(&pmem->bb, sector, len)))
bad_pmem = true;
- if (!op_is_write(op)) {
- if (unlikely(bad_pmem))
- rc = BLK_STS_IOERR;
- else {
- rc = read_pmem(page, off, pmem_addr, len);
- flush_dcache_page(page);
- }
- } else {
- /*
- * Note that we write the data both before and after
- * clearing poison. The write before clear poison
- * handles situations where the latest written data is
- * preserved and the clear poison operation simply marks
- * the address range as valid without changing the data.
- * In this case application software can assume that an
- * interrupted write will either return the new good
- * data or an error.
- *
- * However, if pmem_clear_poison() leaves the data in an
- * indeterminate state we need to perform the write
- * after clear poison.
- */
- flush_dcache_page(page);
- write_pmem(pmem_addr, page, off, len);
- if (unlikely(bad_pmem)) {
- rc = pmem_clear_poison(pmem, pmem_off, len);
- write_pmem(pmem_addr, page, off, len);
- }
+ /*
+ * Note that we write the data both before and after
+ * clearing poison. The write before clear poison
+ * handles situations where the latest written data is
+ * preserved and the clear poison operation simply marks
+ * the address range as valid without changing the data.
+ * In this case application software can assume that an
+ * interrupted write will either return the new good
+ * data or an error.
+ *
+ * However, if pmem_clear_poison() leaves the data in an
+ * indeterminate state we need to perform the write
+ * after clear poison.
+ */
+ flush_dcache_page(page);
+ write_pmem(pmem_addr, page, page_off, len);
+ if (unlikely(bad_pmem)) {
+ rc = pmem_clear_poison(pmem, pmem_off, len);
+ write_pmem(pmem_addr, page, page_off, len);
}
return rc;
@@ -197,8 +204,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
- rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_op(bio), iter.bi_sector);
+ if (op_is_write(bio_op(bio)))
+ rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset,
+ iter.bi_sector, bvec.bv_len);
+ else
+ rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset,
+ iter.bi_sector, bvec.bv_len);
if (rc) {
bio->bi_status = rc;
break;
@@ -223,9 +234,12 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_queue->queuedata;
blk_status_t rc;
- rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE,
- 0, op, sector);
-
+ if (op_is_write(op))
+ rc = pmem_do_write(pmem, page, 0, sector,
+ hpage_nr_pages(page) * PAGE_SIZE);
+ else
+ rc = pmem_do_read(pmem, page, 0, sector,
+ hpage_nr_pages(page) * PAGE_SIZE);
/*
* The ->rw_page interface is subtle and tricky. The core
* retries on any error, so we can only invoke page_endio() in
@@ -268,6 +282,16 @@ static const struct block_device_operations pmem_fops = {
.revalidate_disk = nvdimm_revalidate_disk,
};
+static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
+ size_t nr_pages)
+{
+ struct pmem_device *pmem = dax_get_private(dax_dev);
+
+ return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0,
+ PFN_PHYS(pgoff) >> SECTOR_SHIFT,
+ PAGE_SIZE));
+}
+
static long pmem_dax_direct_access(struct dax_device *dax_dev,
pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
@@ -299,6 +323,7 @@ static const struct dax_operations pmem_dax_ops = {
.dax_supported = generic_fsdax_supported,
.copy_from_iter = pmem_copy_from_iter,
.copy_to_iter = pmem_copy_to_iter,
+ .zero_page_range = pmem_dax_zero_page_range,
};
static const struct attribute_group *pmem_attribute_groups[] = {
@@ -461,9 +486,9 @@ static int pmem_attach_disk(struct device *dev,
if (is_nvdimm_sync(nd_region))
flags = DAXDEV_F_SYNC;
dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags);
- if (!dax_dev) {
+ if (IS_ERR(dax_dev)) {
put_disk(disk);
- return -ENOMEM;
+ return PTR_ERR(dax_dev);
}
dax_write_cache(dax_dev, nvdimm_has_cache(nd_region));
pmem->dax_dev = dax_dev;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a19e535830d9..ccbb5b43b8b2 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -195,16 +195,16 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);
int nd_region_to_nstype(struct nd_region *nd_region)
{
if (is_memory(&nd_region->dev)) {
- u16 i, alias;
+ u16 i, label;
- for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
+ for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) {
struct nd_mapping *nd_mapping = &nd_region->mapping[i];
struct nvdimm *nvdimm = nd_mapping->nvdimm;
- if (test_bit(NDD_ALIASING, &nvdimm->flags))
- alias++;
+ if (test_bit(NDD_LABELING, &nvdimm->flags))
+ label++;
}
- if (alias)
+ if (label)
return ND_DEVICE_NAMESPACE_PMEM;
else
return ND_DEVICE_NAMESPACE_IO;
@@ -216,21 +216,25 @@ int nd_region_to_nstype(struct nd_region *nd_region)
}
EXPORT_SYMBOL(nd_region_to_nstype);
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static unsigned long long region_size(struct nd_region *nd_region)
{
- struct nd_region *nd_region = to_nd_region(dev);
- unsigned long long size = 0;
-
- if (is_memory(dev)) {
- size = nd_region->ndr_size;
+ if (is_memory(&nd_region->dev)) {
+ return nd_region->ndr_size;
} else if (nd_region->ndr_mappings == 1) {
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
- size = nd_mapping->size;
+ return nd_mapping->size;
}
- return sprintf(buf, "%llu\n", size);
+ return 0;
+}
+
+static ssize_t size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return sprintf(buf, "%llu\n", region_size(nd_region));
}
static DEVICE_ATTR_RO(size);
@@ -529,6 +533,54 @@ static ssize_t read_only_store(struct device *dev,
}
static DEVICE_ATTR_RW(read_only);
+static ssize_t align_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+
+ return sprintf(buf, "%#lx\n", nd_region->align);
+}
+
+static ssize_t align_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct nd_region *nd_region = to_nd_region(dev);
+ unsigned long val, dpa;
+ u32 remainder;
+ int rc;
+
+ rc = kstrtoul(buf, 0, &val);
+ if (rc)
+ return rc;
+
+ if (!nd_region->ndr_mappings)
+ return -ENXIO;
+
+ /*
+ * Ensure space-align is evenly divisible by the region
+ * interleave-width because the kernel typically has no facility
+ * to determine which DIMM(s), dimm-physical-addresses, would
+ * contribute to the tail capacity in system-physical-address
+ * space for the namespace.
+ */
+ dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder);
+ if (!is_power_of_2(dpa) || dpa < PAGE_SIZE
+ || val > region_size(nd_region) || remainder)
+ return -EINVAL;
+
+ /*
+ * Given that space allocation consults this value multiple
+ * times ensure it does not change for the duration of the
+ * allocation.
+ */
+ nvdimm_bus_lock(dev);
+ nd_region->align = val;
+ nvdimm_bus_unlock(dev);
+
+ return len;
+}
+static DEVICE_ATTR_RW(align);
+
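The validation in align_store() divides the requested region alignment by the interleave width and insists the per-DIMM share be a power of two, at least PAGE_SIZE, with no remainder. A sketch of the same arithmetic, with sizes invented for illustration:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096UL

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

/* mirrors the align_store() checks: val must split evenly across the
 * interleave set into a power-of-two, page-or-larger per-DIMM share */
static bool align_ok(unsigned long val, unsigned int mappings,
		     unsigned long region_size)
{
	unsigned long dpa = val / mappings;
	unsigned long rem = val % mappings;

	return is_power_of_2(dpa) && dpa >= PAGE_SIZE &&
	       val <= region_size && rem == 0;
}

int main(void)
{
	/* 4-way interleave, 1 GiB region */
	printf("%d\n", align_ok(64UL << 20, 4, 1UL << 30)); /* 1: 16M/DIMM */
	printf("%d\n", align_ok(96UL << 20, 4, 1UL << 30)); /* 0: 24M not pow2 */
	return 0;
}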
static ssize_t region_badblocks_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -571,6 +623,7 @@ static DEVICE_ATTR_RO(persistence_domain);
static struct attribute *nd_region_attributes[] = {
&dev_attr_size.attr,
+ &dev_attr_align.attr,
&dev_attr_nstype.attr,
&dev_attr_mappings.attr,
&dev_attr_btt_seed.attr,
@@ -626,6 +679,19 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return a->mode;
}
+ if (a == &dev_attr_align.attr) {
+ int i;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ if (test_bit(NDD_LABELING, &nvdimm->flags))
+ return a->mode;
+ }
+ return 0;
+ }
+
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
return a->mode;
@@ -935,6 +1001,41 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
}
EXPORT_SYMBOL(nd_region_release_lane);
+/*
+ * PowerPC requires this alignment for memremap_pages(). All other archs
+ * should be ok with SUBSECTION_SIZE (see memremap_compat_align()).
+ */
+#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M
+
+static unsigned long default_align(struct nd_region *nd_region)
+{
+ unsigned long align;
+ int i, mappings;
+ u32 remainder;
+
+ if (is_nd_blk(&nd_region->dev))
+ align = PAGE_SIZE;
+ else
+ align = MEMREMAP_COMPAT_ALIGN_MAX;
+
+ for (i = 0; i < nd_region->ndr_mappings; i++) {
+ struct nd_mapping *nd_mapping = &nd_region->mapping[i];
+ struct nvdimm *nvdimm = nd_mapping->nvdimm;
+
+ if (test_bit(NDD_ALIASING, &nvdimm->flags)) {
+ align = MEMREMAP_COMPAT_ALIGN_MAX;
+ break;
+ }
+ }
+
+ mappings = max_t(u16, 1, nd_region->ndr_mappings);
+ div_u64_rem(align, mappings, &remainder);
+ if (remainder)
+ align *= mappings;
+
+ return align;
+}
+
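default_align() starts from MEMREMAP_COMPAT_ALIGN_MAX (or PAGE_SIZE for BLK regions) and, when that value does not divide evenly across the mappings, multiplies it by the interleave width so the per-DIMM share stays whole. With invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned long align = 16UL << 20;	/* MEMREMAP_COMPAT_ALIGN_MAX */
	unsigned int mappings = 3;		/* 3-way interleave */

	if (align % mappings)			/* 16M % 3 != 0 */
		align *= mappings;		/* -> 48M, 16M per DIMM */
	printf("%lu MiB\n", align >> 20);	/* prints 48 */
	return 0;
}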
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
struct nd_region_desc *ndr_desc,
const struct device_type *dev_type, const char *caller)
@@ -1039,6 +1140,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
dev->of_node = ndr_desc->of_node;
nd_region->ndr_size = resource_size(ndr_desc->res);
nd_region->ndr_start = ndr_desc->res->start;
+ nd_region->align = default_align(nd_region);
if (ndr_desc->flush)
nd_region->flush = ndr_desc->flush;
else
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 4f907e3beda1..91c1bd659947 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -6,6 +6,7 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/hdreg.h>
@@ -1252,6 +1253,18 @@ static void nvme_enable_aen(struct nvme_ctrl *ctrl)
queue_work(nvme_wq, &ctrl->async_event_work);
}
+/*
+ * Convert integer values from ioctl structures to user pointers, silently
+ * ignoring the upper bits in the compat case to match behaviour of 32-bit
+ * kernels.
+ */
+static void __user *nvme_to_user_ptr(uintptr_t ptrval)
+{
+ if (in_compat_syscall())
+ ptrval = (compat_uptr_t)ptrval;
+ return (void __user *)ptrval;
+}
+
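nvme_to_user_ptr() exists because a 32-bit process may leave stale data in the upper half of the 64-bit addr/metadata fields; truncating through compat_uptr_t reproduces what a native 32-bit kernel would have seen. A userspace illustration of the truncation (the helper mirrors the patch in shape; everything else is invented):

#include <stdio.h>
#include <stdint.h>

/* stand-in for the kernel helper: in a compat (32-bit) syscall the
 * upper 32 bits of the user-supplied value are discarded */
static uintptr_t to_user_ptr(uint64_t ptrval, int in_compat_syscall)
{
	if (in_compat_syscall)
		ptrval = (uint32_t)ptrval;
	return (uintptr_t)ptrval;
}

int main(void)
{
	/* a 32-bit caller stored a pointer but never zeroed the top half */
	uint64_t raw = 0xdeadbeef00401000ULL;

	printf("native: %#llx\n", (unsigned long long)to_user_ptr(raw, 0));
	printf("compat: %#llx\n", (unsigned long long)to_user_ptr(raw, 1));
	/* compat prints 0x401000: the garbage upper bits are ignored */
	return 0;
}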
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
{
struct nvme_user_io io;
@@ -1275,7 +1288,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
length = (io.nblocks + 1) << ns->lba_shift;
meta_len = (io.nblocks + 1) * ns->ms;
- metadata = (void __user *)(uintptr_t)io.metadata;
+ metadata = nvme_to_user_ptr(io.metadata);
if (ns->ext) {
length += meta_len;
@@ -1298,7 +1311,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
c.rw.appmask = cpu_to_le16(io.appmask);
return nvme_submit_user_cmd(ns->queue, &c,
- (void __user *)(uintptr_t)io.addr, length,
+ nvme_to_user_ptr(io.addr), length,
metadata, meta_len, lower_32_bits(io.slba), NULL, 0);
}
@@ -1418,9 +1431,9 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata,
- cmd.metadata_len, 0, &result, timeout);
+ nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
+ 0, &result, timeout);
nvme_passthru_end(ctrl, effects);
if (status >= 0) {
@@ -1465,8 +1478,8 @@ static int nvme_user_cmd64(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
- (void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
+ nvme_to_user_ptr(cmd.addr), cmd.data_len,
+ nvme_to_user_ptr(cmd.metadata), cmd.metadata_len,
0, &cmd.result, timeout);
nvme_passthru_end(ctrl, effects);
@@ -1884,6 +1897,13 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
+ struct backing_dev_info *info =
+ ns->head->disk->queue->backing_dev_info;
+
+ info->capabilities |= BDI_CAP_STABLE_WRITES;
+ }
+
revalidate_disk(ns->head->disk);
}
#endif
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index a8bf2fb1287b..7dfc4a2ecf1e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -342,8 +342,7 @@ nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
!template->ls_req || !template->fcp_io ||
!template->ls_abort || !template->fcp_abort ||
!template->max_hw_queues || !template->max_sgl_segments ||
- !template->max_dif_sgl_segments || !template->dma_boundary ||
- !template->module) {
+ !template->max_dif_sgl_segments || !template->dma_boundary) {
ret = -EINVAL;
goto out_reghost_failed;
}
@@ -2016,7 +2015,6 @@ nvme_fc_ctrl_free(struct kref *ref)
{
struct nvme_fc_ctrl *ctrl =
container_of(ref, struct nvme_fc_ctrl, ref);
- struct nvme_fc_lport *lport = ctrl->lport;
unsigned long flags;
if (ctrl->ctrl.tagset) {
@@ -2043,7 +2041,6 @@ nvme_fc_ctrl_free(struct kref *ref)
if (ctrl->ctrl.opts)
nvmf_free_options(ctrl->ctrl.opts);
kfree(ctrl);
- module_put(lport->ops->module);
}
static void
@@ -3074,15 +3071,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
goto out_fail;
}
- if (!try_module_get(lport->ops->module)) {
- ret = -EUNATCH;
- goto out_free_ctrl;
- }
-
idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
if (idx < 0) {
ret = -ENOSPC;
- goto out_mod_put;
+ goto out_free_ctrl;
}
ctrl->ctrl.opts = opts;
@@ -3232,8 +3224,6 @@ out_free_queues:
out_free_ida:
put_device(ctrl->dev);
ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
-out_mod_put:
- module_put(lport->ops->module);
out_free_ctrl:
kfree(ctrl);
out_fail:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 61bf87592570..54603bd3e02d 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -510,7 +510,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (!nr_nsids)
return 0;
- down_write(&ctrl->namespaces_rwsem);
+ down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
unsigned nsid = le32_to_cpu(desc->nsids[n]);
@@ -521,7 +521,7 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
if (++n == nr_nsids)
break;
}
- up_write(&ctrl->namespaces_rwsem);
+ up_read(&ctrl->namespaces_rwsem);
return 0;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 76dbb55625ac..cac8a930396a 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1342,7 +1342,7 @@ static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
int ret;
sge->addr = qe->dma;
- sge->length = sizeof(struct nvme_command),
+ sge->length = sizeof(struct nvme_command);
sge->lkey = queue->device->pd->local_dma_lkey;
wr.next = NULL;
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 0ef14f0fad86..c15a92163c1f 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -174,16 +174,14 @@ static inline bool nvme_tcp_async_req(struct nvme_tcp_request *req)
static inline bool nvme_tcp_has_inline_data(struct nvme_tcp_request *req)
{
struct request *rq;
- unsigned int bytes;
if (unlikely(nvme_tcp_async_req(req)))
return false; /* async events don't have a request */
rq = blk_mq_rq_from_pdu(req);
- bytes = blk_rq_payload_bytes(rq);
- return rq_data_dir(rq) == WRITE && bytes &&
- bytes <= nvme_tcp_inline_data_size(req->queue);
+ return rq_data_dir(rq) == WRITE && req->data_len &&
+ req->data_len <= nvme_tcp_inline_data_size(req->queue);
}
static inline struct page *nvme_tcp_req_cur_page(struct nvme_tcp_request *req)
@@ -1075,7 +1073,7 @@ static void nvme_tcp_io_work(struct work_struct *w)
if (result > 0)
pending = true;
else if (unlikely(result < 0))
- break;
+ return;
if (!pending)
return;
@@ -2164,7 +2162,9 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue,
c->common.flags |= NVME_CMD_SGL_METABUF;
- if (rq_data_dir(rq) == WRITE && req->data_len &&
+ if (!blk_rq_nr_phys_segments(rq))
+ nvme_tcp_set_sg_null(c);
+ else if (rq_data_dir(rq) == WRITE &&
req->data_len <= nvme_tcp_inline_data_size(queue))
nvme_tcp_set_sg_inline(queue, c, req->data_len);
else
@@ -2191,7 +2191,8 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns,
req->data_sent = 0;
req->pdu_len = 0;
req->pdu_sent = 0;
- req->data_len = blk_rq_payload_bytes(rq);
+ req->data_len = blk_rq_nr_phys_segments(rq) ?
+ blk_rq_payload_bytes(rq) : 0;
req->curr_bio = rq->bio;
if (rq_data_dir(rq) == WRITE &&
@@ -2298,6 +2299,9 @@ static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
+ if (!test_bit(NVME_TCP_Q_LIVE, &queue->flags))
+ return 0;
+
if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue))
sk_busy_loop(sk, true);
nvme_tcp_try_recv(queue);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 7aa10788b7c8..58cabd7b6fc5 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1098,12 +1098,19 @@ static struct configfs_attribute *nvmet_referral_attrs[] = {
NULL,
};
-static void nvmet_referral_release(struct config_item *item)
+static void nvmet_referral_notify(struct config_group *group,
+ struct config_item *item)
{
struct nvmet_port *parent = to_nvmet_port(item->ci_parent->ci_parent);
struct nvmet_port *port = to_nvmet_port(item);
nvmet_referral_disable(parent, port);
+}
+
+static void nvmet_referral_release(struct config_item *item)
+{
+ struct nvmet_port *port = to_nvmet_port(item);
+
kfree(port);
}
@@ -1134,6 +1141,7 @@ static struct config_group *nvmet_referral_make(
static struct configfs_group_operations nvmet_referral_group_ops = {
.make_group = nvmet_referral_make,
+ .disconnect_notify = nvmet_referral_notify,
};
static const struct config_item_type nvmet_referrals_type = {
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index a0db6371b43e..a8ceb7721640 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -684,7 +684,7 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
disconnect = atomic_xchg(&queue->connected, 0);
spin_lock_irqsave(&queue->qlock, flags);
- /* about outstanding io's */
+ /* abort outstanding io's */
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->active) {
spin_lock(&fod->flock);
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 1c50af6219f3..f69ce66e2d44 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -198,10 +198,13 @@ struct fcloop_lport_priv {
};
struct fcloop_rport {
- struct nvme_fc_remote_port *remoteport;
- struct nvmet_fc_target_port *targetport;
- struct fcloop_nport *nport;
- struct fcloop_lport *lport;
+ struct nvme_fc_remote_port *remoteport;
+ struct nvmet_fc_target_port *targetport;
+ struct fcloop_nport *nport;
+ struct fcloop_lport *lport;
+ spinlock_t lock;
+ struct list_head ls_list;
+ struct work_struct ls_work;
};
struct fcloop_tport {
@@ -224,11 +227,10 @@ struct fcloop_nport {
};
struct fcloop_lsreq {
- struct fcloop_tport *tport;
struct nvmefc_ls_req *lsreq;
- struct work_struct work;
struct nvmefc_tgt_ls_req tgt_ls_req;
int status;
+ struct list_head ls_list; /* fcloop_rport->ls_list */
};
struct fcloop_rscn {
@@ -292,21 +294,32 @@ fcloop_delete_queue(struct nvme_fc_local_port *localport,
{
}
-
-/*
- * Transmit of LS RSP done (e.g. buffers all set). call back up
- * initiator "done" flows.
- */
static void
-fcloop_tgt_lsrqst_done_work(struct work_struct *work)
+fcloop_rport_lsrqst_work(struct work_struct *work)
{
- struct fcloop_lsreq *tls_req =
- container_of(work, struct fcloop_lsreq, work);
- struct fcloop_tport *tport = tls_req->tport;
- struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_rport *rport =
+ container_of(work, struct fcloop_rport, ls_work);
+ struct fcloop_lsreq *tls_req;
- if (!tport || tport->remoteport)
- lsreq->done(lsreq, tls_req->status);
+ spin_lock(&rport->lock);
+ for (;;) {
+ tls_req = list_first_entry_or_null(&rport->ls_list,
+ struct fcloop_lsreq, ls_list);
+ if (!tls_req)
+ break;
+
+ list_del(&tls_req->ls_list);
+ spin_unlock(&rport->lock);
+
+ tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
+ /*
+ * callee may free memory containing tls_req.
+ * do not reference lsreq after this.
+ */
+
+ spin_lock(&rport->lock);
+ }
+ spin_unlock(&rport->lock);
}
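The rework above uses a common drain idiom: pop one entry while holding the lock, drop the lock to run the completion (which may free the entry), then retake the lock before looking at the list again. A pthread sketch of the same shape, with types and names invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	struct req *next;
	int status;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct req *head;	/* singly linked stand-in for ls_list */

static void drain(void (*done)(struct req *))
{
	pthread_mutex_lock(&lock);
	for (;;) {
		struct req *r = head;

		if (!r)
			break;
		head = r->next;		/* unlink while locked */
		pthread_mutex_unlock(&lock);

		done(r);		/* may free r; don't touch it after */

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

static void complete(struct req *r)
{
	printf("status %d\n", r->status);
	free(r);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct req *r = calloc(1, sizeof(*r));

		r->status = i;
		r->next = head;
		head = r;
	}
	drain(complete);
	return 0;
}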
static int
@@ -319,17 +332,18 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
int ret = 0;
tls_req->lsreq = lsreq;
- INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
+ INIT_LIST_HEAD(&tls_req->ls_list);
if (!rport->targetport) {
tls_req->status = -ECONNREFUSED;
- tls_req->tport = NULL;
- schedule_work(&tls_req->work);
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ schedule_work(&rport->ls_work);
return ret;
}
tls_req->status = 0;
- tls_req->tport = rport->targetport->private;
ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
lsreq->rqstaddr, lsreq->rqstlen);
@@ -337,18 +351,28 @@ fcloop_ls_req(struct nvme_fc_local_port *localport,
}
static int
-fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
+fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
struct nvmefc_tgt_ls_req *tgt_lsreq)
{
struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
struct nvmefc_ls_req *lsreq = tls_req->lsreq;
+ struct fcloop_tport *tport = targetport->private;
+ struct nvme_fc_remote_port *remoteport = tport->remoteport;
+ struct fcloop_rport *rport;
memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
((lsreq->rsplen < tgt_lsreq->rsplen) ?
lsreq->rsplen : tgt_lsreq->rsplen));
+
tgt_lsreq->done(tgt_lsreq);
- schedule_work(&tls_req->work);
+ if (remoteport) {
+ rport = remoteport->private;
+ spin_lock(&rport->lock);
+ list_add_tail(&rport->ls_list, &tls_req->ls_list);
+ spin_unlock(&rport->lock);
+ schedule_work(&rport->ls_work);
+ }
return 0;
}
@@ -834,6 +858,7 @@ fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
struct fcloop_rport *rport = remoteport->private;
+ flush_work(&rport->ls_work);
fcloop_nport_put(rport->nport);
}
@@ -850,7 +875,6 @@ fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
#define FCLOOP_DMABOUND_4G 0xFFFFFFFF
static struct nvme_fc_port_template fctemplate = {
- .module = THIS_MODULE,
.localport_delete = fcloop_localport_delete,
.remoteport_delete = fcloop_remoteport_delete,
.create_queue = fcloop_create_queue,
@@ -1136,6 +1160,9 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->nport = nport;
rport->lport = nport->lport;
nport->rport = rport;
+ spin_lock_init(&rport->lock);
+ INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
+ INIT_LIST_HEAD(&rport->ls_list);
return count;
}
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index c90c06839d64..fd47de0e4e4e 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -78,6 +78,7 @@ enum nvmet_rdma_queue_state {
struct nvmet_rdma_queue {
struct rdma_cm_id *cm_id;
+ struct ib_qp *qp;
struct nvmet_port *port;
struct ib_cq *cq;
atomic_t sq_wr_avail;
@@ -105,6 +106,13 @@ struct nvmet_rdma_queue {
struct list_head queue_list;
};
+struct nvmet_rdma_port {
+ struct nvmet_port *nport;
+ struct sockaddr_storage addr;
+ struct rdma_cm_id *cm_id;
+ struct delayed_work repair_work;
+};
+
struct nvmet_rdma_device {
struct ib_device *device;
struct ib_pd *pd;
@@ -461,7 +469,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
if (ndev->srq)
ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
else
- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
+ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
if (unlikely(ret))
pr_err("post_recv cmd failed\n");
@@ -500,7 +508,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
if (rsp->n_rdma) {
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
}
@@ -584,7 +592,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
WARN_ON(rsp->n_rdma <= 0);
atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
+ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
queue->cm_id->port_num, rsp->req.sg,
rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
rsp->n_rdma = 0;
@@ -739,7 +747,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
}
if (nvmet_rdma_need_data_in(rsp)) {
- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
+ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
queue->cm_id->port_num, &rsp->read_cqe, NULL))
nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
} else {
@@ -911,7 +919,8 @@ static void nvmet_rdma_free_dev(struct kref *ref)
static struct nvmet_rdma_device *
nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
{
- struct nvmet_port *port = cm_id->context;
+ struct nvmet_rdma_port *port = cm_id->context;
+ struct nvmet_port *nport = port->nport;
struct nvmet_rdma_device *ndev;
int inline_page_count;
int inline_sge_count;
@@ -928,17 +937,17 @@ nvmet_rdma_find_get_device(struct rdma_cm_id *cm_id)
if (!ndev)
goto out_err;
- inline_page_count = num_pages(port->inline_data_size);
+ inline_page_count = num_pages(nport->inline_data_size);
inline_sge_count = max(cm_id->device->attrs.max_sge_rd,
cm_id->device->attrs.max_recv_sge) - 1;
if (inline_page_count > inline_sge_count) {
pr_warn("inline_data_size %d cannot be supported by device %s. Reducing to %lu.\n",
- port->inline_data_size, cm_id->device->name,
+ nport->inline_data_size, cm_id->device->name,
inline_sge_count * PAGE_SIZE);
- port->inline_data_size = inline_sge_count * PAGE_SIZE;
+ nport->inline_data_size = inline_sge_count * PAGE_SIZE;
inline_page_count = inline_sge_count;
}
- ndev->inline_data_size = port->inline_data_size;
+ ndev->inline_data_size = nport->inline_data_size;
ndev->inline_page_count = inline_page_count;
ndev->device = cm_id->device;
kref_init(&ndev->ref);
@@ -1024,6 +1033,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
pr_err("failed to create_qp ret= %d\n", ret);
goto err_destroy_cq;
}
+ queue->qp = queue->cm_id->qp;
atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
@@ -1052,11 +1062,10 @@ err_destroy_cq:
static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
{
- struct ib_qp *qp = queue->cm_id->qp;
-
- ib_drain_qp(qp);
- rdma_destroy_id(queue->cm_id);
- ib_destroy_qp(qp);
+ ib_drain_qp(queue->qp);
+ if (queue->cm_id)
+ rdma_destroy_id(queue->cm_id);
+ ib_destroy_qp(queue->qp);
ib_free_cq(queue->cq);
}
@@ -1266,6 +1275,7 @@ static int nvmet_rdma_cm_accept(struct rdma_cm_id *cm_id,
static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
struct rdma_cm_event *event)
{
+ struct nvmet_rdma_port *port = cm_id->context;
struct nvmet_rdma_device *ndev;
struct nvmet_rdma_queue *queue;
int ret = -EINVAL;
@@ -1281,7 +1291,7 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = -ENOMEM;
goto put_device;
}
- queue->port = cm_id->context;
+ queue->port = port->nport;
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
@@ -1290,9 +1300,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- schedule_work(&queue->release_work);
- /* Destroying rdma_cm id is not needed here */
- return 0;
+ /*
+ * Don't destroy the cm_id in free path, as we implicitly
+ * destroy the cm_id here with non-zero ret code.
+ */
+ queue->cm_id = NULL;
+ goto free_queue;
}
mutex_lock(&nvmet_rdma_queue_mutex);
@@ -1301,6 +1314,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
return 0;
+free_queue:
+ nvmet_rdma_free_queue(queue);
put_device:
kref_put(&ndev->ref, nvmet_rdma_free_dev);
@@ -1406,7 +1421,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
struct nvmet_rdma_queue *queue)
{
- struct nvmet_port *port;
+ struct nvmet_rdma_port *port;
if (queue) {
/*
@@ -1425,7 +1440,7 @@ static int nvmet_rdma_device_removal(struct rdma_cm_id *cm_id,
* cm_id destroy. use atomic xchg to make sure
* we don't compete with remove_port.
*/
- if (xchg(&port->priv, NULL) != cm_id)
+ if (xchg(&port->cm_id, NULL) != cm_id)
return 0;
/*
@@ -1456,6 +1471,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
nvmet_rdma_queue_established(queue);
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
+ if (!queue) {
+ struct nvmet_rdma_port *port = cm_id->context;
+
+ schedule_delayed_work(&port->repair_work, 0);
+ break;
+ }
+ /* FALLTHROUGH */
case RDMA_CM_EVENT_DISCONNECTED:
case RDMA_CM_EVENT_TIMEWAIT_EXIT:
nvmet_rdma_queue_disconnect(queue);
@@ -1498,42 +1520,19 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
-static int nvmet_rdma_add_port(struct nvmet_port *port)
+static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
- struct rdma_cm_id *cm_id;
- struct sockaddr_storage addr = { };
- __kernel_sa_family_t af;
- int ret;
+ struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
- switch (port->disc_addr.adrfam) {
- case NVMF_ADDR_FAMILY_IP4:
- af = AF_INET;
- break;
- case NVMF_ADDR_FAMILY_IP6:
- af = AF_INET6;
- break;
- default:
- pr_err("address family %d not supported\n",
- port->disc_addr.adrfam);
- return -EINVAL;
- }
-
- if (port->inline_data_size < 0) {
- port->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
- } else if (port->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
- pr_warn("inline_data_size %u is too large, reducing to %u\n",
- port->inline_data_size,
- NVMET_RDMA_MAX_INLINE_DATA_SIZE);
- port->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
- }
+ if (cm_id)
+ rdma_destroy_id(cm_id);
+}
- ret = inet_pton_with_scope(&init_net, af, port->disc_addr.traddr,
- port->disc_addr.trsvcid, &addr);
- if (ret) {
- pr_err("malformed ip/port passed: %s:%s\n",
- port->disc_addr.traddr, port->disc_addr.trsvcid);
- return ret;
- }
+static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
+{
+ struct sockaddr *addr = (struct sockaddr *)&port->addr;
+ struct rdma_cm_id *cm_id;
+ int ret;
cm_id = rdma_create_id(&init_net, nvmet_rdma_cm_handler, port,
RDMA_PS_TCP, IB_QPT_RC);
@@ -1552,23 +1551,19 @@ static int nvmet_rdma_add_port(struct nvmet_port *port)
goto out_destroy_id;
}
- ret = rdma_bind_addr(cm_id, (struct sockaddr *)&addr);
+ ret = rdma_bind_addr(cm_id, addr);
if (ret) {
- pr_err("binding CM ID to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("binding CM ID to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
ret = rdma_listen(cm_id, 128);
if (ret) {
- pr_err("listening to %pISpcs failed (%d)\n",
- (struct sockaddr *)&addr, ret);
+ pr_err("listening to %pISpcs failed (%d)\n", addr, ret);
goto out_destroy_id;
}
- pr_info("enabling port %d (%pISpcs)\n",
- le16_to_cpu(port->disc_addr.portid), (struct sockaddr *)&addr);
- port->priv = cm_id;
+ port->cm_id = cm_id;
return 0;
out_destroy_id:
@@ -1576,18 +1571,92 @@ out_destroy_id:
return ret;
}
-static void nvmet_rdma_remove_port(struct nvmet_port *port)
+static void nvmet_rdma_repair_port_work(struct work_struct *w)
{
- struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
+ struct nvmet_rdma_port *port = container_of(to_delayed_work(w),
+ struct nvmet_rdma_port, repair_work);
+ int ret;
- if (cm_id)
- rdma_destroy_id(cm_id);
+ nvmet_rdma_disable_port(port);
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ schedule_delayed_work(&port->repair_work, 5 * HZ);
+}
+
+static int nvmet_rdma_add_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port;
+ __kernel_sa_family_t af;
+ int ret;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ nport->priv = port;
+ port->nport = nport;
+ INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work);
+
+ switch (nport->disc_addr.adrfam) {
+ case NVMF_ADDR_FAMILY_IP4:
+ af = AF_INET;
+ break;
+ case NVMF_ADDR_FAMILY_IP6:
+ af = AF_INET6;
+ break;
+ default:
+ pr_err("address family %d not supported\n",
+ nport->disc_addr.adrfam);
+ ret = -EINVAL;
+ goto out_free_port;
+ }
+
+ if (nport->inline_data_size < 0) {
+ nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE;
+ } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) {
+ pr_warn("inline_data_size %u is too large, reducing to %u\n",
+ nport->inline_data_size,
+ NVMET_RDMA_MAX_INLINE_DATA_SIZE);
+ nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE;
+ }
+
+ ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr,
+ nport->disc_addr.trsvcid, &port->addr);
+ if (ret) {
+ pr_err("malformed ip/port passed: %s:%s\n",
+ nport->disc_addr.traddr, nport->disc_addr.trsvcid);
+ goto out_free_port;
+ }
+
+ ret = nvmet_rdma_enable_port(port);
+ if (ret)
+ goto out_free_port;
+
+ pr_info("enabling port %d (%pISpcs)\n",
+ le16_to_cpu(nport->disc_addr.portid),
+ (struct sockaddr *)&port->addr);
+
+ return 0;
+
+out_free_port:
+ kfree(port);
+ return ret;
+}
+
+static void nvmet_rdma_remove_port(struct nvmet_port *nport)
+{
+ struct nvmet_rdma_port *port = nport->priv;
+
+ cancel_delayed_work_sync(&port->repair_work);
+ nvmet_rdma_disable_port(port);
+ kfree(port);
}
static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
- struct nvmet_port *port, char *traddr)
+ struct nvmet_port *nport, char *traddr)
{
- struct rdma_cm_id *cm_id = port->priv;
+ struct nvmet_rdma_port *port = nport->priv;
+ struct rdma_cm_id *cm_id = port->cm_id;
if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) {
struct nvmet_rdma_rsp *rsp =
@@ -1597,7 +1666,7 @@ static void nvmet_rdma_disc_port_addr(struct nvmet_req *req,
sprintf(traddr, "%pISc", addr);
} else {
- memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
+ memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
}
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 9d00a24277aa..f96e5eaee87e 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev)
return IRQ_HANDLED;
}
-static struct irqaction irq2_action = {
- .handler = dummy_irq2_handler,
- .name = "cascade",
-};
-
static void init_eisa_pic(void)
{
unsigned long flags;
@@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev)
}
/* Reserve IRQ2 */
- setup_irq(2, &irq2_action);
+ if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL))
+ pr_err("Failed to request irq 2 (cascade)\n");
for (i = 0; i < 16; i++) {
irq_set_chip_and_handler(i, &eisa_interrupt_type,
handle_simple_irq);
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index 3ef0bb281e7c..390e92f2d8d1 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -366,6 +366,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
return 0;
}
+EXPORT_SYMBOL_GPL(pci_enable_pasid);
/**
* pci_disable_pasid - Disable the PASID capability
@@ -390,6 +391,7 @@ void pci_disable_pasid(struct pci_dev *pdev)
pdev->pasid_enabled = 0;
}
+EXPORT_SYMBOL_GPL(pci_disable_pasid);
/**
* pci_restore_pasid_state - Restore PASID capabilities
@@ -441,6 +443,7 @@ int pci_pasid_features(struct pci_dev *pdev)
return supported;
}
+EXPORT_SYMBOL_GPL(pci_pasid_features);
#define PASID_NUMBER_SHIFT 8
#define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT)
@@ -469,4 +472,5 @@ int pci_max_pasids(struct pci_dev *pdev)
return (1 << supported);
}
+EXPORT_SYMBOL_GPL(pci_max_pasids);
#endif /* CONFIG_PCI_PASID */
diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h
index 33c9b6ea7364..fb9b17fa0fb5 100644
--- a/drivers/pcmcia/cs_internal.h
+++ b/drivers/pcmcia/cs_internal.h
@@ -40,7 +40,7 @@ struct cis_cache_entry {
unsigned int addr;
unsigned int len;
unsigned int attr;
- unsigned char cache[0];
+ unsigned char cache[];
};
struct pccard_resource_ops {
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 0a04eb04f3a2..d3ef5534991e 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -329,7 +329,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
static struct platform_driver omap_cf_driver = {
.driver = {
- .name = (char *) driver_name,
+ .name = driver_name,
},
.remove = __exit_p(omap_cf_remove),
};
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 9e6922c08ef6..3b05760e69d6 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -1076,7 +1076,7 @@ static ssize_t show_io_db(struct device *dev,
for (p = data->io_db.next; p != &data->io_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1133,7 +1133,7 @@ static ssize_t show_mem_db(struct device *dev,
p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
@@ -1142,7 +1142,7 @@ static ssize_t show_mem_db(struct device *dev,
for (p = data->mem_db.next; p != &data->mem_db; p = p->next) {
if (ret > (PAGE_SIZE - 10))
continue;
- ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1),
+ ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1),
"0x%08lx - 0x%08lx\n",
((unsigned long) p->base),
((unsigned long) p->base + p->num - 1));
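These snprintf() to scnprintf() conversions matter because snprintf() returns the length the output would have taken, so accumulating its return value can push the write offset past the end of the buffer; scnprintf() returns only what was actually stored. A userspace stand-in (scnprintf is reimplemented here, since libc lacks it):

#include <stdarg.h>
#include <stdio.h>

/* userspace stand-in for the kernel's scnprintf(): returns the bytes
 * actually written, excluding the NUL, never more than size - 1 */
static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[16];
	int off = 0;

	/* each call wants 10 bytes; the second can only store 5 */
	off += scnprintf(buf + off, sizeof(buf) - off, "0123456789");
	off += scnprintf(buf + off, sizeof(buf) - off, "0123456789");
	printf("off=%d (stays within the 16-byte buffer)\n", off);
	/* with snprintf() both calls would return 10 and off would be 20 */
	return 0;
}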
diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c
index e2e8729afd9d..784ada5b8c4f 100644
--- a/drivers/pcmcia/sa1100_simpad.c
+++ b/drivers/pcmcia/sa1100_simpad.c
@@ -14,7 +14,7 @@
#include <asm/mach-types.h>
#include <mach/simpad.h>
#include "sa1100_generic.h"
-
+
static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
@@ -66,7 +66,7 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
break;
- case 33:
+ case 33:
simpad_clear_cs3_bit(VCC_3V_EN|EN1);
simpad_set_cs3_bit(VCC_5V_EN|EN0);
break;
@@ -95,7 +95,7 @@ static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
simpad_set_cs3_bit(PCMCIA_RESET);
}
-static struct pcmcia_low_level simpad_pcmcia_ops = {
+static struct pcmcia_low_level simpad_pcmcia_ops = {
.owner = THIS_MODULE,
.hw_init = simpad_pcmcia_hw_init,
.hw_shutdown = simpad_pcmcia_hw_shutdown,
diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h
index b7f993f1bbd0..222e81c79365 100644
--- a/drivers/pcmcia/soc_common.h
+++ b/drivers/pcmcia/soc_common.h
@@ -88,7 +88,7 @@ struct soc_pcmcia_socket {
struct skt_dev_info {
int nskt;
- struct soc_pcmcia_socket skt[0];
+ struct soc_pcmcia_socket skt[];
};
struct pcmcia_state {
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 49b1c6a1bdbe..bf6529b0b5b0 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -180,12 +180,12 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
for (i = 0; i < 0x24; i += 4) {
unsigned val;
if (!(i & 15))
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
val = cb_readl(socket, i);
- offset += snprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %08x", val);
}
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:");
for (i = 0; i < 0x45; i++) {
unsigned char val;
if (!(i & 7)) {
@@ -193,10 +193,10 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri
memcpy(buf + offset, " -", 2);
offset += 2;
} else
- offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i);
}
val = exca_readb(socket, i);
- offset += snprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
+ offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %02x", val);
}
buf[offset++] = '\n';
return offset;
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index 5f57282a28da..03ea5129ed0c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -7,7 +7,7 @@ config MFD_CROS_EC
tristate "Platform support for Chrome hardware (transitional)"
select CHROME_PLATFORMS
select CROS_EC
- select CONFIG_MFD_CROS_EC_DEV
+ select MFD_CROS_EC_DEV
depends on X86 || ARM || ARM64 || COMPILE_TEST
help
This is a transitional Kconfig option and will be removed after
@@ -214,6 +214,17 @@ config CROS_EC_SYSFS
To compile this driver as a module, choose M here: the
module will be called cros_ec_sysfs.
+config CROS_EC_TYPEC
+ tristate "ChromeOS EC Type-C Connector Control"
+ depends on MFD_CROS_EC_DEV && TYPEC
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for accessing Type C connector
+ information from the Chrome OS EC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called cros_ec_typec.
+
config CROS_USBPD_LOGGER
tristate "Logging driver for USB PD charger"
depends on CHARGER_CROS_USBPD
@@ -226,6 +237,20 @@ config CROS_USBPD_LOGGER
To compile this driver as a module, choose M here: the
module will be called cros_usbpd_logger.
+config CROS_USBPD_NOTIFY
+ tristate "ChromeOS Type-C power delivery event notifier"
+ depends on MFD_CROS_EC_DEV
+ default MFD_CROS_EC_DEV
+ help
+ If you say Y here, you get support for Type-C PD event notifications
+ from the ChromeOS EC. On ACPI platforms this driver will bind to the
+ GOOG0003 ACPI device, and on platforms which don't have this device it
+ will get initialized on ECs which support the feature
+ EC_FEATURE_USB_PD.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cros_usbpd_notify.
+
source "drivers/platform/chrome/wilco_ec/Kconfig"
endif # CHROMEOS_PLATFORMS
diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile
index aacd5920d8a1..41baccba033f 100644
--- a/drivers/platform/chrome/Makefile
+++ b/drivers/platform/chrome/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o
obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o
obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o
cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o
+obj-$(CONFIG_CROS_EC_TYPEC) += cros_ec_typec.o
obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o
obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o
obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o
@@ -19,8 +20,10 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o
obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o
obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o
obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o
-obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o
+cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o
+obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o
obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o
obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o
+obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o
obj-$(CONFIG_WILCO_EC) += wilco_ec/
diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c
index 4f3651fcd9fe..472a03daa869 100644
--- a/drivers/platform/chrome/chromeos_laptop.c
+++ b/drivers/platform/chrome/chromeos_laptop.c
@@ -103,7 +103,7 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter,
pr_debug("%d-%02x is probed at %02x\n",
adapter->nr, info->addr, dummy->addr);
i2c_unregister_device(dummy);
- client = i2c_new_device(adapter, info);
+ client = i2c_new_client_device(adapter, info);
}
}
diff --git a/drivers/platform/chrome/cros_ec.c b/drivers/platform/chrome/cros_ec.c
index 6fc8f2c3ac51..3104680b7485 100644
--- a/drivers/platform/chrome/cros_ec.c
+++ b/drivers/platform/chrome/cros_ec.c
@@ -120,7 +120,7 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
buf.msg.command = EC_CMD_HOST_SLEEP_EVENT;
- ret = cros_ec_cmd_xfer(ec_dev, &buf.msg);
+ ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg);
/* For now, report failure to transition to S0ix with a warning. */
if (ret >= 0 && ec_dev->host_sleep_v1 &&
@@ -138,6 +138,24 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event)
return ret;
}
+static int cros_ec_ready_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device,
+ notifier_ready);
+ u32 host_event = cros_ec_get_host_event(ec_dev);
+
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)) {
+ mutex_lock(&ec_dev->lock);
+ cros_ec_query_all(ec_dev);
+ mutex_unlock(&ec_dev->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
/**
* cros_ec_register() - Register a new ChromeOS EC, using the provided info.
* @ec_dev: Device to register.
@@ -237,6 +255,18 @@ int cros_ec_register(struct cros_ec_device *ec_dev)
dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec",
err);
+ if (ec_dev->mkbp_event_supported) {
+ /*
+ * Register the notifier for EC_HOST_EVENT_INTERFACE_READY
+ * event.
+ */
+ ec_dev->notifier_ready.notifier_call = cros_ec_ready_event;
+ err = blocking_notifier_chain_register(&ec_dev->event_notifier,
+ &ec_dev->notifier_ready);
+ if (err)
+ return err;
+ }
+
dev_info(dev, "Chrome EC device registered\n");
return 0;
diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c
index c65e70bc168d..e0bce869c49a 100644
--- a/drivers/platform/chrome/cros_ec_chardev.c
+++ b/drivers/platform/chrome/cros_ec_chardev.c
@@ -48,7 +48,7 @@ struct ec_event {
struct list_head node;
size_t size;
u8 event_type;
- u8 data[0];
+ u8 data[];
};
static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen)
@@ -301,7 +301,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg)
}
s_cmd->command += ec->cmd_offset;
- ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, s_cmd);
/* Only copy data to userland if data was received. */
if (ret < 0)
goto exit;
diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c
index b4c110c5fee0..b59180bff5a3 100644
--- a/drivers/platform/chrome/cros_ec_lightbar.c
+++ b/drivers/platform/chrome/cros_ec_lightbar.c
@@ -116,7 +116,7 @@ static int get_lightbar_version(struct cros_ec_dev *ec,
param = (struct ec_params_lightbar *)msg->data;
param->cmd = LIGHTBAR_CMD_VERSION;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
ret = 0;
goto exit;
@@ -193,15 +193,10 @@ static ssize_t brightness_store(struct device *dev,
if (ret)
goto exit;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto exit;
- }
-
ret = count;
exit:
kfree(msg);
@@ -258,13 +253,10 @@ static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr,
goto exit;
}
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS)
- goto exit;
-
i = 0;
ok = 1;
}
@@ -305,14 +297,13 @@ static ssize_t sequence_show(struct device *dev,
if (ret)
goto exit;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- goto exit;
-
- if (msg->result != EC_RES_SUCCESS) {
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
ret = scnprintf(buf, PAGE_SIZE,
"ERROR: EC returned %d\n", msg->result);
goto exit;
+ } else if (ret < 0) {
+ goto exit;
}
resp = (struct ec_response_lightbar *)msg->data;
@@ -344,13 +335,10 @@ static int lb_send_empty_cmd(struct cros_ec_dev *ec, uint8_t cmd)
if (ret)
goto error;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto error;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto error;
- }
+
ret = 0;
error:
kfree(msg);
@@ -377,13 +365,10 @@ static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable)
if (ret)
goto error;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto error;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto error;
- }
+
ret = 0;
error:
kfree(msg);
@@ -425,15 +410,10 @@ static ssize_t sequence_store(struct device *dev, struct device_attribute *attr,
if (ret)
goto exit;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto exit;
- }
-
ret = count;
exit:
kfree(msg);
@@ -487,13 +467,9 @@ static ssize_t program_store(struct device *dev, struct device_attribute *attr,
*/
msg->outsize = count + extra_bytes;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0)
goto exit;
- if (msg->result != EC_RES_SUCCESS) {
- ret = -EINVAL;
- goto exit;
- }
ret = count;
exit:
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 3cfa643f1d07..3e745e0fe092 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -553,7 +553,10 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer);
* replied with success status. It's not necessary to check msg->result when
* using this function.
*
- * Return: The number of bytes transferred on success or negative error code.
+ * Return:
+ * >=0 - The number of bytes transferred
+ * -ENOTSUPP - Operation not supported
+ * -EPROTO - Protocol error
*/
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg)
@@ -563,6 +566,10 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
ret = cros_ec_cmd_xfer(ec_dev, msg);
if (ret < 0) {
dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret);
+ } else if (msg->result == EC_RES_INVALID_VERSION) {
+ dev_dbg(ec_dev->dev, "Command invalid version (err:%d)\n",
+ msg->result);
+ return -ENOTSUPP;
} else if (msg->result != EC_RES_SUCCESS) {
dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result);
return -EPROTO;
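
A minimal caller sketch, not part of this series (my_read_features() and its buffer handling are illustrative assumptions), showing how the new convention lets a caller tell an unsupported command apart from an EC-level failure without peeking at msg->result:

static int my_read_features(struct cros_ec_device *ec_dev, u32 *out)
{
	struct ec_response_get_features *r;
	struct cros_ec_command *msg;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(*r), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->command = EC_CMD_GET_FEATURES;
	msg->insize = sizeof(*r);

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	if (ret >= 0) {
		/* EC replied with success; ret is the byte count. */
		r = (struct ec_response_get_features *)msg->data;
		*out = r->flags[0];
	} else if (ret == -ENOTSUPP) {
		/* EC firmware does not implement this command/version. */
	} else if (ret == -EPROTO) {
		/* EC replied, but with a non-success msg->result. */
	}

	kfree(msg);
	return ret < 0 ? ret : 0;
}
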
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c
index dbc3f5523b83..7e8629e3db74 100644
--- a/drivers/platform/chrome/cros_ec_rpmsg.c
+++ b/drivers/platform/chrome/cros_ec_rpmsg.c
@@ -44,6 +44,8 @@ struct cros_ec_rpmsg {
struct completion xfer_ack;
struct work_struct host_event_work;
struct rpmsg_endpoint *ept;
+ bool has_pending_host_event;
+ bool probe_done;
};
/**
@@ -177,7 +179,14 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
memcpy(ec_dev->din, resp->data, len);
complete(&ec_rpmsg->xfer_ack);
} else if (resp->type == HOST_EVENT_MARK) {
- schedule_work(&ec_rpmsg->host_event_work);
+ /*
+ * If the host event is sent before cros_ec_register is
+ * finished, queue the host event.
+ */
+ if (ec_rpmsg->probe_done)
+ schedule_work(&ec_rpmsg->host_event_work);
+ else
+ ec_rpmsg->has_pending_host_event = true;
} else {
dev_warn(ec_dev->dev, "rpmsg received invalid type = %d",
resp->type);
@@ -240,6 +249,11 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev)
return ret;
}
+ ec_rpmsg->probe_done = true;
+
+ if (ec_rpmsg->has_pending_host_event)
+ schedule_work(&ec_rpmsg->host_event_work);
+
return 0;
}
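
The defer-until-ready dance above, reduced to a hedged kernel-style sketch (the my_* names are hypothetical, not from this patch): events that race with probe are parked in a flag and replayed once registration has finished. As in the driver, this assumes the callback and the tail of probe do not run concurrently; if they could, the flags would need a lock or barriers.

struct my_chan {
	bool probe_done;
	bool has_pending_event;
	struct work_struct event_work;
};

/* Transport callback: may fire while probe is still running. */
static void my_rx_event(struct my_chan *ch)
{
	if (ch->probe_done)
		schedule_work(&ch->event_work);
	else
		ch->has_pending_event = true;
}

/* Tail of probe: replay anything that arrived early. */
static void my_probe_tail(struct my_chan *ch)
{
	ch->probe_done = true;
	if (ch->has_pending_event)
		schedule_work(&ch->event_work);
}
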
diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c
index 79fefd3bb0fa..b7f2c00db5e1 100644
--- a/drivers/platform/chrome/cros_ec_sensorhub.c
+++ b/drivers/platform/chrome/cros_ec_sensorhub.c
@@ -50,10 +50,8 @@ static int cros_ec_sensorhub_register(struct device *dev,
struct cros_ec_sensorhub *sensorhub)
{
int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 };
+ struct cros_ec_command *msg = sensorhub->msg;
struct cros_ec_dev *ec = sensorhub->ec;
- struct ec_params_motion_sense *params;
- struct ec_response_motion_sense *resp;
- struct cros_ec_command *msg;
int ret, i, sensor_num;
char *name;
@@ -65,27 +63,19 @@ static int cros_ec_sensorhub_register(struct device *dev,
return sensor_num;
}
+ sensorhub->sensor_num = sensor_num;
if (sensor_num == 0) {
dev_err(dev, "Zero sensors reported.\n");
return -EINVAL;
}
- /* Prepare a message to send INFO command to each sensor. */
- msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)),
- GFP_KERNEL);
- if (!msg)
- return -ENOMEM;
-
msg->version = 1;
- msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
- msg->outsize = sizeof(*params);
- msg->insize = sizeof(*resp);
- params = (struct ec_params_motion_sense *)msg->data;
- resp = (struct ec_response_motion_sense *)msg->data;
+ msg->insize = sizeof(struct ec_response_motion_sense);
+ msg->outsize = sizeof(struct ec_params_motion_sense);
for (i = 0; i < sensor_num; i++) {
- params->cmd = MOTIONSENSE_CMD_INFO;
- params->info.sensor_num = i;
+ sensorhub->params->cmd = MOTIONSENSE_CMD_INFO;
+ sensorhub->params->info.sensor_num = i;
ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
if (ret < 0) {
@@ -94,7 +84,7 @@ static int cros_ec_sensorhub_register(struct device *dev,
continue;
}
- switch (resp->info.type) {
+ switch (sensorhub->resp->info.type) {
case MOTIONSENSE_TYPE_ACCEL:
name = "cros-ec-accel";
break;
@@ -117,15 +107,16 @@ static int cros_ec_sensorhub_register(struct device *dev,
name = "cros-ec-activity";
break;
default:
- dev_warn(dev, "unknown type %d\n", resp->info.type);
+ dev_warn(dev, "unknown type %d\n",
+ sensorhub->resp->info.type);
continue;
}
ret = cros_ec_sensorhub_allocate_sensor(dev, name, i);
if (ret)
- goto error;
+ return ret;
- sensor_type[resp->info.type]++;
+ sensor_type[sensorhub->resp->info.type]++;
}
if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2)
@@ -137,29 +128,41 @@ static int cros_ec_sensorhub_register(struct device *dev,
"cros-ec-lid-angle",
0);
if (ret)
- goto error;
+ return ret;
}
- kfree(msg);
return 0;
-
-error:
- kfree(msg);
- return ret;
}
static int cros_ec_sensorhub_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ec = dev_get_drvdata(dev->parent);
struct cros_ec_sensorhub *data;
+ struct cros_ec_command *msg;
int ret;
int i;
+ msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) +
+ max((u16)sizeof(struct ec_params_motion_sense),
+ ec->ec_dev->max_response), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset;
+
data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->ec = dev_get_drvdata(dev->parent);
+ mutex_init(&data->cmd_lock);
+
+ data->dev = dev;
+ data->ec = ec;
+ data->msg = msg;
+ data->params = (struct ec_params_motion_sense *)msg->data;
+ data->resp = (struct ec_response_motion_sense *)msg->data;
+
dev_set_drvdata(dev, data);
/* Check whether this EC is a sensor hub. */
@@ -172,7 +175,8 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
* If the device has sensors but does not claim to
* be a sensor hub, we are in legacy mode.
*/
- for (i = 0; i < 2; i++) {
+ data->sensor_num = 2;
+ for (i = 0; i < data->sensor_num; i++) {
ret = cros_ec_sensorhub_allocate_sensor(dev,
"cros-ec-accel-legacy", i);
if (ret)
@@ -180,12 +184,63 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev)
}
}
+ /*
+ * If the EC does not have a FIFO, the sensors will query their data
+ * themselves via sysfs or a software trigger.
+ */
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) {
+ ret = cros_ec_sensorhub_ring_add(data);
+ if (ret)
+ return ret;
+ /*
+ * The msg and its data are not under the control of the ring
+ * handler.
+ */
+ return devm_add_action_or_reset(dev,
+ cros_ec_sensorhub_ring_remove,
+ data);
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/*
+ * When the EC is suspending, we must stop sending interrupts:
+ * we may use the same interrupt line for waking up the device.
+ * Tell the EC to stop sending non-interrupt events on the iio ring.
+ */
+static int cros_ec_sensorhub_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev);
+ struct cros_ec_dev *ec = sensorhub->ec;
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
return 0;
}
+static int cros_ec_sensorhub_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev);
+ struct cros_ec_dev *ec = sensorhub->ec;
+
+ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO))
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops,
+ cros_ec_sensorhub_suspend,
+ cros_ec_sensorhub_resume);
+
static struct platform_driver cros_ec_sensorhub_driver = {
.driver = {
.name = DRV_NAME,
+ .pm = &cros_ec_sensorhub_pm_ops,
},
.probe = cros_ec_sensorhub_probe,
};
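
The probe above ties ring teardown to device lifetime with devm_add_action_or_reset(). A reduced sketch of that pattern (my_setup(), my_setup_undo() and struct my_data are hypothetical): the useful property is that when registering the action itself fails, the action runs immediately, so the resource can never leak on a late probe error.

static void my_teardown(void *arg)
{
	struct my_data *data = arg;

	my_setup_undo(data);	/* hypothetical inverse of my_setup() */
}

static int my_probe(struct platform_device *pdev)
{
	struct my_data *data;
	int ret;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	ret = my_setup(data);	/* hypothetical setup step */
	if (ret)
		return ret;

	/* Runs my_teardown() on unbind, or right away if this call fails. */
	return devm_add_action_or_reset(&pdev->dev, my_teardown, data);
}
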
diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
new file mode 100644
index 000000000000..230e6cf3da2f
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for Chrome OS EC Sensor hub FIFO.
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/iio/iio.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_ec_sensorhub.h>
+#include <linux/platform_device.h>
+#include <linux/sort.h>
+#include <linux/slab.h>
+
+/* Precision of fixed point for the m values from the filter */
+#define M_PRECISION BIT(23)
+
+/* Only activate the filter once we have at least this many elements. */
+#define TS_HISTORY_THRESHOLD 8
+
+/*
+ * If we don't have any history entries for this long, empty the filter to
+ * make sure there are no big discontinuities.
+ */
+#define TS_HISTORY_BORED_US 500000
+
+/* To measure by how much the filter is overshooting, if it happens. */
+#define FUTURE_TS_ANALYTICS_COUNT_MAX 100
+
+static inline int
+cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub,
+ struct cros_ec_sensors_ring_sample *sample)
+{
+ cros_ec_sensorhub_push_data_cb_t cb;
+ int id = sample->sensor_id;
+ struct iio_dev *indio_dev;
+
+ if (id >= sensorhub->sensor_num)
+ return -EINVAL;
+
+ cb = sensorhub->push_data[id].push_data_cb;
+ if (!cb)
+ return 0;
+
+ indio_dev = sensorhub->push_data[id].indio_dev;
+
+ if (sample->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ return 0;
+
+ return cb(indio_dev, sample->vector, sample->timestamp);
+}
+
+/**
+ * cros_ec_sensorhub_register_push_data() - register the callback to the hub.
+ *
+ * @sensorhub : Sensor Hub object
+ * @sensor_num : The sensor the caller is interested in.
+ * @indio_dev : The iio device to use when a sample arrives.
+ * @cb : The callback to call when a sample arrives.
+ *
+ * The callback cb will be used by cros_ec_sensorhub_ring to distribute events
+ * from the EC.
+ *
+ * Return: 0 when the callback is registered.
+ * -EINVAL if the sensor number is invalid or the slot is already used.
+ */
+int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num,
+ struct iio_dev *indio_dev,
+ cros_ec_sensorhub_push_data_cb_t cb)
+{
+ if (sensor_num >= sensorhub->sensor_num)
+ return -EINVAL;
+ if (sensorhub->push_data[sensor_num].indio_dev)
+ return -EINVAL;
+
+ sensorhub->push_data[sensor_num].indio_dev = indio_dev;
+ sensorhub->push_data[sensor_num].push_data_cb = cb;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensorhub_register_push_data);
+
+void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub,
+ u8 sensor_num)
+{
+ sensorhub->push_data[sensor_num].indio_dev = NULL;
+ sensorhub->push_data[sensor_num].push_data_cb = NULL;
+}
+EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data);
+
+/**
+ * cros_ec_sensorhub_ring_fifo_enable() - Enable or disable interrupt generation
+ * for FIFO events.
+ * @sensorhub: Sensor Hub object
+ * @on: true when events are requested.
+ *
+ * To be called before sleeping or when no one is listening.
+ *
+ * Return: 0 on success, or an error when we cannot communicate with the EC.
+ */
+int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub,
+ bool on)
+{
+ int ret, i;
+
+ mutex_lock(&sensorhub->cmd_lock);
+ if (sensorhub->tight_timestamps)
+ for (i = 0; i < sensorhub->sensor_num; i++)
+ sensorhub->batch_state[i].last_len = 0;
+
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE;
+ sensorhub->params->fifo_int_enable.enable = on;
+
+ sensorhub->msg->outsize = sizeof(struct ec_params_motion_sense);
+ sensorhub->msg->insize = sizeof(struct ec_response_motion_sense);
+
+ ret = cros_ec_cmd_xfer_status(sensorhub->ec->ec_dev, sensorhub->msg);
+ mutex_unlock(&sensorhub->cmd_lock);
+
+ /* We expect to receive a payload of 4 bytes, ignore. */
+ if (ret > 0)
+ ret = 0;
+
+ return ret;
+}
+
+static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2)
+{
+ s64 v1 = *(s64 *)pv1;
+ s64 v2 = *(s64 *)pv2;
+
+ if (v1 > v2)
+ return 1;
+ else if (v1 < v2)
+ return -1;
+ else
+ return 0;
+}
+
+/*
+ * cros_ec_sensor_ring_median: Gets median of an array of numbers
+ *
+ * For now it's implemented using an O(n log n) sort, then returning
+ * the middle element. A more optimal method would be something like
+ * quickselect, but given that n = 64 we can probably live with it in the
+ * name of clarity.
+ *
+ * Warning: the input array gets modified (sorted)!
+ */
+static s64 cros_ec_sensor_ring_median(s64 *array, size_t length)
+{
+ sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL);
+ return array[length / 2];
+}
+
+/*
+ * IRQ Timestamp Filtering
+ *
+ * Lower down in cros_ec_sensor_ring_process_event(), for each sensor event
+ * we have to calculate its timestamp in the AP timebase. There are 3 time
+ * points:
+ * a - EC timebase, sensor event
+ * b - EC timebase, IRQ
+ * c - AP timebase, IRQ
+ * a' - what we want: sensor event in AP timebase
+ *
+ * While a and b are recorded at accurate times (due to the EC real time
+ * nature), c is pretty untrustworthy, even though it's recorded the
+ * first thing in ec_irq_handler(). There is a very good chance we'll get
+ * added latency due to:
+ * other irqs
+ * ddrfreq
+ * cpuidle
+ *
+ * Normally a' = c - b + a, but if we do that naive math any jitter in c
+ * will get coupled in a', which we don't want. We want a function
+ * a' = cros_ec_sensor_ring_ts_filter(a) which will filter out outliers in c.
+ *
+ * Think of a graph of AP time (c) on the y axis vs EC time (b) on the x axis.
+ * The slope of the line won't be exactly 1, there will be some clock drift
+ * between the 2 chips for various reasons (mechanical stress, temperature,
+ * voltage). We need to extrapolate values for a future x, without trusting
+ * recent y values too much.
+ *
+ * We use a median filter for the slope, then another median filter for the
+ * y-intercept to calculate this function:
+ * dx[n] = x[n-1] - x[n]
+ * dy[n] = y[n-1] - y[n]
+ * m[n] = dy[n] / dx[n]
+ * median_m = median(m[n-k:n])
+ * error[i] = y[n-i] - median_m * x[n-i]
+ * median_error = median(error[:k])
+ * predicted_y = median_m * x + median_error
+ *
+ * Implementation differences from above:
+ * - Redefined y to be actually c - b; this gives us a lot more precision
+ * to do the math. (c-b)/b variations are more obvious than c/b variations.
+ * - Since we don't have floating point, any operations involving slope are
+ * done using fixed point math (*M_PRECISION)
+ * - Since x and y grow with time, we keep zeroing the graph (relative to
+ * the last sample), this way math involving *x[n-i] will not overflow
+ * - EC timestamps are kept in us, it improves the slope calculation precision
+ */
+
+/**
+ * cros_ec_sensor_ring_ts_filter_update() - Update filter history.
+ *
+ * @state: Filter information.
+ * @b: IRQ timestamp, EC timebase (us)
+ * @c: IRQ timestamp, AP timebase (ns)
+ *
+ * Given a new IRQ timestamp pair (EC and AP timebases), add it to the filter
+ * history.
+ */
+static void
+cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state
+ *state,
+ s64 b, s64 c)
+{
+ s64 x, y;
+ s64 dx, dy;
+ s64 m; /* stored as *M_PRECISION */
+ s64 *m_history_copy = state->temp_buf;
+ s64 *error = state->temp_buf;
+ int i;
+
+ /* we trust b the most, that'll be our independent variable */
+ x = b;
+ /* y is the offset between AP and EC times, in ns */
+ y = c - b * 1000;
+
+ dx = (state->x_history[0] + state->x_offset) - x;
+ if (dx == 0)
+ return; /* we already have this irq in the history */
+ dy = (state->y_history[0] + state->y_offset) - y;
+ m = div64_s64(dy * M_PRECISION, dx);
+
+ /* Empty filter if we haven't seen any action in a while. */
+ if (-dx > TS_HISTORY_BORED_US)
+ state->history_len = 0;
+
+ /* Move everything over, also update offset to all absolute coords. */
+ for (i = state->history_len - 1; i >= 1; i--) {
+ state->x_history[i] = state->x_history[i - 1] + dx;
+ state->y_history[i] = state->y_history[i - 1] + dy;
+
+ state->m_history[i] = state->m_history[i - 1];
+ /*
+ * Also use the same loop to copy m_history for future
+ * median extraction.
+ */
+ m_history_copy[i] = state->m_history[i - 1];
+ }
+
+ /* Store the x and y, but remember the offset is actually the last sample. */
+ state->x_offset = x;
+ state->y_offset = y;
+ state->x_history[0] = 0;
+ state->y_history[0] = 0;
+
+ state->m_history[0] = m;
+ m_history_copy[0] = m;
+
+ if (state->history_len < CROS_EC_SENSORHUB_TS_HISTORY_SIZE)
+ state->history_len++;
+
+ /* Precalculate things for the filter. */
+ if (state->history_len > TS_HISTORY_THRESHOLD) {
+ state->median_m =
+ cros_ec_sensor_ring_median(m_history_copy,
+ state->history_len - 1);
+
+ /*
+ * Calculate y-intercepts as if median_m is the slope and
+ * points in the history are on the line. median_error will
+ * still be in the offset coordinate system.
+ */
+ for (i = 0; i < state->history_len; i++)
+ error[i] = state->y_history[i] -
+ div_s64(state->median_m * state->x_history[i],
+ M_PRECISION);
+ state->median_error =
+ cros_ec_sensor_ring_median(error, state->history_len);
+ } else {
+ state->median_m = 0;
+ state->median_error = 0;
+ }
+}
+
+/**
+ * cros_ec_sensor_ring_ts_filter() - Translate EC timebase timestamp to AP
+ * timebase
+ *
+ * @state: filter information.
+ * @x: any EC timestamp (us).
+ *
+ * cros_ec_sensor_ring_ts_filter(a) => a' event timestamp, AP timebase
+ * cros_ec_sensor_ring_ts_filter(b) => calculated timestamp when the EC IRQ
+ * should have happened on the AP, with low jitter
+ *
+ * Note: The filter will only activate once state->history_len goes
+ * over TS_HISTORY_THRESHOLD. Otherwise it'll just do the naive c - b + a
+ * transform.
+ *
+ * How to derive the formula, starting from:
+ * f(x) = median_m * x + median_error
+ * That's the calculated AP - EC offset (at the x point in time)
+ * Undo the coordinate system transform:
+ * f(x) = median_m * (x - x_offset) + median_error + y_offset
+ * Remember to undo the "y = c - b * 1000" modification:
+ * f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000
+ *
+ * Return: timestamp in AP timebase (ns)
+ */
+static s64
+cros_ec_sensor_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state,
+ s64 x)
+{
+ return div_s64(state->median_m * (x - state->x_offset), M_PRECISION)
+ + state->median_error + state->y_offset + x * 1000;
+}
+
+/*
+ * Since a and b were originally 32 bit values from the EC,
+ * they overflow relatively often; casting is not enough, so we need to
+ * add an offset.
+ */
+static void
+cros_ec_sensor_ring_fix_overflow(s64 *ts,
+ const s64 overflow_period,
+ struct cros_ec_sensors_ec_overflow_state
+ *state)
+{
+ s64 adjust;
+
+ *ts += state->offset;
+ if (abs(state->last - *ts) > (overflow_period / 2)) {
+ adjust = state->last > *ts ? overflow_period : -overflow_period;
+ state->offset += adjust;
+ *ts += adjust;
+ }
+ state->last = *ts;
+}
+
+static void
+cros_ec_sensor_ring_check_for_past_timestamp(struct cros_ec_sensorhub
+ *sensorhub,
+ struct cros_ec_sensors_ring_sample
+ *sample)
+{
+ const u8 sensor_id = sample->sensor_id;
+
+ /* If this event is earlier than one we saw before... */
+ if (sensorhub->batch_state[sensor_id].newest_sensor_event >
+ sample->timestamp)
+ /* mark it for spreading. */
+ sample->timestamp =
+ sensorhub->batch_state[sensor_id].last_ts;
+ else
+ sensorhub->batch_state[sensor_id].newest_sensor_event =
+ sample->timestamp;
+}
+
+/**
+ * cros_ec_sensor_ring_process_event() - Process one EC FIFO event
+ *
+ * @sensorhub: Sensor Hub object.
+ * @fifo_info: FIFO information from the EC (includes b point, EC timebase).
+ * @fifo_timestamp: EC IRQ, kernel timebase (aka c).
+ * @current_timestamp: calculated event timestamp, kernel timebase (aka a').
+ * @in: incoming FIFO event from EC (includes a point, EC timebase).
+ * @out: outgoing event to user space (includes a').
+ *
+ * Process one EC event, add it in the ring if necessary.
+ *
+ * Return: true if out event has been populated.
+ */
+static bool
+cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub,
+ const struct ec_response_motion_sense_fifo_info
+ *fifo_info,
+ const ktime_t fifo_timestamp,
+ ktime_t *current_timestamp,
+ struct ec_response_motion_sensor_data *in,
+ struct cros_ec_sensors_ring_sample *out)
+{
+ const s64 now = cros_ec_get_time_ns();
+ int axis, async_flags;
+
+ /* Do not populate the filter based on asynchronous events. */
+ async_flags = in->flags &
+ (MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH);
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) {
+ s64 a = in->timestamp;
+ s64 b = fifo_info->timestamp;
+ s64 c = fifo_timestamp;
+
+ cros_ec_sensor_ring_fix_overflow(&a, 1LL << 32,
+ &sensorhub->overflow_a);
+ cros_ec_sensor_ring_fix_overflow(&b, 1LL << 32,
+ &sensorhub->overflow_b);
+
+ if (sensorhub->tight_timestamps) {
+ cros_ec_sensor_ring_ts_filter_update(
+ &sensorhub->filter, b, c);
+ *current_timestamp = cros_ec_sensor_ring_ts_filter(
+ &sensorhub->filter, a);
+ } else {
+ s64 new_timestamp;
+
+ /*
+ * Disable filtering since we might add more jitter
+ * if b is at a random point in time.
+ */
+ new_timestamp = fifo_timestamp -
+ fifo_info->timestamp * 1000 +
+ in->timestamp * 1000;
+ /*
+ * The timestamp can be stale if we had to use the fifo
+ * info timestamp.
+ */
+ if (new_timestamp - *current_timestamp > 0)
+ *current_timestamp = new_timestamp;
+ }
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) {
+ if (sensorhub->tight_timestamps) {
+ sensorhub->batch_state[in->sensor_num].last_len = 0;
+ sensorhub->batch_state[in->sensor_num].penul_len = 0;
+ }
+ /*
+ * An ODR change is only useful for the sensor_ring; it does not
+ * convey information to clients.
+ */
+ return false;
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ out->sensor_id = in->sensor_num;
+ out->timestamp = *current_timestamp;
+ out->flag = in->flags;
+ if (sensorhub->tight_timestamps)
+ sensorhub->batch_state[out->sensor_id].last_len = 0;
+ /*
+ * No other payload information provided with
+ * flush ack.
+ */
+ return true;
+ }
+
+ if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP)
+ /* If we just have a timestamp, skip this entry. */
+ return false;
+
+ /* Regular sample */
+ out->sensor_id = in->sensor_num;
+ if (*current_timestamp - now > 0) {
+ /*
+ * This fix is needed to overcome the timestamp filter putting
+ * events in the future.
+ */
+ sensorhub->future_timestamp_total_ns +=
+ *current_timestamp - now;
+ if (++sensorhub->future_timestamp_count ==
+ FUTURE_TS_ANALYTICS_COUNT_MAX) {
+ s64 avg = div_s64(sensorhub->future_timestamp_total_ns,
+ sensorhub->future_timestamp_count);
+ dev_warn_ratelimited(sensorhub->dev,
+ "100 timestamps in the future, %lldns shaved on average\n",
+ avg);
+ sensorhub->future_timestamp_count = 0;
+ sensorhub->future_timestamp_total_ns = 0;
+ }
+ out->timestamp = now;
+ } else {
+ out->timestamp = *current_timestamp;
+ }
+
+ out->flag = in->flags;
+ for (axis = 0; axis < 3; axis++)
+ out->vector[axis] = in->data[axis];
+
+ if (sensorhub->tight_timestamps)
+ cros_ec_sensor_ring_check_for_past_timestamp(sensorhub, out);
+ return true;
+}
+
+/*
+ * cros_ec_sensor_ring_spread_add: Calculate proper timestamps then add to
+ * ringbuffer.
+ *
+ * This is the new spreading code; it assumes every sample's timestamp
+ * precedes the sample. Run if tight_timestamps == true.
+ *
+ * Sometimes the EC receives only one interrupt (hence timestamp) for
+ * a batch of samples. Only the first sample will have the correct
+ * timestamp. So we must interpolate the other samples.
+ * We use the previous batch timestamp and our current batch timestamp
+ * as a way to calculate period, then spread the samples evenly.
+ *
+ * s0 int, 0ms
+ * s1 int, 10ms
+ * s2 int, 20ms
+ * 30ms point goes by, no interrupt, previous one is still asserted
+ * downloading s2 and s3
+ * s3 sample, 20ms (incorrect timestamp)
+ * s4 int, 40ms
+ *
+ * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch
+ * has 2 samples in them, we adjust the timestamp of s3.
+ * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 30ms. Had s1 been
+ * part of a bigger batch, things would have gotten a little
+ * more complicated.
+ *
+ * Note: we also assume another sensor sample doesn't break up a batch
+ * in 2 or more partitions. For example, there can't ever be a sync sensor
+ * in between S2 and S3. This simplifies the following code.
+ */
+static void
+cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub,
+ unsigned long sensor_mask,
+ struct cros_ec_sensors_ring_sample *last_out)
+{
+ struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start;
+ int id;
+
+ for_each_set_bit(id, &sensor_mask, sensorhub->sensor_num) {
+ for (batch_start = sensorhub->ring; batch_start < last_out;
+ batch_start = next_batch_start) {
+ /*
+ * For each batch (where all samples have the same
+ * timestamp).
+ */
+ int batch_len, sample_idx;
+ struct cros_ec_sensors_ring_sample *batch_end =
+ batch_start;
+ struct cros_ec_sensors_ring_sample *s;
+ s64 batch_timestamp = batch_start->timestamp;
+ s64 sample_period;
+
+ /*
+ * Skip over batches that start with the sensor types
+ * we're not looking at right now.
+ */
+ if (batch_start->sensor_id != id) {
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ /*
+ * Do not start a batch from a flush, as it happens
+ * asynchronously to the regular flow of events.
+ */
+ if (batch_start->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) {
+ cros_sensorhub_send_sample(sensorhub,
+ batch_start);
+ next_batch_start = batch_start + 1;
+ continue;
+ }
+
+ if (batch_start->timestamp <=
+ sensorhub->batch_state[id].last_ts) {
+ batch_timestamp =
+ sensorhub->batch_state[id].last_ts;
+ batch_len = sensorhub->batch_state[id].last_len;
+
+ sample_idx = batch_len;
+
+ sensorhub->batch_state[id].last_ts =
+ sensorhub->batch_state[id].penul_ts;
+ sensorhub->batch_state[id].last_len =
+ sensorhub->batch_state[id].penul_len;
+ } else {
+ /*
+ * Push the first sample in the batch to the
+ * kfifo; it's guaranteed to be correct, and the
+ * rest will follow later on.
+ */
+ sample_idx = 1;
+ batch_len = 1;
+ cros_sensorhub_send_sample(sensorhub,
+ batch_start);
+ batch_start++;
+ }
+
+ /* Find all samples that have the same timestamp. */
+ for (s = batch_start; s < last_out; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't count them.
+ */
+ continue;
+ if (s->timestamp != batch_timestamp)
+ /* we discovered the next batch */
+ break;
+ if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH)
+ /* break on flush packets */
+ break;
+ batch_end = s;
+ batch_len++;
+ }
+
+ if (batch_len == 1)
+ goto done_with_this_batch;
+
+ /* Can we calculate period? */
+ if (sensorhub->batch_state[id].last_len == 0) {
+ dev_warn(sensorhub->dev, "Sensor %d: lost %d samples when spreading\n",
+ id, batch_len - 1);
+ goto done_with_this_batch;
+ /*
+ * Note: we're dropping the rest of the samples
+ * in this batch since we have no idea where
+ * they're supposed to go without a period
+ * calculation.
+ */
+ }
+
+ sample_period = div_s64(batch_timestamp -
+ sensorhub->batch_state[id].last_ts,
+ sensorhub->batch_state[id].last_len);
+ dev_dbg(sensorhub->dev,
+ "Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n",
+ batch_len, id,
+ sensorhub->batch_state[id].last_ts,
+ sensorhub->batch_state[id].last_len,
+ batch_timestamp,
+ sample_period);
+
+ /*
+ * Adjust timestamps of the samples then push them to
+ * kfifo.
+ */
+ for (s = batch_start; s <= batch_end; s++) {
+ if (s->sensor_id != id)
+ /*
+ * Skip over other sensor types that
+ * are interleaved, don't change them.
+ */
+ continue;
+
+ s->timestamp = batch_timestamp +
+ sample_period * sample_idx;
+ sample_idx++;
+
+ cros_sensorhub_send_sample(sensorhub, s);
+ }
+
+done_with_this_batch:
+ sensorhub->batch_state[id].penul_ts =
+ sensorhub->batch_state[id].last_ts;
+ sensorhub->batch_state[id].penul_len =
+ sensorhub->batch_state[id].last_len;
+
+ sensorhub->batch_state[id].last_ts =
+ batch_timestamp;
+ sensorhub->batch_state[id].last_len = batch_len;
+
+ next_batch_start = batch_end + 1;
+ }
+ }
+}
+
+/*
+ * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then
+ * add to ringbuffer (legacy).
+ *
+ * Note: This assumes we're running old firmware, where every sample's timestamp
+ * is after the sample. Run if tight_timestamps == false.
+ *
+ * If there is a sample with a proper timestamp:
+ *
+ * timestamp | count
+ * -----------------
+ * older_unprocess_out --> TS1 | 1
+ * TS1 | 2
+ * out --> TS1 | 3
+ * next_out --> TS2 |
+ *
+ * We spread time for the samples [older_unprocess_out .. out]
+ * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2].
+ *
+ * If we reach the end of the samples, we compare with the
+ * current timestamp:
+ *
+ * older_unprocess_out --> TS1 | 1
+ * TS1 | 2
+ * out --> TS1 | 3
+ *
+ * We now have [TS1+1/3, TS1+2/3, current timestamp].
+ */
+static void
+cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub,
+ unsigned long sensor_mask,
+ s64 current_timestamp,
+ struct cros_ec_sensors_ring_sample
+ *last_out)
+{
+ struct cros_ec_sensors_ring_sample *out;
+ int i;
+
+ for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) {
+ s64 older_timestamp;
+ s64 timestamp;
+ struct cros_ec_sensors_ring_sample *older_unprocess_out =
+ sensorhub->ring;
+ struct cros_ec_sensors_ring_sample *next_out;
+ int count = 1;
+
+ for (out = sensorhub->ring; out < last_out; out = next_out) {
+ s64 time_period;
+
+ next_out = out + 1;
+ if (out->sensor_id != i)
+ continue;
+
+ /* Timestamp to start with */
+ older_timestamp = out->timestamp;
+
+ /* Find next sample. */
+ while (next_out < last_out && next_out->sensor_id != i)
+ next_out++;
+
+ if (next_out >= last_out) {
+ timestamp = current_timestamp;
+ } else {
+ timestamp = next_out->timestamp;
+ if (timestamp == older_timestamp) {
+ count++;
+ continue;
+ }
+ }
+
+ /*
+ * The next sample has a new timestamp; spread the
+ * unprocessed samples.
+ */
+ if (next_out < last_out)
+ count++;
+ time_period = div_s64(timestamp - older_timestamp,
+ count);
+
+ for (; older_unprocess_out <= out;
+ older_unprocess_out++) {
+ if (older_unprocess_out->sensor_id != i)
+ continue;
+ older_timestamp += time_period;
+ older_unprocess_out->timestamp =
+ older_timestamp;
+ }
+ count = 1;
+ /* The next_out sample has a valid timestamp, skip. */
+ next_out++;
+ older_unprocess_out = next_out;
+ }
+ }
+
+ /* Push the events into the kfifo. */
+ for (out = sensorhub->ring; out < last_out; out++)
+ cros_sensorhub_send_sample(sensorhub, out);
+}
+
+/**
+ * cros_ec_sensorhub_ring_handler() - The trigger handler function
+ *
+ * @sensorhub: Sensor Hub object.
+ *
+ * Called by the notifier to process the EC sensor FIFO queue.
+ */
+static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub)
+{
+ struct ec_response_motion_sense_fifo_info *fifo_info =
+ sensorhub->fifo_info;
+ struct cros_ec_dev *ec = sensorhub->ec;
+ ktime_t fifo_timestamp, current_timestamp;
+ int i, j, number_data, ret;
+ unsigned long sensor_mask = 0;
+ struct ec_response_motion_sensor_data *in;
+ struct cros_ec_sensors_ring_sample *out, *last_out;
+
+ mutex_lock(&sensorhub->cmd_lock);
+
+ /* Get FIFO information if there are lost vectors. */
+ if (fifo_info->total_lost) {
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Need to retrieve the number of lost vectors per sensor */
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
+ sensorhub->msg->outsize = 1;
+ sensorhub->msg->insize = fifo_info_length;
+
+ if (cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg) < 0)
+ goto error;
+
+ memcpy(fifo_info, &sensorhub->resp->fifo_info,
+ fifo_info_length);
+
+ /*
+ * Update the collection time; it will not be as precise as in
+ * the non-error case.
+ */
+ fifo_timestamp = cros_ec_get_time_ns();
+ } else {
+ fifo_timestamp = sensorhub->fifo_timestamp[
+ CROS_EC_SENSOR_NEW_TS];
+ }
+
+ if (fifo_info->count > sensorhub->fifo_size ||
+ fifo_info->size != sensorhub->fifo_size) {
+ dev_warn(sensorhub->dev,
+ "Mismatch EC data: count %d, size %d - expected %d",
+ fifo_info->count, fifo_info->size,
+ sensorhub->fifo_size);
+ goto error;
+ }
+
+ /* Copy elements in the main fifo */
+ current_timestamp = sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS];
+ out = sensorhub->ring;
+ for (i = 0; i < fifo_info->count; i += number_data) {
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_READ;
+ sensorhub->params->fifo_read.max_data_vector =
+ fifo_info->count - i;
+ sensorhub->msg->outsize =
+ sizeof(struct ec_params_motion_sense);
+ sensorhub->msg->insize =
+ sizeof(sensorhub->resp->fifo_read) +
+ sensorhub->params->fifo_read.max_data_vector *
+ sizeof(struct ec_response_motion_sensor_data);
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
+ if (ret < 0) {
+ dev_warn(sensorhub->dev, "Fifo error: %d\n", ret);
+ break;
+ }
+ number_data = sensorhub->resp->fifo_read.number_data;
+ if (number_data == 0) {
+ dev_dbg(sensorhub->dev, "Unexpected empty FIFO\n");
+ break;
+ }
+ if (number_data > fifo_info->count - i) {
+ dev_warn(sensorhub->dev,
+ "Invalid EC data: too many entry received: %d, expected %d",
+ number_data, fifo_info->count - i);
+ break;
+ }
+ if (out + number_data >
+ sensorhub->ring + fifo_info->count) {
+ dev_warn(sensorhub->dev,
+ "Too many samples: %d (%zd data) to %d entries for expected %d entries",
+ i, out - sensorhub->ring, i + number_data,
+ fifo_info->count);
+ break;
+ }
+
+ for (in = sensorhub->resp->fifo_read.data, j = 0;
+ j < number_data; j++, in++) {
+ if (cros_ec_sensor_ring_process_event(
+ sensorhub, fifo_info,
+ fifo_timestamp,
+ &current_timestamp,
+ in, out)) {
+ sensor_mask |= BIT(in->sensor_num);
+ out++;
+ }
+ }
+ }
+ mutex_unlock(&sensorhub->cmd_lock);
+ last_out = out;
+
+ if (out == sensorhub->ring)
+ /* Unexpected empty FIFO. */
+ goto ring_handler_end;
+
+ /*
+ * Check if current_timestamp is ahead of the last sample. Normally,
+ * the EC appends a timestamp after the last sample, but if the AP
+ * is slow to respond to the IRQ, the EC may have added new samples.
+ * Use the FIFO info timestamp as last timestamp then.
+ */
+ if (!sensorhub->tight_timestamps &&
+ (last_out - 1)->timestamp == current_timestamp)
+ current_timestamp = fifo_timestamp;
+
+ /* Warn on lost samples. */
+ if (fifo_info->total_lost)
+ for (i = 0; i < sensorhub->sensor_num; i++) {
+ if (fifo_info->lost[i]) {
+ dev_warn_ratelimited(sensorhub->dev,
+ "Sensor %d: lost: %d out of %d\n",
+ i, fifo_info->lost[i],
+ fifo_info->total_lost);
+ if (sensorhub->tight_timestamps)
+ sensorhub->batch_state[i].last_len = 0;
+ }
+ }
+
+ /*
+ * Spread samples in case of batching, then add them to the
+ * ringbuffer.
+ */
+ if (sensorhub->tight_timestamps)
+ cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask,
+ last_out);
+ else
+ cros_ec_sensor_ring_spread_add_legacy(sensorhub, sensor_mask,
+ current_timestamp,
+ last_out);
+
+ring_handler_end:
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp;
+ return;
+
+error:
+ mutex_unlock(&sensorhub->cmd_lock);
+}
+
+static int cros_ec_sensorhub_event(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *_notify)
+{
+ struct cros_ec_sensorhub *sensorhub;
+ struct cros_ec_device *ec_dev;
+
+ sensorhub = container_of(nb, struct cros_ec_sensorhub, notifier);
+ ec_dev = sensorhub->ec->ec_dev;
+
+ if (ec_dev->event_data.event_type != EC_MKBP_EVENT_SENSOR_FIFO)
+ return NOTIFY_DONE;
+
+ if (ec_dev->event_size != sizeof(ec_dev->event_data.data.sensor_fifo)) {
+ dev_warn(ec_dev->dev, "Invalid fifo info size\n");
+ return NOTIFY_DONE;
+ }
+
+ if (queued_during_suspend)
+ return NOTIFY_OK;
+
+ memcpy(sensorhub->fifo_info, &ec_dev->event_data.data.sensor_fifo.info,
+ sizeof(*sensorhub->fifo_info));
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_NEW_TS] =
+ ec_dev->last_event_time;
+ cros_ec_sensorhub_ring_handler(sensorhub);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC
+ * supports it.
+ *
+ * @sensorhub : Sensor Hub object.
+ *
+ * Return: 0 on success.
+ */
+int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub)
+{
+ struct cros_ec_dev *ec = sensorhub->ec;
+ int ret;
+ int fifo_info_length =
+ sizeof(struct ec_response_motion_sense_fifo_info) +
+ sizeof(u16) * sensorhub->sensor_num;
+
+ /* Allocate the array for lost events. */
+ sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length,
+ GFP_KERNEL);
+ if (!sensorhub->fifo_info)
+ return -ENOMEM;
+
+ /* Retrieve FIFO information */
+ sensorhub->msg->version = 2;
+ sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO;
+ sensorhub->msg->outsize = 1;
+ sensorhub->msg->insize = fifo_info_length;
+
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Allocate the full fifo. We need to copy the whole FIFO to set
+ * timestamps properly.
+ */
+ sensorhub->fifo_size = sensorhub->resp->fifo_info.size;
+ sensorhub->ring = devm_kcalloc(sensorhub->dev, sensorhub->fifo_size,
+ sizeof(*sensorhub->ring), GFP_KERNEL);
+ if (!sensorhub->ring)
+ return -ENOMEM;
+
+ /*
+ * Allocate the callback area based on the number of sensors.
+ */
+ sensorhub->push_data = devm_kcalloc(
+ sensorhub->dev, sensorhub->sensor_num,
+ sizeof(*sensorhub->push_data),
+ GFP_KERNEL);
+ if (!sensorhub->push_data)
+ return -ENOMEM;
+
+ sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] =
+ cros_ec_get_time_ns();
+
+ sensorhub->tight_timestamps = cros_ec_check_features(
+ ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS);
+
+ if (sensorhub->tight_timestamps) {
+ sensorhub->batch_state = devm_kcalloc(sensorhub->dev,
+ sensorhub->sensor_num,
+ sizeof(*sensorhub->batch_state),
+ GFP_KERNEL);
+ if (!sensorhub->batch_state)
+ return -ENOMEM;
+ }
+
+ /* Register the notifier that will act as a top-half interrupt handler. */
+ sensorhub->notifier.notifier_call = cros_ec_sensorhub_event;
+ ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier,
+ &sensorhub->notifier);
+ if (ret < 0)
+ return ret;
+
+ /* Start collecting samples. */
+ return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true);
+}
+
+void cros_ec_sensorhub_ring_remove(void *arg)
+{
+ struct cros_ec_sensorhub *sensorhub = arg;
+ struct cros_ec_device *ec_dev = sensorhub->ec->ec_dev;
+
+ /* Disable the ring; prevent the EC from interrupting the AP needlessly. */
+ cros_ec_sensorhub_ring_fifo_enable(sensorhub, false);
+ blocking_notifier_chain_unregister(&ec_dev->event_notifier,
+ &sensorhub->notifier);
+}
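
To make the timestamp-filter math above concrete, here is a standalone userspace sketch with assumed sample values (qsort() stands in for the kernel's sort(), and the 2^23 factor mirrors M_PRECISION). One IRQ is deliberately ~90 us late; the median slope and median error reject it:

#include <stdio.h>
#include <stdlib.h>

static int cmp(const void *a, const void *b)
{
	long long x = *(const long long *)a, y = *(const long long *)b;

	return (x > y) - (x < y);
}

/* Sorts in place and returns the middle element, like the kernel helper. */
static long long median(long long *v, size_t n)
{
	qsort(v, n, sizeof(*v), cmp);
	return v[n / 2];
}

int main(void)
{
	/* x[i]: EC IRQ times (us); y[i] = c - b * 1000: AP-EC offsets (ns). */
	long long x[] = { 0, 1000, 2000, 3000, 4000 };
	long long y[] = { 500, 510, 90000 /* delayed IRQ */, 530, 540 };
	long long m[4], err[5], mm, me;
	size_t i;

	for (i = 1; i < 5; i++)
		m[i - 1] = (y[i] - y[i - 1]) * (1 << 23) / (x[i] - x[i - 1]);
	mm = median(m, 4);	/* the median slope ignores the outlier */

	for (i = 0; i < 5; i++)
		err[i] = y[i] - mm * x[i] / (1 << 23);
	me = median(err, 5);	/* and so does the median y-intercept */

	/* Predicted AP time for an EC event at x = 5000 us. */
	printf("a' = %lld ns\n", mm * 5000 / (1 << 23) + me + 5000 * 1000LL);
	return 0;
}

This prints a' = 5000550 ns: the ~10 ns/ms drift and ~500 ns base offset survive, while the 90 us outlier is discarded rather than dragging the estimate.
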
diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c
index 46786d2d679a..debea5c4c829 100644
--- a/drivers/platform/chrome/cros_ec_spi.c
+++ b/drivers/platform/chrome/cros_ec_spi.c
@@ -127,7 +127,8 @@ static int terminate_request(struct cros_ec_device *ec_dev)
*/
spi_message_init(&msg);
memset(&trans, 0, sizeof(trans));
- trans.delay_usecs = ec_spi->end_of_msg_delay;
+ trans.delay.value = ec_spi->end_of_msg_delay;
+ trans.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&trans, &msg);
ret = spi_sync_locked(ec_spi->spi, &msg);
@@ -416,7 +417,8 @@ static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev,
spi_message_init(&msg);
if (ec_spi->start_of_msg_delay) {
memset(&trans_delay, 0, sizeof(trans_delay));
- trans_delay.delay_usecs = ec_spi->start_of_msg_delay;
+ trans_delay.delay.value = ec_spi->start_of_msg_delay;
+ trans_delay.delay.unit = SPI_DELAY_UNIT_USECS;
spi_message_add_tail(&trans_delay, &msg);
}
diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c
index 07dac97ad57c..d45ea5d5bfa4 100644
--- a/drivers/platform/chrome/cros_ec_sysfs.c
+++ b/drivers/platform/chrome/cros_ec_sysfs.c
@@ -149,14 +149,14 @@ static ssize_t version_show(struct device *dev,
/* Get build info. */
msg->command = EC_CMD_GET_BUILD_INFO + ec->cmd_offset;
msg->insize = EC_HOST_PARAM_SIZE;
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Build info: XFER ERROR %d\n", ret);
- else if (msg->result != EC_RES_SUCCESS)
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: EC error %d\n", msg->result);
- else {
+ } else if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Build info: XFER ERROR %d\n", ret);
+ } else {
msg->data[EC_HOST_PARAM_SIZE - 1] = '\0';
count += scnprintf(buf + count, PAGE_SIZE - count,
"Build info: %s\n", msg->data);
@@ -165,14 +165,14 @@ static ssize_t version_show(struct device *dev,
/* Get chip info. */
msg->command = EC_CMD_GET_CHIP_INFO + ec->cmd_offset;
msg->insize = sizeof(*r_chip);
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Chip info: XFER ERROR %d\n", ret);
- else if (msg->result != EC_RES_SUCCESS)
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"Chip info: EC error %d\n", msg->result);
- else {
+ } else if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Chip info: XFER ERROR %d\n", ret);
+ } else {
r_chip = (struct ec_response_get_chip_info *)msg->data;
r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0';
@@ -189,14 +189,14 @@ static ssize_t version_show(struct device *dev,
/* Get board version */
msg->command = EC_CMD_GET_BOARD_VERSION + ec->cmd_offset;
msg->insize = sizeof(*r_board);
- ret = cros_ec_cmd_xfer(ec->ec_dev, msg);
- if (ret < 0)
- count += scnprintf(buf + count, PAGE_SIZE - count,
- "Board version: XFER ERROR %d\n", ret);
- else if (msg->result != EC_RES_SUCCESS)
+ ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
+ if (ret == -EPROTO) {
count += scnprintf(buf + count, PAGE_SIZE - count,
"Board version: EC error %d\n", msg->result);
- else {
+ } else if (ret < 0) {
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "Board version: XFER ERROR %d\n", ret);
+ } else {
r_board = (struct ec_response_board_version *)msg->data;
count += scnprintf(buf + count, PAGE_SIZE - count,
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
new file mode 100644
index 000000000000..874269c07073
--- /dev/null
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google LLC
+ *
+ * This driver provides the ability to view and manage Type C ports through the
+ * Chrome OS EC.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_data/cros_ec_commands.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_device.h>
+#include <linux/usb/typec.h>
+
+#define DRV_NAME "cros-ec-typec"
+
+/* Platform-specific data for the Chrome OS EC Type C controller. */
+struct cros_typec_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ int num_ports;
+ unsigned int cmd_ver;
+ /* Array of ports, indexed by port number. */
+ struct typec_port *ports[EC_USB_PD_MAX_PORTS];
+ /* Initial capabilities for each port. */
+ struct typec_capability *caps[EC_USB_PD_MAX_PORTS];
+};
+
+static int cros_typec_parse_port_props(struct typec_capability *cap,
+ struct fwnode_handle *fwnode,
+ struct device *dev)
+{
+ const char *buf;
+ int ret;
+
+ memset(cap, 0, sizeof(*cap));
+ ret = fwnode_property_read_string(fwnode, "power-role", &buf);
+ if (ret) {
+ dev_err(dev, "power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->type = ret;
+
+ ret = fwnode_property_read_string(fwnode, "data-role", &buf);
+ if (ret) {
+ dev_err(dev, "data-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_port_data_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->data = ret;
+
+ ret = fwnode_property_read_string(fwnode, "try-power-role", &buf);
+ if (ret) {
+ dev_err(dev, "try-power-role not found: %d\n", ret);
+ return ret;
+ }
+
+ ret = typec_find_power_role(buf);
+ if (ret < 0)
+ return ret;
+ cap->prefer_role = ret;
+
+ cap->fwnode = fwnode;
+
+ return 0;
+}
+
+static int cros_typec_init_ports(struct cros_typec_data *typec)
+{
+ struct device *dev = typec->dev;
+ struct typec_capability *cap;
+ struct fwnode_handle *fwnode;
+ const char *port_prop;
+ int ret;
+ int i;
+ int nports;
+ u32 port_num = 0;
+
+ nports = device_get_child_node_count(dev);
+ if (nports == 0) {
+ dev_err(dev, "No port entries found.\n");
+ return -ENODEV;
+ }
+
+ if (nports > typec->num_ports) {
+ dev_err(dev, "More ports listed than can be supported.\n");
+ return -EINVAL;
+ }
+
+ /* DT uses "reg" to specify port number. */
+ port_prop = dev->of_node ? "reg" : "port-number";
+ device_for_each_child_node(dev, fwnode) {
+ if (fwnode_property_read_u32(fwnode, port_prop, &port_num)) {
+ ret = -EINVAL;
+ dev_err(dev, "No port-number for port, aborting.\n");
+ goto unregister_ports;
+ }
+
+ if (port_num >= typec->num_ports) {
+ dev_err(dev, "Invalid port number.\n");
+ ret = -EINVAL;
+ goto unregister_ports;
+ }
+
+ dev_dbg(dev, "Registering port %d\n", port_num);
+
+ cap = devm_kzalloc(dev, sizeof(*cap), GFP_KERNEL);
+ if (!cap) {
+ ret = -ENOMEM;
+ goto unregister_ports;
+ }
+
+ typec->caps[port_num] = cap;
+
+ ret = cros_typec_parse_port_props(cap, fwnode, dev);
+ if (ret < 0)
+ goto unregister_ports;
+
+ typec->ports[port_num] = typec_register_port(dev, cap);
+ if (IS_ERR(typec->ports[port_num])) {
+ dev_err(dev, "Failed to register port %d\n", port_num);
+ ret = PTR_ERR(typec->ports[port_num]);
+ goto unregister_ports;
+ }
+ }
+
+ return 0;
+
+unregister_ports:
+ for (i = 0; i < typec->num_ports; i++)
+ typec_unregister_port(typec->ports[i]);
+ return ret;
+}
+
+static int cros_typec_ec_command(struct cros_typec_data *typec,
+ unsigned int version,
+ unsigned int command,
+ void *outdata,
+ unsigned int outsize,
+ void *indata,
+ unsigned int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->version = version;
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(typec->ec, msg);
+ if (ret >= 0 && insize)
+ memcpy(indata, msg->data, insize);
+
+ kfree(msg);
+ return ret;
+}
+
+static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control *resp)
+{
+ struct typec_port *port = typec->ports[port_num];
+ enum typec_orientation polarity;
+
+ if (!resp->enabled)
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+
+ typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_orientation(port, polarity);
+}
+
+static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control_v1 *resp)
+{
+ struct typec_port *port = typec->ports[port_num];
+ enum typec_orientation polarity;
+
+ if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+ typec_set_orientation(port, polarity);
+ typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
+ TYPEC_HOST : TYPEC_DEVICE);
+ typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
+ TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
+ TYPEC_SOURCE : TYPEC_SINK);
+}
+
+static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_params_usb_pd_control req;
+ struct ec_response_usb_pd_control_v1 resp;
+ int ret;
+
+ if (port_num < 0 || port_num >= typec->num_ports) {
+ dev_err(typec->dev, "cannot get status for invalid port %d\n",
+ port_num);
+ return -EINVAL;
+ }
+
+ req.port = port_num;
+ req.role = USB_PD_CTRL_ROLE_NO_CHANGE;
+ req.mux = USB_PD_CTRL_MUX_NO_CHANGE;
+ req.swap = USB_PD_CTRL_SWAP_NONE;
+
+ ret = cros_typec_ec_command(typec, typec->cmd_ver,
+ EC_CMD_USB_PD_CONTROL, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled);
+ dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role);
+ dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity);
+ dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state);
+
+ if (typec->cmd_ver == 1)
+ cros_typec_set_port_params_v1(typec, port_num, &resp);
+ else
+ cros_typec_set_port_params_v0(typec, port_num,
+ (struct ec_response_usb_pd_control *) &resp);
+
+ return 0;
+}
+
+static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
+{
+ struct ec_params_get_cmd_versions_v1 req_v1;
+ struct ec_response_get_cmd_versions resp;
+ int ret;
+
+ /* We're interested in the PD control command version. */
+ req_v1.cmd = EC_CMD_USB_PD_CONTROL;
+ ret = cros_typec_ec_command(typec, 1, EC_CMD_GET_CMD_VERSIONS,
+ &req_v1, sizeof(req_v1), &resp,
+ sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ if (resp.version_mask & EC_VER_MASK(1))
+ typec->cmd_ver = 1;
+ else
+ typec->cmd_ver = 0;
+
+ dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n",
+ typec->cmd_ver);
+
+ return 0;
+}
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id cros_typec_acpi_id[] = {
+ { "GOOG0014", 0 },
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, cros_typec_acpi_id);
+#endif
+
+#ifdef CONFIG_OF
+static const struct of_device_id cros_typec_of_match[] = {
+ { .compatible = "google,cros-ec-typec", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, cros_typec_of_match);
+#endif
+
+static int cros_typec_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_typec_data *typec;
+ struct ec_response_usb_pd_ports resp;
+ int ret, i;
+
+ typec = devm_kzalloc(dev, sizeof(*typec), GFP_KERNEL);
+ if (!typec)
+ return -ENOMEM;
+
+ typec->dev = dev;
+ typec->ec = dev_get_drvdata(pdev->dev.parent);
+ platform_set_drvdata(pdev, typec);
+
+ ret = cros_typec_get_cmd_version(typec);
+ if (ret < 0) {
+ dev_err(dev, "failed to get PD command version info\n");
+ return ret;
+ }
+
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
+ &resp, sizeof(resp));
+ if (ret < 0)
+ return ret;
+
+ typec->num_ports = resp.num_ports;
+ if (typec->num_ports > EC_USB_PD_MAX_PORTS) {
+ dev_warn(typec->dev,
+ "Too many ports reported: %d, limiting to max: %d\n",
+ typec->num_ports, EC_USB_PD_MAX_PORTS);
+ typec->num_ports = EC_USB_PD_MAX_PORTS;
+ }
+
+ ret = cros_typec_init_ports(typec);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < typec->num_ports; i++) {
+ ret = cros_typec_port_update(typec, i);
+ if (ret < 0)
+ goto unregister_ports;
+ }
+
+ return 0;
+
+unregister_ports:
+ for (i = 0; i < typec->num_ports; i++)
+ if (typec->ports[i])
+ typec_unregister_port(typec->ports[i]);
+ return ret;
+}
+
+static struct platform_driver cros_typec_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .acpi_match_table = ACPI_PTR(cros_typec_acpi_id),
+ .of_match_table = of_match_ptr(cros_typec_of_match),
+ },
+ .probe = cros_typec_probe,
+};
+
+module_platform_driver(cros_typec_driver);
+
+MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>");
+MODULE_DESCRIPTION("Chrome OS EC Type C control");
+MODULE_LICENSE("GPL");
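
For orientation, the per-port registration that cros_typec_init_ports() performs boils down to the typec class API below; a reduced sketch with hypothetical hard-coded roles standing in for the parsed fwnode properties:

static struct typec_port *my_register_one(struct device *dev)
{
	struct typec_capability cap = {};

	cap.type = TYPEC_PORT_DRP;	/* from "power-role" */
	cap.data = TYPEC_PORT_DRD;	/* from "data-role" */
	cap.prefer_role = TYPEC_SINK;	/* from "try-power-role" */

	/* Returns ERR_PTR() on failure; pair with typec_unregister_port(). */
	return typec_register_port(dev, &cap);
}
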
diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c
index 8edae465105c..46482d12cffe 100644
--- a/drivers/platform/chrome/cros_ec_vbc.c
+++ b/drivers/platform/chrome/cros_ec_vbc.c
@@ -40,7 +40,7 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj,
msg->outsize = para_sz;
msg->insize = resp_sz;
- err = cros_ec_cmd_xfer(ecdev, msg);
+ err = cros_ec_cmd_xfer_status(ecdev, msg);
if (err < 0) {
dev_err(dev, "Error sending read request: %d\n", err);
kfree(msg);
@@ -83,7 +83,7 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj,
msg->outsize = para_sz;
msg->insize = 0;
- err = cros_ec_cmd_xfer(ecdev, msg);
+ err = cros_ec_cmd_xfer_status(ecdev, msg);
if (err < 0) {
dev_err(dev, "Error sending write request: %d\n", err);
kfree(msg);
diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c
new file mode 100644
index 000000000000..7f36142ab12a
--- /dev/null
+++ b/drivers/platform/chrome/cros_usbpd_notify.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2020 Google LLC
+ *
+ * This driver serves as the receiver of cros_ec PD host events.
+ */
+
+#include <linux/acpi.h>
+#include <linux/module.h>
+#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
+#include <linux/platform_device.h>
+
+#define DRV_NAME "cros-usbpd-notify"
+#define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi"
+#define ACPI_DRV_NAME "GOOG0003"
+
+static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list);
+
+struct cros_usbpd_notify_data {
+ struct device *dev;
+ struct cros_ec_device *ec;
+ struct notifier_block nb;
+};
+
+/**
+ * cros_usbpd_register_notify - Register a notifier callback for PD events.
+ * @nb: Notifier block pointer to register
+ *
+ * On ACPI platforms this corresponds to host events on the ECPD
+ * "GOOG0003" ACPI device. On non-ACPI platforms this will filter mkbp events
+ * for USB PD events.
+ *
+ * Return: 0 on success or negative error code.
+ */
+int cros_usbpd_register_notify(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&cros_usbpd_notifier_list,
+ nb);
+}
+EXPORT_SYMBOL_GPL(cros_usbpd_register_notify);
+
+/**
+ * cros_usbpd_unregister_notify - Unregister notifier callback for PD events.
+ * @nb: Notifier block pointer to unregister
+ *
+ * Unregister a notifier callback that was previously registered with
+ * cros_usbpd_register_notify().
+ */
+void cros_usbpd_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&cros_usbpd_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cros_usbpd_unregister_notify);
+
+/**
+ * cros_ec_pd_command - Send a command to the EC.
+ *
+ * @ec_dev: EC device
+ * @command: EC command
+ * @outdata: EC command output data
+ * @outsize: Size of outdata
+ * @indata: EC command input data
+ * @insize: Size of indata
+ *
+ * Return: >= 0 on success, negative error number on failure.
+ */
+static int cros_ec_pd_command(struct cros_ec_device *ec_dev,
+ int command,
+ uint8_t *outdata,
+ int outsize,
+ uint8_t *indata,
+ int insize)
+{
+ struct cros_ec_command *msg;
+ int ret;
+
+ msg = kzalloc(sizeof(*msg) + max(insize, outsize), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->command = command;
+ msg->outsize = outsize;
+ msg->insize = insize;
+
+ if (outsize)
+ memcpy(msg->data, outdata, outsize);
+
+ ret = cros_ec_cmd_xfer_status(ec_dev, msg);
+ if (ret < 0)
+ goto error;
+
+ if (insize)
+ memcpy(indata, msg->data, insize);
+error:
+ kfree(msg);
+ return ret;
+}
+
+static void cros_usbpd_get_event_and_notify(struct device *dev,
+ struct cros_ec_device *ec_dev)
+{
+ struct ec_response_host_event_status host_event_status;
+ u32 event = 0;
+ int ret;
+
+ /*
+ * We still send a 0 event out to older devices which don't
+ * have the updated device hierarchy.
+ */
+ if (!ec_dev) {
+ dev_dbg(dev,
+ "EC device inaccessible; sending 0 event status.\n");
+ goto send_notify;
+ }
+
+ /* Check for PD host events on EC. */
+ ret = cros_ec_pd_command(ec_dev, EC_CMD_PD_HOST_EVENT_STATUS,
+ NULL, 0,
+ (uint8_t *)&host_event_status,
+ sizeof(host_event_status));
+ if (ret < 0) {
+ dev_warn(dev, "Can't get host event status (err: %d)\n", ret);
+ goto send_notify;
+ }
+
+ event = host_event_status.status;
+
+send_notify:
+ blocking_notifier_call_chain(&cros_usbpd_notifier_list, event, NULL);
+}
+
+#ifdef CONFIG_ACPI
+
+static void cros_usbpd_notify_acpi(acpi_handle device, u32 event, void *data)
+{
+ struct cros_usbpd_notify_data *pdnotify = data;
+
+ cros_usbpd_get_event_and_notify(pdnotify->dev, pdnotify->ec);
+}
+
+static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev)
+{
+ struct cros_usbpd_notify_data *pdnotify;
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev;
+ struct cros_ec_device *ec_dev;
+ acpi_status status;
+
+ adev = ACPI_COMPANION(dev);
+
+ pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
+ if (!pdnotify)
+ return -ENOMEM;
+
+ /* Get the EC device pointer needed to talk to the EC. */
+ ec_dev = dev_get_drvdata(dev->parent);
+ if (!ec_dev) {
+ /*
+ * We continue even for older devices which don't have the
+		 * correct device hierarchy, namely, GOOG0003 is a child
+ * of GOOG0004.
+ */
+ dev_warn(dev, "Couldn't get Chrome EC device pointer.\n");
+ }
+
+ pdnotify->dev = dev;
+ pdnotify->ec = ec_dev;
+
+ status = acpi_install_notify_handler(adev->handle,
+ ACPI_ALL_NOTIFY,
+ cros_usbpd_notify_acpi,
+ pdnotify);
+ if (ACPI_FAILURE(status)) {
+ dev_warn(dev, "Failed to register notify handler %08x\n",
+ status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_notify_remove_acpi(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY,
+ cros_usbpd_notify_acpi);
+
+ return 0;
+}
+
+static const struct acpi_device_id cros_usbpd_notify_acpi_device_ids[] = {
+ { ACPI_DRV_NAME, 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, cros_usbpd_notify_acpi_device_ids);
+
+static struct platform_driver cros_usbpd_notify_acpi_driver = {
+ .driver = {
+ .name = DRV_NAME_PLAT_ACPI,
+ .acpi_match_table = cros_usbpd_notify_acpi_device_ids,
+ },
+ .probe = cros_usbpd_notify_probe_acpi,
+ .remove = cros_usbpd_notify_remove_acpi,
+};
+
+#endif /* CONFIG_ACPI */
+
+static int cros_usbpd_notify_plat(struct notifier_block *nb,
+ unsigned long queued_during_suspend,
+ void *data)
+{
+ struct cros_usbpd_notify_data *pdnotify = container_of(nb,
+ struct cros_usbpd_notify_data, nb);
+ struct cros_ec_device *ec_dev = (struct cros_ec_device *)data;
+ u32 host_event = cros_ec_get_host_event(ec_dev);
+
+ if (!host_event)
+ return NOTIFY_DONE;
+
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
+ cros_usbpd_get_event_and_notify(pdnotify->dev, ec_dev);
+ return NOTIFY_OK;
+ }
+ return NOTIFY_DONE;
+}
+
+static int cros_usbpd_notify_probe_plat(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
+ struct cros_usbpd_notify_data *pdnotify;
+ int ret;
+
+ pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL);
+ if (!pdnotify)
+ return -ENOMEM;
+
+ pdnotify->dev = dev;
+ pdnotify->ec = ecdev->ec_dev;
+ pdnotify->nb.notifier_call = cros_usbpd_notify_plat;
+
+ dev_set_drvdata(dev, pdnotify);
+
+ ret = blocking_notifier_chain_register(&ecdev->ec_dev->event_notifier,
+ &pdnotify->nb);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register notifier\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cros_usbpd_notify_remove_plat(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent);
+ struct cros_usbpd_notify_data *pdnotify =
+ (struct cros_usbpd_notify_data *)dev_get_drvdata(dev);
+
+ blocking_notifier_chain_unregister(&ecdev->ec_dev->event_notifier,
+ &pdnotify->nb);
+
+ return 0;
+}
+
+static struct platform_driver cros_usbpd_notify_plat_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ },
+ .probe = cros_usbpd_notify_probe_plat,
+ .remove = cros_usbpd_notify_remove_plat,
+};
+
+static int __init cros_usbpd_notify_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&cros_usbpd_notify_plat_driver);
+ if (ret < 0)
+ return ret;
+
+#ifdef CONFIG_ACPI
+ platform_driver_register(&cros_usbpd_notify_acpi_driver);
+#endif
+ return 0;
+}
+
+static void __exit cros_usbpd_notify_exit(void)
+{
+#ifdef CONFIG_ACPI
+ platform_driver_unregister(&cros_usbpd_notify_acpi_driver);
+#endif
+ platform_driver_unregister(&cros_usbpd_notify_plat_driver);
+}
+
+module_init(cros_usbpd_notify_init);
+module_exit(cros_usbpd_notify_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("ChromeOS power delivery notifier device");
+MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>");
+MODULE_ALIAS("platform:" DRV_NAME);
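
For reference, a minimal sketch of how a consumer driver would use the notifier API introduced above; the function and variable names here (my_pd_event, my_nb) are illustrative and not part of the patch:

	#include <linux/notifier.h>
	#include <linux/platform_data/cros_usbpd_notify.h>
	#include <linux/printk.h>

	static int my_pd_event(struct notifier_block *nb,
			       unsigned long host_event, void *data)
	{
		/*
		 * host_event carries the EC_CMD_PD_HOST_EVENT_STATUS bits,
		 * or 0 on older platforms where the status could not be read.
		 */
		pr_info("PD host event status: 0x%lx\n", host_event);
		return NOTIFY_OK;
	}

	static struct notifier_block my_nb = {
		.notifier_call = my_pd_event,
	};

	/* in probe():  cros_usbpd_register_notify(&my_nb);   */
	/* in remove(): cros_usbpd_unregister_notify(&my_nb); */
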
diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c
index dba3d445623f..814518509739 100644
--- a/drivers/platform/chrome/wilco_ec/event.c
+++ b/drivers/platform/chrome/wilco_ec/event.c
@@ -79,7 +79,7 @@ static DEFINE_IDA(event_ida);
struct ec_event {
u16 size;
u16 type;
- u16 event[0];
+ u16 event[];
} __packed;
#define ec_event_num_words(ev) (ev->size - 1)
@@ -96,7 +96,7 @@ struct ec_event_queue {
int capacity;
int head;
int tail;
- struct ec_event *entries[0];
+ struct ec_event *entries[];
};
/* Maximum number of events to store in ec_event_queue */
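
The two hunks above convert the old GCC zero-length-array idiom (member[0]) to a C99 flexible array member (member[]); the struct layout and allocation sizes are unchanged, but the compiler can now diagnose misuse. A minimal sketch of how such a structure is sized at allocation time, assuming the ec_event layout above (n is a hypothetical word count):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	/*
	 * struct_size() computes sizeof(struct ec_event) + n * sizeof(u16)
	 * with overflow checking, which is the idiomatic way to allocate a
	 * structure ending in a flexible array member.
	 */
	struct ec_event *ev = kzalloc(struct_size(ev, event, n), GFP_KERNEL);
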
diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c
index 62f27610dd33..c2bf4c95c5d2 100644
--- a/drivers/platform/chrome/wilco_ec/properties.c
+++ b/drivers/platform/chrome/wilco_ec/properties.c
@@ -3,8 +3,11 @@
* Copyright 2019 Google LLC
*/
+#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/platform_data/wilco-ec.h>
#include <linux/string.h>
+#include <linux/types.h>
#include <asm/unaligned.h>
/* Operation code; what the EC should do with the property */
diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c
index f0d174b6bb21..3c587b4054a5 100644
--- a/drivers/platform/chrome/wilco_ec/sysfs.c
+++ b/drivers/platform/chrome/wilco_ec/sysfs.c
@@ -8,8 +8,12 @@
* See Documentation/ABI/testing/sysfs-platform-wilco-ec for more information.
*/
+#include <linux/device.h>
+#include <linux/kernel.h>
#include <linux/platform_data/wilco-ec.h>
+#include <linux/string.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
#define CMD_KB_CMOS 0x7C
#define SUB_CMD_KB_CMOS_AUTO_ON 0x03
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 74e988f839e8..f8d3e3bd1bb5 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
@@ -2295,6 +2295,6 @@ module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index a6b856cd86bd..a89fad47ff13 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
Dell Airplane Mode Switch driver
- Copyright (C) 2014-2015 Pali Rohár <pali.rohar@gmail.com>
+ Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
*/
@@ -495,5 +495,5 @@ MODULE_PARM_DESC(auto_remove_rfkill, "Automatically remove rfkill devices when "
"(default true)");
MODULE_DEVICE_TABLE(acpi, rbtn_ids);
MODULE_DESCRIPTION("Dell Airplane Mode Switch driver");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-rbtn.h b/drivers/platform/x86/dell-rbtn.h
index 0fdc81644458..5e030f926c58 100644
--- a/drivers/platform/x86/dell-rbtn.h
+++ b/drivers/platform/x86/dell-rbtn.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
Dell Airplane Mode Switch driver
- Copyright (C) 2014-2015 Pali Rohár <pali.rohar@gmail.com>
+ Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
*/
diff --git a/drivers/platform/x86/dell-smbios-base.c b/drivers/platform/x86/dell-smbios-base.c
index fe59b0ebff31..2e2cd565926a 100644
--- a/drivers/platform/x86/dell-smbios-base.c
+++ b/drivers/platform/x86/dell-smbios-base.c
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
@@ -645,7 +645,7 @@ module_exit(dell_smbios_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_AUTHOR("Mario Limonciello <mario.limonciello@dell.com>");
MODULE_DESCRIPTION("Common functions for kernel modules using Dell SMBIOS");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell-smbios-smm.c b/drivers/platform/x86/dell-smbios-smm.c
index d6854d1c4119..97c52a839a3e 100644
--- a/drivers/platform/x86/dell-smbios-smm.c
+++ b/drivers/platform/x86/dell-smbios-smm.c
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
* Copyright (c) 2017 Dell Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/platform/x86/dell-smbios.h b/drivers/platform/x86/dell-smbios.h
index a7ff9803f41a..75fa8ea0476d 100644
--- a/drivers/platform/x86/dell-smbios.h
+++ b/drivers/platform/x86/dell-smbios.h
@@ -4,7 +4,7 @@
*
* Copyright (c) Red Hat <mjg@redhat.com>
* Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (c) 2014 Pali Rohár <pali@kernel.org>
*
* Based on documentation in the libsmbios package:
* Copyright (C) 2005-2014 Dell Inc.
diff --git a/drivers/platform/x86/dell-smo8800.c b/drivers/platform/x86/dell-smo8800.c
index b531fe8ab7e0..5d9304a7de1b 100644
--- a/drivers/platform/x86/dell-smo8800.c
+++ b/drivers/platform/x86/dell-smo8800.c
@@ -3,7 +3,7 @@
* dell-smo8800.c - Dell Latitude ACPI SMO88XX freefall sensor driver
*
* Copyright (C) 2012 Sonal Santan <sonal.santan@gmail.com>
- * Copyright (C) 2014 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014 Pali Rohár <pali@kernel.org>
*
* This is loosely based on lis3lv02d driver.
*/
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c
index 6669db2555fb..86e8dd6a8b33 100644
--- a/drivers/platform/x86/dell-wmi.c
+++ b/drivers/platform/x86/dell-wmi.c
@@ -3,7 +3,7 @@
* Dell WMI hotkeys
*
* Copyright (C) 2008 Red Hat <mjg@redhat.com>
- * Copyright (C) 2014-2015 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2014-2015 Pali Rohár <pali@kernel.org>
*
* Portions based on wistron_btns.c:
* Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
@@ -29,7 +29,7 @@
#include "dell-wmi-descriptor.h"
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Dell laptop WMI hotkeys driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig
index 195bc0462d3e..f3424fdce341 100644
--- a/drivers/power/supply/Kconfig
+++ b/drivers/power/supply/Kconfig
@@ -659,7 +659,7 @@ config CHARGER_RT9455
config CHARGER_CROS_USBPD
tristate "ChromeOS EC based USBPD charger"
- depends on CROS_EC
+ depends on CROS_USBPD_NOTIFY
default n
help
Say Y here to enable ChromeOS EC based USBPD charger
diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c
index 532f6e4fcafb..a1f00ae1c180 100644
--- a/drivers/power/supply/bq2415x_charger.c
+++ b/drivers/power/supply/bq2415x_charger.c
@@ -2,7 +2,7 @@
/*
* bq2415x charger driver
*
- * Copyright (C) 2011-2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011-2013 Pali Rohár <pali@kernel.org>
*
* Datasheets:
* http://www.ti.com/product/bq24150
@@ -1788,6 +1788,6 @@ static struct i2c_driver bq2415x_driver = {
};
module_i2c_driver(bq2415x_driver);
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("bq2415x charger driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c
index 664e50103eaa..942c92127b6d 100644
--- a/drivers/power/supply/bq27xxx_battery.c
+++ b/drivers/power/supply/bq27xxx_battery.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it>
* Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de>
- * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2011 Pali Rohár <pali@kernel.org>
* Copyright (C) 2017 Liam Breck <kernel@networkimprov.net>
*
* Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc.
diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c
index 30c3d37511c9..2a45e84447fe 100644
--- a/drivers/power/supply/cros_usbpd-charger.c
+++ b/drivers/power/supply/cros_usbpd-charger.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
+#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
@@ -517,32 +518,21 @@ static int cros_usbpd_charger_property_is_writeable(struct power_supply *psy,
}
static int cros_usbpd_charger_ec_event(struct notifier_block *nb,
- unsigned long queued_during_suspend,
+ unsigned long host_event,
void *_notify)
{
- struct cros_ec_device *ec_device;
- struct charger_data *charger;
- u32 host_event;
+ struct charger_data *charger = container_of(nb, struct charger_data,
+ notifier);
- charger = container_of(nb, struct charger_data, notifier);
- ec_device = charger->ec_device;
-
- host_event = cros_ec_get_host_event(ec_device);
- if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) {
- cros_usbpd_charger_power_changed(charger->ports[0]->psy);
- return NOTIFY_OK;
- } else {
- return NOTIFY_DONE;
- }
+ cros_usbpd_charger_power_changed(charger->ports[0]->psy);
+ return NOTIFY_OK;
}
static void cros_usbpd_charger_unregister_notifier(void *data)
{
struct charger_data *charger = data;
- struct cros_ec_device *ec_device = charger->ec_device;
- blocking_notifier_chain_unregister(&ec_device->event_notifier,
- &charger->notifier);
+ cros_usbpd_unregister_notify(&charger->notifier);
}
static int cros_usbpd_charger_probe(struct platform_device *pd)
@@ -676,21 +666,17 @@ static int cros_usbpd_charger_probe(struct platform_device *pd)
goto fail;
}
- if (ec_device->mkbp_event_supported) {
- /* Get PD events from the EC */
- charger->notifier.notifier_call = cros_usbpd_charger_ec_event;
- ret = blocking_notifier_chain_register(
- &ec_device->event_notifier,
- &charger->notifier);
- if (ret < 0) {
- dev_warn(dev, "failed to register notifier\n");
- } else {
- ret = devm_add_action_or_reset(dev,
- cros_usbpd_charger_unregister_notifier,
- charger);
- if (ret < 0)
- goto fail;
- }
+ /* Get PD events from the EC */
+ charger->notifier.notifier_call = cros_usbpd_charger_ec_event;
+ ret = cros_usbpd_register_notify(&charger->notifier);
+ if (ret < 0) {
+ dev_warn(dev, "failed to register notifier\n");
+ } else {
+ ret = devm_add_action_or_reset(dev,
+ cros_usbpd_charger_unregister_notifier,
+ charger);
+ if (ret < 0)
+ goto fail;
}
return 0;
diff --git a/drivers/power/supply/isp1704_charger.c b/drivers/power/supply/isp1704_charger.c
index 4812ac1ff2df..b6efc454e4f0 100644
--- a/drivers/power/supply/isp1704_charger.c
+++ b/drivers/power/supply/isp1704_charger.c
@@ -3,7 +3,7 @@
* ISP1704 USB Charger Detection driver
*
* Copyright (C) 2010 Nokia Corporation
- * Copyright (C) 2012 - 2013 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2012 - 2013 Pali Rohár <pali@kernel.org>
*/
#include <linux/kernel.h>
diff --git a/drivers/power/supply/rx51_battery.c b/drivers/power/supply/rx51_battery.c
index 8548b639ff2f..6e488ecf4dcb 100644
--- a/drivers/power/supply/rx51_battery.c
+++ b/drivers/power/supply/rx51_battery.c
@@ -2,7 +2,7 @@
/*
* Nokia RX-51 battery driver
*
- * Copyright (C) 2012 Pali Rohár <pali.rohar@gmail.com>
+ * Copyright (C) 2012 Pali Rohár <pali@kernel.org>
*/
#include <linux/module.h>
@@ -278,6 +278,6 @@ static struct platform_driver rx51_battery_driver = {
module_platform_driver(rx51_battery_driver);
MODULE_ALIAS("platform:rx51-battery");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
+MODULE_AUTHOR("Pali Rohár <pali@kernel.org>");
MODULE_DESCRIPTION("Nokia RX-51 battery driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c
index 24709c572c0c..e061b7d0632b 100644
--- a/drivers/ps3/sys-manager-core.c
+++ b/drivers/ps3/sys-manager-core.c
@@ -31,7 +31,7 @@ void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops)
{
BUG_ON(!ops);
BUG_ON(!ops->dev);
- ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops;
+ ps3_sys_manager_ops = *ops;
}
EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops);
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 30190beeb6e9..eebbc917ac97 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -33,6 +33,15 @@ config PWM_SYSFS
bool
default y if SYSFS
+config PWM_DEBUG
+ bool "PWM lowlevel drivers additional checks and debug messages"
+ depends on DEBUG_KERNEL
+ help
+	  This option enables some additional checks to help lowlevel driver
+	  authors get their callbacks implemented correctly.
+	  It is expected to introduce some runtime overhead and diagnostic
+	  output to the kernel log, so only enable it while working on a driver.
+
config PWM_AB8500
tristate "AB8500 PWM support"
depends on AB8500_CORE && ARCH_U8500
@@ -44,7 +53,8 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on ARCH_AT91 && OF
+ depends on OF
+ depends on ARCH_AT91 || COMPILE_TEST
help
Generic PWM framework driver for Atmel SoC.
@@ -100,7 +110,7 @@ config PWM_BCM_KONA
config PWM_BCM2835
tristate "BCM2835 PWM support"
- depends on ARCH_BCM2835 || ARCH_BRCMSTB
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
help
PWM framework driver for BCM2835 controller (Raspberry Pi)
@@ -109,7 +119,7 @@ config PWM_BCM2835
config PWM_BERLIN
tristate "Marvell Berlin PWM support"
- depends on ARCH_BERLIN
+ depends on ARCH_BERLIN || COMPILE_TEST
help
PWM framework driver for Marvell Berlin SoCs.
@@ -118,7 +128,7 @@ config PWM_BERLIN
config PWM_BRCMSTB
tristate "Broadcom STB PWM support"
- depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
help
Generic PWM framework driver for the Broadcom Set-top-Box
SoCs (BCM7xxx).
@@ -152,7 +162,7 @@ config PWM_CROS_EC
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
help
Generic PWM framework driver for Cirrus Logic EP93xx.
@@ -195,7 +205,7 @@ config PWM_IMG
config PWM_IMX1
tristate "i.MX1 PWM support"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Generic PWM framework driver for i.MX1 and i.MX21
@@ -204,7 +214,7 @@ config PWM_IMX1
config PWM_IMX27
tristate "i.MX27 PWM support"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Generic PWM framework driver for i.MX27 and later i.MX SoCs.
@@ -225,6 +235,8 @@ config PWM_IMX_TPM
config PWM_JZ4740
tristate "Ingenic JZ47xx PWM support"
depends on MACH_INGENIC
+ depends on COMMON_CLK
+ select MFD_SYSCON
help
Generic PWM framework driver for Ingenic JZ47xx based
machines.
@@ -244,7 +256,7 @@ config PWM_LP3943
config PWM_LPC18XX_SCT
tristate "LPC18xx/43xx PWM/SCT support"
- depends on ARCH_LPC18XX
+ depends on ARCH_LPC18XX || COMPILE_TEST
help
Generic PWM framework driver for NXP LPC18xx PWM/SCT which
supports 16 channels.
@@ -256,7 +268,7 @@ config PWM_LPC18XX_SCT
config PWM_LPC32XX
tristate "LPC32XX PWM support"
- depends on ARCH_LPC32XX
+ depends on ARCH_LPC32XX || COMPILE_TEST
help
Generic PWM framework driver for LPC32XX. The LPC32XX SOC has two
PWM controllers.
@@ -289,7 +301,8 @@ config PWM_LPSS_PLATFORM
config PWM_MESON
tristate "Amlogic Meson PWM driver"
- depends on ARCH_MESON
+ depends on ARCH_MESON || COMPILE_TEST
+ depends on COMMON_CLK
help
The platform driver for Amlogic Meson PWM controller.
@@ -318,7 +331,8 @@ config PWM_MEDIATEK
config PWM_MXS
tristate "Freescale MXS PWM support"
- depends on ARCH_MXS && OF
+ depends on OF
+ depends on ARCH_MXS || COMPILE_TEST
select STMP_DEVICE
help
Generic PWM framework driver for Freescale MXS.
@@ -357,7 +371,7 @@ config PWM_PUV3
config PWM_PXA
tristate "PXA PWM support"
- depends on ARCH_PXA
+ depends on ARCH_PXA || COMPILE_TEST
help
Generic PWM framework driver for PXA.
@@ -388,14 +402,14 @@ config PWM_RENESAS_TPU
config PWM_ROCKCHIP
tristate "Rockchip PWM support"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
help
Generic PWM framework driver for the PWM controller found on
Rockchip SoCs.
config PWM_SAMSUNG
tristate "Samsung PWM support"
- depends on PLAT_SAMSUNG || ARCH_EXYNOS
+ depends on PLAT_SAMSUNG || ARCH_EXYNOS || COMPILE_TEST
help
Generic PWM framework driver for Samsung.
@@ -415,7 +429,7 @@ config PWM_SIFIVE
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
depends on OF
help
Generic PWM framework driver for the PWM controller on ST
@@ -437,7 +451,7 @@ config PWM_SPRD
config PWM_STI
tristate "STiH4xx PWM support"
- depends on ARCH_STI
+ depends on ARCH_STI || COMPILE_TEST
depends on OF
help
Generic PWM framework driver for STiH4xx SoCs.
@@ -447,7 +461,7 @@ config PWM_STI
config PWM_STM32
tristate "STMicroelectronics STM32 PWM"
- depends on MFD_STM32_TIMERS
+ depends on MFD_STM32_TIMERS || COMPILE_TEST
help
Generic PWM framework driver for STM32 SoCs.
@@ -483,7 +497,7 @@ config PWM_SUN4I
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
help
Generic PWM framework driver for the PWFM controller found on NVIDIA
Tegra SoCs.
@@ -493,7 +507,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
help
PWM driver support for the ECAP APWM controller found on TI SOCs
@@ -502,7 +516,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
- depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3
+ depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3 || COMPILE_TEST
help
PWM driver support for the EHRPWM controller found on TI SOCs
@@ -529,7 +543,7 @@ config PWM_TWL_LED
config PWM_VT8500
tristate "vt8500 PWM support"
- depends on ARCH_VT8500
+ depends on ARCH_VT8500 || COMPILE_TEST
help
Generic PWM framework driver for vt8500.
@@ -538,7 +552,7 @@ config PWM_VT8500
config PWM_ZX
tristate "ZTE ZX PWM support"
- depends on ARCH_ZX
+ depends on ARCH_ZX || COMPILE_TEST
help
Generic PWM framework driver for ZTE ZX family SoCs.
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 5a7f6598c05f..9973c442b455 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -120,6 +120,9 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label)
if (pwm->chip->ops->get_state) {
pwm->chip->ops->get_state(pwm->chip, pwm, &pwm->state);
trace_pwm_get(pwm, &pwm->state);
+
+		if (IS_ENABLED(CONFIG_PWM_DEBUG))
+ pwm->last = pwm->state;
}
set_bit(PWMF_REQUESTED, &pwm->flags);
@@ -232,17 +235,28 @@ void *pwm_get_chip_data(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_get_chip_data);
-static bool pwm_ops_check(const struct pwm_ops *ops)
+static bool pwm_ops_check(const struct pwm_chip *chip)
{
+ const struct pwm_ops *ops = chip->ops;
+
/* driver supports legacy, non-atomic operation */
- if (ops->config && ops->enable && ops->disable)
- return true;
+ if (ops->config && ops->enable && ops->disable) {
+ if (IS_ENABLED(CONFIG_PWM_DEBUG))
+ dev_warn(chip->dev,
+ "Driver needs updating to atomic API\n");
- /* driver supports atomic operation */
- if (ops->apply)
return true;
+ }
- return false;
+ if (!ops->apply)
+ return false;
+
+ if (IS_ENABLED(CONFIG_PWM_DEBUG) && !ops->get_state)
+ dev_warn(chip->dev,
+ "Please implement the .get_state() callback\n");
+
+ return true;
}
/**
@@ -266,7 +280,7 @@ int pwmchip_add_with_polarity(struct pwm_chip *chip,
if (!chip || !chip->dev || !chip->ops || !chip->npwm)
return -EINVAL;
- if (!pwm_ops_check(chip->ops))
+ if (!pwm_ops_check(chip))
return -EINVAL;
mutex_lock(&pwm_lock);
@@ -450,6 +464,107 @@ void pwm_free(struct pwm_device *pwm)
}
EXPORT_SYMBOL_GPL(pwm_free);
+static void pwm_apply_state_debug(struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct pwm_state *last = &pwm->last;
+ struct pwm_chip *chip = pwm->chip;
+ struct pwm_state s1, s2;
+ int err;
+
+ if (!IS_ENABLED(CONFIG_PWM_DEBUG))
+ return;
+
+ /* No reasonable diagnosis possible without .get_state() */
+ if (!chip->ops->get_state)
+ return;
+
+ /*
+ * *state was just applied. Read out the hardware state and do some
+ * checks.
+ */
+
+ chip->ops->get_state(chip, pwm, &s1);
+ trace_pwm_get(pwm, &s1);
+
+ /*
+	 * The lowlevel driver either ignored .polarity (which is a bug), or,
+	 * as a best effort, inverted .polarity and adjusted .duty_cycle
+	 * accordingly. Undo this inversion and fixup for further tests.
+ */
+ if (s1.enabled && s1.polarity != state->polarity) {
+ s2.polarity = state->polarity;
+ s2.duty_cycle = s1.period - s1.duty_cycle;
+ s2.period = s1.period;
+ s2.enabled = s1.enabled;
+ } else {
+ s2 = s1;
+ }
+
+ if (s2.polarity != state->polarity &&
+ state->duty_cycle < state->period)
+ dev_warn(chip->dev, ".apply ignored .polarity\n");
+
+ if (state->enabled &&
+ last->polarity == state->polarity &&
+ last->period > s2.period &&
+ last->period <= state->period)
+ dev_warn(chip->dev,
+ ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n",
+ state->period, s2.period, last->period);
+
+ if (state->enabled && state->period < s2.period)
+ dev_warn(chip->dev,
+ ".apply is supposed to round down period (requested: %u, applied: %u)\n",
+ state->period, s2.period);
+
+ if (state->enabled &&
+ last->polarity == state->polarity &&
+ last->period == s2.period &&
+ last->duty_cycle > s2.duty_cycle &&
+ last->duty_cycle <= state->duty_cycle)
+ dev_warn(chip->dev,
+ ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n",
+ state->duty_cycle, state->period,
+ s2.duty_cycle, s2.period,
+ last->duty_cycle, last->period);
+
+ if (state->enabled && state->duty_cycle < s2.duty_cycle)
+ dev_warn(chip->dev,
+ ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n",
+ state->duty_cycle, state->period,
+ s2.duty_cycle, s2.period);
+
+ if (!state->enabled && s2.enabled && s2.duty_cycle > 0)
+ dev_warn(chip->dev,
+			 "requested disabled, but yielded enabled with duty > 0\n");
+
+ /* reapply the state that the driver reported being configured. */
+ err = chip->ops->apply(chip, pwm, &s1);
+ if (err) {
+ *last = s1;
+ dev_err(chip->dev, "failed to reapply current setting\n");
+ return;
+ }
+
+ trace_pwm_apply(pwm, &s1);
+
+ chip->ops->get_state(chip, pwm, last);
+ trace_pwm_get(pwm, last);
+
+ /* reapplication of the current state should give an exact match */
+ if (s1.enabled != last->enabled ||
+ s1.polarity != last->polarity ||
+ (s1.enabled && s1.period != last->period) ||
+ (s1.enabled && s1.duty_cycle != last->duty_cycle)) {
+ dev_err(chip->dev,
+ ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n",
+ s1.enabled, s1.polarity, s1.duty_cycle, s1.period,
+ last->enabled, last->polarity, last->duty_cycle,
+ last->period);
+ }
+}
+
/**
* pwm_apply_state() - atomically apply a new state to a PWM device
* @pwm: PWM device
@@ -480,6 +595,12 @@ int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
trace_pwm_apply(pwm, state);
pwm->state = *state;
+
+ /*
+ * only do this after pwm->state was applied as some
+ * implementations of .get_state depend on this
+ */
+ pwm_apply_state_debug(pwm, state);
} else {
/*
* FIXME: restore the initial state in case of error.
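
The checks in pwm_apply_state_debug() encode the contract expected from lowlevel drivers: .apply must honour .polarity, round period and duty_cycle down rather than up, and be idempotent when the state it reported is applied again. A minimal sketch of an .apply that satisfies those rules, for hypothetical hardware with a 16-bit counter running at a fixed clk_rate (all foo_* names are illustrative):

	static int foo_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
				 const struct pwm_state *state)
	{
		struct foo_pwm *foo = to_foo_pwm(chip);
		u64 period_cycles, duty_cycles;

		/* Round down: never configure a longer period than requested. */
		period_cycles = mul_u64_u32_div(state->period, foo->clk_rate,
						NSEC_PER_SEC);
		if (period_cycles > 0xffff)
			period_cycles = 0xffff;

		duty_cycles = mul_u64_u32_div(state->duty_cycle, foo->clk_rate,
					      NSEC_PER_SEC);
		if (duty_cycles > period_cycles)
			duty_cycles = period_cycles;

		/*
		 * Program the hardware, honouring state->polarity and
		 * state->enabled, so that a subsequent .get_state() reads
		 * back exactly what was configured here (foo_pwm_write_config
		 * is a hypothetical helper).
		 */
		return foo_pwm_write_config(foo, period_cycles, duty_cycles,
					    state->polarity, state->enabled);
	}
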
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 91e24f01b54e..d78f86f8e462 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -166,6 +166,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->chip.dev = &pdev->dev;
pc->chip.ops = &bcm2835_pwm_ops;
+ pc->chip.base = -1;
pc->chip.npwm = 2;
pc->chip.of_xlate = of_pwm_xlate_with_flags;
pc->chip.of_pwm_n_cells = 3;
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index 9145f6160649..5f3d7f7e6aef 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -18,10 +18,8 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index 35a7ac42269c..a6e40d4c485f 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -18,7 +18,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
@@ -96,9 +95,8 @@ struct pwm_imx27_chip {
#define to_pwm_imx27_chip(chip) container_of(chip, struct pwm_imx27_chip, chip)
-static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip)
+static int pwm_imx27_clk_prepare_enable(struct pwm_imx27_chip *imx)
{
- struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
int ret;
ret = clk_prepare_enable(imx->clk_ipg);
@@ -114,10 +112,8 @@ static int pwm_imx27_clk_prepare_enable(struct pwm_chip *chip)
return 0;
}
-static void pwm_imx27_clk_disable_unprepare(struct pwm_chip *chip)
+static void pwm_imx27_clk_disable_unprepare(struct pwm_imx27_chip *imx)
{
- struct pwm_imx27_chip *imx = to_pwm_imx27_chip(chip);
-
clk_disable_unprepare(imx->clk_per);
clk_disable_unprepare(imx->clk_ipg);
}
@@ -130,7 +126,7 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
u64 tmp;
int ret;
- ret = pwm_imx27_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(imx);
if (ret < 0)
return;
@@ -174,8 +170,7 @@ static void pwm_imx27_get_state(struct pwm_chip *chip,
tmp = NSEC_PER_SEC * (u64)(val);
state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, pwm_clk);
- if (!state->enabled)
- pwm_imx27_clk_disable_unprepare(chip);
+ pwm_imx27_clk_disable_unprepare(imx);
}
static void pwm_imx27_sw_reset(struct pwm_chip *chip)
@@ -259,7 +254,7 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (cstate.enabled) {
pwm_imx27_wait_fifo_slot(chip, pwm);
} else {
- ret = pwm_imx27_clk_prepare_enable(chip);
+ ret = pwm_imx27_clk_prepare_enable(imx);
if (ret)
return ret;
@@ -289,8 +284,8 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
writel(cr, imx->mmio_base + MX3_PWMCR);
- if (!state->enabled && cstate.enabled)
- pwm_imx27_clk_disable_unprepare(chip);
+ if (!state->enabled)
+ pwm_imx27_clk_disable_unprepare(imx);
return 0;
}
@@ -310,6 +305,8 @@ MODULE_DEVICE_TABLE(of, pwm_imx27_dt_ids);
static int pwm_imx27_probe(struct platform_device *pdev)
{
struct pwm_imx27_chip *imx;
+ int ret;
+ u32 pwmcr;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (imx == NULL)
@@ -352,6 +349,15 @@ static int pwm_imx27_probe(struct platform_device *pdev)
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
+ ret = pwm_imx27_clk_prepare_enable(imx);
+ if (ret)
+ return ret;
+
+ /* keep clks on if pwm is running */
+ pwmcr = readl(imx->mmio_base + MX3_PWMCR);
+ if (!(pwmcr & MX3_PWMCR_EN))
+ pwm_imx27_clk_disable_unprepare(imx);
+
return pwmchip_add(&imx->chip);
}
@@ -361,8 +367,6 @@ static int pwm_imx27_remove(struct platform_device *pdev)
imx = platform_get_drvdata(pdev);
- pwm_imx27_clk_disable_unprepare(&imx->chip);
-
return pwmchip_remove(&imx->chip);
}
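
The probe hunk above handles hardware that may reach the kernel with the PWM already running (for example a bootloader-configured output): take a clock reference, inspect the enable bit, and keep the reference only while the output is live, so the .apply call that eventually disables the PWM drops exactly that reference. Condensed, the pattern looks like this (REG_CR, CR_EN and the surrounding names are assumed, not from the patch):

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/*
	 * Keep the clock reference only while the PWM output is actually
	 * running; the .apply callback that disables the PWM releases it.
	 */
	if (!(readl(base + REG_CR) & CR_EN))
		clk_disable_unprepare(clk);
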
diff --git a/drivers/pwm/pwm-jz4740.c b/drivers/pwm/pwm-jz4740.c
index 9d78cc21cb12..3cd5c054ad9a 100644
--- a/drivers/pwm/pwm-jz4740.c
+++ b/drivers/pwm/pwm-jz4740.c
@@ -13,18 +13,19 @@
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/kernel.h>
+#include <linux/mfd/ingenic-tcu.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
-
-#include <asm/mach-jz4740/timer.h>
+#include <linux/regmap.h>
#define NUM_PWM 8
struct jz4740_pwm_chip {
struct pwm_chip chip;
- struct clk *clk;
+ struct regmap *map;
};
static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
@@ -32,82 +33,134 @@ static inline struct jz4740_pwm_chip *to_jz4740(struct pwm_chip *chip)
return container_of(chip, struct jz4740_pwm_chip, chip);
}
+static bool jz4740_pwm_can_use_chn(struct jz4740_pwm_chip *jz,
+ unsigned int channel)
+{
+ /* Enable all TCU channels for PWM use by default except channels 0/1 */
+ u32 pwm_channels_mask = GENMASK(NUM_PWM - 1, 2);
+
+ device_property_read_u32(jz->chip.dev->parent,
+ "ingenic,pwm-channels-mask",
+ &pwm_channels_mask);
+
+ return !!(pwm_channels_mask & BIT(channel));
+}
+
static int jz4740_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
- /*
- * Timers 0 and 1 are used for system tasks, so they are unavailable
- * for use as PWMs.
- */
- if (pwm->hwpwm < 2)
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
+ struct clk *clk;
+ char name[16];
+ int err;
+
+ if (!jz4740_pwm_can_use_chn(jz, pwm->hwpwm))
return -EBUSY;
- jz4740_timer_start(pwm->hwpwm);
+ snprintf(name, sizeof(name), "timer%u", pwm->hwpwm);
+
+ clk = clk_get(chip->dev, name);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+			dev_err(chip->dev, "Failed to get clock: %pe\n", clk);
+
+ return PTR_ERR(clk);
+ }
+
+ err = clk_prepare_enable(clk);
+ if (err < 0) {
+ clk_put(clk);
+ return err;
+ }
+
+ pwm_set_chip_data(pwm, clk);
return 0;
}
static void jz4740_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
- jz4740_timer_set_ctrl(pwm->hwpwm, 0);
+ struct clk *clk = pwm_get_chip_data(pwm);
- jz4740_timer_stop(pwm->hwpwm);
+ clk_disable_unprepare(clk);
+ clk_put(clk);
}
static int jz4740_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- uint32_t ctrl = jz4740_timer_get_ctrl(pwm->pwm);
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
+
+ /* Enable PWM output */
+ regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_EN, TCU_TCSR_PWM_EN);
- ctrl |= JZ_TIMER_CTRL_PWM_ENABLE;
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
- jz4740_timer_enable(pwm->hwpwm);
+ /* Start counter */
+ regmap_write(jz->map, TCU_REG_TESR, BIT(pwm->hwpwm));
return 0;
}
static void jz4740_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
- uint32_t ctrl = jz4740_timer_get_ctrl(pwm->hwpwm);
+ struct jz4740_pwm_chip *jz = to_jz4740(chip);
/*
* Set duty > period. This trick allows the TCU channels in TCU2 mode to
* properly return to their init level.
*/
- jz4740_timer_set_duty(pwm->hwpwm, 0xffff);
- jz4740_timer_set_period(pwm->hwpwm, 0x0);
+ regmap_write(jz->map, TCU_REG_TDHRc(pwm->hwpwm), 0xffff);
+ regmap_write(jz->map, TCU_REG_TDFRc(pwm->hwpwm), 0x0);
/*
* Disable PWM output.
* In TCU2 mode (channel 1/2 on JZ4750+), this must be done before the
* counter is stopped, while in TCU1 mode the order does not matter.
*/
- ctrl &= ~JZ_TIMER_CTRL_PWM_ENABLE;
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+ regmap_update_bits(jz->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_EN, 0);
/* Stop counter */
- jz4740_timer_disable(pwm->hwpwm);
+ regmap_write(jz->map, TCU_REG_TECR, BIT(pwm->hwpwm));
}
static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
const struct pwm_state *state)
{
struct jz4740_pwm_chip *jz4740 = to_jz4740(pwm->chip);
- unsigned long long tmp;
+ unsigned long long tmp = 0xffffull * NSEC_PER_SEC;
+ struct clk *clk = pwm_get_chip_data(pwm);
unsigned long period, duty;
- unsigned int prescaler = 0;
- uint16_t ctrl;
+ long rate;
+ int err;
- tmp = (unsigned long long)clk_get_rate(jz4740->clk) * state->period;
- do_div(tmp, 1000000000);
- period = tmp;
+ /*
+ * Limit the clock to a maximum rate that still gives us a period value
+ * which fits in 16 bits.
+ */
+ do_div(tmp, state->period);
- while (period > 0xffff && prescaler < 6) {
- period >>= 2;
- ++prescaler;
+ /*
+ * /!\ IMPORTANT NOTE:
+ * -------------------
+ * This code relies on the fact that clk_round_rate() will always round
+ * down, which is not a valid assumption given by the clk API, but only
+ * happens to be true with the clk drivers used for Ingenic SoCs.
+ *
+ * Right now, there is no alternative as the clk API does not have a
+ * round-down function (and won't have one for a while), but if it ever
+ * comes to light, a round-down function should be used instead.
+ */
+ rate = clk_round_rate(clk, tmp);
+ if (rate < 0) {
+		dev_err(chip->dev, "Unable to round rate: %ld\n", rate);
+ return rate;
}
- if (prescaler == 6)
- return -EINVAL;
+ /* Calculate period value */
+ tmp = (unsigned long long)rate * state->period;
+ do_div(tmp, NSEC_PER_SEC);
+ period = (unsigned long)tmp;
+ /* Calculate duty value */
tmp = (unsigned long long)period * state->duty_cycle;
do_div(tmp, state->period);
duty = period - tmp;
@@ -117,26 +170,38 @@ static int jz4740_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
jz4740_pwm_disable(chip, pwm);
- jz4740_timer_set_count(pwm->hwpwm, 0);
- jz4740_timer_set_duty(pwm->hwpwm, duty);
- jz4740_timer_set_period(pwm->hwpwm, period);
+ err = clk_set_rate(clk, rate);
+ if (err) {
+		dev_err(chip->dev, "Unable to set rate: %d\n", err);
+ return err;
+ }
+
+ /* Reset counter to 0 */
+ regmap_write(jz4740->map, TCU_REG_TCNTc(pwm->hwpwm), 0);
- ctrl = JZ_TIMER_CTRL_PRESCALER(prescaler) | JZ_TIMER_CTRL_SRC_EXT |
- JZ_TIMER_CTRL_PWM_ABBRUPT_SHUTDOWN;
+ /* Set duty */
+ regmap_write(jz4740->map, TCU_REG_TDHRc(pwm->hwpwm), duty);
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
+ /* Set period */
+ regmap_write(jz4740->map, TCU_REG_TDFRc(pwm->hwpwm), period);
+ /* Set abrupt shutdown */
+ regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_SD, TCU_TCSR_PWM_SD);
+
+ /* Set polarity */
switch (state->polarity) {
case PWM_POLARITY_NORMAL:
- ctrl &= ~JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_INITL_HIGH, 0);
break;
case PWM_POLARITY_INVERSED:
- ctrl |= JZ_TIMER_CTRL_PWM_ACTIVE_LOW;
+ regmap_update_bits(jz4740->map, TCU_REG_TCSRc(pwm->hwpwm),
+ TCU_TCSR_PWM_INITL_HIGH,
+ TCU_TCSR_PWM_INITL_HIGH);
break;
}
- jz4740_timer_set_ctrl(pwm->hwpwm, ctrl);
-
if (state->enabled)
jz4740_pwm_enable(chip, pwm);
@@ -152,17 +217,20 @@ static const struct pwm_ops jz4740_pwm_ops = {
static int jz4740_pwm_probe(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct jz4740_pwm_chip *jz4740;
- jz4740 = devm_kzalloc(&pdev->dev, sizeof(*jz4740), GFP_KERNEL);
+ jz4740 = devm_kzalloc(dev, sizeof(*jz4740), GFP_KERNEL);
if (!jz4740)
return -ENOMEM;
- jz4740->clk = devm_clk_get(&pdev->dev, "ext");
- if (IS_ERR(jz4740->clk))
- return PTR_ERR(jz4740->clk);
+ jz4740->map = device_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(jz4740->map)) {
+ dev_err(dev, "regmap not found: %ld\n", PTR_ERR(jz4740->map));
+ return PTR_ERR(jz4740->map);
+ }
- jz4740->chip.dev = &pdev->dev;
+ jz4740->chip.dev = dev;
jz4740->chip.ops = &jz4740_pwm_ops;
jz4740->chip.npwm = NUM_PWM;
jz4740->chip.base = -1;
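
To make the rate-limiting math in jz4740_pwm_apply() concrete: the TCU counter is 16 bits wide, so the driver first derives the highest clock rate at which the requested period still fits in the counter, then lets clk_round_rate() pick something at or below it. A worked example with assumed numbers (a 1 kHz, 25 % duty request; the 12 MHz rounded rate is hypothetical):

	/*
	 * state->period = 1000000 ns, state->duty_cycle = 250000 ns
	 *
	 * rate ceiling = 0xffff * NSEC_PER_SEC / 1000000   = 65535000 Hz
	 * rate         = clk_round_rate(clk, 65535000)     = 12000000 Hz (say)
	 * period       = 12000000 * 1000000 / NSEC_PER_SEC = 12000 counts
	 * duty         = 12000 - 12000 * 250000 / 1000000  =  9000 counts
	 *
	 * 12000 fits in 16 bits; the duty register value is inverted
	 * (period - tmp), matching the calculation in jz4740_pwm_apply().
	 */
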
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index 6245bbdb6e6c..bd0d7336b898 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -136,7 +136,7 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
dev_err(dev, "failed to set parent %s for %s: %d\n",
__clk_get_name(channel->clk_parent),
__clk_get_name(channel->clk), err);
- return err;
+ return err;
}
}
@@ -163,7 +163,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm,
{
struct meson_pwm_channel *channel = pwm_get_chip_data(pwm);
unsigned int duty, period, pre_div, cnt, duty_cnt;
- unsigned long fin_freq = -1;
+ unsigned long fin_freq;
duty = state->duty_cycle;
period = state->period;
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index f2e57fcf8f8b..7ce616923c52 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/slab.h>
diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
index 9e4378dc6897..0d31833db2e2 100644
--- a/drivers/pwm/pwm-omap-dmtimer.c
+++ b/drivers/pwm/pwm-omap-dmtimer.c
@@ -10,7 +10,27 @@
*
* Description:
* This file is the core OMAP support for the generic, Linux
- * PWM driver / controller, using the OMAP's dual-mode timers.
+ * PWM driver / controller, using the OMAP's dual-mode timers
+ * with a timer counter that counts up. When it overflows, it is
+ * reloaded with the load value and the pwm output goes high.
+ * When the counter matches the match register, the output goes low.
+ * Reference Manual: http://www.ti.com/lit/ug/spruh73q/spruh73q.pdf
+ *
+ * Limitations:
+ * - When PWM is stopped, timer counter gets stopped immediately. This
+ * doesn't allow the current PWM period to complete and stops abruptly.
+ * - When PWM is running and changing both duty cycle and period,
+ *   we cannot prevent the output from producing a period with mixed
+ *   settings. Especially when period/duty_cycle is updated while the
+ *   pwm pin is high, the current pwm period/duty_cycle can get updated
+ *   as below based on the current timer counter:
+ *     - period for current cycle = current_period + new period
+ *     - duty_cycle for current period = current_period + new duty_cycle.
+ * - PWM OMAP DM timer cannot change the polarity when pwm is active. When
+ *   the user requests a change in polarity when in active state:
+ *     - PWM is stopped abruptly (without completing the current cycle)
+ * - Polarity is changed
+ * - A fresh cycle is started.
*/
#include <linux/clk.h>
@@ -20,8 +40,8 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <clocksource/timer-ti-dm.h>
#include <linux/platform_data/dmtimer-omap.h>
-#include <linux/platform_data/pwm_omap_dmtimer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
@@ -31,10 +51,20 @@
#define DM_TIMER_LOAD_MIN 0xfffffffe
#define DM_TIMER_MAX 0xffffffff
+/**
+ * struct pwm_omap_dmtimer_chip - Structure representing a pwm chip
+ * corresponding to omap dmtimer.
+ * @chip: PWM chip structure representing PWM controller
+ * @mutex: Mutex to protect pwm apply state
+ * @dm_timer: Pointer to omap dm timer.
+ * @pdata: Pointer to omap dm timer ops.
+ * @dm_timer_pdev: Pointer to omap dm timer platform device
+ */
struct pwm_omap_dmtimer_chip {
struct pwm_chip chip;
+ /* Mutex to protect pwm apply state */
struct mutex mutex;
- pwm_omap_dmtimer *dm_timer;
+ struct omap_dm_timer *dm_timer;
const struct omap_dm_timer_ops *pdata;
struct platform_device *dm_timer_pdev;
};
@@ -45,11 +75,22 @@ to_pwm_omap_dmtimer_chip(struct pwm_chip *chip)
return container_of(chip, struct pwm_omap_dmtimer_chip, chip);
}
+/**
+ * pwm_omap_dmtimer_get_clock_cycles() - Get clock cycles in a time frame
+ * @clk_rate: pwm timer clock rate
+ * @ns: time frame in nanoseconds.
+ *
+ * Return the number of clock cycles in a given period (in ns).
+ */
static u32 pwm_omap_dmtimer_get_clock_cycles(unsigned long clk_rate, int ns)
{
return DIV_ROUND_CLOSEST_ULL((u64)clk_rate * ns, NSEC_PER_SEC);
}
+/**
+ * pwm_omap_dmtimer_start() - Start the pwm omap dm timer in pwm mode
+ * @omap: Pointer to pwm omap dm timer chip
+ */
static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
{
/*
@@ -67,28 +108,46 @@ static void pwm_omap_dmtimer_start(struct pwm_omap_dmtimer_chip *omap)
omap->pdata->start(omap->dm_timer);
}
-static int pwm_omap_dmtimer_enable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+/**
+ * pwm_omap_dmtimer_is_enabled() - Detect if the pwm is enabled.
+ * @omap: Pointer to pwm omap dm timer chip
+ *
+ * Return true if pwm is enabled else false.
+ */
+static bool pwm_omap_dmtimer_is_enabled(struct pwm_omap_dmtimer_chip *omap)
{
- struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ u32 status;
- mutex_lock(&omap->mutex);
- pwm_omap_dmtimer_start(omap);
- mutex_unlock(&omap->mutex);
+ status = omap->pdata->get_pwm_status(omap->dm_timer);
- return 0;
+ return !!(status & OMAP_TIMER_CTRL_ST);
}
-static void pwm_omap_dmtimer_disable(struct pwm_chip *chip,
- struct pwm_device *pwm)
+/**
+ * pwm_omap_dmtimer_polarity() - Detect the polarity of pwm.
+ * @omap: Pointer to pwm omap dm timer chip
+ *
+ * Return the polarity of pwm.
+ */
+static int pwm_omap_dmtimer_polarity(struct pwm_omap_dmtimer_chip *omap)
{
- struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ u32 status;
- mutex_lock(&omap->mutex);
- omap->pdata->stop(omap->dm_timer);
- mutex_unlock(&omap->mutex);
+ status = omap->pdata->get_pwm_status(omap->dm_timer);
+
+ return !!(status & OMAP_TIMER_CTRL_SCPWM);
}
+/**
+ * pwm_omap_dmtimer_config() - Update the configuration of pwm omap dm timer
+ * @chip: Pointer to PWM controller
+ * @pwm: Pointer to PWM channel
+ * @duty_ns: New duty cycle in nanoseconds
+ * @period_ns: New period in nanoseconds
+ *
+ * Return 0 if the period/duty_cycle was changed successfully, else an
+ * appropriate error.
+ */
static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
struct pwm_device *pwm,
int duty_ns, int period_ns)
@@ -96,31 +155,26 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
u32 period_cycles, duty_cycles;
u32 load_value, match_value;
- struct clk *fclk;
unsigned long clk_rate;
- bool timer_active;
+ struct clk *fclk;
dev_dbg(chip->dev, "requested duty cycle: %d ns, period: %d ns\n",
duty_ns, period_ns);
- mutex_lock(&omap->mutex);
if (duty_ns == pwm_get_duty_cycle(pwm) &&
- period_ns == pwm_get_period(pwm)) {
- /* No change - don't cause any transients. */
- mutex_unlock(&omap->mutex);
+ period_ns == pwm_get_period(pwm))
return 0;
- }
fclk = omap->pdata->get_fclk(omap->dm_timer);
if (!fclk) {
dev_err(chip->dev, "invalid pmtimer fclk\n");
- goto err_einval;
+ return -EINVAL;
}
clk_rate = clk_get_rate(fclk);
if (!clk_rate) {
dev_err(chip->dev, "invalid pmtimer fclk rate\n");
- goto err_einval;
+ return -EINVAL;
}
dev_dbg(chip->dev, "clk rate: %luHz\n", clk_rate);
@@ -148,7 +202,7 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
dev_info(chip->dev,
"period %d ns too short for clock rate %lu Hz\n",
period_ns, clk_rate);
- goto err_einval;
+ return -EINVAL;
}
if (duty_cycles < 1) {
@@ -174,81 +228,103 @@ static int pwm_omap_dmtimer_config(struct pwm_chip *chip,
load_value = (DM_TIMER_MAX - period_cycles) + 1;
match_value = load_value + duty_cycles - 1;
- /*
- * We MUST stop the associated dual-mode timer before attempting to
- * write its registers, but calls to omap_dm_timer_start/stop must
- * be balanced so check if timer is active before calling timer_stop.
- */
- timer_active = pm_runtime_active(&omap->dm_timer_pdev->dev);
- if (timer_active)
- omap->pdata->stop(omap->dm_timer);
-
omap->pdata->set_load(omap->dm_timer, load_value);
omap->pdata->set_match(omap->dm_timer, true, match_value);
dev_dbg(chip->dev, "load value: %#08x (%d), match value: %#08x (%d)\n",
load_value, load_value, match_value, match_value);
- omap->pdata->set_pwm(omap->dm_timer,
- pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED,
- true,
- PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE,
- true);
-
- /* If config was called while timer was running it must be reenabled. */
- if (timer_active)
- pwm_omap_dmtimer_start(omap);
+ return 0;
+}
- mutex_unlock(&omap->mutex);
+/**
+ * pwm_omap_dmtimer_set_polarity() - Changes the polarity of the pwm dm timer.
+ * @chip: Pointer to PWM controller
+ * @pwm: Pointer to PWM channel
+ * @polarity: New pwm polarity to be set
+ */
+static void pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ enum pwm_polarity polarity)
+{
+ struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ bool enabled;
- return 0;
+ /* Disable the PWM before changing the polarity. */
+ enabled = pwm_omap_dmtimer_is_enabled(omap);
+ if (enabled)
+ omap->pdata->stop(omap->dm_timer);
-err_einval:
- mutex_unlock(&omap->mutex);
+ omap->pdata->set_pwm(omap->dm_timer,
+ polarity == PWM_POLARITY_INVERSED,
+ true, OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
+ true);
- return -EINVAL;
+ if (enabled)
+ pwm_omap_dmtimer_start(omap);
}
-static int pwm_omap_dmtimer_set_polarity(struct pwm_chip *chip,
- struct pwm_device *pwm,
- enum pwm_polarity polarity)
+/**
+ * pwm_omap_dmtimer_apply() - Changes the state of the pwm omap dm timer.
+ * @chip: Pointer to PWM controller
+ * @pwm: Pointer to PWM channel
+ * @state: New state to apply
+ *
+ * Return 0 if the state was changed successfully, else an appropriate error.
+ */
+static int pwm_omap_dmtimer_apply(struct pwm_chip *chip,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
{
struct pwm_omap_dmtimer_chip *omap = to_pwm_omap_dmtimer_chip(chip);
+ int ret = 0;
- /*
- * PWM core will not call set_polarity while PWM is enabled so it's
- * safe to reconfigure the timer here without stopping it first.
- */
mutex_lock(&omap->mutex);
- omap->pdata->set_pwm(omap->dm_timer,
- polarity == PWM_POLARITY_INVERSED,
- true,
- PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE,
- true);
+
+ if (pwm_omap_dmtimer_is_enabled(omap) && !state->enabled) {
+ omap->pdata->stop(omap->dm_timer);
+ goto unlock_mutex;
+ }
+
+ if (pwm_omap_dmtimer_polarity(omap) != state->polarity)
+ pwm_omap_dmtimer_set_polarity(chip, pwm, state->polarity);
+
+ ret = pwm_omap_dmtimer_config(chip, pwm, state->duty_cycle,
+ state->period);
+ if (ret)
+ goto unlock_mutex;
+
+ if (!pwm_omap_dmtimer_is_enabled(omap) && state->enabled) {
+ omap->pdata->set_pwm(omap->dm_timer,
+ state->polarity == PWM_POLARITY_INVERSED,
+ true,
+ OMAP_TIMER_TRIGGER_OVERFLOW_AND_COMPARE,
+ true);
+ pwm_omap_dmtimer_start(omap);
+ }
+
+unlock_mutex:
mutex_unlock(&omap->mutex);
- return 0;
+ return ret;
}
static const struct pwm_ops pwm_omap_dmtimer_ops = {
- .enable = pwm_omap_dmtimer_enable,
- .disable = pwm_omap_dmtimer_disable,
- .config = pwm_omap_dmtimer_config,
- .set_polarity = pwm_omap_dmtimer_set_polarity,
+ .apply = pwm_omap_dmtimer_apply,
.owner = THIS_MODULE,
};
static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct device_node *timer;
- struct platform_device *timer_pdev;
- struct pwm_omap_dmtimer_chip *omap;
struct dmtimer_platform_data *timer_pdata;
const struct omap_dm_timer_ops *pdata;
- pwm_omap_dmtimer *dm_timer;
- u32 v;
+ struct platform_device *timer_pdev;
+ struct pwm_omap_dmtimer_chip *omap;
+ struct omap_dm_timer *dm_timer;
+ struct device_node *timer;
int ret = 0;
+ u32 v;
timer = of_parse_phandle(np, "ti,timers", 0);
if (!timer)
@@ -281,6 +357,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
!pdata->set_load ||
!pdata->set_match ||
!pdata->set_pwm ||
+ !pdata->get_pwm_status ||
!pdata->set_prescaler ||
!pdata->write_counter) {
dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
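
The register math in pwm_omap_dmtimer_config() works backwards from the 32-bit counter overflow: the counter is loaded with DM_TIMER_MAX - period_cycles + 1 so that it overflows exactly once per period, and the compare match fires duty_cycles counts after the load point. A worked example with assumed numbers (32768 Hz fclk, 1 s period, 50 % duty):

	/*
	 * period_cycles = 32768 * 1.0 s          = 32768
	 * duty_cycles   = 32768 * 0.5 s          = 16384
	 *
	 * load_value  = (0xffffffff - 32768) + 1 = 0xffff8000
	 * match_value = load_value + 16384 - 1   = 0xffffbfff
	 *
	 * The counter runs from 0xffff8000 up to overflow (one period);
	 * with the overflow-and-compare trigger, the output toggles at
	 * the match and again at the overflow.
	 */
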
diff --git a/drivers/pwm/pwm-pca9685.c b/drivers/pwm/pwm-pca9685.c
index b07bdca3d510..76cd22bd6614 100644
--- a/drivers/pwm/pwm-pca9685.c
+++ b/drivers/pwm/pwm-pca9685.c
@@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
+#include <linux/bitmap.h>
/*
* Because the PCA9685 has only one prescaler per chip, changing the period of
@@ -69,11 +70,11 @@
struct pca9685 {
struct pwm_chip chip;
struct regmap *regmap;
- int duty_ns;
int period_ns;
#if IS_ENABLED(CONFIG_GPIOLIB)
struct mutex lock;
struct gpio_chip gpio;
+ DECLARE_BITMAP(pwms_inuse, PCA9685_MAXCHAN + 1);
#endif
};
@@ -83,51 +84,51 @@ static inline struct pca9685 *to_pca(struct pwm_chip *chip)
}
#if IS_ENABLED(CONFIG_GPIOLIB)
-static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
+static bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca, int pwm_idx)
{
- struct pca9685 *pca = gpiochip_get_data(gpio);
- struct pwm_device *pwm;
+ bool is_inuse;
mutex_lock(&pca->lock);
-
- pwm = &pca->chip.pwms[offset];
-
- if (pwm->flags & (PWMF_REQUESTED | PWMF_EXPORTED)) {
- mutex_unlock(&pca->lock);
- return -EBUSY;
+ if (pwm_idx >= PCA9685_MAXCHAN) {
+ /*
+ * "all LEDs" channel:
+ * pretend already in use if any of the PWMs are requested
+ */
+ if (!bitmap_empty(pca->pwms_inuse, PCA9685_MAXCHAN)) {
+ is_inuse = true;
+ goto out;
+ }
+ } else {
+ /*
+ * regular channel:
+ * pretend already in use if the "all LEDs" channel is requested
+ */
+ if (test_bit(PCA9685_MAXCHAN, pca->pwms_inuse)) {
+ is_inuse = true;
+ goto out;
+ }
}
-
- pwm_set_chip_data(pwm, (void *)1);
-
+ is_inuse = test_and_set_bit(pwm_idx, pca->pwms_inuse);
+out:
mutex_unlock(&pca->lock);
- pm_runtime_get_sync(pca->chip.dev);
- return 0;
+ return is_inuse;
}
-static bool pca9685_pwm_is_gpio(struct pca9685 *pca, struct pwm_device *pwm)
+static void pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
{
- bool is_gpio = false;
-
mutex_lock(&pca->lock);
+ clear_bit(pwm_idx, pca->pwms_inuse);
+ mutex_unlock(&pca->lock);
+}
- if (pwm->hwpwm >= PCA9685_MAXCHAN) {
- unsigned int i;
-
- /*
- * Check if any of the GPIOs are requested and in that case
- * prevent using the "all LEDs" channel.
- */
- for (i = 0; i < pca->gpio.ngpio; i++)
- if (gpiochip_is_requested(&pca->gpio, i)) {
- is_gpio = true;
- break;
- }
- } else if (pwm_get_chip_data(pwm)) {
- is_gpio = true;
- }
+static int pca9685_pwm_gpio_request(struct gpio_chip *gpio, unsigned int offset)
+{
+ struct pca9685 *pca = gpiochip_get_data(gpio);
- mutex_unlock(&pca->lock);
- return is_gpio;
+ if (pca9685_pwm_test_and_set_inuse(pca, offset))
+ return -EBUSY;
+ pm_runtime_get_sync(pca->chip.dev);
+ return 0;
}
static int pca9685_pwm_gpio_get(struct gpio_chip *gpio, unsigned int offset)
@@ -162,13 +163,14 @@ static void pca9685_pwm_gpio_free(struct gpio_chip *gpio, unsigned int offset)
pca9685_pwm_gpio_set(gpio, offset, 0);
pm_runtime_put(pca->chip.dev);
+ pca9685_pwm_clear_inuse(pca, offset);
}
static int pca9685_pwm_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
/* Always out */
- return 0;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int pca9685_pwm_gpio_direction_input(struct gpio_chip *gpio,
@@ -213,12 +215,17 @@ static int pca9685_pwm_gpio_probe(struct pca9685 *pca)
return devm_gpiochip_add_data(dev, &pca->gpio, pca);
}
#else
-static inline bool pca9685_pwm_is_gpio(struct pca9685 *pca,
- struct pwm_device *pwm)
+static inline bool pca9685_pwm_test_and_set_inuse(struct pca9685 *pca,
+ int pwm_idx)
{
return false;
}
+static inline void
+pca9685_pwm_clear_inuse(struct pca9685 *pca, int pwm_idx)
+{
+}
+
static inline int pca9685_pwm_gpio_probe(struct pca9685 *pca)
{
return 0;
@@ -272,8 +279,6 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
}
- pca->duty_ns = duty_ns;
-
if (duty_ns < 1) {
if (pwm->hwpwm >= PCA9685_MAXCHAN)
reg = PCA9685_ALL_LED_OFF_H;
@@ -402,7 +407,7 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct pca9685 *pca = to_pca(chip);
- if (pca9685_pwm_is_gpio(pca, pwm))
+ if (pca9685_pwm_test_and_set_inuse(pca, pwm->hwpwm))
return -EBUSY;
pm_runtime_get_sync(chip->dev);
@@ -411,8 +416,11 @@ static int pca9685_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
static void pca9685_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
+ struct pca9685 *pca = to_pca(chip);
+
pca9685_pwm_disable(chip, pwm);
pm_runtime_put(chip->dev);
+ pca9685_pwm_clear_inuse(pca, pwm->hwpwm);
}
static const struct pwm_ops pca9685_pwm_ops = {
@@ -449,7 +457,6 @@ static int pca9685_pwm_probe(struct i2c_client *client,
ret);
return ret;
}
- pca->duty_ns = 0;
pca->period_ns = PCA9685_DEFAULT_PERIOD;
i2c_set_clientdata(client, pca);
@@ -512,8 +519,7 @@ static int pca9685_pwm_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int pca9685_pwm_runtime_suspend(struct device *dev)
+static int __maybe_unused pca9685_pwm_runtime_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pca9685 *pca = i2c_get_clientdata(client);
@@ -522,7 +528,7 @@ static int pca9685_pwm_runtime_suspend(struct device *dev)
return 0;
}
-static int pca9685_pwm_runtime_resume(struct device *dev)
+static int __maybe_unused pca9685_pwm_runtime_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct pca9685 *pca = i2c_get_clientdata(client);
@@ -530,7 +536,6 @@ static int pca9685_pwm_runtime_resume(struct device *dev)
pca9685_set_sleep_mode(pca, false);
return 0;
}
-#endif
static const struct i2c_device_id pca9685_id[] = {
{ "pca9685", 0 },
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 2685577b6dd4..7ab9eb6616d9 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -229,24 +229,28 @@ static int rcar_pwm_probe(struct platform_device *pdev)
rcar_pwm->chip.base = -1;
rcar_pwm->chip.npwm = 1;
+ pm_runtime_enable(&pdev->dev);
+
ret = pwmchip_add(&rcar_pwm->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip: %d\n", ret);
+ pm_runtime_disable(&pdev->dev);
return ret;
}
- pm_runtime_enable(&pdev->dev);
-
return 0;
}
static int rcar_pwm_remove(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pwmchip_remove(&rcar_pwm->chip);
pm_runtime_disable(&pdev->dev);
- return pwmchip_remove(&rcar_pwm->chip);
+ return ret;
}
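
The reordering above follows the usual rule that resources are set up before the object that uses them becomes visible, and torn down in reverse. A sketch of the shape both probe paths now share (the foo_* names are placeholders):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>
	#include <linux/pwm.h>

	static struct pwm_chip chip;	/* placeholder, initialized elsewhere */

	static int foo_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);	/* before the chip is visible */

		ret = pwmchip_add(&chip);
		if (ret < 0) {
			pm_runtime_disable(&pdev->dev);	/* unwind on failure */
			return ret;
		}

		return 0;
	}

	static int foo_remove(struct platform_device *pdev)
	{
		int ret = pwmchip_remove(&chip);	/* reverse order */

		pm_runtime_disable(&pdev->dev);
		return ret;
	}
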
static const struct of_device_id rcar_pwm_of_table[] = {
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 4a855a21b782..81ad5a551455 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -415,16 +415,15 @@ static int tpu_probe(struct platform_device *pdev)
tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
+ pm_runtime_enable(&pdev->dev);
+
ret = pwmchip_add(&tpu->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register PWM chip\n");
+ pm_runtime_disable(&pdev->dev);
return ret;
}
- dev_info(&pdev->dev, "TPU PWM %d registered\n", tpu->pdev->id);
-
- pm_runtime_enable(&pdev->dev);
-
return 0;
}
@@ -434,12 +433,10 @@ static int tpu_remove(struct platform_device *pdev)
int ret;
ret = pwmchip_remove(&tpu->chip);
- if (ret)
- return ret;
pm_runtime_disable(&pdev->dev);
- return 0;
+ return ret;
}
#ifdef CONFIG_OF
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 3e3efa6c768f..5c677c563349 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -90,7 +90,6 @@ struct sun4i_pwm_chip {
spinlock_t ctrl_lock;
const struct sun4i_pwm_data *data;
unsigned long next_period[2];
- bool needs_delay[2];
};
static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip)
@@ -287,7 +286,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm));
sun4i_pwm->next_period[pwm->hwpwm] = jiffies +
usecs_to_jiffies(cstate.period / 1000 + 1);
- sun4i_pwm->needs_delay[pwm->hwpwm] = true;
if (state->polarity != PWM_POLARITY_NORMAL)
ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm);
@@ -298,7 +296,7 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled) {
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
- } else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
+ } else {
ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
}
@@ -310,15 +308,9 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
if (state->enabled)
return 0;
- if (!sun4i_pwm->needs_delay[pwm->hwpwm]) {
- clk_disable_unprepare(sun4i_pwm->clk);
- return 0;
- }
-
/* We need a full period to elapse before disabling the channel. */
now = jiffies;
- if (sun4i_pwm->needs_delay[pwm->hwpwm] &&
- time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
+ if (time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) {
delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] -
now);
if ((delay_us / 500) > MAX_UDELAY_MS)
@@ -326,7 +318,6 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
else
usleep_range(delay_us, delay_us * 2);
}
- sun4i_pwm->needs_delay[pwm->hwpwm] = false;
spin_lock(&sun4i_pwm->ctrl_lock);
ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG);
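
With the needs_delay flag gone, the disable path relies solely on the next_period timestamp recorded at apply time. A condensed sketch of that wait, where period_us stands in for the applied period in microseconds:

	#include <linux/delay.h>
	#include <linux/jiffies.h>

	unsigned long next_period;

	/* at apply time: */
	next_period = jiffies + usecs_to_jiffies(period_us + 1);

	/* at disable time, sleep out the remainder of the period: */
	if (time_before(jiffies, next_period)) {
		unsigned int remain = jiffies_to_usecs(next_period - jiffies);

		usleep_range(remain, remain * 2);
	}
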
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index aa12fb3ed92e..d26ed8f579ff 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -282,9 +282,15 @@ static const struct tegra_pwm_soc tegra186_pwm_soc = {
.max_frequency = 102000000UL,
};
+static const struct tegra_pwm_soc tegra194_pwm_soc = {
+ .num_channels = 1,
+ .max_frequency = 408000000UL,
+};
+
static const struct of_device_id tegra_pwm_of_match[] = {
{ .compatible = "nvidia,tegra20-pwm", .data = &tegra20_pwm_soc },
{ .compatible = "nvidia,tegra186-pwm", .data = &tegra186_pwm_soc },
+ { .compatible = "nvidia,tegra194-pwm", .data = &tegra194_pwm_soc },
{ }
};
MODULE_DEVICE_TABLE(of, tegra_pwm_of_match);
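
For reference, per-SoC data attached through the .data member of an of_device_id table is typically fetched at probe time as in this sketch (the probe excerpt is illustrative, not the driver's exact code):

	#include <linux/of_device.h>

	static int probe_sketch(struct platform_device *pdev)
	{
		const struct tegra_pwm_soc *soc;

		soc = of_device_get_match_data(&pdev->dev);
		if (!soc)
			return -ENODEV;

		/* soc->num_channels and soc->max_frequency now describe
		 * the matched compatible, e.g. nvidia,tegra194-pwm */
		return 0;
	}
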
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f942a3302cdc..ec873f09c763 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -590,6 +590,16 @@ config RTC_DRV_RC5T583
This driver can also be built as a module. If so, the module
will be called rtc-rc5t583.
+config RTC_DRV_RC5T619
+ tristate "RICOH RC5T619 RTC driver"
+ depends on MFD_RN5T618
+ help
+ If you say yes here you get support for the RTC on the
+ RICOH RC5T619 chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-rc5t619.
+
config RTC_DRV_S35390A
tristate "Seiko Instruments S-35390A"
select BITREVERSE
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 3b66ee99063f..0721752c6ed4 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -133,6 +133,7 @@ obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o
obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o
+obj-$(CONFIG_RTC_DRV_RC5T619) += rtc-rc5t619.o
obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o
obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
new file mode 100644
index 000000000000..24e386ecbc7e
--- /dev/null
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -0,0 +1,444 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * drivers/rtc/rtc-rc5t619.c
+ *
+ * Real time clock driver for RICOH RC5T619 power management chip.
+ *
+ * Copyright (C) 2019 Andreas Kemnade
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mfd/rn5t618.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/bcd.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/irqdomain.h>
+
+struct rc5t619_rtc {
+ int irq;
+ struct rtc_device *rtc;
+ struct rn5t618 *rn5t618;
+};
+
+#define CTRL1_ALARM_ENABLED 0x40
+#define CTRL1_24HR 0x20
+#define CTRL1_PERIODIC_MASK 0xf
+
+#define CTRL2_PON 0x10
+#define CTRL2_ALARM_STATUS 0x80
+#define CTRL2_CTFG 0x4
+#define CTRL2_CTC 0x1
+
+#define MONTH_CENTFLAG 0x80
+#define HOUR_PMFLAG 0x20
+#define MDAY_DAL_EXT 0x80
+
+static uint8_t rc5t619_12hour_bcd2bin(uint8_t hour)
+{
+ if (hour & HOUR_PMFLAG) {
+ hour = bcd2bin(hour & ~HOUR_PMFLAG);
+ return hour == 12 ? 12 : 12 + hour;
+ }
+
+ hour = bcd2bin(hour);
+ return hour == 12 ? 0 : hour;
+}
+
+static uint8_t rc5t619_12hour_bin2bcd(uint8_t hour)
+{
+ if (!hour)
+ return 0x12;
+
+ if (hour < 12)
+ return bin2bcd(hour);
+
+ if (hour == 12)
+ return 0x12 | HOUR_PMFLAG;
+
+ return bin2bcd(hour - 12) | HOUR_PMFLAG;
+}
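
A few spot checks of the 12-hour mapping implemented by the two helpers above (HOUR_PMFLAG is bit 5, 0x20):

	/*
	 *	rc5t619_12hour_bin2bcd(0)  == 0x12		midnight -> 12 AM
	 *	rc5t619_12hour_bin2bcd(9)  == 0x09		9 AM
	 *	rc5t619_12hour_bin2bcd(12) == 0x12 | 0x20	noon -> 12 PM
	 *	rc5t619_12hour_bin2bcd(23) == 0x11 | 0x20	11 PM
	 *	rc5t619_12hour_bcd2bin(0x12)        == 0	and back
	 *	rc5t619_12hour_bcd2bin(0x11 | 0x20) == 23
	 */
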
+
+static int rc5t619_rtc_periodic_disable(struct device *dev)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+
+ /* disable function */
+ err = regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL1, CTRL1_PERIODIC_MASK, 0);
+ if (err < 0)
+ return err;
+
+ /* clear alarm flag and CTFG */
+ err = regmap_update_bits(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2,
+ CTRL2_ALARM_STATUS | CTRL2_CTFG | CTRL2_CTC,
+ 0);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+/* things to be done once after power on */
+static int rc5t619_rtc_pon_setup(struct device *dev)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ int err;
+ unsigned int reg_data;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &reg_data);
+ if (err < 0)
+ return err;
+
+ /* clear VDET PON */
+ reg_data &= ~(CTRL2_PON | CTRL2_CTC | 0x4a); /* 0101-1011 */
+ reg_data |= 0x20; /* 0010-0000 */
+ err = regmap_write(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, reg_data);
+ if (err < 0)
+ return err;
+
+ /* clearing RTC Adjust register */
+ err = regmap_write(rtc->rn5t618->regmap, RN5T618_RTC_ADJUST, 0);
+ if (err)
+ return err;
+
+ return regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL1,
+ CTRL1_24HR, CTRL1_24HR);
+}
+
+static int rc5t619_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[7];
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+ unsigned int ctrl2;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2);
+ if (err < 0)
+ return err;
+
+ if (ctrl2 & CTRL2_PON)
+ return -EINVAL;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err < 0)
+ return err;
+
+ err = regmap_bulk_read(rtc->rn5t618->regmap, RN5T618_RTC_SECONDS,
+ buff, sizeof(buff));
+ if (err < 0)
+ return err;
+
+ if (buff[5] & MONTH_CENTFLAG)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ tm->tm_sec = bcd2bin(buff[0]);
+ tm->tm_min = bcd2bin(buff[1]);
+
+ if (ctrl1 & CTRL1_24HR)
+ tm->tm_hour = bcd2bin(buff[2]);
+ else
+ tm->tm_hour = rc5t619_12hour_bcd2bin(buff[2]);
+
+ tm->tm_wday = bcd2bin(buff[3]);
+ tm->tm_mday = bcd2bin(buff[4]);
+ tm->tm_mon = bcd2bin(buff[5] & 0x1f) - 1; /* hardware is 1-12, tm_mon is 0-11 */
+ tm->tm_year = bcd2bin(buff[6]) + 100 * cent_flag;
+
+ return 0;
+}
+
+static int rc5t619_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[7];
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+ unsigned int ctrl2;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2);
+ if (err < 0)
+ return err;
+
+ if (ctrl2 & CTRL2_PON)
+ rc5t619_rtc_pon_setup(dev);
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err < 0)
+ return err;
+
+ if (tm->tm_year >= 100)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ buff[0] = bin2bcd(tm->tm_sec);
+ buff[1] = bin2bcd(tm->tm_min);
+
+ if (ctrl1 & CTRL1_24HR)
+ buff[2] = bin2bcd(tm->tm_hour);
+ else
+ buff[2] = rc5t619_12hour_bin2bcd(tm->tm_hour);
+
+ buff[3] = bin2bcd(tm->tm_wday);
+ buff[4] = bin2bcd(tm->tm_mday);
+ buff[5] = bin2bcd(tm->tm_mon + 1); /* tm_mon is 0-11, hardware wants 1-12 */
+ buff[6] = bin2bcd(tm->tm_year - cent_flag * 100);
+
+ if (cent_flag)
+ buff[5] |= MONTH_CENTFLAG;
+
+ err = regmap_bulk_write(rtc->rn5t618->regmap, RN5T618_RTC_SECONDS,
+ buff, sizeof(buff));
+ if (err < 0) {
+ dev_err(dev, "failed to program new time: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+/* 0-disable, 1-enable */
+static int rc5t619_rtc_alarm_enable(struct device *dev, unsigned int enabled)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+
+ return regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL1,
+ CTRL1_ALARM_ENABLED,
+ enabled ? CTRL1_ALARM_ENABLED : 0);
+}
+
+static int rc5t619_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[6];
+ unsigned int buff_cent;
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err)
+ return err;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_MONTH, &buff_cent);
+ if (err < 0) {
+ dev_err(dev, "failed to read time: %d\n", err);
+ return err;
+ }
+
+ if (buff_cent & MONTH_CENTFLAG)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ err = regmap_bulk_read(rtc->rn5t618->regmap, RN5T618_RTC_ALARM_Y_SEC,
+ buff, sizeof(buff));
+ if (err)
+ return err;
+
+ buff[3] = buff[3] & 0x3f;
+
+ alrm->time.tm_sec = bcd2bin(buff[0]);
+ alrm->time.tm_min = bcd2bin(buff[1]);
+
+ if (ctrl1 & CTRL1_24HR)
+ alrm->time.tm_hour = bcd2bin(buff[2]);
+ else
+ alrm->time.tm_hour = rc5t619_12hour_bcd2bin(buff[2]);
+
+ alrm->time.tm_mday = bcd2bin(buff[3]);
+ alrm->time.tm_mon = bcd2bin(buff[4]) - 1;
+ alrm->time.tm_year = bcd2bin(buff[5]) + 100 * cent_flag;
+ alrm->enabled = !!(ctrl1 & CTRL1_ALARM_ENABLED);
+ dev_dbg(dev, "read alarm: %ptR\n", &alrm->time);
+
+ return 0;
+}
+
+static int rc5t619_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+ u8 buff[6];
+ int err;
+ int cent_flag;
+ unsigned int ctrl1;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1);
+ if (err)
+ return err;
+
+ err = rc5t619_rtc_alarm_enable(dev, 0);
+ if (err < 0)
+ return err;
+
+ if (rtc->irq == -1)
+ return -EINVAL;
+
+ if (alrm->enabled == 0)
+ return 0;
+
+ if (alrm->time.tm_year >= 100)
+ cent_flag = 1;
+ else
+ cent_flag = 0;
+
+ alrm->time.tm_mon += 1;
+ buff[0] = bin2bcd(alrm->time.tm_sec);
+ buff[1] = bin2bcd(alrm->time.tm_min);
+
+ if (ctrl1 & CTRL1_24HR)
+ buff[2] = bin2bcd(alrm->time.tm_hour);
+ else
+ buff[2] = rc5t619_12hour_bin2bcd(alrm->time.tm_hour);
+
+ buff[3] = bin2bcd(alrm->time.tm_mday);
+ buff[4] = bin2bcd(alrm->time.tm_mon);
+ buff[5] = bin2bcd(alrm->time.tm_year - 100 * cent_flag);
+ buff[3] |= MDAY_DAL_EXT;
+
+ err = regmap_bulk_write(rtc->rn5t618->regmap, RN5T618_RTC_ALARM_Y_SEC,
+ buff, sizeof(buff));
+ if (err < 0)
+ return err;
+
+ return rc5t619_rtc_alarm_enable(dev, alrm->enabled);
+}
+
+static const struct rtc_class_ops rc5t619_rtc_ops = {
+ .read_time = rc5t619_rtc_read_time,
+ .set_time = rc5t619_rtc_set_time,
+ .set_alarm = rc5t619_rtc_set_alarm,
+ .read_alarm = rc5t619_rtc_read_alarm,
+ .alarm_irq_enable = rc5t619_rtc_alarm_enable,
+};
+
+static int rc5t619_rtc_alarm_flag_clr(struct device *dev)
+{
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+
+ /* clear alarm-D status bits */
+ return regmap_update_bits(rtc->rn5t618->regmap,
+ RN5T618_RTC_CTRL2,
+ CTRL2_ALARM_STATUS | CTRL2_CTC, 0);
+}
+
+static irqreturn_t rc5t619_rtc_irq(int irq, void *data)
+{
+ struct device *dev = data;
+ struct rc5t619_rtc *rtc = dev_get_drvdata(dev);
+
+ rc5t619_rtc_alarm_flag_clr(dev);
+
+ rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF);
+ return IRQ_HANDLED;
+}
+
+static int rc5t619_rtc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
+ struct rc5t619_rtc *rtc;
+ unsigned int ctrl2;
+ int err;
+
+ rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL);
+ if (!rtc)
+ return -ENOMEM;
+
+ rtc->rn5t618 = rn5t618;
+
+ dev_set_drvdata(dev, rtc);
+ rtc->irq = -1;
+
+ if (rn5t618->irq_data)
+ rtc->irq = regmap_irq_get_virq(rn5t618->irq_data,
+ RN5T618_IRQ_RTC);
+
+ if (rtc->irq < 0)
+ rtc->irq = -1;
+
+ err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2);
+ if (err < 0)
+ return err;
+
+ /* disable rtc periodic function */
+ err = rc5t619_rtc_periodic_disable(&pdev->dev);
+ if (err)
+ return err;
+
+ if (ctrl2 & CTRL2_PON) {
+ err = rc5t619_rtc_alarm_flag_clr(&pdev->dev);
+ if (err)
+ return err;
+ }
+
+ rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
+ if (IS_ERR(rtc->rtc)) {
+ err = PTR_ERR(rtc->rtc);
+ dev_err(dev, "RTC device register: err %d\n", err);
+ return err;
+ }
+
+ rtc->rtc->ops = &rc5t619_rtc_ops;
+ rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
+ rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ /* set interrupt and enable it */
+ if (rtc->irq != -1) {
+ err = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL,
+ rc5t619_rtc_irq,
+ IRQF_ONESHOT,
+ "rtc-rc5t619",
+ &pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request IRQ:%d fail\n", rtc->irq);
+ rtc->irq = -1;
+
+ err = rc5t619_rtc_alarm_enable(&pdev->dev, 0);
+ if (err)
+ return err;
+
+ } else {
+ /* enable wake */
+ device_init_wakeup(&pdev->dev, 1);
+ enable_irq_wake(rtc->irq);
+ }
+ } else {
+ /* the system does not use the alarm interrupt, so keep it disabled */
+ err = rc5t619_rtc_alarm_enable(&pdev->dev, 0);
+ if (err)
+ return err;
+
+ dev_warn(&pdev->dev, "rc5t619 interrupt is disabled\n");
+ }
+
+ return rtc_register_device(rtc->rtc);
+}
+
+static struct platform_driver rc5t619_rtc_driver = {
+ .driver = {
+ .name = "rc5t619-rtc",
+ },
+ .probe = rc5t619_rtc_probe,
+};
+
+module_platform_driver(rc5t619_rtc_driver);
+MODULE_ALIAS("platform:rc5t619-rtc");
+MODULE_DESCRIPTION("RICOH RC5T619 RTC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 80d22290f268..384edffe5cb4 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -57,11 +57,26 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev,
return copy_to_iter(addr, bytes, i);
}
+static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev,
+ pgoff_t pgoff, size_t nr_pages)
+{
+ long rc;
+ void *kaddr;
+
+ rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL);
+ if (rc < 0)
+ return rc;
+ memset(kaddr, 0, nr_pages << PAGE_SHIFT);
+ dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT);
+ return 0;
+}
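
Callers reach this op through the DAX core rather than directly; a minimal caller-side sketch, where dax_dev and pgoff are placeholders:

	/* zero one page of the DAX device at page offset pgoff */
	rc = dax_zero_page_range(dax_dev, pgoff, 1);
	if (rc)
		return rc;
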
+
static const struct dax_operations dcssblk_dax_ops = {
.direct_access = dcssblk_dax_direct_access,
.dax_supported = generic_fsdax_supported,
.copy_from_iter = dcssblk_dax_copy_from_iter,
.copy_to_iter = dcssblk_dax_copy_to_iter,
+ .zero_page_range = dcssblk_dax_zero_page_range,
};
struct dcssblk_dev_info {
@@ -680,8 +695,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name,
&dcssblk_dax_ops, DAXDEV_F_SYNC);
- if (!dev_info->dax_dev) {
- rc = -ENOMEM;
+ if (IS_ERR(dev_info->dax_dev)) {
+ rc = PTR_ERR(dev_info->dax_dev);
+ dev_info->dax_dev = NULL;
goto put_dev;
}
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 50007cb9be5b..b29fe8d50baf 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -849,8 +849,10 @@ static void io_subchannel_register(struct ccw_device *cdev)
* Now we know this subchannel will stay, we can throw
* our delayed uevent.
*/
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
/* make it known to the system */
ret = ccw_device_add(cdev);
if (ret) {
@@ -1058,8 +1060,11 @@ static int io_subchannel_probe(struct subchannel *sch)
* Throw the delayed uevent for the subchannel, register
* the ccw_device and exit.
*/
- dev_set_uevent_suppress(&sch->dev, 0);
- kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ /* should always be the case for the console */
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
cdev = sch_get_cdev(sch);
rc = ccw_device_add(cdev);
if (rc) {
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index b0beafc43d37..b8453b594679 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -374,7 +374,6 @@ int tiqdio_allocate_memory(void);
void tiqdio_free_memory(void);
int tiqdio_register_thinints(void);
void tiqdio_unregister_thinints(void);
-void clear_nonshared_ind(struct qdio_irq *);
int test_nonshared_ind(struct qdio_irq *);
/* prototypes for setup */
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 5a3d9ee90a7f..286b044fb027 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -58,25 +58,11 @@ static void qdio_clear_dbf_list(void)
mutex_unlock(&qdio_dbf_list_mutex);
}
-int qdio_allocate_dbf(struct qdio_initialize *init_data,
- struct qdio_irq *irq_ptr)
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr)
{
char text[QDIO_DBF_NAME_LEN];
struct qdio_dbf_entry *new_entry;
- DBF_EVENT("qfmt:%1d", init_data->q_format);
- DBF_HEX(init_data->adapter_name, 8);
- DBF_EVENT("qpff%4x", init_data->qib_param_field_format);
- DBF_HEX(&init_data->qib_param_field, sizeof(void *));
- DBF_HEX(&init_data->input_slib_elements, sizeof(void *));
- DBF_HEX(&init_data->output_slib_elements, sizeof(void *));
- DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs,
- init_data->no_output_qs);
- DBF_HEX(&init_data->input_handler, sizeof(void *));
- DBF_HEX(&init_data->output_handler, sizeof(void *));
- DBF_HEX(&init_data->int_parm, sizeof(long));
- DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *));
- DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *));
DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
/* allocate trace view for the interface */
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h
index 122450ba6b90..0dfba085f360 100644
--- a/drivers/s390/cio/qdio_debug.h
+++ b/drivers/s390/cio/qdio_debug.h
@@ -64,8 +64,7 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
debug_event(dev->debug_area, level, addr, len);
}
-int qdio_allocate_dbf(struct qdio_initialize *init_data,
- struct qdio_irq *irq_ptr);
+int qdio_allocate_dbf(struct qdio_irq *irq_ptr);
void qdio_setup_debug_entries(struct qdio_irq *irq_ptr);
void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr);
int qdio_debug_init(void);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c890848064fe..bcc3ab14e72d 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1220,27 +1220,21 @@ EXPORT_SYMBOL_GPL(qdio_free);
/**
* qdio_allocate - allocate qdio queues and associated data
- * @init_data: initialization data
+ * @cdev: associated ccw device
+ * @no_input_qs: allocate this number of Input Queues
+ * @no_output_qs: allocate this number of Output Queues
*/
-int qdio_allocate(struct qdio_initialize *init_data)
+int qdio_allocate(struct ccw_device *cdev, unsigned int no_input_qs,
+ unsigned int no_output_qs)
{
- struct ccw_device *cdev = init_data->cdev;
struct subchannel_id schid;
struct qdio_irq *irq_ptr;
ccw_device_get_schid(cdev, &schid);
DBF_EVENT("qallocate:%4x", schid.sch_no);
- if ((init_data->no_input_qs && !init_data->input_handler) ||
- (init_data->no_output_qs && !init_data->output_handler))
- return -EINVAL;
-
- if ((init_data->no_input_qs > QDIO_MAX_QUEUES_PER_IRQ) ||
- (init_data->no_output_qs > QDIO_MAX_QUEUES_PER_IRQ))
- return -EINVAL;
-
- if ((!init_data->input_sbal_addr_array) ||
- (!init_data->output_sbal_addr_array))
+ if (no_input_qs > QDIO_MAX_QUEUES_PER_IRQ ||
+ no_output_qs > QDIO_MAX_QUEUES_PER_IRQ)
return -EINVAL;
/* irq_ptr must be in GFP_DMA since it contains ccw1.cda */
@@ -1250,9 +1244,12 @@ int qdio_allocate(struct qdio_initialize *init_data)
irq_ptr->cdev = cdev;
mutex_init(&irq_ptr->setup_mutex);
- if (qdio_allocate_dbf(init_data, irq_ptr))
+ if (qdio_allocate_dbf(irq_ptr))
goto out_rel;
+ DBF_DEV_EVENT(DBF_ERR, irq_ptr, "alloc niq:%1u noq:%1u", no_input_qs,
+ no_output_qs);
+
/*
* Allocate a page for the chsc calls in qdio_establish.
* Must be pre-allocated since a zfcp recovery will call
@@ -1268,8 +1265,7 @@ int qdio_allocate(struct qdio_initialize *init_data)
if (!irq_ptr->qdr)
goto out_rel;
- if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs,
- init_data->no_output_qs))
+ if (qdio_allocate_qs(irq_ptr, no_input_qs, no_output_qs))
goto out_rel;
INIT_LIST_HEAD(&irq_ptr->entry);
@@ -1305,13 +1301,33 @@ static void qdio_detect_hsicq(struct qdio_irq *irq_ptr)
DBF_EVENT("use_cq:%d", use_cq);
}
+static void qdio_trace_init_data(struct qdio_irq *irq,
+ struct qdio_initialize *data)
+{
+ DBF_DEV_EVENT(DBF_ERR, irq, "qfmt:%1u", data->q_format);
+ DBF_DEV_HEX(irq, data->adapter_name, 8, DBF_ERR);
+ DBF_DEV_EVENT(DBF_ERR, irq, "qpff%4x", data->qib_param_field_format);
+ DBF_DEV_HEX(irq, &data->qib_param_field, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_slib_elements, sizeof(void *), DBF_ERR);
+ DBF_DEV_EVENT(DBF_ERR, irq, "niq:%1u noq:%1u", data->no_input_qs,
+ data->no_output_qs);
+ DBF_DEV_HEX(irq, &data->input_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_handler, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->int_parm, sizeof(long), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->input_sbal_addr_array, sizeof(void *), DBF_ERR);
+ DBF_DEV_HEX(irq, &data->output_sbal_addr_array, sizeof(void *),
+ DBF_ERR);
+}
+
/**
* qdio_establish - establish queues on a qdio subchannel
+ * @cdev: associated ccw device
* @init_data: initialization data
*/
-int qdio_establish(struct qdio_initialize *init_data)
+int qdio_establish(struct ccw_device *cdev,
+ struct qdio_initialize *init_data)
{
- struct ccw_device *cdev = init_data->cdev;
struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct subchannel_id schid;
int rc;
@@ -1322,7 +1338,16 @@ int qdio_establish(struct qdio_initialize *init_data)
if (!irq_ptr)
return -ENODEV;
+ if ((init_data->no_input_qs && !init_data->input_handler) ||
+ (init_data->no_output_qs && !init_data->output_handler))
+ return -EINVAL;
+
+ if (!init_data->input_sbal_addr_array ||
+ !init_data->output_sbal_addr_array)
+ return -EINVAL;
+
mutex_lock(&irq_ptr->setup_mutex);
+ qdio_trace_init_data(irq_ptr, init_data);
qdio_setup_irq(irq_ptr, init_data);
rc = qdio_establish_thinint(irq_ptr);
@@ -1618,8 +1643,6 @@ int qdio_start_irq(struct ccw_device *cdev)
if (!irq_ptr)
return -ENODEV;
- clear_nonshared_ind(irq_ptr);
-
for_each_input_queue(irq_ptr, q, i)
qdio_stop_polling(q);
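
Taken together, the qdio rework splits setup into a size-only allocation step and a later establish step that carries the handlers and buffer arrays. A condensed sketch of the new calling convention for one input and one output queue (the my_* handlers and the sbals arrays are placeholders):

	struct qdio_initialize init_data = {0};
	int rc;

	rc = qdio_allocate(cdev, 1, 1);		/* queue counts fixed here */
	if (rc)
		return rc;

	init_data.no_input_qs = 1;
	init_data.no_output_qs = 1;
	init_data.input_handler = my_input_handler;
	init_data.output_handler = my_output_handler;
	init_data.input_sbal_addr_array = input_sbals;
	init_data.output_sbal_addr_array = output_sbals;

	rc = qdio_establish(cdev, &init_data);	/* handlers checked here */
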
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index bbbefc9f9e04..3083edd61f0c 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -213,8 +213,6 @@ static void setup_queues(struct qdio_irq *irq_ptr,
struct qdio_initialize *qdio_init)
{
struct qdio_q *q;
- struct qdio_buffer **input_sbal_array = qdio_init->input_sbal_addr_array;
- struct qdio_buffer **output_sbal_array = qdio_init->output_sbal_addr_array;
struct qdio_outbuf_state *output_sbal_state_array =
qdio_init->output_sbal_state_array;
int i;
@@ -225,8 +223,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
q->is_input_q = 1;
- setup_storage_lists(q, irq_ptr, input_sbal_array, i);
- input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+ setup_storage_lists(q, irq_ptr,
+ qdio_init->input_sbal_addr_array[i], i);
if (is_thinint_irq(irq_ptr)) {
tasklet_init(&q->tasklet, tiqdio_inbound_processing,
@@ -245,8 +243,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;
q->is_input_q = 0;
- setup_storage_lists(q, irq_ptr, output_sbal_array, i);
- output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
+ setup_storage_lists(q, irq_ptr,
+ qdio_init->output_sbal_addr_array[i], i);
tasklet_init(&q->tasklet, qdio_outbound_processing,
(unsigned long) q);
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index ea09df7209f0..ae50373617cd 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -82,36 +82,16 @@ void tiqdio_remove_device(struct qdio_irq *irq_ptr)
INIT_LIST_HEAD(&irq_ptr->entry);
}
-static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
-{
- return irq_ptr->nr_input_qs > 1;
-}
-
static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
-static inline int shared_ind(struct qdio_irq *irq_ptr)
-{
- return references_shared_dsci(irq_ptr) ||
- has_multiple_inq_on_dsci(irq_ptr);
-}
-
-void clear_nonshared_ind(struct qdio_irq *irq_ptr)
-{
- if (!is_thinint_irq(irq_ptr))
- return;
- if (shared_ind(irq_ptr))
- return;
- xchg(irq_ptr->dsci, 0);
-}
-
int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
if (!is_thinint_irq(irq_ptr))
return 0;
- if (shared_ind(irq_ptr))
+ if (references_shared_dsci(irq_ptr))
return 0;
if (*irq_ptr->dsci)
return 1;
@@ -131,8 +111,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
struct qdio_q *q;
int i;
- if (!references_shared_dsci(irq) &&
- has_multiple_inq_on_dsci(irq))
+ if (!references_shared_dsci(irq))
xchg(irq->dsci, 0);
if (irq->irq_poll) {
@@ -145,9 +124,6 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
}
for_each_input_queue(irq, q, i) {
- if (!shared_ind(irq))
- xchg(irq->dsci, 0);
-
/*
* Call inbound processing but not directly
* since that could starve other thinint queues.
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index e401a3d0aa57..339a6bc0339b 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -167,6 +167,11 @@ static int vfio_ccw_sch_probe(struct subchannel *sch)
if (ret)
goto out_disable;
+ if (dev_get_uevent_suppress(&sch->dev)) {
+ dev_set_uevent_suppress(&sch->dev, 0);
+ kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
+ }
+
VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
sch->schid.sch_no);
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index acda230323d5..e0b26310ecab 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -181,11 +181,12 @@ struct qeth_vnicc_info {
/*****************************************************************************/
/* QDIO queue and buffer handling */
/*****************************************************************************/
-#define QETH_MAX_QUEUES 4
+#define QETH_MAX_OUT_QUEUES 4
#define QETH_IQD_MIN_TXQ 2 /* One for ucast, one for mcast. */
#define QETH_IQD_MCAST_TXQ 0
#define QETH_IQD_MIN_UCAST_TXQ 1
+#define QETH_MAX_IN_QUEUES 2
#define QETH_RX_COPYBREAK (PAGE_SIZE >> 1)
#define QETH_IN_BUF_SIZE_DEFAULT 65536
#define QETH_IN_BUF_COUNT_DEFAULT 64
@@ -539,7 +540,7 @@ struct qeth_qdio_info {
/* output */
int no_out_queues;
- struct qeth_qdio_out_q *out_qs[QETH_MAX_QUEUES];
+ struct qeth_qdio_out_q *out_qs[QETH_MAX_OUT_QUEUES];
struct qdio_outbuf_state *out_bufstates;
/* priority queueing */
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 24fd17b347fe..f7689461c242 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -4812,28 +4812,13 @@ out:
return;
}
-static void qeth_qdio_establish_cq(struct qeth_card *card,
- struct qdio_buffer **in_sbal_ptrs)
-{
- int i;
-
- if (card->options.cq == QETH_CQ_ENABLED) {
- int offset = QDIO_MAX_BUFFERS_PER_Q *
- (card->qdio.no_in_queues - 1);
-
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
- in_sbal_ptrs[offset + i] =
- card->qdio.c_q->bufs[i].buffer;
- }
-}
-
static int qeth_qdio_establish(struct qeth_card *card)
{
+ struct qdio_buffer **out_sbal_ptrs[QETH_MAX_OUT_QUEUES];
+ struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
struct qdio_initialize init_data;
char *qib_param_field;
- struct qdio_buffer **in_sbal_ptrs;
- struct qdio_buffer **out_sbal_ptrs;
- int i, j, k;
+ unsigned int i;
int rc = 0;
QETH_CARD_TEXT(card, 2, "qdioest");
@@ -4847,35 +4832,14 @@ static int qeth_qdio_establish(struct qeth_card *card)
qeth_create_qib_param_field(card, qib_param_field);
qeth_create_qib_param_field_blkt(card, qib_param_field);
- in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
- sizeof(void *),
- GFP_KERNEL);
- if (!in_sbal_ptrs) {
- rc = -ENOMEM;
- goto out_free_qib_param;
- }
-
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
- in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
-
- qeth_qdio_establish_cq(card, in_sbal_ptrs);
-
- out_sbal_ptrs =
- kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
- sizeof(void *),
- GFP_KERNEL);
- if (!out_sbal_ptrs) {
- rc = -ENOMEM;
- goto out_free_in_sbals;
- }
+ in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
+ if (card->options.cq == QETH_CQ_ENABLED)
+ in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
- for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
- out_sbal_ptrs[k] =
- card->qdio.out_qs[i]->bufs[j]->buffer;
+ for (i = 0; i < card->qdio.no_out_queues; i++)
+ out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
memset(&init_data, 0, sizeof(struct qdio_initialize));
- init_data.cdev = CARD_DDEV(card);
init_data.q_format = IS_IQD(card) ? QDIO_IQDIO_QFMT :
QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
@@ -4893,12 +4857,13 @@ static int qeth_qdio_establish(struct qeth_card *card)
if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
- rc = qdio_allocate(&init_data);
+ rc = qdio_allocate(CARD_DDEV(card), init_data.no_input_qs,
+ init_data.no_output_qs);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
goto out;
}
- rc = qdio_establish(&init_data);
+ rc = qdio_establish(CARD_DDEV(card), &init_data);
if (rc) {
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
qdio_free(CARD_DDEV(card));
@@ -4916,10 +4881,6 @@ static int qeth_qdio_establish(struct qeth_card *card)
break;
}
out:
- kfree(out_sbal_ptrs);
-out_free_in_sbals:
- kfree(in_sbal_ptrs);
-out_free_qib_param:
kfree(qib_param_field);
out_free_nothing:
return rc;
@@ -5985,7 +5946,7 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
switch (card->info.type) {
case QETH_CARD_TYPE_IQD:
dev = alloc_netdev_mqs(sizeof(*priv), "hsi%d", NET_NAME_UNKNOWN,
- ether_setup, QETH_MAX_QUEUES, 1);
+ ether_setup, QETH_MAX_OUT_QUEUES, 1);
break;
case QETH_CARD_TYPE_OSM:
dev = alloc_etherdev(sizeof(*priv));
@@ -5995,7 +5956,7 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
ether_setup);
break;
default:
- dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_QUEUES, 1);
+ dev = alloc_etherdev_mqs(sizeof(*priv), QETH_MAX_OUT_QUEUES, 1);
}
if (!dev)
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 18a6751299f9..3d0bc000f500 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -178,12 +178,12 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return 0;
if (!(p_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
p_status = atomic_read(&port->status);
if (!(p_status & ZFCP_STATUS_COMMON_OPEN))
need = ZFCP_ERP_ACTION_REOPEN_PORT;
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT:
p_status = atomic_read(&port->status);
if (p_status & ZFCP_STATUS_COMMON_ERP_INUSE)
@@ -196,7 +196,7 @@ static enum zfcp_erp_act_type zfcp_erp_required_act(enum zfcp_erp_act_type want,
return need;
if (!(a_status & ZFCP_STATUS_COMMON_UNBLOCKED))
need = ZFCP_ERP_ACTION_REOPEN_ADAPTER;
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
a_status = atomic_read(&adapter->status);
if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
@@ -1086,7 +1086,7 @@ static enum zfcp_erp_act_result zfcp_erp_lun_strategy(
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return zfcp_erp_lun_strategy_close(erp_action);
/* already closed */
- /* fall through */
+ fallthrough;
case ZFCP_ERP_STEP_LUN_CLOSING:
if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
return ZFCP_ERP_FAILED;
@@ -1415,7 +1415,7 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act,
if (act->step != ZFCP_ERP_STEP_UNINITIALIZED)
if (result == ZFCP_ERP_SUCCEEDED)
zfcp_erp_try_rport_unblock(port);
- /* fall through */
+ fallthrough;
case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
put_device(&port->dev);
break;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 7c603e5b5b19..111fe3fc32d7 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -564,7 +564,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
case FSF_TOPO_AL:
fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
fc_host_fabric_name(shost) = 0;
- /* fall through */
+ fallthrough;
default:
fc_host_fabric_name(shost) = 0;
dev_err(&adapter->ccw_device->dev,
@@ -1032,7 +1032,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
switch (fsq->word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1127,7 +1127,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
break;
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsscth1");
- /* fall through */
+ fallthrough;
case FSF_GENERIC_COMMAND_REJECTED:
case FSF_PAYLOAD_SIZE_MISMATCH:
case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1313,7 +1313,7 @@ static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req)
break;
case FSF_SBAL_MISMATCH:
/* should never occur, avoided in zfcp_fsf_send_els */
- /* fall through */
+ fallthrough;
default:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -1736,7 +1736,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
/* no zfcp_fc_test_link() with failed open port */
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
case FSF_SQ_NO_RETRY_POSSIBLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1909,14 +1909,14 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
dev_warn(&req->adapter->ccw_device->dev,
"Opening WKA port 0x%x failed\n", wka_port->d_id);
- /* fall through */
+ fallthrough;
case FSF_ADAPTER_STATUS_AVAILABLE:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
wka_port->status = ZFCP_FC_WKA_PORT_OFFLINE;
break;
case FSF_GOOD:
wka_port->handle = header->port_handle;
- /* fall through */
+ fallthrough;
case FSF_PORT_ALREADY_OPEN:
wka_port->status = ZFCP_FC_WKA_PORT_ONLINE;
}
@@ -2059,7 +2059,6 @@ static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req)
case FSF_ADAPTER_STATUS_AVAILABLE:
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
- /* fall through */
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2144,7 +2143,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
case FSF_PORT_HANDLE_NOT_VALID:
zfcp_erp_adapter_reopen(adapter, 0, "fsouh_1");
- /* fall through */
+ fallthrough;
case FSF_LUN_ALREADY_OPEN:
break;
case FSF_PORT_BOXED:
@@ -2175,7 +2174,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
(unsigned long long)zfcp_scsi_dev_lun(sdev),
(unsigned long long)zfcp_sdev->port->wwpn);
zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
- /* fall through */
+ fallthrough;
case FSF_INVALID_COMMAND_OPTION:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2183,7 +2182,7 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
switch (header->fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
@@ -2277,7 +2276,7 @@ static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
switch (req->qtcb->header.fsf_status_qual.word[0]) {
case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
zfcp_fc_test_link(zfcp_sdev->port);
- /* fall through */
+ fallthrough;
case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
req->status |= ZFCP_STATUS_FSFREQ_ERROR;
break;
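
The conversions above replace the old /* fall through */ comment convention with the fallthrough pseudo-keyword from <linux/compiler_attributes.h>, which expands to __attribute__((fallthrough)) where supported so -Wimplicit-fallthrough can verify intent. Shape of the construct, with illustrative case labels and handlers:

	switch (status) {
	case CASE_A:
		handle_a();
		fallthrough;	/* deliberately continue into CASE_B */
	case CASE_B:
		handle_b();
		break;
	}
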
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index f0d6296e673b..26702b56a7ab 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -277,29 +277,6 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
return 0;
}
-
-static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
- struct zfcp_qdio *qdio)
-{
- memset(id, 0, sizeof(*id));
- id->cdev = qdio->adapter->ccw_device;
- id->q_format = QDIO_ZFCP_QFMT;
- memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
- ASCEBC(id->adapter_name, 8);
- id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
- if (enable_multibuffer)
- id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
- id->no_input_qs = 1;
- id->no_output_qs = 1;
- id->input_handler = zfcp_qdio_int_resp;
- id->output_handler = zfcp_qdio_int_req;
- id->int_parm = (unsigned long) qdio;
- id->input_sbal_addr_array = qdio->res_q;
- id->output_sbal_addr_array = qdio->req_q;
- id->scan_threshold =
- QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
-}
-
/**
* zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
* @qdio: pointer to struct zfcp_qdio
@@ -308,7 +285,6 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
*/
static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
{
- struct qdio_initialize init_data;
int ret;
ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
@@ -319,10 +295,9 @@ static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
if (ret)
goto free_req_q;
- zfcp_qdio_setup_init_data(&init_data, qdio);
init_waitqueue_head(&qdio->req_q_wq);
- ret = qdio_allocate(&init_data);
+ ret = qdio_allocate(qdio->adapter->ccw_device, 1, 1);
if (ret)
goto free_res_q;
@@ -374,8 +349,10 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
*/
int zfcp_qdio_open(struct zfcp_qdio *qdio)
{
+ struct qdio_buffer **input_sbals[1] = {qdio->res_q};
+ struct qdio_buffer **output_sbals[1] = {qdio->req_q};
struct qdio_buffer_element *sbale;
- struct qdio_initialize init_data;
+ struct qdio_initialize init_data = {0};
struct zfcp_adapter *adapter = qdio->adapter;
struct ccw_device *cdev = adapter->ccw_device;
struct qdio_ssqd_desc ssqd;
@@ -387,12 +364,26 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
atomic_andnot(ZFCP_STATUS_ADAPTER_SIOSL_ISSUED,
&qdio->adapter->status);
- zfcp_qdio_setup_init_data(&init_data, qdio);
+ init_data.q_format = QDIO_ZFCP_QFMT;
+ memcpy(init_data.adapter_name, dev_name(&cdev->dev), 8);
+ ASCEBC(init_data.adapter_name, 8);
+ init_data.qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
+ if (enable_multibuffer)
+ init_data.qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE;
+ init_data.no_input_qs = 1;
+ init_data.no_output_qs = 1;
+ init_data.input_handler = zfcp_qdio_int_resp;
+ init_data.output_handler = zfcp_qdio_int_req;
+ init_data.int_parm = (unsigned long) qdio;
+ init_data.input_sbal_addr_array = input_sbals;
+ init_data.output_sbal_addr_array = output_sbals;
+ init_data.scan_threshold =
+ QDIO_MAX_BUFFERS_PER_Q - ZFCP_QDIO_MAX_SBALS_PER_REQ * 2;
- if (qdio_establish(&init_data))
+ if (qdio_establish(cdev, &init_data))
goto failed_establish;
- if (qdio_get_ssqd_desc(init_data.cdev, &ssqd))
+ if (qdio_get_ssqd_desc(cdev, &ssqd))
goto failed_qdio;
if (ssqd.qdioac2 & CHSC_AC2_DATA_DIV_ENABLED)
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 4725e4c763cf..ddd73f6798af 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1626,7 +1626,7 @@ out:
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
unsigned long flagv = 0;
- int retval;
+ int retval, unblock_retval;
struct Scsi_Host *host = aac->scsi_host_ptr;
int bled;
@@ -1656,8 +1656,9 @@ int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
retval = _aac_reset_adapter(aac, bled, reset_type);
spin_unlock_irqrestore(host->host_lock, flagv);
- retval = scsi_host_unblock(host, SDEV_RUNNING);
-
+ unblock_retval = scsi_host_unblock(host, SDEV_RUNNING);
+ if (!retval)
+ retval = unblock_retval;
if ((forced < 2) && (retval == -ENODEV)) {
/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
struct fib * fibctx = aac_fib_alloc(aac);
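
The unblock_retval change is the usual first-error-wins pattern: the follow-up step always runs, but its status is only reported when the primary operation succeeded. In sketch form (do_reset and do_unblock are placeholders):

	int ret, ret2;

	ret = do_reset();	/* primary operation */
	ret2 = do_unblock();	/* must run regardless */
	if (!ret)
		ret = ret2;	/* surface the first failure only */
	return ret;
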
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 4190a025381a..84fc499cb1e6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -1834,21 +1834,6 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_1B,
MSG_BUS_DEV_RESET, TRUE)) {
-#ifdef __FreeBSD__
- /*
- * Don't mark the user's request for this BDR
- * as completing with CAM_BDR_SENT. CAM3
- * specifies CAM_REQ_CMP.
- */
- if (scb != NULL
- && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV
- && ahc_match_scb(ahc, scb, target, channel,
- CAM_LUN_WILDCARD,
- SCB_LIST_NULL,
- ROLE_INITIATOR)) {
- ahc_set_transaction_status(scb, CAM_REQ_CMP);
- }
-#endif
ahc_compile_devinfo(&devinfo,
initiator_role_id,
target,
@@ -4399,22 +4384,16 @@ ahc_alloc(void *platform_arg, char *name)
struct ahc_softc *ahc;
int i;
-#ifndef __FreeBSD__
ahc = kmalloc(sizeof(*ahc), GFP_ATOMIC);
if (!ahc) {
printk("aic7xxx: cannot malloc softc!\n");
kfree(name);
return NULL;
}
-#else
- ahc = device_get_softc((device_t)platform_arg);
-#endif
memset(ahc, 0, sizeof(*ahc));
ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
if (ahc->seep_config == NULL) {
-#ifndef __FreeBSD__
kfree(ahc);
-#endif
kfree(name);
return (NULL);
}
@@ -4540,9 +4519,7 @@ ahc_free(struct ahc_softc *ahc)
kfree(ahc->name);
if (ahc->seep_config != NULL)
kfree(ahc->seep_config);
-#ifndef __FreeBSD__
kfree(ahc);
-#endif
return;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3b84db8d13a9..b6e8ed757252 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -66,7 +66,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "2.12.10"
+#define BNX2FC_VERSION "2.12.13"
#define PFX "bnx2fc: "
@@ -482,7 +482,10 @@ struct io_bdt {
struct bnx2fc_work {
struct list_head list;
struct bnx2fc_rport *tgt;
+ struct fcoe_task_ctx_entry *task;
+ unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
u16 wqe;
+ u8 num_rq;
};
struct bnx2fc_unsol_els {
struct fc_lport *lport;
@@ -550,7 +553,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
enum fc_rport_event event);
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_cleanup_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@@ -559,7 +562,7 @@ void bnx2fc_process_abts_compl(struct bnx2fc_cmd *io_req,
u8 num_rq);
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_process_els_compl(struct bnx2fc_cmd *els_req,
struct fcoe_task_ctx_entry *task,
u8 num_rq);
@@ -577,7 +580,9 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
void *arg, u32 timeout);
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt);
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt);
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe);
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task);
struct bnx2fc_rport *bnx2fc_tgt_lookup(struct fcoe_port *port,
u32 port_id);
void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index b4bfab5edf8f..1cbb431fa682 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -660,7 +660,10 @@ static int bnx2fc_percpu_io_thread(void *arg)
list_for_each_entry_safe(work, tmp, &work_list, list) {
list_del_init(&work->list);
- bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe,
+ work->rq_data,
+ work->num_rq,
+ work->task);
kfree(work);
}
@@ -2655,7 +2658,8 @@ static int bnx2fc_cpu_offline(unsigned int cpu)
/* Free all work in the list */
list_for_each_entry_safe(work, tmp, &p->work_list, list) {
list_del_init(&work->list);
- bnx2fc_process_cq_compl(work->tgt, work->wqe);
+ bnx2fc_process_cq_compl(work->tgt, work->wqe, work->rq_data,
+ work->num_rq, work->task);
kfree(work);
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 6f8335ddb1f2..1f7c58b4c535 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -863,36 +863,22 @@ ret_warn_rqe:
}
}
-void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
+void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
- struct fcoe_task_ctx_entry *task;
- struct fcoe_task_ctx_entry *task_page;
struct fcoe_port *port = tgt->port;
struct bnx2fc_interface *interface = port->priv;
struct bnx2fc_hba *hba = interface->hba;
struct bnx2fc_cmd *io_req;
- int task_idx, index;
+
u16 xid;
u8 cmd_type;
u8 rx_state = 0;
- u8 num_rq;
spin_lock_bh(&tgt->tgt_lock);
- xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
- if (xid >= hba->max_tasks) {
- printk(KERN_ERR PFX "ERROR:xid out of range\n");
- spin_unlock_bh(&tgt->tgt_lock);
- return;
- }
- task_idx = xid / BNX2FC_TASKS_PER_PAGE;
- index = xid % BNX2FC_TASKS_PER_PAGE;
- task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
- task = &(task_page[index]);
-
- num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
- FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
if (io_req == NULL) {
@@ -912,7 +898,8 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
switch (cmd_type) {
case BNX2FC_SCSI_CMD:
if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
- bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
+ bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq,
+ rq_data);
spin_unlock_bh(&tgt->tgt_lock);
return;
}
@@ -929,7 +916,7 @@ void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
case BNX2FC_TASK_MGMT_CMD:
BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
- bnx2fc_process_tm_compl(io_req, task, num_rq);
+ bnx2fc_process_tm_compl(io_req, task, num_rq, rq_data);
break;
case BNX2FC_ABTS:
@@ -987,7 +974,9 @@ void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
}
-static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
+static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe,
+ unsigned char *rq_data, u8 num_rq,
+ struct fcoe_task_ctx_entry *task)
{
struct bnx2fc_work *work;
work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
@@ -997,29 +986,87 @@ static struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
INIT_LIST_HEAD(&work->list);
work->tgt = tgt;
work->wqe = wqe;
+ work->num_rq = num_rq;
+ work->task = task;
+ if (rq_data)
+ memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);
+
return work;
}
/* Pending work request completion */
-static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+static bool bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
{
unsigned int cpu = wqe % num_possible_cpus();
struct bnx2fc_percpu_s *fps;
struct bnx2fc_work *work;
+ struct fcoe_task_ctx_entry *task;
+ struct fcoe_task_ctx_entry *task_page;
+ struct fcoe_port *port = tgt->port;
+ struct bnx2fc_interface *interface = port->priv;
+ struct bnx2fc_hba *hba = interface->hba;
+ unsigned char *rq_data = NULL;
+ unsigned char rq_data_buff[BNX2FC_RQ_BUF_SZ];
+ int task_idx, index;
+ unsigned char *dummy;
+ u16 xid;
+ u8 num_rq;
+ int i;
+
+ xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
+ if (xid >= hba->max_tasks) {
+ pr_err(PFX "ERROR:xid out of range\n");
+ return false;
+ }
+
+ task_idx = xid / BNX2FC_TASKS_PER_PAGE;
+ index = xid % BNX2FC_TASKS_PER_PAGE;
+ task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
+ task = &task_page[index];
+
+ num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
+ FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);
+
+ memset(rq_data_buff, 0, BNX2FC_RQ_BUF_SZ);
+
+ if (!num_rq)
+ goto num_rq_zero;
+
+ rq_data = bnx2fc_get_next_rqe(tgt, 1);
+
+ if (num_rq > 1) {
+ /* We do not need extra sense data */
+ for (i = 1; i < num_rq; i++)
+ dummy = bnx2fc_get_next_rqe(tgt, 1);
+ }
+
+ if (rq_data)
+ memcpy(rq_data_buff, rq_data, BNX2FC_RQ_BUF_SZ);
+
+ /* return RQ entries */
+ for (i = 0; i < num_rq; i++)
+ bnx2fc_return_rqe(tgt, 1);
+
+num_rq_zero:
fps = &per_cpu(bnx2fc_percpu, cpu);
spin_lock_bh(&fps->fp_work_lock);
if (fps->iothread) {
- work = bnx2fc_alloc_work(tgt, wqe);
+ work = bnx2fc_alloc_work(tgt, wqe, rq_data_buff,
+ num_rq, task);
if (work) {
list_add_tail(&work->list, &fps->work_list);
wake_up_process(fps->iothread);
spin_unlock_bh(&fps->fp_work_lock);
- return;
+ return true;
}
}
spin_unlock_bh(&fps->fp_work_lock);
- bnx2fc_process_cq_compl(tgt, wqe);
+ bnx2fc_process_cq_compl(tgt, wqe,
+ rq_data_buff, num_rq, task);
+
+ return true;
}
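
The point of the refactor is visible here: the RQ entry is consumed and returned to the ring before the work item is queued, so its contents must be snapshotted into the work item rather than referenced later. A condensed sketch, with my_work and worklist as placeholders:

	struct my_work {
		struct list_head list;
		u8 num_rq;
		unsigned char rq_data[BNX2FC_RQ_BUF_SZ];
	};

	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work)
		return false;
	memcpy(work->rq_data, rq_data, BNX2FC_RQ_BUF_SZ);	/* snapshot */
	bnx2fc_return_rqe(tgt, 1);	/* ring slot may now be reused */
	list_add_tail(&work->list, &worklist);
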
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
@@ -1056,8 +1103,8 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
/* Unsolicited event notification */
bnx2fc_process_unsol_compl(tgt, wqe);
} else {
- bnx2fc_pending_work(tgt, wqe);
- num_free_sqes++;
+ if (bnx2fc_pending_work(tgt, wqe))
+ num_free_sqes++;
}
cqe++;
tgt->cq_cons_idx++;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c8122a82322..2b070f0835df 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -24,7 +24,7 @@ static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req);
static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req);
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq);
+ u8 num_rq, unsigned char *rq_data);
void bnx2fc_cmd_timer_set(struct bnx2fc_cmd *io_req,
unsigned int timer_msec)
@@ -1518,7 +1518,8 @@ static void bnx2fc_tgt_reset_cmpl(struct bnx2fc_cmd *io_req)
}
void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
- struct fcoe_task_ctx_entry *task, u8 num_rq)
+ struct fcoe_task_ctx_entry *task, u8 num_rq,
+ unsigned char *rq_data)
{
struct bnx2fc_mp_req *tm_req;
struct fc_frame_header *fc_hdr;
@@ -1557,7 +1558,7 @@ void bnx2fc_process_tm_compl(struct bnx2fc_cmd *io_req,
if (fc_hdr->fh_r_ctl == FC_RCTL_DD_CMD_STATUS) {
bnx2fc_parse_fcp_rsp(io_req,
(struct fcoe_fcp_rsp_payload *)
- rsp_buf, num_rq);
+ rsp_buf, num_rq, rq_data);
if (io_req->fcp_rsp_code == 0) {
/* TM successful */
if (tm_req->tm_flags & FCP_TMF_LUN_RESET)
@@ -1755,15 +1756,11 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
struct fcoe_fcp_rsp_payload *fcp_rsp,
- u8 num_rq)
+ u8 num_rq, unsigned char *rq_data)
{
struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
- struct bnx2fc_rport *tgt = io_req->tgt;
u8 rsp_flags = fcp_rsp->fcp_flags.flags;
u32 rq_buff_len = 0;
- int i;
- unsigned char *rq_data;
- unsigned char *dummy;
int fcp_sns_len = 0;
int fcp_rsp_len = 0;
@@ -1809,14 +1806,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
rq_buff_len = num_rq * BNX2FC_RQ_BUF_SZ;
}
- rq_data = bnx2fc_get_next_rqe(tgt, 1);
-
- if (num_rq > 1) {
- /* We do not need extra sense data */
- for (i = 1; i < num_rq; i++)
- dummy = bnx2fc_get_next_rqe(tgt, 1);
- }
-
/* fetch fcp_rsp_code */
if ((fcp_rsp_len == 4) || (fcp_rsp_len == 8)) {
/* Only for task management function */
@@ -1837,9 +1826,6 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
if (fcp_sns_len)
memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len);
- /* return RQ entries */
- for (i = 0; i < num_rq; i++)
- bnx2fc_return_rqe(tgt, 1);
}
}
@@ -1918,7 +1904,7 @@ exit_qcmd:
void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
struct fcoe_task_ctx_entry *task,
- u8 num_rq)
+ u8 num_rq, unsigned char *rq_data)
{
struct fcoe_fcp_rsp_payload *fcp_rsp;
struct bnx2fc_rport *tgt = io_req->tgt;
@@ -1931,6 +1917,12 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* we will not receive ABTS response for this IO */
BNX2FC_IO_DBG(io_req, "Timer context finished processing "
"this scsi cmd\n");
+ if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
+ &io_req->req_flags)) {
+ BNX2FC_IO_DBG(io_req,
+ "Actual completion after cleanup request cleaning up\n");
+ bnx2fc_process_cleanup_compl(io_req, task, num_rq);
+ }
return;
}
@@ -1950,7 +1942,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
&(task->rxwr_only.union_ctx.comp_info.fcp_rsp.payload);
/* parse fcp_rsp and obtain sense data from RQ if available */
- bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
+ bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq, rq_data);
if (!sc_cmd->SCp.ptr) {
printk(KERN_ERR PFX "SCp.ptr is NULL\n");
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index d4c2a2e4c5d4..84d73f57292b 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -404,7 +404,7 @@ static const char * const hostbyte_table[]={
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
-"DID_NEXUS_FAILURE" };
+"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };
static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index da6e97d8dc3b..773c45af9387 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -632,6 +632,8 @@ static void fc_rport_error(struct fc_rport_priv *rdata, int err)
fc_rport_enter_ready(rdata);
break;
case RPORT_ST_PRLI:
+ fc_rport_enter_plogi(rdata);
+ break;
case RPORT_ST_ADISC:
fc_rport_enter_logo(rdata);
break;
@@ -1208,9 +1210,15 @@ static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
if (!rjt)
FC_RPORT_DBG(rdata, "PRLI bad response\n");
- else
+ else {
FC_RPORT_DBG(rdata, "PRLI ELS rejected, reason %x expl %x\n",
rjt->er_reason, rjt->er_explan);
+ if (rjt->er_reason == ELS_RJT_UNAB &&
+ rjt->er_explan == ELS_EXPL_PLOGI_REQD) {
+ fc_rport_enter_plogi(rdata);
+ goto out;
+ }
+ }
fc_rport_error_retry(rdata, FC_EX_ELS_RJT);
}
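The fc_rport changes above handle a target that has silently dropped the login: an LS_RJT with reason "unable to perform" and explanation "PLOGI required" now restarts the state machine at PLOGI instead of retrying PRLI or sending LOGO. A compilable sketch of that decision follows; the numeric values match common FC-LS assignments but should be treated as illustrative here.

/* Sketch of the PRLI-reject decision above. */
#include <stdio.h>

enum { ELS_RJT_UNAB = 0x09, ELS_EXPL_PLOGI_REQD = 0x1e };

enum next_step { RETRY_PRLI, REDO_PLOGI };

static enum next_step on_prli_reject(unsigned char reason, unsigned char explan)
{
	if (reason == ELS_RJT_UNAB && explan == ELS_EXPL_PLOGI_REQD)
		return REDO_PLOGI;  /* target dropped our login; start over */
	return RETRY_PRLI;          /* transient reject; normal retry path */
}

int main(void)
{
	printf("%d\n", on_prli_reject(ELS_RJT_UNAB, ELS_EXPL_PLOGI_REQD));
	return 0;
}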
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 357fdec06bae..8e2a356911a9 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -207,8 +207,7 @@ typedef struct lpfc_vpd {
} rev;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd3 :19; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 :20; /* Reserved */
uint32_t rsvd2 : 3; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
@@ -230,8 +229,7 @@ typedef struct lpfc_vpd {
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 3; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd3 :19; /* Reserved */
+ uint32_t rsvd3 :20; /* Reserved */
#endif
} sli3Feat;
} lpfc_vpd_t;
@@ -480,8 +478,8 @@ struct lpfc_vport {
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
struct dentry *debug_scsistat;
- struct dentry *debug_nvmektime;
- struct dentry *debug_cpucheck;
+ struct dentry *debug_ioktime;
+ struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@@ -887,7 +885,6 @@ struct lpfc_hba {
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
#define LPFC_DELAY_INIT_LINK 1 /* layered driver hold off */
#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2 /* wait, manual intervention */
- uint32_t cfg_enable_dss;
uint32_t cfg_fdmi_on;
#define LPFC_FDMI_NO_SUPPORT 0 /* FDMI not supported */
#define LPFC_FDMI_SUPPORT 1 /* FDMI supported? */
@@ -1156,8 +1153,6 @@ struct lpfc_hba {
uint32_t iocb_cnt;
uint32_t iocb_max;
atomic_t sdev_cnt;
- uint8_t fips_spec_rev;
- uint8_t fips_level;
spinlock_t devicelock; /* lock for luns list */
mempool_t *device_data_mem_pool;
struct list_head luns;
@@ -1175,12 +1170,11 @@ struct lpfc_hba {
uint16_t sfp_warning;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint16_t cpucheck_on;
+ uint16_t hdwqstat_on;
#define LPFC_CHECK_OFF 0
#define LPFC_CHECK_NVME_IO 1
-#define LPFC_CHECK_NVMET_RCV 2
-#define LPFC_CHECK_NVMET_IO 4
-#define LPFC_CHECK_SCSI_IO 8
+#define LPFC_CHECK_NVMET_IO 2
+#define LPFC_CHECK_SCSI_IO 4
uint16_t ktime_on;
uint64_t ktime_data_samples;
uint64_t ktime_status_samples;
@@ -1225,6 +1219,11 @@ struct lpfc_hba {
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN];
+
+ /* SCSI host template information - for physical port */
+ struct scsi_host_template port_template;
+ /* SCSI host template information - for all vports */
+ struct scsi_host_template vport_template;
};
static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index be3b0ccbac78..1354c141d614 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -2231,66 +2231,6 @@ lpfc_poll_store(struct device *dev, struct device_attribute *attr,
}
/**
- * lpfc_fips_level_show - Return the current FIPS level for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_level_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_level);
-}
-
-/**
- * lpfc_fips_rev_show - Return the FIPS Spec revision for the HBA
- * @dev: class unused variable.
- * @attr: device attribute, not used.
- * @buf: on return contains the module description text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_fips_rev_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%d\n", phba->fips_spec_rev);
-}
-
-/**
- * lpfc_dss_show - Return the current state of dss and the configured state
- * @dev: class converted to a Scsi_host structure.
- * @attr: device attribute, not used.
- * @buf: on return contains the formatted text.
- *
- * Returns: size of formatted string.
- **/
-static ssize_t
-lpfc_dss_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
-
- return scnprintf(buf, PAGE_SIZE, "%s - %sOperational\n",
- (phba->cfg_enable_dss) ? "Enabled" : "Disabled",
- (phba->sli3_options & LPFC_SLI3_DSS_ENABLED) ?
- "" : "Not ");
-}
-
-/**
* lpfc_sriov_hw_max_virtfn_show - Return maximum number of virtual functions
* @dev: class converted to a Scsi_host structure.
* @attr: device attribute, not used.
@@ -2705,9 +2645,6 @@ static DEVICE_ATTR(max_xri, S_IRUGO, lpfc_max_xri_show, NULL);
static DEVICE_ATTR(used_xri, S_IRUGO, lpfc_used_xri_show, NULL);
static DEVICE_ATTR(npiv_info, S_IRUGO, lpfc_npiv_info_show, NULL);
static DEVICE_ATTR_RO(lpfc_temp_sensor);
-static DEVICE_ATTR_RO(lpfc_fips_level);
-static DEVICE_ATTR_RO(lpfc_fips_rev);
-static DEVICE_ATTR_RO(lpfc_dss);
static DEVICE_ATTR_RO(lpfc_sriov_hw_max_virtfn);
static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static DEVICE_ATTR(lpfc_xlane_supported, S_IRUGO, lpfc_oas_supported_show,
@@ -3868,9 +3805,9 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,512]. Default value is 30.
+# commands per FCP LUN. Value range is [1,512]. Default value is 64.
*/
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 64, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
/*
@@ -6251,9 +6188,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_pt,
&dev_attr_txq_hw,
&dev_attr_txcmplq_hw,
- &dev_attr_lpfc_fips_level,
- &dev_attr_lpfc_fips_rev,
- &dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
&dev_attr_protocol,
&dev_attr_lpfc_xlane_supported,
@@ -6289,8 +6223,6 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_max_scsicmpl_time,
&dev_attr_lpfc_stat_data_ctrl,
&dev_attr_lpfc_static_vport,
- &dev_attr_lpfc_fips_level,
- &dev_attr_lpfc_fips_rev,
NULL,
};
@@ -7399,7 +7331,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_delay_discovery_init(phba, lpfc_delay_discovery);
lpfc_sli_mode_init(phba, lpfc_sli_mode);
- phba->cfg_enable_dss = 1;
lpfc_enable_mds_diags_init(phba, lpfc_enable_mds_diags);
lpfc_ras_fwlog_buffsize_init(phba, lpfc_ras_fwlog_buffsize);
lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level);
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index a450477a7e00..76dc8d9493d2 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -404,9 +404,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
extern struct device_attribute *lpfc_hba_attrs[];
extern struct device_attribute *lpfc_vport_attrs[];
extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_no_hr;
extern struct scsi_host_template lpfc_template_nvme;
-extern struct scsi_host_template lpfc_vport_template;
extern struct fc_function_template lpfc_transport_functions;
extern struct fc_function_template lpfc_vport_transport_functions;
@@ -590,6 +588,7 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
int);
void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
struct lpfc_sli4_hdw_queue *qp);
+void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
void lpfc_nvme_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 819335b16c2e..8a6e02aa553f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1300,8 +1300,88 @@ buffer_done:
return len;
}
+void
+lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
+{
+ uint64_t seg1, seg2, seg3, seg4;
+ uint64_t segsum;
+
+ if (!lpfc_cmd->ts_last_cmd ||
+ !lpfc_cmd->ts_cmd_start ||
+ !lpfc_cmd->ts_cmd_wqput ||
+ !lpfc_cmd->ts_isr_cmpl ||
+ !lpfc_cmd->ts_data_io)
+ return;
+
+ if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start)
+ return;
+ if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd)
+ return;
+ if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start)
+ return;
+ if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput)
+ return;
+ if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl)
+ return;
+ /*
+ * Segment 1 - Time from the last I/O cmpl being handed
+ * off to the upper layer to the start of the next command.
+ * Segment 2 - Time from the driver receiving an IO cmd
+ * from the upper layer to WQ put being done on the IO cmd.
+ * Segment 3 - Time from WQ put being done on the IO cmd
+ * to the MSI-X ISR for the IO cmpl.
+ * Segment 4 - Time from the MSI-X ISR for the IO cmpl to
+ * when the cmpl is handed off to the upper layer.
+ */
+ seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd;
+ if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
+ seg1 = 0;
+
+ /* Calculate times relative to start of IO */
+ seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start);
+ segsum = seg2;
+ seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start;
+ if (segsum > seg3)
+ return;
+ seg3 -= segsum;
+ segsum += seg3;
+
+ seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start;
+ if (segsum > seg4)
+ return;
+ seg4 -= segsum;
+
+ phba->ktime_data_samples++;
+ phba->ktime_seg1_total += seg1;
+ if (seg1 < phba->ktime_seg1_min)
+ phba->ktime_seg1_min = seg1;
+ else if (seg1 > phba->ktime_seg1_max)
+ phba->ktime_seg1_max = seg1;
+ phba->ktime_seg2_total += seg2;
+ if (seg2 < phba->ktime_seg2_min)
+ phba->ktime_seg2_min = seg2;
+ else if (seg2 > phba->ktime_seg2_max)
+ phba->ktime_seg2_max = seg2;
+ phba->ktime_seg3_total += seg3;
+ if (seg3 < phba->ktime_seg3_min)
+ phba->ktime_seg3_min = seg3;
+ else if (seg3 > phba->ktime_seg3_max)
+ phba->ktime_seg3_max = seg3;
+ phba->ktime_seg4_total += seg4;
+ if (seg4 < phba->ktime_seg4_min)
+ phba->ktime_seg4_min = seg4;
+ else if (seg4 > phba->ktime_seg4_max)
+ phba->ktime_seg4_max = seg4;
+
+ lpfc_cmd->ts_last_cmd = 0;
+ lpfc_cmd->ts_cmd_start = 0;
+ lpfc_cmd->ts_cmd_wqput = 0;
+ lpfc_cmd->ts_isr_cmpl = 0;
+ lpfc_cmd->ts_data_io = 0;
+}
+
/**
- * lpfc_debugfs_nvmektime_data - Dump target node list to a buffer
+ * lpfc_debugfs_ioktime_data - Dump I/O ktime statistics to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
@@ -1314,13 +1394,13 @@ buffer_done:
* not exceed @size.
**/
static int
-lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
+lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
int len = 0;
if (phba->nvmet_support == 0) {
- /* NVME Initiator */
+ /* Initiator */
len += scnprintf(buf + len, PAGE_SIZE - len,
"ktime %s: Total Samples: %lld\n",
(phba->ktime_on ? "Enabled" : "Disabled"),
@@ -1330,8 +1410,8 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "Segment 1: Last NVME Cmd cmpl "
- "done -to- Start of next NVME cnd (in driver)\n");
+ "Segment 1: Last Cmd cmpl "
+ "done -to- Start of next Cmd (in driver)\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@@ -1341,7 +1421,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
phba->ktime_seg1_max);
len += scnprintf(
buf + len, PAGE_SIZE - len,
- "Segment 2: Driver start of NVME cmd "
+ "Segment 2: Driver start of Cmd "
"-to- Firmware WQ doorbell\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
@@ -1364,7 +1444,7 @@ lpfc_debugfs_nvmektime_data(struct lpfc_vport *vport, char *buf, int size)
len += scnprintf(
buf + len, PAGE_SIZE - len,
"Segment 4: MSI-X ISR cmpl -to- "
- "NVME cmpl done\n");
+ "Cmd cmpl done\n");
len += scnprintf(
buf + len, PAGE_SIZE - len,
"avg:%08lld min:%08lld max %08lld\n",
@@ -1603,42 +1683,50 @@ out:
}
/**
- * lpfc_debugfs_cpucheck_data - Dump target node list to a buffer
+ * lpfc_debugfs_hdwqstat_data - Dump I/O stats to a buffer
* @vport: The vport to gather target node info from.
* @buf: The buffer to dump log into.
* @size: The maximum amount of data to process.
*
* Description:
- * This routine dumps the NVME statistics associated with @vport
+ * This routine dumps the NVME + SCSI statistics associated with @vport
*
* Return Value:
* This routine returns the amount of bytes that were dumped into @buf and will
* not exceed @size.
**/
static int
-lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
+lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
struct lpfc_sli4_hdw_queue *qp;
- int i, j, max_cnt;
- int len = 0;
+ struct lpfc_hdwq_stat *c_stat;
+ int i, j, len;
uint32_t tot_xmt;
uint32_t tot_rcv;
uint32_t tot_cmpl;
+ char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPUcheck %s ",
- (phba->cpucheck_on & LPFC_CHECK_NVME_IO ?
- "Enabled" : "Disabled"));
- if (phba->nvmet_support) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "%s\n",
- (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV ?
- "Rcv Enabled\n" : "Rcv Disabled\n"));
- } else {
- len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
- }
- max_cnt = size - LPFC_DEBUG_OUT_LINE_SZ;
+ scnprintf(tmp, sizeof(tmp), "HDWQ Stats:\n\n");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "(NVME Accounting: %s) ",
+ (phba->hdwqstat_on &
+ (LPFC_CHECK_NVME_IO | LPFC_CHECK_NVMET_IO) ?
+ "Enabled" : "Disabled"));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "(SCSI Accounting: %s) ",
+ (phba->hdwqstat_on & LPFC_CHECK_SCSI_IO ?
+ "Enabled" : "Disabled"));
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ scnprintf(tmp, sizeof(tmp), "\n\n");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
qp = &phba->sli4_hba.hdwq[i];
@@ -1646,46 +1734,76 @@ lpfc_debugfs_cpucheck_data(struct lpfc_vport *vport, char *buf, int size)
tot_rcv = 0;
tot_xmt = 0;
tot_cmpl = 0;
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
- tot_xmt += qp->cpucheck_xmt_io[j];
- tot_cmpl += qp->cpucheck_cmpl_io[j];
- if (phba->nvmet_support)
- tot_rcv += qp->cpucheck_rcv_io[j];
- }
- /* Only display Hardware Qs with something */
- if (!tot_xmt && !tot_cmpl && !tot_rcv)
- continue;
+ for_each_present_cpu(j) {
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, j);
+
+ /* Only display for this HDWQ */
+ if (i != c_stat->hdwq_no)
+ continue;
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "HDWQ %03d: ", i);
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
/* Only display non-zero counters */
- if (!qp->cpucheck_xmt_io[j] &&
- !qp->cpucheck_cmpl_io[j] &&
- !qp->cpucheck_rcv_io[j])
+ if (!c_stat->xmt_io && !c_stat->cmpl_io &&
+ !c_stat->rcv_io)
continue;
+
+ if (!tot_xmt && !tot_cmpl && !tot_rcv) {
+ /* Print HDWQ string only the first time */
+ scnprintf(tmp, sizeof(tmp), "[HDWQ %d]:\t", i);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ }
+
+ tot_xmt += c_stat->xmt_io;
+ tot_cmpl += c_stat->cmpl_io;
+ if (phba->nvmet_support)
+ tot_rcv += c_stat->rcv_io;
+
+ scnprintf(tmp, sizeof(tmp), "| [CPU %d]: ", j);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
if (phba->nvmet_support) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPU %03d: %x/%x/%x ", j,
- qp->cpucheck_rcv_io[j],
- qp->cpucheck_xmt_io[j],
- qp->cpucheck_cmpl_io[j]);
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x RCV 0x%x |",
+ c_stat->xmt_io, c_stat->cmpl_io,
+ c_stat->rcv_io);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
} else {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "CPU %03d: %x/%x ", j,
- qp->cpucheck_xmt_io[j],
- qp->cpucheck_cmpl_io[j]);
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x |",
+ c_stat->xmt_io, c_stat->cmpl_io);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
}
}
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Total: %x\n", tot_xmt);
- if (len >= max_cnt) {
- len += scnprintf(buf + len, PAGE_SIZE - len,
- "Truncated ...\n");
- return len;
+
+ /* Check if nothing to display */
+ if (!tot_xmt && !tot_cmpl && !tot_rcv)
+ continue;
+
+ scnprintf(tmp, sizeof(tmp), "\t->\t[HDWQ Total: ");
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+
+ if (phba->nvmet_support) {
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x RCV 0x%x]\n\n",
+ tot_xmt, tot_cmpl, tot_rcv);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
+ } else {
+ scnprintf(tmp, sizeof(tmp),
+ "XMT 0x%x CMPL 0x%x]\n\n",
+ tot_xmt, tot_cmpl);
+ if (strlcat(buf, tmp, size) >= size)
+ goto buffer_done;
}
}
+
+buffer_done:
+ len = strnlen(buf, size);
return len;
}
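The rewritten dump routine above formats each fragment into a small scratch buffer and appends with strlcat(), jumping to buffer_done the moment an append would truncate, then reports only what actually fit. A standalone sketch of the same guard follows; a local my_strlcat() is provided since not every libc ships strlcat(), and buffer sizes are arbitrary.

/* Sketch of the strlcat() truncation guard used above. */
#include <stdio.h>
#include <string.h>

/* Minimal strlcat for platforms without one. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)
		return size + slen;
	snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

int main(void)
{
	char buf[64] = "";
	char tmp[32];
	size_t len;
	int i;

	for (i = 0; i < 100; i++) {
		snprintf(tmp, sizeof(tmp), "[q %d] ", i);
		if (my_strlcat(buf, tmp, sizeof(buf)) >= sizeof(buf))
			goto buffer_done;  /* would truncate: stop appending */
	}
buffer_done:
	len = strnlen(buf, sizeof(buf));   /* report what actually fits */
	printf("%zu bytes: %s\n", len, buf);
	return 0;
}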
@@ -2689,7 +2807,7 @@ lpfc_debugfs_scsistat_write(struct file *file, const char __user *buf,
}
static int
-lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
+lpfc_debugfs_ioktime_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@@ -2700,14 +2818,14 @@ lpfc_debugfs_nvmektime_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_NVMEKTIME_SIZE, GFP_KERNEL);
+ debug->buffer = kmalloc(LPFC_IOKTIME_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_nvmektime_data(vport, debug->buffer,
- LPFC_NVMEKTIME_SIZE);
+ debug->len = lpfc_debugfs_ioktime_data(vport, debug->buffer,
+ LPFC_IOKTIME_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2718,8 +2836,8 @@ out:
}
static ssize_t
-lpfc_debugfs_nvmektime_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+lpfc_debugfs_ioktime_write(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
@@ -2921,7 +3039,7 @@ lpfc_debugfs_nvmeio_trc_write(struct file *file, const char __user *buf,
}
static int
-lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
+lpfc_debugfs_hdwqstat_open(struct inode *inode, struct file *file)
{
struct lpfc_vport *vport = inode->i_private;
struct lpfc_debug *debug;
@@ -2932,14 +3050,14 @@ lpfc_debugfs_cpucheck_open(struct inode *inode, struct file *file)
goto out;
/* Round to page boundary */
- debug->buffer = kmalloc(LPFC_CPUCHECK_SIZE, GFP_KERNEL);
+ debug->buffer = kcalloc(1, LPFC_SCSISTAT_SIZE, GFP_KERNEL);
if (!debug->buffer) {
kfree(debug);
goto out;
}
- debug->len = lpfc_debugfs_cpucheck_data(vport, debug->buffer,
- LPFC_CPUCHECK_SIZE);
+ debug->len = lpfc_debugfs_hdwqstat_data(vport, debug->buffer,
+ LPFC_SCSISTAT_SIZE);
debug->i_private = inode->i_private;
file->private_data = debug;
@@ -2950,16 +3068,16 @@ out:
}
static ssize_t
-lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
+lpfc_debugfs_hdwqstat_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
{
struct lpfc_debug *debug = file->private_data;
struct lpfc_vport *vport = (struct lpfc_vport *)debug->i_private;
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_hdwq_stat *c_stat;
char mybuf[64];
char *pbuf;
- int i, j;
+ int i;
if (nbytes > 64)
nbytes = 64;
@@ -2972,41 +3090,39 @@ lpfc_debugfs_cpucheck_write(struct file *file, const char __user *buf,
if ((strncmp(pbuf, "on", sizeof("on") - 1) == 0)) {
if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
- phba->cpucheck_on |= (LPFC_CHECK_NVME_IO |
+ phba->hdwqstat_on |= (LPFC_CHECK_NVME_IO |
LPFC_CHECK_SCSI_IO);
return strlen(pbuf);
} else if ((strncmp(pbuf, "nvme_on", sizeof("nvme_on") - 1) == 0)) {
if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVMET_IO;
else
- phba->cpucheck_on |= LPFC_CHECK_NVME_IO;
+ phba->hdwqstat_on |= LPFC_CHECK_NVME_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "scsi_on", sizeof("scsi_on") - 1) == 0)) {
- phba->cpucheck_on |= LPFC_CHECK_SCSI_IO;
+ if (!phba->nvmet_support)
+ phba->hdwqstat_on |= LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
- } else if ((strncmp(pbuf, "rcv",
- sizeof("rcv") - 1) == 0)) {
- if (phba->nvmet_support)
- phba->cpucheck_on |= LPFC_CHECK_NVMET_RCV;
- else
- return -EINVAL;
+ } else if ((strncmp(pbuf, "nvme_off", sizeof("nvme_off") - 1) == 0)) {
+ phba->hdwqstat_on &= ~(LPFC_CHECK_NVME_IO |
+ LPFC_CHECK_NVMET_IO);
+ return strlen(pbuf);
+ } else if ((strncmp(pbuf, "scsi_off", sizeof("scsi_off") - 1) == 0)) {
+ phba->hdwqstat_on &= ~LPFC_CHECK_SCSI_IO;
return strlen(pbuf);
} else if ((strncmp(pbuf, "off",
sizeof("off") - 1) == 0)) {
- phba->cpucheck_on = LPFC_CHECK_OFF;
+ phba->hdwqstat_on = LPFC_CHECK_OFF;
return strlen(pbuf);
} else if ((strncmp(pbuf, "zero",
sizeof("zero") - 1) == 0)) {
- for (i = 0; i < phba->cfg_hdw_queue; i++) {
- qp = &phba->sli4_hba.hdwq[i];
-
- for (j = 0; j < LPFC_CHECK_CPU_CNT; j++) {
- qp->cpucheck_rcv_io[j] = 0;
- qp->cpucheck_xmt_io[j] = 0;
- qp->cpucheck_cmpl_io[j] = 0;
- }
+ for_each_present_cpu(i) {
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, i);
+ c_stat->xmt_io = 0;
+ c_stat->cmpl_io = 0;
+ c_stat->rcv_io = 0;
}
return strlen(pbuf);
}
@@ -5431,13 +5547,13 @@ static const struct file_operations lpfc_debugfs_op_scsistat = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_nvmektime
-static const struct file_operations lpfc_debugfs_op_nvmektime = {
+#undef lpfc_debugfs_op_ioktime
+static const struct file_operations lpfc_debugfs_op_ioktime = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_nvmektime_open,
+ .open = lpfc_debugfs_ioktime_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
- .write = lpfc_debugfs_nvmektime_write,
+ .write = lpfc_debugfs_ioktime_write,
.release = lpfc_debugfs_release,
};
@@ -5451,13 +5567,13 @@ static const struct file_operations lpfc_debugfs_op_nvmeio_trc = {
.release = lpfc_debugfs_release,
};
-#undef lpfc_debugfs_op_cpucheck
-static const struct file_operations lpfc_debugfs_op_cpucheck = {
+#undef lpfc_debugfs_op_hdwqstat
+static const struct file_operations lpfc_debugfs_op_hdwqstat = {
.owner = THIS_MODULE,
- .open = lpfc_debugfs_cpucheck_open,
+ .open = lpfc_debugfs_hdwqstat_open,
.llseek = lpfc_debugfs_lseek,
.read = lpfc_debugfs_read,
- .write = lpfc_debugfs_cpucheck_write,
+ .write = lpfc_debugfs_hdwqstat_write,
.release = lpfc_debugfs_release,
};
@@ -6075,17 +6191,22 @@ nvmeio_off:
goto debug_failed;
}
- snprintf(name, sizeof(name), "nvmektime");
- vport->debug_nvmektime =
+ snprintf(name, sizeof(name), "ioktime");
+ vport->debug_ioktime =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_nvmektime);
+ vport, &lpfc_debugfs_op_ioktime);
+ if (!vport->debug_ioktime) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "0815 Cannot create debugfs ioktime\n");
+ goto debug_failed;
+ }
- snprintf(name, sizeof(name), "cpucheck");
- vport->debug_cpucheck =
+ snprintf(name, sizeof(name), "hdwqstat");
+ vport->debug_hdwqstat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
- vport, &lpfc_debugfs_op_cpucheck);
+ vport, &lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@@ -6216,11 +6337,11 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
debugfs_remove(vport->debug_scsistat); /* scsistat */
vport->debug_scsistat = NULL;
- debugfs_remove(vport->debug_nvmektime); /* nvmektime */
- vport->debug_nvmektime = NULL;
+ debugfs_remove(vport->debug_ioktime); /* ioktime */
+ vport->debug_ioktime = NULL;
- debugfs_remove(vport->debug_cpucheck); /* cpucheck */
- vport->debug_cpucheck = NULL;
+ debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
+ vport->debug_hdwqstat = NULL;
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
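lpfc_io_ktime(), moved into lpfc_debugfs.c above, records five timestamps per I/O and derives four segments; because seg3 and seg4 are captured relative to command start, the earlier segments are subtracted back out. A worked sketch with made-up nanosecond values (none of these numbers come from real hardware):

/* Sketch of the four-segment split in lpfc_io_ktime() above. */
#include <stdio.h>

int main(void)
{
	/* Hypothetical timeline, in ns */
	unsigned long long ts_last_cmd  = 1000; /* prior cmpl handed upward */
	unsigned long long ts_cmd_start = 1400; /* cmd enters the driver */
	unsigned long long ts_cmd_wqput = 1900; /* WQ doorbell rung */
	unsigned long long ts_isr_cmpl  = 9400; /* MSI-X ISR sees cmpl */
	unsigned long long ts_data_io   = 9700; /* cmpl handed upward */

	unsigned long long seg1 = ts_cmd_start - ts_last_cmd;  /* 400: idle gap */
	unsigned long long seg2 = ts_cmd_wqput - ts_cmd_start; /* 500: submit */
	/* seg3/seg4 are taken from cmd start, so peel prior segments off */
	unsigned long long seg3 = (ts_isr_cmpl - ts_cmd_start) - seg2;       /* 7500 */
	unsigned long long seg4 = (ts_data_io - ts_cmd_start) - seg2 - seg3; /* 300 */

	printf("seg1=%llu seg2=%llu seg3=%llu seg4=%llu\n",
	       seg1, seg2, seg3, seg4);
	return 0;
}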
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
index 20f2537af511..7ab6d3b08698 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.h
+++ b/drivers/scsi/lpfc/lpfc_debugfs.h
@@ -46,8 +46,7 @@
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
-#define LPFC_NVMEKTIME_SIZE 8192
-#define LPFC_CPUCHECK_SIZE 8192
+#define LPFC_IOKTIME_SIZE 8192
#define LPFC_NVMEIO_TRC_SIZE 8192
/* scsistat output buffer size */
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index ae51c0dbba0a..c20034b3101c 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -3262,8 +3262,7 @@ typedef struct {
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd1 : 19; /* Reserved */
- uint32_t cdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd1 : 20; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
@@ -3287,12 +3286,10 @@ typedef struct {
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t rsvd2 : 2; /* Reserved */
uint32_t casabt : 1; /* Configure async abts status notice */
- uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd1 : 19; /* Reserved */
+ uint32_t rsvd1 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t rsvd3 : 19; /* Reserved */
- uint32_t gdss : 1; /* Configure Data Security SLI */
+ uint32_t rsvd3 : 20; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
@@ -3316,8 +3313,7 @@ typedef struct {
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t rsvd4 : 2; /* Reserved */
uint32_t gasabt : 1; /* Grant async abts status notice */
- uint32_t gdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd3 : 19; /* Reserved */
+ uint32_t rsvd3 : 20; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
@@ -3339,15 +3335,11 @@ typedef struct {
uint32_t rsvd6; /* Reserved */
#ifdef __BIG_ENDIAN_BITFIELD
- uint32_t fips_rev : 3; /* FIPS Spec Revision */
- uint32_t fips_level : 4; /* FIPS Level */
- uint32_t sec_err : 9; /* security crypto error */
+ uint32_t rsvd7 : 16;
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
#else /* __LITTLE_ENDIAN */
uint32_t max_vpi : 16; /* Max number of virt N-Ports */
- uint32_t sec_err : 9; /* security crypto error */
- uint32_t fips_level : 4; /* FIPS Level */
- uint32_t fips_rev : 3; /* FIPS Spec Revision */
+ uint32_t rsvd7 : 16;
#endif
} CONFIG_PORT_VAR;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 9d03e9b71efb..4104bdcdbb6f 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -4231,6 +4231,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
struct Scsi_Host *shost = NULL;
+ struct scsi_host_template *template;
int error = 0;
int i;
uint64_t wwn;
@@ -4259,22 +4260,50 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
}
}
- if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
- if (dev != &phba->pcidev->dev) {
- shost = scsi_host_alloc(&lpfc_vport_template,
- sizeof(struct lpfc_vport));
+ /* Seed template for SCSI host registration */
+ if (dev == &phba->pcidev->dev) {
+ template = &phba->port_template;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Seed physical port template */
+ memcpy(template, &lpfc_template, sizeof(*template));
+
+ if (use_no_reset_hba) {
+ /* template is for a no reset SCSI Host */
+ template->max_sectors = 0xffff;
+ template->eh_host_reset_handler = NULL;
+ }
+
+ /* Template for all vports this physical port creates */
+ memcpy(&phba->vport_template, &lpfc_template,
+ sizeof(*template));
+ phba->vport_template.max_sectors = 0xffff;
+ phba->vport_template.shost_attrs = lpfc_vport_attrs;
+ phba->vport_template.eh_bus_reset_handler = NULL;
+ phba->vport_template.eh_host_reset_handler = NULL;
+ phba->vport_template.vendor_id = 0;
+
+ /* Initialize the host templates with updated value */
+ if (phba->sli_rev == LPFC_SLI_REV4) {
+ template->sg_tablesize = phba->cfg_scsi_seg_cnt;
+ phba->vport_template.sg_tablesize =
+ phba->cfg_scsi_seg_cnt;
+ } else {
+ template->sg_tablesize = phba->cfg_sg_seg_cnt;
+ phba->vport_template.sg_tablesize =
+ phba->cfg_sg_seg_cnt;
+ }
+
} else {
- if (!use_no_reset_hba)
- shost = scsi_host_alloc(&lpfc_template,
- sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template_no_hr,
- sizeof(struct lpfc_vport));
+ /* NVMET is for physical port only */
+ memcpy(template, &lpfc_template_nvme,
+ sizeof(*template));
}
- } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
- shost = scsi_host_alloc(&lpfc_template_nvme,
- sizeof(struct lpfc_vport));
+ } else {
+ template = &phba->vport_template;
}
+
+ shost = scsi_host_alloc(template, sizeof(struct lpfc_vport));
if (!shost)
goto out;
@@ -4329,6 +4358,12 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->port_type = LPFC_PHYSICAL_PORT;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
+ "9081 CreatePort TMPLATE type %x TBLsize %d "
+ "SEGcnt %d/%d\n",
+ vport->port_type, shost->sg_tablesize,
+ phba->cfg_scsi_seg_cnt, phba->cfg_sg_seg_cnt);
+
/* Initialize all internally managed lists. */
INIT_LIST_HEAD(&vport->fc_nodes);
INIT_LIST_HEAD(&vport->rcv_buffer_list);
@@ -6301,11 +6336,6 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
* used to create the sg_dma_buf_pool must be dynamically calculated.
*/
- /* Initialize the host templates the configured values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
if (phba->sli_rev == LPFC_SLI_REV4)
entry_sz = sizeof(struct sli4_sge);
else
@@ -6346,7 +6376,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
}
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
- "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
+ "9088 INIT sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
phba->cfg_total_seg_cnt);
@@ -6816,11 +6846,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;
}
- /* Initialize the host templates with the updated values. */
- lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt;
- lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt;
-
lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
"9087 sg_seg_cnt:%d dmabuf_size:%d "
"total:%d scsi:%d nvme:%d\n",
@@ -6926,6 +6951,17 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_free_hba_cpu_map;
}
+
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ phba->sli4_hba.c_stat = alloc_percpu(struct lpfc_hdwq_stat);
+ if (!phba->sli4_hba.c_stat) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3332 Failed allocating per cpu hdwq stats\n");
+ rc = -ENOMEM;
+ goto out_free_hba_eq_info;
+ }
+#endif
+
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -6945,6 +6981,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
return 0;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+out_free_hba_eq_info:
+ free_percpu(phba->sli4_hba.eq_info);
+#endif
out_free_hba_cpu_map:
kfree(phba->sli4_hba.cpu_map);
out_free_hba_eq_hdl:
@@ -6983,6 +7023,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
free_percpu(phba->sli4_hba.eq_info);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ free_percpu(phba->sli4_hba.c_stat);
+#endif
/* Free memory allocated for msi-x interrupt vector to CPU mapping */
kfree(phba->sli4_hba.cpu_map);
@@ -10823,6 +10866,9 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat *c_stat;
+#endif
max_phys_id = 0;
min_phys_id = LPFC_VECTOR_MAP_EMPTY;
@@ -11074,10 +11120,17 @@ found_any:
idx = 0;
for_each_possible_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat = per_cpu_ptr(phba->sli4_hba.c_stat, cpu);
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY)
continue;
cpup->hdwq = idx++ % phba->cfg_hdw_queue;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ c_stat->hdwq_no = cpup->hdwq;
+#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3340 Set Affinity: not present "
"CPU %d hdwq %d\n",
@@ -11173,11 +11226,9 @@ static void lpfc_cpuhp_add(struct lpfc_hba *phba)
rcu_read_lock();
- if (!list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
+ if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
rcu_read_unlock();
@@ -13145,6 +13196,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
lpfc_sli4_ras_setup(phba);
INIT_LIST_HEAD(&phba->poll_list);
+ timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp);
return 0;
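lpfc_create_port() above stops specializing the shared lpfc_template/lpfc_vport_template globals at setup time and instead seeds private copies held in the lpfc_hba, so per-adapter settings such as sg_tablesize can no longer race between ports. A simplified sketch of the copy-then-specialize pattern; the struct, fields, and values are stand-ins, not the driver's types.

/* Sketch: copy a const baseline template, then specialize the copy. */
#include <stdio.h>
#include <string.h>

struct host_template {
	const char *name;
	int sg_tablesize;
	int max_sectors;
};

static const struct host_template base_template = {
	.name = "demo", .sg_tablesize = 64, .max_sectors = 0xFFFF,
};

struct adapter {
	struct host_template port_template;   /* physical port */
	struct host_template vport_template;  /* all child vports */
	int cfg_seg_cnt;
};

static void seed_templates(struct adapter *a)
{
	memcpy(&a->port_template, &base_template, sizeof(base_template));
	memcpy(&a->vport_template, &base_template, sizeof(base_template));

	/* Per-adapter specialization happens on the private copies. */
	a->port_template.sg_tablesize = a->cfg_seg_cnt;
	a->vport_template.sg_tablesize = a->cfg_seg_cnt;
}

int main(void)
{
	struct adapter a = { .cfg_seg_cnt = 128 };
	struct adapter b = { .cfg_seg_cnt = 256 };

	seed_templates(&a);
	seed_templates(&b);   /* b's config cannot clobber a's */
	printf("%d %d\n", a.port_template.sg_tablesize,
	       b.port_template.sg_tablesize);
	return 0;
}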
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index d1773c01d2b3..e35b52b66d6c 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1299,8 +1299,6 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
if (phba->cfg_enable_bg)
mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
- if (phba->cfg_enable_dss)
- mb->un.varCfgPort.cdss = 1; /* Configure Security */
mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index f6c8963c915d..a45936e08031 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -382,13 +382,15 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
if (ndlp->upcall_flags & NLP_WAIT_FOR_UNREG) {
ndlp->nrport = NULL;
ndlp->upcall_flags &= ~NLP_WAIT_FOR_UNREG;
- }
- spin_unlock_irq(&vport->phba->hbalock);
+ spin_unlock_irq(&vport->phba->hbalock);
- /* Remove original register reference. The host transport
- * won't reference this rport/remoteport any further.
- */
- lpfc_nlp_put(ndlp);
+ /* Remove original register reference. The host transport
+ * won't reference this rport/remoteport any further.
+ */
+ lpfc_nlp_put(ndlp);
+ } else {
+ spin_unlock_irq(&vport->phba->hbalock);
+ }
rport_err:
return;
@@ -897,88 +899,6 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
sgl->sge_len = cpu_to_le32(nCmd->rsplen);
}
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-static void
-lpfc_nvme_ktime(struct lpfc_hba *phba,
- struct lpfc_io_buf *lpfc_ncmd)
-{
- uint64_t seg1, seg2, seg3, seg4;
- uint64_t segsum;
-
- if (!lpfc_ncmd->ts_last_cmd ||
- !lpfc_ncmd->ts_cmd_start ||
- !lpfc_ncmd->ts_cmd_wqput ||
- !lpfc_ncmd->ts_isr_cmpl ||
- !lpfc_ncmd->ts_data_nvme)
- return;
-
- if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_cmd_start)
- return;
- if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
- return;
- if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
- return;
- if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
- return;
- if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
- return;
- /*
- * Segment 1 - Time from Last FCP command cmpl is handed
- * off to NVME Layer to start of next command.
- * Segment 2 - Time from Driver receives a IO cmd start
- * from NVME Layer to WQ put is done on IO cmd.
- * Segment 3 - Time from Driver WQ put is done on IO cmd
- * to MSI-X ISR for IO cmpl.
- * Segment 4 - Time from MSI-X ISR for IO cmpl to when
- * cmpl is handled off to the NVME Layer.
- */
- seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
- if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
- seg1 = 0;
-
- /* Calculate times relative to start of IO */
- seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
- segsum = seg2;
- seg3 = lpfc_ncmd->ts_isr_cmpl - lpfc_ncmd->ts_cmd_start;
- if (segsum > seg3)
- return;
- seg3 -= segsum;
- segsum += seg3;
-
- seg4 = lpfc_ncmd->ts_data_nvme - lpfc_ncmd->ts_cmd_start;
- if (segsum > seg4)
- return;
- seg4 -= segsum;
-
- phba->ktime_data_samples++;
- phba->ktime_seg1_total += seg1;
- if (seg1 < phba->ktime_seg1_min)
- phba->ktime_seg1_min = seg1;
- else if (seg1 > phba->ktime_seg1_max)
- phba->ktime_seg1_max = seg1;
- phba->ktime_seg2_total += seg2;
- if (seg2 < phba->ktime_seg2_min)
- phba->ktime_seg2_min = seg2;
- else if (seg2 > phba->ktime_seg2_max)
- phba->ktime_seg2_max = seg2;
- phba->ktime_seg3_total += seg3;
- if (seg3 < phba->ktime_seg3_min)
- phba->ktime_seg3_min = seg3;
- else if (seg3 > phba->ktime_seg3_max)
- phba->ktime_seg3_max = seg3;
- phba->ktime_seg4_total += seg4;
- if (seg4 < phba->ktime_seg4_min)
- phba->ktime_seg4_min = seg4;
- else if (seg4 > phba->ktime_seg4_max)
- phba->ktime_seg4_max = seg4;
-
- lpfc_ncmd->ts_last_cmd = 0;
- lpfc_ncmd->ts_cmd_start = 0;
- lpfc_ncmd->ts_cmd_wqput = 0;
- lpfc_ncmd->ts_isr_cmpl = 0;
- lpfc_ncmd->ts_data_nvme = 0;
-}
-#endif
/**
* lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
@@ -1010,6 +930,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
uint32_t code, status, idx;
uint16_t cid, sqhd, data;
uint32_t *ptr;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int cpu;
+#endif
/* Sanity check on return of outstanding command */
if (!lpfc_ncmd) {
@@ -1178,23 +1101,19 @@ out_err:
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
if (lpfc_ncmd->ts_cmd_start) {
lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
- lpfc_ncmd->ts_data_nvme = ktime_get_ns();
- phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
- lpfc_nvme_ktime(phba, lpfc_ncmd);
+ lpfc_ncmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_ncmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_ncmd);
}
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) {
- uint32_t cpu;
- idx = lpfc_ncmd->cur_iocbq.hba_wqidx;
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_NVME_IO)) {
cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- if (lpfc_ncmd->cpu != cpu)
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_NVME_IOERR,
- "6701 CPU Check cmpl: "
- "cpu %d expect %d\n",
- cpu, lpfc_ncmd->cpu);
- phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ if (lpfc_ncmd->cpu != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6701 CPU Check cmpl: "
+ "cpu %d expect %d\n",
+ cpu, lpfc_ncmd->cpu);
}
#endif
@@ -1743,19 +1662,17 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
if (lpfc_ncmd->ts_cmd_start)
lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
- if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
+ if (phba->hdwqstat_on & LPFC_CHECK_NVME_IO) {
cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- lpfc_ncmd->cpu = cpu;
- if (idx != cpu)
- lpfc_printf_vlog(vport,
- KERN_INFO, LOG_NVME_IOERR,
- "6702 CPU Check cmd: "
- "cpu %d wq %d\n",
- lpfc_ncmd->cpu,
- lpfc_queue_info->index);
- phba->sli4_hba.hdwq[idx].cpucheck_xmt_io[cpu]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ lpfc_ncmd->cpu = cpu;
+ if (idx != cpu)
+ lpfc_printf_vlog(vport,
+ KERN_INFO, LOG_NVME_IOERR,
+ "6702 CPU Check cmd: "
+ "cpu %d wq %d\n",
+ lpfc_ncmd->cpu,
+ lpfc_queue_info->index);
}
#endif
return 0;
@@ -1985,8 +1902,6 @@ out_unlock:
/* Declare and initialization an instance of the FC NVME template. */
static struct nvme_fc_port_template lpfc_nvme_template = {
- .module = THIS_MODULE,
-
/* initiator-based functions */
.localport_delete = lpfc_nvme_localport_delete,
.remoteport_delete = lpfc_nvme_remoteport_delete,
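The accounting rework above replaces the fixed cpucheck_*[LPFC_CHECK_CPU_CNT] arrays with a per-CPU lpfc_hdwq_stat bumped via this_cpu_inc(), so the hot path is lock-free and no longer limited to 128 CPUs; aggregation happens only when debugfs is read. Below is a user-space model of that layout, where calloc() stands in for alloc_percpu() and CPU ids are simulated by a loop index.

/* Sketch: per-CPU stat slots, aggregated per hardware queue on read. */
#include <stdio.h>
#include <stdlib.h>

struct hdwq_stat {
	unsigned int hdwq_no; /* which hardware queue this CPU feeds */
	unsigned int xmt_io;
};

int main(void)
{
	int ncpus = 8, nhdwq = 2, cpu, q;
	struct hdwq_stat *stats = calloc(ncpus, sizeof(*stats));

	if (!stats)
		return 1;

	/* Affinity setup: remember each CPU's hardware queue once. */
	for (cpu = 0; cpu < ncpus; cpu++)
		stats[cpu].hdwq_no = cpu % nhdwq;

	/* Fast path: each CPU bumps only its own slot (this_cpu_inc()). */
	for (cpu = 0; cpu < ncpus; cpu++)
		stats[cpu].xmt_io++;

	/* Slow path (debugfs read): walk every CPU, group by queue. */
	for (q = 0; q < nhdwq; q++) {
		unsigned int tot = 0;

		for (cpu = 0; cpu < ncpus; cpu++)
			if (stats[cpu].hdwq_no == (unsigned int)q)
				tot += stats[cpu].xmt_io;
		printf("HDWQ %d: xmt_io %u\n", q, tot);
	}
	free(stats);
	return 0;
}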
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index 9dc9afe1c255..565419bf8d74 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -707,7 +707,7 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
struct lpfc_nvmet_rcv_ctx *ctxp;
uint32_t status, result, op, start_clean, logerr;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- uint32_t id;
+ int id;
#endif
ctxp = cmdwqe->context2;
@@ -814,16 +814,14 @@ lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
rsp->done(rsp);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
id = raw_smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT) {
- if (ctxp->cpu != id)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6704 CPU Check cmdcmpl: "
- "cpu %d expect %d\n",
- id, ctxp->cpu);
- phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_cmpl_io[id]++;
- }
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
+ if (ctxp->cpu != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6704 CPU Check cmdcmpl: "
+ "cpu %d expect %d\n",
+ id, ctxp->cpu);
}
#endif
}
@@ -931,6 +929,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
struct lpfc_sli_ring *pring;
unsigned long iflags;
int rc;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ int id;
+#endif
if (phba->pport->load_flag & FC_UNLOADING) {
rc = -ENODEV;
@@ -954,16 +955,14 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
if (!ctxp->hdwq)
ctxp->hdwq = &phba->sli4_hba.hdwq[rsp->hwqid];
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
- int id = raw_smp_processor_id();
- if (id < LPFC_CHECK_CPU_CNT) {
- if (rsp->hwqid != id)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6705 CPU Check OP: "
- "cpu %d expect %d\n",
- id, rsp->hwqid);
- phba->sli4_hba.hdwq[rsp->hwqid].cpucheck_xmt_io[id]++;
- }
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ id = raw_smp_processor_id();
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
+ if (rsp->hwqid != id)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6705 CPU Check OP: "
+ "cpu %d expect %d\n",
+ id, rsp->hwqid);
ctxp->cpu = id; /* Setup cpu for cmpl check */
}
#endif
@@ -2270,15 +2269,13 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
size = nvmebuf->bytes_recv;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
- if (current_cpu < LPFC_CHECK_CPU_CNT) {
- if (idx != current_cpu)
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
- "6703 CPU Check rcv: "
- "cpu %d expect %d\n",
- current_cpu, idx);
- phba->sli4_hba.hdwq[idx].cpucheck_rcv_io[current_cpu]++;
- }
+ if (phba->hdwqstat_on & LPFC_CHECK_NVMET_IO) {
+ this_cpu_inc(phba->sli4_hba.c_stat->rcv_io);
+ if (idx != current_cpu)
+ lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
+ "6703 CPU Check rcv: "
+ "cpu %d expect %d\n",
+ current_cpu, idx);
}
#endif
@@ -2598,7 +2595,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
union lpfc_wqe128 *wqe;
struct ulp_bde64 *bde;
dma_addr_t physaddr;
- int i, cnt;
+ int i, cnt, nsegs;
int do_pbde;
int xc = 1;
@@ -2629,6 +2626,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
phba->cfg_nvme_seg_cnt);
return NULL;
}
+ nsegs = rsp->sg_cnt;
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
nvmewqe = ctxp->wqeq;
@@ -2868,7 +2866,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
wqe->fcp_trsp.rsvd_12_15[0] = 0;
/* Use rspbuf, NOT sg list */
- rsp->sg_cnt = 0;
+ nsegs = 0;
sgl->word2 = 0;
atomic_inc(&tgtp->xmt_fcp_rsp);
break;
@@ -2885,7 +2883,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
nvmewqe->context1 = ndlp;
- for_each_sg(rsp->sg, sgel, rsp->sg_cnt, i) {
+ for_each_sg(rsp->sg, sgel, nsegs, i) {
physaddr = sg_dma_address(sgel);
cnt = sg_dma_len(sgel);
sgl->addr_hi = putPaddrHigh(physaddr);
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 0fc9a242bc65..ad62fb3f3a54 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -3805,9 +3805,6 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
struct Scsi_Host *shost;
int idx;
uint32_t logit = LOG_FCP;
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- int cpu;
-#endif
/* Guard against abort handler being called at same time */
spin_lock(&lpfc_cmd->buf_lock);
@@ -3826,11 +3823,8 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
- cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq)
- phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++;
- }
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->cmpl_io);
#endif
shost = cmd->device->host;
@@ -4031,6 +4025,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
lpfc_cmd->pCmd = NULL;
spin_unlock(&lpfc_cmd->buf_lock);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (lpfc_cmd->ts_cmd_start) {
+ lpfc_cmd->ts_isr_cmpl = pIocbIn->isr_timestamp;
+ lpfc_cmd->ts_data_io = ktime_get_ns();
+ phba->ktime_last_cmd = lpfc_cmd->ts_data_io;
+ lpfc_io_ktime(phba, lpfc_cmd);
+ }
+#endif
/* The sdev is not guaranteed to be valid post scsi_done upcall. */
cmd->scsi_done(cmd);
@@ -4504,7 +4506,10 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
int err, idx;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- int cpu;
+ uint64_t start = 0L;
+
+ if (phba->ktime_on)
+ start = ktime_get_ns();
#endif
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
@@ -4626,17 +4631,20 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
- if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) {
- cpu = raw_smp_processor_id();
- if (cpu < LPFC_CHECK_CPU_CNT) {
- struct lpfc_sli4_hdw_queue *hdwq =
- &phba->sli4_hba.hdwq[lpfc_cmd->hdwq_no];
- hdwq->cpucheck_xmt_io[cpu]++;
- }
- }
+ if (unlikely(phba->hdwqstat_on & LPFC_CHECK_SCSI_IO))
+ this_cpu_inc(phba->sli4_hba.c_stat->xmt_io);
#endif
err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
&lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ if (start) {
+ lpfc_cmd->ts_cmd_start = start;
+ lpfc_cmd->ts_last_cmd = phba->ktime_last_cmd;
+ lpfc_cmd->ts_cmd_wqput = ktime_get_ns();
+ } else {
+ lpfc_cmd->ts_cmd_start = 0;
+ }
+#endif
if (err) {
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"3376 FCP could not issue IOCB err %x"
@@ -6023,31 +6031,6 @@ struct scsi_host_template lpfc_template_nvme = {
.track_queue_depth = 0,
};
-struct scsi_host_template lpfc_template_no_hr = {
- .module = THIS_MODULE,
- .name = LPFC_DRIVER_NAME,
- .proc_name = LPFC_DRIVER_NAME,
- .info = lpfc_info,
- .queuecommand = lpfc_queuecommand,
- .eh_timed_out = fc_eh_timed_out,
- .eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler = lpfc_device_reset_handler,
- .eh_target_reset_handler = lpfc_target_reset_handler,
- .eh_bus_reset_handler = lpfc_bus_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
- .scan_finished = lpfc_scan_finished,
- .this_id = -1,
- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
- .cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_hba_attrs,
- .max_sectors = 0xFFFFFFFF,
- .vendor_id = LPFC_NL_VENDOR_ID,
- .change_queue_depth = scsi_change_queue_depth,
- .track_queue_depth = 1,
-};
-
struct scsi_host_template lpfc_template = {
.module = THIS_MODULE,
.name = LPFC_DRIVER_NAME,
@@ -6073,26 +6056,3 @@ struct scsi_host_template lpfc_template = {
.change_queue_depth = scsi_change_queue_depth,
.track_queue_depth = 1,
};
-
-struct scsi_host_template lpfc_vport_template = {
- .module = THIS_MODULE,
- .name = LPFC_DRIVER_NAME,
- .proc_name = LPFC_DRIVER_NAME,
- .info = lpfc_info,
- .queuecommand = lpfc_queuecommand,
- .eh_timed_out = fc_eh_timed_out,
- .eh_abort_handler = lpfc_abort_handler,
- .eh_device_reset_handler = lpfc_device_reset_handler,
- .eh_target_reset_handler = lpfc_target_reset_handler,
- .slave_alloc = lpfc_slave_alloc,
- .slave_configure = lpfc_slave_configure,
- .slave_destroy = lpfc_slave_destroy,
- .scan_finished = lpfc_scan_finished,
- .this_id = -1,
- .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT,
- .cmd_per_lun = LPFC_CMD_PER_LUN,
- .shost_attrs = lpfc_vport_attrs,
- .max_sectors = 0xFFFF,
- .change_queue_depth = scsi_change_queue_depth,
- .track_queue_depth = 1,
-};
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0b26b5c0527e..b6fb665e6ec4 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -230,25 +230,16 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
* This routine will update the HBA index of a queue to reflect consumption of
* Work Queue Entries by the HBA. When the HBA indicates that it has consumed
* an entry the host calls this function to update the queue's internal
- * pointers. This routine returns the number of entries that were consumed by
- * the HBA.
+ * pointers.
**/
-static uint32_t
+static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
- uint32_t released = 0;
-
/* sanity check on queue memory */
if (unlikely(!q))
- return 0;
+ return;
- if (q->hba_index == index)
- return 0;
- do {
- q->hba_index = ((q->hba_index + 1) % q->entry_count);
- released++;
- } while (q->hba_index != index);
- return released;
+ q->hba_index = index;
}
/**
@@ -2511,6 +2502,8 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
!pmb->u.mb.mbxStatus) {
rpi = pmb->u.mb.un.varWords[0];
vpi = pmb->u.mb.un.varRegLogin.vpi;
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
lpfc_unreg_login(phba, vpi, rpi, pmb);
pmb->vport = vport;
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -4044,6 +4037,11 @@ lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
struct lpfc_iocbq *piocb, *next_iocb;
spin_lock_irq(&phba->hbalock);
+ if (phba->hba_flag & HBA_IOQ_FLUSH ||
+ !phba->sli4_hba.hdwq) {
+ spin_unlock_irq(&phba->hbalock);
+ return;
+ }
/* Indicate the I/O queues are flushed */
phba->hba_flag |= HBA_IOQ_FLUSH;
spin_unlock_irq(&phba->hbalock);
@@ -5034,23 +5032,6 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
} else
phba->max_vpi = 0;
- phba->fips_level = 0;
- phba->fips_spec_rev = 0;
- if (pmb->u.mb.un.varCfgPort.gdss) {
- phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
- phba->fips_level = pmb->u.mb.un.varCfgPort.fips_level;
- phba->fips_spec_rev = pmb->u.mb.un.varCfgPort.fips_rev;
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2850 Security Crypto Active. FIPS x%d "
- "(Spec Rev: x%d)",
- phba->fips_level, phba->fips_spec_rev);
- }
- if (pmb->u.mb.un.varCfgPort.sec_err) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2856 Config Port Security Crypto "
- "Error: x%x ",
- pmb->u.mb.un.varCfgPort.sec_err);
- }
if (pmb->u.mb.un.varCfgPort.gerbm)
phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
if (pmb->u.mb.un.varCfgPort.gcrp)
@@ -14442,12 +14423,10 @@ static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{
struct lpfc_hba *phba = eq->phba;
- if (list_empty(&phba->poll_list)) {
- timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
- /* kickstart slowpath processing for this eq */
+ /* kickstart slowpath processing if needed */
+ if (list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB));
- }
list_add_rcu(&eq->_poll_list, &phba->poll_list);
synchronize_rcu();
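The lpfc_sli4_wq_release() change above rests on the observation that stepping hba_index forward modulo entry_count until it equals the hardware-reported index always terminates with hba_index == index, and that no caller used the returned count; the walk therefore reduces to a single store. A small sketch that checks the equivalence with arbitrary example values:

/* Sketch: the old index walk is equivalent to one assignment. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int entry_count = 256, hba_index = 250, index = 5;
	unsigned int walked = hba_index;

	/* Old behaviour: step one entry at a time until we reach 'index' */
	if (walked != index) {
		do {
			walked = (walked + 1) % entry_count;
		} while (walked != index);
	}

	/* New behaviour: the loop above always ends at 'index' anyway */
	hba_index = index;

	assert(walked == hba_index);
	printf("hba_index=%u\n", hba_index);
	return 0;
}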
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 7bcf922a8be2..93d976ea8c5d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -446,6 +446,6 @@ struct lpfc_io_buf {
uint64_t ts_last_cmd;
uint64_t ts_cmd_wqput;
uint64_t ts_isr_cmpl;
- uint64_t ts_data_nvme;
+ uint64_t ts_data_io;
#endif
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d963ca871383..8da7429e385a 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -697,13 +697,6 @@ struct lpfc_sli4_hdw_queue {
struct lpfc_lock_stat lock_conflict;
#endif
-#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
-#define LPFC_CHECK_CPU_CNT 128
- uint32_t cpucheck_rcv_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT];
- uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT];
-#endif
-
/* Per HDWQ pool resources */
struct list_head sgl_list;
struct list_head cmd_rsp_buf_list;
@@ -740,6 +733,15 @@ struct lpfc_sli4_hdw_queue {
#define lpfc_qp_spin_lock(lock, qp, lstat) spin_lock(lock)
#endif
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+struct lpfc_hdwq_stat {
+ u32 hdwq_no;
+ u32 rcv_io;
+ u32 xmt_io;
+ u32 cmpl_io;
+};
+#endif
+
struct lpfc_sli4_hba {
void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
* config space registers
@@ -921,6 +923,9 @@ struct lpfc_sli4_hba {
struct cpumask numa_mask;
uint16_t curr_disp_cpu;
struct lpfc_eq_intr_info __percpu *eq_info;
+#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
+ struct lpfc_hdwq_stat __percpu *c_stat;
+#endif
uint32_t conf_trunk;
#define lpfc_conf_trunk_port0_WORD conf_trunk
#define lpfc_conf_trunk_port0_SHIFT 0
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c4ab006e6ecc..ca40c47cfbe0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.6.0.4"
+#define LPFC_DRIVER_VERSION "12.8.0.0"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 778d5e6ce385..04a40afe60e3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9908,8 +9908,8 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
@@ -9992,8 +9992,8 @@ scsih_shutdown(struct pci_dev *pdev)
ioc->remove_host = 1;
- mpt3sas_wait_for_commands_to_complete(ioc);
- _scsih_flush_running_cmds(ioc);
+ if (!pci_device_is_present(pdev))
+ _scsih_flush_running_cmds(ioc);
_scsih_fw_event_cleanup_queue(ioc);
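The mpt3sas remove/shutdown paths above now force-fail outstanding commands only when the PCI device has actually vanished; a device that is still present is left to complete them through the normal ISR path rather than being flushed unconditionally. A toy model of the guard, where the _sim helper merely stands in for pci_device_is_present():

/* Sketch: flush in-flight commands only on surprise removal. */
#include <stdbool.h>
#include <stdio.h>

static bool pci_device_is_present_sim(bool surprise_removed)
{
	return !surprise_removed;
}

static void remove_host(bool surprise_removed)
{
	if (!pci_device_is_present_sim(surprise_removed))
		printf("flush: fail outstanding cmds immediately\n");
	else
		printf("device present: let cmds complete normally\n");
}

int main(void)
{
	remove_host(false);  /* ordinary driver unload */
	remove_host(true);   /* hot-unplugged with I/O in flight */
	return 0;
}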
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 84e2a980dea0..4886d247df6f 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -610,7 +610,6 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
}
static struct nvme_fc_port_template qla_nvme_fc_transport = {
- .module = THIS_MODULE,
.localport_delete = qla_nvme_localport_delete,
.remoteport_delete = qla_nvme_remoteport_delete,
.create_queue = qla_nvme_alloc_queue,
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 0ec1b31c75a9..b2a803c51288 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2022,7 +2022,7 @@ static void __iscsi_unbind_session(struct work_struct *work)
if (session->target_id == ISCSI_MAX_TARGET) {
spin_unlock_irqrestore(&session->lock, flags);
mutex_unlock(&ihost->mutex);
- return;
+ goto unbind_session_exit;
}
target_id = session->target_id;
@@ -2034,6 +2034,8 @@ static void __iscsi_unbind_session(struct work_struct *work)
ida_simple_remove(&iscsi_sess_ida, target_id);
scsi_remove_target(&session->dev);
+
+unbind_session_exit:
iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 1c270e6034d5..d2fe3fa470f9 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -550,10 +550,12 @@ out:
static void sr_block_release(struct gendisk *disk, fmode_t mode)
{
struct scsi_cd *cd = scsi_cd(disk);
+
mutex_lock(&cd->lock);
cdrom_release(&cd->cdi, mode);
- scsi_cd_put(cd);
mutex_unlock(&cd->lock);
+
+ scsi_cd_put(cd);
}
static int sr_block_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
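
The sr.c hunk reorders the final reference drop against the mutex: scsi_cd_put() may free the scsi_cd that embeds cd->lock, so unlocking after the put would touch freed memory. A minimal sketch of the general rule, with hypothetical names:

/* Sketch only: a lock embedded in a refcounted object must be dropped
 * before the last reference is, or the unlock touches freed memory.
 */
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct obj {
	struct mutex lock;
	struct kref ref;
};

static void obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct obj, ref));
}

static void obj_close(struct obj *o)
{
	mutex_lock(&o->lock);
	/* ... teardown that needs the lock ... */
	mutex_unlock(&o->lock);

	/* Only now drop the reference: if it is the last one,
	 * obj_release() frees the structure holding the mutex.
	 */
	kref_put(&o->ref, obj_release);
}
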
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 40a66b31b31f..673c16596fb2 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -499,8 +499,15 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_lpm(hba);
- if (err)
+ if (err) {
+ /*
+			 * Forcibly set the link to the off state so that
+			 * ufshcd_suspend() triggers ufshcd_host_reset_and_restore()
+			 * for a complete host reset.
+ */
+ ufshcd_set_link_off(hba);
return -EAGAIN;
+ }
}
if (!ufshcd_is_link_active(hba))
@@ -519,8 +526,10 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
err = ufs_mtk_link_set_hpm(hba);
- if (err)
+ if (err) {
+ err = ufshcd_link_recovery(hba);
return err;
+ }
}
return 0;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index e04e8b8bdca6..698e8d20b4ba 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -172,19 +172,6 @@ enum {
#define ufshcd_clear_eh_in_progress(h) \
((h)->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
-#define ufshcd_set_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
-#define ufshcd_set_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
-#define ufshcd_set_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
-#define ufshcd_is_ufs_dev_active(h) \
- ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
-#define ufshcd_is_ufs_dev_sleep(h) \
- ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
-#define ufshcd_is_ufs_dev_poweroff(h) \
- ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
-
struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
@@ -868,28 +855,29 @@ static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
return false;
}
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: If true, set max possible frequency, otherwise set low frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 on error
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
struct ufs_clk_info *clki;
struct list_head *head = &hba->clk_list_head;
- ktime_t start = ktime_get();
- bool clk_state_changed = false;
if (list_empty(head))
goto out;
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
- if (ret)
- return ret;
-
list_for_each_entry(clki, head, list) {
if (!IS_ERR_OR_NULL(clki->clk)) {
if (scale_up && clki->max_freq) {
if (clki->curr_freq == clki->max_freq)
continue;
- clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->max_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@@ -908,7 +896,6 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
if (clki->curr_freq == clki->min_freq)
continue;
- clk_state_changed = true;
ret = clk_set_rate(clki->clk, clki->min_freq);
if (ret) {
dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
@@ -927,11 +914,37 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
clki->name, clk_get_rate(clki->clk));
}
+out:
+ return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 on error
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+ int ret = 0;
+ ktime_t start = ktime_get();
+
+ ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_set_clk_freq(hba, scale_up);
+ if (ret)
+ goto out;
+
ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+ if (ret)
+ ufshcd_set_clk_freq(hba, !scale_up);
out:
- if (clk_state_changed)
- trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+ trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
return ret;
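
The split above leaves ufshcd_set_clk_freq() as a pure clk_set_rate() loop and keeps the vendor notification and rollback policy in ufshcd_scale_clks(); if the POST_CHANGE hook rejects the new rates, the frequencies are put back by calling the setter with the opposite direction. Condensed, the control flow of the new ufshcd_scale_clks() is:

/* Sketch only: the notify / apply / notify-with-rollback shape of the
 * refactored ufshcd_scale_clks() above (tracing elided).
 */
static int scale_clks_sketch(struct ufs_hba *hba, bool scale_up)
{
	int ret;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
	if (ret)
		return ret;

	ret = ufshcd_set_clk_freq(hba, scale_up);	/* pure clk_set_rate() loop */
	if (ret)
		return ret;

	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
	if (ret)
		ufshcd_set_clk_freq(hba, !scale_up);	/* roll back on failure */

	return ret;
}
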
@@ -1065,8 +1078,7 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
}
/* check if the power mode needs to be changed or not? */
- ret = ufshcd_change_power_mode(hba, &new_pwr_info);
-
+ ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
__func__, ret,
@@ -1119,35 +1131,32 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
- return ret;
+ goto out;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
ret = ufshcd_scale_gear(hba, false);
if (ret)
- goto out;
+ goto out_unprepare;
}
ret = ufshcd_scale_clks(hba, scale_up);
if (ret) {
if (!scale_up)
ufshcd_scale_gear(hba, true);
- goto out;
+ goto out_unprepare;
}
/* scale up the gear after scaling up clocks */
if (scale_up) {
ret = ufshcd_scale_gear(hba, true);
- if (ret) {
+ if (ret)
ufshcd_scale_clks(hba, false);
- goto out;
- }
}
- ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
-
-out:
+out_unprepare:
ufshcd_clock_scaling_unprepare(hba);
+out:
ufshcd_release(hba);
return ret;
}
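
The reworked ufshcd_devfreq_scale() above follows the kernel's layered unwind-label idiom: each error path jumps to the deepest label whose resources were actually acquired, and success falls through the same cleanup. Stripped to its shape, with the gear-scaling steps elided:

/* Sketch only: the unwind-label shape of ufshcd_devfreq_scale() above. */
static int devfreq_scale_sketch(struct ufs_hba *hba, bool scale_up)
{
	int ret;

	ret = ufshcd_clock_scaling_prepare(hba);
	if (ret)
		goto out;		/* nothing to undo yet */

	ret = ufshcd_scale_clks(hba, scale_up);
	if (ret)
		goto out_unprepare;	/* undo only the prepare step */

	/* success also falls through: unprepare is always required */
out_unprepare:
	ufshcd_clock_scaling_unprepare(hba);
out:
	ufshcd_release(hba);
	return ret;
}
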
@@ -3785,7 +3794,7 @@ out:
return ret;
}
-static int ufshcd_link_recovery(struct ufs_hba *hba)
+int ufshcd_link_recovery(struct ufs_hba *hba)
{
int ret;
unsigned long flags;
@@ -3812,6 +3821,7 @@ static int ufshcd_link_recovery(struct ufs_hba *hba)
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
@@ -4112,8 +4122,6 @@ int ufshcd_config_pwr_mode(struct ufs_hba *hba,
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
ret = ufshcd_change_power_mode(hba, &final_params);
- if (!ret)
- ufshcd_print_pwr_info(hba);
return ret;
}
@@ -6299,7 +6307,7 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
- ufshcd_scale_clks(hba, true);
+ ufshcd_set_clk_freq(hba, true);
err = ufshcd_hba_enable(hba);
if (err)
@@ -7127,6 +7135,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
__func__, ret);
goto out;
}
+ ufshcd_print_pwr_info(hba);
}
/*
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index dd1ee277069a..6ffc08ad85f6 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -130,6 +130,19 @@ enum uic_link_state {
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+ ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+ ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+ ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
/*
* UFS Power management levels.
* Each level is in increasing order of power savings.
@@ -788,6 +801,7 @@ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int);
+int ufshcd_link_recovery(struct ufs_hba *hba);
int ufshcd_make_hba_operational(struct ufs_hba *hba);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
@@ -1083,6 +1097,7 @@ static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
if (hba->vops && hba->vops->device_reset) {
hba->vops->device_reset(hba);
+ ufshcd_set_ufs_dev_active(hba);
ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0);
}
}
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 1778f8c62861..425ab6f7e375 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -22,5 +22,6 @@ source "drivers/soc/ux500/Kconfig"
source "drivers/soc/versatile/Kconfig"
source "drivers/soc/xilinx/Kconfig"
source "drivers/soc/zte/Kconfig"
+source "drivers/soc/kendryte/Kconfig"
endmenu
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index a39f17cea376..36452bed86ef 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -28,3 +28,4 @@ obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_PLAT_VERSATILE) += versatile/
obj-y += xilinx/
obj-$(CONFIG_ARCH_ZX) += zte/
+obj-$(CONFIG_SOC_KENDRYTE) += kendryte/
diff --git a/drivers/soc/kendryte/Kconfig b/drivers/soc/kendryte/Kconfig
new file mode 100644
index 000000000000..49785b1b0217
--- /dev/null
+++ b/drivers/soc/kendryte/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+
+if SOC_KENDRYTE
+
+config K210_SYSCTL
+ bool "Kendryte K210 system controller"
+ default y
+ depends on RISCV
+ help
+	  Enable this to control the various clocks of the K210 and to
+	  make the extra 2 MB of SRAM normally reserved for the AI
+	  engine available for general-purpose use.
+
+endif
diff --git a/drivers/soc/kendryte/Makefile b/drivers/soc/kendryte/Makefile
new file mode 100644
index 000000000000..002d9ce95c0d
--- /dev/null
+++ b/drivers/soc/kendryte/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_K210_SYSCTL) += k210-sysctl.o
diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c
new file mode 100644
index 000000000000..4608fbca20e1
--- /dev/null
+++ b/drivers/soc/kendryte/k210-sysctl.c
@@ -0,0 +1,248 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2019 Christoph Hellwig.
+ * Copyright (c) 2019 Western Digital Corporation or its affiliates.
+ */
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/bitfield.h>
+#include <asm/soc.h>
+
+#define K210_SYSCTL_CLK0_FREQ 26000000UL
+
+/* Registers base address */
+#define K210_SYSCTL_SYSCTL_BASE_ADDR 0x50440000ULL
+
+/* Registers */
+#define K210_SYSCTL_PLL0 0x08
+#define K210_SYSCTL_PLL1 0x0c
+/* clkr: 4 bits, clkf: 6 bits, clkod: 4 bits, bwadj: 6 bits */
+#define PLL_RESET (1 << 20)
+#define PLL_PWR (1 << 21)
+#define PLL_INTFB (1 << 22)
+#define PLL_BYPASS (1 << 23)
+#define PLL_TEST (1 << 24)
+#define PLL_OUT_EN (1 << 25)
+#define PLL_TEST_EN (1 << 26)
+#define K210_SYSCTL_PLL_LOCK 0x18
+#define PLL0_LOCK1 (1 << 0)
+#define PLL0_LOCK2 (1 << 1)
+#define PLL0_SLIP_CLEAR (1 << 2)
+#define PLL0_TEST_CLK_OUT (1 << 3)
+#define PLL1_LOCK1 (1 << 8)
+#define PLL1_LOCK2 (1 << 9)
+#define PLL1_SLIP_CLEAR (1 << 10)
+#define PLL1_TEST_CLK_OUT (1 << 11)
+#define PLL2_LOCK1 (1 << 16)
+#define PLL2_LOCK2 (1 << 17)
+#define PLL2_SLIP_CLEAR (1 << 18)
+#define PLL2_TEST_CLK_OUT (1 << 19)
+#define K210_SYSCTL_CLKSEL0 0x20
+#define CLKSEL_ACLK (1 << 0)
+#define K210_SYSCTL_CLKEN_CENT 0x28
+#define CLKEN_CPU (1 << 0)
+#define CLKEN_SRAM0 (1 << 1)
+#define CLKEN_SRAM1 (1 << 2)
+#define CLKEN_APB0 (1 << 3)
+#define CLKEN_APB1 (1 << 4)
+#define CLKEN_APB2 (1 << 5)
+#define K210_SYSCTL_CLKEN_PERI 0x2c
+#define CLKEN_ROM (1 << 0)
+#define CLKEN_DMA (1 << 1)
+#define CLKEN_AI (1 << 2)
+#define CLKEN_DVP (1 << 3)
+#define CLKEN_FFT (1 << 4)
+#define CLKEN_GPIO (1 << 5)
+#define CLKEN_SPI0 (1 << 6)
+#define CLKEN_SPI1 (1 << 7)
+#define CLKEN_SPI2 (1 << 8)
+#define CLKEN_SPI3 (1 << 9)
+#define CLKEN_I2S0 (1 << 10)
+#define CLKEN_I2S1 (1 << 11)
+#define CLKEN_I2S2 (1 << 12)
+#define CLKEN_I2C0 (1 << 13)
+#define CLKEN_I2C1 (1 << 14)
+#define CLKEN_I2C2 (1 << 15)
+#define CLKEN_UART1 (1 << 16)
+#define CLKEN_UART2 (1 << 17)
+#define CLKEN_UART3 (1 << 18)
+#define CLKEN_AES (1 << 19)
+#define CLKEN_FPIO (1 << 20)
+#define CLKEN_TIMER0 (1 << 21)
+#define CLKEN_TIMER1 (1 << 22)
+#define CLKEN_TIMER2 (1 << 23)
+#define CLKEN_WDT0 (1 << 24)
+#define CLKEN_WDT1 (1 << 25)
+#define CLKEN_SHA (1 << 26)
+#define CLKEN_OTP (1 << 27)
+#define CLKEN_RTC (1 << 29)
+
+struct k210_sysctl {
+ void __iomem *regs;
+ struct clk_hw hw;
+};
+
+static void k210_set_bits(u32 val, void __iomem *reg)
+{
+ writel(readl(reg) | val, reg);
+}
+
+static void k210_clear_bits(u32 val, void __iomem *reg)
+{
+ writel(readl(reg) & ~val, reg);
+}
+
+static void k210_pll1_enable(void __iomem *regs)
+{
+ u32 val;
+
+ val = readl(regs + K210_SYSCTL_PLL1);
+ val &= ~GENMASK(19, 0); /* clkr1 = 0 */
+ val |= FIELD_PREP(GENMASK(9, 4), 0x3B); /* clkf1 = 59 */
+ val |= FIELD_PREP(GENMASK(13, 10), 0x3); /* clkod1 = 3 */
+ val |= FIELD_PREP(GENMASK(19, 14), 0x3B); /* bwadj1 = 59 */
+ writel(val, regs + K210_SYSCTL_PLL1);
+
+ k210_clear_bits(PLL_BYPASS, regs + K210_SYSCTL_PLL1);
+ k210_set_bits(PLL_PWR, regs + K210_SYSCTL_PLL1);
+
+ /*
+ * Reset the pll. The magic NOPs come from the Kendryte reference SDK.
+ */
+ k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+ k210_set_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+ nop();
+ nop();
+ k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1);
+
+ for (;;) {
+ val = readl(regs + K210_SYSCTL_PLL_LOCK);
+ if (val & PLL1_LOCK2)
+ break;
+ writel(val | PLL1_SLIP_CLEAR, regs + K210_SYSCTL_PLL_LOCK);
+ }
+
+ k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL1);
+}
+
+static unsigned long k210_sysctl_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct k210_sysctl *s = container_of(hw, struct k210_sysctl, hw);
+ u32 clksel0, pll0;
+ u64 pll0_freq, clkr0, clkf0, clkod0;
+
+ /*
+ * If the clock selector is not set, use the base frequency.
+ * Otherwise, use PLL0 frequency with a frequency divisor.
+ */
+ clksel0 = readl(s->regs + K210_SYSCTL_CLKSEL0);
+ if (!(clksel0 & CLKSEL_ACLK))
+ return K210_SYSCTL_CLK0_FREQ;
+
+ /*
+ * Get PLL0 frequency:
+ * freq = base frequency * clkf0 / (clkr0 * clkod0)
+ */
+ pll0 = readl(s->regs + K210_SYSCTL_PLL0);
+ clkr0 = 1 + FIELD_GET(GENMASK(3, 0), pll0);
+ clkf0 = 1 + FIELD_GET(GENMASK(9, 4), pll0);
+ clkod0 = 1 + FIELD_GET(GENMASK(13, 10), pll0);
+ pll0_freq = clkf0 * K210_SYSCTL_CLK0_FREQ / (clkr0 * clkod0);
+
+ /* Get the frequency divisor from the clock selector */
+ return pll0_freq / (2ULL << FIELD_GET(0x00000006, clksel0));
+}
+
+static const struct clk_ops k210_sysctl_clk_ops = {
+ .recalc_rate = k210_sysctl_clk_recalc_rate,
+};
+
+static const struct clk_init_data k210_clk_init_data = {
+ .name = "k210-sysctl-pll1",
+ .ops = &k210_sysctl_clk_ops,
+};
+
+static int k210_sysctl_probe(struct platform_device *pdev)
+{
+ struct k210_sysctl *s;
+ int error;
+
+ pr_info("Kendryte K210 SoC sysctl\n");
+
+ s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->regs = devm_ioremap_resource(&pdev->dev,
+ platform_get_resource(pdev, IORESOURCE_MEM, 0));
+ if (IS_ERR(s->regs))
+ return PTR_ERR(s->regs);
+
+ s->hw.init = &k210_clk_init_data;
+ error = devm_clk_hw_register(&pdev->dev, &s->hw);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register clk");
+ return error;
+ }
+
+ error = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get,
+ &s->hw);
+ if (error) {
+ dev_err(&pdev->dev, "adding clk provider failed\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id k210_sysctl_of_match[] = {
+ { .compatible = "kendryte,k210-sysctl", },
+ {}
+};
+
+static struct platform_driver k210_sysctl_driver = {
+ .driver = {
+ .name = "k210-sysctl",
+ .of_match_table = k210_sysctl_of_match,
+ },
+ .probe = k210_sysctl_probe,
+};
+
+static int __init k210_sysctl_init(void)
+{
+ return platform_driver_register(&k210_sysctl_driver);
+}
+core_initcall(k210_sysctl_init);
+
+/*
+ * This needs to be called very early during initialization, given that
+ * PLL1 needs to be enabled to be able to use all SRAM.
+ */
+static void __init k210_soc_early_init(const void *fdt)
+{
+ void __iomem *regs;
+
+ regs = ioremap(K210_SYSCTL_SYSCTL_BASE_ADDR, 0x1000);
+ if (!regs)
+ panic("K210 sysctl ioremap");
+
+	/* Enable PLL1 to make the KPU SRAM usable */
+ k210_pll1_enable(regs);
+
+ k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL0);
+
+ k210_set_bits(CLKEN_CPU | CLKEN_SRAM0 | CLKEN_SRAM1,
+ regs + K210_SYSCTL_CLKEN_CENT);
+ k210_set_bits(CLKEN_ROM | CLKEN_TIMER0 | CLKEN_RTC,
+ regs + K210_SYSCTL_CLKEN_PERI);
+
+ k210_set_bits(CLKSEL_ACLK, regs + K210_SYSCTL_CLKSEL0);
+
+ iounmap(regs);
+}
+SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init);
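
For reference, the arithmetic in k210_sysctl_clk_recalc_rate() can be checked in user space. This standalone sketch decodes a made-up PLL0 register value; the 26 MHz base frequency and the field layout come from the driver above:

/* Sketch only: userspace illustration of the recalc_rate arithmetic.
 * The PLL0 register value below is invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define CLK0_FREQ 26000000ULL

int main(void)
{
	/* Hypothetical PLL0 register: clkr0=0, clkf0=59, clkod0=1 fields */
	uint32_t pll0 = (1u << 10) | (59u << 4) | 0u;

	uint64_t clkr0  = 1 + (pll0 & 0xf);		/* bits 3:0   -> 1  */
	uint64_t clkf0  = 1 + ((pll0 >> 4) & 0x3f);	/* bits 9:4   -> 60 */
	uint64_t clkod0 = 1 + ((pll0 >> 10) & 0xf);	/* bits 13:10 -> 2  */

	uint64_t pll0_freq = clkf0 * CLK0_FREQ / (clkr0 * clkod0);

	/* 26 MHz * 60 / (1 * 2) = 780 MHz */
	printf("PLL0: %llu Hz\n", (unsigned long long)pll0_freq);

	/* An ACLK divisor field (clksel0 bits 2:1) of 0 gives /2 -> 390 MHz */
	printf("ACLK: %llu Hz\n", (unsigned long long)(pll0_freq / (2ULL << 0)));
	return 0;
}
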
diff --git a/drivers/staging/gasket/gasket_core.c b/drivers/staging/gasket/gasket_core.c
index cd181a64f737..8e0575fcb4c8 100644
--- a/drivers/staging/gasket/gasket_core.c
+++ b/drivers/staging/gasket/gasket_core.c
@@ -689,7 +689,7 @@ static bool gasket_mmap_has_permissions(struct gasket_dev *gasket_dev,
/* Make sure that no wrong flags are set. */
requested_permissions =
- (vma->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
+ (vma->vm_flags & VM_ACCESS_FLAGS);
if (requested_permissions & ~(bar_permissions)) {
dev_dbg(gasket_dev->dev,
"Attempting to map a region with requested permissions 0x%x, but region has permissions 0x%x.\n",
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 425c1070de08..bd3ed6ce7571 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -134,7 +134,7 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
* Assigned designator
*/
desig_len = desc[7];
- if (desig_len != 16) {
+ if (desig_len != XCOPY_NAA_IEEE_REGEX_LEN) {
pr_err("XCOPY 0xe4: invalid desig_len: %d\n", (int)desig_len);
return -EINVAL;
}
@@ -315,11 +315,6 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
xop->nolb, (unsigned long long)xop->src_lba,
(unsigned long long)xop->dst_lba);
- if (dc != 0) {
- xop->dbl = get_unaligned_be24(&desc[29]);
-
- pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl);
- }
return 0;
}
@@ -415,7 +410,8 @@ static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
struct xcopy_pt_cmd *xpt_cmd = container_of(se_cmd,
struct xcopy_pt_cmd, se_cmd);
- kfree(xpt_cmd);
+ /* xpt_cmd is on the stack, nothing to free here */
+ pr_debug("xpt_cmd done: %p\n", xpt_cmd);
}
static int xcopy_pt_check_stop_free(struct se_cmd *se_cmd)
@@ -504,7 +500,6 @@ void target_xcopy_release_pt(void)
* @cdb: SCSI CDB to be copied into @xpt_cmd.
* @remote_port: If false, use the LUN through which the XCOPY command has
* been received. If true, use @se_dev->xcopy_lun.
- * @alloc_mem: Whether or not to allocate an SGL list.
*
* Set up a SCSI command (READ or WRITE) that will be used to execute an
* XCOPY command.
@@ -514,12 +509,9 @@ static int target_xcopy_setup_pt_cmd(
struct xcopy_op *xop,
struct se_device *se_dev,
unsigned char *cdb,
- bool remote_port,
- bool alloc_mem)
+ bool remote_port)
{
struct se_cmd *cmd = &xpt_cmd->se_cmd;
- sense_reason_t sense_rc;
- int ret = 0, rc;
/*
* Setup LUN+port to honor reservations based upon xop->op_origin for
@@ -535,46 +527,17 @@ static int target_xcopy_setup_pt_cmd(
cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
cmd->tag = 0;
- sense_rc = target_setup_cmd_from_cdb(cmd, cdb);
- if (sense_rc) {
- ret = -EINVAL;
- goto out;
- }
+ if (target_setup_cmd_from_cdb(cmd, cdb))
+ return -EINVAL;
- if (alloc_mem) {
- rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
- cmd->data_length, false, false);
- if (rc < 0) {
- ret = rc;
- goto out;
- }
- /*
- * Set this bit so that transport_free_pages() allows the
- * caller to release SGLs + physical memory allocated by
- * transport_generic_get_mem()..
- */
- cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- } else {
- /*
- * Here the previously allocated SGLs for the internal READ
- * are mapped zero-copy to the internal WRITE.
- */
- sense_rc = transport_generic_map_mem_to_cmd(cmd,
- xop->xop_data_sg, xop->xop_data_nents,
- NULL, 0);
- if (sense_rc) {
- ret = -EINVAL;
- goto out;
- }
+ if (transport_generic_map_mem_to_cmd(cmd, xop->xop_data_sg,
+ xop->xop_data_nents, NULL, 0))
+ return -EINVAL;
- pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
- " %u\n", cmd->t_data_sg, cmd->t_data_nents);
- }
+ pr_debug("Setup PASSTHROUGH_NOALLOC t_data_sg: %p t_data_nents:"
+ " %u\n", cmd->t_data_sg, cmd->t_data_nents);
return 0;
-
-out:
- return ret;
}
static int target_xcopy_issue_pt_cmd(struct xcopy_pt_cmd *xpt_cmd)
@@ -604,20 +567,15 @@ static int target_xcopy_read_source(
sector_t src_lba,
u32 src_sectors)
{
- struct xcopy_pt_cmd *xpt_cmd;
- struct se_cmd *se_cmd;
+ struct xcopy_pt_cmd xpt_cmd;
+ struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 length = (src_sectors * src_dev->dev_attrib.block_size);
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_DEST_RECV_OP);
- xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
- if (!xpt_cmd) {
- pr_err("Unable to allocate xcopy_pt_cmd\n");
- return -ENOMEM;
- }
- init_completion(&xpt_cmd->xpt_passthrough_sem);
- se_cmd = &xpt_cmd->se_cmd;
+ memset(&xpt_cmd, 0, sizeof(xpt_cmd));
+ init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = READ_16;
@@ -627,36 +585,24 @@ static int target_xcopy_read_source(
(unsigned long long)src_lba, src_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_FROM_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
- xop->src_pt_cmd = xpt_cmd;
+ DMA_FROM_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
- rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, src_dev, &cdb[0],
- remote_port, true);
+ rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, src_dev, &cdb[0],
+ remote_port);
if (rc < 0) {
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+ goto out;
}
- xop->xop_data_sg = se_cmd->t_data_sg;
- xop->xop_data_nents = se_cmd->t_data_nents;
pr_debug("XCOPY-READ: Saved xop->xop_data_sg: %p, num: %u for READ"
" memory\n", xop->xop_data_sg, xop->xop_data_nents);
- rc = target_xcopy_issue_pt_cmd(xpt_cmd);
- if (rc < 0) {
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
- }
- /*
- * Clear off the allocated t_data_sg, that has been saved for
- * zero-copy WRITE submission reuse in struct xcopy_op..
- */
- se_cmd->t_data_sg = NULL;
- se_cmd->t_data_nents = 0;
-
- return 0;
+ rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
+ if (rc < 0)
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+out:
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
}
static int target_xcopy_write_destination(
@@ -666,20 +612,15 @@ static int target_xcopy_write_destination(
sector_t dst_lba,
u32 dst_sectors)
{
- struct xcopy_pt_cmd *xpt_cmd;
- struct se_cmd *se_cmd;
+ struct xcopy_pt_cmd xpt_cmd;
+ struct se_cmd *se_cmd = &xpt_cmd.se_cmd;
u32 length = (dst_sectors * dst_dev->dev_attrib.block_size);
int rc;
unsigned char cdb[16];
bool remote_port = (xop->op_origin == XCOL_SOURCE_RECV_OP);
- xpt_cmd = kzalloc(sizeof(struct xcopy_pt_cmd), GFP_KERNEL);
- if (!xpt_cmd) {
- pr_err("Unable to allocate xcopy_pt_cmd\n");
- return -ENOMEM;
- }
- init_completion(&xpt_cmd->xpt_passthrough_sem);
- se_cmd = &xpt_cmd->se_cmd;
+ memset(&xpt_cmd, 0, sizeof(xpt_cmd));
+ init_completion(&xpt_cmd.xpt_passthrough_sem);
memset(&cdb[0], 0, 16);
cdb[0] = WRITE_16;
@@ -689,36 +630,21 @@ static int target_xcopy_write_destination(
(unsigned long long)dst_lba, dst_sectors, length);
transport_init_se_cmd(se_cmd, &xcopy_pt_tfo, &xcopy_pt_sess, length,
- DMA_TO_DEVICE, 0, &xpt_cmd->sense_buffer[0]);
- xop->dst_pt_cmd = xpt_cmd;
+ DMA_TO_DEVICE, 0, &xpt_cmd.sense_buffer[0]);
- rc = target_xcopy_setup_pt_cmd(xpt_cmd, xop, dst_dev, &cdb[0],
- remote_port, false);
+ rc = target_xcopy_setup_pt_cmd(&xpt_cmd, xop, dst_dev, &cdb[0],
+ remote_port);
if (rc < 0) {
- struct se_cmd *src_cmd = &xop->src_pt_cmd->se_cmd;
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- /*
- * If the failure happened before the t_mem_list hand-off in
- * target_xcopy_setup_pt_cmd(), Reset memory + clear flag so that
- * core releases this memory on error during X-COPY WRITE I/O.
- */
- src_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- src_cmd->t_data_sg = xop->xop_data_sg;
- src_cmd->t_data_nents = xop->xop_data_nents;
-
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
- }
-
- rc = target_xcopy_issue_pt_cmd(xpt_cmd);
- if (rc < 0) {
- ec_cmd->scsi_status = xpt_cmd->se_cmd.scsi_status;
- se_cmd->se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
- transport_generic_free_cmd(se_cmd, 0);
- return rc;
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+ goto out;
}
- return 0;
+ rc = target_xcopy_issue_pt_cmd(&xpt_cmd);
+ if (rc < 0)
+ ec_cmd->scsi_status = se_cmd->scsi_status;
+out:
+ transport_generic_free_cmd(se_cmd, 0);
+ return rc;
}
static void target_xcopy_do_work(struct work_struct *work)
@@ -729,7 +655,7 @@ static void target_xcopy_do_work(struct work_struct *work)
sector_t src_lba, dst_lba, end_lba;
unsigned int max_sectors;
int rc = 0;
- unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0;
+ unsigned short nolb, max_nolb, copied_nolb = 0;
if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE)
goto err_free;
@@ -759,7 +685,23 @@ static void target_xcopy_do_work(struct work_struct *work)
(unsigned long long)src_lba, (unsigned long long)dst_lba);
while (src_lba < end_lba) {
- cur_nolb = min(nolb, max_nolb);
+ unsigned short cur_nolb = min(nolb, max_nolb);
+ u32 cur_bytes = cur_nolb * src_dev->dev_attrib.block_size;
+
+ if (cur_bytes != xop->xop_data_bytes) {
+ /*
+ * (Re)allocate a buffer large enough to hold the XCOPY
+ * I/O size, which can be reused each read / write loop.
+ */
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
+ rc = target_alloc_sgl(&xop->xop_data_sg,
+ &xop->xop_data_nents,
+ cur_bytes,
+ false, false);
+ if (rc < 0)
+ goto out;
+ xop->xop_data_bytes = cur_bytes;
+ }
pr_debug("target_xcopy_do_work: Calling read src_dev: %p src_lba: %llu,"
" cur_nolb: %hu\n", src_dev, (unsigned long long)src_lba, cur_nolb);
@@ -777,10 +719,8 @@ static void target_xcopy_do_work(struct work_struct *work)
rc = target_xcopy_write_destination(ec_cmd, xop, dst_dev,
dst_lba, cur_nolb);
- if (rc < 0) {
- transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
+ if (rc < 0)
goto out;
- }
dst_lba += cur_nolb;
pr_debug("target_xcopy_do_work: Incremented WRITE dst_lba to %llu\n",
@@ -788,14 +728,10 @@ static void target_xcopy_do_work(struct work_struct *work)
copied_nolb += cur_nolb;
nolb -= cur_nolb;
-
- transport_generic_free_cmd(&xop->src_pt_cmd->se_cmd, 0);
- xop->dst_pt_cmd->se_cmd.se_cmd_flags &= ~SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
-
- transport_generic_free_cmd(&xop->dst_pt_cmd->se_cmd, 0);
}
xcopy_pt_undepend_remotedev(xop);
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
kfree(xop);
pr_debug("target_xcopy_do_work: Final src_lba: %llu, dst_lba: %llu\n",
@@ -809,6 +745,7 @@ static void target_xcopy_do_work(struct work_struct *work)
out:
xcopy_pt_undepend_remotedev(xop);
+ target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
err_free:
kfree(xop);
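
The xcopy rework above drops the per-chunk kzalloc/kfree of xcopy_pt_cmd in favor of stack commands, and reuses one scatter-gather buffer across the read/write loop, reallocating only when the chunk size changes. The buffer-reuse step, isolated (the helper names are the ones used in the hunk; the wrapper is illustrative):

/* Sketch only: (re)allocate-on-size-change buffer reuse. */
static int xcopy_loop_sketch(struct xcopy_op *xop, u32 cur_bytes)
{
	int rc;

	if (cur_bytes != xop->xop_data_bytes) {
		/* Free the old buffer (a no-op on the first pass), then
		 * size the new one for the current chunk.
		 */
		target_free_sgl(xop->xop_data_sg, xop->xop_data_nents);
		rc = target_alloc_sgl(&xop->xop_data_sg, &xop->xop_data_nents,
				      cur_bytes, false, false);
		if (rc < 0)
			return rc;
		xop->xop_data_bytes = cur_bytes;
	}
	/* ... issue the READ into the buffer, then the WRITE from it ... */
	return 0;
}
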
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 26ba4c3c9cff..c56a1bde9417 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -5,7 +5,7 @@
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
-#define XCOPY_MAX_SECTORS 1024
+#define XCOPY_MAX_SECTORS 4096
/*
* SPC4r37 6.4.6.1
@@ -18,8 +18,6 @@ enum xcopy_origin_list {
XCOL_DEST_RECV_OP = 0x02,
};
-struct xcopy_pt_cmd;
-
struct xcopy_op {
int op_origin;
@@ -35,11 +33,8 @@ struct xcopy_op {
unsigned short stdi;
unsigned short dtdi;
unsigned short nolb;
- unsigned int dbl;
-
- struct xcopy_pt_cmd *src_pt_cmd;
- struct xcopy_pt_cmd *dst_pt_cmd;
+ u32 xop_data_bytes;
u32 xop_data_nents;
struct scatterlist *xop_data_sg;
struct work_struct xop_work;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 5a05db5438d6..91af271e9bb0 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -1,17 +1,18 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# Generic thermal sysfs drivers configuration
+# Generic thermal drivers configuration
#
menuconfig THERMAL
- bool "Generic Thermal sysfs driver"
+ bool "Thermal drivers"
help
- Generic Thermal Sysfs driver offers a generic mechanism for
+ Thermal drivers offer a generic mechanism for
thermal management. Usually it's made up of one or more thermal
- zone and cooling device.
+ zones and cooling devices.
Each thermal zone contains its own temperature, trip points,
- cooling devices.
- All platforms with ACPI thermal support can use this driver.
+ and cooling devices.
+ All platforms with ACPI or Open Firmware thermal support can use
+ this driver.
If you want this support, you should say Y here.
if THERMAL
@@ -251,6 +252,27 @@ config IMX_THERMAL
cpufreq is used as the cooling device to throttle CPUs when the
passive trip is crossed.
+config IMX_SC_THERMAL
+ tristate "Temperature sensor driver for NXP i.MX SoCs with System Controller"
+ depends on IMX_SCU
+ depends on OF
+ help
+	  Support for the Temperature Monitor (TEMPMON) found on NXP i.MX SoCs
+	  with an embedded system controller. The Linux kernel communicates
+	  with the system controller via MU (message unit) IPC to read the
+	  temperature from the thermal sensor. One critical trip point and
+	  one passive trip point are supported for each thermal sensor.
+
+config IMX8MM_THERMAL
+ tristate "Temperature sensor driver for Freescale i.MX8MM SoC"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on OF
+ help
+	  Support for the Thermal Monitoring Unit (TMU) found on the Freescale
+	  i.MX8MM SoC. It supports one critical trip point and one passive
+	  trip point. cpufreq is used as the cooling device to throttle CPUs
+	  when the passive trip is crossed.
+
config MAX77620_THERMAL
tristate "Temperature sensor driver for Maxim MAX77620 PMIC"
depends on MFD_MAX77620
@@ -265,6 +287,7 @@ config QORIQ_THERMAL
tristate "QorIQ Thermal Monitoring Unit"
depends on THERMAL_OF
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms.
It supports one critical trip point and one passive trip point. The
@@ -460,4 +483,11 @@ config UNIPHIER_THERMAL
Enable this to plug in UniPhier on-chip PVT thermal driver into the
thermal framework. The driver supports CPU thermal zone temperature
reporting and a couple of trip points.
+
+config SPRD_THERMAL
+ tristate "Temperature sensor on Spreadtrum SoCs"
+ depends on ARCH_SPRD || COMPILE_TEST
+ help
+ Support for the Spreadtrum thermal sensor driver in the Linux thermal
+ framework.
endif
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 9fb88e26fb10..8c8ed7b79915 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -43,6 +43,8 @@ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o
obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o
obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o
obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o
+obj-$(CONFIG_IMX_SC_THERMAL) += imx_sc_thermal.o
+obj-$(CONFIG_IMX8MM_THERMAL) += imx8mm_thermal.o
obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o
obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o
obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o
@@ -57,3 +59,4 @@ obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o
obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o
obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o
obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o
+obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 4ae8c856c88e..e297e135c031 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -273,7 +273,7 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev,
struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata;
/* Request state should be less than max_level */
- if (WARN_ON(state > cpufreq_cdev->max_level))
+ if (state > cpufreq_cdev->max_level)
return -EINVAL;
num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus);
@@ -437,7 +437,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
int ret;
/* Request state should be less than max_level */
- if (WARN_ON(state > cpufreq_cdev->max_level))
+ if (state > cpufreq_cdev->max_level)
return -EINVAL;
/* Check if the old cooling action is same as new cooling action */
@@ -456,6 +456,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
capacity = frequency * max_capacity;
capacity /= cpufreq_cdev->policy->cpuinfo.max_freq;
arch_set_thermal_pressure(cpus, max_capacity - capacity);
+ ret = 0;
}
return ret;
diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c
new file mode 100644
index 000000000000..0d60f8d7894f
--- /dev/null
+++ b/drivers/thermal/imx8mm_thermal.c
@@ -0,0 +1,236 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 NXP.
+ *
+ * Author: Anson Huang <Anson.Huang@nxp.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define TER 0x0 /* TMU enable */
+#define TPS 0x4
+#define TRITSR 0x20 /* TMU immediate temp */
+
+#define TER_EN BIT(31)
+#define TRITSR_TEMP0_VAL_MASK 0xff
+#define TRITSR_TEMP1_VAL_MASK 0xff0000
+
+#define PROBE_SEL_ALL GENMASK(31, 30)
+
+#define probe_status_offset(x)	(30 + (x))
+#define SIGN_BIT BIT(7)
+#define TEMP_VAL_MASK GENMASK(6, 0)
+
+#define VER1_TEMP_LOW_LIMIT 10000
+#define VER2_TEMP_LOW_LIMIT -40000
+#define VER2_TEMP_HIGH_LIMIT 125000
+
+#define TMU_VER1 0x1
+#define TMU_VER2 0x2
+
+struct thermal_soc_data {
+ u32 num_sensors;
+ u32 version;
+ int (*get_temp)(void *, int *);
+};
+
+struct tmu_sensor {
+ struct imx8mm_tmu *priv;
+ u32 hw_id;
+ struct thermal_zone_device *tzd;
+};
+
+struct imx8mm_tmu {
+ void __iomem *base;
+ struct clk *clk;
+ const struct thermal_soc_data *socdata;
+	struct tmu_sensor sensors[];
+};
+
+static int imx8mm_tmu_get_temp(void *data, int *temp)
+{
+ struct tmu_sensor *sensor = data;
+ struct imx8mm_tmu *tmu = sensor->priv;
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK;
+ *temp = val * 1000;
+ if (*temp < VER1_TEMP_LOW_LIMIT)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int imx8mp_tmu_get_temp(void *data, int *temp)
+{
+ struct tmu_sensor *sensor = data;
+ struct imx8mm_tmu *tmu = sensor->priv;
+ unsigned long val;
+ bool ready;
+
+ val = readl_relaxed(tmu->base + TRITSR);
+ ready = test_bit(probe_status_offset(sensor->hw_id), &val);
+ if (!ready)
+ return -EAGAIN;
+
+ val = sensor->hw_id ? FIELD_GET(TRITSR_TEMP1_VAL_MASK, val) :
+ FIELD_GET(TRITSR_TEMP0_VAL_MASK, val);
+ if (val & SIGN_BIT) /* negative */
+ val = (~(val & TEMP_VAL_MASK) + 1);
+
+ *temp = val * 1000;
+ if (*temp < VER2_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int tmu_get_temp(void *data, int *temp)
+{
+ struct tmu_sensor *sensor = data;
+ struct imx8mm_tmu *tmu = sensor->priv;
+
+ return tmu->socdata->get_temp(data, temp);
+}
+
+static struct thermal_zone_of_device_ops tmu_tz_ops = {
+ .get_temp = tmu_get_temp,
+};
+
+static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable)
+{
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TER);
+ val = enable ? (val | TER_EN) : (val & ~TER_EN);
+ writel_relaxed(val, tmu->base + TER);
+}
+
+static void imx8mm_tmu_probe_sel_all(struct imx8mm_tmu *tmu)
+{
+ u32 val;
+
+ val = readl_relaxed(tmu->base + TPS);
+ val |= PROBE_SEL_ALL;
+ writel_relaxed(val, tmu->base + TPS);
+}
+
+static int imx8mm_tmu_probe(struct platform_device *pdev)
+{
+ const struct thermal_soc_data *data;
+ struct imx8mm_tmu *tmu;
+ int ret;
+ int i;
+
+ data = of_device_get_match_data(&pdev->dev);
+
+ tmu = devm_kzalloc(&pdev->dev, struct_size(tmu, sensors,
+ data->num_sensors), GFP_KERNEL);
+ if (!tmu)
+ return -ENOMEM;
+
+ tmu->socdata = data;
+
+ tmu->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(tmu->base))
+ return PTR_ERR(tmu->base);
+
+ tmu->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tmu->clk)) {
+ ret = PTR_ERR(tmu->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to get tmu clock: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(tmu->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable tmu clock: %d\n", ret);
+ return ret;
+ }
+
+ /* disable the monitor during initialization */
+ imx8mm_tmu_enable(tmu, false);
+
+ for (i = 0; i < data->num_sensors; i++) {
+ tmu->sensors[i].priv = tmu;
+ tmu->sensors[i].tzd =
+ devm_thermal_zone_of_sensor_register(&pdev->dev, i,
+ &tmu->sensors[i],
+ &tmu_tz_ops);
+		if (IS_ERR(tmu->sensors[i].tzd)) {
+			ret = PTR_ERR(tmu->sensors[i].tzd);
+			dev_err(&pdev->dev,
+				"failed to register thermal zone sensor[%d]: %d\n",
+				i, ret);
+			clk_disable_unprepare(tmu->clk);
+			return ret;
+		}
+ tmu->sensors[i].hw_id = i;
+ }
+
+ platform_set_drvdata(pdev, tmu);
+
+ /* enable all the probes for V2 TMU */
+ if (tmu->socdata->version == TMU_VER2)
+ imx8mm_tmu_probe_sel_all(tmu);
+
+ /* enable the monitor */
+ imx8mm_tmu_enable(tmu, true);
+
+ return 0;
+}
+
+static int imx8mm_tmu_remove(struct platform_device *pdev)
+{
+ struct imx8mm_tmu *tmu = platform_get_drvdata(pdev);
+
+ /* disable TMU */
+ imx8mm_tmu_enable(tmu, false);
+
+ clk_disable_unprepare(tmu->clk);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct thermal_soc_data imx8mm_tmu_data = {
+ .num_sensors = 1,
+ .version = TMU_VER1,
+ .get_temp = imx8mm_tmu_get_temp,
+};
+
+static struct thermal_soc_data imx8mp_tmu_data = {
+ .num_sensors = 2,
+ .version = TMU_VER2,
+ .get_temp = imx8mp_tmu_get_temp,
+};
+
+static const struct of_device_id imx8mm_tmu_table[] = {
+ { .compatible = "fsl,imx8mm-tmu", .data = &imx8mm_tmu_data, },
+ { .compatible = "fsl,imx8mp-tmu", .data = &imx8mp_tmu_data, },
+ { },
+};
+
+static struct platform_driver imx8mm_tmu = {
+ .driver = {
+ .name = "i.mx8mm_thermal",
+ .of_match_table = imx8mm_tmu_table,
+ },
+ .probe = imx8mm_tmu_probe,
+ .remove = imx8mm_tmu_remove,
+};
+module_platform_driver(imx8mm_tmu);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("i.MX8MM Thermal Monitor Unit driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c
new file mode 100644
index 000000000000..a8723b1eb8b0
--- /dev/null
+++ b/drivers/thermal/imx_sc_thermal.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018-2020 NXP.
+ */
+
+#include <linux/err.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/firmware/imx/types.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#include "thermal_core.h"
+
+#define IMX_SC_MISC_FUNC_GET_TEMP 13
+
+static struct imx_sc_ipc *thermal_ipc_handle;
+
+struct imx_sc_sensor {
+ struct thermal_zone_device *tzd;
+ u32 resource_id;
+};
+
+struct req_get_temp {
+ u16 resource_id;
+ u8 type;
+} __packed __aligned(4);
+
+struct resp_get_temp {
+ s16 celsius;
+ s8 tenths;
+} __packed __aligned(4);
+
+struct imx_sc_msg_misc_get_temp {
+ struct imx_sc_rpc_msg hdr;
+ union {
+ struct req_get_temp req;
+ struct resp_get_temp resp;
+ } data;
+} __packed __aligned(4);
+
+static int imx_sc_thermal_get_temp(void *data, int *temp)
+{
+ struct imx_sc_msg_misc_get_temp msg;
+ struct imx_sc_rpc_msg *hdr = &msg.hdr;
+ struct imx_sc_sensor *sensor = data;
+ int ret;
+
+ msg.data.req.resource_id = sensor->resource_id;
+ msg.data.req.type = IMX_SC_C_TEMP;
+
+ hdr->ver = IMX_SC_RPC_VERSION;
+ hdr->svc = IMX_SC_RPC_SVC_MISC;
+ hdr->func = IMX_SC_MISC_FUNC_GET_TEMP;
+ hdr->size = 2;
+
+ ret = imx_scu_call_rpc(thermal_ipc_handle, &msg, true);
+ if (ret) {
+ dev_err(&sensor->tzd->device, "read temp sensor %d failed, ret %d\n",
+ sensor->resource_id, ret);
+ return ret;
+ }
+
+ *temp = msg.data.resp.celsius * 1000 + msg.data.resp.tenths * 100;
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops imx_sc_thermal_ops = {
+ .get_temp = imx_sc_thermal_get_temp,
+};
+
+static int imx_sc_thermal_probe(struct platform_device *pdev)
+{
+ struct device_node *np, *child, *sensor_np;
+ struct imx_sc_sensor *sensor;
+ int ret;
+
+ ret = imx_scu_get_handle(&thermal_ipc_handle);
+ if (ret)
+ return ret;
+
+ np = of_find_node_by_name(NULL, "thermal-zones");
+ if (!np)
+ return -ENODEV;
+
+ sensor_np = of_node_get(pdev->dev.of_node);
+
+ for_each_available_child_of_node(np, child) {
+ sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL);
+		if (!sensor) {
+			of_node_put(sensor_np);
+			of_node_put(np);
+			return -ENOMEM;
+		}
+
+ ret = thermal_zone_of_get_sensor_id(child,
+ sensor_np,
+ &sensor->resource_id);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to get valid sensor resource id: %d\n",
+ ret);
+ break;
+ }
+
+ sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev,
+ sensor->resource_id,
+ sensor,
+ &imx_sc_thermal_ops);
+ if (IS_ERR(sensor->tzd)) {
+ dev_err(&pdev->dev, "failed to register thermal zone\n");
+ ret = PTR_ERR(sensor->tzd);
+ break;
+ }
+ }
+
+	of_node_put(sensor_np);
+	of_node_put(np);
+
+	return ret;
+}
+
+static int imx_sc_thermal_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id imx_sc_thermal_table[] = {
+ { .compatible = "fsl,imx-sc-thermal", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, imx_sc_thermal_table);
+
+static struct platform_driver imx_sc_thermal_driver = {
+ .probe = imx_sc_thermal_probe,
+ .remove = imx_sc_thermal_remove,
+ .driver = {
+ .name = "imx-sc-thermal",
+ .of_match_table = imx_sc_thermal_table,
+ },
+};
+module_platform_driver(imx_sc_thermal_driver);
+
+MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>");
+MODULE_DESCRIPTION("Thermal driver for NXP i.MX SoCs with system controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index bb6754a5342c..e761c9b42217 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -3,24 +3,17 @@
// Copyright 2013 Freescale Semiconductor, Inc.
#include <linux/clk.h>
-#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/platform_device.h>
#include <linux/regmap.h>
-#include <linux/slab.h>
#include <linux/thermal.h>
-#include <linux/types.h>
#include <linux/nvmem-consumer.h>
#define REG_SET 0x4
@@ -872,14 +865,12 @@ static int imx_thermal_remove(struct platform_device *pdev)
clk_disable_unprepare(data->thermal_clk);
thermal_zone_device_unregister(data->tz);
- cpufreq_cooling_unregister(data->cdev);
- cpufreq_cpu_put(data->policy);
+ imx_thermal_unregister_legacy_cooling(data);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int imx_thermal_suspend(struct device *dev)
+static int __maybe_unused imx_thermal_suspend(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
@@ -900,7 +891,7 @@ static int imx_thermal_suspend(struct device *dev)
return 0;
}
-static int imx_thermal_resume(struct device *dev)
+static int __maybe_unused imx_thermal_resume(struct device *dev)
{
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
@@ -918,7 +909,6 @@ static int imx_thermal_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
imx_thermal_suspend, imx_thermal_resume);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 6cad15eb9cf4..ceef89c956bd 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -65,7 +65,7 @@ static ssize_t available_uuids_show(struct device *dev,
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
if (priv->uuid_bitmap & (1 << i))
if (PAGE_SIZE - length > 0)
- length += snprintf(&buf[length],
+ length += scnprintf(&buf[length],
PAGE_SIZE - length,
"%s\n",
int3400_thermal_uuids[i]);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index b1fd34516e28..297db1d2d960 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -45,6 +45,9 @@
/* JasperLake thermal reporting device */
#define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4503
+/* TigerLake thermal reporting device */
+#define PCI_DEVICE_ID_PROC_TGL_THERMAL 0x9A03
+
#define DRV_NAME "proc_thermal"
struct power_config {
@@ -728,6 +731,8 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL),
.driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_TGL_THERMAL),
+ .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, },
{ 0, },
};
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index ef0baa954ff0..874a47d6923f 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -449,6 +449,50 @@ thermal_zone_of_add_sensor(struct device_node *zone,
}
/**
+ * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone
+ * @tz_np: a valid thermal zone device node.
+ * @sensor_np: a sensor node of a valid sensor device.
+ * @id: the sensor ID returned if success.
+ *
+ * This function gets the sensor ID from a given thermal zone node whose
+ * sensor phandle must match the temperature provider @sensor_np.
+ *
+ * Return: 0 on success, proper error code otherwise.
+ */
+int thermal_zone_of_get_sensor_id(struct device_node *tz_np,
+ struct device_node *sensor_np,
+ u32 *id)
+{
+ struct of_phandle_args sensor_specs;
+ int ret;
+
+ ret = of_parse_phandle_with_args(tz_np,
+ "thermal-sensors",
+ "#thermal-sensor-cells",
+ 0,
+ &sensor_specs);
+ if (ret)
+ return ret;
+
+ if (sensor_specs.np != sensor_np) {
+ of_node_put(sensor_specs.np);
+ return -ENODEV;
+ }
+
+ if (sensor_specs.args_count > 1)
+ pr_warn("%pOFn: too many cells in sensor specifier %d\n",
+ sensor_specs.np, sensor_specs.args_count);
+
+ *id = sensor_specs.args_count ? sensor_specs.args[0] : 0;
+
+ of_node_put(sensor_specs.np);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id);
+
+/**
* thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone
* @dev: a valid struct device pointer of a sensor device. Must contain
* a valid .of_node, for the sensor node.
@@ -499,36 +543,22 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
sensor_np = of_node_get(dev->of_node);
for_each_available_child_of_node(np, child) {
- struct of_phandle_args sensor_specs;
int ret, id;
/* For now, thermal framework supports only 1 sensor per zone */
- ret = of_parse_phandle_with_args(child, "thermal-sensors",
- "#thermal-sensor-cells",
- 0, &sensor_specs);
+ ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id);
if (ret)
continue;
- if (sensor_specs.args_count >= 1) {
- id = sensor_specs.args[0];
- WARN(sensor_specs.args_count > 1,
- "%pOFn: too many cells in sensor specifier %d\n",
- sensor_specs.np, sensor_specs.args_count);
- } else {
- id = 0;
- }
-
- if (sensor_specs.np == sensor_np && id == sensor_id) {
+ if (id == sensor_id) {
tzd = thermal_zone_of_add_sensor(child, sensor_np,
data, ops);
if (!IS_ERR(tzd))
tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
- of_node_put(sensor_specs.np);
of_node_put(child);
goto exit;
}
- of_node_put(sensor_specs.np);
}
exit:
of_node_put(sensor_np);
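
The new thermal_zone_of_get_sensor_id() helper lets a sensor driver (such as imx_sc_thermal above) discover which of its sensors a thermal zone references. A hedged usage sketch, iterating the thermal-zones node the way imx_sc_thermal_probe() does; the function name is invented:

/* Sketch only: resolving zone -> sensor IDs with the new helper. */
static int resolve_ids_sketch(struct platform_device *pdev)
{
	struct device_node *np, *child;
	u32 id;
	int ret;

	np = of_find_node_by_name(NULL, "thermal-zones");
	if (!np)
		return -ENODEV;

	for_each_available_child_of_node(np, child) {
		ret = thermal_zone_of_get_sensor_id(child, pdev->dev.of_node,
						    &id);
		if (ret)
			continue;	/* zone references some other sensor */
		dev_info(&pdev->dev, "zone %pOFn uses sensor %u\n", child, id);
	}

	of_node_put(np);
	return 0;
}
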
diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c
index fb77acb8d13b..2a28a5af209e 100644
--- a/drivers/thermal/qcom/tsens-8960.c
+++ b/drivers/thermal/qcom/tsens-8960.c
@@ -245,7 +245,7 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s)
return adc_code * slope + offset;
}
-static int get_temp_8960(struct tsens_sensor *s, int *temp)
+static int get_temp_8960(const struct tsens_sensor *s, int *temp)
{
int ret;
u32 code, trdy;
@@ -279,7 +279,7 @@ static const struct tsens_ops ops_8960 = {
.resume = resume_8960,
};
-const struct tsens_plat_data data_8960 = {
+struct tsens_plat_data data_8960 = {
.num_sensors = 11,
.ops = &ops_8960,
};
diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c
index c8d57ee0a5bb..172545366636 100644
--- a/drivers/thermal/qcom/tsens-common.c
+++ b/drivers/thermal/qcom/tsens-common.c
@@ -23,6 +23,10 @@
* @low_thresh: lower threshold temperature value
* @low_irq_mask: mask register for lower threshold irqs
* @low_irq_clear: clear register for lower threshold irqs
+ * @crit_viol: critical threshold violated
+ * @crit_thresh: critical threshold temperature value
+ * @crit_irq_mask: mask register for critical threshold irqs
+ * @crit_irq_clear: clear register for critical threshold irqs
*
* Structure containing data about temperature threshold settings and
* irq status if they were violated.
@@ -36,6 +40,10 @@ struct tsens_irq_data {
int low_thresh;
u32 low_irq_mask;
u32 low_irq_clear;
+ u32 crit_viol;
+ u32 crit_thresh;
+ u32 crit_irq_mask;
+ u32 crit_irq_clear;
};
char *qfprom_read(struct device *dev, const char *cname)
@@ -128,7 +136,7 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s)
* Return: Temperature in milliCelsius on success, a negative errno will
* be returned in error cases
*/
-static int tsens_hw_to_mC(struct tsens_sensor *s, int field)
+static int tsens_hw_to_mC(const struct tsens_sensor *s, int field)
{
struct tsens_priv *priv = s->priv;
u32 resolution;
@@ -160,7 +168,7 @@ static int tsens_hw_to_mC(struct tsens_sensor *s, int field)
*
* Return: ADC code or temperature in deciCelsius.
*/
-static int tsens_mC_to_hw(struct tsens_sensor *s, int temp)
+static int tsens_mC_to_hw(const struct tsens_sensor *s, int temp)
{
struct tsens_priv *priv = s->priv;
@@ -189,6 +197,9 @@ static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id,
case LOWER:
index = LOW_INT_CLEAR_0 + hw_id;
break;
+ case CRITICAL:
+ /* No critical interrupts before v2 */
+ return;
}
regmap_field_write(priv->rf[index], enable ? 0 : 1);
}
@@ -214,6 +225,10 @@ static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id,
index_mask = LOW_INT_MASK_0 + hw_id;
index_clear = LOW_INT_CLEAR_0 + hw_id;
break;
+ case CRITICAL:
+ index_mask = CRIT_INT_MASK_0 + hw_id;
+ index_clear = CRIT_INT_CLEAR_0 + hw_id;
+ break;
}
if (enable) {
@@ -268,14 +283,23 @@ static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id,
ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol);
if (ret)
return ret;
- if (d->up_viol || d->low_viol)
+
+ if (priv->feat->crit_int) {
+ ret = regmap_field_read(priv->rf[CRITICAL_STATUS_0 + hw_id],
+ &d->crit_viol);
+ if (ret)
+ return ret;
+ }
+
+ if (d->up_viol || d->low_viol || d->crit_viol)
return 1;
return 0;
}
static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
- struct tsens_sensor *s, struct tsens_irq_data *d)
+ const struct tsens_sensor *s,
+ struct tsens_irq_data *d)
{
int ret;
@@ -292,22 +316,37 @@ static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id,
ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask);
if (ret)
return ret;
+ ret = regmap_field_read(priv->rf[CRIT_INT_CLEAR_0 + hw_id],
+ &d->crit_irq_clear);
+ if (ret)
+ return ret;
+ ret = regmap_field_read(priv->rf[CRIT_INT_MASK_0 + hw_id],
+ &d->crit_irq_mask);
+ if (ret)
+ return ret;
+
+ d->crit_thresh = tsens_hw_to_mC(s, CRIT_THRESH_0 + hw_id);
} else {
/* No mask register on older TSENS */
d->up_irq_mask = 0;
d->low_irq_mask = 0;
+ d->crit_irq_clear = 0;
+ d->crit_irq_mask = 0;
+ d->crit_thresh = 0;
}
d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id);
d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id);
- dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u) | clr(%u|%u) | mask(%u|%u)\n",
- hw_id, __func__, (d->up_viol || d->low_viol) ? "(V)" : "",
- d->low_viol, d->up_viol, d->low_irq_clear, d->up_irq_clear,
- d->low_irq_mask, d->up_irq_mask);
- dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d)\n", hw_id, __func__,
- (d->up_viol || d->low_viol) ? "(violation)" : "",
- d->low_thresh, d->up_thresh);
+ dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u|%u) | clr(%u|%u|%u) | mask(%u|%u|%u)\n",
+ hw_id, __func__,
+ (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "",
+ d->low_viol, d->up_viol, d->crit_viol,
+ d->low_irq_clear, d->up_irq_clear, d->crit_irq_clear,
+ d->low_irq_mask, d->up_irq_mask, d->crit_irq_mask);
+ dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d:%d)\n", hw_id, __func__,
+ (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "",
+ d->low_thresh, d->up_thresh, d->crit_thresh);
return 0;
}
@@ -322,6 +361,76 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
}
/**
+ * tsens_critical_irq_thread() - Threaded handler for critical interrupts
+ * @irq: irq number
+ * @data: tsens controller private data
+ *
+ * Check FSM watchdog bark status and clear if needed.
+ * Check all sensors to find ones that violated their critical threshold limits.
+ * Clear and then re-enable the interrupt.
+ *
+ * The level-triggered interrupt might deassert if the temperature returned to
+ * within the threshold limits by the time the handler got scheduled. We
+ * consider the irq to have been handled in that case.
+ *
+ * Return: IRQ_HANDLED
+ */
+irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+{
+ struct tsens_priv *priv = data;
+ struct tsens_irq_data d;
+ int temp, ret, i;
+ u32 wdog_status, wdog_count;
+
+ if (priv->feat->has_watchdog) {
+ ret = regmap_field_read(priv->rf[WDOG_BARK_STATUS],
+ &wdog_status);
+		if (ret)
+			return IRQ_NONE;
+
+ if (wdog_status) {
+ /* Clear WDOG interrupt */
+ regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 1);
+ regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 0);
+ ret = regmap_field_read(priv->rf[WDOG_BARK_COUNT],
+ &wdog_count);
+			if (ret)
+				return IRQ_NONE;
+ if (wdog_count)
+ dev_dbg(priv->dev, "%s: watchdog count: %d\n",
+ __func__, wdog_count);
+
+ /* Fall through to handle critical interrupts if any */
+ }
+ }
+
+ for (i = 0; i < priv->num_sensors; i++) {
+ const struct tsens_sensor *s = &priv->sensor[i];
+ u32 hw_id = s->hw_id;
+
+ if (IS_ERR(s->tzd))
+ continue;
+ if (!tsens_threshold_violated(priv, hw_id, &d))
+ continue;
+ ret = get_temp_tsens_valid(s, &temp);
+ if (ret) {
+ dev_err(priv->dev, "[%u] %s: error reading sensor\n",
+ hw_id, __func__);
+ continue;
+ }
+
+ tsens_read_irq_state(priv, hw_id, s, &d);
+ if (d.crit_viol &&
+ !masked_irq(hw_id, d.crit_irq_mask, tsens_version(priv))) {
+ /* Mask critical interrupts, unused on Linux */
+ tsens_set_interrupt(priv, hw_id, CRITICAL, false);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
* tsens_irq_thread - Threaded interrupt handler for uplow interrupts
* @irq: irq number
* @data: tsens controller private data
@@ -346,10 +455,10 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
for (i = 0; i < priv->num_sensors; i++) {
bool trigger = false;
- struct tsens_sensor *s = &priv->sensor[i];
+ const struct tsens_sensor *s = &priv->sensor[i];
u32 hw_id = s->hw_id;
- if (IS_ERR(priv->sensor[i].tzd))
+ if (IS_ERR(s->tzd))
continue;
if (!tsens_threshold_violated(priv, hw_id, &d))
continue;
@@ -368,7 +477,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
tsens_set_interrupt(priv, hw_id, UPPER, disable);
if (d.up_thresh > temp) {
dev_dbg(priv->dev, "[%u] %s: re-arm upper\n",
- priv->sensor[i].hw_id, __func__);
+ hw_id, __func__);
tsens_set_interrupt(priv, hw_id, UPPER, enable);
} else {
trigger = true;
@@ -379,7 +488,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
tsens_set_interrupt(priv, hw_id, LOWER, disable);
if (d.low_thresh < temp) {
dev_dbg(priv->dev, "[%u] %s: re-arm low\n",
- priv->sensor[i].hw_id, __func__);
+ hw_id, __func__);
tsens_set_interrupt(priv, hw_id, LOWER, enable);
} else {
trigger = true;
@@ -392,7 +501,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
if (trigger) {
dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n",
hw_id, __func__, temp);
- thermal_zone_device_update(priv->sensor[i].tzd,
+ thermal_zone_device_update(s->tzd,
THERMAL_EVENT_UNSPECIFIED);
} else {
dev_dbg(priv->dev, "[%u] %s: no violation: %d\n",
@@ -435,7 +544,7 @@ int tsens_set_trips(void *_sensor, int low, int high)
spin_unlock_irqrestore(&priv->ul_lock, flags);
dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n",
- s->hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high);
+ hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high);
return 0;
}
@@ -457,7 +566,7 @@ void tsens_disable_irq(struct tsens_priv *priv)
regmap_field_write(priv->rf[INT_EN], 0);
}
-int get_temp_tsens_valid(struct tsens_sensor *s, int *temp)
+int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)
{
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
@@ -486,7 +595,7 @@ int get_temp_tsens_valid(struct tsens_sensor *s, int *temp)
return 0;
}
-int get_temp_common(struct tsens_sensor *s, int *temp)
+int get_temp_common(const struct tsens_sensor *s, int *temp)
{
struct tsens_priv *priv = s->priv;
int hw_id = s->hw_id;
@@ -590,6 +699,7 @@ int __init init_common(struct tsens_priv *priv)
{
void __iomem *tm_base, *srot_base;
struct device *dev = priv->dev;
+ u32 ver_minor;
struct resource *res;
u32 enabled;
int ret, i, j;
@@ -602,7 +712,7 @@ int __init init_common(struct tsens_priv *priv)
/* DT with separate SROT and TM address space */
priv->tm_offset = 0;
res = platform_get_resource(op, IORESOURCE_MEM, 1);
- srot_base = devm_ioremap_resource(&op->dev, res);
+ srot_base = devm_ioremap_resource(dev, res);
if (IS_ERR(srot_base)) {
ret = PTR_ERR(srot_base);
goto err_put_device;
@@ -620,7 +730,7 @@ int __init init_common(struct tsens_priv *priv)
}
res = platform_get_resource(op, IORESOURCE_MEM, 0);
- tm_base = devm_ioremap_resource(&op->dev, res);
+ tm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(tm_base)) {
ret = PTR_ERR(tm_base);
goto err_put_device;
@@ -639,6 +749,9 @@ int __init init_common(struct tsens_priv *priv)
if (IS_ERR(priv->rf[i]))
return PTR_ERR(priv->rf[i]);
}
+ ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor);
+ if (ret)
+ goto err_put_device;
}
priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map,
@@ -683,12 +796,47 @@ int __init init_common(struct tsens_priv *priv)
}
}
+ if (priv->feat->crit_int) {
+ /* Loop might need changes if enum regfield_ids is reordered */
+ for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) {
+ for (i = 0; i < priv->feat->max_sensors; i++) {
+ int idx = j + i;
+
+ priv->rf[idx] =
+ devm_regmap_field_alloc(dev,
+ priv->tm_map,
+ priv->fields[idx]);
+ if (IS_ERR(priv->rf[idx])) {
+ ret = PTR_ERR(priv->rf[idx]);
+ goto err_put_device;
+ }
+ }
+ }
+ }
+
+ if (tsens_version(priv) > VER_1_X && ver_minor > 2) {
+ /* Watchdog is present only on v2.3+ */
+ priv->feat->has_watchdog = 1;
+ for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) {
+ priv->rf[i] = devm_regmap_field_alloc(dev, priv->tm_map,
+ priv->fields[i]);
+ if (IS_ERR(priv->rf[i])) {
+ ret = PTR_ERR(priv->rf[i]);
+ goto err_put_device;
+ }
+ }
+ /*
+ * Watchdog is already enabled, unmask the bark.
+ * Disable cycle completion monitoring
+ */
+ regmap_field_write(priv->rf[WDOG_BARK_MASK], 0);
+ regmap_field_write(priv->rf[CC_MON_MASK], 1);
+ }
+
spin_lock_init(&priv->ul_lock);
tsens_enable_irq(priv);
tsens_debug_init(op);
- return 0;
-
err_put_device:
put_device(&op->dev);
return ret;
diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c
index 4b8dd6de02ce..959a9371d205 100644
--- a/drivers/thermal/qcom/tsens-v0_1.c
+++ b/drivers/thermal/qcom/tsens-v0_1.c
@@ -327,7 +327,7 @@ static int calibrate_8974(struct tsens_priv *priv)
/* v0.1: 8916, 8974 */
-static const struct tsens_features tsens_v0_1_feat = {
+static struct tsens_features tsens_v0_1_feat = {
.ver_major = VER_0_1,
.crit_int = 0,
.adc = 1,
@@ -377,7 +377,7 @@ static const struct tsens_ops ops_8916 = {
.get_temp = get_temp_common,
};
-const struct tsens_plat_data data_8916 = {
+struct tsens_plat_data data_8916 = {
.num_sensors = 5,
.ops = &ops_8916,
.hw_ids = (unsigned int []){0, 1, 2, 4, 5 },
@@ -392,7 +392,7 @@ static const struct tsens_ops ops_8974 = {
.get_temp = get_temp_common,
};
-const struct tsens_plat_data data_8974 = {
+struct tsens_plat_data data_8974 = {
.num_sensors = 11,
.ops = &ops_8974,
.feat = &tsens_v0_1_feat,
diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c
index bd2ddb684a45..b682a4df0081 100644
--- a/drivers/thermal/qcom/tsens-v1.c
+++ b/drivers/thermal/qcom/tsens-v1.c
@@ -299,7 +299,7 @@ static int calibrate_8976(struct tsens_priv *priv)
/* v1.x: msm8956,8976,qcs404,405 */
-static const struct tsens_features tsens_v1_feat = {
+static struct tsens_features tsens_v1_feat = {
.ver_major = VER_1_X,
.crit_int = 0,
.adc = 1,
@@ -368,7 +368,7 @@ static const struct tsens_ops ops_generic_v1 = {
.get_temp = get_temp_tsens_valid,
};
-const struct tsens_plat_data data_tsens_v1 = {
+struct tsens_plat_data data_tsens_v1 = {
.ops = &ops_generic_v1,
.feat = &tsens_v1_feat,
.fields = tsens_v1_regfields,
@@ -381,7 +381,7 @@ static const struct tsens_ops ops_8976 = {
};
/* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. */
-const struct tsens_plat_data data_8976 = {
+struct tsens_plat_data data_8976 = {
.num_sensors = 11,
.ops = &ops_8976,
.hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10},
diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c
index a4d15e1abfdd..b293ed32174b 100644
--- a/drivers/thermal/qcom/tsens-v2.c
+++ b/drivers/thermal/qcom/tsens-v2.c
@@ -24,10 +24,11 @@
#define TM_Sn_CRITICAL_THRESHOLD_OFF 0x0060
#define TM_Sn_STATUS_OFF 0x00a0
#define TM_TRDY_OFF 0x00e4
+#define TM_WDOG_LOG_OFF 0x013c
/* v2.x: 8996, 8998, sdm845 */
-static const struct tsens_features tsens_v2_feat = {
+static struct tsens_features tsens_v2_feat = {
.ver_major = VER_2_X,
.crit_int = 1,
.adc = 0,
@@ -51,8 +52,9 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
[INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2),
/* TEMPERATURE THRESHOLDS */
- REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11),
- REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23),
+ REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11),
+ REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23),
+ REG_FIELD_FOR_EACH_SENSOR16(CRIT_THRESH, TM_Sn_CRITICAL_THRESHOLD_OFF, 0, 11),
/* INTERRUPTS [CLEAR/STATUS/MASK] */
REG_FIELD_SPLIT_BITS_0_15(LOW_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF),
@@ -61,6 +63,18 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = {
REG_FIELD_SPLIT_BITS_16_31(UP_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF),
REG_FIELD_SPLIT_BITS_16_31(UP_INT_CLEAR, TM_UPPER_LOWER_INT_CLEAR_OFF),
REG_FIELD_SPLIT_BITS_16_31(UP_INT_MASK, TM_UPPER_LOWER_INT_MASK_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_STATUS, TM_CRITICAL_INT_STATUS_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_CLEAR, TM_CRITICAL_INT_CLEAR_OFF),
+ REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_MASK, TM_CRITICAL_INT_MASK_OFF),
+
+ /* WATCHDOG on v2.3 or later */
+ [WDOG_BARK_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 31, 31),
+ [WDOG_BARK_CLEAR] = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF, 31, 31),
+ [WDOG_BARK_MASK] = REG_FIELD(TM_CRITICAL_INT_MASK_OFF, 31, 31),
+ [CC_MON_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 30, 30),
+ [CC_MON_CLEAR] = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF, 30, 30),
+ [CC_MON_MASK] = REG_FIELD(TM_CRITICAL_INT_MASK_OFF, 30, 30),
+ [WDOG_BARK_COUNT] = REG_FIELD(TM_WDOG_LOG_OFF, 0, 7),
/* Sn_STATUS */
REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11),
@@ -81,14 +95,14 @@ static const struct tsens_ops ops_generic_v2 = {
.get_temp = get_temp_tsens_valid,
};
-const struct tsens_plat_data data_tsens_v2 = {
+struct tsens_plat_data data_tsens_v2 = {
.ops = &ops_generic_v2,
.feat = &tsens_v2_feat,
.fields = tsens_v2_regfields,
};
/* Kept around for backward compatibility with old msm8996.dtsi */
-const struct tsens_plat_data data_8996 = {
+struct tsens_plat_data data_8996 = {
.num_sensors = 13,
.ops = &ops_generic_v2,
.feat = &tsens_v2_feat,
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 0e7cf5236932..2f77d235cf73 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -85,11 +85,42 @@ static const struct thermal_zone_of_device_ops tsens_of_ops = {
.set_trips = tsens_set_trips,
};
+static int tsens_register_irq(struct tsens_priv *priv, char *irqname,
+ irq_handler_t thread_fn)
+{
+ struct platform_device *pdev;
+ int ret, irq;
+
+ pdev = of_find_device_by_node(priv->dev->of_node);
+ if (!pdev)
+ return -ENODEV;
+
+ irq = platform_get_irq_byname(pdev, irqname);
+ if (irq < 0) {
+ ret = irq;
+ /* For old DTs with no IRQ defined */
+ if (irq == -ENXIO)
+ ret = 0;
+ } else {
+ ret = devm_request_threaded_irq(&pdev->dev, irq,
+ NULL, thread_fn,
+ IRQF_ONESHOT,
+ dev_name(&pdev->dev), priv);
+ if (ret)
+ dev_err(&pdev->dev, "%s: failed to get irq\n",
+ __func__);
+ else
+ enable_irq_wake(irq);
+ }
+
+ put_device(&pdev->dev);
+ return ret;
+}
+
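The -ENXIO special case exists because platform_get_irq_byname() returns -ENXIO when the named interrupt is not described in the device tree; older DTs simply omitted these IRQs, so the driver treats them as optional. The pattern reduced to its core, as a sketch under that assumption:

	irq = platform_get_irq_byname(pdev, irqname);
	if (irq == -ENXIO)	/* name absent from this DT: optional */
		return 0;
	if (irq < 0)		/* any other failure is a real error */
		return irq;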
static int tsens_register(struct tsens_priv *priv)
{
- int i, ret, irq;
+ int i, ret;
struct thermal_zone_device *tzd;
- struct platform_device *pdev;
for (i = 0; i < priv->num_sensors; i++) {
priv->sensor[i].priv = priv;
@@ -103,32 +134,14 @@ static int tsens_register(struct tsens_priv *priv)
priv->ops->enable(priv, i);
}
- pdev = of_find_device_by_node(priv->dev->of_node);
- if (!pdev)
- return -ENODEV;
-
- irq = platform_get_irq_byname(pdev, "uplow");
- if (irq < 0) {
- ret = irq;
- /* For old DTs with no IRQ defined */
- if (irq == -ENXIO)
- ret = 0;
- goto err_put_device;
- }
-
- ret = devm_request_threaded_irq(&pdev->dev, irq,
- NULL, tsens_irq_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- dev_name(&pdev->dev), priv);
- if (ret) {
- dev_err(&pdev->dev, "%s: failed to get irq\n", __func__);
- goto err_put_device;
- }
+ ret = tsens_register_irq(priv, "uplow", tsens_irq_thread);
+ if (ret < 0)
+ return ret;
- enable_irq_wake(irq);
+ if (priv->feat->crit_int)
+ ret = tsens_register_irq(priv, "critical",
+ tsens_critical_irq_thread);
-err_put_device:
- put_device(&pdev->dev);
return ret;
}
diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h
index e24a865fbc34..502acf0e6828 100644
--- a/drivers/thermal/qcom/tsens.h
+++ b/drivers/thermal/qcom/tsens.h
@@ -23,6 +23,7 @@
struct tsens_priv;
+/* IP version numbers in ascending order */
enum tsens_ver {
VER_0_1 = 0,
VER_1_X,
@@ -32,6 +33,7 @@ enum tsens_ver {
enum tsens_irq_type {
LOWER,
UPPER,
+ CRITICAL,
};
/**
@@ -67,7 +69,7 @@ struct tsens_ops {
/* mandatory callbacks */
int (*init)(struct tsens_priv *priv);
int (*calibrate)(struct tsens_priv *priv);
- int (*get_temp)(struct tsens_sensor *s, int *temp);
+ int (*get_temp)(const struct tsens_sensor *s, int *temp);
/* optional callbacks */
int (*enable)(struct tsens_priv *priv, int i);
void (*disable)(struct tsens_priv *priv);
@@ -374,6 +376,82 @@ enum regfield_ids {
CRITICAL_STATUS_13,
CRITICAL_STATUS_14,
CRITICAL_STATUS_15,
+ CRIT_INT_STATUS_0, /* CRITICAL interrupt status */
+ CRIT_INT_STATUS_1,
+ CRIT_INT_STATUS_2,
+ CRIT_INT_STATUS_3,
+ CRIT_INT_STATUS_4,
+ CRIT_INT_STATUS_5,
+ CRIT_INT_STATUS_6,
+ CRIT_INT_STATUS_7,
+ CRIT_INT_STATUS_8,
+ CRIT_INT_STATUS_9,
+ CRIT_INT_STATUS_10,
+ CRIT_INT_STATUS_11,
+ CRIT_INT_STATUS_12,
+ CRIT_INT_STATUS_13,
+ CRIT_INT_STATUS_14,
+ CRIT_INT_STATUS_15,
+ CRIT_INT_CLEAR_0, /* CRITICAL interrupt clear */
+ CRIT_INT_CLEAR_1,
+ CRIT_INT_CLEAR_2,
+ CRIT_INT_CLEAR_3,
+ CRIT_INT_CLEAR_4,
+ CRIT_INT_CLEAR_5,
+ CRIT_INT_CLEAR_6,
+ CRIT_INT_CLEAR_7,
+ CRIT_INT_CLEAR_8,
+ CRIT_INT_CLEAR_9,
+ CRIT_INT_CLEAR_10,
+ CRIT_INT_CLEAR_11,
+ CRIT_INT_CLEAR_12,
+ CRIT_INT_CLEAR_13,
+ CRIT_INT_CLEAR_14,
+ CRIT_INT_CLEAR_15,
+ CRIT_INT_MASK_0, /* CRITICAL interrupt mask */
+ CRIT_INT_MASK_1,
+ CRIT_INT_MASK_2,
+ CRIT_INT_MASK_3,
+ CRIT_INT_MASK_4,
+ CRIT_INT_MASK_5,
+ CRIT_INT_MASK_6,
+ CRIT_INT_MASK_7,
+ CRIT_INT_MASK_8,
+ CRIT_INT_MASK_9,
+ CRIT_INT_MASK_10,
+ CRIT_INT_MASK_11,
+ CRIT_INT_MASK_12,
+ CRIT_INT_MASK_13,
+ CRIT_INT_MASK_14,
+ CRIT_INT_MASK_15,
+ CRIT_THRESH_0, /* CRITICAL threshold values */
+ CRIT_THRESH_1,
+ CRIT_THRESH_2,
+ CRIT_THRESH_3,
+ CRIT_THRESH_4,
+ CRIT_THRESH_5,
+ CRIT_THRESH_6,
+ CRIT_THRESH_7,
+ CRIT_THRESH_8,
+ CRIT_THRESH_9,
+ CRIT_THRESH_10,
+ CRIT_THRESH_11,
+ CRIT_THRESH_12,
+ CRIT_THRESH_13,
+ CRIT_THRESH_14,
+ CRIT_THRESH_15,
+
+ /* WATCHDOG */
+ WDOG_BARK_STATUS,
+ WDOG_BARK_CLEAR,
+ WDOG_BARK_MASK,
+ WDOG_BARK_COUNT,
+
+ /* CYCLE COMPLETION MONITOR */
+ CC_MON_STATUS,
+ CC_MON_CLEAR,
+ CC_MON_MASK,
+
MIN_STATUS_0, /* MIN threshold violated */
MIN_STATUS_1,
MIN_STATUS_2,
@@ -418,6 +496,7 @@ enum regfield_ids {
* @adc: do the sensors only output adc code (instead of temperature)?
* @srot_split: does the IP neatly splits the register space into SROT and TM,
* with SROT only being available to secure boot firmware?
+ * @has_watchdog: does this IP support watchdog functionality?
* @max_sensors: maximum sensors supported by this version of the IP
*/
struct tsens_features {
@@ -425,6 +504,7 @@ struct tsens_features {
unsigned int crit_int:1;
unsigned int adc:1;
unsigned int srot_split:1;
+ unsigned int has_watchdog:1;
unsigned int max_sensors;
};
@@ -440,12 +520,14 @@ struct tsens_plat_data {
const u32 num_sensors;
const struct tsens_ops *ops;
unsigned int *hw_ids;
- const struct tsens_features *feat;
+ struct tsens_features *feat;
const struct reg_field *fields;
};
/**
* struct tsens_context - Registers to be saved/restored across a context loss
+ * @threshold: Threshold register value
+ * @control: Control register value
*/
struct tsens_context {
int threshold;
@@ -460,6 +542,8 @@ struct tsens_context {
* @srot_map: pointer to SROT register address space
* @tm_offset: deal with old device trees that don't address TM and SROT
* address space separately
+ * @ul_lock: lock while processing upper/lower threshold interrupts
+ * @crit_lock: lock while processing critical threshold interrupts
* @rf: array of regmap_fields used to store value of the field
* @ctx: registers to be saved and restored during suspend/resume
* @feat: features of the IP
@@ -481,36 +565,37 @@ struct tsens_priv {
struct regmap_field *rf[MAX_REGFIELDS];
struct tsens_context ctx;
- const struct tsens_features *feat;
+ struct tsens_features *feat;
const struct reg_field *fields;
const struct tsens_ops *ops;
struct dentry *debug_root;
struct dentry *debug;
- struct tsens_sensor sensor[0];
+ struct tsens_sensor sensor[];
};
char *qfprom_read(struct device *dev, const char *cname);
void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode);
int init_common(struct tsens_priv *priv);
-int get_temp_tsens_valid(struct tsens_sensor *s, int *temp);
-int get_temp_common(struct tsens_sensor *s, int *temp);
+int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp);
+int get_temp_common(const struct tsens_sensor *s, int *temp);
int tsens_enable_irq(struct tsens_priv *priv);
void tsens_disable_irq(struct tsens_priv *priv);
int tsens_set_trips(void *_sensor, int low, int high);
irqreturn_t tsens_irq_thread(int irq, void *data);
+irqreturn_t tsens_critical_irq_thread(int irq, void *data);
/* TSENS target */
-extern const struct tsens_plat_data data_8960;
+extern struct tsens_plat_data data_8960;
/* TSENS v0.1 targets */
-extern const struct tsens_plat_data data_8916, data_8974;
+extern struct tsens_plat_data data_8916, data_8974;
/* TSENS v1 targets */
-extern const struct tsens_plat_data data_tsens_v1, data_8976;
+extern struct tsens_plat_data data_tsens_v1, data_8976;
/* TSENS v2 targets */
-extern const struct tsens_plat_data data_8996, data_tsens_v2;
+extern struct tsens_plat_data data_8996, data_tsens_v2;
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c
index 874bc46e6c73..028a6bbf75dc 100644
--- a/drivers/thermal/qoriq_thermal.c
+++ b/drivers/thermal/qoriq_thermal.c
@@ -3,12 +3,11 @@
// Copyright 2016 Freescale Semiconductor, Inc.
#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_address.h>
+#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/thermal.h>
@@ -228,6 +227,14 @@ static const struct regmap_access_table qoriq_rd_table = {
.n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges),
};
+static void qoriq_tmu_action(void *p)
+{
+ struct qoriq_tmu_data *data = p;
+
+ regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
+ clk_disable_unprepare(data->clk);
+}
+
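qoriq_tmu_action() is registered below with devm_add_action_or_reset(), which runs the action on every later probe failure and again on device removal; devres callbacks fire in reverse order of registration, which is why the explicit error label and the remove() callback further down can be deleted. A sketch of the call-site semantics (illustrative only):

	ret = devm_add_action_or_reset(dev, qoriq_tmu_action, data);
	if (ret)
		/* on failure the action has already run:
		 * monitoring disabled, clock released
		 */
		return ret;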
static int qoriq_tmu_probe(struct platform_device *pdev)
{
int ret;
@@ -278,6 +285,10 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
return ret;
}
+ ret = devm_add_action_or_reset(dev, qoriq_tmu_action, data);
+ if (ret)
+ return ret;
+
/* version register offset at: 0xbf8 on both v1 and v2 */
ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver);
if (ret) {
@@ -290,35 +301,17 @@ static int qoriq_tmu_probe(struct platform_device *pdev)
ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */
if (ret < 0)
- goto err;
+ return ret;
ret = qoriq_tmu_register_tmu_zone(dev, data);
if (ret < 0) {
dev_err(dev, "Failed to register sensors\n");
- ret = -ENODEV;
- goto err;
+ return ret;
}
platform_set_drvdata(pdev, data);
return 0;
-
-err:
- clk_disable_unprepare(data->clk);
-
- return ret;
-}
-
-static int qoriq_tmu_remove(struct platform_device *pdev)
-{
- struct qoriq_tmu_data *data = platform_get_drvdata(pdev);
-
- /* Disable monitoring */
- regmap_write(data->regmap, REGS_TMR, TMR_DISABLE);
-
- clk_disable_unprepare(data->clk);
-
- return 0;
}
static int __maybe_unused qoriq_tmu_suspend(struct device *dev)
@@ -365,7 +358,6 @@ static struct platform_driver qoriq_tmu = {
.of_match_table = qoriq_tmu_match,
},
.probe = qoriq_tmu_probe,
- .remove = qoriq_tmu_remove,
};
module_platform_driver(qoriq_tmu);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 72877bdc072d..58fe7c1ef00b 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -81,8 +81,6 @@ struct rcar_gen3_thermal_tsc {
void __iomem *base;
struct thermal_zone_device *zone;
struct equation_coefs coef;
- int low;
- int high;
int tj_t;
int id; /* thermal channel id */
};
@@ -204,12 +202,14 @@ static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc,
return INT_FIXPT(val);
}
-static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
+static int rcar_gen3_thermal_update_range(struct rcar_gen3_thermal_tsc *tsc)
{
- struct rcar_gen3_thermal_tsc *tsc = devdata;
+ int temperature, low, high;
+
+ rcar_gen3_thermal_get_temp(tsc, &temperature);
- low = clamp_val(low, -40000, 120000);
- high = clamp_val(high, -40000, 120000);
+ low = temperature - MCELSIUS(1);
+ high = temperature + MCELSIUS(1);
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1,
rcar_gen3_thermal_mcelsius_to_temp(tsc, low));
@@ -217,15 +217,11 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high)
rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2,
rcar_gen3_thermal_mcelsius_to_temp(tsc, high));
- tsc->low = low;
- tsc->high = high;
-
return 0;
}
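rcar_gen3_thermal_update_range() re-centres a ±1 °C interrupt window around the current reading, so the hardware fires whenever the temperature drifts a degree in either direction and the handler then moves the window again. A quick numeric check, using the driver's MCELSIUS() millicelsius scaling (illustrative values):

	/* current reading 55.0 degC, i.e. temperature = 55000 */
	low  = 55000 - MCELSIUS(1);	/* 54000 -> REG_GEN3_IRQTEMP1 */
	high = 55000 + MCELSIUS(1);	/* 56000 -> REG_GEN3_IRQTEMP2 */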
static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = {
.get_temp = rcar_gen3_thermal_get_temp,
- .set_trips = rcar_gen3_thermal_set_trips,
};
static void rcar_thermal_irq_set(struct rcar_gen3_thermal_priv *priv, bool on)
@@ -246,9 +242,11 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data)
for (i = 0; i < priv->num_tscs; i++) {
status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR);
rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0);
- if (status)
+ if (status) {
+ rcar_gen3_thermal_update_range(priv->tscs[i]);
thermal_zone_device_update(priv->tscs[i]->zone,
THERMAL_EVENT_UNSPECIFIED);
+ }
}
return IRQ_HANDLED;
@@ -325,6 +323,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = {
.data = &rcar_gen3_ths_tj_1_m3_w,
},
{
+ .compatible = "renesas,r8a77961-thermal",
+ .data = &rcar_gen3_ths_tj_1_m3_w,
+ },
+ {
.compatible = "renesas,r8a77965-thermal",
.data = &rcar_gen3_ths_tj_1,
},
@@ -446,14 +448,15 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
goto error_unregister;
ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone);
- if (ret) {
+ if (ret)
goto error_unregister;
- }
ret = of_thermal_get_ntrips(tsc->zone);
if (ret < 0)
goto error_unregister;
+ rcar_gen3_thermal_update_range(tsc);
+
dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret);
}
@@ -492,7 +495,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev)
struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
priv->thermal_init(tsc);
- rcar_gen3_thermal_set_trips(tsc, tsc->low, tsc->high);
+ rcar_gen3_thermal_update_range(tsc);
}
rcar_thermal_irq_set(priv, true);
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8f1aafa2044e..e0c1f2409035 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -95,7 +95,6 @@ struct rcar_thermal_priv {
struct mutex lock;
struct list_head list;
int id;
- u32 ctemp;
};
#define rcar_thermal_for_each_priv(pos, common) \
@@ -201,7 +200,6 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
struct device *dev = rcar_priv_to_dev(priv);
int i;
u32 ctemp, old, new;
- int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -247,37 +245,29 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
((ctemp - 1) << 0)));
}
- dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp);
-
- priv->ctemp = ctemp;
- ret = 0;
err_out_unlock:
mutex_unlock(&priv->lock);
- return ret;
+
+ return ctemp ? ctemp : -EINVAL;
}
static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
int *temp)
{
- int tmp;
- int ret;
-
- ret = rcar_thermal_update_temp(priv);
- if (ret < 0)
- return ret;
+ int ctemp;
- mutex_lock(&priv->lock);
- if (priv->chip->ctemp_bands == 1)
- tmp = MCELSIUS((priv->ctemp * 5) - 65);
- else if (priv->ctemp < 24)
- tmp = MCELSIUS(((priv->ctemp * 55) - 720) / 10);
- else
- tmp = MCELSIUS((priv->ctemp * 5) - 60);
- mutex_unlock(&priv->lock);
+ ctemp = rcar_thermal_update_temp(priv);
+ if (ctemp < 0)
+ return ctemp;
/* Guaranteed operating range is -45C to 125C. */
- *temp = tmp;
+ if (priv->chip->ctemp_bands == 1)
+ *temp = MCELSIUS((ctemp * 5) - 65);
+ else if (ctemp < 24)
+ *temp = MCELSIUS(((ctemp * 55) - 720) / 10);
+ else
+ *temp = MCELSIUS((ctemp * 5) - 60);
return 0;
}
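A quick sanity check of the three conversion branches with illustrative register values:

	/* single-band chips: ctemp = 25 */
	MCELSIUS((25 * 5) - 65);		/* ->  60000, 60.0 degC */
	/* two-band chips, ctemp < 24: ctemp = 20 */
	MCELSIUS(((20 * 55) - 720) / 10);	/* ->  38000, 38.0 degC */
	/* two-band chips, ctemp >= 24: ctemp = 30 */
	MCELSIUS((30 * 5) - 60);		/* ->  90000, 90.0 degC */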
@@ -387,28 +377,17 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable)
static void rcar_thermal_work(struct work_struct *work)
{
struct rcar_thermal_priv *priv;
- int cctemp, nctemp;
int ret;
priv = container_of(work, struct rcar_thermal_priv, work.work);
- ret = rcar_thermal_get_current_temp(priv, &cctemp);
- if (ret < 0)
- return;
-
ret = rcar_thermal_update_temp(priv);
if (ret < 0)
return;
rcar_thermal_irq_enable(priv);
- ret = rcar_thermal_get_current_temp(priv, &nctemp);
- if (ret < 0)
- return;
-
- if (nctemp != cctemp)
- thermal_zone_device_update(priv->zone,
- THERMAL_EVENT_UNSPECIFIED);
+ thermal_zone_device_update(priv->zone, THERMAL_EVENT_UNSPECIFIED);
}
static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status)
@@ -521,8 +500,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM,
mres++);
common->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(common->base))
- return PTR_ERR(common->base);
+ if (IS_ERR(common->base)) {
+ ret = PTR_ERR(common->base);
+ goto error_unregister;
+ }
idle = 0; /* polling delay is not needed */
}
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index fd4a17812f33..e9a90bc23b11 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1094,7 +1094,9 @@ static int exynos_tmu_probe(struct platform_device *pdev)
&exynos_sensor_ops);
if (IS_ERR(data->tzd)) {
ret = PTR_ERR(data->tzd);
- dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to register sensor: %d\n",
+ ret);
goto err_sclk;
}
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
new file mode 100644
index 000000000000..a340374e8c51
--- /dev/null
+++ b/drivers/thermal/sprd_thermal.c
@@ -0,0 +1,552 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Spreadtrum Communications Inc.
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/thermal.h>
+
+#define SPRD_THM_CTL 0x0
+#define SPRD_THM_INT_EN 0x4
+#define SPRD_THM_INT_STS 0x8
+#define SPRD_THM_INT_RAW_STS 0xc
+#define SPRD_THM_DET_PERIOD 0x10
+#define SPRD_THM_INT_CLR 0x14
+#define SPRD_THM_INT_CLR_ST 0x18
+#define SPRD_THM_MON_PERIOD 0x4c
+#define SPRD_THM_MON_CTL 0x50
+#define SPRD_THM_INTERNAL_STS1 0x54
+#define SPRD_THM_RAW_READ_MSK 0x3ff
+
+#define SPRD_THM_OFFSET(id) ((id) * 0x4)
+#define SPRD_THM_TEMP(id) (SPRD_THM_OFFSET(id) + 0x5c)
+#define SPRD_THM_THRES(id) (SPRD_THM_OFFSET(id) + 0x2c)
+
+#define SPRD_THM_SEN(id) BIT((id) + 2)
+#define SPRD_THM_SEN_OVERHEAT_EN(id) BIT((id) + 8)
+#define SPRD_THM_SEN_OVERHEAT_ALARM_EN(id) BIT((id) + 0)
+
+/* bits definitions for register THM_CTL */
+#define SPRD_THM_SET_RDY_ST BIT(13)
+#define SPRD_THM_SET_RDY BIT(12)
+#define SPRD_THM_MON_EN BIT(1)
+#define SPRD_THM_EN BIT(0)
+
+/* bits definitions for register THM_INT_CTL */
+#define SPRD_THM_BIT_INT_EN BIT(26)
+#define SPRD_THM_OVERHEAT_EN BIT(25)
+#define SPRD_THM_OTP_TRIP_SHIFT 10
+
+/* bits definitions for register SPRD_THM_INTERNAL_STS1 */
+#define SPRD_THM_TEMPER_RDY BIT(0)
+
+#define SPRD_THM_DET_PERIOD_DATA 0x800
+#define SPRD_THM_DET_PERIOD_MASK GENMASK(19, 0)
+#define SPRD_THM_MON_MODE 0x7
+#define SPRD_THM_MON_MODE_MASK GENMASK(3, 0)
+#define SPRD_THM_MON_PERIOD_DATA 0x10
+#define SPRD_THM_MON_PERIOD_MASK GENMASK(15, 0)
+#define SPRD_THM_THRES_MASK GENMASK(19, 0)
+#define SPRD_THM_INT_CLR_MASK GENMASK(24, 0)
+
+/* thermal sensor calibration parameters */
+#define SPRD_THM_TEMP_LOW -40000
+#define SPRD_THM_TEMP_HIGH 120000
+#define SPRD_THM_OTP_TEMP 120000
+#define SPRD_THM_HOT_TEMP 75000
+#define SPRD_THM_RAW_DATA_LOW 0
+#define SPRD_THM_RAW_DATA_HIGH 1000
+#define SPRD_THM_SEN_NUM 8
+#define SPRD_THM_DT_OFFSET 24
+#define SPRD_THM_RATION_OFFSET 17
+#define SPRD_THM_RATION_SIGN 16
+
+#define SPRD_THM_RDYST_POLLING_TIME 10
+#define SPRD_THM_RDYST_TIMEOUT 700
+#define SPRD_THM_TEMP_READY_POLL_TIME 10000
+#define SPRD_THM_TEMP_READY_TIMEOUT 600000
+#define SPRD_THM_MAX_SENSOR 8
+
+struct sprd_thermal_sensor {
+ struct thermal_zone_device *tzd;
+ struct sprd_thermal_data *data;
+ struct device *dev;
+ int cal_slope;
+ int cal_offset;
+ int id;
+};
+
+struct sprd_thermal_data {
+ const struct sprd_thm_variant_data *var_data;
+ struct sprd_thermal_sensor *sensor[SPRD_THM_MAX_SENSOR];
+ struct clk *clk;
+ void __iomem *base;
+ u32 ratio_off;
+ int ratio_sign;
+ int nr_sensors;
+};
+
+/*
+ * The conversion between ADC code and temperature is based on a linear
+ * relationship, using ideal_k to specify the slope and ideal_b to specify
+ * the offset.
+ *
+ * Since different Spreadtrum SoCs have different ideal_k and ideal_b,
+ * save ideal_k and ideal_b in the per-variant device data structure.
+ */
+struct sprd_thm_variant_data {
+ u32 ideal_k;
+ u32 ideal_b;
+};
+
+static const struct sprd_thm_variant_data ums512_data = {
+ .ideal_k = 262,
+ .ideal_b = 66400,
+};
+
+static inline void sprd_thm_update_bits(void __iomem *reg, u32 mask, u32 val)
+{
+ u32 tmp, orig;
+
+ orig = readl(reg);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ writel(tmp, reg);
+}
+
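The helper is a plain read-modify-write: only the bits selected by mask change. For example, enabling the controller without disturbing the rest of THM_CTL, exactly as done later in this file:

	sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
			     SPRD_THM_EN, SPRD_THM_EN);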
+static int sprd_thm_cal_read(struct device_node *np, const char *cell_id,
+ u32 *val)
+{
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+
+ cell = of_nvmem_cell_get(np, cell_id);
+ if (IS_ERR(cell))
+ return PTR_ERR(cell);
+
+ buf = nvmem_cell_read(cell, &len);
+ nvmem_cell_put(cell);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ if (len > sizeof(u32)) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ memcpy(val, buf, len);
+
+ kfree(buf);
+ return 0;
+}
+
+static int sprd_thm_sensor_calibration(struct device_node *np,
+ struct sprd_thermal_data *thm,
+ struct sprd_thermal_sensor *sen)
+{
+ int ret;
+ /*
+	 * According to the thermal datasheet, the default calibration offset is 64,
+ * and the default ratio is 1000.
+ */
+ int dt_offset = 64, ratio = 1000;
+
+ ret = sprd_thm_cal_read(np, "sen_delta_cal", &dt_offset);
+ if (ret)
+ return ret;
+
+ ratio += thm->ratio_sign * thm->ratio_off;
+
+ /*
+	 * Using the ideal slope K and ideal offset B, combined with the
+	 * thermal calibration value from the efuse, compute the real
+	 * slope k and offset b:
+	 * k_cal = (k * ratio) / 1000.
+	 * b_cal = b + (dt_offset - 128) * 250.
+ */
+ sen->cal_slope = (thm->var_data->ideal_k * ratio) / 1000;
+ sen->cal_offset = thm->var_data->ideal_b + (dt_offset - 128) * 250;
+
+ return 0;
+}
+
+static int sprd_thm_rawdata_to_temp(struct sprd_thermal_sensor *sen,
+ u32 rawdata)
+{
+	rawdata = clamp(rawdata, (u32)SPRD_THM_RAW_DATA_LOW,
+			(u32)SPRD_THM_RAW_DATA_HIGH);
+
+ /*
+	 * According to the thermal datasheet, the formula for converting
+	 * an adc value to a temperature value is:
+ * T_final = k_cal * x - b_cal.
+ */
+ return sen->cal_slope * rawdata - sen->cal_offset;
+}
+
+static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen)
+{
+ u32 val;
+
+	temp = clamp(temp, (int)SPRD_THM_TEMP_LOW, (int)SPRD_THM_TEMP_HIGH);
+
+ /*
+	 * Inverting the datasheet formula T_final = k_cal * x - b_cal,
+	 * the adc value for a given temperature is:
+	 * x = (T_final + b_cal) / k_cal.
+ */
+ val = (temp + sen->cal_offset) / sen->cal_slope;
+
+ return clamp(val, val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1));
+}
+
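Plugging in the ums512 constants above makes the calibration concrete. With a hypothetical dt_offset of 128 and a zero ratio correction (so ratio = 1000), the calibrated values reduce to the ideal ones, cal_slope = 262 and cal_offset = 66400, and the two conversions round-trip:

	/* raw code 500 -> 262 * 500 - 66400 = 64600 mC (64.6 degC) */
	/* 64600 mC back to raw: (64600 + 66400) / 262 = 500 */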
+static int sprd_thm_read_temp(void *devdata, int *temp)
+{
+ struct sprd_thermal_sensor *sen = devdata;
+ u32 data;
+
+ data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) &
+ SPRD_THM_RAW_READ_MSK;
+
+ *temp = sprd_thm_rawdata_to_temp(sen, data);
+
+ return 0;
+}
+
+static const struct thermal_zone_of_device_ops sprd_thm_ops = {
+ .get_temp = sprd_thm_read_temp,
+};
+
+static int sprd_thm_poll_ready_status(struct sprd_thermal_data *thm)
+{
+ u32 val;
+ int ret;
+
+ /*
+ * Wait for thermal ready status before configuring thermal parameters.
+ */
+ ret = readl_poll_timeout(thm->base + SPRD_THM_CTL, val,
+ !(val & SPRD_THM_SET_RDY_ST),
+ SPRD_THM_RDYST_POLLING_TIME,
+ SPRD_THM_RDYST_TIMEOUT);
+ if (ret)
+ return ret;
+
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_MON_EN,
+ SPRD_THM_MON_EN);
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SET_RDY,
+ SPRD_THM_SET_RDY);
+ return 0;
+}
+
+static int sprd_thm_wait_temp_ready(struct sprd_thermal_data *thm)
+{
+ u32 val;
+
+ /* Wait for first temperature data ready before reading temperature */
+ return readl_poll_timeout(thm->base + SPRD_THM_INTERNAL_STS1, val,
+ !(val & SPRD_THM_TEMPER_RDY),
+ SPRD_THM_TEMP_READY_POLL_TIME,
+ SPRD_THM_TEMP_READY_TIMEOUT);
+}
+
+static int sprd_thm_set_ready(struct sprd_thermal_data *thm)
+{
+ int ret;
+
+ ret = sprd_thm_poll_ready_status(thm);
+ if (ret)
+ return ret;
+
+ /*
+ * Clear interrupt status, enable thermal interrupt and enable thermal.
+ *
+ * The SPRD thermal controller integrates a hardware interrupt signal,
+ * which means if the temperature is overheat, it will generate an
+ * interrupt and notify the event to PMIC automatically to shutdown the
+ * system. So here we should enable the interrupt bits, though we have
+ * not registered an irq handler.
+ */
+ writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR);
+ sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN,
+ SPRD_THM_BIT_INT_EN, SPRD_THM_BIT_INT_EN);
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_EN, SPRD_THM_EN);
+ return 0;
+}
+
+static void sprd_thm_sensor_init(struct sprd_thermal_data *thm,
+ struct sprd_thermal_sensor *sen)
+{
+ u32 otp_rawdata, hot_rawdata;
+
+ otp_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_OTP_TEMP, sen);
+ hot_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_HOT_TEMP, sen);
+
+	/* Enable the sensor's overheat temperature protection interrupt */
+ sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN,
+ SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id),
+ SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id));
+
+	/* Set the sensor's overheat and hot threshold temperatures */
+ sprd_thm_update_bits(thm->base + SPRD_THM_THRES(sen->id),
+ SPRD_THM_THRES_MASK,
+ (otp_rawdata << SPRD_THM_OTP_TRIP_SHIFT) |
+ hot_rawdata);
+
+ /* Enable the corresponding sensor */
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SEN(sen->id),
+ SPRD_THM_SEN(sen->id));
+}
+
+static void sprd_thm_para_config(struct sprd_thermal_data *thm)
+{
+	/* Set the period between two valid temperature detection actions */
+	sprd_thm_update_bits(thm->base + SPRD_THM_DET_PERIOD,
+			     SPRD_THM_DET_PERIOD_MASK,
+			     SPRD_THM_DET_PERIOD_DATA);
+
+ /* Set the sensors' monitor mode */
+ sprd_thm_update_bits(thm->base + SPRD_THM_MON_CTL,
+ SPRD_THM_MON_MODE_MASK, SPRD_THM_MON_MODE);
+
+ /* Set the sensors' monitor period */
+ sprd_thm_update_bits(thm->base + SPRD_THM_MON_PERIOD,
+			     SPRD_THM_MON_PERIOD_MASK,
+			     SPRD_THM_MON_PERIOD_DATA);
+}
+
+static void sprd_thm_toggle_sensor(struct sprd_thermal_sensor *sen, bool on)
+{
+ struct thermal_zone_device *tzd = sen->tzd;
+
+ tzd->ops->set_mode(tzd,
+ on ? THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED);
+}
+
+static int sprd_thm_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *sen_child;
+ struct sprd_thermal_data *thm;
+ struct sprd_thermal_sensor *sen;
+ const struct sprd_thm_variant_data *pdata;
+ int ret, i;
+ u32 val;
+
+ pdata = of_device_get_match_data(&pdev->dev);
+ if (!pdata) {
+ dev_err(&pdev->dev, "No matching driver data found\n");
+ return -EINVAL;
+ }
+
+ thm = devm_kzalloc(&pdev->dev, sizeof(*thm), GFP_KERNEL);
+ if (!thm)
+ return -ENOMEM;
+
+ thm->var_data = pdata;
+ thm->base = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(thm->base))
+		return PTR_ERR(thm->base);
+
+ thm->nr_sensors = of_get_child_count(np);
+ if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
+ dev_err(&pdev->dev, "incorrect sensor count\n");
+ return -EINVAL;
+ }
+
+ thm->clk = devm_clk_get(&pdev->dev, "enable");
+ if (IS_ERR(thm->clk)) {
+ dev_err(&pdev->dev, "failed to get enable clock\n");
+ return PTR_ERR(thm->clk);
+ }
+
+ ret = clk_prepare_enable(thm->clk);
+ if (ret)
+ return ret;
+
+ sprd_thm_para_config(thm);
+
+ ret = sprd_thm_cal_read(np, "thm_sign_cal", &val);
+ if (ret)
+ goto disable_clk;
+
+ if (val > 0)
+ thm->ratio_sign = -1;
+ else
+ thm->ratio_sign = 1;
+
+ ret = sprd_thm_cal_read(np, "thm_ratio_cal", &thm->ratio_off);
+ if (ret)
+ goto disable_clk;
+
+ for_each_child_of_node(np, sen_child) {
+ sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL);
+ if (!sen) {
+ ret = -ENOMEM;
+ goto disable_clk;
+ }
+
+ sen->data = thm;
+ sen->dev = &pdev->dev;
+
+ ret = of_property_read_u32(sen_child, "reg", &sen->id);
+ if (ret) {
+			dev_err(&pdev->dev, "get sensor reg failed\n");
+ goto disable_clk;
+ }
+
+ ret = sprd_thm_sensor_calibration(sen_child, thm, sen);
+ if (ret) {
+			dev_err(&pdev->dev, "efuse cal analysis failed\n");
+ goto disable_clk;
+ }
+
+ sprd_thm_sensor_init(thm, sen);
+
+ sen->tzd = devm_thermal_zone_of_sensor_register(sen->dev,
+ sen->id,
+ sen,
+ &sprd_thm_ops);
+ if (IS_ERR(sen->tzd)) {
+ dev_err(&pdev->dev, "register thermal zone failed %d\n",
+ sen->id);
+ ret = PTR_ERR(sen->tzd);
+ goto disable_clk;
+ }
+
+ thm->sensor[sen->id] = sen;
+ }
+
+ ret = sprd_thm_set_ready(thm);
+ if (ret)
+ goto disable_clk;
+
+ ret = sprd_thm_wait_temp_ready(thm);
+ if (ret)
+ goto disable_clk;
+
+ for (i = 0; i < thm->nr_sensors; i++)
+ sprd_thm_toggle_sensor(thm->sensor[i], true);
+
+ platform_set_drvdata(pdev, thm);
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(thm->clk);
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void sprd_thm_hw_suspend(struct sprd_thermal_data *thm)
+{
+ int i;
+
+ for (i = 0; i < thm->nr_sensors; i++) {
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_SEN(thm->sensor[i]->id), 0);
+ }
+
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_EN, 0x0);
+}
+
+static int sprd_thm_suspend(struct device *dev)
+{
+ struct sprd_thermal_data *thm = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < thm->nr_sensors; i++)
+ sprd_thm_toggle_sensor(thm->sensor[i], false);
+
+ sprd_thm_hw_suspend(thm);
+ clk_disable_unprepare(thm->clk);
+
+ return 0;
+}
+
+static int sprd_thm_hw_resume(struct sprd_thermal_data *thm)
+{
+ int ret, i;
+
+ for (i = 0; i < thm->nr_sensors; i++) {
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_SEN(thm->sensor[i]->id),
+ SPRD_THM_SEN(thm->sensor[i]->id));
+ }
+
+ ret = sprd_thm_poll_ready_status(thm);
+ if (ret)
+ return ret;
+
+ writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR);
+ sprd_thm_update_bits(thm->base + SPRD_THM_CTL,
+ SPRD_THM_EN, SPRD_THM_EN);
+ return sprd_thm_wait_temp_ready(thm);
+}
+
+static int sprd_thm_resume(struct device *dev)
+{
+ struct sprd_thermal_data *thm = dev_get_drvdata(dev);
+ int ret, i;
+
+ ret = clk_prepare_enable(thm->clk);
+ if (ret)
+ return ret;
+
+ ret = sprd_thm_hw_resume(thm);
+ if (ret)
+ goto disable_clk;
+
+ for (i = 0; i < thm->nr_sensors; i++)
+ sprd_thm_toggle_sensor(thm->sensor[i], true);
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(thm->clk);
+ return ret;
+}
+#endif
+
+static int sprd_thm_remove(struct platform_device *pdev)
+{
+ struct sprd_thermal_data *thm = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < thm->nr_sensors; i++) {
+ sprd_thm_toggle_sensor(thm->sensor[i], false);
+ devm_thermal_zone_of_sensor_unregister(&pdev->dev,
+ thm->sensor[i]->tzd);
+ }
+
+ clk_disable_unprepare(thm->clk);
+ return 0;
+}
+
+static const struct of_device_id sprd_thermal_of_match[] = {
+ { .compatible = "sprd,ums512-thermal", .data = &ums512_data },
+ { },
+};
+
+static const struct dev_pm_ops sprd_thermal_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(sprd_thm_suspend, sprd_thm_resume)
+};
+
+static struct platform_driver sprd_thermal_driver = {
+ .probe = sprd_thm_probe,
+ .remove = sprd_thm_remove,
+ .driver = {
+ .name = "sprd-thermal",
+ .pm = &sprd_thermal_pm_ops,
+ .of_match_table = sprd_thermal_of_match,
+ },
+};
+
+module_platform_driver(sprd_thermal_driver);
+
+MODULE_AUTHOR("Freeman Liu <freeman.liu@unisoc.com>");
+MODULE_DESCRIPTION("Spreadtrum thermal driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c
index ad9e3bf8fdf6..9314e3df6a42 100644
--- a/drivers/thermal/st/stm_thermal.c
+++ b/drivers/thermal/st/stm_thermal.c
@@ -478,7 +478,8 @@ static int stm_thermal_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume);
+static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops,
+ stm_thermal_suspend, stm_thermal_resume);
static const struct thermal_zone_of_device_ops stm_tz_ops = {
.get_temp = stm_thermal_get_temp,
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index 2fa78f738568..263b0420fbe4 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/types.h>
@@ -24,7 +24,6 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
-#include <linux/of_gpio.h>
#include <linux/io.h>
#include "ti-bandgap.h"
@@ -743,27 +742,13 @@ exit:
static int ti_bandgap_tshut_init(struct ti_bandgap *bgp,
struct platform_device *pdev)
{
- int gpio_nr = bgp->tshut_gpio;
int status;
- /* Request for gpio_86 line */
- status = gpio_request(gpio_nr, "tshut");
- if (status < 0) {
- dev_err(bgp->dev, "Could not request for TSHUT GPIO:%i\n", 86);
- return status;
- }
- status = gpio_direction_input(gpio_nr);
- if (status) {
- dev_err(bgp->dev, "Cannot set input TSHUT GPIO %d\n", gpio_nr);
- return status;
- }
-
- status = request_irq(gpio_to_irq(gpio_nr), ti_bandgap_tshut_irq_handler,
+ status = request_irq(gpiod_to_irq(bgp->tshut_gpiod),
+ ti_bandgap_tshut_irq_handler,
IRQF_TRIGGER_RISING, "tshut", NULL);
- if (status) {
- gpio_free(gpio_nr);
+ if (status)
dev_err(bgp->dev, "request irq failed for TSHUT");
- }
return 0;
}
@@ -860,11 +845,10 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
} while (res);
if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- bgp->tshut_gpio = of_get_gpio(node, 0);
- if (!gpio_is_valid(bgp->tshut_gpio)) {
- dev_err(&pdev->dev, "invalid gpio for tshut (%d)\n",
- bgp->tshut_gpio);
- return ERR_PTR(-EINVAL);
+ bgp->tshut_gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN);
+ if (IS_ERR(bgp->tshut_gpiod)) {
+ dev_err(&pdev->dev, "invalid gpio for tshut\n");
+ return ERR_CAST(bgp->tshut_gpiod);
}
}
@@ -1046,10 +1030,8 @@ put_clks:
put_fclock:
clk_put(bgp->fclock);
free_irqs:
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
- gpio_free(bgp->tshut_gpio);
- }
+ if (TI_BANDGAP_HAS(bgp, TSHUT))
+ free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL);
return ret;
}
@@ -1079,10 +1061,8 @@ int ti_bandgap_remove(struct platform_device *pdev)
if (TI_BANDGAP_HAS(bgp, TALERT))
free_irq(bgp->irq, bgp);
- if (TI_BANDGAP_HAS(bgp, TSHUT)) {
- free_irq(gpio_to_irq(bgp->tshut_gpio), NULL);
- gpio_free(bgp->tshut_gpio);
- }
+ if (TI_BANDGAP_HAS(bgp, TSHUT))
+ free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL);
return 0;
}
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
index bb9b0f7faf99..fce4657e9486 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h
@@ -13,6 +13,8 @@
#include <linux/types.h>
#include <linux/err.h>
+struct gpio_desc;
+
/**
* DOC: bandgap driver data structure
* ==================================
@@ -199,7 +201,7 @@ struct ti_bandgap {
struct clk *div_clk;
spinlock_t lock; /* shields this struct */
int irq;
- int tshut_gpio;
+ struct gpio_desc *tshut_gpiod;
u32 clk_rate;
};
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
new file mode 100644
index 000000000000..7db1460104b7
--- /dev/null
+++ b/drivers/vdpa/Kconfig
@@ -0,0 +1,37 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config VDPA
+ tristate
+ help
+	  Enable this module to support vDPA devices that use a
+	  datapath complying with the virtio specification, combined
+	  with a vendor-specific control path.
+
+menuconfig VDPA_MENU
+ bool "VDPA drivers"
+ default n
+
+if VDPA_MENU
+
+config VDPA_SIM
+ tristate "vDPA device simulator"
+ depends on RUNTIME_TESTING_MENU
+ select VDPA
+ select VHOST_RING
+ default n
+ help
+	  vDPA networking device simulator which loops TX traffic back
+ to RX. This device is used for testing, prototyping and
+ development of vDPA.
+
+config IFCVF
+ tristate "Intel IFC VF VDPA driver"
+ depends on PCI_MSI
+ select VDPA
+ default n
+ help
+	  This kernel module drives the Intel IFC VF NIC to offload
+	  virtio dataplane traffic to hardware.
+ To compile this driver as a module, choose M here: the module will
+ be called ifcvf.
+
+endif # VDPA_MENU
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
new file mode 100644
index 000000000000..8bbb686ca7a2
--- /dev/null
+++ b/drivers/vdpa/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VDPA) += vdpa.o
+obj-$(CONFIG_VDPA_SIM) += vdpa_sim/
+obj-$(CONFIG_IFCVF) += ifcvf/
diff --git a/drivers/vdpa/ifcvf/Makefile b/drivers/vdpa/ifcvf/Makefile
new file mode 100644
index 000000000000..d709915995ab
--- /dev/null
+++ b/drivers/vdpa/ifcvf/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_IFCVF) += ifcvf.o
+ifcvf-$(CONFIG_IFCVF) += ifcvf_main.o ifcvf_base.o
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c
new file mode 100644
index 000000000000..b61b06ea26d3
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_base.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#include "ifcvf_base.h"
+
+static inline u8 ifc_ioread8(u8 __iomem *addr)
+{
+ return ioread8(addr);
+}
+
+static inline u16 ifc_ioread16(__le16 __iomem *addr)
+{
+ return ioread16(addr);
+}
+
+static inline u32 ifc_ioread32(__le32 __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static inline void ifc_iowrite8(u8 value, u8 __iomem *addr)
+{
+ iowrite8(value, addr);
+}
+
+static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr)
+{
+ iowrite16(value, addr);
+}
+
+static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr)
+{
+ iowrite32(value, addr);
+}
+
+static void ifc_iowrite64_twopart(u64 val,
+ __le32 __iomem *lo, __le32 __iomem *hi)
+{
+ ifc_iowrite32((u32)val, lo);
+ ifc_iowrite32(val >> 32, hi);
+}
+
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw)
+{
+ return container_of(hw, struct ifcvf_adapter, vf);
+}
+
+static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
+ struct virtio_pci_cap *cap)
+{
+ struct ifcvf_adapter *ifcvf;
+ struct pci_dev *pdev;
+ u32 length, offset;
+ u8 bar;
+
+ length = le32_to_cpu(cap->length);
+ offset = le32_to_cpu(cap->offset);
+ bar = cap->bar;
+
+	ifcvf = vf_to_adapter(hw);
+ pdev = ifcvf->pdev;
+
+ if (bar >= IFCVF_PCI_MAX_RESOURCE) {
+ IFCVF_DBG(pdev,
+ "Invalid bar number %u to get capabilities\n", bar);
+ return NULL;
+ }
+
+ if (offset + length > pci_resource_len(pdev, bar)) {
+ IFCVF_DBG(pdev,
+ "offset(%u) + len(%u) overflows bar%u's capability\n",
+ offset, length, bar);
+ return NULL;
+ }
+
+ return hw->base[bar] + offset;
+}
+
+static int ifcvf_read_config_range(struct pci_dev *dev,
+ uint32_t *val, int size, int where)
+{
+ int ret, i;
+
+ for (i = 0; i < size; i += 4) {
+ ret = pci_read_config_dword(dev, where + i, val + i / 4);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
+{
+ struct virtio_pci_cap cap;
+ u16 notify_off;
+ int ret;
+ u8 pos;
+ u32 i;
+
+ ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
+ return -EIO;
+ }
+
+ while (pos) {
+ ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
+ sizeof(cap), pos);
+ if (ret < 0) {
+ IFCVF_ERR(pdev,
+ "Failed to get PCI capability at %x\n", pos);
+ break;
+ }
+
+ if (cap.cap_vndr != PCI_CAP_ID_VNDR)
+ goto next;
+
+ switch (cap.cfg_type) {
+ case VIRTIO_PCI_CAP_COMMON_CFG:
+ hw->common_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
+ hw->common_cfg);
+ break;
+ case VIRTIO_PCI_CAP_NOTIFY_CFG:
+ pci_read_config_dword(pdev, pos + sizeof(cap),
+ &hw->notify_off_multiplier);
+ hw->notify_bar = cap.bar;
+ hw->notify_base = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->notify_base = %p\n",
+ hw->notify_base);
+ break;
+ case VIRTIO_PCI_CAP_ISR_CFG:
+ hw->isr = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
+ break;
+ case VIRTIO_PCI_CAP_DEVICE_CFG:
+ hw->net_cfg = get_cap_addr(hw, &cap);
+ IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg);
+ break;
+ }
+
+next:
+ pos = cap.cap_next;
+ }
+
+ if (hw->common_cfg == NULL || hw->notify_base == NULL ||
+ hw->isr == NULL || hw->net_cfg == NULL) {
+ IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
+ return -EIO;
+ }
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ ifc_iowrite16(i, &hw->common_cfg->queue_select);
+ notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off);
+ hw->vring[i].notify_addr = hw->notify_base +
+ notify_off * hw->notify_off_multiplier;
+ }
+
+ hw->lm_cfg = hw->base[IFCVF_LM_BAR];
+
+ IFCVF_DBG(pdev,
+		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
+ hw->common_cfg, hw->notify_base, hw->isr,
+ hw->net_cfg, hw->notify_off_multiplier);
+
+ return 0;
+}
+
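For orientation, the loop above walks the standard virtio vendor capability layout from <uapi/linux/virtio_pci.h> (reproduced here with abridged comments):

	struct virtio_pci_cap {
		__u8 cap_vndr;		/* PCI_CAP_ID_VNDR */
		__u8 cap_next;		/* offset of the next capability */
		__u8 cap_len;		/* capability length */
		__u8 cfg_type;		/* COMMON/NOTIFY/ISR/DEVICE cfg */
		__u8 bar;		/* BAR holding the structure */
		__u8 padding[3];	/* pad to a full dword */
		__le32 offset;		/* offset within the BAR */
		__le32 length;		/* length of the structure, in bytes */
	};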
+u8 ifcvf_get_status(struct ifcvf_hw *hw)
+{
+ return ifc_ioread8(&hw->common_cfg->device_status);
+}
+
+void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
+{
+ ifc_iowrite8(status, &hw->common_cfg->device_status);
+}
+
+void ifcvf_reset(struct ifcvf_hw *hw)
+{
+ ifcvf_set_status(hw, 0);
+ /* flush set_status, make sure VF is stopped, reset */
+ ifcvf_get_status(hw);
+}
+
+static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status)
+{
+ if (status != 0)
+ status |= ifcvf_get_status(hw);
+
+ ifcvf_set_status(hw, status);
+ ifcvf_get_status(hw);
+}
+
+u64 ifcvf_get_features(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+ u32 features_lo, features_hi;
+
+ ifc_iowrite32(0, &cfg->device_feature_select);
+ features_lo = ifc_ioread32(&cfg->device_feature);
+
+ ifc_iowrite32(1, &cfg->device_feature_select);
+ features_hi = ifc_ioread32(&cfg->device_feature);
+
+ return ((u64)features_hi << 32) | features_lo;
+}
+
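Callers mask the device feature bits against what the driver supports and then test individual bits; ifcvf_vdpa_get_features() below performs the masking, and a hypothetical check of a single bit would look like:

	u64 feats = ifcvf_get_features(hw) & IFCVF_SUPPORTED_FEATURES;

	if (feats & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF))
		/* device is allowed to merge receive buffers */;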
+void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+ void *dst, int length)
+{
+ u8 old_gen, new_gen, *p;
+ int i;
+
+ WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ do {
+ old_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+ p = dst;
+ for (i = 0; i < length; i++)
+ *p++ = ifc_ioread8(hw->net_cfg + offset + i);
+
+ new_gen = ifc_ioread8(&hw->common_cfg->config_generation);
+ } while (old_gen != new_gen);
+}
+
+void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+ const void *src, int length)
+{
+ const u8 *p;
+ int i;
+
+ p = src;
+ WARN_ON(offset + length > sizeof(struct virtio_net_config));
+ for (i = 0; i < length; i++)
+ ifc_iowrite8(*p++, hw->net_cfg + offset + i);
+}
+
+static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features)
+{
+ struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
+
+ ifc_iowrite32(0, &cfg->guest_feature_select);
+ ifc_iowrite32((u32)features, &cfg->guest_feature);
+
+ ifc_iowrite32(1, &cfg->guest_feature_select);
+ ifc_iowrite32(features >> 32, &cfg->guest_feature);
+}
+
+static int ifcvf_config_features(struct ifcvf_hw *hw)
+{
+ struct ifcvf_adapter *ifcvf;
+
+ ifcvf = vf_to_adapter(hw);
+ ifcvf_set_features(hw, hw->req_features);
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK);
+
+ if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) {
+ IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
+{
+ struct ifcvf_lm_cfg __iomem *ifcvf_lm;
+ void __iomem *avail_idx_addr;
+ u16 last_avail_idx;
+ u32 q_pair_id;
+
+ ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
+ q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
+ last_avail_idx = ifc_ioread16(avail_idx_addr);
+
+ return last_avail_idx;
+}
+
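The live-migration region keeps one pair of 16-bit avail indices per queue pair. With IFCVF_MAX_QUEUE_PAIRS = 1 the arithmetic above collapses to (illustrative, assuming the usual virtio-net rx/tx ordering):

	/* qid 0 (rx): q_pair_id = 0, idx_addr[0] */
	/* qid 1 (tx): q_pair_id = 0, idx_addr[1] */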
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num)
+{
+ struct ifcvf_lm_cfg __iomem *ifcvf_lm;
+ void __iomem *avail_idx_addr;
+ u32 q_pair_id;
+
+ ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
+ q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2);
+ avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2];
+ hw->vring[qid].last_avail_idx = num;
+ ifc_iowrite16(num, avail_idx_addr);
+
+ return 0;
+}
+
+static int ifcvf_hw_enable(struct ifcvf_hw *hw)
+{
+ struct ifcvf_lm_cfg __iomem *ifcvf_lm;
+ struct virtio_pci_common_cfg __iomem *cfg;
+ struct ifcvf_adapter *ifcvf;
+ u32 i;
+
+ ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg;
+ ifcvf = vf_to_adapter(hw);
+ cfg = hw->common_cfg;
+ ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config);
+
+ if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ if (!hw->vring[i].ready)
+ break;
+
+ ifc_iowrite16(i, &cfg->queue_select);
+ ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo,
+ &cfg->queue_desc_hi);
+ ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo,
+ &cfg->queue_avail_hi);
+ ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo,
+ &cfg->queue_used_hi);
+ ifc_iowrite16(hw->vring[i].size, &cfg->queue_size);
+ ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector);
+
+ if (ifc_ioread16(&cfg->queue_msix_vector) ==
+ VIRTIO_MSI_NO_VECTOR) {
+ IFCVF_ERR(ifcvf->pdev,
+ "No msix vector for queue %u\n", i);
+ return -EINVAL;
+ }
+
+ ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx);
+ ifc_iowrite16(1, &cfg->queue_enable);
+ }
+
+ return 0;
+}
+
+static void ifcvf_hw_disable(struct ifcvf_hw *hw)
+{
+ struct virtio_pci_common_cfg __iomem *cfg;
+ u32 i;
+
+ cfg = hw->common_cfg;
+ ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config);
+
+ for (i = 0; i < hw->nr_vring; i++) {
+ ifc_iowrite16(i, &cfg->queue_select);
+ ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector);
+ }
+
+ ifc_ioread16(&cfg->queue_msix_vector);
+}
+
+int ifcvf_start_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_reset(hw);
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE);
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER);
+
+ if (ifcvf_config_features(hw) < 0)
+ return -EINVAL;
+
+ if (ifcvf_hw_enable(hw) < 0)
+ return -EINVAL;
+
+ ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK);
+
+ return 0;
+}
+
+void ifcvf_stop_hw(struct ifcvf_hw *hw)
+{
+ ifcvf_hw_disable(hw);
+ ifcvf_reset(hw);
+}
+
+void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
+{
+ ifc_iowrite16(qid, hw->vring[qid].notify_addr);
+}
diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h
new file mode 100644
index 000000000000..e80307092351
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_base.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#ifndef _IFCVF_H_
+#define _IFCVF_H_
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_net.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_pci.h>
+
+#define IFCVF_VENDOR_ID 0x1AF4
+#define IFCVF_DEVICE_ID 0x1041
+#define IFCVF_SUBSYS_VENDOR_ID 0x8086
+#define IFCVF_SUBSYS_DEVICE_ID 0x001A
+
+#define IFCVF_SUPPORTED_FEATURES \
+ ((1ULL << VIRTIO_NET_F_MAC) | \
+ (1ULL << VIRTIO_F_ANY_LAYOUT) | \
+ (1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF))
+
+/* Only one queue pair for now. */
+#define IFCVF_MAX_QUEUE_PAIRS 1
+
+#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE
+#define IFCVF_QUEUE_MAX 32768
+#define IFCVF_MSI_CONFIG_OFF 0
+#define IFCVF_MSI_QUEUE_OFF 1
+#define IFCVF_PCI_MAX_RESOURCE 6
+
+#define IFCVF_LM_CFG_SIZE 0x40
+#define IFCVF_LM_RING_STATE_OFFSET 0x20
+#define IFCVF_LM_BAR 4
+
+#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__)
+#define IFCVF_DBG(pdev, fmt, ...) dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__)
+#define IFCVF_INFO(pdev, fmt, ...) dev_info(&pdev->dev, fmt, ##__VA_ARGS__)
+
+#define ifcvf_private_to_vf(adapter) \
+ (&((struct ifcvf_adapter *)adapter)->vf)
+
+#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1)
+
+struct vring_info {
+ u64 desc;
+ u64 avail;
+ u64 used;
+ u16 size;
+ u16 last_avail_idx;
+ bool ready;
+ void __iomem *notify_addr;
+ u32 irq;
+ struct vdpa_callback cb;
+ char msix_name[256];
+};
+
+struct ifcvf_hw {
+ u8 __iomem *isr;
+ /* Live migration */
+ u8 __iomem *lm_cfg;
+ u16 nr_vring;
+ /* Notification bar number */
+ u8 notify_bar;
+	/* Notification bar address */
+ void __iomem *notify_base;
+ u32 notify_off_multiplier;
+ u64 req_features;
+ struct virtio_pci_common_cfg __iomem *common_cfg;
+ void __iomem *net_cfg;
+ struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2];
+ void __iomem * const *base;
+};
+
+struct ifcvf_adapter {
+ struct vdpa_device vdpa;
+ struct pci_dev *pdev;
+ struct ifcvf_hw vf;
+};
+
+struct ifcvf_vring_lm_cfg {
+ u32 idx_addr[2];
+ u8 reserved[IFCVF_LM_CFG_SIZE - 8];
+};
+
+struct ifcvf_lm_cfg {
+ u8 reserved[IFCVF_LM_RING_STATE_OFFSET];
+ struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS];
+};
+
+int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev);
+int ifcvf_start_hw(struct ifcvf_hw *hw);
+void ifcvf_stop_hw(struct ifcvf_hw *hw);
+void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid);
+void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset,
+ void *dst, int length);
+void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset,
+ const void *src, int length);
+u8 ifcvf_get_status(struct ifcvf_hw *hw);
+void ifcvf_set_status(struct ifcvf_hw *hw, u8 status);
+void io_write64_twopart(u64 val, u32 *lo, u32 *hi);
+void ifcvf_reset(struct ifcvf_hw *hw);
+u64 ifcvf_get_features(struct ifcvf_hw *hw);
+u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid);
+int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num);
+struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw);
+#endif /* _IFCVF_H_ */
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
new file mode 100644
index 000000000000..8d54dc5b08d2
--- /dev/null
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel IFC VF NIC driver for virtio dataplane offloading
+ *
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Author: Zhu Lingshan <lingshan.zhu@intel.com>
+ *
+ */
+
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/sysfs.h>
+#include "ifcvf_base.h"
+
+#define VERSION_STRING "0.1"
+#define DRIVER_AUTHOR "Intel Corporation"
+#define IFCVF_DRIVER_NAME "ifcvf"
+
+static irqreturn_t ifcvf_intr_handler(int irq, void *arg)
+{
+ struct vring_info *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static int ifcvf_start_datapath(void *private)
+{
+ struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
+ struct ifcvf_adapter *ifcvf;
+ u8 status;
+ int ret;
+
+ ifcvf = vf_to_adapter(vf);
+ vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2;
+ ret = ifcvf_start_hw(vf);
+ if (ret < 0) {
+ status = ifcvf_get_status(vf);
+ status |= VIRTIO_CONFIG_S_FAILED;
+ ifcvf_set_status(vf, status);
+ }
+
+ return ret;
+}
+
+static int ifcvf_stop_datapath(void *private)
+{
+ struct ifcvf_hw *vf = ifcvf_private_to_vf(private);
+ int i;
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++)
+ vf->vring[i].cb.callback = NULL;
+
+ ifcvf_stop_hw(vf);
+
+ return 0;
+}
+
+static void ifcvf_reset_vring(struct ifcvf_adapter *adapter)
+{
+ struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter);
+ int i;
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ vf->vring[i].last_avail_idx = 0;
+ vf->vring[i].desc = 0;
+ vf->vring[i].avail = 0;
+ vf->vring[i].used = 0;
+ vf->vring[i].ready = 0;
+ vf->vring[i].cb.callback = NULL;
+ vf->vring[i].cb.private = NULL;
+ }
+
+ ifcvf_reset(vf);
+}
+
+static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev)
+{
+ return container_of(vdpa_dev, struct ifcvf_adapter, vdpa);
+}
+
+static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev);
+
+ return &adapter->vf;
+}
+
+static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+ u64 features;
+
+ features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES;
+
+ return features;
+}
+
+static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->req_features = features;
+
+ return 0;
+}
+
+static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_status(vf);
+}
+
+static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status)
+{
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+
+ vf = vdpa_to_vf(vdpa_dev);
+ adapter = dev_get_drvdata(vdpa_dev->dev.parent);
+
+ if (status == 0) {
+ ifcvf_stop_datapath(adapter);
+ ifcvf_reset_vring(adapter);
+ return;
+ }
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
+ if (ifcvf_start_datapath(adapter) < 0)
+ IFCVF_ERR(adapter->pdev,
+ "Failed to set ifcvf vdpa status %u\n",
+ status);
+ }
+
+ ifcvf_set_status(vf, status);
+}
+
+static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_QUEUE_MAX;
+}
+
+static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_get_vq_state(vf, qid);
+}
+
+static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid,
+ u64 num)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ifcvf_set_vq_state(vf, qid, num);
+}
+
+static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].cb = *cb;
+}
+
+static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev,
+ u16 qid, bool ready)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].ready = ready;
+}
+
+static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return vf->vring[qid].ready;
+}
+
+static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid,
+ u32 num)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].size = num;
+}
+
+static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ vf->vring[qid].desc = desc_area;
+ vf->vring[qid].avail = driver_area;
+ vf->vring[qid].used = device_area;
+
+ return 0;
+}
+
+static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ ifcvf_notify_queue(vf, qid);
+}
+
+static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ return ioread8(&vf->common_cfg->config_generation);
+}
+
+static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev)
+{
+ return VIRTIO_ID_NET;
+}
+
+static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_SUBSYS_VENDOR_ID;
+}
+
+static u16 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev)
+{
+ return IFCVF_QUEUE_ALIGNMENT;
+}
+
+static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ WARN_ON(offset + len > sizeof(struct virtio_net_config));
+ ifcvf_read_net_config(vf, offset, buf, len);
+}
+
+static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev);
+
+ WARN_ON(offset + len > sizeof(struct virtio_net_config));
+ ifcvf_write_net_config(vf, offset, buf, len);
+}
+
+static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev,
+ struct vdpa_callback *cb)
+{
+ /* We don't support config interrupts */
+}
+
+/*
+ * IFCVF currently doesn't have an on-chip IOMMU, so
+ * set_map()/dma_map()/dma_unmap() are not implemented.
+ */
+static const struct vdpa_config_ops ifc_vdpa_ops = {
+ .get_features = ifcvf_vdpa_get_features,
+ .set_features = ifcvf_vdpa_set_features,
+ .get_status = ifcvf_vdpa_get_status,
+ .set_status = ifcvf_vdpa_set_status,
+ .get_vq_num_max = ifcvf_vdpa_get_vq_num_max,
+ .get_vq_state = ifcvf_vdpa_get_vq_state,
+ .set_vq_state = ifcvf_vdpa_set_vq_state,
+ .set_vq_cb = ifcvf_vdpa_set_vq_cb,
+ .set_vq_ready = ifcvf_vdpa_set_vq_ready,
+ .get_vq_ready = ifcvf_vdpa_get_vq_ready,
+ .set_vq_num = ifcvf_vdpa_set_vq_num,
+ .set_vq_address = ifcvf_vdpa_set_vq_address,
+ .kick_vq = ifcvf_vdpa_kick_vq,
+ .get_generation = ifcvf_vdpa_get_generation,
+ .get_device_id = ifcvf_vdpa_get_device_id,
+ .get_vendor_id = ifcvf_vdpa_get_vendor_id,
+ .get_vq_align = ifcvf_vdpa_get_vq_align,
+ .get_config = ifcvf_vdpa_get_config,
+ .set_config = ifcvf_vdpa_set_config,
+ .set_config_cb = ifcvf_vdpa_set_config_cb,
+};
+
+static int ifcvf_request_irq(struct ifcvf_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ifcvf_hw *vf = &adapter->vf;
+ int vector, i, ret, irq;
+
+ for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) {
+ snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d",
+ pci_name(pdev), i);
+ vector = i + IFCVF_MSI_QUEUE_OFF;
+ irq = pci_irq_vector(pdev, vector);
+ ret = devm_request_irq(&pdev->dev, irq,
+ ifcvf_intr_handler, 0,
+ vf->vring[i].msix_name,
+ &vf->vring[i]);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed to request irq for vq %d\n", i);
+ return ret;
+ }
+ vf->vring[i].irq = irq;
+ }
+
+ return 0;
+}
+
+static void ifcvf_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct ifcvf_adapter *adapter;
+ struct ifcvf_hw *vf;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to enable device\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4),
+ IFCVF_DRIVER_NAME);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MMIO region\n");
+ return ret;
+ }
+
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev, "No usable DMA confiugration\n");
+ return ret;
+ }
+
+ ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "No usable coherent DMA confiugration\n");
+ return ret;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR,
+ IFCVF_MAX_INTR, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ IFCVF_ERR(pdev, "Failed to alloc irq vectors\n");
+ return ret;
+ }
+
+ ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev,
+ "Failed for adding devres for freeing irq vectors\n");
+ return ret;
+ }
+
+ adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa,
+ dev, &ifc_vdpa_ops);
+ if (adapter == NULL) {
+ IFCVF_ERR(pdev, "Failed to allocate vDPA structure");
+ return -ENOMEM;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, adapter);
+
+ vf = &adapter->vf;
+ vf->base = pcim_iomap_table(pdev);
+
+ adapter->pdev = pdev;
+ adapter->vdpa.dma_dev = &pdev->dev;
+
+ ret = ifcvf_request_irq(adapter);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to request MSI-X irq\n");
+ goto err;
+ }
+
+ ret = ifcvf_init_hw(vf, pdev);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to init IFCVF hw\n");
+ goto err;
+ }
+
+ ret = vdpa_register_device(&adapter->vdpa);
+ if (ret) {
+ IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ put_device(&adapter->vdpa.dev);
+ return ret;
+}
+
+static void ifcvf_remove(struct pci_dev *pdev)
+{
+ struct ifcvf_adapter *adapter = pci_get_drvdata(pdev);
+
+ vdpa_unregister_device(&adapter->vdpa);
+}
+
+static struct pci_device_id ifcvf_pci_ids[] = {
+ { PCI_DEVICE_SUB(IFCVF_VENDOR_ID,
+ IFCVF_DEVICE_ID,
+ IFCVF_SUBSYS_VENDOR_ID,
+ IFCVF_SUBSYS_DEVICE_ID) },
+ { 0 },
+};
+MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids);
+
+static struct pci_driver ifcvf_driver = {
+ .name = IFCVF_DRIVER_NAME,
+ .id_table = ifcvf_pci_ids,
+ .probe = ifcvf_probe,
+ .remove = ifcvf_remove,
+};
+
+module_pci_driver(ifcvf_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(VERSION_STRING);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
new file mode 100644
index 000000000000..e9ed6a2b635b
--- /dev/null
+++ b/drivers/vdpa/vdpa.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vDPA bus.
+ *
+ * Copyright (c) 2020, Red Hat. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include <linux/vdpa.h>
+
+static DEFINE_IDA(vdpa_index_ida);
+
+static int vdpa_dev_probe(struct device *d)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(d);
+ struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
+ int ret = 0;
+
+ if (drv && drv->probe)
+ ret = drv->probe(vdev);
+
+ return ret;
+}
+
+static int vdpa_dev_remove(struct device *d)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(d);
+ struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
+
+ if (drv && drv->remove)
+ drv->remove(vdev);
+
+ return 0;
+}
+
+static struct bus_type vdpa_bus = {
+ .name = "vdpa",
+ .probe = vdpa_dev_probe,
+ .remove = vdpa_dev_remove,
+};
+
+static void vdpa_release_dev(struct device *d)
+{
+ struct vdpa_device *vdev = dev_to_vdpa(d);
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ if (ops->free)
+ ops->free(vdev);
+
+ ida_simple_remove(&vdpa_index_ida, vdev->index);
+ kfree(vdev);
+}
+
+/**
+ * __vdpa_alloc_device - allocate and initialize a vDPA device
+ * This allows the driver to do some preparation after the device is
+ * initialized but before it is registered.
+ * @parent: the parent device
+ * @config: the bus operations that are supported by this device
+ * @size: size of the parent structure that contains private data
+ *
+ * Drivers should use the vdpa_alloc_device() wrapper macro instead
+ * of using this directly.
+ *
+ * Returns an error when @config is not set, when dma_map()/dma_unmap()
+ * are not provided as a pair, or when index allocation fails.
+ */
+struct vdpa_device *__vdpa_alloc_device(struct device *parent,
+ const struct vdpa_config_ops *config,
+ size_t size)
+{
+ struct vdpa_device *vdev;
+ int err = -EINVAL;
+
+ if (!config)
+ goto err;
+
+ if (!!config->dma_map != !!config->dma_unmap)
+ goto err;
+
+ err = -ENOMEM;
+ vdev = kzalloc(size, GFP_KERNEL);
+ if (!vdev)
+ goto err;
+
+ err = ida_simple_get(&vdpa_index_ida, 0, 0, GFP_KERNEL);
+ if (err < 0)
+ goto err_ida;
+
+ vdev->dev.bus = &vdpa_bus;
+ vdev->dev.parent = parent;
+ vdev->dev.release = vdpa_release_dev;
+ vdev->index = err;
+ vdev->config = config;
+
+ err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index);
+ if (err)
+ goto err_name;
+
+ device_initialize(&vdev->dev);
+
+ return vdev;
+
+err_name:
+ ida_simple_remove(&vdpa_index_ida, vdev->index);
+err_ida:
+ kfree(vdev);
+err:
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(__vdpa_alloc_device);
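+
+/*
+ * Sketch of the expected call pattern (hypothetical names): a parent
+ * driver embeds struct vdpa_device as member "vdpa" of its own
+ * "struct foo" and allocates it with the type-safe wrapper:
+ *
+ *	struct foo *f = vdpa_alloc_device(struct foo, vdpa,
+ *					  parent, &foo_config_ops);
+ *
+ * followed by vdpa_register_device(&f->vdpa) once setup is complete.
+ */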
+
+/**
+ * vdpa_register_device - register a vDPA device
+ * Callers must have made a successful call to vdpa_alloc_device() before.
+ * @vdev: the vdpa device to be registered to the vDPA bus
+ *
+ * Returns an error when the device fails to be added to the vDPA bus.
+ */
+int vdpa_register_device(struct vdpa_device *vdev)
+{
+ return device_add(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(vdpa_register_device);
+
+/**
+ * vdpa_unregister_device - unregister a vDPA device
+ * @vdev: the vdpa device to be unregistered from the vDPA bus
+ */
+void vdpa_unregister_device(struct vdpa_device *vdev)
+{
+ device_unregister(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(vdpa_unregister_device);
+
+/**
+ * __vdpa_register_driver - register a vDPA device driver
+ * @drv: the vdpa device driver to be registered
+ * @owner: module owner of the driver
+ *
+ * Returns an error when the registration fails.
+ */
+int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner)
+{
+ drv->driver.bus = &vdpa_bus;
+ drv->driver.owner = owner;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(__vdpa_register_driver);
+
+/**
+ * vdpa_unregister_driver - unregister a vDPA device driver
+ * @drv: the vdpa device driver to be unregistered
+ */
+void vdpa_unregister_driver(struct vdpa_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(vdpa_unregister_driver);
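+
+/*
+ * A vDPA device driver pairs the two calls above in its module
+ * init/exit, e.g. (sketch with hypothetical names;
+ * vdpa_register_driver() is assumed to be the THIS_MODULE wrapper
+ * around __vdpa_register_driver()):
+ *
+ *	static struct vdpa_driver foo_driver = {
+ *		.driver = { .name = "foo" },
+ *		.probe = foo_probe,
+ *		.remove = foo_remove,
+ *	};
+ *
+ *	vdpa_register_driver(&foo_driver);
+ *	...
+ *	vdpa_unregister_driver(&foo_driver);
+ */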
+
+static int vdpa_init(void)
+{
+ return bus_register(&vdpa_bus);
+}
+
+static void __exit vdpa_exit(void)
+{
+ bus_unregister(&vdpa_bus);
+ ida_destroy(&vdpa_index_ida);
+}
+core_initcall(vdpa_init);
+module_exit(vdpa_exit);
+
+MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile
new file mode 100644
index 000000000000..b40278f65e04
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
new file mode 100644
index 000000000000..6e8a0cf2fdeb
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDPA networking device simulator.
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/uuid.h>
+#include <linux/iommu.h>
+#include <linux/dma-mapping.h>
+#include <linux/sysfs.h>
+#include <linux/file.h>
+#include <linux/etherdevice.h>
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <linux/vhost_iotlb.h>
+#include <uapi/linux/virtio_config.h>
+#include <uapi/linux/virtio_net.h>
+
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define DRV_DESC "vDPA Device Simulator"
+#define DRV_LICENSE "GPL v2"
+
+struct vdpasim_virtqueue {
+ struct vringh vring;
+ struct vringh_kiov iov;
+ unsigned short head;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u32 num;
+ void *private;
+ irqreturn_t (*cb)(void *data);
+};
+
+#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
+#define VDPASIM_QUEUE_MAX 256
+#define VDPASIM_DEVICE_ID 0x1
+#define VDPASIM_VENDOR_ID 0
+#define VDPASIM_VQ_NUM 0x2
+#define VDPASIM_NAME "vdpasim-netdev"
+
+static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM);
+
+/* State of each vdpasim device */
+struct vdpasim {
+ struct vdpa_device vdpa;
+ struct vdpasim_virtqueue vqs[2];
+ struct work_struct work;
+ /* spinlock to synchronize virtqueue state */
+ spinlock_t lock;
+ struct virtio_net_config config;
+ struct vhost_iotlb *iommu;
+ void *buffer;
+ u32 status;
+ u32 generation;
+ u64 features;
+};
+
+static struct vdpasim *vdpasim_dev;
+
+static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
+{
+ return container_of(vdpa, struct vdpasim, vdpa);
+}
+
+static struct vdpasim *dev_to_sim(struct device *dev)
+{
+ struct vdpa_device *vdpa = dev_to_vdpa(dev);
+
+ return vdpa_to_sim(vdpa);
+}
+
+static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
+{
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vringh_init_iotlb(&vq->vring, vdpasim_features,
+ VDPASIM_QUEUE_MAX, false,
+ (struct vring_desc *)(uintptr_t)vq->desc_addr,
+ (struct vring_avail *)
+ (uintptr_t)vq->driver_addr,
+ (struct vring_used *)
+ (uintptr_t)vq->device_addr);
+}
+
+static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
+{
+ vq->ready = 0;
+ vq->desc_addr = 0;
+ vq->driver_addr = 0;
+ vq->device_addr = 0;
+ vq->cb = NULL;
+ vq->private = NULL;
+ vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
+ false, NULL, NULL, NULL);
+}
+
+static void vdpasim_reset(struct vdpasim *vdpasim)
+{
+ int i;
+
+ for (i = 0; i < VDPASIM_VQ_NUM; i++)
+ vdpasim_vq_reset(&vdpasim->vqs[i]);
+
+ vhost_iotlb_reset(vdpasim->iommu);
+
+ vdpasim->features = 0;
+ vdpasim->status = 0;
+ ++vdpasim->generation;
+}
+
+static void vdpasim_work(struct work_struct *work)
+{
+ struct vdpasim *vdpasim = container_of(work, struct vdpasim,
+ work);
+ struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
+ struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
+ size_t read, write, total_write;
+ int err;
+ int pkts = 0;
+
+ spin_lock(&vdpasim->lock);
+
+ if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
+ if (!txq->ready || !rxq->ready)
+ goto out;
+
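+ /*
+ * Loopback datapath: pull each descriptor chain from the TX
+ * ring, copy it through the page-sized bounce buffer into the
+ * RX ring, then fire the completion callbacks. Reschedule
+ * after a small batch so the workqueue is not monopolized.
+ */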
+ while (true) {
+ total_write = 0;
+ err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
+ &txq->head, GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
+ &rxq->head, GFP_ATOMIC);
+ if (err <= 0) {
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ break;
+ }
+
+ while (true) {
+ read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
+ vdpasim->buffer,
+ PAGE_SIZE);
+ if (read <= 0)
+ break;
+
+ write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
+ vdpasim->buffer, read);
+ if (write <= 0)
+ break;
+
+ total_write += write;
+ }
+
+ /* Make sure data is written before advancing the index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (txq->cb)
+ txq->cb(txq->private);
+ if (rxq->cb)
+ rxq->cb(rxq->private);
+ local_bh_enable();
+
+ if (++pkts > 4) {
+ schedule_work(&vdpasim->work);
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock(&vdpasim->lock);
+}
+
+static int dir_to_perm(enum dma_data_direction dir)
+{
+ int perm = -EFAULT;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ perm = VHOST_MAP_WO;
+ break;
+ case DMA_TO_DEVICE:
+ perm = VHOST_MAP_RO;
+ break;
+ case DMA_BIDIRECTIONAL:
+ perm = VHOST_MAP_RW;
+ break;
+ default:
+ break;
+ }
+
+ return perm;
+}
+
+static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+ u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset;
+ int ret, perm = dir_to_perm(dir);
+
+ if (perm < 0)
+ return DMA_MAPPING_ERROR;
+
+ /* For simplicity, use an identity mapping to avoid e.g. an
+ * IOVA allocator.
+ */
+ ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1,
+ pa, perm);
+ if (ret)
+ return DMA_MAPPING_ERROR;
+
+ return (dma_addr_t)(pa);
+}
+
+static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+
+ vhost_iotlb_del_range(iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+}
+
+static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_addr, gfp_t flag,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+ void *addr = kmalloc(size, flag);
+ int ret;
+
+ if (!addr)
+ *dma_addr = DMA_MAPPING_ERROR;
+ else {
+ u64 pa = virt_to_phys(addr);
+
+ ret = vhost_iotlb_add_range(iommu, (u64)pa,
+ (u64)pa + size - 1,
+ pa, VHOST_MAP_RW);
+ if (ret) {
+ *dma_addr = DMA_MAPPING_ERROR;
+ kfree(addr);
+ addr = NULL;
+ } else
+ *dma_addr = (dma_addr_t)pa;
+ }
+
+ return addr;
+}
+
+static void vdpasim_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ unsigned long attrs)
+{
+ struct vdpasim *vdpasim = dev_to_sim(dev);
+ struct vhost_iotlb *iommu = vdpasim->iommu;
+
+ vhost_iotlb_del_range(iommu, (u64)dma_addr,
+ (u64)dma_addr + size - 1);
+ kfree(phys_to_virt((uintptr_t)dma_addr));
+}
+
+static const struct dma_map_ops vdpasim_dma_ops = {
+ .map_page = vdpasim_map_page,
+ .unmap_page = vdpasim_unmap_page,
+ .alloc = vdpasim_alloc_coherent,
+ .free = vdpasim_free_coherent,
+};
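+
+/*
+ * These dma_map_ops back the simulator's own struct device, so a
+ * buffer mapped for "DMA" simply lands in the software IOTLB with an
+ * identity translation; e.g. (sketch):
+ *
+ *	dma_addr_t da = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
+ *
+ * after which the IOTLB holds [da, da + len - 1] -> da with read-only
+ * access for the device.
+ */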
+
+static const struct vdpa_config_ops vdpasim_net_config_ops;
+
+static struct vdpasim *vdpasim_create(void)
+{
+ struct virtio_net_config *config;
+ struct vdpasim *vdpasim;
+ struct device *dev;
+ int ret = -ENOMEM;
+
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
+ &vdpasim_net_config_ops);
+ if (!vdpasim)
+ goto err_alloc;
+
+ INIT_WORK(&vdpasim->work, vdpasim_work);
+ spin_lock_init(&vdpasim->lock);
+
+ dev = &vdpasim->vdpa.dev;
+ dev->coherent_dma_mask = DMA_BIT_MASK(64);
+ set_dma_ops(dev, &vdpasim_dma_ops);
+
+ vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
+ if (!vdpasim->iommu)
+ goto err_iommu;
+
+ vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!vdpasim->buffer)
+ goto err_iommu;
+
+ config = &vdpasim->config;
+ config->mtu = 1500;
+ config->status = VIRTIO_NET_S_LINK_UP;
+ eth_random_addr(config->mac);
+
+ vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
+ vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
+
+ vdpasim->vdpa.dma_dev = dev;
+ ret = vdpa_register_device(&vdpasim->vdpa);
+ if (ret)
+ goto err_iommu;
+
+ return vdpasim;
+
+err_iommu:
+ put_device(dev);
+err_alloc:
+ return ERR_PTR(ret);
+}
+
+static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vq->desc_addr = desc_area;
+ vq->driver_addr = driver_area;
+ vq->device_addr = device_area;
+
+ return 0;
+}
+
+static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vq->num = num;
+}
+
+static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ if (vq->ready)
+ schedule_work(&vdpasim->work);
+}
+
+static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
+ struct vdpa_callback *cb)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ vq->cb = cb->callback;
+ vq->private = cb->private;
+}
+
+static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ spin_lock(&vdpasim->lock);
+ vq->ready = ready;
+ if (vq->ready)
+ vdpasim_queue_ready(vdpasim, idx);
+ spin_unlock(&vdpasim->lock);
+}
+
+static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+
+ return vq->ready;
+}
+
+static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ struct vringh *vrh = &vq->vring;
+
+ spin_lock(&vdpasim->lock);
+ vrh->last_avail_idx = state;
+ spin_unlock(&vdpasim->lock);
+
+ return 0;
+}
+
+static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
+ struct vringh *vrh = &vq->vring;
+
+ return vrh->last_avail_idx;
+}
+
+static u16 vdpasim_get_vq_align(struct vdpa_device *vdpa)
+{
+ return VDPASIM_QUEUE_ALIGN;
+}
+
+static u64 vdpasim_get_features(struct vdpa_device *vdpa)
+{
+ return vdpasim_features;
+}
+
+static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ /* DMA mapping must be done by driver */
+ if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+ return -EINVAL;
+
+ vdpasim->features = features & vdpasim_features;
+
+ return 0;
+}
+
+static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ /* We don't support config interrupts */
+}
+
+static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ return VDPASIM_QUEUE_MAX;
+}
+
+static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
+{
+ return VDPASIM_DEVICE_ID;
+}
+
+static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
+{
+ return VDPASIM_VENDOR_ID;
+}
+
+static u8 vdpasim_get_status(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ u8 status;
+
+ spin_lock(&vdpasim->lock);
+ status = vdpasim->status;
+ spin_unlock(&vdpasim->lock);
+
+ return status;
+}
+
+static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ spin_lock(&vdpasim->lock);
+ vdpasim->status = status;
+ if (status == 0)
+ vdpasim_reset(vdpasim);
+ spin_unlock(&vdpasim->lock);
+}
+
+static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ if (offset + len <= sizeof(struct virtio_net_config))
+ memcpy(buf, (u8 *)&vdpasim->config + offset, len);
+}
+
+static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
+ const void *buf, unsigned int len)
+{
+ /* No writable config supported by vdpasim */
+}
+
+static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->generation;
+}
+
+static int vdpasim_set_map(struct vdpa_device *vdpa,
+ struct vhost_iotlb *iotlb)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+ struct vhost_iotlb_map *map;
+ u64 start = 0ULL, last = 0ULL - 1;
+ int ret;
+
+ vhost_iotlb_reset(vdpasim->iommu);
+
+ for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ map = vhost_iotlb_itree_next(map, start, last)) {
+ ret = vhost_iotlb_add_range(vdpasim->iommu, map->start,
+ map->last, map->addr, map->perm);
+ if (ret)
+ goto err;
+ }
+ return 0;
+
+err:
+ vhost_iotlb_reset(vdpasim->iommu);
+ return ret;
+}
+
+static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size,
+ u64 pa, u32 perm)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vhost_iotlb_add_range(vdpasim->iommu, iova,
+ iova + size - 1, pa, perm);
+}
+
+static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1);
+
+ return 0;
+}
+
+static void vdpasim_free(struct vdpa_device *vdpa)
+{
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ cancel_work_sync(&vdpasim->work);
+ kfree(vdpasim->buffer);
+ if (vdpasim->iommu)
+ vhost_iotlb_free(vdpasim->iommu);
+}
+
+static const struct vdpa_config_ops vdpasim_net_config_ops = {
+ .set_vq_address = vdpasim_set_vq_address,
+ .set_vq_num = vdpasim_set_vq_num,
+ .kick_vq = vdpasim_kick_vq,
+ .set_vq_cb = vdpasim_set_vq_cb,
+ .set_vq_ready = vdpasim_set_vq_ready,
+ .get_vq_ready = vdpasim_get_vq_ready,
+ .set_vq_state = vdpasim_set_vq_state,
+ .get_vq_state = vdpasim_get_vq_state,
+ .get_vq_align = vdpasim_get_vq_align,
+ .get_features = vdpasim_get_features,
+ .set_features = vdpasim_set_features,
+ .set_config_cb = vdpasim_set_config_cb,
+ .get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_device_id = vdpasim_get_device_id,
+ .get_vendor_id = vdpasim_get_vendor_id,
+ .get_status = vdpasim_get_status,
+ .set_status = vdpasim_set_status,
+ .get_config = vdpasim_get_config,
+ .set_config = vdpasim_set_config,
+ .get_generation = vdpasim_get_generation,
+ .set_map = vdpasim_set_map,
+ .dma_map = vdpasim_dma_map,
+ .dma_unmap = vdpasim_dma_unmap,
+ .free = vdpasim_free,
+};
+
+static int __init vdpasim_dev_init(void)
+{
+ vdpasim_dev = vdpasim_create();
+
+ if (!IS_ERR(vdpasim_dev))
+ return 0;
+
+ return PTR_ERR(vdpasim_dev);
+}
+
+static void __exit vdpasim_dev_exit(void)
+{
+ struct vdpa_device *vdpa = &vdpasim_dev->vdpa;
+
+ vdpa_unregister_device(vdpa);
+}
+
+module_init(vdpasim_dev_init)
+module_exit(vdpasim_dev_exit)
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
index 3d03ccbd1adc..362b832f5338 100644
--- a/drivers/vhost/Kconfig
+++ b/drivers/vhost/Kconfig
@@ -1,4 +1,29 @@
# SPDX-License-Identifier: GPL-2.0-only
+config VHOST_IOTLB
+ tristate
+ help
+ Generic IOTLB implementation for vhost and vringh.
+
+config VHOST_RING
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the host side of a virtio ring.
+
+config VHOST
+ tristate
+ select VHOST_IOTLB
+ help
+ This option is selected by any driver which needs to access
+ the core of vhost.
+
+menuconfig VHOST_MENU
+ bool "VHOST drivers"
+ default y
+
+if VHOST_MENU
+
config VHOST_NET
tristate "Host kernel accelerator for virtio net"
depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP)
@@ -23,8 +48,8 @@ config VHOST_SCSI
config VHOST_VSOCK
tristate "vhost virtio-vsock driver"
depends on VSOCKETS && EVENTFD
- select VIRTIO_VSOCKETS_COMMON
select VHOST
+ select VIRTIO_VSOCKETS_COMMON
default n
---help---
This kernel module can be loaded in the host kernel to provide AF_VSOCK
@@ -34,11 +59,17 @@ config VHOST_VSOCK
To compile this driver as a module, choose M here: the module will be called
vhost_vsock.
-config VHOST
- tristate
- ---help---
- This option is selected by any driver which needs to access
- the core of vhost.
+config VHOST_VDPA
+ tristate "Vhost driver for vDPA-based backend"
+ depends on EVENTFD
+ select VHOST
+ select VDPA
+ help
+ This kernel module can be loaded in the host kernel to accelerate
+ guest virtio devices with vDPA-based backends.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vhost_vdpa.
config VHOST_CROSS_ENDIAN_LEGACY
bool "Cross-endian support for vhost"
@@ -54,3 +85,5 @@ config VHOST_CROSS_ENDIAN_LEGACY
adds some overhead, it is disabled by default.
If unsure, say "N".
+
+endif
diff --git a/drivers/vhost/Kconfig.vringh b/drivers/vhost/Kconfig.vringh
deleted file mode 100644
index c1fe36a9b8d4..000000000000
--- a/drivers/vhost/Kconfig.vringh
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config VHOST_RING
- tristate
- ---help---
- This option is selected by any driver which needs to access
- the host side of a virtio ring.
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
index 6c6df24f770c..f3e1897cce85 100644
--- a/drivers/vhost/Makefile
+++ b/drivers/vhost/Makefile
@@ -10,4 +10,10 @@ vhost_vsock-y := vsock.o
obj-$(CONFIG_VHOST_RING) += vringh.o
+obj-$(CONFIG_VHOST_VDPA) += vhost_vdpa.o
+vhost_vdpa-y := vdpa.o
+
obj-$(CONFIG_VHOST) += vhost.o
+
+obj-$(CONFIG_VHOST_IOTLB) += vhost_iotlb.o
+vhost_iotlb-y := iotlb.o
diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c
new file mode 100644
index 000000000000..1f0ca6e44410
--- /dev/null
+++ b/drivers/vhost/iotlb.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Red Hat, Inc.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ * IOTLB implementation for vhost.
+ */
+#include <linux/slab.h>
+#include <linux/vhost_iotlb.h>
+#include <linux/module.h>
+
+#define MOD_VERSION "0.1"
+#define MOD_DESC "VHOST IOTLB"
+#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define MOD_LICENSE "GPL v2"
+
+#define START(map) ((map)->start)
+#define LAST(map) ((map)->last)
+
+INTERVAL_TREE_DEFINE(struct vhost_iotlb_map,
+ rb, __u64, __subtree_last,
+ START, LAST, static inline, vhost_iotlb_itree);
+
+/**
+ * vhost_iotlb_map_free - remove a map node and free it
+ * @iotlb: the IOTLB
+ * @map: the map to be removed and freed
+ */
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map)
+{
+ vhost_iotlb_itree_remove(map, &iotlb->root);
+ list_del(&map->link);
+ kfree(map);
+ iotlb->nmaps--;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_map_free);
+
+/**
+ * vhost_iotlb_add_range - add a new range to vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last of IOVA range
+ * @addr: the address that is mapped to @start
+ * @perm: access permission of this range
+ *
+ * Returns an error if @last is smaller than @start or if memory
+ * allocation fails.
+ */
+int vhost_iotlb_add_range(struct vhost_iotlb *iotlb,
+ u64 start, u64 last,
+ u64 addr, unsigned int perm)
+{
+ struct vhost_iotlb_map *map;
+
+ if (last < start)
+ return -EFAULT;
+
+ if (iotlb->limit &&
+ iotlb->nmaps == iotlb->limit &&
+ iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) {
+ map = list_first_entry(&iotlb->list, typeof(*map), link);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+
+ map = kmalloc(sizeof(*map), GFP_ATOMIC);
+ if (!map)
+ return -ENOMEM;
+
+ map->start = start;
+ map->size = last - start + 1;
+ map->last = last;
+ map->addr = addr;
+ map->perm = perm;
+
+ iotlb->nmaps++;
+ vhost_iotlb_itree_insert(map, &iotlb->root);
+
+ INIT_LIST_HEAD(&map->link);
+ list_add_tail(&map->link, &iotlb->list);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_add_range);
+
+/**
+ * vhost_iotlb_del_range - delete overlapped ranges from vhost IOTLB
+ * @iotlb: the IOTLB
+ * @start: start of the IOVA range
+ * @last: last of IOVA range
+ */
+void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ struct vhost_iotlb_map *map;
+
+ while ((map = vhost_iotlb_itree_iter_first(&iotlb->root,
+ start, last)))
+ vhost_iotlb_map_free(iotlb, map);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_del_range);
+
+/**
+ * vhost_iotlb_alloc - allocate a new vhost IOTLB
+ * @limit: maximum number of IOTLB entries
+ * @flags: VHOST_IOTLB_FLAG_XXX
+ *
+ * Returns NULL if memory allocation fails
+ */
+struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags)
+{
+ struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL);
+
+ if (!iotlb)
+ return NULL;
+
+ iotlb->root = RB_ROOT_CACHED;
+ iotlb->limit = limit;
+ iotlb->nmaps = 0;
+ iotlb->flags = flags;
+ INIT_LIST_HEAD(&iotlb->list);
+
+ return iotlb;
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_alloc);
+
+/**
+ * vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries)
+ * @iotlb: the IOTLB to be reset
+ */
+void vhost_iotlb_reset(struct vhost_iotlb *iotlb)
+{
+ vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_reset);
+
+/**
+ * vhost_iotlb_free - reset and free vhost IOTLB
+ * @iotlb: the IOTLB to be freed
+ */
+void vhost_iotlb_free(struct vhost_iotlb *iotlb)
+{
+ if (iotlb) {
+ vhost_iotlb_reset(iotlb);
+ kfree(iotlb);
+ }
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_free);
+
+/**
+ * vhost_iotlb_itree_first - return the first overlapped range
+ * @iotlb: the IOTLB
+ * @start: start of IOVA range
+ * @last: last of IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_first(&iotlb->root, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first);
+
+/**
+ * vhost_iotlb_itree_next - return the next overlapped range
+ * @map: the current map node
+ * @start: start of IOVA range
+ * @last: last of IOVA range
+ */
+struct vhost_iotlb_map *
+vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last)
+{
+ return vhost_iotlb_itree_iter_next(map, start, last);
+}
+EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next);
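+
+/*
+ * The two itree helpers above are meant to be used together to walk
+ * every mapping overlapping [start, last], e.g. (sketch, where use()
+ * stands for the caller's per-mapping work):
+ *
+ *	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
+ *	     map = vhost_iotlb_itree_next(map, start, last))
+ *		use(map->start, map->last, map->addr, map->perm);
+ */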
+
+MODULE_VERSION(MOD_VERSION);
+MODULE_DESCRIPTION(MOD_DESC);
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_LICENSE(MOD_LICENSE);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 18e205eeb9af..87469d67ede8 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1324,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
UIO_MAXIOV + VHOST_NET_BATCH,
- VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT,
+ NULL);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
@@ -1586,7 +1587,7 @@ static long vhost_net_reset_owner(struct vhost_net *n)
struct socket *tx_sock = NULL;
struct socket *rx_sock = NULL;
long err;
- struct vhost_umem *umem;
+ struct vhost_iotlb *umem;
mutex_lock(&n->dev.mutex);
err = vhost_dev_check_owner(&n->dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0b949a14bce3..7653667a8cdc 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
- VHOST_SCSI_WEIGHT, 0);
+ VHOST_SCSI_WEIGHT, 0, NULL);
vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
new file mode 100644
index 000000000000..421f02a8530a
--- /dev/null
+++ b/drivers/vhost/vdpa.c
@@ -0,0 +1,883 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018-2020 Intel Corporation.
+ * Copyright (C) 2020 Red Hat, Inc.
+ *
+ * Author: Tiwei Bie <tiwei.bie@intel.com>
+ * Jason Wang <jasowang@redhat.com>
+ *
+ * Thanks Michael S. Tsirkin for the valuable comments and
+ * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
+ * their supports.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/uuid.h>
+#include <linux/vdpa.h>
+#include <linux/nospec.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+
+#include "vhost.h"
+
+enum {
+ VHOST_VDPA_FEATURES =
+ (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1) |
+ (1ULL << VIRTIO_F_IOMMU_PLATFORM) |
+ (1ULL << VIRTIO_F_RING_PACKED) |
+ (1ULL << VIRTIO_F_ORDER_PLATFORM) |
+ (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1ULL << VIRTIO_RING_F_EVENT_IDX),
+
+ VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES |
+ (1ULL << VIRTIO_NET_F_CSUM) |
+ (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
+ (1ULL << VIRTIO_NET_F_MTU) |
+ (1ULL << VIRTIO_NET_F_MAC) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
+ (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
+ (1ULL << VIRTIO_NET_F_GUEST_ECN) |
+ (1ULL << VIRTIO_NET_F_GUEST_UFO) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO4) |
+ (1ULL << VIRTIO_NET_F_HOST_TSO6) |
+ (1ULL << VIRTIO_NET_F_HOST_ECN) |
+ (1ULL << VIRTIO_NET_F_HOST_UFO) |
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
+ (1ULL << VIRTIO_NET_F_STATUS) |
+ (1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
+};
+
+/* Currently, only network backend w/o multiqueue is supported. */
+#define VHOST_VDPA_VQ_MAX 2
+
+#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
+
+struct vhost_vdpa {
+ struct vhost_dev vdev;
+ struct iommu_domain *domain;
+ struct vhost_virtqueue *vqs;
+ struct completion completion;
+ struct vdpa_device *vdpa;
+ struct device dev;
+ struct cdev cdev;
+ atomic_t opened;
+ int nvqs;
+ int virtio_id;
+ int minor;
+};
+
+static DEFINE_IDA(vhost_vdpa_ida);
+
+static dev_t vhost_vdpa_major;
+
+static const u64 vhost_vdpa_features[] = {
+ [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES,
+};
+
+static void handle_vq_kick(struct vhost_work *work)
+{
+ struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
+ poll.work);
+ struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
+ const struct vdpa_config_ops *ops = v->vdpa->config;
+
+ ops->kick_vq(v->vdpa, vq - v->vqs);
+}
+
+static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
+{
+ struct vhost_virtqueue *vq = private;
+ struct eventfd_ctx *call_ctx = vq->call_ctx;
+
+ if (call_ctx)
+ eventfd_signal(call_ctx, 1);
+
+ return IRQ_HANDLED;
+}
+
+static void vhost_vdpa_reset(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->set_status(vdpa, 0);
+}
+
+static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u32 device_id;
+
+ device_id = ops->get_device_id(vdpa);
+
+ if (copy_to_user(argp, &device_id, sizeof(device_id)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status;
+
+ status = ops->get_status(vdpa);
+
+ if (copy_to_user(statusp, &status, sizeof(status)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u8 status;
+
+ if (copy_from_user(&status, statusp, sizeof(status)))
+ return -EFAULT;
+
+ /*
+ * Userspace shouldn't remove status bits unless it resets the
+ * status to 0.
+ */
+ if (status != 0 && (ops->get_status(vdpa) & ~status) != 0)
+ return -EINVAL;
+
+ ops->set_status(vdpa, status);
+
+ return 0;
+}
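+
+/*
+ * From the VMM side the status is driven through the ioctls above in
+ * the usual virtio order, e.g. (userspace sketch):
+ *
+ *	__u8 s = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
+ *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
+ *	(negotiate features, set up the rings)
+ *	s |= VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK;
+ *	ioctl(fd, VHOST_VDPA_SET_STATUS, &s);
+ *
+ * The set path above only allows bits to be added, never removed,
+ * except for a full reset to 0.
+ */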
+
+static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
+ struct vhost_vdpa_config *c)
+{
+ long size = 0;
+
+ switch (v->virtio_id) {
+ case VIRTIO_ID_NET:
+ size = sizeof(struct virtio_net_config);
+ break;
+ }
+
+ if (c->len == 0)
+ return -EINVAL;
+
+ if (c->len > size - c->off)
+ return -E2BIG;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+ buf = kvzalloc(config.len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ops->get_config(vdpa, config.off, buf, config.len);
+
+ if (copy_to_user(c->buf, buf, config.len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ kvfree(buf);
+ return 0;
+}
+
+static long vhost_vdpa_set_config(struct vhost_vdpa *v,
+ struct vhost_vdpa_config __user *c)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa_config config;
+ unsigned long size = offsetof(struct vhost_vdpa_config, buf);
+ u8 *buf;
+
+ if (copy_from_user(&config, c, size))
+ return -EFAULT;
+ if (vhost_vdpa_config_validate(v, &config))
+ return -EINVAL;
+ buf = kvzalloc(config.len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, c->buf, config.len)) {
+ kvfree(buf);
+ return -EFAULT;
+ }
+
+ ops->set_config(vdpa, config.off, buf, config.len);
+
+ kvfree(buf);
+ return 0;
+}
+
+static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u64 features;
+
+ features = ops->get_features(vdpa);
+ features &= vhost_vdpa_features[v->virtio_id];
+
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u64 features;
+
+ /*
+ * It's not allowed to change the features after they have
+ * been negotiated.
+ */
+ if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
+ return -EBUSY;
+
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+
+ if (features & ~vhost_vdpa_features[v->virtio_id])
+ return -EINVAL;
+
+ if (ops->set_features(vdpa, features))
+ return -EINVAL;
+
+ return 0;
+}
+
+static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ u16 num;
+
+ num = ops->get_vq_num_max(vdpa);
+
+ if (copy_to_user(argp, &num, sizeof(num)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
+ void __user *argp)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_callback cb;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ u8 status;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, (u32 __user *)argp);
+ if (r < 0)
+ return r;
+
+ if (idx >= v->nvqs)
+ return -ENOBUFS;
+
+ idx = array_index_nospec(idx, v->nvqs);
+ vq = &v->vqs[idx];
+
+ status = ops->get_status(vdpa);
+
+ if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
+ if (copy_from_user(&s, argp, sizeof(s)))
+ return -EFAULT;
+ ops->set_vq_ready(vdpa, idx, s.num);
+ return 0;
+ }
+
+ if (cmd == VHOST_GET_VRING_BASE)
+ vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
+
+ r = vhost_vring_ioctl(&v->vdev, cmd, argp);
+ if (r)
+ return r;
+
+ switch (cmd) {
+ case VHOST_SET_VRING_ADDR:
+ if (ops->set_vq_address(vdpa, idx,
+ (u64)(uintptr_t)vq->desc,
+ (u64)(uintptr_t)vq->avail,
+ (u64)(uintptr_t)vq->used))
+ r = -EINVAL;
+ break;
+
+ case VHOST_SET_VRING_BASE:
+ if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx))
+ r = -EINVAL;
+ break;
+
+ case VHOST_SET_VRING_CALL:
+ if (vq->call_ctx) {
+ cb.callback = vhost_vdpa_virtqueue_cb;
+ cb.private = vq;
+ } else {
+ cb.callback = NULL;
+ cb.private = NULL;
+ }
+ ops->set_vq_cb(vdpa, idx, &cb);
+ break;
+
+ case VHOST_SET_VRING_NUM:
+ ops->set_vq_num(vdpa, idx, vq->num);
+ break;
+ }
+
+ return r;
+}
+
+static long vhost_vdpa_unlocked_ioctl(struct file *filep,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+ void __user *argp = (void __user *)arg;
+ long r;
+
+ mutex_lock(&d->mutex);
+
+ switch (cmd) {
+ case VHOST_VDPA_GET_DEVICE_ID:
+ r = vhost_vdpa_get_device_id(v, argp);
+ break;
+ case VHOST_VDPA_GET_STATUS:
+ r = vhost_vdpa_get_status(v, argp);
+ break;
+ case VHOST_VDPA_SET_STATUS:
+ r = vhost_vdpa_set_status(v, argp);
+ break;
+ case VHOST_VDPA_GET_CONFIG:
+ r = vhost_vdpa_get_config(v, argp);
+ break;
+ case VHOST_VDPA_SET_CONFIG:
+ r = vhost_vdpa_set_config(v, argp);
+ break;
+ case VHOST_GET_FEATURES:
+ r = vhost_vdpa_get_features(v, argp);
+ break;
+ case VHOST_SET_FEATURES:
+ r = vhost_vdpa_set_features(v, argp);
+ break;
+ case VHOST_VDPA_GET_VRING_NUM:
+ r = vhost_vdpa_get_vring_num(v, argp);
+ break;
+ case VHOST_SET_LOG_BASE:
+ case VHOST_SET_LOG_FD:
+ r = -ENOIOCTLCMD;
+ break;
+ default:
+ r = vhost_dev_ioctl(&v->vdev, cmd, argp);
+ if (r == -ENOIOCTLCMD)
+ r = vhost_vdpa_vring_ioctl(v, cmd, argp);
+ break;
+ }
+
+ mutex_unlock(&d->mutex);
+ return r;
+}
+
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct vhost_iotlb_map *map;
+ struct page *page;
+ unsigned long pfn, pinned;
+
+ while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
+ pinned = map->size >> PAGE_SHIFT;
+ for (pfn = map->addr >> PAGE_SHIFT;
+ pinned > 0; pfn++, pinned--) {
+ page = pfn_to_page(pfn);
+ if (map->perm & VHOST_ACCESS_WO)
+ set_page_dirty_lock(page);
+ unpin_user_page(page);
+ }
+ atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm);
+ vhost_iotlb_map_free(iotlb, map);
+ }
+}
+
+static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
+{
+ struct vhost_dev *dev = &v->vdev;
+
+ vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
+ kfree(dev->iotlb);
+ dev->iotlb = NULL;
+}
+
+static int perm_to_iommu_flags(u32 perm)
+{
+ int flags = 0;
+
+ switch (perm) {
+ case VHOST_ACCESS_WO:
+ flags |= IOMMU_WRITE;
+ break;
+ case VHOST_ACCESS_RO:
+ flags |= IOMMU_READ;
+ break;
+ case VHOST_ACCESS_RW:
+ flags |= (IOMMU_WRITE | IOMMU_READ);
+ break;
+ default:
+ WARN(1, "invalidate vhost IOTLB permission\n");
+ break;
+ }
+
+ return flags | IOMMU_CACHE;
+}
+
+static int vhost_vdpa_map(struct vhost_vdpa *v,
+ u64 iova, u64 size, u64 pa, u32 perm)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ int r = 0;
+
+ r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1,
+ pa, perm);
+ if (r)
+ return r;
+
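+ /* Three backends in preference order: device-specific dma_map,
+ * whole-table set_map, or the platform IOMMU domain.
+ */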
+ if (ops->dma_map)
+ r = ops->dma_map(vdpa, iova, size, pa, perm);
+ else if (ops->set_map)
+ r = ops->set_map(vdpa, dev->iotlb);
+ else
+ r = iommu_map(v->domain, iova, pa, size,
+ perm_to_iommu_flags(perm));
+
+ return r;
+}
+
+static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+
+ if (ops->dma_map)
+ ops->dma_unmap(vdpa, iova, size);
+ else if (ops->set_map)
+ ops->set_map(vdpa, dev->iotlb);
+ else
+ iommu_unmap(v->domain, iova, size);
+}
+
+static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_dev *dev = &v->vdev;
+ struct vhost_iotlb *iotlb = dev->iotlb;
+ struct page **page_list;
+ unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+ unsigned int gup_flags = FOLL_LONGTERM;
+ unsigned long npages, cur_base, map_pfn, last_pfn = 0;
+ unsigned long locked, lock_limit, pinned, i;
+ u64 iova = msg->iova;
+ int ret = 0;
+
+ if (vhost_iotlb_itree_first(iotlb, msg->iova,
+ msg->iova + msg->size - 1))
+ return -EEXIST;
+
+ page_list = (struct page **) __get_free_page(GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ if (msg->perm & VHOST_ACCESS_WO)
+ gup_flags |= FOLL_WRITE;
+
+ npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT;
+ if (!npages) {
+ free_page((unsigned long)page_list);
+ return -EINVAL;
+ }
+
+ down_read(&dev->mm->mmap_sem);
+
+ locked = atomic64_add_return(npages, &dev->mm->pinned_vm);
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (locked > lock_limit) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ cur_base = msg->uaddr & PAGE_MASK;
+ iova &= PAGE_MASK;
+
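+ /*
+ * Walk the pinned pages and merge physically contiguous runs:
+ * a mapping is flushed whenever the next pfn does not extend
+ * the current run (this_pfn != last_pfn + 1).
+ */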
+ while (npages) {
+ pinned = min_t(unsigned long, npages, list_size);
+ ret = pin_user_pages(cur_base, pinned,
+ gup_flags, page_list, NULL);
+ if (ret != pinned)
+ goto out;
+
+ if (!last_pfn)
+ map_pfn = page_to_pfn(page_list[0]);
+
+ for (i = 0; i < ret; i++) {
+ unsigned long this_pfn = page_to_pfn(page_list[i]);
+ u64 csize;
+
+ if (last_pfn && (this_pfn != last_pfn + 1)) {
+ /* Map a contiguous chunk of pinned memory */
+ csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT;
+ if (vhost_vdpa_map(v, iova, csize,
+ map_pfn << PAGE_SHIFT,
+ msg->perm))
+ goto out;
+ map_pfn = this_pfn;
+ iova += csize;
+ }
+
+ last_pfn = this_pfn;
+ }
+
+ cur_base += ret << PAGE_SHIFT;
+ npages -= ret;
+ }
+
+ /* Map the remaining chunk */
+ ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT,
+ map_pfn << PAGE_SHIFT, msg->perm);
+out:
+ if (ret) {
+ vhost_vdpa_unmap(v, msg->iova, msg->size);
+ atomic64_sub(npages, &dev->mm->pinned_vm);
+ }
+ up_read(&dev->mm->mmap_sem);
+ free_page((unsigned long)page_list);
+ return ret;
+}
+
+static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg)
+{
+ struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ int r = 0;
+
+ r = vhost_dev_check_owner(dev);
+ if (r)
+ return r;
+
+ switch (msg->type) {
+ case VHOST_IOTLB_UPDATE:
+ r = vhost_vdpa_process_iotlb_update(v, msg);
+ break;
+ case VHOST_IOTLB_INVALIDATE:
+ vhost_vdpa_unmap(v, msg->iova, msg->size);
+ break;
+ default:
+ r = -EINVAL;
+ break;
+ }
+
+ return r;
+}
+
+static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vdpa *v = file->private_data;
+ struct vhost_dev *dev = &v->vdev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+ struct bus_type *bus;
+ int ret;
+
+ /* The device wants to do DMA by itself */
+ if (ops->set_map || ops->dma_map)
+ return 0;
+
+ bus = dma_dev->bus;
+ if (!bus)
+ return -EFAULT;
+
+ if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
+ return -ENOTSUPP;
+
+ v->domain = iommu_domain_alloc(bus);
+ if (!v->domain)
+ return -EIO;
+
+ ret = iommu_attach_device(v->domain, dma_dev);
+ if (ret)
+ goto err_attach;
+
+ return 0;
+
+err_attach:
+ iommu_domain_free(v->domain);
+ return ret;
+}
+
+static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
+{
+ struct vdpa_device *vdpa = v->vdpa;
+ struct device *dma_dev = vdpa_get_dma_dev(vdpa);
+
+ if (v->domain) {
+ iommu_detach_device(v->domain, dma_dev);
+ iommu_domain_free(v->domain);
+ }
+
+ v->domain = NULL;
+}
+
+static int vhost_vdpa_open(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v;
+ struct vhost_dev *dev;
+ struct vhost_virtqueue **vqs;
+ int nvqs, i, r, opened;
+
+ v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);
+ if (!v)
+ return -ENODEV;
+
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (opened)
+ return -EBUSY;
+
+ nvqs = v->nvqs;
+ vhost_vdpa_reset(v);
+
+ vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
+ if (!vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ dev = &v->vdev;
+ for (i = 0; i < nvqs; i++) {
+ vqs[i] = &v->vqs[i];
+ vqs[i]->handle_kick = handle_vq_kick;
+ }
+ vhost_dev_init(dev, vqs, nvqs, 0, 0, 0,
+ vhost_vdpa_process_iotlb_msg);
+
+ dev->iotlb = vhost_iotlb_alloc(0, 0);
+ if (!dev->iotlb) {
+ r = -ENOMEM;
+ goto err_init_iotlb;
+ }
+
+ r = vhost_vdpa_alloc_domain(v);
+ if (r)
+ goto err_init_iotlb;
+
+ filep->private_data = v;
+
+ return 0;
+
+err_init_iotlb:
+ vhost_dev_cleanup(&v->vdev);
+err:
+ atomic_dec(&v->opened);
+ return r;
+}
+
+static int vhost_vdpa_release(struct inode *inode, struct file *filep)
+{
+ struct vhost_vdpa *v = filep->private_data;
+ struct vhost_dev *d = &v->vdev;
+
+ mutex_lock(&d->mutex);
+ filep->private_data = NULL;
+ vhost_vdpa_reset(v);
+ vhost_dev_stop(&v->vdev);
+ vhost_vdpa_iotlb_free(v);
+ vhost_vdpa_free_domain(v);
+ vhost_dev_cleanup(&v->vdev);
+ kfree(v->vdev.vqs);
+ mutex_unlock(&d->mutex);
+
+ atomic_dec(&v->opened);
+ complete(&v->completion);
+
+ return 0;
+}
+
+static const struct file_operations vhost_vdpa_fops = {
+ .owner = THIS_MODULE,
+ .open = vhost_vdpa_open,
+ .release = vhost_vdpa_release,
+ .write_iter = vhost_vdpa_chr_write_iter,
+ .unlocked_ioctl = vhost_vdpa_unlocked_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
+
+static void vhost_vdpa_release_dev(struct device *device)
+{
+ struct vhost_vdpa *v =
+ container_of(device, struct vhost_vdpa, dev);
+
+ ida_simple_remove(&vhost_vdpa_ida, v->minor);
+ kfree(v->vqs);
+ kfree(v);
+}
+
+static int vhost_vdpa_probe(struct vdpa_device *vdpa)
+{
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vhost_vdpa *v;
+ int minor, nvqs = VHOST_VDPA_VQ_MAX;
+ int r;
+
+ /* Currently, we only accept network devices. */
+ if (ops->get_device_id(vdpa) != VIRTIO_ID_NET)
+ return -ENOTSUPP;
+
+ v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+ if (!v)
+ return -ENOMEM;
+
+ minor = ida_simple_get(&vhost_vdpa_ida, 0,
+ VHOST_VDPA_DEV_MAX, GFP_KERNEL);
+ if (minor < 0) {
+ kfree(v);
+ return minor;
+ }
+
+ atomic_set(&v->opened, 0);
+ v->minor = minor;
+ v->vdpa = vdpa;
+ v->nvqs = nvqs;
+ v->virtio_id = ops->get_device_id(vdpa);
+
+ device_initialize(&v->dev);
+ v->dev.release = vhost_vdpa_release_dev;
+ v->dev.parent = &vdpa->dev;
+ v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
+ v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue),
+ GFP_KERNEL);
+ if (!v->vqs) {
+ r = -ENOMEM;
+ goto err;
+ }
+
+ r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
+ if (r)
+ goto err;
+
+ cdev_init(&v->cdev, &vhost_vdpa_fops);
+ v->cdev.owner = THIS_MODULE;
+
+ r = cdev_device_add(&v->cdev, &v->dev);
+ if (r)
+ goto err;
+
+ init_completion(&v->completion);
+ vdpa_set_drvdata(vdpa, v);
+
+ return 0;
+
+err:
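+ /* The final put_device() calls vhost_vdpa_release_dev(), which
+  * frees the ida slot, the vqs array and v itself.
+  */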
+ put_device(&v->dev);
+ return r;
+}
+
+static void vhost_vdpa_remove(struct vdpa_device *vdpa)
+{
+ struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
+ int opened;
+
+ cdev_device_del(&v->cdev, &v->dev);
+
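+ /* Claim the "opened" slot ourselves so no new open() can succeed,
+  * then wait for any current user to release the device.
+  */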
+ do {
+ opened = atomic_cmpxchg(&v->opened, 0, 1);
+ if (!opened)
+ break;
+ wait_for_completion(&v->completion);
+ } while (1);
+
+ put_device(&v->dev);
+}
+
+static struct vdpa_driver vhost_vdpa_driver = {
+ .driver = {
+ .name = "vhost_vdpa",
+ },
+ .probe = vhost_vdpa_probe,
+ .remove = vhost_vdpa_remove,
+};
+
+static int __init vhost_vdpa_init(void)
+{
+ int r;
+
+ r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
+ "vhost-vdpa");
+ if (r)
+ goto err_alloc_chrdev;
+
+ r = vdpa_register_driver(&vhost_vdpa_driver);
+ if (r)
+ goto err_vdpa_register_driver;
+
+ return 0;
+
+err_vdpa_register_driver:
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+err_alloc_chrdev:
+ return r;
+}
+module_init(vhost_vdpa_init);
+
+static void __exit vhost_vdpa_exit(void)
+{
+ vdpa_unregister_driver(&vhost_vdpa_driver);
+ unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
+}
+module_exit(vhost_vdpa_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Intel Corporation");
+MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
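
For context, a minimal userspace sketch of how a VMM might exercise the
IOTLB path added above, assuming the standard vhost uAPI types and
constants from <linux/vhost_types.h>; vdpa_fd, iova, size and buf are
hypothetical names, and the fd is assumed to be an open vhost-vdpa
character device with ownership already set:

	#include <stdint.h>
	#include <unistd.h>
	#include <linux/vhost_types.h>

	/* Ask vhost-vdpa to map [iova, iova + size) onto the process
	 * buffer 'buf'; the write lands in vhost_vdpa_process_iotlb_msg()
	 * and, for updates, in vhost_vdpa_process_iotlb_update().
	 */
	static int vdpa_iotlb_update(int vdpa_fd, uint64_t iova,
				     uint64_t size, void *buf)
	{
		struct vhost_msg_v2 msg = {
			.type = VHOST_IOTLB_MSG_V2,
			.iotlb = {
				.iova  = iova,
				.size  = size,
				.uaddr = (uint64_t)(uintptr_t)buf,
				.perm  = VHOST_ACCESS_RW,
				.type  = VHOST_IOTLB_UPDATE,
			},
		};

		return write(vdpa_fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
	}
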
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index f44340b41494..d450e16c5c25 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -50,10 +50,6 @@ enum {
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
-INTERVAL_TREE_DEFINE(struct vhost_umem_node,
- rb, __u64, __subtree_last,
- START, LAST, static inline, vhost_umem_interval_tree);
-
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_disable_cross_endian(struct vhost_virtqueue *vq)
{
@@ -457,7 +453,9 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq,
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs,
- int iov_limit, int weight, int byte_weight)
+ int iov_limit, int weight, int byte_weight,
+ int (*msg_handler)(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg))
{
struct vhost_virtqueue *vq;
int i;
@@ -473,6 +471,7 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->iov_limit = iov_limit;
dev->weight = weight;
dev->byte_weight = byte_weight;
+ dev->msg_handler = msg_handler;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
@@ -581,21 +580,25 @@ err_mm:
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
-struct vhost_umem *vhost_dev_reset_owner_prepare(void)
+static struct vhost_iotlb *iotlb_alloc(void)
+{
+ return vhost_iotlb_alloc(max_iotlb_entries,
+ VHOST_IOTLB_FLAG_RETIRE);
+}
+
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void)
{
- return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL);
+ return iotlb_alloc();
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
-void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem)
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem)
{
int i;
vhost_dev_cleanup(dev);
- /* Restore memory to default empty mapping. */
- INIT_LIST_HEAD(&umem->umem_list);
dev->umem = umem;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
@@ -618,28 +621,6 @@ void vhost_dev_stop(struct vhost_dev *dev)
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
-static void vhost_umem_free(struct vhost_umem *umem,
- struct vhost_umem_node *node)
-{
- vhost_umem_interval_tree_remove(node, &umem->umem_tree);
- list_del(&node->link);
- kfree(node);
- umem->numem--;
-}
-
-static void vhost_umem_clean(struct vhost_umem *umem)
-{
- struct vhost_umem_node *node, *tmp;
-
- if (!umem)
- return;
-
- list_for_each_entry_safe(node, tmp, &umem->umem_list, link)
- vhost_umem_free(umem, node);
-
- kvfree(umem);
-}
-
static void vhost_clear_msg(struct vhost_dev *dev)
{
struct vhost_msg_node *node, *n;
@@ -677,9 +658,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
/* No one will access memory at this point */
- vhost_umem_clean(dev->umem);
+ vhost_iotlb_free(dev->umem);
dev->umem = NULL;
- vhost_umem_clean(dev->iotlb);
+ vhost_iotlb_free(dev->iotlb);
dev->iotlb = NULL;
vhost_clear_msg(dev);
wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM);
@@ -715,27 +696,26 @@ static bool vhost_overflow(u64 uaddr, u64 size)
}
/* Caller should have vq mutex and device mutex. */
-static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem,
+static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem,
int log_all)
{
- struct vhost_umem_node *node;
+ struct vhost_iotlb_map *map;
if (!umem)
return false;
- list_for_each_entry(node, &umem->umem_list, link) {
- unsigned long a = node->userspace_addr;
+ list_for_each_entry(map, &umem->list, link) {
+ unsigned long a = map->addr;
- if (vhost_overflow(node->userspace_addr, node->size))
+ if (vhost_overflow(map->addr, map->size))
return false;
- if (!access_ok((void __user *)a,
- node->size))
+ if (!access_ok((void __user *)a, map->size))
return false;
else if (log_all && !log_access_ok(log_base,
- node->start,
- node->size))
+ map->start,
+ map->size))
return false;
}
return true;
@@ -745,17 +725,17 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq,
u64 addr, unsigned int size,
int type)
{
- const struct vhost_umem_node *node = vq->meta_iotlb[type];
+ const struct vhost_iotlb_map *map = vq->meta_iotlb[type];
- if (!node)
+ if (!map)
return NULL;
- return (void *)(uintptr_t)(node->userspace_addr + addr - node->start);
+ return (void *)(uintptr_t)(map->addr + addr - map->start);
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
-static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem,
+static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem,
int log_all)
{
int i;
@@ -1020,47 +1000,6 @@ static inline int vhost_get_desc(struct vhost_virtqueue *vq,
return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
}
-static int vhost_new_umem_range(struct vhost_umem *umem,
- u64 start, u64 size, u64 end,
- u64 userspace_addr, int perm)
-{
- struct vhost_umem_node *tmp, *node;
-
- if (!size)
- return -EFAULT;
-
- node = kmalloc(sizeof(*node), GFP_ATOMIC);
- if (!node)
- return -ENOMEM;
-
- if (umem->numem == max_iotlb_entries) {
- tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link);
- vhost_umem_free(umem, tmp);
- }
-
- node->start = start;
- node->size = size;
- node->last = end;
- node->userspace_addr = userspace_addr;
- node->perm = perm;
- INIT_LIST_HEAD(&node->link);
- list_add_tail(&node->link, &umem->umem_list);
- vhost_umem_interval_tree_insert(node, &umem->umem_tree);
- umem->numem++;
-
- return 0;
-}
-
-static void vhost_del_umem_range(struct vhost_umem *umem,
- u64 start, u64 end)
-{
- struct vhost_umem_node *node;
-
- while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- start, end)))
- vhost_umem_free(umem, node);
-}
-
static void vhost_iotlb_notify_vq(struct vhost_dev *d,
struct vhost_iotlb_msg *msg)
{
@@ -1117,9 +1056,9 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
vhost_vq_meta_reset(dev);
- if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size,
- msg->iova + msg->size - 1,
- msg->uaddr, msg->perm)) {
+ if (vhost_iotlb_add_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1,
+ msg->uaddr, msg->perm)) {
ret = -ENOMEM;
break;
}
@@ -1131,8 +1070,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
break;
}
vhost_vq_meta_reset(dev);
- vhost_del_umem_range(dev->iotlb, msg->iova,
- msg->iova + msg->size - 1);
+ vhost_iotlb_del_range(dev->iotlb, msg->iova,
+ msg->iova + msg->size - 1);
break;
default:
ret = -EINVAL;
@@ -1178,7 +1117,12 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
ret = -EINVAL;
goto done;
}
- if (vhost_process_iotlb_msg(dev, &msg)) {
+
+ if (dev->msg_handler)
+ ret = dev->msg_handler(dev, &msg);
+ else
+ ret = vhost_process_iotlb_msg(dev, &msg);
+ if (ret) {
ret = -EFAULT;
goto done;
}
@@ -1311,44 +1255,42 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
}
static void vhost_vq_meta_update(struct vhost_virtqueue *vq,
- const struct vhost_umem_node *node,
+ const struct vhost_iotlb_map *map,
int type)
{
int access = (type == VHOST_ADDR_USED) ?
VHOST_ACCESS_WO : VHOST_ACCESS_RO;
- if (likely(node->perm & access))
- vq->meta_iotlb[type] = node;
+ if (likely(map->perm & access))
+ vq->meta_iotlb[type] = map;
}
static bool iotlb_access_ok(struct vhost_virtqueue *vq,
int access, u64 addr, u64 len, int type)
{
- const struct vhost_umem_node *node;
- struct vhost_umem *umem = vq->iotlb;
+ const struct vhost_iotlb_map *map;
+ struct vhost_iotlb *umem = vq->iotlb;
u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
if (vhost_vq_meta_fetch(vq, addr, len, type))
return true;
while (len > s) {
- node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- addr,
- last);
- if (node == NULL || node->start > addr) {
+ map = vhost_iotlb_itree_first(umem, addr, last);
+ if (map == NULL || map->start > addr) {
vhost_iotlb_miss(vq, addr, access);
return false;
- } else if (!(node->perm & access)) {
+ } else if (!(map->perm & access)) {
/* Report the possible access violation by
* requesting another translation from userspace.
*/
return false;
}
- size = node->size - addr + node->start;
+ size = map->size - addr + map->start;
if (orig_addr == addr && size >= len)
- vhost_vq_meta_update(vq, node, type);
+ vhost_vq_meta_update(vq, map, type);
s += size;
addr += size;
@@ -1364,12 +1306,12 @@ int vq_meta_prefetch(struct vhost_virtqueue *vq)
if (!vq->iotlb)
return 1;
- return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
+ return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc,
vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
- iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail,
+ iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail,
vhost_get_avail_size(vq, num),
VHOST_ADDR_AVAIL) &&
- iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used,
+ iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used,
vhost_get_used_size(vq, num), VHOST_ADDR_USED);
}
EXPORT_SYMBOL_GPL(vq_meta_prefetch);
@@ -1408,25 +1350,11 @@ bool vhost_vq_access_ok(struct vhost_virtqueue *vq)
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
-static struct vhost_umem *vhost_umem_alloc(void)
-{
- struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL);
-
- if (!umem)
- return NULL;
-
- umem->umem_tree = RB_ROOT_CACHED;
- umem->numem = 0;
- INIT_LIST_HEAD(&umem->umem_list);
-
- return umem;
-}
-
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem;
struct vhost_memory_region *region;
- struct vhost_umem *newumem, *oldumem;
+ struct vhost_iotlb *newumem, *oldumem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
@@ -1448,7 +1376,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
return -EFAULT;
}
- newumem = vhost_umem_alloc();
+ newumem = iotlb_alloc();
if (!newumem) {
kvfree(newmem);
return -ENOMEM;
@@ -1457,13 +1385,12 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
for (region = newmem->regions;
region < newmem->regions + mem.nregions;
region++) {
- if (vhost_new_umem_range(newumem,
- region->guest_phys_addr,
- region->memory_size,
- region->guest_phys_addr +
- region->memory_size - 1,
- region->userspace_addr,
- VHOST_ACCESS_RW))
+ if (vhost_iotlb_add_range(newumem,
+ region->guest_phys_addr,
+ region->guest_phys_addr +
+ region->memory_size - 1,
+ region->userspace_addr,
+ VHOST_MAP_RW))
goto err;
}
@@ -1481,11 +1408,11 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
}
kvfree(newmem);
- vhost_umem_clean(oldumem);
+ vhost_iotlb_free(oldumem);
return 0;
err:
- vhost_umem_clean(newumem);
+ vhost_iotlb_free(newumem);
kvfree(newmem);
return -EFAULT;
}
@@ -1726,10 +1653,10 @@ EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
{
- struct vhost_umem *niotlb, *oiotlb;
+ struct vhost_iotlb *niotlb, *oiotlb;
int i;
- niotlb = vhost_umem_alloc();
+ niotlb = iotlb_alloc();
if (!niotlb)
return -ENOMEM;
@@ -1745,7 +1672,7 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled)
mutex_unlock(&vq->mutex);
}
- vhost_umem_clean(oiotlb);
+ vhost_iotlb_free(oiotlb);
return 0;
}
@@ -1875,8 +1802,8 @@ static int log_write(void __user *log_base,
static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
{
- struct vhost_umem *umem = vq->umem;
- struct vhost_umem_node *u;
+ struct vhost_iotlb *umem = vq->umem;
+ struct vhost_iotlb_map *u;
u64 start, end, l, min;
int r;
bool hit = false;
@@ -1886,16 +1813,15 @@ static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
/* More than one GPA can be mapped into a single HVA, so
* iterate over all possible mappings here to be safe.
*/
- list_for_each_entry(u, &umem->umem_list, link) {
- if (u->userspace_addr > hva - 1 + len ||
- u->userspace_addr - 1 + u->size < hva)
+ list_for_each_entry(u, &umem->list, link) {
+ if (u->addr > hva - 1 + len ||
+ u->addr - 1 + u->size < hva)
continue;
- start = max(u->userspace_addr, hva);
- end = min(u->userspace_addr - 1 + u->size,
- hva - 1 + len);
+ start = max(u->addr, hva);
+ end = min(u->addr - 1 + u->size, hva - 1 + len);
l = end - start + 1;
r = log_write(vq->log_base,
- u->start + start - u->userspace_addr,
+ u->start + start - u->addr,
l);
if (r < 0)
return r;
@@ -2046,9 +1972,9 @@ EXPORT_SYMBOL_GPL(vhost_vq_init_access);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size, int access)
{
- const struct vhost_umem_node *node;
+ const struct vhost_iotlb_map *map;
struct vhost_dev *dev = vq->dev;
- struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem;
+ struct vhost_iotlb *umem = dev->iotlb ? dev->iotlb : dev->umem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
@@ -2060,25 +1986,24 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
break;
}
- node = vhost_umem_interval_tree_iter_first(&umem->umem_tree,
- addr, addr + len - 1);
- if (node == NULL || node->start > addr) {
+ map = vhost_iotlb_itree_first(umem, addr, addr + len - 1);
+ if (map == NULL || map->start > addr) {
if (umem != dev->iotlb) {
ret = -EFAULT;
break;
}
ret = -EAGAIN;
break;
- } else if (!(node->perm & access)) {
+ } else if (!(map->perm & access)) {
ret = -EPERM;
break;
}
_iov = iov + ret;
- size = node->size - addr + node->start;
+ size = map->size - addr + map->start;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
- (node->userspace_addr + addr - node->start);
+ (map->addr + addr - map->start);
s += size;
addr += size;
++ret;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index a123fd70847e..181382185bbc 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,6 +12,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
+#include <linux/vhost_iotlb.h>
struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -52,27 +53,6 @@ struct vhost_log {
u64 len;
};
-#define START(node) ((node)->start)
-#define LAST(node) ((node)->last)
-
-struct vhost_umem_node {
- struct rb_node rb;
- struct list_head link;
- __u64 start;
- __u64 last;
- __u64 size;
- __u64 userspace_addr;
- __u32 perm;
- __u32 flags_padding;
- __u64 __subtree_last;
-};
-
-struct vhost_umem {
- struct rb_root_cached umem_tree;
- struct list_head umem_list;
- int numem;
-};
-
enum vhost_uaddr_type {
VHOST_ADDR_DESC = 0,
VHOST_ADDR_AVAIL = 1,
@@ -90,7 +70,7 @@ struct vhost_virtqueue {
struct vring_desc __user *desc;
struct vring_avail __user *avail;
struct vring_used __user *used;
- const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
+ const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
struct file *kick;
struct eventfd_ctx *call_ctx;
struct eventfd_ctx *error_ctx;
@@ -128,8 +108,8 @@ struct vhost_virtqueue {
struct iovec *indirect;
struct vring_used_elem *heads;
/* Protected by virtqueue mutex. */
- struct vhost_umem *umem;
- struct vhost_umem *iotlb;
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
void *private_data;
u64 acked_features;
u64 acked_backend_features;
@@ -164,8 +144,8 @@ struct vhost_dev {
struct eventfd_ctx *log_ctx;
struct llist_head work_list;
struct task_struct *worker;
- struct vhost_umem *umem;
- struct vhost_umem *iotlb;
+ struct vhost_iotlb *umem;
+ struct vhost_iotlb *iotlb;
spinlock_t iotlb_lock;
struct list_head read_list;
struct list_head pending_list;
@@ -174,16 +154,20 @@ struct vhost_dev {
int weight;
int byte_weight;
u64 kcov_handle;
+ int (*msg_handler)(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg);
};
bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
- int nvqs, int iov_limit, int weight, int byte_weight);
+ int nvqs, int iov_limit, int weight, int byte_weight,
+ int (*msg_handler)(struct vhost_dev *dev,
+ struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
-struct vhost_umem *vhost_dev_reset_owner_prepare(void);
-void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *);
+struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
+void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
@@ -229,6 +213,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);
+void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
+ struct vhost_iotlb_map *map);
+
#define vq_err(vq, fmt, ...) do { \
pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
if ((vq)->error_ctx) \
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index a0a2d74967ef..ee0491f579ac 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -13,6 +13,9 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/vhost_iotlb.h>
#include <uapi/linux/virtio_config.h>
static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
@@ -71,9 +74,11 @@ static inline int __vringh_get_head(const struct vringh *vrh,
}
/* Copy some bytes to/from the iovec. Returns num copied. */
-static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
+static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
+ struct vringh_kiov *iov,
void *ptr, size_t len,
- int (*xfer)(void *addr, void *ptr,
+ int (*xfer)(const struct vringh *vrh,
+ void *addr, void *ptr,
size_t len))
{
int err, done = 0;
@@ -82,7 +87,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
size_t partlen;
partlen = min(iov->iov[iov->i].iov_len, len);
- err = xfer(iov->iov[iov->i].iov_base, ptr, partlen);
+ err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
if (err)
return err;
done += partlen;
@@ -96,6 +101,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov,
/* Fix up old iov element then increment. */
iov->iov[iov->i].iov_len = iov->consumed;
iov->iov[iov->i].iov_base -= iov->consumed;
+
iov->consumed = 0;
iov->i++;
@@ -227,7 +233,8 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src,
u64 addr,
struct vringh_range *r),
struct vringh_range *range,
- int (*copy)(void *dst, const void *src, size_t len))
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
{
size_t part, len = sizeof(struct vring_desc);
@@ -241,7 +248,7 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src,
if (!rcheck(vrh, addr, &part, range, getrange))
return -EINVAL;
- err = copy(dst, src, part);
+ err = copy(vrh, dst, src, part);
if (err)
return err;
@@ -262,7 +269,8 @@ __vringh_iov(struct vringh *vrh, u16 i,
struct vringh_range *)),
bool (*getrange)(struct vringh *, u64, struct vringh_range *),
gfp_t gfp,
- int (*copy)(void *dst, const void *src, size_t len))
+ int (*copy)(const struct vringh *vrh,
+ void *dst, const void *src, size_t len))
{
int err, count = 0, up_next, desc_max;
struct vring_desc desc, *descs;
@@ -291,7 +299,7 @@ __vringh_iov(struct vringh *vrh, u16 i,
err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
&slowrange, copy);
else
- err = copy(&desc, &descs[i], sizeof(desc));
+ err = copy(vrh, &desc, &descs[i], sizeof(desc));
if (unlikely(err))
goto fail;
@@ -404,7 +412,8 @@ static inline int __vringh_complete(struct vringh *vrh,
unsigned int num_used,
int (*putu16)(const struct vringh *vrh,
__virtio16 *p, u16 val),
- int (*putused)(struct vring_used_elem *dst,
+ int (*putused)(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem
*src, unsigned num))
{
@@ -420,12 +429,12 @@ static inline int __vringh_complete(struct vringh *vrh,
/* Compiler knows num_used == 1 sometimes, hence extra check */
if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
u16 part = vrh->vring.num - off;
- err = putused(&used_ring->ring[off], used, part);
+ err = putused(vrh, &used_ring->ring[off], used, part);
if (!err)
- err = putused(&used_ring->ring[0], used + part,
+ err = putused(vrh, &used_ring->ring[0], used + part,
num_used - part);
} else
- err = putused(&used_ring->ring[off], used, num_used);
+ err = putused(vrh, &used_ring->ring[off], used, num_used);
if (err) {
vringh_bad("Failed to write %u used entries %u at %p",
@@ -564,13 +573,15 @@ static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
return put_user(v, (__force __virtio16 __user *)p);
}
-static inline int copydesc_user(void *dst, const void *src, size_t len)
+static inline int copydesc_user(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
-static inline int putused_user(struct vring_used_elem *dst,
+static inline int putused_user(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
@@ -578,13 +589,15 @@ static inline int putused_user(struct vring_used_elem *dst,
sizeof(*dst) * num) ? -EFAULT : 0;
}
-static inline int xfer_from_user(void *src, void *dst, size_t len)
+static inline int xfer_from_user(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
{
return copy_from_user(dst, (__force void __user *)src, len) ?
-EFAULT : 0;
}
-static inline int xfer_to_user(void *dst, void *src, size_t len)
+static inline int xfer_to_user(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
{
return copy_to_user((__force void __user *)dst, src, len) ?
-EFAULT : 0;
@@ -706,7 +719,7 @@ EXPORT_SYMBOL(vringh_getdesc_user);
*/
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
- return vringh_iov_xfer((struct vringh_kiov *)riov,
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);
@@ -722,7 +735,7 @@ EXPORT_SYMBOL(vringh_iov_pull_user);
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer((struct vringh_kiov *)wiov,
+ return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
(void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);
@@ -832,13 +845,15 @@ static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
return 0;
}
-static inline int copydesc_kern(void *dst, const void *src, size_t len)
+static inline int copydesc_kern(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
}
-static inline int putused_kern(struct vring_used_elem *dst,
+static inline int putused_kern(const struct vringh *vrh,
+ struct vring_used_elem *dst,
const struct vring_used_elem *src,
unsigned int num)
{
@@ -846,13 +861,15 @@ static inline int putused_kern(struct vring_used_elem *dst,
return 0;
}
-static inline int xfer_kern(void *src, void *dst, size_t len)
+static inline int xfer_kern(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
{
memcpy(dst, src, len);
return 0;
}
-static inline int kern_xfer(void *dst, void *src, size_t len)
+static inline int kern_xfer(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
{
memcpy(dst, src, len);
return 0;
@@ -949,7 +966,7 @@ EXPORT_SYMBOL(vringh_getdesc_kern);
*/
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
- return vringh_iov_xfer(riov, dst, len, xfer_kern);
+ return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);
@@ -964,7 +981,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
const void *src, size_t len)
{
- return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer);
+ return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);
@@ -1042,4 +1059,362 @@ int vringh_need_notify_kern(struct vringh *vrh)
}
EXPORT_SYMBOL(vringh_need_notify_kern);
+static int iotlb_translate(const struct vringh *vrh,
+ u64 addr, u64 len, struct bio_vec iov[],
+ int iov_size, u32 perm)
+{
+ struct vhost_iotlb_map *map;
+ struct vhost_iotlb *iotlb = vrh->iotlb;
+ int ret = 0;
+ u64 s = 0;
+
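+ /* Walk the IOTLB mappings covering [addr, addr + len), emitting
+  * one bio_vec per contiguous chunk of translated memory.
+  */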
+ while (len > s) {
+ u64 size, pa, pfn;
+
+ if (unlikely(ret >= iov_size)) {
+ ret = -ENOBUFS;
+ break;
+ }
+
+ map = vhost_iotlb_itree_first(iotlb, addr,
+ addr + len - 1);
+ if (!map || map->start > addr) {
+ ret = -EINVAL;
+ break;
+ } else if (!(map->perm & perm)) {
+ ret = -EPERM;
+ break;
+ }
+
+ size = map->size - addr + map->start;
+ pa = map->addr + addr - map->start;
+ pfn = pa >> PAGE_SHIFT;
+ iov[ret].bv_page = pfn_to_page(pfn);
+ iov[ret].bv_len = min(len - s, size);
+ iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ return ret;
+}
+
+static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ struct iov_iter iter;
+ struct bio_vec iov[16];
+ int ret;
+
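+ /* Translate into at most 16 bio_vecs; iotlb_translate() returns
+  * -ENOBUFS if the range spans more mappings than that.
+  */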
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
+ len, iov, 16, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ iov_iter_bvec(&iter, READ, iov, ret, len);
+
+ ret = copy_from_iter(dst, len, &iter);
+
+ return ret;
+}
+
+static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
+ void *src, size_t len)
+{
+ struct iov_iter iter;
+ struct bio_vec iov[16];
+ int ret;
+
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
+ len, iov, 16, VHOST_MAP_WO);
+ if (ret < 0)
+ return ret;
+
+ iov_iter_bvec(&iter, WRITE, iov, ret, len);
+
+ return copy_to_iter(src, len, &iter);
+}
+
+static inline int getu16_iotlb(const struct vringh *vrh,
+ u16 *val, const __virtio16 *p)
+{
+ struct bio_vec iov;
+ void *kaddr, *from;
+ int ret;
+
+ /* Atomic read is needed for getu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ &iov, 1, VHOST_MAP_RO);
+ if (ret < 0)
+ return ret;
+
+ kaddr = kmap_atomic(iov.bv_page);
+ from = kaddr + iov.bv_offset;
+ *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
+ kunmap_atomic(kaddr);
+
+ return 0;
+}
+
+static inline int putu16_iotlb(const struct vringh *vrh,
+ __virtio16 *p, u16 val)
+{
+ struct bio_vec iov;
+ void *kaddr, *to;
+ int ret;
+
+ /* Atomic write is needed for putu16 */
+ ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
+ &iov, 1, VHOST_MAP_WO);
+ if (ret < 0)
+ return ret;
+
+ kaddr = kmap_atomic(iov.bv_page);
+ to = kaddr + iov.bv_offset;
+ WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
+ kunmap_atomic(kaddr);
+
+ return 0;
+}
+
+static inline int copydesc_iotlb(const struct vringh *vrh,
+ void *dst, const void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, (void *)src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
+ void *dst, size_t len)
+{
+ int ret;
+
+ ret = copy_from_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int xfer_to_iotlb(const struct vringh *vrh,
+ void *dst, void *src, size_t len)
+{
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, src, len);
+ if (ret != len)
+ return -EFAULT;
+
+ return 0;
+}
+
+static inline int putused_iotlb(const struct vringh *vrh,
+ struct vring_used_elem *dst,
+ const struct vring_used_elem *src,
+ unsigned int num)
+{
+ int size = num * sizeof(*dst);
+ int ret;
+
+ ret = copy_to_iotlb(vrh, dst, (void *)src, size);
+ if (ret != size)
+ return -EFAULT;
+
+ return 0;
+}
+
+/**
+ * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
+ * @vrh: the vringh to initialize.
+ * @features: the feature bits for this ring.
+ * @num: the number of elements.
+ * @weak_barriers: true if we only need memory barriers, not I/O.
+ * @desc: the userspace descriptor pointer.
+ * @avail: the userspace avail pointer.
+ * @used: the userspace used pointer.
+ *
+ * Returns an error if num is invalid.
+ */
+int vringh_init_iotlb(struct vringh *vrh, u64 features,
+ unsigned int num, bool weak_barriers,
+ struct vring_desc *desc,
+ struct vring_avail *avail,
+ struct vring_used *used)
+{
+ return vringh_init_kern(vrh, features, num, weak_barriers,
+ desc, avail, used);
+}
+EXPORT_SYMBOL(vringh_init_iotlb);
+
+/**
+ * vringh_set_iotlb - attach an IOTLB to a vringh for address translation.
+ * @vrh: the vring
+ * @iotlb: iotlb associated with this vring
+ */
+void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
+{
+ vrh->iotlb = iotlb;
+}
+EXPORT_SYMBOL(vringh_set_iotlb);
+
+/**
+ * vringh_getdesc_iotlb - get next available descriptor from ring with
+ * IOTLB.
+ * @vrh: the kernelspace vring.
+ * @riov: where to put the readable descriptors (or NULL)
+ * @wiov: where to put the writable descriptors (or NULL)
+ * @head: head index we received, for passing to vringh_complete_iotlb().
+ * @gfp: flags for allocating larger riov/wiov.
+ *
+ * Returns 0 if there was no descriptor, 1 if there was, or -errno.
+ *
+ * Note that on error return, you can tell the difference between an
+ * invalid ring and a single invalid descriptor: in the former case,
+ * *head will be vrh->vring.num. You may be able to ignore an invalid
+ * descriptor, but there's not much you can do with an invalid ring.
+ *
+ * Note that you may need to clean up riov and wiov, even on error!
+ */
+int vringh_getdesc_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ struct vringh_kiov *wiov,
+ u16 *head,
+ gfp_t gfp)
+{
+ int err;
+
+ err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
+ if (err < 0)
+ return err;
+
+ /* Empty... */
+ if (err == vrh->vring.num)
+ return 0;
+
+ *head = err;
+ err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
+ gfp, copydesc_iotlb);
+ if (err)
+ return err;
+
+ return 1;
+}
+EXPORT_SYMBOL(vringh_getdesc_iotlb);
+
+/**
+ * vringh_iov_pull_iotlb - copy bytes from vring_iov.
+ * @vrh: the vring.
+ * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @dst: the place to copy.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
+ struct vringh_kiov *riov,
+ void *dst, size_t len)
+{
+ return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_pull_iotlb);
+
+/**
+ * vringh_iov_push_iotlb - copy bytes into vring_iov.
+ * @vrh: the vring.
+ * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
+ * @src: the place to copy from.
+ * @len: the maximum length to copy.
+ *
+ * Returns the bytes copied <= len or a negative errno.
+ */
+ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
+ struct vringh_kiov *wiov,
+ const void *src, size_t len)
+{
+ return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
+}
+EXPORT_SYMBOL(vringh_iov_push_iotlb);
+
+/**
+ * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
+ * @vrh: the vring.
+ * @num: the number of descriptors to put back (i.e. num
+ * vringh_getdesc_iotlb() calls to undo).
+ *
+ * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
+ */
+void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
+{
+ /* We only update vring_avail_event(vr) when we want to be notified,
+ * so we haven't changed that yet.
+ */
+ vrh->last_avail_idx -= num;
+}
+EXPORT_SYMBOL(vringh_abandon_iotlb);
+
+/**
+ * vringh_complete_iotlb - we've finished with descriptor, publish it.
+ * @vrh: the vring.
+ * @head: the head as filled in by vringh_getdesc_iotlb.
+ * @len: the length of data we have written.
+ *
+ * You should check vringh_need_notify_iotlb() after one or more calls
+ * to this function.
+ */
+int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
+{
+ struct vring_used_elem used;
+
+ used.id = cpu_to_vringh32(vrh, head);
+ used.len = cpu_to_vringh32(vrh, len);
+
+ return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
+}
+EXPORT_SYMBOL(vringh_complete_iotlb);
+
+/**
+ * vringh_notify_enable_iotlb - we want to know if something changes.
+ * @vrh: the vring.
+ *
+ * This always enables notifications, but returns false if there are
+ * now more buffers available in the vring.
+ */
+bool vringh_notify_enable_iotlb(struct vringh *vrh)
+{
+ return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_enable_iotlb);
+
+/**
+ * vringh_notify_disable_iotlb - don't tell us if something changes.
+ * @vrh: the vring.
+ *
+ * This is our normal running state: we disable and then only enable when
+ * we're going to sleep.
+ */
+void vringh_notify_disable_iotlb(struct vringh *vrh)
+{
+ __vringh_notify_disable(vrh, putu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_notify_disable_iotlb);
+
+/**
+ * vringh_need_notify_iotlb - must we tell the other side about used buffers?
+ * @vrh: the vring we've called vringh_complete_iotlb() on.
+ *
+ * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
+ */
+int vringh_need_notify_iotlb(struct vringh *vrh)
+{
+ return __vringh_need_notify(vrh, getu16_iotlb);
+}
+EXPORT_SYMBOL(vringh_need_notify_iotlb);
+
MODULE_LICENSE("GPL");
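
For context, a minimal consumer sketch of the *_iotlb API added above,
assuming a vringh already initialized with vringh_init_iotlb() and bound
to an IOTLB via vringh_set_iotlb(); demo_handle_one() and its buffer are
hypothetical:

	/* Drain one available (read-only) descriptor chain, then publish
	 * it as used.
	 */
	static int demo_handle_one(struct vringh *vrh, void *buf, size_t buflen)
	{
		struct vringh_kiov riov;
		ssize_t len;
		u16 head;
		int err;

		vringh_kiov_init(&riov, NULL, 0);

		err = vringh_getdesc_iotlb(vrh, &riov, NULL, &head, GFP_KERNEL);
		if (err <= 0)		/* 0: ring empty, < 0: error */
			goto out;

		len = vringh_iov_pull_iotlb(vrh, &riov, buf, buflen);
		if (len < 0) {
			err = len;
			goto out;
		}

		/* Nothing written back into the chain, so the used length is 0 */
		err = vringh_complete_iotlb(vrh, head, 0);
		if (err)
			goto out;

		/* > 0 means the driver side must be kicked; how that happens
		 * is device specific.
		 */
		err = vringh_need_notify_iotlb(vrh);
	out:
		vringh_kiov_cleanup(&riov);
		return err;
	}
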
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index c2d7d57e98cf..97669484a3f6 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -621,7 +621,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
- VHOST_VSOCK_WEIGHT);
+ VHOST_VSOCK_WEIGHT, NULL);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 68f7592c5060..25ef0cbd7583 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/fb.h>
#include <linux/lcd.h>
#include <linux/spi/spi.h>
@@ -90,9 +90,8 @@ struct corgi_lcd {
int mode;
char buf[2];
- int gpio_backlight_on;
- int gpio_backlight_cont;
- int gpio_backlight_cont_inverted;
+ struct gpio_desc *backlight_on;
+ struct gpio_desc *backlight_cont;
void (*kick_battery)(void);
};
@@ -403,13 +402,13 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity)
corgi_ssp_lcdtg_send(lcd, DUTYCTRL_ADRS, intensity);
/* Bit 5 via GPIO_BACKLIGHT_CONT */
- cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted;
+ cont = !!(intensity & 0x20);
- if (gpio_is_valid(lcd->gpio_backlight_cont))
- gpio_set_value_cansleep(lcd->gpio_backlight_cont, cont);
+ if (lcd->backlight_cont)
+ gpiod_set_value_cansleep(lcd->backlight_cont, cont);
- if (gpio_is_valid(lcd->gpio_backlight_on))
- gpio_set_value_cansleep(lcd->gpio_backlight_on, intensity);
+ if (lcd->backlight_on)
+ gpiod_set_value_cansleep(lcd->backlight_on, intensity);
if (lcd->kick_battery)
lcd->kick_battery();
@@ -482,48 +481,17 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd,
struct corgi_lcd_platform_data *pdata)
{
struct spi_device *spi = lcd->spi_dev;
- int err;
-
- lcd->gpio_backlight_on = -1;
- lcd->gpio_backlight_cont = -1;
-
- if (gpio_is_valid(pdata->gpio_backlight_on)) {
- err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on,
- "BL_ON");
- if (err) {
- dev_err(&spi->dev,
- "failed to request GPIO%d for backlight_on\n",
- pdata->gpio_backlight_on);
- return err;
- }
-
- lcd->gpio_backlight_on = pdata->gpio_backlight_on;
- gpio_direction_output(lcd->gpio_backlight_on, 0);
- }
- if (gpio_is_valid(pdata->gpio_backlight_cont)) {
- err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont,
- "BL_CONT");
- if (err) {
- dev_err(&spi->dev,
- "failed to request GPIO%d for backlight_cont\n",
- pdata->gpio_backlight_cont);
- return err;
- }
-
- lcd->gpio_backlight_cont = pdata->gpio_backlight_cont;
-
- /* spitz and akita use both GPIOs for backlight, and
- * have inverted polarity of GPIO_BACKLIGHT_CONT
- */
- if (gpio_is_valid(lcd->gpio_backlight_on)) {
- lcd->gpio_backlight_cont_inverted = 1;
- gpio_direction_output(lcd->gpio_backlight_cont, 1);
- } else {
- lcd->gpio_backlight_cont_inverted = 0;
- gpio_direction_output(lcd->gpio_backlight_cont, 0);
- }
- }
+ lcd->backlight_on = devm_gpiod_get_optional(&spi->dev,
+ "BL_ON", GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->backlight_on))
+ return PTR_ERR(lcd->backlight_on);
+
+ lcd->backlight_cont = devm_gpiod_get_optional(&spi->dev, "BL_CONT",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(lcd->backlight_cont))
+ return PTR_ERR(lcd->backlight_cont);
+
return 0;
}
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index efb4efc2a13d..82b8d7594701 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -7,7 +7,6 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -258,8 +257,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
&data->post_pwm_on_delay);
of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
- data->enable_gpio = -EINVAL;
-
/*
* Determine the number of brightness levels, if this property is not
* set a default table of brightness levels will be used.
@@ -503,22 +500,6 @@ static int pwm_backlight_probe(struct platform_device *pdev)
}
/*
- * Compatibility fallback for drivers still using the integer GPIO
- * platform data. Must go away soon.
- */
- if (!pb->enable_gpio && gpio_is_valid(data->enable_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev, data->enable_gpio,
- GPIOF_OUT_INIT_HIGH, "enable");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n",
- data->enable_gpio, ret);
- goto err_alloc;
- }
-
- pb->enable_gpio = gpio_to_desc(data->enable_gpio);
- }
-
- /*
* If the GPIO is not known to be already configured as output, that
* is, if gpiod_get_direction returns either 1 or -EINVAL, change the
* direction to output and set the GPIO as active.
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 28335788e76e..9d28a8e3328f 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -1282,6 +1282,9 @@ finished:
if (!con_is_bound(&fb_con))
fbcon_exit();
+ if (vc->vc_num == logo_shown)
+ logo_shown = FBCON_LOGO_CANSHOW;
+
return;
}
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 078615cf2afc..2bbf94b15bba 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -43,6 +43,19 @@ config VIRTIO_PCI_LEGACY
If unsure, say Y.
+config VIRTIO_VDPA
+ tristate "vDPA driver for virtio devices"
+ select VDPA
+ select VIRTIO
+ help
+ This driver provides support for virtio-based paravirtual
+ device drivers over the vDPA bus. For this to be useful, you
+ need an appropriate vDPA device implementation that operates
+ on a physical device and allows the virtio datapath to be
+ offloaded to hardware.
+
+ If unsure, say M.
+
config VIRTIO_PMEM
tristate "Support for virtio pmem driver"
depends on VIRTIO
@@ -58,6 +71,7 @@ config VIRTIO_BALLOON
tristate "Virtio balloon driver"
depends on VIRTIO
select MEMORY_BALLOON
+ select PAGE_REPORTING
---help---
This driver supports increasing and decreasing the amount
of memory within a KVM guest.
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 3a2b5c5dcf46..29a1386ecc03 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o
obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o
obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o
+obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 341458fd95ca..0ef16566c3f3 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -14,11 +14,13 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/balloon_compaction.h>
+#include <linux/oom.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pseudo_fs.h>
+#include <linux/page_reporting.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -27,7 +29,9 @@
*/
#define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256
-#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80
+/* Maximum number of (4k) pages to deflate on OOM notifications. */
+#define VIRTIO_BALLOON_OOM_NR_PAGES 256
+#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80
#define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \
__GFP_NOMEMALLOC)
@@ -47,6 +51,7 @@ enum virtio_balloon_vq {
VIRTIO_BALLOON_VQ_DEFLATE,
VIRTIO_BALLOON_VQ_STATS,
VIRTIO_BALLOON_VQ_FREE_PAGE,
+ VIRTIO_BALLOON_VQ_REPORTING,
VIRTIO_BALLOON_VQ_MAX
};
@@ -112,8 +117,15 @@ struct virtio_balloon {
/* Memory statistics */
struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
- /* To register a shrinker to shrink memory upon memory pressure */
+ /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */
struct shrinker shrinker;
+
+ /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */
+ struct notifier_block oom_nb;
+
+ /* Free page reporting device */
+ struct virtqueue *reporting_vq;
+ struct page_reporting_dev_info pr_dev_info;
};
static struct virtio_device_id id_table[] = {
@@ -153,6 +165,33 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
}
+static int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info,
+ struct scatterlist *sg, unsigned int nents)
+{
+ struct virtio_balloon *vb =
+ container_of(pr_dev_info, struct virtio_balloon, pr_dev_info);
+ struct virtqueue *vq = vb->reporting_vq;
+ unsigned int unused, err;
+
+ /* We should always be able to add these buffers to an empty queue. */
+ err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN);
+
+ /*
+ * In the extremely unlikely case that adding the buffers to the
+ * queue fails, warn once and return without actually processing
+ * the pages.
+ */
+ if (WARN_ON_ONCE(err))
+ return err;
+
+ virtqueue_kick(vq);
+
+ /* When host has read buffer, this completes via balloon_ack */
+ wait_event(vb->acked, virtqueue_get_buf(vq, &unused));
+
+ return 0;
+}
+
static void set_page_pfns(struct virtio_balloon *vb,
__virtio32 pfns[], struct page *page)
{
@@ -481,6 +520,7 @@ static int init_vqs(struct virtio_balloon *vb)
names[VIRTIO_BALLOON_VQ_STATS] = NULL;
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
+ names[VIRTIO_BALLOON_VQ_REPORTING] = NULL;
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
names[VIRTIO_BALLOON_VQ_STATS] = "stats";
@@ -492,6 +532,11 @@ static int init_vqs(struct virtio_balloon *vb)
callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL;
}
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
+ names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq";
+ callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack;
+ }
+
err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX,
vqs, callbacks, names, NULL, NULL);
if (err)
@@ -524,6 +569,9 @@ static int init_vqs(struct virtio_balloon *vb)
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE];
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
+ vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING];
+
return 0;
}
@@ -788,50 +836,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb,
return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
}
-static unsigned long leak_balloon_pages(struct virtio_balloon *vb,
- unsigned long pages_to_free)
-{
- return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) /
- VIRTIO_BALLOON_PAGES_PER_PAGE;
-}
-
-static unsigned long shrink_balloon_pages(struct virtio_balloon *vb,
- unsigned long pages_to_free)
-{
- unsigned long pages_freed = 0;
-
- /*
- * One invocation of leak_balloon can deflate at most
- * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it
- * multiple times to deflate pages till reaching pages_to_free.
- */
- while (vb->num_pages && pages_freed < pages_to_free)
- pages_freed += leak_balloon_pages(vb,
- pages_to_free - pages_freed);
-
- update_balloon_size(vb);
-
- return pages_freed;
-}
-
static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker,
struct shrink_control *sc)
{
- unsigned long pages_to_free, pages_freed = 0;
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- pages_to_free = sc->nr_to_scan;
-
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
- pages_freed = shrink_free_pages(vb, pages_to_free);
-
- if (pages_freed >= pages_to_free)
- return pages_freed;
-
- pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed);
-
- return pages_freed;
+ return shrink_free_pages(vb, sc->nr_to_scan);
}
static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
@@ -839,12 +850,22 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker,
{
struct virtio_balloon *vb = container_of(shrinker,
struct virtio_balloon, shrinker);
- unsigned long count;
- count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE;
- count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+ return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES;
+}
+
+static int virtio_balloon_oom_notify(struct notifier_block *nb,
+ unsigned long dummy, void *parm)
+{
+ struct virtio_balloon *vb = container_of(nb,
+ struct virtio_balloon, oom_nb);
+ unsigned long *freed = parm;
+
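+ /* leak_balloon() works in 4k balloon pages; convert the result
+  * back to native pages for the OOM notifier chain.
+  */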
+ *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) /
+ VIRTIO_BALLOON_PAGES_PER_PAGE;
+ update_balloon_size(vb);
- return count;
+ return NOTIFY_OK;
}
static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb)
@@ -864,7 +885,6 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb)
static int virtballoon_probe(struct virtio_device *vdev)
{
struct virtio_balloon *vb;
- __u32 poison_val;
int err;
if (!vdev->config->get) {
@@ -930,27 +950,65 @@ static int virtballoon_probe(struct virtio_device *vdev)
VIRTIO_BALLOON_CMD_ID_STOP);
spin_lock_init(&vb->free_page_list_lock);
INIT_LIST_HEAD(&vb->free_page_list);
- if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
- memset(&poison_val, PAGE_POISON, sizeof(poison_val));
- virtio_cwrite(vb->vdev, struct virtio_balloon_config,
- poison_val, &poison_val);
- }
- }
- /*
- * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a
- * shrinker needs to be registered to relieve memory pressure.
- */
- if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ /*
+ * We're allowed to reuse any free pages, even if they are
+ * still to be processed by the host.
+ */
err = virtio_balloon_register_shrinker(vb);
if (err)
goto out_del_balloon_wq;
}
+
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) {
+ vb->oom_nb.notifier_call = virtio_balloon_oom_notify;
+ vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY;
+ err = register_oom_notifier(&vb->oom_nb);
+ if (err < 0)
+ goto out_unregister_shrinker;
+ }
+
+ if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) {
+ /* Start with poison val of 0 representing general init */
+ __u32 poison_val = 0;
+
+ /*
+ * Let the hypervisor know that we are expecting a
+ * specific value to be written back in balloon pages.
+ */
+ if (!want_init_on_free())
+ memset(&poison_val, PAGE_POISON, sizeof(poison_val));
+
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config,
+ poison_val, &poison_val);
+ }
+
+ vb->pr_dev_info.report = virtballoon_free_page_report;
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) {
+ unsigned int capacity;
+
+ capacity = virtqueue_get_vring_size(vb->reporting_vq);
+ if (capacity < PAGE_REPORTING_CAPACITY) {
+ err = -ENOSPC;
+ goto out_unregister_oom;
+ }
+
+ err = page_reporting_register(&vb->pr_dev_info);
+ if (err)
+ goto out_unregister_oom;
+ }
+
virtio_device_ready(vdev);
if (towards_target(vb))
virtballoon_changed(vdev);
return 0;
+out_unregister_oom:
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ unregister_oom_notifier(&vb->oom_nb);
+out_unregister_shrinker:
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
+ virtio_balloon_unregister_shrinker(vb);
out_del_balloon_wq:
if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
destroy_workqueue(vb->balloon_wq);
@@ -989,7 +1047,11 @@ static void virtballoon_remove(struct virtio_device *vdev)
{
struct virtio_balloon *vb = vdev->priv;
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING))
+ page_reporting_unregister(&vb->pr_dev_info);
if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+ unregister_oom_notifier(&vb->oom_nb);
+ if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT))
virtio_balloon_unregister_shrinker(vb);
spin_lock_irq(&vb->stop_update_lock);
vb->stop_update = true;
@@ -1045,7 +1107,10 @@ static int virtballoon_restore(struct virtio_device *vdev)
static int virtballoon_validate(struct virtio_device *vdev)
{
- if (!page_poisoning_enabled())
+ /* Tell the host whether we care about poisoned pages. */
+ if (!want_init_on_free() &&
+ (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) ||
+ !page_poisoning_enabled()))
__virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON);
__virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM);
@@ -1058,6 +1123,7 @@ static unsigned int features[] = {
VIRTIO_BALLOON_F_DEFLATE_ON_OOM,
VIRTIO_BALLOON_F_FREE_PAGE_HINT,
VIRTIO_BALLOON_F_PAGE_POISON,
+ VIRTIO_BALLOON_F_REPORTING,
};
static struct virtio_driver virtio_balloon_driver = {
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
new file mode 100644
index 000000000000..c30eb55030be
--- /dev/null
+++ b/drivers/virtio/virtio_vdpa.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VIRTIO based driver for vDPA device
+ *
+ * Copyright (c) 2020, Red Hat. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/virtio.h>
+#include <linux/vdpa.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+#define MOD_VERSION "0.1"
+#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define MOD_DESC "vDPA bus driver for virtio devices"
+#define MOD_LICENSE "GPL v2"
+
+struct virtio_vdpa_device {
+ struct virtio_device vdev;
+ struct vdpa_device *vdpa;
+ u64 features;
+
+ /* The lock to protect virtqueue list */
+ spinlock_t lock;
+ /* List of virtio_vdpa_vq_info */
+ struct list_head virtqueues;
+};
+
+struct virtio_vdpa_vq_info {
+ /* the actual virtqueue */
+ struct virtqueue *vq;
+
+ /* the list node for the virtqueues list */
+ struct list_head node;
+};
+
+static inline struct virtio_vdpa_device *
+to_virtio_vdpa_device(struct virtio_device *dev)
+{
+ return container_of(dev, struct virtio_vdpa_device, vdev);
+}
+
+static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev)
+{
+ return to_virtio_vdpa_device(vdev)->vdpa;
+}
+
+static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset,
+ void *buf, unsigned len)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->get_config(vdpa, offset, buf, len);
+}
+
+static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
+ const void *buf, unsigned len)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->set_config(vdpa, offset, buf, len);
+}
+
+static u32 virtio_vdpa_generation(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ if (ops->get_generation)
+ return ops->get_generation(vdpa);
+
+ return 0;
+}
+
+static u8 virtio_vdpa_get_status(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_status(vdpa);
+}
+
+static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->set_status(vdpa, status);
+}
+
+static void virtio_vdpa_reset(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->set_status(vdpa, 0);
+}
+
+static bool virtio_vdpa_notify(struct virtqueue *vq)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ ops->kick_vq(vdpa, vq->index);
+
+ return true;
+}
+
+static irqreturn_t virtio_vdpa_config_cb(void *private)
+{
+ struct virtio_vdpa_device *vd_dev = private;
+
+ virtio_config_changed(&vd_dev->vdev);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t virtio_vdpa_virtqueue_cb(void *private)
+{
+ struct virtio_vdpa_vq_info *info = private;
+
+ return vring_interrupt(0, info->vq);
+}
+
+static struct virtqueue *
+virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
+ void (*callback)(struct virtqueue *vq),
+ const char *name, bool ctx)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct virtio_vdpa_vq_info *info;
+ struct vdpa_callback cb;
+ struct virtqueue *vq;
+ u64 desc_addr, driver_addr, device_addr;
+ unsigned long flags;
+ u32 align, num;
+ int err;
+
+ if (!name)
+ return NULL;
+
+ /* Queue shouldn't already be set up. */
+ if (ops->get_vq_ready(vdpa, index))
+ return ERR_PTR(-ENOENT);
+
+ /* Allocate and fill out our active queue description */
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ num = ops->get_vq_num_max(vdpa);
+ if (num == 0) {
+ err = -ENOENT;
+ goto error_new_virtqueue;
+ }
+
+ /* Create the vring */
+ align = ops->get_vq_align(vdpa);
+ vq = vring_create_virtqueue(index, num, align, vdev,
+ true, true, ctx,
+ virtio_vdpa_notify, callback, name);
+ if (!vq) {
+ err = -ENOMEM;
+ goto error_new_virtqueue;
+ }
+
+ /* Set up the virtqueue callback */
+ cb.callback = virtio_vdpa_virtqueue_cb;
+ cb.private = info;
+ ops->set_vq_cb(vdpa, index, &cb);
+ ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq));
+
+ desc_addr = virtqueue_get_desc_addr(vq);
+ driver_addr = virtqueue_get_avail_addr(vq);
+ device_addr = virtqueue_get_used_addr(vq);
+
+ if (ops->set_vq_address(vdpa, index,
+ desc_addr, driver_addr,
+ device_addr)) {
+ err = -EINVAL;
+ goto err_vq;
+ }
+
+ ops->set_vq_ready(vdpa, index, 1);
+
+ vq->priv = info;
+ info->vq = vq;
+
+ spin_lock_irqsave(&vd_dev->lock, flags);
+ list_add(&info->node, &vd_dev->virtqueues);
+ spin_unlock_irqrestore(&vd_dev->lock, flags);
+
+ return vq;
+
+err_vq:
+ vring_del_virtqueue(vq);
+error_new_virtqueue:
+ ops->set_vq_ready(vdpa, index, 0);
+ /* The vDPA driver should make sure the vq is stopped here */
+ WARN_ON(ops->get_vq_ready(vdpa, index));
+ kfree(info);
+ return ERR_PTR(err);
+}
+
+static void virtio_vdpa_del_vq(struct virtqueue *vq)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev);
+ struct vdpa_device *vdpa = vd_dev->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct virtio_vdpa_vq_info *info = vq->priv;
+ unsigned int index = vq->index;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vd_dev->lock, flags);
+ list_del(&info->node);
+ spin_unlock_irqrestore(&vd_dev->lock, flags);
+
+ /* Select and deactivate the queue */
+ ops->set_vq_ready(vdpa, index, 0);
+ WARN_ON(ops->get_vq_ready(vdpa, index));
+
+ vring_del_virtqueue(vq);
+
+ kfree(info);
+}
+
+static void virtio_vdpa_del_vqs(struct virtio_device *vdev)
+{
+ struct virtqueue *vq, *n;
+
+ list_for_each_entry_safe(vq, n, &vdev->vqs, list)
+ virtio_vdpa_del_vq(vq);
+}
+
+static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char * const names[],
+ const bool *ctx,
+ struct irq_affinity *desc)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct vdpa_callback cb;
+ int i, err, queue_idx = 0;
+
+ for (i = 0; i < nvqs; ++i) {
+ if (!names[i]) {
+ vqs[i] = NULL;
+ continue;
+ }
+
+ vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++,
+ callbacks[i], names[i], ctx ?
+ ctx[i] : false);
+ if (IS_ERR(vqs[i])) {
+ err = PTR_ERR(vqs[i]);
+ goto err_setup_vq;
+ }
+ }
+
+ cb.callback = virtio_vdpa_config_cb;
+ cb.private = vd_dev;
+ ops->set_config_cb(vdpa, &cb);
+
+ return 0;
+
+err_setup_vq:
+ virtio_vdpa_del_vqs(vdev);
+ return err;
+}
+
+static u64 virtio_vdpa_get_features(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ return ops->get_features(vdpa);
+}
+
+static int virtio_vdpa_finalize_features(struct virtio_device *vdev)
+{
+ struct vdpa_device *vdpa = vd_get_vdpa(vdev);
+ const struct vdpa_config_ops *ops = vdpa->config;
+
+ /* Give virtio_ring a chance to accept features. */
+ vring_transport_features(vdev);
+
+ return ops->set_features(vdpa, vdev->features);
+}
+
+static const char *virtio_vdpa_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev);
+ struct vdpa_device *vdpa = vd_dev->vdpa;
+
+ return dev_name(&vdpa->dev);
+}
+
+static const struct virtio_config_ops virtio_vdpa_config_ops = {
+ .get = virtio_vdpa_get,
+ .set = virtio_vdpa_set,
+ .generation = virtio_vdpa_generation,
+ .get_status = virtio_vdpa_get_status,
+ .set_status = virtio_vdpa_set_status,
+ .reset = virtio_vdpa_reset,
+ .find_vqs = virtio_vdpa_find_vqs,
+ .del_vqs = virtio_vdpa_del_vqs,
+ .get_features = virtio_vdpa_get_features,
+ .finalize_features = virtio_vdpa_finalize_features,
+ .bus_name = virtio_vdpa_bus_name,
+};
+
+static void virtio_vdpa_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev =
+ container_of(_d, struct virtio_device, dev);
+ struct virtio_vdpa_device *vd_dev =
+ container_of(vdev, struct virtio_vdpa_device, vdev);
+
+ kfree(vd_dev);
+}
+
+static int virtio_vdpa_probe(struct vdpa_device *vdpa)
+{
+ const struct vdpa_config_ops *ops = vdpa->config;
+ struct virtio_vdpa_device *vd_dev, *reg_dev = NULL;
+ int ret = -EINVAL;
+
+ vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL);
+ if (!vd_dev)
+ return -ENOMEM;
+
+ vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa);
+ vd_dev->vdev.dev.release = virtio_vdpa_release_dev;
+ vd_dev->vdev.config = &virtio_vdpa_config_ops;
+ vd_dev->vdpa = vdpa;
+ INIT_LIST_HEAD(&vd_dev->virtqueues);
+ spin_lock_init(&vd_dev->lock);
+
+ vd_dev->vdev.id.device = ops->get_device_id(vdpa);
+ if (vd_dev->vdev.id.device == 0)
+ goto err;
+
+ vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa);
+ ret = register_virtio_device(&vd_dev->vdev);
+ reg_dev = vd_dev;
+ if (ret)
+ goto err;
+
+ vdpa_set_drvdata(vdpa, vd_dev);
+
+ return 0;
+
+err:
+ if (reg_dev)
+ put_device(&vd_dev->vdev.dev);
+ else
+ kfree(vd_dev);
+ return ret;
+}
+
+static void virtio_vdpa_remove(struct vdpa_device *vdpa)
+{
+ struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa);
+
+ unregister_virtio_device(&vd_dev->vdev);
+}
+
+static struct vdpa_driver virtio_vdpa_driver = {
+ .driver = {
+ .name = "virtio_vdpa",
+ },
+ .probe = virtio_vdpa_probe,
+ .remove = virtio_vdpa_remove,
+};
+
+module_vdpa_driver(virtio_vdpa_driver);
+
+MODULE_VERSION(MOD_VERSION);
+MODULE_LICENSE(MOD_LICENSE);
+MODULE_AUTHOR(MOD_AUTHOR);
+MODULE_DESCRIPTION(MOD_DESC);
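Every virtio_config_ops callback above recovers the wrapping virtio_vdpa_device from the embedded virtio_device with container_of() and delegates the real work to the parent's vdpa_config_ops. A minimal, self-contained sketch of that embedding pattern follows; the names (inner, outer, to_outer) are illustrative stand-ins, not kernel API:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner {			/* stands in for struct virtio_device */
	int id;
};

struct outer {			/* stands in for struct virtio_vdpa_device */
	int extra_state;
	struct inner vdev;	/* embedded, not pointed to */
};

static struct outer *to_outer(struct inner *dev)
{
	return container_of(dev, struct outer, vdev);
}

int main(void)
{
	struct outer o = { .extra_state = 42, .vdev = { .id = 7 } };
	struct inner *core_view = &o.vdev;	/* what the core hands back */

	printf("extra_state=%d\n", to_outer(core_view)->extra_state);
	return 0;
}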
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 9ea2b43d4b01..0663c604bd64 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -584,6 +584,14 @@ config DAVINCI_WATCHDOG
NOTE: once enabled, this timer cannot be disabled.
Say N if you are unsure.
+config K3_RTI_WATCHDOG
+ tristate "Texas Instruments K3 RTI watchdog"
+ depends on ARCH_K3 || COMPILE_TEST
+ select WATCHDOG_CORE
+ help
+ Say Y here if you want to include support for the K3 watchdog
+ timer (RTI module) available in the K3 generation of processors.
+
config ORION_WATCHDOG
tristate "Orion watchdog"
depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110)
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 2ee352bf3372..6de2e4ceef19 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -57,6 +57,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o
obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o
obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
+obj-$(CONFIG_K3_RTI_WATCHDOG) += rti_wdt.o
obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index f8d58bf0bf66..1fe472f56cb3 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -244,6 +244,11 @@ static const struct regmap_config imx2_wdt_regmap_config = {
.max_register = 0x8,
};
+static void imx2_wdt_action(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -292,6 +297,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = devm_add_action_or_reset(dev, imx2_wdt_action, wdev->clk);
+ if (ret)
+ return ret;
+
regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val);
wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0;
@@ -315,32 +324,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
*/
regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
- ret = watchdog_register_device(wdog);
- if (ret)
- goto disable_clk;
-
- dev_info(dev, "timeout %d sec (nowayout=%d)\n",
- wdog->timeout, nowayout);
-
- return 0;
-
-disable_clk:
- clk_disable_unprepare(wdev->clk);
- return ret;
-}
-
-static int __exit imx2_wdt_remove(struct platform_device *pdev)
-{
- struct watchdog_device *wdog = platform_get_drvdata(pdev);
- struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
-
- watchdog_unregister_device(wdog);
-
- if (imx2_wdt_is_running(wdev)) {
- imx2_wdt_ping(wdog);
- dev_crit(&pdev->dev, "Device removed: Expect reboot!\n");
- }
- return 0;
+ return devm_watchdog_register_device(dev, wdog);
}
static void imx2_wdt_shutdown(struct platform_device *pdev)
@@ -417,7 +401,6 @@ static const struct of_device_id imx2_wdt_dt_ids[] = {
MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids);
static struct platform_driver imx2_wdt_driver = {
- .remove = __exit_p(imx2_wdt_remove),
.shutdown = imx2_wdt_shutdown,
.driver = {
.name = DRIVER_NAME,
diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c
index 11b9e7c6b7f5..7993c8c41b3a 100644
--- a/drivers/watchdog/imx7ulp_wdt.c
+++ b/drivers/watchdog/imx7ulp_wdt.c
@@ -4,7 +4,6 @@
*/
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c
index 8ed89f032ebf..60a32469f7de 100644
--- a/drivers/watchdog/imx_sc_wdt.c
+++ b/drivers/watchdog/imx_sc_wdt.c
@@ -6,13 +6,11 @@
#include <linux/arm-smccc.h>
#include <linux/firmware/imx/sci.h>
#include <linux/io.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/of.h>
#include <linux/platform_device.h>
-#include <linux/reboot.h>
#include <linux/watchdog.h>
#define DEFAULT_TIMEOUT 60
diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c
index 9c773c3d6d5d..765577f11c8d 100644
--- a/drivers/watchdog/npcm_wdt.c
+++ b/drivers/watchdog/npcm_wdt.c
@@ -103,30 +103,29 @@ static int npcm_wdt_stop(struct watchdog_device *wdd)
return 0;
}
-
static int npcm_wdt_set_timeout(struct watchdog_device *wdd,
unsigned int timeout)
{
if (timeout < 2)
wdd->timeout = 1;
else if (timeout < 3)
- wdd->timeout = 2;
+ wdd->timeout = 2;
else if (timeout < 6)
- wdd->timeout = 5;
+ wdd->timeout = 5;
else if (timeout < 11)
- wdd->timeout = 10;
+ wdd->timeout = 10;
else if (timeout < 22)
- wdd->timeout = 21;
+ wdd->timeout = 21;
else if (timeout < 44)
- wdd->timeout = 43;
+ wdd->timeout = 43;
else if (timeout < 87)
- wdd->timeout = 86;
+ wdd->timeout = 86;
else if (timeout < 173)
- wdd->timeout = 172;
+ wdd->timeout = 172;
else if (timeout < 688)
- wdd->timeout = 687;
+ wdd->timeout = 687;
else
- wdd->timeout = 2750;
+ wdd->timeout = 2750;
if (watchdog_active(wdd))
npcm_wdt_start(wdd);
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 8e6dfe76f9c9..4ddb4ea2e4a3 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -52,7 +52,7 @@
#define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT)
static bool nowayout = WATCHDOG_NOWAYOUT;
-static int heartbeat = -1; /* module parameter (seconds) */
+static int heartbeat; /* module parameter (seconds) */
struct orion_watchdog;
diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c
index 1213179f863c..0937b8d33104 100644
--- a/drivers/watchdog/pm8916_wdt.c
+++ b/drivers/watchdog/pm8916_wdt.c
@@ -192,6 +192,7 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
wdt->wdev.timeout = PM8916_WDT_DEFAULT_TIMEOUT;
wdt->wdev.pretimeout = 0;
watchdog_set_drvdata(&wdt->wdev, wdt);
+ platform_set_drvdata(pdev, wdt);
watchdog_init_timeout(&wdt->wdev, 0, dev);
pm8916_wdt_configure_timers(&wdt->wdev);
@@ -199,6 +200,29 @@ static int pm8916_wdt_probe(struct platform_device *pdev)
return devm_watchdog_register_device(dev, &wdt->wdev);
}
+static int __maybe_unused pm8916_wdt_suspend(struct device *dev)
+{
+ struct pm8916_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdev))
+ return pm8916_wdt_stop(&wdt->wdev);
+
+ return 0;
+}
+
+static int __maybe_unused pm8916_wdt_resume(struct device *dev)
+{
+ struct pm8916_wdt *wdt = dev_get_drvdata(dev);
+
+ if (watchdog_active(&wdt->wdev))
+ return pm8916_wdt_start(&wdt->wdev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pm8916_wdt_pm_ops, pm8916_wdt_suspend,
+ pm8916_wdt_resume);
+
static const struct of_device_id pm8916_wdt_id_table[] = {
{ .compatible = "qcom,pm8916-wdt" },
{ }
@@ -210,6 +234,7 @@ static struct platform_driver pm8916_wdt_driver = {
.driver = {
.name = "pm8916-wdt",
.of_match_table = of_match_ptr(pm8916_wdt_id_table),
+ .pm = &pm8916_wdt_pm_ops,
},
};
module_platform_driver(pm8916_wdt_driver);
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index eb47fe5ed280..ab7465d186fd 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -40,6 +40,11 @@ static const u32 reg_offset_data_kpss[] = {
[WDT_BITE_TIME] = 0x14,
};
+struct qcom_wdt_match_data {
+ const u32 *offset;
+ bool pretimeout;
+};
+
struct qcom_wdt {
struct watchdog_device wdd;
unsigned long rate;
@@ -179,19 +184,29 @@ static void qcom_clk_disable_unprepare(void *data)
clk_disable_unprepare(data);
}
+static const struct qcom_wdt_match_data match_data_apcs_tmr = {
+ .offset = reg_offset_data_apcs_tmr,
+ .pretimeout = false,
+};
+
+static const struct qcom_wdt_match_data match_data_kpss = {
+ .offset = reg_offset_data_kpss,
+ .pretimeout = true,
+};
+
static int qcom_wdt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct qcom_wdt *wdt;
struct resource *res;
struct device_node *np = dev->of_node;
- const u32 *regs;
+ const struct qcom_wdt_match_data *data;
u32 percpu_offset;
int irq, ret;
struct clk *clk;
- regs = of_device_get_match_data(dev);
- if (!regs) {
+ data = of_device_get_match_data(dev);
+ if (!data) {
dev_err(dev, "Unsupported QCOM WDT module\n");
return -ENODEV;
}
@@ -247,9 +262,8 @@ static int qcom_wdt_probe(struct platform_device *pdev)
/* check if there is pretimeout support */
irq = platform_get_irq_optional(pdev, 0);
- if (irq > 0) {
- ret = devm_request_irq(dev, irq, qcom_wdt_isr,
- IRQF_TRIGGER_RISING,
+ if (data->pretimeout && irq > 0) {
+ ret = devm_request_irq(dev, irq, qcom_wdt_isr, 0,
"wdt_bark", &wdt->wdd);
if (ret)
return ret;
@@ -267,7 +281,7 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.min_timeout = 1;
wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
wdt->wdd.parent = dev;
- wdt->layout = regs;
+ wdt->layout = data->offset;
if (readl(wdt_addr(wdt, WDT_STS)) & 1)
wdt->wdd.bootstatus = WDIOF_CARDRESET;
@@ -311,9 +325,9 @@ static int __maybe_unused qcom_wdt_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qcom_wdt_pm_ops, qcom_wdt_suspend, qcom_wdt_resume);
static const struct of_device_id qcom_wdt_of_table[] = {
- { .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr },
- { .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr },
- { .compatible = "qcom,kpss-wdt", .data = reg_offset_data_kpss },
+ { .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr },
+ { .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr },
+ { .compatible = "qcom,kpss-wdt", .data = &match_data_kpss },
{ },
};
MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
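The qcom-wdt rework above replaces raw register-offset arrays with a match-data struct, so each compatible string can also advertise whether pretimeout is supported. A standalone sketch of that per-compatible lookup pattern, with a plain table standing in for of_device_get_match_data():

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct match_data {
	const char *compatible;
	bool pretimeout;
};

static const struct match_data table[] = {
	{ "qcom,kpss-timer", false },
	{ "qcom,scss-timer", false },
	{ "qcom,kpss-wdt",   true  },
};

static const struct match_data *get_match_data(const char *compat)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].compatible, compat))
			return &table[i];
	return NULL;
}

int main(void)
{
	const struct match_data *d = get_match_data("qcom,kpss-wdt");

	if (d)
		printf("pretimeout supported: %s\n", d->pretimeout ? "yes" : "no");
	return 0;
}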
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
new file mode 100644
index 000000000000..d456dd72d99a
--- /dev/null
+++ b/drivers/watchdog/rti_wdt.c
@@ -0,0 +1,255 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Watchdog driver for the K3 RTI module
+ *
+ * (c) Copyright 2019-2020 Texas Instruments Inc.
+ * All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/watchdog.h>
+
+#define DEFAULT_HEARTBEAT 60
+
+/* Max heartbeat is calculated assuming a 32 kHz source clock */
+#define MAX_HEARTBEAT 1000
+
+/* Timer register set definition */
+#define RTIDWDCTRL 0x90
+#define RTIDWDPRLD 0x94
+#define RTIWDSTATUS 0x98
+#define RTIWDKEY 0x9c
+#define RTIDWDCNTR 0xa0
+#define RTIWWDRXCTRL 0xa4
+#define RTIWWDSIZECTRL 0xa8
+
+#define RTIWWDRX_NMI 0xa
+
+#define RTIWWDSIZE_50P 0x50
+
+#define WDENABLE_KEY 0xa98559da
+
+#define WDKEY_SEQ0 0xe51a
+#define WDKEY_SEQ1 0xa35c
+
+#define WDT_PRELOAD_SHIFT 13
+
+#define WDT_PRELOAD_MAX 0xfff
+
+#define DWDST BIT(1)
+
+static int heartbeat;
+
+/*
+ * struct rti_wdt_device - driver data for each WDT device
+ * @base: base I/O address of the WDT device
+ * @freq: source clock frequency of the WDT
+ * @wdd: watchdog device as seen by the watchdog core
+ */
+struct rti_wdt_device {
+ void __iomem *base;
+ unsigned long freq;
+ struct watchdog_device wdd;
+};
+
+static int rti_wdt_start(struct watchdog_device *wdd)
+{
+ u32 timer_margin;
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+ /* set timeout period */
+ timer_margin = (u64)wdd->timeout * wdt->freq;
+ timer_margin >>= WDT_PRELOAD_SHIFT;
+ if (timer_margin > WDT_PRELOAD_MAX)
+ timer_margin = WDT_PRELOAD_MAX;
+ writel_relaxed(timer_margin, wdt->base + RTIDWDPRLD);
+
+ /*
+ * RTI only supports a windowed mode, where the watchdog can only
+ * be petted during the open window; neither too early nor too late.
+ * The HW configuration options only allow the open window to span
+ * at most 50% of the timeout period; we obviously want the open
+ * window as large as possible, so we select the 50% option. To
+ * avoid glitches we also add a 5% safety margin, so we set
+ * min_hw_heartbeat_ms to 55% of the timeout period (e.g. 33 s for
+ * the default 60 s timeout).
+ */
+ wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20;
+
+ /* Generate NMI when wdt expires */
+ writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL);
+
+ /* Open window size 50%; this is the largest window size available */
+ writel_relaxed(RTIWWDSIZE_50P, wdt->base + RTIWWDSIZECTRL);
+
+ readl_relaxed(wdt->base + RTIWWDSIZECTRL);
+
+ /* enable watchdog */
+ writel_relaxed(WDENABLE_KEY, wdt->base + RTIDWDCTRL);
+ return 0;
+}
+
+static int rti_wdt_ping(struct watchdog_device *wdd)
+{
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+ /* put watchdog in service state */
+ writel_relaxed(WDKEY_SEQ0, wdt->base + RTIWDKEY);
+ /* put watchdog in active state */
+ writel_relaxed(WDKEY_SEQ1, wdt->base + RTIWDKEY);
+
+ return 0;
+}
+
+static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd)
+{
+ u64 timer_counter;
+ u32 val;
+ struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd);
+
+ /* if timeout has occurred then return 0 */
+ val = readl_relaxed(wdt->base + RTIWDSTATUS);
+ if (val & DWDST)
+ return 0;
+
+ timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR);
+
+ do_div(timer_counter, wdt->freq);
+
+ return timer_counter;
+}
+
+static const struct watchdog_info rti_wdt_info = {
+ .options = WDIOF_KEEPALIVEPING,
+ .identity = "K3 RTI Watchdog",
+};
+
+static const struct watchdog_ops rti_wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = rti_wdt_start,
+ .ping = rti_wdt_ping,
+ .get_timeleft = rti_wdt_get_timeleft,
+};
+
+static int rti_wdt_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct device *dev = &pdev->dev;
+ struct resource *wdt_mem;
+ struct watchdog_device *wdd;
+ struct rti_wdt_device *wdt;
+ struct clk *clk;
+
+ wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
+ if (!wdt)
+ return -ENOMEM;
+
+ clk = clk_get(dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get clock\n");
+ return PTR_ERR(clk);
+ }
+
+ wdt->freq = clk_get_rate(clk);
+
+ clk_put(clk);
+
+ if (!wdt->freq) {
+ dev_err(dev, "Failed to get fck rate.\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "runtime pm failed\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, wdt);
+
+ wdd = &wdt->wdd;
+ wdd->info = &rti_wdt_info;
+ wdd->ops = &rti_wdt_ops;
+ wdd->min_timeout = 1;
+ wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) /
+ wdt->freq * 1000;
+ wdd->timeout = DEFAULT_HEARTBEAT;
+ wdd->parent = dev;
+
+ watchdog_init_timeout(wdd, heartbeat, dev);
+
+ watchdog_set_drvdata(wdd, wdt);
+ watchdog_set_nowayout(wdd, 1);
+ watchdog_set_restart_priority(wdd, 128);
+
+ wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ wdt->base = devm_ioremap_resource(dev, wdt_mem);
+ if (IS_ERR(wdt->base)) {
+ ret = PTR_ERR(wdt->base);
+ goto err_iomap;
+ }
+
+ ret = watchdog_register_device(wdd);
+ if (ret) {
+ dev_err(dev, "cannot register watchdog device\n");
+ goto err_iomap;
+ }
+
+ return 0;
+
+err_iomap:
+ pm_runtime_put_sync(&pdev->dev);
+
+ return ret;
+}
+
+static int rti_wdt_remove(struct platform_device *pdev)
+{
+ struct rti_wdt_device *wdt = platform_get_drvdata(pdev);
+
+ watchdog_unregister_device(&wdt->wdd);
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id rti_wdt_of_match[] = {
+ { .compatible = "ti,j7-rti-wdt", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rti_wdt_of_match);
+
+static struct platform_driver rti_wdt_driver = {
+ .driver = {
+ .name = "rti-wdt",
+ .of_match_table = rti_wdt_of_match,
+ },
+ .probe = rti_wdt_probe,
+ .remove = rti_wdt_remove,
+};
+
+module_platform_driver(rti_wdt_driver);
+
+MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>");
+MODULE_DESCRIPTION("K3 RTI Watchdog Driver");
+
+module_param(heartbeat, int, 0);
+MODULE_PARM_DESC(heartbeat,
+ "Watchdog heartbeat period in seconds from 1 to "
+ __MODULE_STRING(MAX_HEARTBEAT) ", default "
+ __MODULE_STRING(DEFAULT_HEARTBEAT));
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rti-wdt");
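The preload and window arithmetic in rti_wdt_start() reduces to a few fixed formulas. A standalone sketch below evaluates them for the driver defaults, assuming the 32 kHz source clock the MAX_HEARTBEAT comment refers to (the frequency and timeout values here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define WDT_PRELOAD_SHIFT	13
#define WDT_PRELOAD_MAX		0xfff

int main(void)
{
	unsigned long freq = 32768;	/* assumed fck rate, in Hz */
	unsigned int timeout = 60;	/* DEFAULT_HEARTBEAT, in seconds */

	/* Preload value programmed into RTIDWDPRLD, clamped to 12 bits. */
	uint64_t margin = (uint64_t)timeout * freq >> WDT_PRELOAD_SHIFT;
	if (margin > WDT_PRELOAD_MAX)
		margin = WDT_PRELOAD_MAX;

	/* Largest programmable period: max preload ticks back to seconds. */
	unsigned long max_hb = ((uint64_t)WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) / freq;

	/* 50% open window + 5% margin => earliest ping at 55% of timeout. */
	unsigned long min_ping_ms = 11UL * timeout * 1000 / 20;

	printf("preload=%llu max_heartbeat=%lus min_ping=%lums\n",
	       (unsigned long long)margin, max_hb, min_ping_ms);
	return 0;
}

For the defaults this prints preload=240, max_heartbeat=1023s (hence MAX_HEARTBEAT 1000) and min_ping=33000ms.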
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 861daf4f37b2..423844757812 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -39,6 +39,10 @@
static DEFINE_IDA(watchdog_ida);
+static int stop_on_reboot = -1;
+module_param(stop_on_reboot, int, 0444);
+MODULE_PARM_DESC(stop_on_reboot, "Stop watchdogs on reboot (0=keep watching, 1=stop)");
+
/*
* Deferred Registration infrastructure.
*
@@ -254,6 +258,14 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
}
+ /* Module parameter to force watchdog policy on reboot. */
+ if (stop_on_reboot != -1) {
+ if (stop_on_reboot)
+ set_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
+ else
+ clear_bit(WDOG_STOP_ON_REBOOT, &wdd->status);
+ }
+
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
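stop_on_reboot is deliberately a tristate: -1 (the default) leaves each driver's own WDOG_STOP_ON_REBOOT choice untouched, while 0 and 1 force the policy either way. A standalone sketch of that override pattern; the bit value is illustrative, not the kernel's:

#include <stdio.h>

#define WDOG_STOP_ON_REBOOT	0x1UL	/* illustrative bit position */

static unsigned long apply_override(unsigned long status, int param)
{
	if (param != -1) {		/* -1: keep the driver default */
		if (param)
			status |= WDOG_STOP_ON_REBOOT;
		else
			status &= ~WDOG_STOP_ON_REBOOT;
	}
	return status;
}

int main(void)
{
	printf("%lx %lx %lx\n",
	       apply_override(0x1, -1),	/* keep driver default */
	       apply_override(0x0, 1),	/* force stop-on-reboot */
	       apply_override(0x1, 0));	/* force keep-watching */
	return 0;
}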
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index 8b5c742f24e8..7e4cd34a8c20 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -282,6 +282,7 @@ static int watchdog_start(struct watchdog_device *wdd)
if (err == 0) {
set_bit(WDOG_ACTIVE, &wdd->status);
wd_data->last_keepalive = started_at;
+ wd_data->last_hw_keepalive = started_at;
watchdog_update_worker(wdd);
}
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index 030ce240620d..d96ad8f38bd2 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -13,7 +13,6 @@
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
-#include <linux/gpio.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
@@ -29,7 +28,6 @@ struct wm831x_wdt_drvdata {
struct watchdog_device wdt;
struct wm831x *wm831x;
struct mutex lock;
- int update_gpio;
int update_state;
};
@@ -103,14 +101,6 @@ static int wm831x_wdt_ping(struct watchdog_device *wdt_dev)
mutex_lock(&driver_data->lock);
- if (driver_data->update_gpio) {
- gpio_set_value_cansleep(driver_data->update_gpio,
- driver_data->update_state);
- driver_data->update_state = !driver_data->update_state;
- ret = 0;
- goto out;
- }
-
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
if (!(reg & WM831X_WDOG_RST_SRC)) {
@@ -239,23 +229,6 @@ static int wm831x_wdt_probe(struct platform_device *pdev)
reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT;
reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT;
- if (pdata->update_gpio) {
- ret = devm_gpio_request_one(dev, pdata->update_gpio,
- GPIOF_OUT_INIT_LOW,
- "Watchdog update");
- if (ret < 0) {
- dev_err(wm831x->dev,
- "Failed to request update GPIO: %d\n",
- ret);
- return ret;
- }
-
- driver_data->update_gpio = pdata->update_gpio;
-
- /* Make sure the watchdog takes hardware updates */
- reg |= WM831X_WDOG_RST_SRC;
- }
-
ret = wm831x_reg_unlock(wm831x);
if (ret == 0) {
ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg);
diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c
index 4a363a8b2d20..cab86a08456b 100644
--- a/drivers/watchdog/ziirave_wdt.c
+++ b/drivers/watchdog/ziirave_wdt.c
@@ -422,7 +422,7 @@ static int ziirave_firm_upload(struct watchdog_device *wdd,
static const struct watchdog_info ziirave_wdt_info = {
.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
- .identity = "Zodiac RAVE Watchdog",
+ .identity = "RAVE Switch Watchdog",
};
static const struct watchdog_ops ziirave_wdt_ops = {
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 8edef51c92e5..64df919a2111 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -53,37 +53,37 @@ static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}
-static void evtchn_2l_clear_pending(unsigned port)
+static void evtchn_2l_clear_pending(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}
-static void evtchn_2l_set_pending(unsigned port)
+static void evtchn_2l_set_pending(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
sync_set_bit(port, BM(&s->evtchn_pending[0]));
}
-static bool evtchn_2l_is_pending(unsigned port)
+static bool evtchn_2l_is_pending(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}
-static bool evtchn_2l_test_and_set_mask(unsigned port)
+static bool evtchn_2l_test_and_set_mask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
return sync_test_and_set_bit(port, BM(&s->evtchn_mask[0]));
}
-static void evtchn_2l_mask(unsigned port)
+static void evtchn_2l_mask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
sync_set_bit(port, BM(&s->evtchn_mask[0]));
}
-static void evtchn_2l_unmask(unsigned port)
+static void evtchn_2l_unmask(evtchn_port_t port)
{
struct shared_info *s = HYPERVISOR_shared_info;
unsigned int cpu = get_cpu();
@@ -173,7 +173,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
/* Timer interrupt has highest priority. */
irq = irq_from_virq(cpu, VIRQ_TIMER);
if (irq != -1) {
- unsigned int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
word_idx = evtchn / BITS_PER_LONG;
bit_idx = evtchn % BITS_PER_LONG;
if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
@@ -228,7 +228,7 @@ static void evtchn_2l_handle_events(unsigned cpu)
do {
xen_ulong_t bits;
- int port;
+ evtchn_port_t port;
bits = MASK_LSBS(pending_bits, bit_idx);
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 499eff7d3f65..3a791c8485d0 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -116,7 +116,7 @@ static void clear_evtchn_to_irq_all(void)
}
}
-static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
+static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
unsigned row;
unsigned col;
@@ -143,7 +143,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
return 0;
}
-int get_evtchn_to_irq(unsigned evtchn)
+int get_evtchn_to_irq(evtchn_port_t evtchn)
{
if (evtchn >= xen_evtchn_max_channels())
return -1;
@@ -162,7 +162,7 @@ struct irq_info *info_for_irq(unsigned irq)
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
enum xen_irq_type type,
- unsigned evtchn,
+ evtchn_port_t evtchn,
unsigned short cpu)
{
int ret;
@@ -184,7 +184,7 @@ static int xen_irq_info_common_setup(struct irq_info *info,
}
static int xen_irq_info_evtchn_setup(unsigned irq,
- unsigned evtchn)
+ evtchn_port_t evtchn)
{
struct irq_info *info = info_for_irq(irq);
@@ -193,7 +193,7 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
static int xen_irq_info_ipi_setup(unsigned cpu,
unsigned irq,
- unsigned evtchn,
+ evtchn_port_t evtchn,
enum ipi_vector ipi)
{
struct irq_info *info = info_for_irq(irq);
@@ -207,7 +207,7 @@ static int xen_irq_info_ipi_setup(unsigned cpu,
static int xen_irq_info_virq_setup(unsigned cpu,
unsigned irq,
- unsigned evtchn,
+ evtchn_port_t evtchn,
unsigned virq)
{
struct irq_info *info = info_for_irq(irq);
@@ -220,7 +220,7 @@ static int xen_irq_info_virq_setup(unsigned cpu,
}
static int xen_irq_info_pirq_setup(unsigned irq,
- unsigned evtchn,
+ evtchn_port_t evtchn,
unsigned pirq,
unsigned gsi,
uint16_t domid,
@@ -245,7 +245,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
/*
* Accessors for packed IRQ information.
*/
-unsigned int evtchn_from_irq(unsigned irq)
+evtchn_port_t evtchn_from_irq(unsigned irq)
{
if (WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq))
return 0;
@@ -253,7 +253,7 @@ unsigned int evtchn_from_irq(unsigned irq)
return info_for_irq(irq)->evtchn;
}
-unsigned irq_from_evtchn(unsigned int evtchn)
+unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
return get_evtchn_to_irq(evtchn);
}
@@ -304,7 +304,7 @@ unsigned cpu_from_irq(unsigned irq)
return info_for_irq(irq)->cpu;
}
-unsigned int cpu_from_evtchn(unsigned int evtchn)
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
int irq = get_evtchn_to_irq(evtchn);
unsigned ret = 0;
@@ -330,9 +330,9 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
{
- int irq = get_evtchn_to_irq(chn);
+ int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info = info_for_irq(irq);
BUG_ON(irq == -1);
@@ -354,7 +354,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
*/
void notify_remote_via_irq(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
notify_remote_via_evtchn(evtchn);
@@ -445,7 +445,7 @@ static void xen_free_irq(unsigned irq)
irq_free_desc(irq);
}
-static void xen_evtchn_close(unsigned int port)
+static void xen_evtchn_close(evtchn_port_t port)
{
struct evtchn_close close;
@@ -472,7 +472,7 @@ static void pirq_query_unmask(int irq)
static void eoi_pirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
int rc = 0;
@@ -508,7 +508,7 @@ static unsigned int __startup_pirq(unsigned int irq)
{
struct evtchn_bind_pirq bind_pirq;
struct irq_info *info = info_for_irq(irq);
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
int rc;
BUG_ON(info->type != IRQT_PIRQ);
@@ -561,7 +561,7 @@ static void shutdown_pirq(struct irq_data *data)
{
unsigned int irq = data->irq;
struct irq_info *info = info_for_irq(irq);
- unsigned evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
BUG_ON(info->type != IRQT_PIRQ);
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
static void __unbind_from_irq(unsigned int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
struct irq_info *info = irq_get_handler_data(irq);
if (info->refcnt > 0) {
@@ -827,7 +827,7 @@ int xen_pirq_from_irq(unsigned irq)
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
-int bind_evtchn_to_irq(unsigned int evtchn)
+int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
int irq;
int ret;
@@ -870,8 +870,8 @@ EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
- int evtchn, irq;
- int ret;
+ evtchn_port_t evtchn;
+ int ret, irq;
mutex_lock(&irq_mapping_update_lock);
@@ -909,7 +909,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
}
int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
- unsigned int remote_port)
+ evtchn_port_t remote_port)
{
struct evtchn_bind_interdomain bind_interdomain;
int err;
@@ -924,10 +924,11 @@ int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq);
-static int find_virq(unsigned int virq, unsigned int cpu)
+static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
{
struct evtchn_status status;
- int port, rc = -ENOENT;
+ evtchn_port_t port;
+ int rc = -ENOENT;
memset(&status, 0, sizeof(status));
for (port = 0; port < xen_evtchn_max_channels(); port++) {
@@ -939,7 +940,7 @@ static int find_virq(unsigned int virq, unsigned int cpu)
if (status.status != EVTCHNSTAT_virq)
continue;
if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
- rc = port;
+ *evtchn = port;
break;
}
}
@@ -962,7 +963,8 @@ EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
struct evtchn_bind_virq bind_virq;
- int evtchn, irq, ret;
+ evtchn_port_t evtchn = 0;
+ int irq, ret;
mutex_lock(&irq_mapping_update_lock);
@@ -988,9 +990,8 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
evtchn = bind_virq.port;
else {
if (ret == -EEXIST)
- ret = find_virq(virq, cpu);
+ ret = find_virq(virq, cpu, &evtchn);
BUG_ON(ret < 0);
- evtchn = ret;
}
ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
@@ -1019,7 +1020,7 @@ static void unbind_from_irq(unsigned int irq)
mutex_unlock(&irq_mapping_update_lock);
}
-int bind_evtchn_to_irqhandler(unsigned int evtchn,
+int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
irq_handler_t handler,
unsigned long irqflags,
const char *devname, void *dev_id)
@@ -1040,7 +1041,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
- unsigned int remote_port,
+ evtchn_port_t remote_port,
irq_handler_t handler,
unsigned long irqflags,
const char *devname,
@@ -1132,7 +1133,7 @@ int xen_set_irq_priority(unsigned irq, unsigned priority)
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);
-int evtchn_make_refcounted(unsigned int evtchn)
+int evtchn_make_refcounted(evtchn_port_t evtchn)
{
int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info;
@@ -1153,7 +1154,7 @@ int evtchn_make_refcounted(unsigned int evtchn)
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
-int evtchn_get(unsigned int evtchn)
+int evtchn_get(evtchn_port_t evtchn)
{
int irq;
struct irq_info *info;
@@ -1186,7 +1187,7 @@ int evtchn_get(unsigned int evtchn)
}
EXPORT_SYMBOL_GPL(evtchn_get);
-void evtchn_put(unsigned int evtchn)
+void evtchn_put(evtchn_port_t evtchn)
{
int irq = get_evtchn_to_irq(evtchn);
if (WARN_ON(irq == -1))
@@ -1252,7 +1253,7 @@ void xen_hvm_evtchn_do_upcall(void)
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
-void rebind_evtchn_irq(int evtchn, int irq)
+void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
{
struct irq_info *info = info_for_irq(irq);
@@ -1284,7 +1285,7 @@ void rebind_evtchn_irq(int evtchn, int irq)
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
-static int xen_rebind_evtchn_to_cpu(int evtchn, unsigned int tcpu)
+static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
{
struct evtchn_bind_vcpu bind_vcpu;
int masked;
@@ -1342,7 +1343,7 @@ EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
static void enable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
unmask_evtchn(evtchn);
@@ -1350,7 +1351,7 @@ static void enable_dynirq(struct irq_data *data)
static void disable_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
if (VALID_EVTCHN(evtchn))
mask_evtchn(evtchn);
@@ -1358,7 +1359,7 @@ static void disable_dynirq(struct irq_data *data)
static void ack_dynirq(struct irq_data *data)
{
- int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
if (!VALID_EVTCHN(evtchn))
return;
@@ -1385,7 +1386,7 @@ static void mask_ack_dynirq(struct irq_data *data)
static int retrigger_dynirq(struct irq_data *data)
{
- unsigned int evtchn = evtchn_from_irq(data->irq);
+ evtchn_port_t evtchn = evtchn_from_irq(data->irq);
int masked;
if (!VALID_EVTCHN(evtchn))
@@ -1440,7 +1441,8 @@ static void restore_pirqs(void)
static void restore_cpu_virqs(unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
- int virq, irq, evtchn;
+ evtchn_port_t evtchn;
+ int virq, irq;
for (virq = 0; virq < NR_VIRQS; virq++) {
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
@@ -1465,7 +1467,8 @@ static void restore_cpu_virqs(unsigned int cpu)
static void restore_cpu_ipis(unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
- int ipi, irq, evtchn;
+ evtchn_port_t evtchn;
+ int ipi, irq;
for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
@@ -1489,7 +1492,7 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
clear_evtchn(evtchn);
@@ -1497,7 +1500,7 @@ void xen_clear_irq_pending(int irq)
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
if (VALID_EVTCHN(evtchn))
set_evtchn(evtchn);
@@ -1505,7 +1508,7 @@ void xen_set_irq_pending(int irq)
bool xen_test_irq_pending(int irq)
{
- int evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = evtchn_from_irq(irq);
bool ret = false;
if (VALID_EVTCHN(evtchn))
@@ -1667,7 +1670,7 @@ module_param(fifo_events, bool, 0);
void __init xen_init_IRQ(void)
{
int ret = -EINVAL;
- unsigned int evtchn;
+ evtchn_port_t evtchn;
if (fifo_events)
ret = xen_evtchn_fifo_init();
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index 76b318e88382..c60ee0450173 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -82,7 +82,7 @@ static unsigned event_array_pages __read_mostly;
#endif
-static inline event_word_t *event_word_from_port(unsigned port)
+static inline event_word_t *event_word_from_port(evtchn_port_t port)
{
unsigned i = port / EVENT_WORDS_PER_PAGE;
@@ -140,7 +140,7 @@ static void init_array_page(event_word_t *array_page)
static int evtchn_fifo_setup(struct irq_info *info)
{
- unsigned port = info->evtchn;
+ evtchn_port_t port = info->evtchn;
unsigned new_array_pages;
int ret;
@@ -191,37 +191,37 @@ static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
/* no-op */
}
-static void evtchn_fifo_clear_pending(unsigned port)
+static void evtchn_fifo_clear_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static void evtchn_fifo_set_pending(unsigned port)
+static void evtchn_fifo_set_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static bool evtchn_fifo_is_pending(unsigned port)
+static bool evtchn_fifo_is_pending(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}
-static bool evtchn_fifo_test_and_set_mask(unsigned port)
+static bool evtchn_fifo_test_and_set_mask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_and_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
-static void evtchn_fifo_mask(unsigned port)
+static void evtchn_fifo_mask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}
-static bool evtchn_fifo_is_masked(unsigned port)
+static bool evtchn_fifo_is_masked(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
@@ -242,7 +242,7 @@ static void clear_masked(volatile event_word_t *word)
} while (w != old);
}
-static void evtchn_fifo_unmask(unsigned port)
+static void evtchn_fifo_unmask(evtchn_port_t port)
{
event_word_t *word = event_word_from_port(port);
@@ -270,7 +270,7 @@ static uint32_t clear_linked(volatile event_word_t *word)
return w & EVTCHN_FIFO_LINK_MASK;
}
-static void handle_irq_for_port(unsigned port)
+static void handle_irq_for_port(evtchn_port_t port)
{
int irq;
@@ -286,7 +286,7 @@ static void consume_one_event(unsigned cpu,
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
- unsigned port;
+ evtchn_port_t port;
event_word_t *word;
head = q->head[priority];
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 82938cff6c7a..10684feb094e 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -33,7 +33,7 @@ struct irq_info {
int refcnt;
enum xen_irq_type type; /* type */
unsigned irq;
- unsigned int evtchn; /* event channel */
+ evtchn_port_t evtchn; /* event channel */
unsigned short cpu; /* cpu bound */
union {
@@ -60,12 +60,12 @@ struct evtchn_ops {
int (*setup)(struct irq_info *info);
void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);
- void (*clear_pending)(unsigned port);
- void (*set_pending)(unsigned port);
- bool (*is_pending)(unsigned port);
- bool (*test_and_set_mask)(unsigned port);
- void (*mask)(unsigned port);
- void (*unmask)(unsigned port);
+ void (*clear_pending)(evtchn_port_t port);
+ void (*set_pending)(evtchn_port_t port);
+ bool (*is_pending)(evtchn_port_t port);
+ bool (*test_and_set_mask)(evtchn_port_t port);
+ void (*mask)(evtchn_port_t port);
+ void (*unmask)(evtchn_port_t port);
void (*handle_events)(unsigned cpu);
void (*resume)(void);
@@ -74,11 +74,11 @@ struct evtchn_ops {
extern const struct evtchn_ops *evtchn_ops;
extern int **evtchn_to_irq;
-int get_evtchn_to_irq(unsigned int evtchn);
+int get_evtchn_to_irq(evtchn_port_t evtchn);
struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
-unsigned cpu_from_evtchn(unsigned int evtchn);
+unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
static inline unsigned xen_evtchn_max_channels(void)
{
@@ -102,32 +102,32 @@ static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
evtchn_ops->bind_to_cpu(info, cpu);
}
-static inline void clear_evtchn(unsigned port)
+static inline void clear_evtchn(evtchn_port_t port)
{
evtchn_ops->clear_pending(port);
}
-static inline void set_evtchn(unsigned port)
+static inline void set_evtchn(evtchn_port_t port)
{
evtchn_ops->set_pending(port);
}
-static inline bool test_evtchn(unsigned port)
+static inline bool test_evtchn(evtchn_port_t port)
{
return evtchn_ops->is_pending(port);
}
-static inline bool test_and_set_mask(unsigned port)
+static inline bool test_and_set_mask(evtchn_port_t port)
{
return evtchn_ops->test_and_set_mask(port);
}
-static inline void mask_evtchn(unsigned port)
+static inline void mask_evtchn(evtchn_port_t port)
{
return evtchn_ops->mask(port);
}
-static inline void unmask_evtchn(unsigned port)
+static inline void unmask_evtchn(evtchn_port_t port)
{
return evtchn_ops->unmask(port);
}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 052b55a14ebc..6e0b1dd5573c 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -83,7 +83,7 @@ struct per_user_data {
struct user_evtchn {
struct rb_node node;
struct per_user_data *user;
- unsigned port;
+ evtchn_port_t port;
bool enabled;
};
@@ -138,7 +138,8 @@ static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
kfree(evtchn);
}
-static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
+static struct user_evtchn *find_evtchn(struct per_user_data *u,
+ evtchn_port_t port)
{
struct rb_node *node = u->evtchns.rb_node;
@@ -163,7 +164,7 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
struct per_user_data *u = evtchn->user;
WARN(!evtchn->enabled,
- "Interrupt for port %d, but apparently not enabled; per-user %p\n",
+ "Interrupt for port %u, but apparently not enabled; per-user %p\n",
evtchn->port, u);
disable_irq_nosync(irq);
@@ -286,7 +287,7 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
mutex_lock(&u->bind_mutex);
for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
- unsigned port = kbuf[i];
+ evtchn_port_t port = kbuf[i];
struct user_evtchn *evtchn;
evtchn = find_evtchn(u, port);
@@ -361,7 +362,7 @@ static int evtchn_resize_ring(struct per_user_data *u)
return 0;
}
-static int evtchn_bind_to_user(struct per_user_data *u, int port)
+static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port)
{
struct user_evtchn *evtchn;
struct evtchn_close close;
@@ -423,7 +424,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
static DEFINE_PER_CPU(int, bind_last_selected_cpu);
-static void evtchn_bind_interdom_next_vcpu(int evtchn)
+static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
{
unsigned int selected_cpu, irq;
struct irq_desc *desc;
diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h
index 9a3960ecff6c..20d7d059dadb 100644
--- a/drivers/xen/gntdev-common.h
+++ b/drivers/xen/gntdev-common.h
@@ -15,6 +15,7 @@
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
+#include <xen/interface/event_channel.h>
struct gntdev_dmabuf_priv;
@@ -38,7 +39,7 @@ struct gntdev_unmap_notify {
int flags;
/* Address relative to the start of the gntdev_grant_map. */
int addr;
- int event;
+ evtchn_port_t event;
};
struct gntdev_grant_map {
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 0258415ca0b2..50651e566564 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -652,7 +652,7 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
struct gntdev_grant_map *map;
int rc;
int out_flags;
- unsigned int out_event;
+ evtchn_port_t out_event;
if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index c57c71b7d53d..cf4ce3e9358d 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -300,7 +300,7 @@ static struct sock_mapping *pvcalls_new_active_socket(
struct pvcalls_fedata *fedata,
uint64_t id,
grant_ref_t ref,
- uint32_t evtchn,
+ evtchn_port_t evtchn,
struct socket *sock)
{
int ret;
@@ -905,7 +905,8 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
static int backend_connect(struct xenbus_device *dev)
{
- int err, evtchn;
+ int err;
+ evtchn_port_t evtchn;
grant_ref_t ring_ref;
struct pvcalls_fedata *fedata = NULL;
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 57592a6b5c9e..b43b5595e988 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -368,12 +368,12 @@ out:
return -ENOMEM;
}
-static int create_active(struct sock_mapping *map, int *evtchn)
+static int create_active(struct sock_mapping *map, evtchn_port_t *evtchn)
{
void *bytes;
int ret = -ENOMEM, irq = -1, i;
- *evtchn = -1;
+ *evtchn = 0;
init_waitqueue_head(&map->active.inflight_conn_req);
bytes = map->active.data.in;
@@ -404,7 +404,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
return 0;
out_error:
- if (*evtchn >= 0)
+ if (*evtchn > 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
return ret;
}
@@ -415,7 +415,8 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
struct pvcalls_bedata *bedata;
struct sock_mapping *map = NULL;
struct xen_pvcalls_request *req;
- int notify, req_id, ret, evtchn;
+ int notify, req_id, ret;
+ evtchn_port_t evtchn;
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
@@ -765,7 +766,8 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
struct sock_mapping *map;
struct sock_mapping *map2 = NULL;
struct xen_pvcalls_request *req;
- int notify, req_id, ret, evtchn, nonblock;
+ int notify, req_id, ret, nonblock;
+ evtchn_port_t evtchn;
map = pvcalls_enter_sock(sock);
if (IS_ERR(map))
@@ -1125,7 +1127,8 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
static int pvcalls_front_probe(struct xenbus_device *dev,
const struct xenbus_device_id *id)
{
- int ret = -ENOMEM, evtchn, i;
+ int ret = -ENOMEM, i;
+ evtchn_port_t evtchn;
unsigned int max_page_order, function_calls, len;
char *versions;
grant_ref_t gref_head = 0;
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 833b2d2c4318..f2115587855f 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -105,13 +105,13 @@ static void free_pdev(struct xen_pcibk_device *pdev)
}
static int xen_pcibk_do_attach(struct xen_pcibk_device *pdev, int gnt_ref,
- int remote_evtchn)
+ evtchn_port_t remote_evtchn)
{
int err = 0;
void *vaddr;
dev_dbg(&pdev->xdev->dev,
- "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
+ "Attaching to frontend resources - gnt_ref=%d evtchn=%u\n",
gnt_ref, remote_evtchn);
err = xenbus_map_ring_valloc(pdev->xdev, &gnt_ref, 1, &vaddr);
@@ -142,7 +142,8 @@ out:
static int xen_pcibk_attach(struct xen_pcibk_device *pdev)
{
int err = 0;
- int gnt_ref, remote_evtchn;
+ int gnt_ref;
+ evtchn_port_t remote_evtchn;
char *magic = NULL;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ba0942e481bc..75c0a2e9a6db 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -854,7 +854,8 @@ unmap_page:
static int scsiback_map(struct vscsibk_info *info)
{
struct xenbus_device *dev = info->dev;
- unsigned int ring_ref, evtchn;
+ unsigned int ring_ref;
+ evtchn_port_t evtchn;
int err;
err = xenbus_gather(XBT_NIL, dev->otherend,
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 31eb822ac313..385843256865 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -391,7 +391,7 @@ EXPORT_SYMBOL_GPL(xenbus_grant_ring);
* error, the device will switch to XenbusStateClosing, and the error will be
* saved in the store.
*/
-int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
+int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port)
{
struct evtchn_alloc_unbound alloc_unbound;
int err;
@@ -414,7 +414,7 @@ EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
/**
* Free an existing event channel. Returns 0 on success or -errno on error.
*/
-int xenbus_free_evtchn(struct xenbus_device *dev, int port)
+int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port)
{
struct evtchn_close close;
int err;
@@ -423,7 +423,7 @@ int xenbus_free_evtchn(struct xenbus_device *dev, int port)
err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
if (err)
- xenbus_dev_error(dev, err, "freeing event channel %d", port);
+ xenbus_dev_error(dev, err, "freeing event channel %u", port);
return err;
}
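With both helpers now typed on evtchn_port_t they form a natural alloc/free pair for frontends. A hedged usage sketch based only on the two signatures visible in this hunk; bind_ring_to_port() is a hypothetical placeholder for whatever binding the frontend performs:

static int frontend_setup_evtchn(struct xenbus_device *dev,
				 evtchn_port_t *out_port)
{
	evtchn_port_t port;
	int err;

	err = xenbus_alloc_evtchn(dev, &port);
	if (err)
		return err;	/* error already reported via the store */

	err = bind_ring_to_port(dev, port);	/* hypothetical follow-up */
	if (err) {
		xenbus_free_evtchn(dev, port);
		return err;
	}

	*out_port = port;
	return 0;
}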