Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/acpi_configfs.c6
-rw-r--r--drivers/acpi/dptf/dptf_power.c1
-rw-r--r--drivers/acpi/fan.c2
-rw-r--r--drivers/acpi/sysfs.c4
-rw-r--r--drivers/android/binder.c14
-rw-r--r--drivers/ata/libata-core.c11
-rw-r--r--drivers/ata/libata-scsi.c9
-rw-r--r--drivers/ata/sata_rcar.c11
-rw-r--r--drivers/atm/atmtcp.c10
-rw-r--r--drivers/base/base.h1
-rw-r--r--drivers/base/core.c44
-rw-r--r--drivers/base/dd.c5
-rw-r--r--drivers/base/power/trace.c4
-rw-r--r--drivers/base/regmap/Kconfig2
-rw-r--r--drivers/base/regmap/regmap-debugfs.c52
-rw-r--r--drivers/base/regmap/regmap.c108
-rw-r--r--drivers/block/loop.c8
-rw-r--r--drivers/block/nbd.c25
-rw-r--r--drivers/block/rbd.c4
-rw-r--r--drivers/block/virtio_blk.c1
-rw-r--r--drivers/block/zram/zram_drv.c3
-rw-r--r--drivers/bus/ti-sysc.c129
-rw-r--r--drivers/char/hw_random/ks-sa-rng.c1
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/tpm/st33zp24/i2c.c2
-rw-r--r--drivers/char/tpm/st33zp24/spi.c4
-rw-r--r--drivers/char/tpm/st33zp24/st33zp24.c2
-rw-r--r--drivers/char/tpm/tpm-dev-common.c19
-rw-r--r--drivers/char/tpm/tpm_ibmvtpm.c14
-rw-r--r--drivers/char/tpm/tpm_tis.c7
-rw-r--r--drivers/char/tpm/tpm_tis_core.c2
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c10
-rw-r--r--drivers/char/virtio_console.c3
-rw-r--r--drivers/clk/Kconfig1
-rw-r--r--drivers/clk/clk-ast2600.c49
-rw-r--r--drivers/clk/mvebu/Kconfig1
-rw-r--r--drivers/clk/sifive/fu540-prci.c5
-rw-r--r--drivers/clocksource/arm_arch_timer.c11
-rw-r--r--drivers/clocksource/timer-ti-dm-systimer.c46
-rw-r--r--drivers/counter/104-quad-8.c22
-rw-r--r--drivers/cpufreq/intel_pstate.c15
-rw-r--r--drivers/cpuidle/cpuidle.c5
-rw-r--r--drivers/crypto/caam/Kconfig2
-rw-r--r--drivers/crypto/caam/ctrl.c18
-rw-r--r--drivers/crypto/caam/desc.h4
-rw-r--r--drivers/crypto/caam/pdb.h2
-rw-r--r--drivers/crypto/ccp/sev-dev.c23
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_cm.c2
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c7
-rw-r--r--drivers/crypto/hisilicon/sgl.c3
-rw-r--r--drivers/crypto/marvell/octeontx/otx_cptvf_algs.c11
-rw-r--r--drivers/dio/dio.c6
-rw-r--r--drivers/dma-buf/dma-buf.c65
-rw-r--r--drivers/dma/dmatest.c2
-rw-r--r--drivers/dma/dw/core.c12
-rw-r--r--drivers/dma/fsl-edma-common.c28
-rw-r--r--drivers/dma/fsl-edma-common.h2
-rw-r--r--drivers/dma/fsl-edma.c7
-rw-r--r--drivers/dma/idxd/cdev.c19
-rw-r--r--drivers/dma/idxd/device.c25
-rw-r--r--drivers/dma/idxd/idxd.h1
-rw-r--r--drivers/dma/idxd/irq.c3
-rw-r--r--drivers/dma/idxd/sysfs.c5
-rw-r--r--drivers/dma/imx-sdma.c11
-rw-r--r--drivers/dma/ioat/dma.c12
-rw-r--r--drivers/dma/ioat/dma.h2
-rw-r--r--drivers/dma/mcf-edma.c7
-rw-r--r--drivers/dma/sh/usb-dmac.c2
-rw-r--r--drivers/dma/tegra210-adma.c5
-rw-r--r--drivers/dma/ti/k3-udma-private.c1
-rw-r--r--drivers/dma/ti/k3-udma.c39
-rw-r--r--drivers/edac/amd64_edac.c2
-rw-r--r--drivers/firmware/efi/Kconfig11
-rw-r--r--drivers/firmware/efi/arm-init.c40
-rw-r--r--drivers/firmware/efi/efi-pstore.c5
-rw-r--r--drivers/firmware/efi/efi.c17
-rw-r--r--drivers/firmware/efi/efivars.c5
-rw-r--r--drivers/firmware/efi/esrt.c2
-rw-r--r--drivers/firmware/efi/libstub/alignedmem.c2
-rw-r--r--drivers/firmware/efi/libstub/arm32-stub.c54
-rw-r--r--drivers/firmware/efi/libstub/arm64-stub.c25
-rw-r--r--drivers/firmware/efi/libstub/efi-stub-helper.c80
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c20
-rw-r--r--drivers/firmware/efi/libstub/efistub.h28
-rw-r--r--drivers/firmware/efi/libstub/file.c16
-rw-r--r--drivers/firmware/efi/libstub/skip_spaces.c1
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c8
-rw-r--r--drivers/firmware/efi/vars.c6
-rw-r--r--drivers/firmware/psci/psci_checker.c8
-rw-r--r--drivers/firmware/raspberrypi.c5
-rw-r--r--drivers/fpga/Kconfig2
-rw-r--r--drivers/gpio/gpio-arizona.c7
-rw-r--r--drivers/gpio/gpio-pca953x.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c63
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c1
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h11
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c53
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c151
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c27
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/renoir_ppt.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c11
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c10
-rw-r--r--drivers/gpu/drm/aspeed/aspeed_gfx_drv.c3
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c15
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c63
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dma.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_mic.c4
-rw-r--r--drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c65
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c71
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c241
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c25
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_lrc.c185
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_mocs.c18
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_ring.c110
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_rps.c77
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_timeline.c15
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/README46
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm119
-rw-r--r--drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm117
-rw-r--r--drivers/gpu/drm/i915/gvt/debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h6
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h5
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h7
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c1
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c84
-rw-r--r--drivers/gpu/drm/i915/i915_priolist_types.h2
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c15
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c206
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_mock_selftests.h1
-rw-r--r--drivers/gpu/drm/lima/lima_pp.c2
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c2
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c3
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c8
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c6
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c25
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c12
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c52
-rw-r--r--drivers/gpu/drm/meson/meson_registers.h6
-rw-r--r--drivers/gpu/drm/meson/meson_viu.c11
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c18
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c2
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dmem.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c4
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c5
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c13
-rw-r--r--drivers/gpu/drm/tegra/dc.c1
-rw-r--r--drivers/gpu/drm/tegra/hub.c17
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c8
-rw-r--r--drivers/gpu/host1x/bus.c9
-rw-r--r--drivers/gpu/host1x/dev.c11
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-apple.c18
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-logitech-dj.c6
-rw-r--r--drivers/hid/hid-logitech-hidpp.c2
-rw-r--r--drivers/hid/hid-magicmouse.c6
-rw-r--r--drivers/hid/hid-quirks.c2
-rw-r--r--drivers/hid/hid-steam.c6
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/amd_energy.c2
-rw-r--r--drivers/hwmon/aspeed-pwm-tacho.c2
-rw-r--r--drivers/hwmon/bt1-pvt.c12
-rw-r--r--drivers/hwmon/drivetemp.c43
-rw-r--r--drivers/hwmon/emc2103.c2
-rw-r--r--drivers/hwmon/max6697.c7
-rw-r--r--drivers/hwmon/nct6775.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig2
-rw-r--r--drivers/hwmon/pmbus/adm1275.c10
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c8
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/hwtracing/coresight/coresight-cti.c96
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c82
-rw-r--r--drivers/hwtracing/intel_th/core.c21
-rw-r--r--drivers/hwtracing/intel_th/pci.c15
-rw-r--r--drivers/hwtracing/intel_th/sth.c4
-rw-r--r--drivers/i2c/Kconfig7
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c3
-rw-r--r--drivers/i2c/busses/i2c-cadence.c28
-rw-r--r--drivers/i2c/busses/i2c-designware-common.c25
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h3
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c17
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c1
-rw-r--r--drivers/i2c/busses/i2c-fsi.c2
-rw-r--r--drivers/i2c/busses/i2c-mlxcpld.c4
-rw-r--r--drivers/i2c/busses/i2c-qcom-geni.c6
-rw-r--r--drivers/i2c/busses/i2c-rcar.c3
-rw-r--r--drivers/i2c/i2c-core-base.c25
-rw-r--r--drivers/i2c/i2c-core-smbus.c9
-rw-r--r--drivers/iio/accel/mma8452.c5
-rw-r--r--drivers/iio/adc/ad7780.c2
-rw-r--r--drivers/iio/adc/adi-axi-adc.c4
-rw-r--r--drivers/iio/health/afe4403.c9
-rw-r--r--drivers/iio/health/afe4404.c8
-rw-r--r--drivers/iio/humidity/hdc100x.c10
-rw-r--r--drivers/iio/humidity/hts221.h7
-rw-r--r--drivers/iio/humidity/hts221_buffer.c9
-rw-r--r--drivers/iio/industrialio-core.c2
-rw-r--r--drivers/iio/magnetometer/ak8974.c29
-rw-r--r--drivers/iio/pressure/ms5611_core.c11
-rw-r--r--drivers/iio/pressure/zpa2326.c4
-rw-r--r--drivers/infiniband/core/cm.c3
-rw-r--r--drivers/infiniband/core/cma.c18
-rw-r--r--drivers/infiniband/core/counters.c4
-rw-r--r--drivers/infiniband/core/mad.c3
-rw-r--r--drivers/infiniband/core/rdma_core.c42
-rw-r--r--drivers/infiniband/core/sa_query.c38
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c1
-rw-r--r--drivers/infiniband/hw/hfi1/debugfs.c19
-rw-r--r--drivers/infiniband/hw/hfi1/init.c37
-rw-r--r--drivers/infiniband/hw/hfi1/iowait.h2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h6
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c104
-rw-r--r--drivers/infiniband/hw/hfi1/netdev_rx.c2
-rw-r--r--drivers/infiniband/hw/hfi1/qp.c7
-rw-r--r--drivers/infiniband/hw/hfi1/tid_rdma.c5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h2
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h7
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c4
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c51
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_mr.c7
-rw-r--r--drivers/infiniband/hw/mlx5/main.c2
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c22
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c55
-rw-r--r--drivers/infiniband/hw/mlx5/qpc.c8
-rw-r--r--drivers/infiniband/hw/mlx5/srq_cmd.c4
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c13
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c6
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c3
-rw-r--r--drivers/infiniband/sw/siw/siw_qp_rx.c3
-rw-r--r--drivers/input/mouse/elan_i2c_core.c9
-rw-r--r--drivers/input/mouse/synaptics.c1
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/touchscreen/elants_i2c.c1
-rw-r--r--drivers/iommu/Kconfig3
-rw-r--r--drivers/iommu/amd/amd_iommu.h2
-rw-r--r--drivers/iommu/amd/iommu.c5
-rw-r--r--drivers/iommu/arm-smmu-qcom.c2
-rw-r--r--drivers/iommu/hyperv-iommu.c5
-rw-r--r--drivers/iommu/intel/dmar.c3
-rw-r--r--drivers/iommu/intel/iommu.c59
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/iommu.c2
-rw-r--r--drivers/iommu/qcom_iommu.c37
-rw-r--r--drivers/iommu/sun50i-iommu.c8
-rw-r--r--drivers/irqchip/Kconfig2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c16
-rw-r--r--drivers/irqchip/irq-gic.c14
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/md/bcache/btree.c8
-rw-r--r--drivers/md/bcache/super.c35
-rw-r--r--drivers/md/dm-integrity.c4
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-rq.c4
-rw-r--r--drivers/md/dm-writecache.c16
-rw-r--r--drivers/md/dm-zoned-metadata.c51
-rw-r--r--drivers/md/dm-zoned-reclaim.c11
-rw-r--r--drivers/md/dm-zoned-target.c12
-rw-r--r--drivers/md/dm.c114
-rw-r--r--drivers/media/platform/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c1
-rw-r--r--drivers/message/fusion/mptbase.c41
-rw-r--r--drivers/message/fusion/mptscsih.c4
-rw-r--r--drivers/mfd/ioc3.c5
-rw-r--r--drivers/misc/atmel-ssc.c24
-rw-r--r--drivers/misc/habanalabs/command_submission.c13
-rw-r--r--drivers/misc/habanalabs/debugfs.c4
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c37
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h3
-rw-r--r--drivers/misc/habanalabs/include/gaudi/gaudi_packets.h3
-rw-r--r--drivers/misc/kgdbts.c6
-rw-r--r--drivers/misc/mei/bus.c3
-rw-r--r--drivers/misc/mei/hw-me-regs.h3
-rw-r--r--drivers/misc/mei/hw-me.c70
-rw-r--r--drivers/misc/mei/hw-me.h17
-rw-r--r--drivers/misc/mei/pci-me.c17
-rw-r--r--drivers/mmc/host/meson-gx-mmc.c6
-rw-r--r--drivers/mmc/host/owl-mmc.c2
-rw-r--r--drivers/mmc/host/sdhci-msm.c5
-rw-r--r--drivers/mmc/host/sdhci-of-aspeed.c2
-rw-r--r--drivers/mtd/mtdcore.c4
-rw-r--r--drivers/mtd/nand/raw/nandsim.c2
-rw-r--r--drivers/mtd/nand/raw/xway_nand.c2
-rw-r--r--drivers/net/bareudp.c29
-rw-r--r--drivers/net/bonding/bond_main.c10
-rw-r--r--drivers/net/bonding/bond_netlink.c3
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c3
-rw-r--r--drivers/net/dsa/microchip/ksz9477.c45
-rw-r--r--drivers/net/dsa/microchip/ksz9477_i2c.c1
-rw-r--r--drivers/net/dsa/microchip/ksz_common.c2
-rw-r--r--drivers/net/dsa/microchip/ksz_common.h2
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c22
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h1
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c327
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c9
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_phy.c29
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_phy.h8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c26
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h2
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c5
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c228
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h1
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c22
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c155
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c8
-rw-r--r--drivers/net/ethernet/cortina/gemini.c5
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.h6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c117
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c45
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c86
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c43
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c91
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h16
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c4
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c9
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c26
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c2
-rw-r--r--drivers/net/ethernet/marvell/sky2.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c53
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c46
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c78
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c93
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c48
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c54
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c61
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c14
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c10
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c59
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c71
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h12
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c29
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c6
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c14
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c53
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c18
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c26
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/socionext/netsec.c5
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c3
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hippi/rrunner.c2
-rw-r--r--drivers/net/ieee802154/adf7242.c6
-rw-r--r--drivers/net/ipa/gsi.c16
-rw-r--r--drivers/net/ipa/ipa_cmd.c15
-rw-r--r--drivers/net/ipa/ipa_cmd.h8
-rw-r--r--drivers/net/ipa/ipa_data-sdm845.c1
-rw-r--r--drivers/net/ipa/ipa_endpoint.c2
-rw-r--r--drivers/net/ipa/ipa_gsi.c1
-rw-r--r--drivers/net/ipa/ipa_gsi.h2
-rw-r--r--drivers/net/ipa/ipa_qmi_msg.c6
-rw-r--r--drivers/net/macsec.c5
-rw-r--r--drivers/net/macvlan.c5
-rw-r--r--drivers/net/netdevsim/netdev.c4
-rw-r--r--drivers/net/phy/Kconfig3
-rw-r--r--drivers/net/phy/dp83640.c4
-rw-r--r--drivers/net/phy/mscc/mscc_macsec.c40
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c4
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/ax88172a.c1
-rw-r--r--drivers/net/usb/hso.c10
-rw-r--r--drivers/net/usb/lan78xx.c113
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/smsc95xx.c9
-rw-r--r--drivers/net/vxlan.c14
-rw-r--r--drivers/net/wan/hdlc_x25.c4
-rw-r--r--drivers/net/wan/lapbether.c17
-rw-r--r--drivers/net/wan/x25_asy.c21
-rw-r--r--drivers/net/wireguard/device.c1
-rw-r--r--drivers/net/wireguard/queueing.h19
-rw-r--r--drivers/net/wireguard/receive.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c78
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.h15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h30
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c13
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c7
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c39
-rw-r--r--drivers/net/xen-netfront.c64
-rw-r--r--drivers/nfc/s3fwrn5/core.c1
-rw-r--r--drivers/nvdimm/region_devs.c14
-rw-r--r--drivers/nvdimm/security.c2
-rw-r--r--drivers/nvme/host/core.c15
-rw-r--r--drivers/nvme/host/multipath.c53
-rw-r--r--drivers/nvme/host/nvme.h15
-rw-r--r--drivers/nvme/host/pci.c6
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/tcp.c4
-rw-r--r--drivers/nvme/target/loop.c4
-rw-r--r--drivers/opp/of.c4
-rw-r--r--drivers/pci/controller/vmd.c5
-rw-r--r--drivers/pci/pci.c30
-rw-r--r--drivers/perf/arm-cci.c1
-rw-r--r--drivers/perf/arm-ccn.c1
-rw-r--r--drivers/perf/arm_dsu_pmu.c1
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c2
-rw-r--r--drivers/perf/arm_spe_pmu.c1
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_hha_pmu.c2
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c2
-rw-r--r--drivers/perf/qcom_l2_pmu.c1
-rw-r--r--drivers/perf/qcom_l3_pmu.c1
-rw-r--r--drivers/perf/thunderx2_pmu.c1
-rw-r--r--drivers/perf/xgene_pmu.c1
-rw-r--r--drivers/phy/allwinner/phy-sun4i-usb.c5
-rw-r--r--drivers/phy/intel/phy-intel-combo.c14
-rw-r--r--drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c4
-rw-r--r--drivers/phy/ti/phy-am654-serdes.c2
-rw-r--r--drivers/phy/ti/phy-j721e-wiz.c10
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx.c7
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c67
-rw-r--r--drivers/pinctrl/pinctrl-amd.h2
-rw-r--r--drivers/pinctrl/pinctrl-mcp23s08_spi.c5
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ipq6018.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c21
-rw-r--r--drivers/pinctrl/tegra/pinctrl-tegra.c4
-rw-r--r--drivers/platform/x86/asus-wmi.c1
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_common.h3
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c1
-rw-r--r--drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c1
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c14
-rw-r--r--drivers/regulator/Kconfig2
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/da903x-regulator.c (renamed from drivers/regulator/da903x.c)0
-rw-r--r--drivers/regulator/da9063-regulator.c1
-rw-r--r--drivers/regulator/helpers.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c60
-rw-r--r--drivers/regulator/qcom_smd-regulator.c2
-rw-r--r--drivers/s390/cio/qdio.h7
-rw-r--r--drivers/s390/cio/qdio_debug.c4
-rw-r--r--drivers/s390/cio/qdio_main.c101
-rw-r--r--drivers/s390/cio/vfio_ccw_chp.c1
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c3
-rw-r--r--drivers/s390/scsi/zfcp_erp.c13
-rw-r--r--drivers/s390/virtio/virtio_ccw.c26
-rw-r--r--drivers/scsi/aic94xx/aic94xx_init.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v1_hw.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c1
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c1
-rw-r--r--drivers/scsi/ipr.c1
-rw-r--r--drivers/scsi/isci/init.c1
-rw-r--r--drivers/scsi/libfc/fc_rport.c13
-rw-r--r--drivers/scsi/libiscsi.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c3
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_ctl.c12
-rw-r--r--drivers/scsi/mvsas/mv_init.c1
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c1
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c3
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_dh.c1
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/ufs/ufs_bsg.c4
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c8
-rw-r--r--drivers/soc/imx/soc-imx.c3
-rw-r--r--drivers/soc/imx/soc-imx8m.c10
-rw-r--r--drivers/soc/ti/omap_prm.c8
-rw-r--r--drivers/soundwire/intel.c5
-rw-r--r--drivers/spi/spi-fsl-dspi.c47
-rw-r--r--drivers/spi/spi-mt65xx.c15
-rw-r--r--drivers/spi/spi-pxa2xx.c5
-rw-r--r--drivers/spi/spi-rspi.c28
-rw-r--r--drivers/spi/spi-sprd-adi.c2
-rw-r--r--drivers/spi/spi-stm32-qspi.c46
-rw-r--r--drivers/spi/spi-sun6i.c14
-rw-r--r--drivers/spi/spidev.c24
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c10
-rw-r--r--drivers/staging/media/atomisp/Kconfig2
-rw-r--r--drivers/staging/media/atomisp/Makefile6
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-ov2680.c6
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c6
-rw-r--r--drivers/staging/media/atomisp/include/linux/atomisp_platform.h1
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp-regs.h3
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_acc.c4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.c59
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_cmd.h4
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.c70
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_compat_css20.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.c14
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_drvfs.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c537
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_internal.h2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_ioctl.c19
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_v4l2.c285
-rw-r--r--drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c28
-rw-r--r--drivers/staging/media/atomisp/pci/hive_types.h24
-rw-r--r--drivers/staging/media/atomisp/pci/hmm/hmm.c10
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_global.h302
-rw-r--r--drivers/staging/media/atomisp/pci/isp2400_system_local.h321
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_global.h410
-rw-r--r--drivers/staging/media/atomisp/pci/isp2401_system_local.h402
-rw-r--r--drivers/staging/media/atomisp/pci/sh_css.c9
-rw-r--r--drivers/staging/media/atomisp/pci/system_global.h395
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.c179
-rw-r--r--drivers/staging/media/atomisp/pci/system_local.h104
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_wlan_util.c4
-rw-r--r--drivers/staging/wfx/hif_tx.c6
-rw-r--r--drivers/staging/wfx/hif_tx.h2
-rw-r--r--drivers/staging/wfx/queue.c21
-rw-r--r--drivers/staging/wfx/scan.c6
-rw-r--r--drivers/thermal/cpufreq_cooling.c6
-rw-r--r--drivers/thermal/imx_thermal.c7
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c7
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c2
-rw-r--r--drivers/thermal/mtk_thermal.c11
-rw-r--r--drivers/thermal/qcom/tsens.c10
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c2
-rw-r--r--drivers/thermal/sprd_thermal.c4
-rw-r--r--drivers/thunderbolt/tunnel.c12
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c9
-rw-r--r--drivers/tty/serial/kgdb_nmi.c2
-rw-r--r--drivers/tty/serial/kgdboc.c32
-rw-r--r--drivers/tty/serial/mxs-auart.c12
-rw-r--r--drivers/tty/serial/serial_core.c115
-rw-r--r--drivers/tty/serial/sh-sci.c3
-rw-r--r--drivers/tty/serial/xilinx_uartps.c1
-rw-r--r--drivers/uio/uio_pdrv_genirq.c4
-rw-r--r--drivers/usb/c67x00/c67x00-sched.c2
-rw-r--r--drivers/usb/cdns3/ep0.c40
-rw-r--r--drivers/usb/cdns3/trace.h8
-rw-r--r--drivers/usb/chipidea/core.c24
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc2/gadget.c6
-rw-r--r--drivers/usb/dwc2/platform.c14
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c9
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c12
-rw-r--r--drivers/usb/early/ehci-dbgp.c3
-rw-r--r--drivers/usb/gadget/function/f_uac1_legacy.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c10
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c7
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/gadget/usbstring.c2
-rw-r--r--drivers/usb/host/ehci-exynos.c5
-rw-r--r--drivers/usb/host/ehci-pci.c7
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/xhci-mtk.c5
-rw-r--r--drivers/usb/host/xhci.c9
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/misc/usbtest.c1
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c6
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c23
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h2
-rw-r--r--drivers/usb/serial/ch341.c1
-rw-r--r--drivers/usb/serial/cypress_m8.c2
-rw-r--r--drivers/usb/serial/cypress_m8.h3
-rw-r--r--drivers/usb/serial/iuu_phoenix.c8
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c13
-rw-r--r--drivers/usb/typec/tcpm/tcpci_rt1711h.c31
-rw-r--r--drivers/vdpa/vdpa.c2
-rw-r--r--drivers/vfio/pci/vfio_pci.c13
-rw-r--r--drivers/vfio/pci/vfio_pci_config.c17
-rw-r--r--drivers/vhost/test.c57
-rw-r--r--drivers/vhost/test.h1
-rw-r--r--drivers/vhost/vdpa.c2
-rw-r--r--drivers/video/backlight/tosa_lcd.c4
-rw-r--r--drivers/video/fbdev/core/fbcon.c3
-rw-r--r--drivers/video/fbdev/hpfb.c2
-rw-r--r--drivers/video/fbdev/uvesafb.c2
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.c6
-rw-r--r--drivers/virt/vboxguest/vboxguest_core.h15
-rw-r--r--drivers/virt/vboxguest/vboxguest_linux.c3
-rw-r--r--drivers/virt/vboxguest/vmmdev.h2
-rw-r--r--drivers/virtio/virtio_mem.c27
-rw-r--r--drivers/virtio/virtio_mmio.c4
-rw-r--r--drivers/xen/xenbus/xenbus_client.c171
700 files changed, 8010 insertions, 5828 deletions
diff --git a/drivers/acpi/acpi_configfs.c b/drivers/acpi/acpi_configfs.c
index ece8c1a921cc..88c8af455ea3 100644
--- a/drivers/acpi/acpi_configfs.c
+++ b/drivers/acpi/acpi_configfs.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/configfs.h>
#include <linux/acpi.h>
+#include <linux/security.h>
#include "acpica/accommon.h"
#include "acpica/actables.h"
@@ -28,7 +29,10 @@ static ssize_t acpi_table_aml_write(struct config_item *cfg,
{
const struct acpi_table_header *header = data;
struct acpi_table *table;
- int ret;
+ int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);
+
+ if (ret)
+ return ret;
table = container_of(cfg, struct acpi_table, cfg);
diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c
index 5fab7e350db8..92b996a564d0 100644
--- a/drivers/acpi/dptf/dptf_power.c
+++ b/drivers/acpi/dptf/dptf_power.c
@@ -228,6 +228,7 @@ static const struct acpi_device_id int3407_device_ids[] = {
{"INT3407", 0},
{"INT3532", 0},
{"INTC1047", 0},
+ {"INTC1050", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 873e039ad4b7..62873388b24f 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -25,8 +25,8 @@ static int acpi_fan_remove(struct platform_device *pdev);
static const struct acpi_device_id fan_device_ids[] = {
{"PNP0C0B", 0},
- {"INT1044", 0},
{"INT3404", 0},
+ {"INTC1044", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, fan_device_ids);
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 3a89909b50a6..76c668c05fa0 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -938,13 +938,13 @@ static void __exit interrupt_stats_exit(void)
}
static ssize_t
-acpi_show_profile(struct device *dev, struct device_attribute *attr,
+acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
}
-static const struct device_attribute pm_profile_attr =
+static const struct kobj_attribute pm_profile_attr =
__ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
static ssize_t hotplug_enabled_show(struct kobject *kobj,
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index e47c8a4c83db..f50c5f182bb5 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -4686,8 +4686,15 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
static void binder_free_proc(struct binder_proc *proc)
{
+ struct binder_device *device;
+
BUG_ON(!list_empty(&proc->todo));
BUG_ON(!list_empty(&proc->delivered_death));
+ device = container_of(proc->context, struct binder_device, context);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(proc->context->name);
+ kfree(device);
+ }
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
binder_stats_deleted(BINDER_STAT_PROC);
@@ -5406,7 +5413,6 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_context *context = proc->context;
- struct binder_device *device;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
@@ -5423,12 +5429,6 @@ static void binder_deferred_release(struct binder_proc *proc)
context->binder_context_mgr_node = NULL;
}
mutex_unlock(&context->context_mgr_node_lock);
- device = container_of(proc->context, struct binder_device, context);
- if (refcount_dec_and_test(&device->ref)) {
- kfree(context->name);
- kfree(device);
- }
- proc->context = NULL;
binder_inner_proc_lock(proc);
/*
* Make sure proc stays alive after we
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 69361ec43db5..b1cd4d97bc2a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -42,7 +42,6 @@
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
-#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
@@ -5778,7 +5777,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
/* perform each probe asynchronously */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- async_schedule(async_port_probe, ap);
+ ap->cookie = async_schedule(async_port_probe, ap);
}
return 0;
@@ -5920,11 +5919,11 @@ void ata_host_detach(struct ata_host *host)
{
int i;
- /* Ensure ata_port probe has completed */
- async_synchronize_full();
-
- for (i = 0; i < host->n_ports; i++)
+ for (i = 0; i < host->n_ports; i++) {
+ /* Ensure ata_port probe has completed */
+ async_synchronize_cookie(host->ports[i]->cookie + 1);
ata_port_detach(host->ports[i]);
+ }
/* the host is dead now, dissociate ACPI */
ata_acpi_dissociate(host);
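
[Editor's note, not part of the patch] The libata change above swaps a global async_synchronize_full() for per-port cookies, so detaching a host only waits for that host's own probe work. A minimal sketch of the cookie API usage; everything except the async_* helpers is illustrative:

#include <linux/async.h>

struct example_port {
	async_cookie_t cookie;	/* saved from async_schedule() */
};

static void example_async_probe(void *data, async_cookie_t cookie)
{
	struct example_port *port = data;
	/* slow probe work runs here, off the registration path */
	(void)port;
}

static void example_register(struct example_port *port)
{
	port->cookie = async_schedule(example_async_probe, port);
}

static void example_detach(struct example_port *port)
{
	/* waits for all work with cookies below the argument, so +1
	 * includes the probe scheduled for this port */
	async_synchronize_cookie(port->cookie + 1);
}
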
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 435781a16875..46336084b1a9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3684,12 +3684,13 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
{
struct scsi_cmnd *scmd = qc->scsicmd;
const u8 *cdb = scmd->cmnd;
- const u8 *p;
u8 pg, spg;
unsigned six_byte, pg_len, hdr_len, bd_len;
int len;
u16 fp = (u16)-1;
u8 bp = 0xff;
+ u8 buffer[64];
+ const u8 *p = buffer;
VPRINTK("ENTER\n");
@@ -3723,12 +3724,14 @@ static unsigned int ata_scsi_mode_select_xlat(struct ata_queued_cmd *qc)
if (!scsi_sg_count(scmd) || scsi_sglist(scmd)->length < len)
goto invalid_param_len;
- p = page_address(sg_page(scsi_sglist(scmd)));
-
/* Move past header and block descriptors. */
if (len < hdr_len)
goto invalid_param_len;
+ if (!sg_copy_to_buffer(scsi_sglist(scmd), scsi_sg_count(scmd),
+ buffer, sizeof(buffer)))
+ goto invalid_param_len;
+
if (six_byte)
bd_len = p[3];
else
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 980aacdbcf3b..141ac600b64c 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -907,7 +907,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0)
- goto err_pm_disable;
+ goto err_pm_put;
host = ata_host_alloc(dev, 1);
if (!host) {
@@ -937,7 +937,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
err_pm_put:
pm_runtime_put(dev);
-err_pm_disable:
pm_runtime_disable(dev);
return ret;
}
@@ -991,8 +990,10 @@ static int sata_rcar_resume(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
if (priv->type == RCAR_GEN3_SATA) {
sata_rcar_init_module(priv);
@@ -1017,8 +1018,10 @@ static int sata_rcar_restore(struct device *dev)
int ret;
ret = pm_runtime_get_sync(dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put(dev);
return ret;
+ }
sata_rcar_setup_port(host);
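
[Editor's note, not part of the patch] The sata_rcar fixes above apply a common rule: pm_runtime_get_sync() raises the device usage count even when it returns an error, so every error return after it needs a matching pm_runtime_put(). A minimal sketch of the pattern, outside any particular driver (the function name is illustrative):

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Illustrative only: balance the usage count when the sync get fails. */
static int example_resume(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);

	if (ret < 0) {
		pm_runtime_put(dev);	/* count was bumped despite the error */
		return ret;
	}

	/* ... re-initialise the hardware here ... */
	return 0;
}
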
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index d9fd70280482..7f814da3c2d0 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
- if (!dev_data->persist) return 0;
+ if (!dev_data->persist) {
+ atm_dev_put(dev);
+ return 0;
+ }
dev_data->persist = 0;
- if (PRIV(dev)->vcc) return 0;
+ if (PRIV(dev)->vcc) {
+ atm_dev_put(dev);
+ return 0;
+ }
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 95c22c0f9036..40fb069a8a7e 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -153,7 +153,6 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
extern void device_block_probing(void);
extern void device_unblock_probing(void);
-extern void driver_deferred_probe_force_trigger(void);
/* /sys/devices directory */
extern struct kset *devices_kset;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 67d39a90b45c..05d414e9e8a4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -50,6 +50,7 @@ static DEFINE_MUTEX(wfs_lock);
static LIST_HEAD(deferred_sync);
static unsigned int defer_sync_state_count = 1;
static unsigned int defer_fw_devlink_count;
+static LIST_HEAD(deferred_fw_devlink);
static DEFINE_MUTEX(defer_fw_devlink_lock);
static bool fw_devlink_is_permissive(void);
@@ -754,11 +755,11 @@ static void __device_links_queue_sync_state(struct device *dev,
*/
dev->state_synced = true;
- if (WARN_ON(!list_empty(&dev->links.defer_sync)))
+ if (WARN_ON(!list_empty(&dev->links.defer_hook)))
return;
get_device(dev);
- list_add_tail(&dev->links.defer_sync, list);
+ list_add_tail(&dev->links.defer_hook, list);
}
/**
@@ -776,8 +777,8 @@ static void device_links_flush_sync_list(struct list_head *list,
{
struct device *dev, *tmp;
- list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
- list_del_init(&dev->links.defer_sync);
+ list_for_each_entry_safe(dev, tmp, list, links.defer_hook) {
+ list_del_init(&dev->links.defer_hook);
if (dev != dont_lock_dev)
device_lock(dev);
@@ -815,12 +816,12 @@ void device_links_supplier_sync_state_resume(void)
if (defer_sync_state_count)
goto out;
- list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_sync) {
+ list_for_each_entry_safe(dev, tmp, &deferred_sync, links.defer_hook) {
/*
* Delete from deferred_sync list before queuing it to
- * sync_list because defer_sync is used for both lists.
+ * sync_list because defer_hook is used for both lists.
*/
- list_del_init(&dev->links.defer_sync);
+ list_del_init(&dev->links.defer_hook);
__device_links_queue_sync_state(dev, &sync_list);
}
out:
@@ -838,8 +839,8 @@ late_initcall(sync_state_resume_initcall);
static void __device_links_supplier_defer_sync(struct device *sup)
{
- if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
- list_add_tail(&sup->links.defer_sync, &deferred_sync);
+ if (list_empty(&sup->links.defer_hook) && dev_has_sync_state(sup))
+ list_add_tail(&sup->links.defer_hook, &deferred_sync);
}
static void device_link_drop_managed(struct device_link *link)
@@ -1052,7 +1053,7 @@ void device_links_driver_cleanup(struct device *dev)
WRITE_ONCE(link->status, DL_STATE_DORMANT);
}
- list_del_init(&dev->links.defer_sync);
+ list_del_init(&dev->links.defer_hook);
__device_links_no_driver(dev);
device_links_write_unlock();
@@ -1244,6 +1245,12 @@ static void fw_devlink_link_device(struct device *dev)
fw_ret = -EAGAIN;
} else {
fw_ret = -ENODEV;
+ /*
+ * defer_hook is not used to add device to deferred_sync list
+ * until device is bound. Since deferred fw devlink also blocks
+ * probing, same list hook can be used for deferred_fw_devlink.
+ */
+ list_add_tail(&dev->links.defer_hook, &deferred_fw_devlink);
}
if (fw_ret == -ENODEV)
@@ -1312,6 +1319,9 @@ void fw_devlink_pause(void)
*/
void fw_devlink_resume(void)
{
+ struct device *dev, *tmp;
+ LIST_HEAD(probe_list);
+
mutex_lock(&defer_fw_devlink_lock);
if (!defer_fw_devlink_count) {
WARN(true, "Unmatched fw_devlink pause/resume!");
@@ -1323,9 +1333,19 @@ void fw_devlink_resume(void)
goto out;
device_link_add_missing_supplier_links();
- driver_deferred_probe_force_trigger();
+ list_splice_tail_init(&deferred_fw_devlink, &probe_list);
out:
mutex_unlock(&defer_fw_devlink_lock);
+
+ /*
+ * bus_probe_device() can cause new devices to get added and they'll
+ * try to grab defer_fw_devlink_lock. So, this needs to be done outside
+ * the defer_fw_devlink_lock.
+ */
+ list_for_each_entry_safe(dev, tmp, &probe_list, links.defer_hook) {
+ list_del_init(&dev->links.defer_hook);
+ bus_probe_device(dev);
+ }
}
/* Device links support end. */
@@ -2172,7 +2192,7 @@ void device_initialize(struct device *dev)
INIT_LIST_HEAD(&dev->links.consumers);
INIT_LIST_HEAD(&dev->links.suppliers);
INIT_LIST_HEAD(&dev->links.needs_suppliers);
- INIT_LIST_HEAD(&dev->links.defer_sync);
+ INIT_LIST_HEAD(&dev->links.defer_hook);
dev->links.status = DL_DEV_NO_DRIVER;
}
EXPORT_SYMBOL_GPL(device_initialize);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 9a1d940342ac..48ca81cb8ebc 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -164,11 +164,6 @@ static void driver_deferred_probe_trigger(void)
if (!driver_deferred_probe_enable)
return;
- driver_deferred_probe_force_trigger();
-}
-
-void driver_deferred_probe_force_trigger(void)
-{
/*
* A successful probe means that all the devices in the pending list
* should be triggered to be reprobed. Move all the deferred devices
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c
index 977d27bd1a22..a97f33d0c59f 100644
--- a/drivers/base/power/trace.c
+++ b/drivers/base/power/trace.c
@@ -265,14 +265,14 @@ static struct notifier_block pm_trace_nb = {
.notifier_call = pm_trace_notify,
};
-static int early_resume_init(void)
+static int __init early_resume_init(void)
{
hash_value_early_read = read_magic_time();
register_pm_notifier(&pm_trace_nb);
return 0;
}
-static int late_resume_init(void)
+static int __init late_resume_init(void)
{
unsigned int val = hash_value_early_read;
unsigned int user, file, dev;
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 0fd6f97ee523..1d1d26b0d279 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -4,7 +4,7 @@
# subsystems should select the appropriate symbols.
config REGMAP
- default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SCCB || REGMAP_I3C)
+ default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ || REGMAP_SOUNDWIRE || REGMAP_SCCB || REGMAP_I3C)
select IRQ_DOMAIN if REGMAP_IRQ
bool
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 089e5dc7144a..f58baff2be0a 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -463,29 +463,31 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_only);
- ssize_t result;
- bool was_enabled, require_sync = false;
+ bool new_val, require_sync = false;
int err;
- map->lock(map->lock_arg);
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malforned data like debugfs_write_file_bool() */
+ if (err)
+ return count;
- was_enabled = map->cache_only;
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0) {
- map->unlock(map->lock_arg);
- return result;
- }
+ map->lock(map->lock_arg);
- if (map->cache_only && !was_enabled) {
+ if (new_val && !map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_only && was_enabled) {
+ } else if (!new_val && map->cache_only) {
dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
require_sync = true;
}
+ map->cache_only = new_val;
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
if (require_sync) {
err = regcache_sync(map);
@@ -493,7 +495,7 @@ static ssize_t regmap_cache_only_write_file(struct file *file,
dev_err(map->dev, "Failed to sync cache %d\n", err);
}
- return result;
+ return count;
}
static const struct file_operations regmap_cache_only_fops = {
@@ -508,28 +510,32 @@ static ssize_t regmap_cache_bypass_write_file(struct file *file,
{
struct regmap *map = container_of(file->private_data,
struct regmap, cache_bypass);
- ssize_t result;
- bool was_enabled;
+ bool new_val;
+ int err;
- map->lock(map->lock_arg);
+ err = kstrtobool_from_user(user_buf, count, &new_val);
+ /* Ignore malforned data like debugfs_write_file_bool() */
+ if (err)
+ return count;
- was_enabled = map->cache_bypass;
+ err = debugfs_file_get(file->f_path.dentry);
+ if (err)
+ return err;
- result = debugfs_write_file_bool(file, user_buf, count, ppos);
- if (result < 0)
- goto out;
+ map->lock(map->lock_arg);
- if (map->cache_bypass && !was_enabled) {
+ if (new_val && !map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- } else if (!map->cache_bypass && was_enabled) {
+ } else if (!new_val && map->cache_bypass) {
dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
}
+ map->cache_bypass = new_val;
-out:
map->unlock(map->lock_arg);
+ debugfs_file_put(file->f_path.dentry);
- return result;
+ return count;
}
static const struct file_operations regmap_cache_bypass_fops = {
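
[Editor's note, not part of the patch] Both debugfs write handlers above stop calling debugfs_write_file_bool() while holding the regmap lock and instead parse the user buffer up front, then apply the value under the lock. A hedged sketch of that parse-then-lock ordering; the mutex and flag below are placeholders, not regmap internals:

#include <linux/kernel.h>	/* kstrtobool_from_user() */
#include <linux/mutex.h>
#include <linux/uaccess.h>

static DEFINE_MUTEX(example_lock);
static bool example_flag;

/* Illustrative debugfs-style write: parse outside the lock, then apply. */
static ssize_t example_bool_write(const char __user *user_buf, size_t count)
{
	bool new_val;
	int err;

	err = kstrtobool_from_user(user_buf, count, &new_val);
	if (err)
		return err;

	mutex_lock(&example_lock);
	example_flag = new_val;
	mutex_unlock(&example_lock);

	return count;
}
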
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index c472f624382d..795a62a04022 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/log2.h>
#include <linux/hwspinlock.h>
+#include <asm/unaligned.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -249,22 +250,20 @@ static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
- __be16 *b = buf;
-
- b[0] = cpu_to_be16(val << shift);
+ put_unaligned_be16(val << shift, buf);
}
static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
{
- __le16 *b = buf;
-
- b[0] = cpu_to_le16(val << shift);
+ put_unaligned_le16(val << shift, buf);
}
static void regmap_format_16_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u16 *)buf = val << shift;
+ u16 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
@@ -280,43 +279,39 @@ static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
- __be32 *b = buf;
-
- b[0] = cpu_to_be32(val << shift);
+ put_unaligned_be32(val << shift, buf);
}
static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
{
- __le32 *b = buf;
-
- b[0] = cpu_to_le32(val << shift);
+ put_unaligned_le32(val << shift, buf);
}
static void regmap_format_32_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u32 *)buf = val << shift;
+ u32 v = val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#ifdef CONFIG_64BIT
static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
{
- __be64 *b = buf;
-
- b[0] = cpu_to_be64((u64)val << shift);
+ put_unaligned_be64((u64) val << shift, buf);
}
static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
{
- __le64 *b = buf;
-
- b[0] = cpu_to_le64((u64)val << shift);
+ put_unaligned_le64((u64) val << shift, buf);
}
static void regmap_format_64_native(void *buf, unsigned int val,
unsigned int shift)
{
- *(u64 *)buf = (u64)val << shift;
+ u64 v = (u64) val << shift;
+
+ memcpy(buf, &v, sizeof(v));
}
#endif
@@ -333,35 +328,34 @@ static unsigned int regmap_parse_8(const void *buf)
static unsigned int regmap_parse_16_be(const void *buf)
{
- const __be16 *b = buf;
-
- return be16_to_cpu(b[0]);
+ return get_unaligned_be16(buf);
}
static unsigned int regmap_parse_16_le(const void *buf)
{
- const __le16 *b = buf;
-
- return le16_to_cpu(b[0]);
+ return get_unaligned_le16(buf);
}
static void regmap_parse_16_be_inplace(void *buf)
{
- __be16 *b = buf;
+ u16 v = get_unaligned_be16(buf);
- b[0] = be16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_16_le_inplace(void *buf)
{
- __le16 *b = buf;
+ u16 v = get_unaligned_le16(buf);
- b[0] = le16_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_16_native(const void *buf)
{
- return *(u16 *)buf;
+ u16 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
static unsigned int regmap_parse_24(const void *buf)
@@ -376,69 +370,67 @@ static unsigned int regmap_parse_24(const void *buf)
static unsigned int regmap_parse_32_be(const void *buf)
{
- const __be32 *b = buf;
-
- return be32_to_cpu(b[0]);
+ return get_unaligned_be32(buf);
}
static unsigned int regmap_parse_32_le(const void *buf)
{
- const __le32 *b = buf;
-
- return le32_to_cpu(b[0]);
+ return get_unaligned_le32(buf);
}
static void regmap_parse_32_be_inplace(void *buf)
{
- __be32 *b = buf;
+ u32 v = get_unaligned_be32(buf);
- b[0] = be32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_32_le_inplace(void *buf)
{
- __le32 *b = buf;
+ u32 v = get_unaligned_le32(buf);
- b[0] = le32_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_32_native(const void *buf)
{
- return *(u32 *)buf;
+ u32 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#ifdef CONFIG_64BIT
static unsigned int regmap_parse_64_be(const void *buf)
{
- const __be64 *b = buf;
-
- return be64_to_cpu(b[0]);
+ return get_unaligned_be64(buf);
}
static unsigned int regmap_parse_64_le(const void *buf)
{
- const __le64 *b = buf;
-
- return le64_to_cpu(b[0]);
+ return get_unaligned_le64(buf);
}
static void regmap_parse_64_be_inplace(void *buf)
{
- __be64 *b = buf;
+ u64 v = get_unaligned_be64(buf);
- b[0] = be64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static void regmap_parse_64_le_inplace(void *buf)
{
- __le64 *b = buf;
+ u64 v = get_unaligned_le64(buf);
- b[0] = le64_to_cpu(b[0]);
+ memcpy(buf, &v, sizeof(v));
}
static unsigned int regmap_parse_64_native(const void *buf)
{
- return *(u64 *)buf;
+ u64 v;
+
+ memcpy(&v, buf, sizeof(v));
+ return v;
}
#endif
@@ -1357,6 +1349,7 @@ void regmap_exit(struct regmap *map)
if (map->hwlock)
hwspin_lock_free(map->hwlock);
kfree_const(map->name);
+ kfree(map->patch);
kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);
@@ -1371,7 +1364,7 @@ static int dev_get_regmap_match(struct device *dev, void *res, void *data)
/* If the user didn't specify a name match any */
if (data)
- return (*r)->name == data;
+ return !strcmp((*r)->name, data);
else
return 1;
}
@@ -2944,8 +2937,9 @@ EXPORT_SYMBOL_GPL(regmap_update_bits_base);
* @reg: Register to read from
* @bits: Bits to test
*
- * Returns -1 if the underlying regmap_read() fails, 0 if at least one of the
- * tested bits is not set and 1 if all tested bits are set.
+ * Returns 0 if at least one of the tested bits is not set, 1 if all tested
+ * bits are set and a negative error number if the underlying regmap_read()
+ * fails.
*/
int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
{
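
[Editor's note, not part of the patch] The regmap_format_*/regmap_parse_* rewrites above drop direct pointer casts on the I/O buffer, which are only safe when the buffer happens to be naturally aligned, in favour of the get_unaligned_*/put_unaligned_* helpers (and memcpy() for native-endian values). A standalone sketch of the same idea, not tied to regmap:

#include <asm/unaligned.h>
#include <linux/string.h>
#include <linux/types.h>

/* Illustrative only: read/write a 32-bit big-endian word at an arbitrary,
 * possibly unaligned, offset inside a raw byte buffer. */
static void example_put_be32(void *buf, u32 val)
{
	put_unaligned_be32(val, buf);
}

static u32 example_get_be32(const void *buf)
{
	return get_unaligned_be32(buf);
}

/* Native endianness: memcpy() lets the compiler emit a safe access. */
static void example_put_native32(void *buf, u32 val)
{
	memcpy(buf, &val, sizeof(val));
}
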
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c33bbbfd1bd9..475e1a738560 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1368,14 +1368,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
lo->lo_sizelimit != info->lo_sizelimit) {
size_changed = true;
sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
+ invalidate_bdev(lo->lo_device);
}
/* I/O need to be drained during transfer transition */
blk_mq_freeze_queue(lo->lo_queue);
if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
- /* If any pages were dirtied after kill_bdev(), try again */
+ /* If any pages were dirtied after invalidate_bdev(), try again */
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
__func__, lo->lo_number, lo->lo_file_name,
@@ -1615,11 +1615,11 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
return 0;
sync_blockdev(lo->lo_device);
- kill_bdev(lo->lo_device);
+ invalidate_bdev(lo->lo_device);
blk_mq_freeze_queue(lo->lo_queue);
- /* kill_bdev should have truncated all the pages */
+ /* invalidate_bdev should have truncated all the pages */
if (lo->lo_device->bd_inode->i_mapping->nrpages) {
err = -EAGAIN;
pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 43cff01a5a67..ce7e9f223b20 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1033,25 +1033,26 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
dev_err(disk_to_dev(nbd->disk),
"Device being setup by another task");
- sockfd_put(sock);
- return -EBUSY;
+ err = -EBUSY;
+ goto put_socket;
+ }
+
+ nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
+ if (!nsock) {
+ err = -ENOMEM;
+ goto put_socket;
}
socks = krealloc(config->socks, (config->num_connections + 1) *
sizeof(struct nbd_sock *), GFP_KERNEL);
if (!socks) {
- sockfd_put(sock);
- return -ENOMEM;
+ kfree(nsock);
+ err = -ENOMEM;
+ goto put_socket;
}
config->socks = socks;
- nsock = kzalloc(sizeof(struct nbd_sock), GFP_KERNEL);
- if (!nsock) {
- sockfd_put(sock);
- return -ENOMEM;
- }
-
nsock->fallback_index = -1;
nsock->dead = false;
mutex_init(&nsock->tx_lock);
@@ -1063,6 +1064,10 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
atomic_inc(&config->live_connections);
return 0;
+
+put_socket:
+ sockfd_put(sock);
+ return err;
}
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 7420648a1de6..4f61e9209461 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1451,8 +1451,10 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
+ struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
+ struct ceph_options *opt = rbd_dev->rbd_client->client->options;
- osd_req->r_flags = CEPH_OSD_FLAG_READ;
+ osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
osd_req->r_snapid = obj_request->img_request->snap_id;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 9d21bf0f155e..980df853ee49 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -878,6 +878,7 @@ out_put_disk:
put_disk(vblk->disk);
out_free_vq:
vdev->config->del_vqs(vdev);
+ kfree(vblk->vqs);
out_free_vblk:
kfree(vblk);
out_free_index:
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 6e2ad90b17a3..270dd810be54 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -2021,7 +2021,8 @@ static ssize_t hot_add_show(struct class *class,
return ret;
return scnprintf(buf, PAGE_SIZE, "%d\n", ret);
}
-static CLASS_ATTR_RO(hot_add);
+static struct class_attribute class_attr_hot_add =
+ __ATTR(hot_add, 0400, hot_add_show, NULL);
static ssize_t hot_remove_store(struct class *class,
struct class_attribute *attr,
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 3affd180baac..191c97b84715 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -221,6 +221,34 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ u32 sysc_mask, syss_done, rstval;
+ int syss_offset, error = 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+
+ if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
+ syss_done = 0;
+ else
+ syss_done = ddata->cfg.syss_mask;
+
+ if (syss_offset >= 0) {
+ error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
+ rstval, (rstval & ddata->cfg.syss_mask) ==
+ syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+
+ } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
+ rstval, !(rstval & sysc_mask),
+ 100, MAX_MODULE_SOFTRESET_WAIT);
+ }
+
+ return error;
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -925,18 +953,47 @@ static int sysc_enable_module(struct device *dev)
struct sysc *ddata;
const struct sysc_regbits *regbits;
u32 reg, idlemodes, best_mode;
+ int error;
ddata = dev_get_drvdata(dev);
+
+ /*
+ * Some modules like DSS reset automatically on idle. Enable optional
+ * reset clocks and wait for OCP softreset to complete.
+ */
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET) {
+ error = sysc_enable_opt_clocks(ddata);
+ if (error) {
+ dev_err(ddata->dev,
+ "Optional clocks failed for enable: %i\n",
+ error);
+ return error;
+ }
+ }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
+ if (ddata->cfg.quirks & SYSC_QUIRK_OPT_CLKS_IN_RESET)
+ sysc_disable_opt_clocks(ddata);
+
+ /*
+ * Some subsystem private interconnects, like DSS top level module,
+ * need only the automatic OCP softreset handling with no sysconfig
+ * register bits to configure.
+ */
if (ddata->offsets[SYSC_SYSCONFIG] == -ENODEV)
return 0;
regbits = ddata->cap->regbits;
reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
- /* Set CLOCKACTIVITY, we only use it for ick */
+ /*
+ * Set CLOCKACTIVITY, we only use it for ick. And we only configure it
+ * based on the SYSC_QUIRK_USE_CLOCKACT flag, not based on the hardware
+ * capabilities. See the old HWMOD_SET_DEFAULT_CLOCKACT flag.
+ */
if (regbits->clkact_shift >= 0 &&
- (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT ||
- ddata->cfg.sysc_val & BIT(regbits->clkact_shift)))
+ (ddata->cfg.quirks & SYSC_QUIRK_USE_CLOCKACT))
reg |= SYSC_CLOCACT_ICK << regbits->clkact_shift;
/* Set SIDLE mode */
@@ -991,6 +1048,9 @@ set_autoidle:
sysc_write_sysconfig(ddata, reg);
}
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
@@ -1071,6 +1131,9 @@ set_sidle:
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
+ /* Flush posted write */
+ sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
return 0;
}
@@ -1215,7 +1278,8 @@ static int __maybe_unused sysc_noirq_suspend(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
return pm_runtime_force_suspend(dev);
@@ -1227,7 +1291,8 @@ static int __maybe_unused sysc_noirq_resume(struct device *dev)
ddata = dev_get_drvdata(dev);
- if (ddata->cfg.quirks & SYSC_QUIRK_LEGACY_IDLE)
+ if (ddata->cfg.quirks &
+ (SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_NO_IDLE))
return 0;
return pm_runtime_force_resume(dev);
@@ -1488,7 +1553,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
bool lcd_en, digit_en, lcd2_en = false, lcd3_en = false;
const int lcd_en_mask = BIT(0), digit_en_mask = BIT(1);
int manager_count;
- bool framedonetv_irq;
+ bool framedonetv_irq = true;
u32 val, irq_mask = 0;
switch (sysc_soc->soc) {
@@ -1505,6 +1570,7 @@ static u32 sysc_quirk_dispc(struct sysc *ddata, int dispc_offset,
break;
case SOC_AM4:
manager_count = 1;
+ framedonetv_irq = false;
break;
case SOC_UNKNOWN:
default:
@@ -1663,8 +1729,8 @@ static void sysc_quirk_rtc(struct sysc *ddata, bool lock)
local_irq_save(flags);
/* RTC_STATUS BUSY bit may stay active for 1/32768 seconds (~30 usec) */
- error = readl_poll_timeout(ddata->module_va + 0x44, val,
- !(val & BIT(0)), 100, 50);
+ error = readl_poll_timeout_atomic(ddata->module_va + 0x44, val,
+ !(val & BIT(0)), 100, 50);
if (error)
dev_warn(ddata->dev, "rtc busy timeout\n");
/* Now we have ~15 microseconds to read/write various registers */
@@ -1822,11 +1888,10 @@ static int sysc_legacy_init(struct sysc *ddata)
*/
static int sysc_reset(struct sysc *ddata)
{
- int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
- u32 sysc_mask, syss_done;
+ int sysc_offset, sysc_val, error;
+ u32 sysc_mask;
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
if (ddata->legacy_mode ||
ddata->cap->regbits->srst_shift < 0 ||
@@ -1835,11 +1900,6 @@ static int sysc_reset(struct sysc *ddata)
sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
- syss_done = 0;
- else
- syss_done = ddata->cfg.syss_mask;
-
if (ddata->pre_reset_quirk)
ddata->pre_reset_quirk(ddata);
@@ -1856,18 +1916,9 @@ static int sysc_reset(struct sysc *ddata)
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);
- /* Poll on reset status */
- if (syss_offset >= 0) {
- error = readx_poll_timeout(sysc_read_sysstatus, ddata, rstval,
- (rstval & ddata->cfg.syss_mask) ==
- syss_done,
- 100, MAX_MODULE_SOFTRESET_WAIT);
-
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
- error = readx_poll_timeout(sysc_read_sysconfig, ddata, rstval,
- !(rstval & sysc_mask),
- 100, MAX_MODULE_SOFTRESET_WAIT);
- }
+ error = sysc_wait_softreset(ddata);
+ if (error)
+ dev_warn(ddata->dev, "OCP softreset timed out\n");
if (ddata->reset_done_quirk)
ddata->reset_done_quirk(ddata);
@@ -2814,6 +2865,24 @@ static int sysc_check_disabled_devices(struct sysc *ddata)
return error;
}
+/*
+ * Ignore timers tagged with no-reset and no-idle. These are likely in use,
+ * for example by drivers/clocksource/timer-ti-dm-systimer.c. If more checks
+ * are needed, we could also look at the timer register configuration.
+ */
+static int sysc_check_active_timer(struct sysc *ddata)
+{
+ if (ddata->cap->type != TI_SYSC_OMAP2_TIMER &&
+ ddata->cap->type != TI_SYSC_OMAP4_TIMER)
+ return 0;
+
+ if ((ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT) &&
+ (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE))
+ return -EBUSY;
+
+ return 0;
+}
+
static const struct of_device_id sysc_match_table[] = {
{ .compatible = "simple-bus", },
{ /* sentinel */ },
@@ -2870,6 +2939,10 @@ static int sysc_probe(struct platform_device *pdev)
if (error)
return error;
+ error = sysc_check_active_timer(ddata);
+ if (error)
+ return error;
+
error = sysc_get_clocks(ddata);
if (error)
return error;
diff --git a/drivers/char/hw_random/ks-sa-rng.c b/drivers/char/hw_random/ks-sa-rng.c
index e2330e757f1f..001617033d6a 100644
--- a/drivers/char/hw_random/ks-sa-rng.c
+++ b/drivers/char/hw_random/ks-sa-rng.c
@@ -244,6 +244,7 @@ static int ks_sa_rng_probe(struct platform_device *pdev)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "Failed to enable SA power-domain\n");
+ pm_runtime_put_noidle(dev);
pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 31cae88a730b..934c92dcb9ab 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -171,7 +171,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
if (!ptr)
goto failed;
- probe = probe_kernel_read(bounce, ptr, sz);
+ probe = copy_from_kernel_nofault(bounce, ptr, sz);
unxlate_dev_mem_ptr(p, ptr);
if (probe)
goto failed;
diff --git a/drivers/char/tpm/st33zp24/i2c.c b/drivers/char/tpm/st33zp24/i2c.c
index 35333b65acd1..7c617edff4ca 100644
--- a/drivers/char/tpm/st33zp24/i2c.c
+++ b/drivers/char/tpm/st33zp24/i2c.c
@@ -210,7 +210,7 @@ static int st33zp24_i2c_request_resources(struct i2c_client *client)
/*
* st33zp24_i2c_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
* @param: id, the i2c_device_id struct.
* @return: 0 in case of success.
* -1 in other case.
diff --git a/drivers/char/tpm/st33zp24/spi.c b/drivers/char/tpm/st33zp24/spi.c
index 26e09de50f1e..a75dafd39445 100644
--- a/drivers/char/tpm/st33zp24/spi.c
+++ b/drivers/char/tpm/st33zp24/spi.c
@@ -329,7 +329,7 @@ static int st33zp24_spi_request_resources(struct spi_device *dev)
/*
* st33zp24_spi_probe initialize the TPM device
- * @param: dev, the spi_device drescription (TPM SPI description).
+ * @param: dev, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
* or a negative value describing the error.
*/
@@ -378,7 +378,7 @@ static int st33zp24_spi_probe(struct spi_device *dev)
/*
* st33zp24_spi_remove remove the TPM device
- * @param: client, the spi_device drescription (TPM SPI description).
+ * @param: client, the spi_device description (TPM SPI description).
* @return: 0 in case of success.
*/
static int st33zp24_spi_remove(struct spi_device *dev)
diff --git a/drivers/char/tpm/st33zp24/st33zp24.c b/drivers/char/tpm/st33zp24/st33zp24.c
index 37bb13f516be..4ec10ab5e576 100644
--- a/drivers/char/tpm/st33zp24/st33zp24.c
+++ b/drivers/char/tpm/st33zp24/st33zp24.c
@@ -502,7 +502,7 @@ static const struct tpm_class_ops st33zp24_tpm = {
/*
* st33zp24_probe initialize the TPM device
- * @param: client, the i2c_client drescription (TPM I2C description).
+ * @param: client, the i2c_client description (TPM I2C description).
* @param: id, the i2c_device_id struct.
* @return: 0 in case of success.
* -1 in other case.
diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c
index 87f449340202..1784530b8387 100644
--- a/drivers/char/tpm/tpm-dev-common.c
+++ b/drivers/char/tpm/tpm-dev-common.c
@@ -189,15 +189,6 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
goto out;
}
- /* atomic tpm command send and result receive. We only hold the ops
- * lock during this period so that the tpm can be unregistered even if
- * the char dev is held open.
- */
- if (tpm_try_get_ops(priv->chip)) {
- ret = -EPIPE;
- goto out;
- }
-
priv->response_length = 0;
priv->response_read = false;
*off = 0;
@@ -211,11 +202,19 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf,
if (file->f_flags & O_NONBLOCK) {
priv->command_enqueued = true;
queue_work(tpm_dev_wq, &priv->async_work);
- tpm_put_ops(priv->chip);
mutex_unlock(&priv->buffer_mutex);
return size;
}
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ ret = -EPIPE;
+ goto out;
+ }
+
ret = tpm_dev_transmit(priv->chip, priv->space, priv->data_buffer,
sizeof(priv->data_buffer));
tpm_put_ops(priv->chip);
diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
index 09fe45246b8c..994385bf37c0 100644
--- a/drivers/char/tpm/tpm_ibmvtpm.c
+++ b/drivers/char/tpm/tpm_ibmvtpm.c
@@ -683,13 +683,6 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
if (rc)
goto init_irq_cleanup;
- if (!strcmp(id->compat, "IBM,vtpm20")) {
- chip->flags |= TPM_CHIP_FLAG_TPM2;
- rc = tpm2_get_cc_attrs_tbl(chip);
- if (rc)
- goto init_irq_cleanup;
- }
-
if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
ibmvtpm->rtce_buf != NULL,
HZ)) {
@@ -697,6 +690,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
goto init_irq_cleanup;
}
+ if (!strcmp(id->compat, "IBM,vtpm20")) {
+ chip->flags |= TPM_CHIP_FLAG_TPM2;
+ rc = tpm2_get_cc_attrs_tbl(chip);
+ if (rc)
+ goto init_irq_cleanup;
+ }
+
return tpm_chip_register(chip);
init_irq_cleanup:
do {
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index e7df342a317d..0b214963539d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -235,6 +235,13 @@ static int tpm_tis_pnp_init(struct pnp_dev *pnp_dev,
return tpm_tis_init(&pnp_dev->dev, &tpm_info);
}
+/*
+ * There is a known bug caused by 93e1b7d42e1e ("[PATCH] tpm: add HID module
+ * parameter"). That commit added the IFX0102 device ID, which is also used by
+ * tpm_infineon, but neglected to add a quirk for probing which driver ought
+ * to be used.
+ */
+
static struct pnp_device_id tpm_pnp_tbl[] = {
{"PNP0C31", 0}, /* TPM */
{"ATM1200", 0}, /* Atmel */
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 2435216bd10a..65ab1b027949 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -1085,7 +1085,7 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
return 0;
out_err:
- if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
+ if (chip->ops->clk_enable != NULL)
chip->ops->clk_enable(chip, false);
tpm_tis_remove(chip);
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index d96755935529..3856f6ebcb34 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -53,8 +53,6 @@ static int tpm_tis_spi_flow_control(struct tpm_tis_spi_phy *phy,
if ((phy->iobuf[3] & 0x01) == 0) {
// handle SPI wait states
- phy->iobuf[0] = 0;
-
for (i = 0; i < TPM_RETRY; i++) {
spi_xfer->len = 1;
spi_message_init(&m);
@@ -104,6 +102,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
if (ret < 0)
goto exit;
+ /* Flow control transfers are receive only */
+ spi_xfer.tx_buf = NULL;
ret = phy->flow_control(phy, &spi_xfer);
if (ret < 0)
goto exit;
@@ -113,9 +113,8 @@ int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len,
spi_xfer.delay.value = 5;
spi_xfer.delay.unit = SPI_DELAY_UNIT_USECS;
- if (in) {
- spi_xfer.tx_buf = NULL;
- } else if (out) {
+ if (out) {
+ spi_xfer.tx_buf = phy->iobuf;
spi_xfer.rx_buf = NULL;
memcpy(phy->iobuf, out, transfer_len);
out += transfer_len;
@@ -288,6 +287,7 @@ static struct spi_driver tpm_tis_spi_driver = {
.pm = &tpm_tis_pm,
.of_match_table = of_match_ptr(of_tis_spi_match),
.acpi_match_table = ACPI_PTR(acpi_tis_spi_match),
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = tpm_tis_spi_driver_probe,
.remove = tpm_tis_spi_remove,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 00c5e3acee46..ca691bce9791 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -2116,6 +2116,7 @@ static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, id_table);
static unsigned int features[] = {
VIRTIO_CONSOLE_F_SIZE,
@@ -2128,6 +2129,7 @@ static struct virtio_device_id rproc_serial_id_table[] = {
#endif
{ 0 },
};
+MODULE_DEVICE_TABLE(virtio, rproc_serial_id_table);
static unsigned int rproc_serial_features[] = {
};
@@ -2280,6 +2282,5 @@ static void __exit fini(void)
module_init(init);
module_exit(fini);
-MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio console driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 69934c0c3dd8..326f91b2dda9 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -50,6 +50,7 @@ source "drivers/clk/versatile/Kconfig"
config CLK_HSDK
bool "PLL Driver for HSDK platform"
depends on OF || COMPILE_TEST
+ depends on IOMEM
help
This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs
control.
diff --git a/drivers/clk/clk-ast2600.c b/drivers/clk/clk-ast2600.c
index 99afc949925f..177368cac6dd 100644
--- a/drivers/clk/clk-ast2600.c
+++ b/drivers/clk/clk-ast2600.c
@@ -131,6 +131,18 @@ static const struct clk_div_table ast2600_eclk_div_table[] = {
{ 0 }
};
+static const struct clk_div_table ast2600_emmc_extclk_div_table[] = {
+ { 0x0, 2 },
+ { 0x1, 4 },
+ { 0x2, 6 },
+ { 0x3, 8 },
+ { 0x4, 10 },
+ { 0x5, 12 },
+ { 0x6, 14 },
+ { 0x7, 16 },
+ { 0 }
+};
+
static const struct clk_div_table ast2600_mac_div_table[] = {
{ 0x0, 4 },
{ 0x1, 4 },
@@ -390,6 +402,11 @@ static struct clk_hw *aspeed_g6_clk_hw_register_gate(struct device *dev,
return hw;
}
+static const char *const emmc_extclk_parent_names[] = {
+ "emmc_extclk_hpll_in",
+ "mpll",
+};
+
static const char * const vclk_parent_names[] = {
"dpll",
"d1pll",
@@ -459,16 +476,32 @@ static int aspeed_g6_clk_probe(struct platform_device *pdev)
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_UARTX] = hw;
- /* EMMC ext clock divider */
- hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "hpll", 0,
- scu_g6_base + ASPEED_G6_CLK_SELECTION1, 15, 0,
- &aspeed_g6_clk_lock);
+ /* EMMC ext clock */
+ hw = clk_hw_register_fixed_factor(dev, "emmc_extclk_hpll_in", "hpll",
+ 0, 1, 2);
if (IS_ERR(hw))
return PTR_ERR(hw);
- hw = clk_hw_register_divider_table(dev, "emmc_extclk", "emmc_extclk_gate", 0,
- scu_g6_base + ASPEED_G6_CLK_SELECTION1, 12, 3, 0,
- ast2600_div_table,
- &aspeed_g6_clk_lock);
+
+ hw = clk_hw_register_mux(dev, "emmc_extclk_mux",
+ emmc_extclk_parent_names,
+ ARRAY_SIZE(emmc_extclk_parent_names), 0,
+ scu_g6_base + ASPEED_G6_CLK_SELECTION1, 11, 1,
+ 0, &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hw = clk_hw_register_gate(dev, "emmc_extclk_gate", "emmc_extclk_mux",
+ 0, scu_g6_base + ASPEED_G6_CLK_SELECTION1,
+ 15, 0, &aspeed_g6_clk_lock);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ hw = clk_hw_register_divider_table(dev, "emmc_extclk",
+ "emmc_extclk_gate", 0,
+ scu_g6_base +
+ ASPEED_G6_CLK_SELECTION1, 12,
+ 3, 0, ast2600_emmc_extclk_div_table,
+ &aspeed_g6_clk_lock);
if (IS_ERR(hw))
return PTR_ERR(hw);
aspeed_g6_clk_data->hws[ASPEED_CLK_EMMC] = hw;
diff --git a/drivers/clk/mvebu/Kconfig b/drivers/clk/mvebu/Kconfig
index ded07b0bd0d5..557d6213783c 100644
--- a/drivers/clk/mvebu/Kconfig
+++ b/drivers/clk/mvebu/Kconfig
@@ -42,6 +42,7 @@ config ARMADA_AP806_SYSCON
config ARMADA_AP_CPU_CLK
bool
+ select ARMADA_AP_CP_HELPER
config ARMADA_CP110_SYSCON
bool
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index 6282ee2f361c..a8901f90a61a 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -586,7 +586,10 @@ static int sifive_fu540_prci_probe(struct platform_device *pdev)
struct __prci_data *pd;
int r;
- pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
+ pd = devm_kzalloc(dev,
+ struct_size(pd, hw_clks.hws,
+ ARRAY_SIZE(__prci_init_clocks)),
+ GFP_KERNEL);
if (!pd)
return -ENOMEM;
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index ecf7b7db2d05..6c3e84180146 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -480,6 +480,14 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.set_next_event_virt = erratum_set_next_event_tval_virt,
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+ {
+ .match_type = ate_match_local_cap_id,
+ .id = (void *)ARM64_WORKAROUND_1418040,
+ .desc = "ARM erratum 1418040",
+ .disable_compat_vdso = true,
+ },
+#endif
};
typedef bool (*ate_match_fn_t)(const struct arch_timer_erratum_workaround *,
@@ -566,6 +574,9 @@ void arch_timer_enable_workaround(const struct arch_timer_erratum_workaround *wa
if (wa->read_cntvct_el0) {
clocksource_counter.vdso_clock_mode = VDSO_CLOCKMODE_NONE;
vdso_default = VDSO_CLOCKMODE_NONE;
+ } else if (wa->disable_compat_vdso && vdso_default != VDSO_CLOCKMODE_NONE) {
+ vdso_default = VDSO_CLOCKMODE_ARCHTIMER_NOCOMPAT;
+ clocksource_counter.vdso_clock_mode = vdso_default;
}
}
diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c
index 6fd1f219a512..f6fd1c1cc527 100644
--- a/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/drivers/clocksource/timer-ti-dm-systimer.c
@@ -19,7 +19,7 @@
/* For type1, set SYSC_OMAP2_CLOCKACTIVITY for fck off on idle, l4 clock on */
#define DMTIMER_TYPE1_ENABLE ((1 << 9) | (SYSC_IDLE_SMART << 3) | \
SYSC_OMAP2_ENAWAKEUP | SYSC_OMAP2_AUTOIDLE)
-
+#define DMTIMER_TYPE1_DISABLE (SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)
#define DMTIMER_TYPE2_ENABLE (SYSC_IDLE_SMART_WKUP << 2)
#define DMTIMER_RESET_WAIT 100000
@@ -44,6 +44,8 @@ struct dmtimer_systimer {
u8 ctrl;
u8 wakeup;
u8 ifctrl;
+ struct clk *fck;
+ struct clk *ick;
unsigned long rate;
};
@@ -298,16 +300,20 @@ static void __init dmtimer_systimer_select_best(void)
}
/* Interface clocks are only available on some SoCs variants */
-static int __init dmtimer_systimer_init_clock(struct device_node *np,
+static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t,
+ struct device_node *np,
const char *name,
unsigned long *rate)
{
struct clk *clock;
unsigned long r;
+ bool is_ick = false;
int error;
+ is_ick = !strncmp(name, "ick", 3);
+
clock = of_clk_get_by_name(np, name);
- if ((PTR_ERR(clock) == -EINVAL) && !strncmp(name, "ick", 3))
+ if ((PTR_ERR(clock) == -EINVAL) && is_ick)
return 0;
else if (IS_ERR(clock))
return PTR_ERR(clock);
@@ -320,6 +326,11 @@ static int __init dmtimer_systimer_init_clock(struct device_node *np,
if (!r)
return -ENODEV;
+ if (is_ick)
+ t->ick = clock;
+ else
+ t->fck = clock;
+
*rate = r;
return 0;
@@ -339,7 +350,10 @@ static void dmtimer_systimer_enable(struct dmtimer_systimer *t)
static void dmtimer_systimer_disable(struct dmtimer_systimer *t)
{
- writel_relaxed(0, t->base + t->sysc);
+ if (!dmtimer_systimer_revision1(t))
+ return;
+
+ writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc);
}
static int __init dmtimer_systimer_setup(struct device_node *np,
@@ -366,13 +380,13 @@ static int __init dmtimer_systimer_setup(struct device_node *np,
pr_err("%s: clock source init failed: %i\n", __func__, error);
/* For ti-sysc, we have timer clocks at the parent module level */
- error = dmtimer_systimer_init_clock(np->parent, "fck", &rate);
+ error = dmtimer_systimer_init_clock(t, np->parent, "fck", &rate);
if (error)
goto err_unmap;
t->rate = rate;
- error = dmtimer_systimer_init_clock(np->parent, "ick", &rate);
+ error = dmtimer_systimer_init_clock(t, np->parent, "ick", &rate);
if (error)
goto err_unmap;
@@ -496,12 +510,18 @@ static void omap_clockevent_idle(struct clock_event_device *evt)
struct dmtimer_systimer *t = &clkevt->t;
dmtimer_systimer_disable(t);
+ clk_disable(t->fck);
}
static void omap_clockevent_unidle(struct clock_event_device *evt)
{
struct dmtimer_clockevent *clkevt = to_dmtimer_clockevent(evt);
struct dmtimer_systimer *t = &clkevt->t;
+ int error;
+
+ error = clk_enable(t->fck);
+ if (error)
+ pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_ena);
@@ -570,8 +590,8 @@ static int __init dmtimer_clockevent_init(struct device_node *np)
3, /* Timer internal resynch latency */
0xffffffff);
- if (of_device_is_compatible(np, "ti,am33xx") ||
- of_device_is_compatible(np, "ti,am43")) {
+ if (of_machine_is_compatible("ti,am33xx") ||
+ of_machine_is_compatible("ti,am43")) {
dev->suspend = omap_clockevent_idle;
dev->resume = omap_clockevent_unidle;
}
@@ -616,12 +636,18 @@ static void dmtimer_clocksource_suspend(struct clocksource *cs)
clksrc->loadval = readl_relaxed(t->base + t->counter);
dmtimer_systimer_disable(t);
+ clk_disable(t->fck);
}
static void dmtimer_clocksource_resume(struct clocksource *cs)
{
struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
struct dmtimer_systimer *t = &clksrc->t;
+ int error;
+
+ error = clk_enable(t->fck);
+ if (error)
+ pr_err("could not enable timer fck on resume: %i\n", error);
dmtimer_systimer_enable(t);
writel_relaxed(clksrc->loadval, t->base + t->counter);
@@ -653,8 +679,8 @@ static int __init dmtimer_clocksource_init(struct device_node *np)
dev->mask = CLOCKSOURCE_MASK(32);
dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
- if (of_device_is_compatible(np, "ti,am33xx") ||
- of_device_is_compatible(np, "ti,am43")) {
+ /* Unlike for clockevent, legacy code sets suspend only for am4 */
+ if (of_machine_is_compatible("ti,am43")) {
dev->suspend = dmtimer_clocksource_suspend;
dev->resume = dmtimer_clocksource_resume;
}
diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c
index aa13708c2bc3..d22cfae1b019 100644
--- a/drivers/counter/104-quad-8.c
+++ b/drivers/counter/104-quad-8.c
@@ -1274,18 +1274,26 @@ static ssize_t quad8_signal_cable_fault_read(struct counter_device *counter,
struct counter_signal *signal,
void *private, char *buf)
{
- const struct quad8_iio *const priv = counter->priv;
+ struct quad8_iio *const priv = counter->priv;
const size_t channel_id = signal->id / 2;
- const bool disabled = !(priv->cable_fault_enable & BIT(channel_id));
+ bool disabled;
unsigned int status;
unsigned int fault;
- if (disabled)
+ mutex_lock(&priv->lock);
+
+ disabled = !(priv->cable_fault_enable & BIT(channel_id));
+
+ if (disabled) {
+ mutex_unlock(&priv->lock);
return -EINVAL;
+ }
/* Logic 0 = cable fault */
status = inb(priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ mutex_unlock(&priv->lock);
+
/* Mask respective channel and invert logic */
fault = !(status & BIT(channel_id));
@@ -1317,6 +1325,8 @@ static ssize_t quad8_signal_cable_fault_enable_write(
if (ret)
return ret;
+ mutex_lock(&priv->lock);
+
if (enable)
priv->cable_fault_enable |= BIT(channel_id);
else
@@ -1327,6 +1337,8 @@ static ssize_t quad8_signal_cable_fault_enable_write(
outb(cable_fault_enable, priv->base + QUAD8_DIFF_ENCODER_CABLE_STATUS);
+ mutex_unlock(&priv->lock);
+
return len;
}
@@ -1353,6 +1365,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
if (ret)
return ret;
+ mutex_lock(&priv->lock);
+
priv->fck_prescaler[channel_id] = prescaler;
/* Reset Byte Pointer */
@@ -1363,6 +1377,8 @@ static ssize_t quad8_signal_fck_prescaler_write(struct counter_device *counter,
outb(QUAD8_CTR_RLD | QUAD8_RLD_RESET_BP | QUAD8_RLD_PRESET_PSC,
base_offset + 1);
+ mutex_unlock(&priv->lock);
+
return len;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8e23a698ce04..7e0f7880b21a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2464,7 +2464,7 @@ static struct cpufreq_driver intel_cpufreq = {
.name = "intel_cpufreq",
};
-static struct cpufreq_driver *default_driver = &intel_pstate;
+static struct cpufreq_driver *default_driver;
static void intel_pstate_driver_cleanup(void)
{
@@ -2677,6 +2677,8 @@ static struct acpi_platform_list plat_info[] __initdata = {
{ } /* End */
};
+#define BITMASK_OOB (BIT(8) | BIT(18))
+
static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
{
const struct x86_cpu_id *id;
@@ -2686,8 +2688,9 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
id = x86_match_cpu(intel_pstate_cpu_oob_ids);
if (id) {
rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
- if (misc_pwr & (1 << 8)) {
- pr_debug("Bit 8 in the MISC_PWR_MGMT MSR set\n");
+ if (misc_pwr & BITMASK_OOB) {
+ pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
+ pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
return true;
}
}
@@ -2755,6 +2758,7 @@ static int __init intel_pstate_init(void)
hwp_active++;
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
+ default_driver = &intel_pstate;
goto hwp_cpu_matched;
}
} else {
@@ -2772,7 +2776,8 @@ static int __init intel_pstate_init(void)
return -ENODEV;
}
/* Without HWP start in the passive mode. */
- default_driver = &intel_cpufreq;
+ if (!default_driver)
+ default_driver = &intel_cpufreq;
hwp_cpu_matched:
/*
@@ -2817,6 +2822,8 @@ static int __init intel_pstate_setup(char *str)
if (!strcmp(str, "disable")) {
no_load = 1;
+ } else if (!strcmp(str, "active")) {
+ default_driver = &intel_pstate;
} else if (!strcmp(str, "passive")) {
default_driver = &intel_cpufreq;
no_hwp = 1;
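With the hunk above, the operating mode can also be forced from the kernel command line: intel_pstate=active selects the active-mode driver, complementing the existing passive and disable options handled by the same parser.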
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index c149d9e20dfd..87197319ab06 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -186,9 +186,10 @@ int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* be frozen safely.
*/
index = find_deepest_state(drv, dev, U64_MAX, 0, true);
- if (index > 0)
+ if (index > 0) {
enter_s2idle_proper(drv, dev, index);
-
+ local_irq_enable();
+ }
return index;
}
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig
index a62f228be6da..bc35aa0ec07a 100644
--- a/drivers/crypto/caam/Kconfig
+++ b/drivers/crypto/caam/Kconfig
@@ -147,7 +147,7 @@ config CRYPTO_DEV_FSL_CAAM_RNG_API
select HW_RANDOM
help
Selecting this will register the SEC4 hardware rng to
- the hw_random API for suppying the kernel entropy pool.
+ the hw_random API for supplying the kernel entropy pool.
endif # CRYPTO_DEV_FSL_CAAM_JR
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 4fcdd262e581..f3d20b7645e0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -54,7 +54,7 @@ static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
/*
* load 1 to clear written reg:
- * resets the done interrrupt and returns the RNG to idle.
+ * resets the done interrupt and returns the RNG to idle.
*/
append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);
@@ -156,7 +156,7 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
DESC_DER_DECO_STAT_SHIFT;
/*
- * If an error occured in the descriptor, then
+ * If an error occurred in the descriptor, then
* the DECO status field will be set to 0x0D
*/
if (deco_state == DECO_STAT_HOST_ERR)
@@ -264,7 +264,7 @@ static void devm_deinstantiate_rng(void *data)
* - -ENODEV if DECO0 couldn't be acquired
* - -EAGAIN if an error occurred when executing the descriptor
* f.i. there was a RNG hardware error due to not "good enough"
- * entropy being aquired.
+ * entropy being acquired.
*/
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
int gen_sk)
@@ -733,8 +733,8 @@ static int caam_probe(struct platform_device *pdev)
handle_imx6_err005766(&ctrl->mcr);
/*
- * Read the Compile Time paramters and SCFGR to determine
- * if Virtualization is enabled for this platform
+ * Read the Compile Time parameters and SCFGR to determine
+ * if virtualization is enabled for this platform
*/
scfgr = rd_reg32(&ctrl->scfgr);
@@ -863,9 +863,9 @@ static int caam_probe(struct platform_device *pdev)
}
/*
* if instantiate_rng(...) fails, the loop will rerun
- * and the kick_trng(...) function will modfiy the
+ * and the kick_trng(...) function will modify the
* upper and lower limits of the entropy sampling
- * interval, leading to a sucessful initialization of
+ * interval, leading to a successful initialization of
* the RNG.
*/
ret = instantiate_rng(dev, inst_handles,
@@ -882,8 +882,8 @@ static int caam_probe(struct platform_device *pdev)
return ret;
}
/*
- * Set handles init'ed by this module as the complement of the
- * already initialized ones
+ * Set handles initialized by this module as the complement of
+ * the already initialized ones
*/
ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index e796d3cb9be8..e13470901586 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -18,7 +18,7 @@
*/
#define SEC4_SG_LEN_EXT 0x80000000 /* Entry points to table */
-#define SEC4_SG_LEN_FIN 0x40000000 /* Last ent in table */
+#define SEC4_SG_LEN_FIN 0x40000000 /* Last entry in table */
#define SEC4_SG_BPID_MASK 0x000000ff
#define SEC4_SG_BPID_SHIFT 16
#define SEC4_SG_LEN_MASK 0x3fffffff /* Excludes EXT and FINAL */
@@ -113,7 +113,7 @@
*/
#define HDR_REVERSE 0x00000800
-/* Propogate DNR property to SharedDesc */
+/* Propagate DNR property to SharedDesc */
#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
diff --git a/drivers/crypto/caam/pdb.h b/drivers/crypto/caam/pdb.h
index 68c1fd5dee5d..8ccc22075043 100644
--- a/drivers/crypto/caam/pdb.h
+++ b/drivers/crypto/caam/pdb.h
@@ -453,7 +453,7 @@ struct srtp_decap_pdb {
#define DSA_PDB_N_MASK 0x7f
struct dsa_sign_pdb {
- u32 sgf_ln; /* Use DSA_PDB_ defintions per above */
+ u32 sgf_ln; /* Use DSA_PDB_ definitions per above */
u8 *q;
u8 *r;
u8 *g; /* or Gx,y */
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index a2426334be61..476113e12489 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -376,6 +376,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
struct sev_device *sev = psp_master->sev_data;
struct sev_user_data_pek_csr input;
struct sev_data_pek_csr *data;
+ void __user *input_address;
void *blob = NULL;
int ret;
@@ -394,6 +395,7 @@ static int sev_ioctl_do_pek_csr(struct sev_issue_cmd *argp, bool writable)
goto cmd;
/* allocate a physically contiguous buffer to store the CSR blob */
+ input_address = (void __user *)input.address;
if (input.length > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
goto e_free;
@@ -426,7 +428,7 @@ cmd:
}
if (blob) {
- if (copy_to_user((void __user *)input.address, blob, input.length))
+ if (copy_to_user(input_address, blob, input.length))
ret = -EFAULT;
}
@@ -437,7 +439,7 @@ e_free:
return ret;
}
-void *psp_copy_user_blob(u64 __user uaddr, u32 len)
+void *psp_copy_user_blob(u64 uaddr, u32 len)
{
if (!uaddr || !len)
return ERR_PTR(-EINVAL);
@@ -446,7 +448,7 @@ void *psp_copy_user_blob(u64 __user uaddr, u32 len)
if (len > SEV_FW_BLOB_MAX_SIZE)
return ERR_PTR(-EINVAL);
- return memdup_user((void __user *)(uintptr_t)uaddr, len);
+ return memdup_user((void __user *)uaddr, len);
}
EXPORT_SYMBOL_GPL(psp_copy_user_blob);
@@ -621,6 +623,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
{
struct sev_user_data_get_id2 input;
struct sev_data_get_id *data;
+ void __user *input_address;
void *id_blob = NULL;
int ret;
@@ -631,6 +634,8 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
if (copy_from_user(&input, (void __user *)argp->data, sizeof(input)))
return -EFAULT;
+ input_address = (void __user *)input.address;
+
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -660,8 +665,7 @@ static int sev_ioctl_do_get_id2(struct sev_issue_cmd *argp)
}
if (id_blob) {
- if (copy_to_user((void __user *)input.address,
- id_blob, data->len)) {
+ if (copy_to_user(input_address, id_blob, data->len)) {
ret = -EFAULT;
goto e_free;
}
@@ -720,6 +724,8 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
struct sev_user_data_pdh_cert_export input;
void *pdh_blob = NULL, *cert_blob = NULL;
struct sev_data_pdh_cert_export *data;
+ void __user *input_cert_chain_address;
+ void __user *input_pdh_cert_address;
int ret;
/* If platform is not in INIT state then transition it to INIT. */
@@ -745,6 +751,9 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable)
!input.cert_chain_address)
goto cmd;
+ input_pdh_cert_address = (void __user *)input.pdh_cert_address;
+ input_cert_chain_address = (void __user *)input.cert_chain_address;
+
/* Allocate a physically contiguous buffer to store the PDH blob. */
if (input.pdh_cert_len > SEV_FW_BLOB_MAX_SIZE) {
ret = -EFAULT;
@@ -788,7 +797,7 @@ cmd:
}
if (pdh_blob) {
- if (copy_to_user((void __user *)input.pdh_cert_address,
+ if (copy_to_user(input_pdh_cert_address,
pdh_blob, input.pdh_cert_len)) {
ret = -EFAULT;
goto e_free_cert;
@@ -796,7 +805,7 @@ cmd:
}
if (cert_blob) {
- if (copy_to_user((void __user *)input.cert_chain_address,
+ if (copy_to_user(input_cert_chain_address,
cert_blob, input.cert_chain_len))
ret = -EFAULT;
}
diff --git a/drivers/crypto/chelsio/chtls/chtls_cm.c b/drivers/crypto/chelsio/chtls/chtls_cm.c
index f200fae6f7cb..54093115eb95 100644
--- a/drivers/crypto/chelsio/chtls/chtls_cm.c
+++ b/drivers/crypto/chelsio/chtls/chtls_cm.c
@@ -102,7 +102,7 @@ static struct net_device *chtls_find_netdev(struct chtls_dev *cdev,
case PF_INET:
if (likely(!inet_sk(sk)->inet_rcv_saddr))
return ndev;
- ndev = ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr);
+ ndev = __ip_dev_find(&init_net, inet_sk(sk)->inet_rcv_saddr, false);
break;
#if IS_ENABLED(CONFIG_IPV6)
case PF_INET6:
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index e1401d9cc756..2e9acae1cba3 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1052,14 +1052,15 @@ int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
&record_type);
if (err)
goto out_err;
+
+	/* Avoid appending a TLS handshake or alert record to TLS data */
+ if (skb)
+ tx_skb_finalize(skb);
}
recordsz = size;
csk->tlshws.txleft = recordsz;
csk->tlshws.type = record_type;
-
- if (skb)
- ULP_SKB_CB(skb)->ulp.tls.type = record_type;
}
if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) ||
diff --git a/drivers/crypto/hisilicon/sgl.c b/drivers/crypto/hisilicon/sgl.c
index 0e8c7e324fb4..725a739800b0 100644
--- a/drivers/crypto/hisilicon/sgl.c
+++ b/drivers/crypto/hisilicon/sgl.c
@@ -66,7 +66,8 @@ struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
sizeof(struct hisi_acc_hw_sgl);
- block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1));
+ block_size = 1 << (PAGE_SHIFT + MAX_ORDER <= 32 ?
+ PAGE_SHIFT + MAX_ORDER - 1 : 31);
sgl_num_per_block = block_size / sgl_size;
block_num = count / sgl_num_per_block;
remain_sgl = count % sgl_num_per_block;
diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
index 60e744f680d3..1e0a1d70ebd3 100644
--- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c
@@ -118,6 +118,9 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
struct otx_cpt_req_info *cpt_req;
struct pci_dev *pdev;
+ if (!cpt_info)
+ goto complete;
+
cpt_req = cpt_info->req;
if (!status) {
/*
@@ -129,10 +132,10 @@ static void otx_cpt_aead_callback(int status, void *arg1, void *arg2)
!cpt_req->is_enc)
status = validate_hmac_cipher_null(cpt_req);
}
- if (cpt_info) {
- pdev = cpt_info->pdev;
- do_request_cleanup(pdev, cpt_info);
- }
+ pdev = cpt_info->pdev;
+ do_request_cleanup(pdev, cpt_info);
+
+complete:
if (areq)
areq->complete(areq, status);
}
diff --git a/drivers/dio/dio.c b/drivers/dio/dio.c
index c9aa15fb86a9..193b40e7aec0 100644
--- a/drivers/dio/dio.c
+++ b/drivers/dio/dio.c
@@ -135,7 +135,8 @@ int __init dio_find(int deviceid)
else
va = ioremap(pa, PAGE_SIZE);
- if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) {
+ if (copy_from_kernel_nofault(&i,
+ (unsigned char *)va + DIO_IDOFF, 1)) {
if (scode >= DIOII_SCBASE)
iounmap(va);
continue; /* no board present at that select code */
@@ -208,7 +209,8 @@ static int __init dio_init(void)
else
va = ioremap(pa, PAGE_SIZE);
- if (probe_kernel_read(&i, (unsigned char *)va + DIO_IDOFF, 1)) {
+ if (copy_from_kernel_nofault(&i,
+ (unsigned char *)va + DIO_IDOFF, 1)) {
if (scode >= DIOII_SCBASE)
iounmap(va);
continue; /* no board present at that select code */
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 01ce125f8e8d..1ca609f66fdf 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -45,46 +45,20 @@ static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
size_t ret = 0;
dmabuf = dentry->d_fsdata;
- dma_resv_lock(dmabuf->resv, NULL);
+ spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
- dma_resv_unlock(dmabuf->resv);
+ spin_unlock(&dmabuf->name_lock);
return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
dentry->d_name.name, ret > 0 ? name : "");
}
-static const struct dentry_operations dma_buf_dentry_ops = {
- .d_dname = dmabuffs_dname,
-};
-
-static struct vfsmount *dma_buf_mnt;
-
-static int dma_buf_fs_init_context(struct fs_context *fc)
-{
- struct pseudo_fs_context *ctx;
-
- ctx = init_pseudo(fc, DMA_BUF_MAGIC);
- if (!ctx)
- return -ENOMEM;
- ctx->dops = &dma_buf_dentry_ops;
- return 0;
-}
-
-static struct file_system_type dma_buf_fs_type = {
- .name = "dmabuf",
- .init_fs_context = dma_buf_fs_init_context,
- .kill_sb = kill_anon_super,
-};
-
-static int dma_buf_release(struct inode *inode, struct file *file)
+static void dma_buf_release(struct dentry *dentry)
{
struct dma_buf *dmabuf;
- if (!is_dma_buf_file(file))
- return -EINVAL;
-
- dmabuf = file->private_data;
+ dmabuf = dentry->d_fsdata;
BUG_ON(dmabuf->vmapping_counter);
@@ -110,9 +84,32 @@ static int dma_buf_release(struct inode *inode, struct file *file)
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+ .d_dname = dmabuffs_dname,
+ .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx;
+
+ ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dops = &dma_buf_dentry_ops;
return 0;
}
+static struct file_system_type dma_buf_fs_type = {
+ .name = "dmabuf",
+ .init_fs_context = dma_buf_fs_init_context,
+ .kill_sb = kill_anon_super,
+};
+
static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
struct dma_buf *dmabuf;
@@ -341,8 +338,10 @@ static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
kfree(name);
goto out_unlock;
}
+ spin_lock(&dmabuf->name_lock);
kfree(dmabuf->name);
dmabuf->name = name;
+ spin_unlock(&dmabuf->name_lock);
out_unlock:
dma_resv_unlock(dmabuf->resv);
@@ -405,14 +404,13 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
/* Don't count the temporary reference taken inside procfs seq_show */
seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
- dma_resv_lock(dmabuf->resv, NULL);
+ spin_lock(&dmabuf->name_lock);
if (dmabuf->name)
seq_printf(m, "name:\t%s\n", dmabuf->name);
- dma_resv_unlock(dmabuf->resv);
+ spin_unlock(&dmabuf->name_lock);
}
static const struct file_operations dma_buf_fops = {
- .release = dma_buf_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -546,6 +544,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->size = exp_info->size;
dmabuf->exp_name = exp_info->exp_name;
dmabuf->owner = exp_info->owner;
+ spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index b175229a4b01..604f80357931 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -1176,6 +1176,8 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
} else if (dmatest_run) {
if (!is_threaded_test_pending(info)) {
pr_info("No channels configured, continue with any\n");
+ if (!is_threaded_test_run(info))
+ stop_threaded_test(info);
add_threaded_test(info);
}
start_threaded_tests(info);
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 21cb2a58dbd2..a1b56f52db2f 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -118,16 +118,11 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- if (test_bit(DW_DMA_IS_INITIALIZED, &dwc->flags))
- return;
-
dw->initialize_chan(dwc);
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
- set_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
}
/*----------------------------------------------------------------------*/
@@ -954,8 +949,6 @@ static void dwc_issue_pending(struct dma_chan *chan)
void do_dw_dma_off(struct dw_dma *dw)
{
- unsigned int i;
-
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -966,9 +959,6 @@ void do_dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
-
- for (i = 0; i < dw->dma.chancnt; i++)
- clear_bit(DW_DMA_IS_INITIALIZED, &dw->chan[i].flags);
}
void do_dw_dma_on(struct dw_dma *dw)
@@ -1032,8 +1022,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
/* Clear custom channel configuration */
memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));
- clear_bit(DW_DMA_IS_INITIALIZED, &dwc->flags);
-
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c
index 5697c3622699..930ae268c497 100644
--- a/drivers/dma/fsl-edma-common.c
+++ b/drivers/dma/fsl-edma-common.c
@@ -352,26 +352,28 @@ static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
/*
* TCD parameters are stored in struct fsl_edma_hw_tcd in little
* endian format. However, we need to load the TCD registers in
- * big- or little-endian obeying the eDMA engine model endian.
+ * big- or little-endian obeying the eDMA engine model endian,
+ * and this is performed from specific edma_write functions
*/
edma_writew(edma, 0, &regs->tcd[ch].csr);
- edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
- edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
- edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
- edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
+ edma_writel(edma, (s32)tcd->saddr, &regs->tcd[ch].saddr);
+ edma_writel(edma, (s32)tcd->daddr, &regs->tcd[ch].daddr);
- edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
- edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
+ edma_writew(edma, (s16)tcd->attr, &regs->tcd[ch].attr);
+ edma_writew(edma, tcd->soff, &regs->tcd[ch].soff);
- edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
- edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
- edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
+ edma_writel(edma, (s32)tcd->nbytes, &regs->tcd[ch].nbytes);
+ edma_writel(edma, (s32)tcd->slast, &regs->tcd[ch].slast);
- edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
+ edma_writew(edma, (s16)tcd->citer, &regs->tcd[ch].citer);
+ edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);
+ edma_writew(edma, (s16)tcd->doff, &regs->tcd[ch].doff);
+
+ edma_writel(edma, (s32)tcd->dlast_sga,
&regs->tcd[ch].dlast_sga);
- edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
+ edma_writew(edma, (s16)tcd->csr, &regs->tcd[ch].csr);
}
static inline
@@ -589,6 +591,8 @@ void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
{
struct virt_dma_desc *vdesc;
+ lockdep_assert_held(&fsl_chan->vchan.lock);
+
vdesc = vchan_next_desc(&fsl_chan->vchan);
if (!vdesc)
return;
diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h
index 67e422590c9a..ec1169741de1 100644
--- a/drivers/dma/fsl-edma-common.h
+++ b/drivers/dma/fsl-edma-common.h
@@ -33,7 +33,7 @@
#define EDMA_TCD_ATTR_DSIZE_16BIT BIT(0)
#define EDMA_TCD_ATTR_DSIZE_32BIT BIT(1)
#define EDMA_TCD_ATTR_DSIZE_64BIT (BIT(0) | BIT(1))
-#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(3) | BIT(0))
+#define EDMA_TCD_ATTR_DSIZE_32BYTE (BIT(2) | BIT(0))
#define EDMA_TCD_ATTR_SSIZE_8BIT 0
#define EDMA_TCD_ATTR_SSIZE_16BIT (EDMA_TCD_ATTR_DSIZE_16BIT << 8)
#define EDMA_TCD_ATTR_SSIZE_32BIT (EDMA_TCD_ATTR_DSIZE_32BIT << 8)
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index eff7ebd8cf35..90bb72af306c 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -45,6 +45,13 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
fsl_chan = &fsl_edma->chans[ch];
spin_lock(&fsl_chan->vchan.lock);
+
+ if (!fsl_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&fsl_chan->vchan.lock);
+ continue;
+ }
+
if (!fsl_chan->edesc->iscyclic) {
list_del(&fsl_chan->edesc->vdesc.node);
vchan_cookie_complete(&fsl_chan->edesc->vdesc);
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index ff49847e37a8..cb376cf6a2d2 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -74,6 +74,7 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_device *idxd;
struct idxd_wq *wq;
struct device *dev;
+ int rc = 0;
wq = inode_wq(inode);
idxd = wq->idxd;
@@ -81,17 +82,27 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));
- if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq))
- return -EBUSY;
-
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ mutex_lock(&wq->wq_lock);
+
+ if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
+ rc = -EBUSY;
+ goto failed;
+ }
+
ctx->wq = wq;
filp->private_data = ctx;
idxd_wq_get(wq);
+ mutex_unlock(&wq->wq_lock);
return 0;
+
+ failed:
+ mutex_unlock(&wq->wq_lock);
+ kfree(ctx);
+ return rc;
}
static int idxd_cdev_release(struct inode *node, struct file *filep)
@@ -105,7 +116,9 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
filep->private_data = NULL;
kfree(ctx);
+ mutex_lock(&wq->wq_lock);
idxd_wq_put(wq);
+ mutex_unlock(&wq->wq_lock);
return 0;
}
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 8d79a8787104..8d2718c585dc 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -320,6 +320,31 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
devm_iounmap(dev, wq->dportal);
}
+void idxd_wq_disable_cleanup(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+ int i, wq_offset;
+
+ lockdep_assert_held(&idxd->dev_lock);
+ memset(&wq->wqcfg, 0, sizeof(wq->wqcfg));
+ wq->type = IDXD_WQT_NONE;
+ wq->size = 0;
+ wq->group = NULL;
+ wq->threshold = 0;
+ wq->priority = 0;
+ clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
+ memset(wq->name, 0, WQ_NAME_SIZE);
+
+ for (i = 0; i < 8; i++) {
+ wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32);
+ iowrite32(0, idxd->reg_base + wq_offset);
+ dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n",
+ wq->id, i, wq_offset,
+ ioread32(idxd->reg_base + wq_offset));
+ }
+}
+
/* Device control bits */
static inline bool idxd_is_enabled(struct idxd_device *idxd)
{
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index b8f8a363b4a7..908c8d0ef3ab 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -290,6 +290,7 @@ int idxd_wq_enable(struct idxd_wq *wq);
int idxd_wq_disable(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
+void idxd_wq_disable_cleanup(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 6510791b9921..8a35f58da689 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -141,7 +141,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
if (!err)
- return IRQ_HANDLED;
+ goto out;
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
@@ -162,6 +162,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
spin_unlock_bh(&idxd->dev_lock);
}
+ out:
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 052dae5d6ddd..2e2c5082f322 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -315,6 +315,11 @@ static int idxd_config_bus_remove(struct device *dev)
idxd_unregister_dma_device(idxd);
spin_lock_irqsave(&idxd->dev_lock, flags);
rc = idxd_device_disable(idxd);
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ idxd_wq_disable_cleanup(wq);
+ }
spin_unlock_irqrestore(&idxd->dev_lock, flags);
module_put(THIS_MODULE);
if (rc < 0)
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 91774039ae5d..270992c4fe47 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1331,8 +1331,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan);
- if (sdmac->event_id0 >= 0)
- sdma_event_disable(sdmac, sdmac->event_id0);
+ sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
@@ -1632,11 +1631,9 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */
- if (sdmac->event_id0 >= 0) {
- if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
- return -EINVAL;
- sdma_event_enable(sdmac, sdmac->event_id0);
- }
+ if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
+ return -EINVAL;
+ sdma_event_enable(sdmac, sdmac->event_id0);
if (sdmac->event_id1) {
if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 8ad0ad861c86..fd782aee02d9 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -26,6 +26,18 @@
#include "../dmaengine.h"
+int completion_timeout = 200;
+module_param(completion_timeout, int, 0644);
+MODULE_PARM_DESC(completion_timeout,
+ "set ioat completion timeout [msec] (default 200 [msec])");
+int idle_timeout = 2000;
+module_param(idle_timeout, int, 0644);
+MODULE_PARM_DESC(idle_timeout,
+	"set ioat idle timeout [msec] (default 2000 [msec])");
+
+#define IDLE_TIMEOUT msecs_to_jiffies(idle_timeout)
+#define COMPLETION_TIMEOUT msecs_to_jiffies(completion_timeout)
+
static char *chanerr_str[] = {
"DMA Transfer Source Address Error",
"DMA Transfer Destination Address Error",
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index e6b622e1ba92..f7f31fdf14cf 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -104,8 +104,6 @@ struct ioatdma_chan {
#define IOAT_RUN 5
#define IOAT_CHAN_ACTIVE 6
struct timer_list timer;
- #define COMPLETION_TIMEOUT msecs_to_jiffies(100)
- #define IDLE_TIMEOUT msecs_to_jiffies(2000)
#define RESET_DELAY msecs_to_jiffies(100)
struct ioatdma_device *ioat_dma;
dma_addr_t completion_dma;
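The two ioat hunks above replace the hard-coded 100 ms completion and 2000 ms idle timeouts with writable module parameters (note the completion default moves from 100 ms to 200 ms); the macros now expand to msecs_to_jiffies() of those ints at every use, so an updated value takes effect the next time a timeout is armed. A minimal, hedged sketch of that consumption (ioat_chan is an assumed struct ioatdma_chan pointer from the surrounding driver):

	/* Sketch only: re-arm the channel timer with the parameterized
	 * timeout; COMPLETION_TIMEOUT is evaluated here, so a runtime change
	 * to the module parameter affects the next arming. */
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);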
diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c
index e15bd15a9ef6..e12b754e6398 100644
--- a/drivers/dma/mcf-edma.c
+++ b/drivers/dma/mcf-edma.c
@@ -35,6 +35,13 @@ static irqreturn_t mcf_edma_tx_handler(int irq, void *dev_id)
mcf_chan = &mcf_edma->chans[ch];
spin_lock(&mcf_chan->vchan.lock);
+
+ if (!mcf_chan->edesc) {
+ /* terminate_all called before */
+ spin_unlock(&mcf_chan->vchan.lock);
+ continue;
+ }
+
if (!mcf_chan->edesc->iscyclic) {
list_del(&mcf_chan->edesc->vdesc.node);
vchan_cookie_complete(&mcf_chan->edesc->vdesc);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index b218a013c260..8f7ceb698226 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -586,6 +586,8 @@ static void usb_dmac_isr_transfer_end(struct usb_dmac_chan *chan)
desc->residue = usb_dmac_get_current_residue(chan, desc,
desc->sg_index - 1);
desc->done_cookie = desc->vd.tx.cookie;
+ desc->vd.tx_result.result = DMA_TRANS_NOERROR;
+ desc->vd.tx_result.residue = desc->residue;
vchan_cookie_complete(&desc->vd);
/* Restart the next transfer if this driver has a next desc */
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index db58d7e4f9fe..c5fa2ef74abc 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -658,6 +658,7 @@ static int tegra_adma_alloc_chan_resources(struct dma_chan *dc)
ret = pm_runtime_get_sync(tdc2dev(tdc));
if (ret < 0) {
+ pm_runtime_put_noidle(tdc2dev(tdc));
free_irq(tdc->irq, tdc);
return ret;
}
@@ -869,8 +870,10 @@ static int tegra_adma_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto rpm_disable;
+ }
ret = tegra_adma_init(tdma);
if (ret)
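Both tegra210-adma hunks, like the gpio-arizona hunks later in this series, apply the same runtime-PM rule: pm_runtime_get_sync() raises the device usage count even when it returns an error, so a failing call must be balanced before returning or the device can never runtime-suspend again. A self-contained sketch of the idiom (illustrative function name):

	#include <linux/pm_runtime.h>

	/* Illustrative sketch: balance a failed pm_runtime_get_sync() so the
	 * usage count taken by the failed get does not leak. */
	static int example_touch_hw(struct device *dev)
	{
		int ret = pm_runtime_get_sync(dev);

		if (ret < 0) {
			pm_runtime_put_noidle(dev);
			return ret;
		}

		/* ... access the hardware ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
		return 0;
	}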
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 0b8f3dd6b146..77e8e67d995b 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -42,6 +42,7 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
ud = platform_get_drvdata(pdev);
if (!ud) {
pr_debug("UDMA has not been probed\n");
+ put_device(&pdev->dev);
return ERR_PTR(-EPROBE_DEFER);
}
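The added put_device() follows the usual device-lookup rule: the platform_device obtained earlier in this function (by an OF lookup not shown in the hunk) carries an elevated reference count, and every exit taken after that lookup must drop it. A hedged sketch of the shape, with illustrative names:

	/* Sketch only: release the reference held on the looked-up device on
	 * the failure path as well as on normal teardown. */
	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return ERR_PTR(-ENODEV);

	if (!platform_get_drvdata(pdev)) {
		put_device(&pdev->dev);	/* target not probed yet: drop ref, defer */
		return ERR_PTR(-EPROBE_DEFER);
	}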
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index c91e2dc1bb72..6c879a734360 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -1753,7 +1753,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
dev_err(ud->ddev.dev,
"Descriptor pool allocation failed\n");
uc->use_dma_pool = false;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_cleanup;
}
}
@@ -1773,16 +1774,18 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
ret = udma_get_chan_pair(uc);
if (ret)
- return ret;
+ goto err_cleanup;
ret = udma_alloc_tx_resources(uc);
- if (ret)
- return ret;
+ if (ret) {
+ udma_put_rchan(uc);
+ goto err_cleanup;
+ }
ret = udma_alloc_rx_resources(uc);
if (ret) {
udma_free_tx_resources(uc);
- return ret;
+ goto err_cleanup;
}
uc->config.src_thread = ud->psil_base + uc->tchan->id;
@@ -1800,10 +1803,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
uc->id);
ret = udma_alloc_tx_resources(uc);
- if (ret) {
- uc->config.remote_thread_id = -1;
- return ret;
- }
+ if (ret)
+ goto err_cleanup;
uc->config.src_thread = ud->psil_base + uc->tchan->id;
uc->config.dst_thread = uc->config.remote_thread_id;
@@ -1820,10 +1821,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
uc->id);
ret = udma_alloc_rx_resources(uc);
- if (ret) {
- uc->config.remote_thread_id = -1;
- return ret;
- }
+ if (ret)
+ goto err_cleanup;
uc->config.src_thread = uc->config.remote_thread_id;
uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
@@ -1838,7 +1837,9 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
/* Can not happen */
dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
__func__, uc->id, uc->config.dir);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_cleanup;
+
}
/* check if the channel configuration was successful */
@@ -1847,7 +1848,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
if (udma_is_chan_running(uc)) {
dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
- udma_stop(uc);
+ udma_reset_chan(uc, false);
if (udma_is_chan_running(uc)) {
dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
ret = -EBUSY;
@@ -1906,8 +1907,6 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
udma_reset_rings(uc);
- INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
- udma_check_tx_completion);
return 0;
err_irq_free:
@@ -1919,7 +1918,7 @@ err_psi_free:
err_res_free:
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
-
+err_cleanup:
udma_reset_uchan(uc);
if (uc->use_dma_pool) {
@@ -3019,7 +3018,6 @@ static void udma_free_chan_resources(struct dma_chan *chan)
}
cancel_delayed_work_sync(&uc->tx_drain.work);
- destroy_delayed_work_on_stack(&uc->tx_drain.work);
if (uc->irq_num_ring > 0) {
free_irq(uc->irq_num_ring, uc);
@@ -3593,7 +3591,7 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(navss_node, "ti,udma-atype", &ud->atype);
+ ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
if (!ret && ud->atype > 2) {
dev_err(dev, "Invalid atype: %u\n", ud->atype);
return -EINVAL;
@@ -3711,6 +3709,7 @@ static int udma_probe(struct platform_device *pdev)
tasklet_init(&uc->vc.task, udma_vchan_complete,
(unsigned long)&uc->vc);
init_completion(&uc->teardown_completed);
+ INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
}
ret = dma_async_device_register(&ud->ddev);
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index ef90070a9194..6262f6370c5d 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -269,6 +269,8 @@ static int get_scrub_rate(struct mem_ctl_info *mci)
if (pvt->model == 0x60)
amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval);
+ else
+ amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
} else {
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index e6fc022bc87e..3939699e62fe 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -278,3 +278,14 @@ config EFI_EARLYCON
depends on SERIAL_EARLYCON && !ARM && !IA64
select FONT_SUPPORT
select ARCH_USE_MEMREMAP_PROT
+
+config EFI_CUSTOM_SSDT_OVERLAYS
+ bool "Load custom ACPI SSDT overlay from an EFI variable"
+ depends on EFI_VARS && ACPI
+ default ACPI_TABLE_UPGRADE
+ help
+ Allow loading of an ACPI SSDT overlay from an EFI variable specified
+ by a kernel command line option.
+
+ See Documentation/admin-guide/acpi/ssdt-overlays.rst for more
+ information.
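When the new option is enabled, the overlay is selected at boot through the command-line hook handled by efivar_ssdt_setup() later in this series; the parameter takes roughly the form efivar_ssdt=<EFI variable name>, and the named variable's contents are then installed as an SSDT overlay (see the referenced ssdt-overlays.rst for the exact syntax).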
diff --git a/drivers/firmware/efi/arm-init.c b/drivers/firmware/efi/arm-init.c
index c697e70ca7e7..71c445d20258 100644
--- a/drivers/firmware/efi/arm-init.c
+++ b/drivers/firmware/efi/arm-init.c
@@ -52,9 +52,11 @@ static phys_addr_t __init efi_to_phys(unsigned long addr)
}
static __initdata unsigned long screen_info_table = EFI_INVALID_TABLE_ADDR;
+static __initdata unsigned long cpu_state_table = EFI_INVALID_TABLE_ADDR;
static const efi_config_table_type_t arch_tables[] __initconst = {
{LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID, &screen_info_table},
+ {LINUX_EFI_ARM_CPU_STATE_TABLE_GUID, &cpu_state_table},
{}
};
@@ -62,7 +64,8 @@ static void __init init_screen_info(void)
{
struct screen_info *si;
- if (screen_info_table != EFI_INVALID_TABLE_ADDR) {
+ if (IS_ENABLED(CONFIG_ARM) &&
+ screen_info_table != EFI_INVALID_TABLE_ADDR) {
si = early_memremap_ro(screen_info_table, sizeof(*si));
if (!si) {
pr_err("Could not map screen_info config table\n");
@@ -116,7 +119,8 @@ static int __init uefi_init(u64 efi_system_table)
goto out;
}
retval = efi_config_parse_tables(config_tables, systab->nr_tables,
- arch_tables);
+ IS_ENABLED(CONFIG_ARM) ? arch_tables
+ : NULL);
early_memunmap(config_tables, table_size);
out:
@@ -238,9 +242,37 @@ void __init efi_init(void)
init_screen_info();
+#ifdef CONFIG_ARM
/* ARM does not permit early mappings to persist across paging_init() */
- if (IS_ENABLED(CONFIG_ARM))
- efi_memmap_unmap();
+ efi_memmap_unmap();
+
+ if (cpu_state_table != EFI_INVALID_TABLE_ADDR) {
+ struct efi_arm_entry_state *state;
+ bool dump_state = true;
+
+ state = early_memremap_ro(cpu_state_table,
+ sizeof(struct efi_arm_entry_state));
+ if (state == NULL) {
+ pr_warn("Unable to map CPU entry state table.\n");
+ return;
+ }
+
+ if ((state->sctlr_before_ebs & 1) == 0)
+ pr_warn(FW_BUG "EFI stub was entered with MMU and Dcache disabled, please fix your firmware!\n");
+ else if ((state->sctlr_after_ebs & 1) == 0)
+ pr_warn(FW_BUG "ExitBootServices() returned with MMU and Dcache disabled, please fix your firmware!\n");
+ else
+ dump_state = false;
+
+ if (dump_state || efi_enabled(EFI_DBG)) {
+ pr_info("CPSR at EFI stub entry : 0x%08x\n", state->cpsr_before_ebs);
+ pr_info("SCTLR at EFI stub entry : 0x%08x\n", state->sctlr_before_ebs);
+ pr_info("CPSR after ExitBootServices() : 0x%08x\n", state->cpsr_after_ebs);
+ pr_info("SCTLR after ExitBootServices(): 0x%08x\n", state->sctlr_after_ebs);
+ }
+ early_memunmap(state, sizeof(struct efi_arm_entry_state));
+ }
+#endif
}
static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index c2f1d4e6630b..feb7fe6f2da7 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,10 +356,7 @@ static struct pstore_info efi_pstore_info = {
static __init int efivars_pstore_init(void)
{
- if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
- return 0;
-
- if (!efivars_kobject())
+ if (!efivars_kobject() || !efivar_supports_writes())
return 0;
if (efivars_pstore_disable)
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 7f1657b6c30d..fdd1db025dbf 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -176,11 +176,13 @@ static struct efivar_operations generic_ops;
static int generic_ops_register(void)
{
generic_ops.get_variable = efi.get_variable;
- generic_ops.set_variable = efi.set_variable;
- generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
generic_ops.get_next_variable = efi.get_next_variable;
generic_ops.query_variable_store = efi_query_variable_store;
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
+ generic_ops.set_variable = efi.set_variable;
+ generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
+ }
return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}
@@ -189,7 +191,7 @@ static void generic_ops_unregister(void)
efivars_unregister(&generic_efivars);
}
-#if IS_ENABLED(CONFIG_ACPI)
+#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
#define EFIVAR_SSDT_NAME_MAX 16
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
static int __init efivar_ssdt_setup(char *str)
@@ -382,7 +384,8 @@ static int __init efisubsys_init(void)
return -ENOMEM;
}
- if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES)) {
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
efivar_ssdt_load();
error = generic_ops_register();
if (error)
@@ -416,7 +419,8 @@ static int __init efisubsys_init(void)
err_remove_group:
sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
- if (efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
+ if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
+ EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
generic_ops_unregister();
err_put:
kobject_put(efi_kobj);
@@ -622,7 +626,8 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
rsv = (void *)(p + prsv % PAGE_SIZE);
/* reserve the entry itself */
- memblock_reserve(prsv, EFI_MEMRESERVE_SIZE(rsv->size));
+ memblock_reserve(prsv,
+ struct_size(rsv, entry, rsv->size));
for (i = 0; i < atomic_read(&rsv->count); i++) {
memblock_reserve(rsv->entry[i].base,
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 26528a46d99e..dcea137142b3 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -680,11 +680,8 @@ int efivars_sysfs_init(void)
struct kobject *parent_kobj = efivars_kobject();
int error = 0;
- if (!efi_rt_services_supported(EFI_RT_SUPPORTED_VARIABLE_SERVICES))
- return -ENODEV;
-
/* No efivars has been registered yet */
- if (!parent_kobj)
+ if (!parent_kobj || !efivar_supports_writes())
return 0;
printk(KERN_INFO "EFI Variables Facility v%s %s\n", EFIVARS_VERSION,
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
index e3d692696583..d5915272141f 100644
--- a/drivers/firmware/efi/esrt.c
+++ b/drivers/firmware/efi/esrt.c
@@ -181,7 +181,7 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
"entry%d", entry_num);
if (rc) {
- kfree(entry);
+ kobject_put(&entry->kobj);
return rc;
}
}
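The esrt fix applies the standard kobject error-handling rule: once kobject_init_and_add() has run, the reference count is live and the embedded kobject owns the surrounding allocation, so a failure must be unwound with kobject_put() (which ends up in the ktype's ->release() and frees the memory exactly once) rather than a bare kfree(). A self-contained sketch with illustrative names:

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct example_entry {
		struct kobject kobj;
	};

	static void example_release(struct kobject *kobj)
	{
		kfree(container_of(kobj, struct example_entry, kobj));
	}

	static struct kobj_type example_ktype = {
		.release = example_release,
	};

	static int example_create(struct kobject *parent, int num)
	{
		struct example_entry *e = kzalloc(sizeof(*e), GFP_KERNEL);
		int rc;

		if (!e)
			return -ENOMEM;

		rc = kobject_init_and_add(&e->kobj, &example_ktype, parent,
					  "entry%d", num);
		if (rc)
			kobject_put(&e->kobj);	/* triggers ->release(), do not kfree() */

		return rc;
	}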
diff --git a/drivers/firmware/efi/libstub/alignedmem.c b/drivers/firmware/efi/libstub/alignedmem.c
index cc89c4d6196f..1de9878ddd3a 100644
--- a/drivers/firmware/efi/libstub/alignedmem.c
+++ b/drivers/firmware/efi/libstub/alignedmem.c
@@ -44,7 +44,7 @@ efi_status_t efi_allocate_pages_aligned(unsigned long size, unsigned long *addr,
*addr = ALIGN((unsigned long)alloc_addr, align);
if (slack > 0) {
- int l = (alloc_addr % align) / EFI_PAGE_SIZE;
+ int l = (alloc_addr & (align - 1)) / EFI_PAGE_SIZE;
if (l) {
efi_bs_call(free_pages, alloc_addr, slack - l + 1);
diff --git a/drivers/firmware/efi/libstub/arm32-stub.c b/drivers/firmware/efi/libstub/arm32-stub.c
index 40243f524556..d08e5d55838c 100644
--- a/drivers/firmware/efi/libstub/arm32-stub.c
+++ b/drivers/firmware/efi/libstub/arm32-stub.c
@@ -7,10 +7,49 @@
#include "efistub.h"
+static efi_guid_t cpu_state_guid = LINUX_EFI_ARM_CPU_STATE_TABLE_GUID;
+
+struct efi_arm_entry_state *efi_entry_state;
+
+static void get_cpu_state(u32 *cpsr, u32 *sctlr)
+{
+ asm("mrs %0, cpsr" : "=r"(*cpsr));
+ if ((*cpsr & MODE_MASK) == HYP_MODE)
+ asm("mrc p15, 4, %0, c1, c0, 0" : "=r"(*sctlr));
+ else
+ asm("mrc p15, 0, %0, c1, c0, 0" : "=r"(*sctlr));
+}
+
efi_status_t check_platform_features(void)
{
+ efi_status_t status;
+ u32 cpsr, sctlr;
int block;
+ get_cpu_state(&cpsr, &sctlr);
+
+ efi_info("Entering in %s mode with MMU %sabled\n",
+ ((cpsr & MODE_MASK) == HYP_MODE) ? "HYP" : "SVC",
+ (sctlr & 1) ? "en" : "dis");
+
+ status = efi_bs_call(allocate_pool, EFI_LOADER_DATA,
+ sizeof(*efi_entry_state),
+ (void **)&efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("allocate_pool() failed\n");
+ return status;
+ }
+
+ efi_entry_state->cpsr_before_ebs = cpsr;
+ efi_entry_state->sctlr_before_ebs = sctlr;
+
+ status = efi_bs_call(install_configuration_table, &cpu_state_guid,
+ efi_entry_state);
+ if (status != EFI_SUCCESS) {
+ efi_err("install_configuration_table() failed\n");
+ goto free_state;
+ }
+
/* non-LPAE kernels can run anywhere */
if (!IS_ENABLED(CONFIG_ARM_LPAE))
return EFI_SUCCESS;
@@ -19,9 +58,22 @@ efi_status_t check_platform_features(void)
block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
if (block < 5) {
efi_err("This LPAE kernel is not supported by your CPU\n");
- return EFI_UNSUPPORTED;
+ status = EFI_UNSUPPORTED;
+ goto drop_table;
}
return EFI_SUCCESS;
+
+drop_table:
+ efi_bs_call(install_configuration_table, &cpu_state_guid, NULL);
+free_state:
+ efi_bs_call(free_pool, efi_entry_state);
+ return status;
+}
+
+void efi_handle_post_ebs_state(void)
+{
+ get_cpu_state(&efi_entry_state->cpsr_after_ebs,
+ &efi_entry_state->sctlr_after_ebs);
}
static efi_guid_t screen_info_guid = LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID;
diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c
index 7f6a57dec513..e5bfac79e5ac 100644
--- a/drivers/firmware/efi/libstub/arm64-stub.c
+++ b/drivers/firmware/efi/libstub/arm64-stub.c
@@ -35,13 +35,16 @@ efi_status_t check_platform_features(void)
}
/*
- * Relocatable kernels can fix up the misalignment with respect to
- * MIN_KIMG_ALIGN, so they only require a minimum alignment of EFI_KIMG_ALIGN
- * (which accounts for the alignment of statically allocated objects such as
- * the swapper stack.)
+ * Although relocatable kernels can fix up the misalignment with respect to
+ * MIN_KIMG_ALIGN, the resulting virtual text addresses are subtly out of
+ * sync with those recorded in the vmlinux when kaslr is disabled but the
+ * image required relocation anyway. Therefore retain 2M alignment unless
+ * KASLR is in use.
*/
-static const u64 min_kimg_align = IS_ENABLED(CONFIG_RELOCATABLE) ? EFI_KIMG_ALIGN
- : MIN_KIMG_ALIGN;
+static u64 min_kimg_align(void)
+{
+ return efi_nokaslr ? MIN_KIMG_ALIGN : EFI_KIMG_ALIGN;
+}
efi_status_t handle_kernel_image(unsigned long *image_addr,
unsigned long *image_size,
@@ -74,21 +77,21 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
kernel_size = _edata - _text;
kernel_memsize = kernel_size + (_end - _edata);
- *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align;
+ *reserve_size = kernel_memsize + TEXT_OFFSET % min_kimg_align();
if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) {
/*
* If KASLR is enabled, and we have some randomness available,
* locate the kernel at a randomized offset in physical memory.
*/
- status = efi_random_alloc(*reserve_size, min_kimg_align,
+ status = efi_random_alloc(*reserve_size, min_kimg_align(),
reserve_addr, phys_seed);
} else {
status = EFI_OUT_OF_RESOURCES;
}
if (status != EFI_SUCCESS) {
- if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align)) {
+ if (IS_ALIGNED((u64)_text - TEXT_OFFSET, min_kimg_align())) {
/*
* Just execute from wherever we were loaded by the
* UEFI PE/COFF loader if the alignment is suitable.
@@ -99,7 +102,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
status = efi_allocate_pages_aligned(*reserve_size, reserve_addr,
- ULONG_MAX, min_kimg_align);
+ ULONG_MAX, min_kimg_align());
if (status != EFI_SUCCESS) {
efi_err("Failed to relocate kernel\n");
@@ -108,7 +111,7 @@ efi_status_t handle_kernel_image(unsigned long *image_addr,
}
}
- *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align;
+ *image_addr = *reserve_addr + TEXT_OFFSET % min_kimg_align();
memcpy((void *)*image_addr, _text, kernel_size);
return EFI_SUCCESS;
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 89f075275300..6bca70bbb43d 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -19,7 +19,7 @@
#include "efistub.h"
bool efi_nochunk;
-bool efi_nokaslr;
+bool efi_nokaslr = !IS_ENABLED(CONFIG_RANDOMIZE_BASE);
bool efi_noinitrd;
int efi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
bool efi_novamap;
@@ -32,6 +32,10 @@ bool __pure __efi_soft_reserve_enabled(void)
return !efi_nosoftreserve;
}
+/**
+ * efi_char16_puts() - Write a UCS-2 encoded string to the console
+ * @str: UCS-2 encoded string
+ */
void efi_char16_puts(efi_char16_t *str)
{
efi_call_proto(efi_table_attr(efi_system_table, con_out),
@@ -83,6 +87,10 @@ u32 utf8_to_utf32(const u8 **s8)
return c32;
}
+/**
+ * efi_puts() - Write a UTF-8 encoded string to the console
+ * @str: UTF-8 encoded string
+ */
void efi_puts(const char *str)
{
efi_char16_t buf[128];
@@ -113,6 +121,16 @@ void efi_puts(const char *str)
}
}
+/**
+ * efi_printk() - Print a kernel message
+ * @fmt: format string
+ *
+ * The first letter of the format string is used to determine the logging level
+ * of the message. If the level is less than the current EFI logging level, the
+ * message is suppressed. The message will be truncated to 255 bytes.
+ *
+ * Return: number of printed characters
+ */
int efi_printk(const char *fmt, ...)
{
char printf_buf[256];
@@ -154,13 +172,18 @@ int efi_printk(const char *fmt, ...)
return printed;
}
-/*
- * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
+/**
+ * efi_parse_options() - Parse EFI command line options
+ * @cmdline: kernel command line
+ *
+ * Parse the ASCII string @cmdline for EFI options, denoted by the efi=
* option, e.g. efi=nochunk.
*
* It should be noted that efi= is parsed in two very different
* environments, first in the early boot environment of the EFI boot
* stub, and subsequently during the kernel boot.
+ *
+ * Return: status code
*/
efi_status_t efi_parse_options(char const *cmdline)
{
@@ -286,13 +309,21 @@ char *efi_convert_cmdline(efi_loaded_image_t *image, int *cmd_line_len)
return (char *)cmdline_addr;
}
-/*
+/**
+ * efi_exit_boot_services() - Exit boot services
+ * @handle: handle of the exiting image
+ * @map: pointer to receive the memory map
+ * @priv: argument to be passed to @priv_func
+ * @priv_func: function to process the memory map before exiting boot services
+ *
* Handle calling ExitBootServices according to the requirements set out by the
* spec. Obtains the current memory map, and returns that info after calling
* ExitBootServices. The client must specify a function to perform any
* processing of the memory map data prior to ExitBootServices. A client
* specific structure may be passed to the function via priv. The client
* function may be called multiple times.
+ *
+ * Return: status code
*/
efi_status_t efi_exit_boot_services(void *handle,
struct efi_boot_memmap *map,
@@ -361,6 +392,11 @@ fail:
return status;
}
+/**
+ * get_efi_config_table() - retrieve UEFI configuration table
+ * @guid: GUID of the configuration table to be retrieved
+ * Return: pointer to the configuration table or NULL
+ */
void *get_efi_config_table(efi_guid_t guid)
{
unsigned long tables = efi_table_attr(efi_system_table, tables);
@@ -408,17 +444,18 @@ static const struct {
};
/**
- * efi_load_initrd_dev_path - load the initrd from the Linux initrd device path
+ * efi_load_initrd_dev_path() - load the initrd from the Linux initrd device path
* @load_addr: pointer to store the address where the initrd was loaded
* @load_size: pointer to store the size of the loaded initrd
* @max: upper limit for the initrd memory allocation
- * @return: %EFI_SUCCESS if the initrd was loaded successfully, in which
- * case @load_addr and @load_size are assigned accordingly
- * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd
- * device path
- * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
- * %EFI_OUT_OF_RESOURCES if memory allocation failed
- * %EFI_LOAD_ERROR in all other cases
+ *
+ * Return:
+ * * %EFI_SUCCESS if the initrd was loaded successfully, in which
+ * case @load_addr and @load_size are assigned accordingly
+ * * %EFI_NOT_FOUND if no LoadFile2 protocol exists on the initrd device path
+ * * %EFI_INVALID_PARAMETER if load_addr == NULL or load_size == NULL
+ * * %EFI_OUT_OF_RESOURCES if memory allocation failed
+ * * %EFI_LOAD_ERROR in all other cases
*/
static
efi_status_t efi_load_initrd_dev_path(unsigned long *load_addr,
@@ -481,6 +518,16 @@ efi_status_t efi_load_initrd_cmdline(efi_loaded_image_t *image,
load_addr, load_size);
}
+/**
+ * efi_load_initrd() - Load initial RAM disk
+ * @image: EFI loaded image protocol
+ * @load_addr: pointer to loaded initrd
+ * @load_size: size of loaded initrd
+ * @soft_limit: preferred size of allocated memory for loading the initrd
+ * @hard_limit: minimum size of allocated memory
+ *
+ * Return: status code
+ */
efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_addr,
unsigned long *load_size,
@@ -505,6 +552,15 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
return status;
}
+/**
+ * efi_wait_for_key() - Wait for key stroke
+ * @usec: number of microseconds to wait for key stroke
+ * @key: key entered
+ *
+ * Wait for up to @usec microseconds for a key stroke.
+ *
+ * Return: status code, EFI_SUCCESS if key received
+ */
efi_status_t efi_wait_for_key(unsigned long usec, efi_input_key_t *key)
{
efi_event_t events[2], timer;
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index e97370bdfdb0..a5a405d8ab44 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -122,23 +122,6 @@ static unsigned long get_dram_base(void)
}
/*
- * This function handles the architcture specific differences between arm and
- * arm64 regarding where the kernel image must be loaded and any memory that
- * must be reserved. On failure it is required to free all
- * all allocations it has made.
- */
-efi_status_t handle_kernel_image(unsigned long *image_addr,
- unsigned long *image_size,
- unsigned long *reserve_addr,
- unsigned long *reserve_size,
- unsigned long dram_base,
- efi_loaded_image_t *image);
-
-asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
- unsigned long fdt_addr,
- unsigned long fdt_size);
-
-/*
* EFI entry point for the arm/arm64 EFI stubs. This is the entrypoint
* that is described in the PE/COFF header. Most of the code is the same
 * for both architectures, with the arch-specific code provided in the
@@ -329,6 +312,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
if (status != EFI_SUCCESS)
goto fail_free_initrd;
+ if (IS_ENABLED(CONFIG_ARM))
+ efi_handle_post_ebs_state();
+
efi_enter_kernel(image_addr, fdt_addr, fdt_totalsize((void *)fdt_addr));
/* not reached */
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index bcd8c0a785f0..85050f5a1b28 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -157,8 +157,14 @@ typedef void (__efiapi *efi_event_notify_t)(efi_event_t, void *);
#define EFI_EVT_NOTIFY_WAIT 0x00000100U
#define EFI_EVT_NOTIFY_SIGNAL 0x00000200U
-/*
- * boottime->wait_for_event takes an array of events as input.
+/**
+ * efi_set_event_at() - add event to events array
+ *
+ * @events: array of UEFI events
+ * @ids: index where to put the event in the array
+ * @event:	event to add to the array
+ *
+ * boottime->wait_for_event() takes an array of events as input.
* Provide a helper to set it up correctly for mixed mode.
*/
static inline
@@ -770,5 +776,23 @@ efi_status_t efi_load_initrd(efi_loaded_image_t *image,
unsigned long *load_size,
unsigned long soft_limit,
unsigned long hard_limit);
+/*
+ * This function handles the architecture-specific differences between arm and
+ * arm64 regarding where the kernel image must be loaded and any memory that
+ * must be reserved. On failure it is required to free all
+ * allocations it has made.
+ */
+efi_status_t handle_kernel_image(unsigned long *image_addr,
+ unsigned long *image_size,
+ unsigned long *reserve_addr,
+ unsigned long *reserve_size,
+ unsigned long dram_base,
+ efi_loaded_image_t *image);
+
+asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
+ unsigned long fdt_addr,
+ unsigned long fdt_size);
+
+void efi_handle_post_ebs_state(void);
#endif
diff --git a/drivers/firmware/efi/libstub/file.c b/drivers/firmware/efi/libstub/file.c
index 2005e33b33d5..630caa6b1f4c 100644
--- a/drivers/firmware/efi/libstub/file.c
+++ b/drivers/firmware/efi/libstub/file.c
@@ -102,12 +102,20 @@ static int find_file_option(const efi_char16_t *cmdline, int cmdline_len,
if (!found)
return 0;
+ /* Skip any leading slashes */
+ while (cmdline[i] == L'/' || cmdline[i] == L'\\')
+ i++;
+
while (--result_len > 0 && i < cmdline_len) {
- if (cmdline[i] == L'\0' ||
- cmdline[i] == L'\n' ||
- cmdline[i] == L' ')
+ efi_char16_t c = cmdline[i++];
+
+ if (c == L'\0' || c == L'\n' || c == L' ')
break;
- *result++ = cmdline[i++];
+ else if (c == L'/')
+ /* Replace UNIX dir separators with EFI standard ones */
+ *result++ = L'\\';
+ else
+ *result++ = c;
}
*result = L'\0';
return i;
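With these two changes the file option parser tolerates UNIX-style paths: leading slashes are skipped and any remaining '/' is rewritten to the EFI path separator, so a command line such as initrd=/initrd.img should end up opening \initrd.img on the EFI system partition instead of failing the lookup.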
diff --git a/drivers/firmware/efi/libstub/skip_spaces.c b/drivers/firmware/efi/libstub/skip_spaces.c
index a700b3c7f7d0..159fb4e456c6 100644
--- a/drivers/firmware/efi/libstub/skip_spaces.c
+++ b/drivers/firmware/efi/libstub/skip_spaces.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/types.h>
char *skip_spaces(const char *str)
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 5a48d996ed71..3672539cb96e 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -8,6 +8,7 @@
#include <linux/efi.h>
#include <linux/pci.h>
+#include <linux/stddef.h>
#include <asm/efi.h>
#include <asm/e820/types.h>
@@ -361,8 +362,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
int options_size = 0;
efi_status_t status;
char *cmdline_ptr;
- unsigned long ramdisk_addr;
- unsigned long ramdisk_size;
efi_system_table = sys_table_arg;
@@ -390,8 +389,9 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
hdr = &boot_params->hdr;
- /* Copy the second sector to boot_params */
- memcpy(&hdr->jump, image_base + 512, 512);
+ /* Copy the setup header from the second sector to boot_params */
+ memcpy(&hdr->jump, image_base + 512,
+ sizeof(struct setup_header) - offsetof(struct setup_header, jump));
/*
* Fill out some of the header fields ourselves because the
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 5f2a4d162795..973eef234b36 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -1229,3 +1229,9 @@ out:
return rv;
}
EXPORT_SYMBOL_GPL(efivars_unregister);
+
+int efivar_supports_writes(void)
+{
+ return __efivars && __efivars->ops->set_variable;
+}
+EXPORT_SYMBOL_GPL(efivar_supports_writes);
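This helper is what the earlier efi-pstore and efivars hunks in this series call: it reports whether a set_variable implementation was wired up by generic_ops_register(), letting the pstore backend and the legacy sysfs interface skip registration on firmware that cannot write variables.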
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 873841af8d57..3d6ba425dbb9 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -157,8 +157,10 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
GFP_KERNEL);
- if (!cpu_groups)
+ if (!cpu_groups) {
+ free_cpumask_var(tmp);
return -ENOMEM;
+ }
cpumask_copy(tmp, cpu_online_mask);
@@ -167,6 +169,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
topology_core_cpumask(cpumask_any(tmp));
if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
+ free_cpumask_var(tmp);
free_cpu_groups(num_groups, &cpu_groups);
return -ENOMEM;
}
@@ -196,13 +199,12 @@ static int hotplug_tests(void)
if (!page_buf)
goto out_free_cpu_groups;
- err = 0;
/*
* Of course the last CPU cannot be powered down and cpu_down() should
* refuse doing that.
*/
pr_info("Trying to turn off and on again all CPUs\n");
- err += down_and_up_cpus(cpu_online_mask, offlined_cpus);
+ err = down_and_up_cpus(cpu_online_mask, offlined_cpus);
/*
* Take down CPUs by cpu group this time. When the last CPU is turned
diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c
index ef8098856a47..625c8fdceabf 100644
--- a/drivers/firmware/raspberrypi.c
+++ b/drivers/firmware/raspberrypi.c
@@ -181,6 +181,7 @@ EXPORT_SYMBOL_GPL(rpi_firmware_property);
static void
rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
{
+ time64_t date_and_time;
u32 packet;
int ret = rpi_firmware_property(fw,
RPI_FIRMWARE_GET_FIRMWARE_REVISION,
@@ -189,7 +190,9 @@ rpi_firmware_print_firmware_revision(struct rpi_firmware *fw)
if (ret)
return;
- dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &packet);
+ /* This is not compatible with y2038 */
+ date_and_time = packet;
+ dev_info(fw->cl.dev, "Attached to firmware from %ptT\n", &date_and_time);
}
static void
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index b2408a710662..7cd5a29fc437 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -208,7 +208,7 @@ config FPGA_DFL_PCI
config FPGA_MGR_ZYNQMP_FPGA
tristate "Xilinx ZynqMP FPGA"
- depends on ARCH_ZYNQMP || COMPILE_TEST
+ depends on ZYNQMP_FIRMWARE || (!ZYNQMP_FIRMWARE && COMPILE_TEST)
help
FPGA manager driver support for Xilinx ZynqMP FPGAs.
This driver uses the processor configuration port(PCAP)
diff --git a/drivers/gpio/gpio-arizona.c b/drivers/gpio/gpio-arizona.c
index 5640efe5e750..5bda38e0780f 100644
--- a/drivers/gpio/gpio-arizona.c
+++ b/drivers/gpio/gpio-arizona.c
@@ -64,6 +64,7 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
}
@@ -72,12 +73,15 @@ static int arizona_gpio_get(struct gpio_chip *chip, unsigned offset)
if (ret < 0) {
dev_err(chip->parent, "Failed to drop cache: %d\n",
ret);
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
}
ret = regmap_read(arizona->regmap, reg, &val);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(chip->parent);
return ret;
+ }
pm_runtime_mark_last_busy(chip->parent);
pm_runtime_put_autosuspend(chip->parent);
@@ -106,6 +110,7 @@ static int arizona_gpio_direction_out(struct gpio_chip *chip,
ret = pm_runtime_get_sync(chip->parent);
if (ret < 0) {
dev_err(chip->parent, "Failed to resume: %d\n", ret);
+ pm_runtime_put(chip->parent);
return ret;
}
}
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 1fca8dd7824f..a3b9bdedbe44 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -107,6 +107,84 @@ static const struct i2c_device_id pca953x_id[] = {
};
MODULE_DEVICE_TABLE(i2c, pca953x_id);
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+
+#include <linux/dmi.h>
+#include <linux/gpio.h>
+#include <linux/list.h>
+
+static const struct dmi_system_id pca953x_dmi_acpi_irq_info[] = {
+ {
+ /*
+ * On Intel Galileo Gen 2 board the IRQ pin of one of
+ * the I²C GPIO expanders, which has GpioInt() resource,
+ * is provided as an absolute number instead of being
+ * relative. Since first controller (gpio-sch.c) and
+ * second (gpio-dwapb.c) are at the fixed bases, we may
+ * safely refer to the number in the global space to get
+ * an IRQ out of it.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ },
+ {}
+};
+
+#ifdef CONFIG_ACPI
+static int pca953x_acpi_get_pin(struct acpi_resource *ares, void *data)
+{
+ struct acpi_resource_gpio *agpio;
+ int *pin = data;
+
+ if (acpi_gpio_get_irq_resource(ares, &agpio))
+ *pin = agpio->pin_table[0];
+ return 1;
+}
+
+static int pca953x_acpi_find_pin(struct device *dev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ int pin = -ENOENT, ret;
+ LIST_HEAD(r);
+
+ ret = acpi_dev_get_resources(adev, &r, pca953x_acpi_get_pin, &pin);
+ acpi_dev_free_resource_list(&r);
+ if (ret < 0)
+ return ret;
+
+ return pin;
+}
+#else
+static inline int pca953x_acpi_find_pin(struct device *dev) { return -ENXIO; }
+#endif
+
+static int pca953x_acpi_get_irq(struct device *dev)
+{
+ int pin, ret;
+
+ pin = pca953x_acpi_find_pin(dev);
+ if (pin < 0)
+ return pin;
+
+ dev_info(dev, "Applying ACPI interrupt quirk (GPIO %d)\n", pin);
+
+ if (!gpio_is_valid(pin))
+ return -EINVAL;
+
+ ret = gpio_request(pin, "pca953x interrupt");
+ if (ret)
+ return ret;
+
+ ret = gpio_to_irq(pin);
+
+ /* When pin is used as an IRQ, no need to keep it requested */
+ gpio_free(pin);
+
+ return ret;
+}
+#endif
+
static const struct acpi_device_id pca953x_acpi_ids[] = {
{ "INT3491", 16 | PCA953X_TYPE | PCA_LATCH_INT, },
{ }
@@ -322,6 +400,7 @@ static const struct regmap_config pca953x_ai_i2c_regmap = {
.writeable_reg = pca953x_writeable_register,
.volatile_reg = pca953x_volatile_register,
+ .disable_locking = true,
.cache_type = REGCACHE_RBTREE,
.max_register = 0x7f,
};
@@ -623,8 +702,6 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
DECLARE_BITMAP(reg_direction, MAX_LINE);
int level;
- pca953x_read_regs(chip, chip->regs->direction, reg_direction);
-
if (chip->driver_data & PCA_PCAL) {
/* Enable latch on interrupt-enabled inputs */
pca953x_write_regs(chip, PCAL953X_IN_LATCH, chip->irq_mask);
@@ -635,7 +712,11 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
pca953x_write_regs(chip, PCAL953X_INT_MASK, irq_mask);
}
+ /* Switch direction to input if needed */
+ pca953x_read_regs(chip, chip->regs->direction, reg_direction);
+
bitmap_or(irq_mask, chip->irq_trig_fall, chip->irq_trig_raise, gc->ngpio);
+ bitmap_complement(reg_direction, reg_direction, gc->ngpio);
bitmap_and(irq_mask, irq_mask, reg_direction, gc->ngpio);
/* Look for any newly setup interrupt */
@@ -734,14 +815,16 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid)
struct gpio_chip *gc = &chip->gpio_chip;
DECLARE_BITMAP(pending, MAX_LINE);
int level;
+ bool ret;
- if (!pca953x_irq_pending(chip, pending))
- return IRQ_NONE;
+ mutex_lock(&chip->i2c_lock);
+ ret = pca953x_irq_pending(chip, pending);
+ mutex_unlock(&chip->i2c_lock);
for_each_set_bit(level, pending, gc->ngpio)
handle_nested_irq(irq_find_mapping(gc->irq.domain, level));
- return IRQ_HANDLED;
+ return IRQ_RETVAL(ret);
}
static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
@@ -752,6 +835,12 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, int irq_base)
DECLARE_BITMAP(irq_stat, MAX_LINE);
int ret;
+ if (dmi_first_match(pca953x_dmi_acpi_irq_info)) {
+ ret = pca953x_acpi_get_irq(&client->dev);
+ if (ret > 0)
+ client->irq = ret;
+ }
+
if (!client->irq)
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
index 58f9d8c3a17a..44f927641b89 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
@@ -204,6 +204,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 11:
+ case 12:
mem_channel_number = igp_info->v11.umachannelnumber;
/* channel width is 64 */
if (vram_width)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index d33cb344be69..a414da22a359 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1295,27 +1295,37 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched)
static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring)
{
struct amdgpu_job *job;
- struct drm_sched_job *s_job;
+ struct drm_sched_job *s_job, *tmp;
uint32_t preempt_seq;
struct dma_fence *fence, **ptr;
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct drm_gpu_scheduler *sched = &ring->sched;
+ bool preempted = true;
if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
return;
preempt_seq = le32_to_cpu(*(drv->cpu_addr + 2));
- if (preempt_seq <= atomic_read(&drv->last_seq))
- return;
+ if (preempt_seq <= atomic_read(&drv->last_seq)) {
+ preempted = false;
+ goto no_preempt;
+ }
preempt_seq &= drv->num_fences_mask;
ptr = &drv->fences[preempt_seq];
fence = rcu_dereference_protected(*ptr, 1);
+no_preempt:
spin_lock(&sched->job_list_lock);
- list_for_each_entry(s_job, &sched->ring_mirror_list, node) {
+ list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
+ if (dma_fence_is_signaled(&s_job->s_fence->finished)) {
+ /* remove job from ring_mirror_list */
+ list_del_init(&s_job->node);
+ sched->ops->free_job(s_job);
+ continue;
+ }
job = to_amdgpu_job(s_job);
- if (job->fence == fence)
+ if (preempted && job->fence == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 47207188c569..4fb4c3b69687 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -37,7 +37,8 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
memset(&ti, 0, sizeof(struct amdgpu_task_info));
- if (amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
+ if (amdgpu_gpu_recovery &&
+ amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
return;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 775e389c9a13..ebb8a28ff002 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -696,7 +696,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
* default power levels, write "r" (reset) to the file to reset them.
*
*
- * < For Vega20 >
+ * < For Vega20 and newer ASICs >
*
* Reading the file will display:
*
@@ -778,8 +778,7 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@@ -1039,8 +1038,7 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
- while (tmp[0]) {
- sub_str = strsep(&tmp, delimiter);
+ while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@@ -1637,8 +1635,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
- while (tmp_str[0]) {
- sub_str = strsep(&tmp_str, delimiter);
+ while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
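All three conversions above use the same loop shape: iterate on the token strsep() returns rather than re-testing the cursor, because strsep() sets the cursor to NULL once the string is exhausted and the old while (tmp_str[0]) test would then dereference it whenever the input does not end in a delimiter. A small self-contained sketch of the pattern (ordinary userspace C, illustrative values):

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "300 600 900";	/* no trailing delimiter: the old
						 * cursor-test loop would crash here */
		char *cur = buf;
		char *tok;

		/* Loop on the returned token, never on *cur. */
		while ((tok = strsep(&cur, " ")) != NULL) {
			if (*tok == '\0')
				continue;	/* skip empty tokens from consecutive delimiters */
			printf("value: %s\n", tok);
		}
		return 0;
	}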
@@ -1668,7 +1665,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
}
/**
- * DOC: busy_percent
+ * DOC: gpu_busy_percent
*
* The amdgpu driver provides a sysfs API for reading how busy the GPU
* is as a percentage. The file gpu_busy_percent is used for this.
@@ -2784,7 +2781,7 @@ static ssize_t amdgpu_hwmon_show_sclk(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", sclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev,
@@ -2819,7 +2816,7 @@ static ssize_t amdgpu_hwmon_show_mclk(struct device *dev,
if (r)
return r;
- return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000);
+ return snprintf(buf, PAGE_SIZE, "%u\n", mclk * 10 * 1000);
}
static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 7301fdcfb8bc..ef3269c43d4f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -372,6 +372,52 @@ static int psp_tmr_load(struct psp_context *psp)
return ret;
}
+static void psp_prep_tmr_unload_cmd_buf(struct psp_context *psp,
+ struct psp_gfx_cmd_resp *cmd)
+{
+ if (amdgpu_sriov_vf(psp->adev))
+ cmd->cmd_id = GFX_CMD_ID_DESTROY_VMR;
+ else
+ cmd->cmd_id = GFX_CMD_ID_DESTROY_TMR;
+}
+
+static int psp_tmr_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_tmr_unload_cmd_buf(psp, cmd);
+ DRM_INFO("free PSP TMR buffer\n");
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd,
+ psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
+static int psp_tmr_terminate(struct psp_context *psp)
+{
+ int ret;
+ void *tmr_buf;
+ void **pptr;
+
+ ret = psp_tmr_unload(psp);
+ if (ret)
+ return ret;
+
+ /* free TMR memory buffer */
+ pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
+ amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
+
+ return 0;
+}
+
static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t asd_mc, uint32_t size)
{
@@ -1779,8 +1825,6 @@ static int psp_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- void *tmr_buf;
- void **pptr;
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
@@ -1790,10 +1834,9 @@ static int psp_hw_fini(void *handle)
psp_asd_unload(psp);
+ psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
- pptr = amdgpu_sriov_vf(psp->adev) ? &tmr_buf : NULL;
- amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, pptr);
amdgpu_bo_free_kernel(&psp->fw_pri_bo,
&psp->fw_pri_mc_addr, &psp->fw_pri_buf);
amdgpu_bo_free_kernel(&psp->fence_buf_bo,
@@ -1840,6 +1883,18 @@ static int psp_suspend(void *handle)
}
}
+ ret = psp_asd_unload(psp);
+ if (ret) {
+ DRM_ERROR("Failed to unload asd\n");
+ return ret;
+ }
+
+ ret = psp_tmr_terminate(psp);
+ if (ret) {
+ DRM_ERROR("Falied to terminate tmr\n");
+ return ret;
+ }
+
ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
if (ret) {
DRM_ERROR("PSP ring stop failed\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index bd5dd4f64311..fac77a86c04b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -7513,12 +7513,17 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_kiq *kiq = &adev->gfx.kiq;
struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
- if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size))
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
return -ENOMEM;
+ }
/* assert preemption condition */
amdgpu_ring_set_preempt_cond_exec(ring, false);
@@ -7529,6 +7534,8 @@ static int gfx_v10_0_ring_preempt_ib(struct amdgpu_ring *ring)
++ring->trail_seq);
amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
/* poll the trailing fence */
for (i = 0; i < adev->usec_timeout; i++) {
if (ring->trail_seq ==
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index b544baf306f2..8fb66e50a57b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -314,30 +314,20 @@ static uint64_t sdma_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
static uint64_t sdma_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
- u64 *wptr = NULL;
- uint64_t local_wptr = 0;
+ u64 wptr;
if (ring->use_doorbell) {
/* XXX check if swapping is necessary on BE */
- wptr = ((u64 *)&adev->wb.wb[ring->wptr_offs]);
- DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", *wptr);
- *wptr = (*wptr) >> 2;
- DRM_DEBUG("wptr/doorbell after shift == 0x%016llx\n", *wptr);
+ wptr = READ_ONCE(*((u64 *)&adev->wb.wb[ring->wptr_offs]));
+ DRM_DEBUG("wptr/doorbell before shift == 0x%016llx\n", wptr);
} else {
- u32 lowbit, highbit;
-
- wptr = &local_wptr;
- lowbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR)) >> 2;
- highbit = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI)) >> 2;
-
- DRM_DEBUG("wptr [%i]high== 0x%08x low==0x%08x\n",
- ring->me, highbit, lowbit);
- *wptr = highbit;
- *wptr = (*wptr) << 32;
- *wptr |= lowbit;
+ wptr = RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI));
+ wptr = wptr << 32;
+ wptr |= RREG32(sdma_v5_0_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR));
+ DRM_DEBUG("wptr before shift [%i] wptr == 0x%016llx\n", ring->me, wptr);
}
- return *wptr;
+ return wptr >> 2;
}
/**
@@ -1298,8 +1288,12 @@ static int sdma_v5_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i;
- for (i = 0; i < adev->sdma.num_instances; i++)
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ if (adev->sdma.instance[i].fw != NULL)
+ release_firmware(adev->sdma.instance[i].fw);
+
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f0587d94294d..fee60921fccf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -40,6 +40,7 @@
#include <drm/drm_file.h>
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
+#include <drm/drm_ioctl.h>
#include <kgd_kfd_interface.h>
#include <linux/swap.h>
@@ -1076,7 +1077,7 @@ static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
struct drm_device *ddev = kfd->ddev;
- return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, DRM_MAJOR,
ddev->render->index,
DEVCG_ACC_WRITE | DEVCG_ACC_READ);
#else
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d27221ddcdeb..0e0c42e9f6a3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -428,6 +428,7 @@ struct kfd_process *kfd_create_process(struct file *filep)
(int)process->lead_thread->pid);
if (ret) {
pr_warn("Creating procfs pid directory failed");
+ kobject_put(process->kobj);
goto out;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 7ced9f87be97..86ffa0c2880f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -974,6 +974,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
/* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+ /* create fake encoders for MST */
+ dm_dp_create_fake_mst_encoders(adev);
+
/* TODO: Add_display_info? */
/* TODO use dynamic cursor width */
@@ -997,6 +1000,12 @@ error:
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
+ int i;
+
+ for (i = 0; i < adev->dm.display_indexes_num; i++) {
+ drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
+ }
+
amdgpu_dm_audio_fini(adev);
amdgpu_dm_destroy_drm_device(&adev->dm);
@@ -1358,7 +1367,7 @@ static int dm_late_init(void *handle)
struct dmcu *dmcu = NULL;
bool ret;
- if (!adev->dm.fw_dmcu)
+ if (!adev->dm.fw_dmcu && !adev->dm.dmub_fw)
return detect_mst_link_for_all_connectors(adev->ddev);
dmcu = adev->dm.dc->res_pool->dmcu;
@@ -2010,6 +2019,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
struct amdgpu_display_manager *dm;
struct drm_connector *conn_base;
struct amdgpu_device *adev;
+ struct dc_link *link = NULL;
static const u8 pre_computed_values[] = {
50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
@@ -2017,6 +2027,10 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
if (!aconnector || !aconnector->dc_link)
return;
+ link = aconnector->dc_link;
+ if (link->connector_signal != SIGNAL_TYPE_EDP)
+ return;
+
conn_base = &aconnector->base;
adev = conn_base->dev->dev_private;
dm = &adev->dm;
@@ -5024,7 +5038,8 @@ create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct drm_connector *connector = &aconnector->base;
struct amdgpu_device *adev = connector->dev->dev_private;
struct dc_stream_state *stream;
- int requested_bpc = connector->state ? connector->state->max_requested_bpc : 8;
+ const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
+ int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
enum dc_status dc_result = DC_OK;
do {
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d61186ff411d..648180ccdc2e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -43,6 +43,9 @@
*/
#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
+
+#define AMDGPU_DM_MAX_CRTC 6
+
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -328,6 +331,13 @@ struct amdgpu_display_manager {
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+
+ /**
+ * @mst_encoders:
+ *
+ * fake encoders used for DP MST.
+ */
+ struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC];
};
struct amdgpu_dm_connector {
@@ -356,7 +366,6 @@ struct amdgpu_dm_connector {
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
- struct amdgpu_encoder *mst_encoder;
struct drm_dp_aux *dsc_aux;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 076af267b488..1d692f4f42f3 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -1058,7 +1058,6 @@ static const struct {
{"link_settings", &dp_link_settings_debugfs_fops},
{"phy_settings", &dp_phy_settings_debugfs_fop},
{"test_pattern", &dp_phy_test_pattern_fops},
- {"output_bpc", &output_bpc_fops},
{"vrr_range", &vrr_range_fops},
#ifdef CONFIG_DRM_AMD_DC_HDCP
{"hdcp_sink_capability", &hdcp_sink_capability_fops},
@@ -1142,6 +1141,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ debugfs_create_file("output_bpc", 0644, dir, connector,
+ &output_bpc_fops);
+
connector->debugfs_dpcd_address = 0;
connector->debugfs_dpcd_size = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index dcf84a61de37..949d10ef8304 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -510,8 +510,10 @@ static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin
srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
- if (!srm)
- return -EINVAL;
+ if (!srm) {
+ ret = -EINVAL;
+ goto ret;
+ }
if (pos >= srm_size)
ret = 0;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index ae0a7ef1d595..e5ecc5affa1e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -95,7 +95,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_dm_connector *aconnector =
to_amdgpu_dm_connector(connector);
- struct amdgpu_encoder *amdgpu_encoder = aconnector->mst_encoder;
if (aconnector->dc_sink) {
dc_link_remove_remote_sink(aconnector->dc_link,
@@ -105,8 +104,6 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
kfree(aconnector->edid);
- drm_encoder_cleanup(&amdgpu_encoder->base);
- kfree(amdgpu_encoder);
drm_connector_cleanup(connector);
drm_dp_mst_put_port_malloc(aconnector->port);
kfree(aconnector);
@@ -243,7 +240,11 @@ static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
struct drm_connector_state *connector_state)
{
- return &to_amdgpu_dm_connector(connector)->mst_encoder->base;
+ struct drm_device *dev = connector->dev;
+ struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
+
+ return &adev->dm.mst_encoders[acrtc->crtc_id].base;
}
static int
@@ -306,31 +307,27 @@ static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
};
-static struct amdgpu_encoder *
-dm_dp_create_fake_mst_encoder(struct amdgpu_dm_connector *connector)
+void
+dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
{
- struct drm_device *dev = connector->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
- struct amdgpu_encoder *amdgpu_encoder;
- struct drm_encoder *encoder;
-
- amdgpu_encoder = kzalloc(sizeof(*amdgpu_encoder), GFP_KERNEL);
- if (!amdgpu_encoder)
- return NULL;
+ struct drm_device *dev = adev->ddev;
+ int i;
- encoder = &amdgpu_encoder->base;
- encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+ for (i = 0; i < adev->dm.display_indexes_num; i++) {
+ struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
+ struct drm_encoder *encoder = &amdgpu_encoder->base;
- drm_encoder_init(
- dev,
- &amdgpu_encoder->base,
- &amdgpu_dm_encoder_funcs,
- DRM_MODE_ENCODER_DPMST,
- NULL);
+ encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
- drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+ drm_encoder_init(
+ dev,
+ &amdgpu_encoder->base,
+ &amdgpu_dm_encoder_funcs,
+ DRM_MODE_ENCODER_DPMST,
+ NULL);
- return amdgpu_encoder;
+ drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+ }
}
static struct drm_connector *
@@ -343,6 +340,7 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
+ int i;
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
@@ -369,9 +367,10 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
master->dc_link,
master->connector_id);
- aconnector->mst_encoder = dm_dp_create_fake_mst_encoder(master);
- drm_connector_attach_encoder(&aconnector->base,
- &aconnector->mst_encoder->base);
+ for (i = 0; i < adev->dm.display_indexes_num; i++) {
+ drm_connector_attach_encoder(&aconnector->base,
+ &adev->dm.mst_encoders[i].base);
+ }
connector->max_bpc_property = master->base.max_bpc_property;
if (connector->max_bpc_property)
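
Design note on the hunks above (editorial, not part of the patch): instead of
kzalloc'ing one fake DPMST encoder per MST connector and freeing it in the
connector destructor, the DM now keeps a fixed, per-display-index array of fake
encoders that is created once and shared by every MST connector;
.atomic_best_encoder then picks the entry matching the CRTC bound in the
connector state. A rough sketch of the idea, with a hypothetical MAX_DISPLAYS
bound:

        /* one fake DPMST encoder per display index, created once at init */
        struct amdgpu_encoder mst_encoders[MAX_DISPLAYS];

        /* every MST connector attaches to all of them ... */
        for (i = 0; i < display_indexes_num; i++)
                drm_connector_attach_encoder(&aconnector->base,
                                             &mst_encoders[i].base);

        /* ... and .atomic_best_encoder returns the one whose index matches
         * the CRTC assigned in the connector state, so no per-connector
         * allocation or drm_encoder_cleanup() is needed any more. */

dm_dp_create_fake_mst_encoders() is presumably called once during DM
initialization; that call site is not part of the hunks shown here.
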
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
index d2c56579a2cc..b38bd68121ce 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
@@ -35,6 +35,9 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
int link_index);
+void
+dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
struct dc_state *dc_state);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 6f93a6ca4cf0..d016f50e187c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2538,10 +2538,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
copy_stream_update_to_stream(dc, context, stream, stream_update);
- if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
- return;
+ if (update_type > UPDATE_TYPE_FAST) {
+ if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
+ DC_ERROR("Mode validation failed for stream update!\n");
+ dc_release_state(context);
+ return;
+ }
}
commit_planes_for_stream(
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4f0e7203dba4..470c82794f6f 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -56,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
}
}
-static void dc_stream_construct(struct dc_stream_state *stream,
+static bool dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data)
{
uint32_t i = 0;
@@ -118,11 +118,17 @@ static void dc_stream_construct(struct dc_stream_state *stream,
update_stream_signal(stream, dc_sink_data);
stream->out_transfer_func = dc_create_transfer_func();
+ if (stream->out_transfer_func == NULL) {
+ dc_sink_release(dc_sink_data);
+ return false;
+ }
stream->out_transfer_func->type = TF_TYPE_BYPASS;
stream->out_transfer_func->ctx = stream->ctx;
stream->stream_id = stream->ctx->dc_stream_id_count;
stream->ctx->dc_stream_id_count++;
+
+ return true;
}
static void dc_stream_destruct(struct dc_stream_state *stream)
@@ -164,13 +170,20 @@ struct dc_stream_state *dc_create_stream_for_sink(
stream = kzalloc(sizeof(struct dc_stream_state), GFP_KERNEL);
if (stream == NULL)
- return NULL;
+ goto alloc_fail;
- dc_stream_construct(stream, sink);
+ if (dc_stream_construct(stream, sink) == false)
+ goto construct_fail;
kref_init(&stream->refcount);
return stream;
+
+construct_fail:
+ kfree(stream);
+
+alloc_fail:
+ return NULL;
}
struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
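
Net effect of the two hunks above (sketch, not patch code): stream construction
can now fail part-way through, and the caller distinguishes "allocation failed"
from "construction failed" so that only the latter frees the half-built stream:

        stream = kzalloc(sizeof(*stream), GFP_KERNEL);
        if (!stream)
                return NULL;                    /* nothing to undo yet */

        if (!dc_stream_construct(stream, sink)) {
                kfree(stream);                  /* construct already released the
                                                 * sink on failure, first hunk above */
                return NULL;
        }

        kref_init(&stream->refcount);
        return stream;
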
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 3f66868df171..ea29cf95d470 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -28,8 +28,6 @@ endif
endif
CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc_dpi.o := $(dsc_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dsc/dc_dsc.o := $(dsc_ccflags)
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index 0ea6662a1563..0c7f247bb7de 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -22,10 +22,12 @@
* Author: AMD
*/
+#include <drm/drm_dsc.h>
#include "dc_hw_types.h"
#include "dsc.h"
#include <drm/drm_dp_helper.h>
#include "dc.h"
+#include "rc_calc.h"
/* This module's internal functions */
@@ -304,22 +306,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
return (value + 9) / 10;
}
-static inline uint32_t calc_dsc_bpp_x16(uint32_t stream_bandwidth_kbps, uint32_t pix_clk_100hz, uint32_t bpp_increment_div)
-{
- uint32_t dsc_target_bpp_x16;
- float f_dsc_target_bpp;
- float f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f;
- uint32_t precision = bpp_increment_div; // bpp_increment_div is actually precision
-
- f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz;
-
- // Round down to the nearest precision stop to bring it into DSC spec range
- dsc_target_bpp_x16 = (uint32_t)(f_dsc_target_bpp * precision);
- dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision;
-
- return dsc_target_bpp_x16;
-}
-
/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock
* and uncompressed bandwidth.
*/
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 03ae15946c6d..667afbc260f9 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -23,6 +23,7 @@
* Authors: AMD
*
*/
+#include <drm/drm_dsc.h>
#include "os_types.h"
#include "rc_calc.h"
@@ -40,7 +41,8 @@
break
-void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum max_min max_min, float bpp)
+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
+ enum max_min max_min, float bpp)
{
int mode = MODE_SELECT(444, 422, 420);
int sel = table_hash(mode, bpc, max_min);
@@ -85,7 +87,7 @@ void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc, enum ma
memcpy(qps, table[index].qps, sizeof(qp_set));
}
-double dsc_roundf(double num)
+static double dsc_roundf(double num)
{
if (num < 0.0)
num = num - 0.5;
@@ -95,7 +97,7 @@ double dsc_roundf(double num)
return (int)(num);
}
-double dsc_ceil(double num)
+static double dsc_ceil(double num)
{
double retval = (int)num;
@@ -105,7 +107,7 @@ double dsc_ceil(double num)
return (int)retval;
}
-void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
{
int *p = ofs;
@@ -160,7 +162,7 @@ void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
}
}
-int median3(int a, int b, int c)
+static int median3(int a, int b, int c)
{
if (a > b)
swap(a, b);
@@ -172,13 +174,25 @@ int median3(int a, int b, int c)
return b;
}
-void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version)
+static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
+ enum bits_per_comp bpc, u8 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width, int slice_height,
+ int minor_version)
{
+ float bpp;
float bpp_group;
float initial_xmit_delay_factor;
int padding_pixels;
int i;
+ bpp = ((float)drm_bpp / 16.0);
+ /* in native_422 or native_420 modes, the bits_per_pixel is double the
+ * target bpp (the latter is what calc_rc_params expects)
+ */
+ if (is_navite_422_or_420)
+ bpp /= 2.0;
+
rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
@@ -251,3 +265,128 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com
rc->rc_buf_thresh[13] = 8064;
}
+static u32 _do_bytes_per_pixel_calc(int slice_width, u8 drm_bpp,
+ bool is_navite_422_or_420)
+{
+ float bpp;
+ u32 bytes_per_pixel;
+ double d_bytes_per_pixel;
+
+ bpp = ((float)drm_bpp / 16.0);
+ d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
+ // TODO: Make sure the formula for calculating this is precise (ceiling
+ // vs. floor, and at what point they should be applied)
+ if (is_navite_422_or_420)
+ d_bytes_per_pixel /= 2;
+
+ bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
+
+ return bytes_per_pixel;
+}
+
+static u32 _do_calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+ u32 bpp_increment_div)
+{
+ u32 dsc_target_bpp_x16;
+ float f_dsc_target_bpp;
+ float f_stream_bandwidth_100bps;
+ // bpp_increment_div is actually precision
+ u32 precision = bpp_increment_div;
+
+ f_stream_bandwidth_100bps = stream_bandwidth_kbps * 10.0f;
+ f_dsc_target_bpp = f_stream_bandwidth_100bps / pix_clk_100hz;
+
+ // Round down to the nearest precision stop to bring it into DSC spec
+ // range
+ dsc_target_bpp_x16 = (u32)(f_dsc_target_bpp * precision);
+ dsc_target_bpp_x16 = (dsc_target_bpp_x16 * 16) / precision;
+
+ return dsc_target_bpp_x16;
+}
+
+/**
+ * calc_rc_params - calculate the DSC rate control parameters
+ * @rc: DC internal DSC parameters
+ * @pps: DRM struct with all required DSC values
+ *
+ * This function expects a drm_dsc_config data struct with all the required DSC
+ * values previously filled out by our driver and based on this information it
+ * computes some of the DSC values.
+ *
+ * @note This calculation requires floating-point operations; most of it
+ * executes under kernel_fpu_{begin,end}.
+ */
+void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps)
+{
+ enum colour_mode mode;
+ enum bits_per_comp bpc;
+ bool is_navite_422_or_420;
+ u8 drm_bpp = pps->bits_per_pixel;
+ int slice_width = pps->slice_width;
+ int slice_height = pps->slice_height;
+
+ mode = pps->convert_rgb ? CM_RGB : (pps->simple_422 ? CM_444 :
+ (pps->native_422 ? CM_422 :
+ pps->native_420 ? CM_420 : CM_444));
+ bpc = (pps->bits_per_component == 8) ? BPC_8 : (pps->bits_per_component == 10)
+ ? BPC_10 : BPC_12;
+
+ is_navite_422_or_420 = pps->native_422 || pps->native_420;
+
+ DC_FP_START();
+ _do_calc_rc_params(rc, mode, bpc, drm_bpp, is_navite_422_or_420,
+ slice_width, slice_height,
+ pps->dsc_version_minor);
+ DC_FP_END();
+}
+
+/**
+ * calc_dsc_bytes_per_pixel - calculate bytes per pixel
+ * @pps: DRM struct with all required DSC values
+ *
+ * Based on the information inside drm_dsc_config, this function calculates the
+ * number of bytes per pixel.
+ *
+ * @note This calculation requires floating-point operations; most of it
+ * executes under kernel_fpu_{begin,end}.
+ *
+ * Return:
+ * Return the number of bytes per pixel
+ */
+u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps)
+
+{
+ u32 ret;
+ u8 drm_bpp = pps->bits_per_pixel;
+ int slice_width = pps->slice_width;
+ bool is_navite_422_or_420 = pps->native_422 || pps->native_420;
+
+ DC_FP_START();
+ ret = _do_bytes_per_pixel_calc(slice_width, drm_bpp,
+ is_navite_422_or_420);
+ DC_FP_END();
+ return ret;
+}
+
+/**
+ * calc_dsc_bpp_x16 - calculate the DSC target bits per pixel, in 1/16 bpp units
+ * @stream_bandwidth_kbps: stream bandwidth, in kbps
+ * @pix_clk_100hz: pixel clock, in units of 100 Hz
+ * @bpp_increment_div: bpp precision divider (e.g. 16 for 1/16 bpp granularity)
+ *
+ * Calculate the target bits per pixel for the DSC configuration.
+ *
+ * @note This calculation requires floating-point operations; most of it
+ * executes under kernel_fpu_{begin,end}.
+ */
+u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+ u32 bpp_increment_div)
+{
+ u32 dsc_bpp;
+
+ DC_FP_START();
+ dsc_bpp = _do_calc_dsc_bpp_x16(stream_bandwidth_kbps, pix_clk_100hz,
+ bpp_increment_div);
+ DC_FP_END();
+ return dsc_bpp;
+}
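
Editorial note on the new wrappers above: all the floating-point math is now
funnelled through _do_*() helpers that run strictly between DC_FP_START() and
DC_FP_END(), which (on x86, as far as I can tell) expand to
kernel_fpu_begin()/kernel_fpu_end(), so callers elsewhere in DC never have to
manage FPU state themselves. As a worked example of _do_calc_dsc_bpp_x16() with
made-up but plausible numbers:

        stream_bandwidth_kbps = 2000000;   /* 2 Gbit/s             */
        pix_clk_100hz         = 1485000;   /* 148.5 MHz            */
        bpp_increment_div     = 16;        /* 1/16 bpp granularity */

        f_stream_bandwidth_100bps = 2000000 * 10.0f     = 20000000
        f_dsc_target_bpp          = 20000000 / 1485000  ~= 13.468 bpp
        dsc_target_bpp_x16        = (u32)(13.468 * 16)  = 215
        dsc_target_bpp_x16        = (215 * 16) / 16     = 215  /* 13.4375 bpp */

The truncation to u32 is what "rounds down to the nearest precision stop"; with
bpp_increment_div equal to 16 the final rescale to x16 is a no-op, while a
coarser precision would quantize the value further.
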
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index b6b1f09c2009..21723fa6561e 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -77,7 +77,10 @@ struct qp_entry {
typedef struct qp_entry qp_table[];
-void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_comp bpc, float bpp, int slice_width, int slice_height, int minor_version);
+void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
+u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
+u32 calc_dsc_bpp_x16(u32 stream_bandwidth_kbps, u32 pix_clk_100hz,
+ u32 bpp_increment_div);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index 1f6e63b71456..ef830aded5b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -27,8 +27,6 @@
#include "dscc_types.h"
#include "rc_calc.h"
-double dsc_ceil(double num);
-
static void copy_pps_fields(struct drm_dsc_config *to, const struct drm_dsc_config *from)
{
to->line_buf_depth = from->line_buf_depth;
@@ -100,34 +98,13 @@ static void copy_rc_to_cfg(struct drm_dsc_config *dsc_cfg, const struct rc_param
int dscc_compute_dsc_parameters(const struct drm_dsc_config *pps, struct dsc_parameters *dsc_params)
{
- enum colour_mode mode = pps->convert_rgb ? CM_RGB :
- (pps->simple_422 ? CM_444 :
- (pps->native_422 ? CM_422 :
- pps->native_420 ? CM_420 : CM_444));
- enum bits_per_comp bpc = (pps->bits_per_component == 8) ? BPC_8 :
- (pps->bits_per_component == 10) ? BPC_10 : BPC_12;
- float bpp = ((float) pps->bits_per_pixel / 16.0);
- int slice_width = pps->slice_width;
- int slice_height = pps->slice_height;
int ret;
struct rc_params rc;
struct drm_dsc_config dsc_cfg;
- double d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
-
- // TODO: Make sure the formula for calculating this is precise (ceiling vs. floor, and at what point they should be applied)
- if (pps->native_422 || pps->native_420)
- d_bytes_per_pixel /= 2;
-
- dsc_params->bytes_per_pixel = (uint32_t)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- /* in native_422 or native_420 modes, the bits_per_pixel is double the target bpp
- * (the latter is what calc_rc_params expects)
- */
- if (pps->native_422 || pps->native_420)
- bpp /= 2.0;
+ dsc_params->bytes_per_pixel = calc_dsc_bytes_per_pixel(pps);
- calc_rc_params(&rc, mode, bpc, bpp, slice_width, slice_height, pps->dsc_version_minor);
+ calc_rc_params(&rc, pps);
dsc_params->pps = *pps;
dsc_params->pps.initial_scale_value = 8 * rc.rc_model_size / (rc.rc_model_size - rc.initial_fullness_offset);
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index 9431b48aecb4..bcfe34ef8c28 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -843,7 +843,7 @@ static bool build_regamma(struct pwl_float_data_ex *rgb_regamma,
pow_buffer_ptr = -1; // reset back to no optimize
ret = true;
release:
- kfree(coeff);
+ kvfree(coeff);
return ret;
}
@@ -1777,7 +1777,7 @@ bool calculate_user_regamma_ramp(struct dc_transfer_func *output_tf,
kfree(rgb_regamma);
rgb_regamma_alloc_fail:
- kvfree(rgb_user);
+ kfree(rgb_user);
rgb_user_alloc_fail:
return ret;
}
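
The two one-line fixes above pair each free with the allocator actually used
further up in those functions: coeff is presumably allocated with one of the
kvmalloc() family (the allocation is outside this hunk), so it must go through
kvfree(), while rgb_user is presumably kcalloc()'d, so plain kfree() is the
right call. Sketch of the pairing rule, hypothetical variables:

        p = kvzalloc(big_size, GFP_KERNEL);      /* may fall back to vmalloc */
        ...
        kvfree(p);                               /* handles kmalloc- and vmalloc-backed memory */

        q = kcalloc(n, sizeof(*q), GFP_KERNEL);  /* always kmalloc-backed */
        ...
        kfree(q);

Calling kfree() on vmalloc-backed memory is a real bug (the coeff case), whereas
kvfree() on kmalloc-backed memory works, so the rgb_user change is mostly about
consistency.
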
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 67476047c067..fbb3f3a0dff7 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -689,7 +689,7 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
return -EINVAL;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
1 << workload_type,
NULL);
if (ret) {
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
index 85e5b1ed22c2..56923a96b450 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c
@@ -239,7 +239,7 @@ static void ci_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
switch (dev_id) {
case 0x67BA:
- case 0x66B1:
+ case 0x67B1:
smu_data->power_tune_defaults = &defaults_hawaii_pro;
break;
case 0x67B8:
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 2fb97554134f..c2e0fbbccf56 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -522,9 +522,11 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
- ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
- if (ret)
- goto err4;
+ if (adev->psp.ras.ras) {
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+ }
return 0;
@@ -560,7 +562,8 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
- smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
+ if (adev->psp.ras.ras)
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
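
Both hunks above gate the SMU I2C EEPROM controller on the same condition, so
init and fini stay symmetric: fini never tears down a controller that probe
skipped. A minimal sketch of that symmetry, with hypothetical helper names:

        /* probe/init path */
        if (adev->psp.ras.ras) {                 /* optional feature present */
                ret = optional_block_init(adev);
                if (ret)
                        goto err;
        }

        /* remove/fini path */
        if (adev->psp.ras.ras)                   /* tear down only what was set up */
                optional_block_fini(adev);
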
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
index 3da71a088b92..0ecc18b55ffb 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vegam_smumgr.c
@@ -644,9 +644,6 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
/* sclk is bigger than max sclk in the dependence table */
*voltage |= (dep_table->entries[i - 1].vddc * VOLTAGE_SCALE) << VDDC_SHIFT;
- vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
- (dep_table->entries[i - 1].vddc -
- (uint16_t)VDDC_VDDCI_DELTA));
if (SMU7_VOLTAGE_CONTROL_NONE == data->vddci_control)
*voltage |= (data->vbios_boot_state.vddci_bootup_value *
@@ -654,8 +651,13 @@ static int vegam_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
else if (dep_table->entries[i - 1].vddci)
*voltage |= (dep_table->entries[i - 1].vddci *
VOLTAGE_SCALE) << VDDC_SHIFT;
- else
+ else {
+ vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
+ (dep_table->entries[i - 1].vddc -
+ (uint16_t)VDDC_VDDCI_DELTA));
+
*voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
+ }
if (SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control)
*mvdd = data->vbios_boot_state.mvdd_bootup_value * VOLTAGE_SCALE;
diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
index 6b27242b9ee3..bca3fcff16ec 100644
--- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
+++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c
@@ -173,8 +173,6 @@ static int aspeed_gfx_load(struct drm_device *drm)
drm_mode_config_reset(drm);
- drm_fbdev_generic_setup(drm, 32);
-
return 0;
}
@@ -225,6 +223,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
goto err_unload;
+ drm_fbdev_generic_setup(&priv->drm, 32);
return 0;
err_unload:
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index cf804389f5ec..e464429d32df 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -61,13 +61,8 @@ int drm_i2c_encoder_init(struct drm_device *dev,
request_module("%s%s", I2C_MODULE_PREFIX, info->type);
- client = i2c_new_device(adap, info);
- if (!client) {
- err = -ENOMEM;
- goto fail;
- }
-
- if (!client->dev.driver) {
+ client = i2c_new_client_device(adap, info);
+ if (!i2c_client_has_driver(client)) {
err = -ENODEV;
goto fail_unregister;
}
@@ -84,7 +79,7 @@ int drm_i2c_encoder_init(struct drm_device *dev,
err = encoder_drv->encoder_init(client, dev, encoder);
if (err)
- goto fail_unregister;
+ goto fail_module_put;
if (info->platform_data)
encoder->slave_funcs->set_config(&encoder->base,
@@ -92,10 +87,10 @@ int drm_i2c_encoder_init(struct drm_device *dev,
return 0;
+fail_module_put:
+ module_put(module);
fail_unregister:
i2c_unregister_device(client);
- module_put(module);
-fail:
return err;
}
EXPORT_SYMBOL(drm_i2c_encoder_init);
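
Two things change in drm_i2c_encoder_init() above. First, the client is now
created with i2c_new_client_device(), which returns an ERR_PTR() instead of
NULL on failure; i2c_client_has_driver() (as far as I know) treats an error
pointer the same as a client without a bound driver, so one check covers both
failure modes. A simplified comparison of the two conventions:

        /* old convention: NULL on failure, separate driver check */
        client = i2c_new_device(adap, info);
        if (!client)
                return -ENOMEM;
        if (!client->dev.driver)
                return -ENODEV;

        /* new convention: ERR_PTR() on failure, one combined check */
        client = i2c_new_client_device(adap, info);
        if (!i2c_client_has_driver(client))
                return -ENODEV;

Second, the unwind labels are reordered so module_put() only runs once the
module reference has actually been taken, keeping teardown the mirror image of
setup.
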
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 170aa7689110..5609e164805f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,18 +227,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
-/**
- * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
- * @fb_helper: driver-allocated fbdev helper, can be NULL
- *
- * This should be called from driver's drm &drm_driver.lastclose callback
- * when implementing an fbcon on top of kms using this helper. This ensures that
- * the user isn't greeted with a black screen when e.g. X dies.
- *
- * RETURNS:
- * Zero if everything went ok, negative error code otherwise.
- */
-int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+static int
+__drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper,
+ bool force)
{
bool do_delayed;
int ret;
@@ -250,7 +241,16 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- ret = drm_client_modeset_commit(&fb_helper->client);
+ if (force) {
+ /*
+ * Yes this is the _locked version which expects the master lock
+ * to be held. But for forced restores we're intentionally
+ * racing here, see drm_fb_helper_set_par().
+ */
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
+ } else {
+ ret = drm_client_modeset_commit(&fb_helper->client);
+ }
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -262,6 +262,22 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return ret;
}
+
+/**
+ * drm_fb_helper_restore_fbdev_mode_unlocked - restore fbdev configuration
+ * @fb_helper: driver-allocated fbdev helper, can be NULL
+ *
+ * This should be called from driver's drm &drm_driver.lastclose callback
+ * when implementing an fbcon on top of kms using this helper. This ensures that
+ * the user isn't greeted with a black screen when e.g. X dies.
+ *
+ * RETURNS:
+ * Zero if everything went ok, negative error code otherwise.
+ */
+int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
+{
+ return __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, false);
+}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
#ifdef CONFIG_MAGIC_SYSRQ
@@ -1318,6 +1334,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct fb_var_screeninfo *var = &info->var;
+ bool force;
if (oops_in_progress)
return -EBUSY;
@@ -1327,7 +1344,25 @@ int drm_fb_helper_set_par(struct fb_info *info)
return -EINVAL;
}
- drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
+ /*
+ * Normally we want to make sure that a kms master takes precedence over
+ * fbdev, to avoid fbdev flickering and occasionally stealing the
+ * display status. But Xorg first sets the vt back to text mode using
+ * the KDSETMODE ioctl with KD_TEXT, and only after that drops the master
+ * status when exiting.
+ *
+ * In the past this was caught by drm_fb_helper_lastclose(), but on
+ * modern systems where logind always keeps a drm fd open to orchestrate
+ * the vt switching, this doesn't work.
+ *
+ * To not break the userspace ABI we have this special case here, which
+ * is only used for the above case. Everything else uses the normal
+ * commit function, which ensures that we never steal the display from
+ * an active drm master.
+ */
+ force = var->activate & FB_ACTIVATE_KD_TEXT;
+
+ __drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper, force);
return 0;
}
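
The special case above only kicks in when the fbdev var has FB_ACTIVATE_KD_TEXT
set, i.e. when the console layer is switching the VT back to text mode; every
other caller keeps the non-forced path and therefore can never steal the display
from an active DRM master. A driver's lastclose hook keeps using the unchanged
helper, e.g. (minimal sketch, assuming the device keeps its fbdev helper in
dev->fb_helper as the generic fbdev setup does):

        static void mydrv_lastclose(struct drm_device *dev)
        {
                /* non-forced restore: respects an active KMS master */
                drm_fb_helper_restore_fbdev_mode_unlocked(dev->fb_helper);
        }
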
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index ffd95bfeaa94..d00ea384dcbf 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -30,12 +30,6 @@ struct drm_dmi_panel_orientation_data {
int orientation;
};
-static const struct drm_dmi_panel_orientation_data acer_s1003 = {
- .width = 800,
- .height = 1280,
- .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
-};
-
static const struct drm_dmi_panel_orientation_data asus_t100ha = {
.width = 800,
.height = 1280,
@@ -114,13 +108,19 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"),
},
- .driver_data = (void *)&acer_s1003,
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* Asus T100HA */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
},
.driver_data = (void *)&asus_t100ha,
+ }, { /* Asus T101HA */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
index 619f81435c1b..58b89ec11b0e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
@@ -61,7 +61,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
struct device *subdrv_dev, void **dma_priv)
{
struct exynos_drm_private *priv = drm_dev->dev_private;
- int ret;
+ int ret = 0;
if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
@@ -92,7 +92,7 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
if (ret)
clear_dma_max_seg_size(subdrv_dev);
- return 0;
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index fcee33a43aca..03be31427181 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1498,7 +1498,6 @@ static int g2d_probe(struct platform_device *pdev)
g2d->irq = platform_get_irq(pdev, 0);
if (g2d->irq < 0) {
- dev_err(dev, "failed to get irq\n");
ret = g2d->irq;
goto err_put_clk;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index a86abc173605..3821ea76a703 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -269,8 +269,10 @@ static void mic_pre_enable(struct drm_bridge *bridge)
goto unlock;
ret = pm_runtime_get_sync(mic->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(mic->dev);
goto unlock;
+ }
mic_set_path(mic, 1);
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index a6fd0c29e5b8..544b9993c99e 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -307,8 +307,6 @@ static int hibmc_load(struct drm_device *dev)
/* reset all the states of crtc/plane/encoder/connector */
drm_mode_config_reset(dev);
- drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
-
return 0;
err:
@@ -355,6 +353,9 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
ret);
goto err_unload;
}
+
+ drm_fbdev_generic_setup(dev, dev->mode_config.preferred_depth);
+
return 0;
err_unload:
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index aa22465bb56e..0575a1eea2a1 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -2579,14 +2579,14 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
static void
tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
- u32 level)
+ u32 level, enum intel_output_type type)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations;
u32 n_entries, val, ln, dpcnt_mask, dpcnt_val;
- if (encoder->type == INTEL_OUTPUT_HDMI) {
+ if (type == INTEL_OUTPUT_HDMI) {
n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
ddi_translations = tgl_dkl_phy_hdmi_ddi_trans;
} else {
@@ -2638,7 +2638,7 @@ static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
if (intel_phy_is_combo(dev_priv, phy))
icl_combo_phy_ddi_vswing_sequence(encoder, level, type);
else
- tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level);
+ tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level, type);
}
static u32 translate_signal_level(struct intel_dp *intel_dp, int signal_levels)
@@ -2987,7 +2987,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
}
- ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE);
+ ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
/* DPPATC */
@@ -3472,7 +3472,9 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
INTEL_OUTPUT_DP_MST);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- intel_dp_set_infoframes(encoder, false, old_crtc_state, old_conn_state);
+ if (!is_mst)
+ intel_dp_set_infoframes(encoder, false,
+ old_crtc_state, old_conn_state);
/*
* Power down sink before disabling the port, otherwise we end
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 9ea1a397d1b5..26996e1839e2 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -3822,6 +3822,17 @@ skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
return true;
}
+unsigned int
+intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
+{
+ int x = 0, y = 0;
+
+ intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
+ plane_state->color_plane[0].offset, 0);
+
+ return y;
+}
+
static int skl_check_main_surface(struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index efb4da205ea2..3a06f72c9859 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -608,6 +608,7 @@ unsigned int i9xx_plane_max_stride(struct intel_plane *plane,
u32 pixel_format, u64 modifier,
unsigned int rotation);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
+unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index d18b406f2a7d..f29e51ce489c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -397,6 +397,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
*/
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port,
false);
+
+ /*
+ * BSpec 4287: disable DIP after the transcoder is disabled and before
+ * the transcoder clock select is set to none.
+ */
+ if (last_mst_stream)
+ intel_dp_set_infoframes(&intel_dig_port->base, false,
+ old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 1c26673acb2d..412572f88b67 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -48,19 +48,6 @@
#include "intel_frontbuffer.h"
/*
- * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the
- * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
- * origin so the x and y offsets can actually fit the registers. As a
- * consequence, the fence doesn't really start exactly at the display plane
- * address we program because it starts at the real start of the buffer, so we
- * have to take this into consideration here.
- */
-static unsigned int get_crtc_fence_y_offset(struct intel_fbc *fbc)
-{
- return fbc->state_cache.plane.y - fbc->state_cache.plane.adjusted_y;
-}
-
-/*
* For SKL+, the plane source size used by the hardware is based on the value we
* write to the PLANE_SIZE register. For BDW-, the hardware looks at the value
* we wrote to PIPESRC.
@@ -141,7 +128,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
intel_de_write(dev_priv, FBC_FENCE_OFF,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
}
/* enable it... */
@@ -175,7 +162,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
intel_de_write(dev_priv, DPFC_FENCE_YOFF,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
} else {
intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
}
@@ -243,7 +230,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
}
} else {
if (IS_GEN(dev_priv, 6)) {
@@ -253,7 +240,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
/* enable it... */
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -320,7 +307,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
SNB_CPU_FENCE_ENABLE | params->fence_id);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ params->fence_y_offset);
} else if (dev_priv->ggtt.num_fences) {
intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
@@ -631,8 +618,8 @@ static bool rotation_is_valid(struct drm_i915_private *dev_priv,
/*
* For some reason, the hardware tracking starts looking at whatever we
* programmed as the display plane base address register. It does not look at
- * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y}
- * variables instead of just looking at the pipe/plane size.
+ * the X and Y offset registers. That's why we include the src x/y offsets
+ * instead of just looking at the plane size.
*/
static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
{
@@ -705,7 +692,6 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->plane.src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
cache->plane.adjusted_x = plane_state->color_plane[0].x;
cache->plane.adjusted_y = plane_state->color_plane[0].y;
- cache->plane.y = plane_state->uapi.src.y1 >> 16;
cache->plane.pixel_blend_mode = plane_state->hw.pixel_blend_mode;
@@ -713,6 +699,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.stride = fb->pitches[0];
cache->fb.modifier = fb->modifier;
+ cache->fence_y_offset = intel_plane_fence_y_offset(plane_state);
+
drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
!plane_state->vma->fence);
@@ -731,6 +719,25 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ struct intel_fbc_state_cache *cache = &fbc->state_cache;
+
+ if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
+ cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
+ return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
+ else
+ return 0;
+}
+
+static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv);
+}
+
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
@@ -883,12 +890,13 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
memset(params, 0, sizeof(*params));
params->fence_id = cache->fence_id;
+ params->fence_y_offset = cache->fence_y_offset;
params->crtc.pipe = crtc->pipe;
params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
- params->crtc.fence_y_offset = get_crtc_fence_y_offset(fbc);
params->fb.format = cache->fb.format;
+ params->fb.modifier = cache->fb.modifier;
params->fb.stride = cache->fb.stride;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
@@ -918,6 +926,9 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
if (params->fb.format != cache->fb.format)
return false;
+ if (params->fb.modifier != cache->fb.modifier)
+ return false;
+
if (params->fb.stride != cache->fb.stride)
return false;
@@ -1197,7 +1208,8 @@ void intel_fbc_enable(struct intel_atomic_state *state,
if (fbc->crtc) {
if (fbc->crtc != crtc ||
- !intel_fbc_cfb_size_changed(dev_priv))
+ (!intel_fbc_cfb_size_changed(dev_priv) &&
+ !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv)))
goto out;
__intel_fbc_disable(dev_priv);
@@ -1219,12 +1231,7 @@ void intel_fbc_enable(struct intel_atomic_state *state,
goto out;
}
- if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
- plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
- cache->gen9_wa_cfb_stride =
- DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
- else
- cache->gen9_wa_cfb_stride = 0;
+ cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
pipe_name(crtc->pipe));
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 010f37240710..95b6d9457910 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -2867,19 +2867,13 @@ intel_hdmi_connector_register(struct drm_connector *connector)
return ret;
}
-static void intel_hdmi_destroy(struct drm_connector *connector)
+static void intel_hdmi_connector_unregister(struct drm_connector *connector)
{
struct cec_notifier *n = intel_attached_hdmi(to_intel_connector(connector))->cec_notifier;
cec_notifier_conn_unregister(n);
- intel_connector_destroy(connector);
-}
-
-static void intel_hdmi_connector_unregister(struct drm_connector *connector)
-{
intel_hdmi_remove_i2c_symlink(connector);
-
intel_connector_unregister(connector);
}
@@ -2891,7 +2885,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.atomic_set_property = intel_digital_connector_atomic_set_property,
.late_register = intel_hdmi_connector_register,
.early_unregister = intel_hdmi_connector_unregister,
- .destroy = intel_hdmi_destroy,
+ .destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e4aece20bc80..52db2bde44a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -204,25 +204,25 @@ static int __ring_active(struct intel_ring *ring)
{
int err;
- err = i915_active_acquire(&ring->vma->active);
+ err = intel_ring_pin(ring);
if (err)
return err;
- err = intel_ring_pin(ring);
+ err = i915_active_acquire(&ring->vma->active);
if (err)
- goto err_active;
+ goto err_pin;
return 0;
-err_active:
- i915_active_release(&ring->vma->active);
+err_pin:
+ intel_ring_unpin(ring);
return err;
}
static void __ring_retire(struct intel_ring *ring)
{
- intel_ring_unpin(ring);
i915_active_release(&ring->vma->active);
+ intel_ring_unpin(ring);
}
__i915_active_call
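
The reordering above makes __ring_retire() the exact mirror of __ring_active():
pin first and unpin last, with the active-tracker acquire nested inside. Keeping
teardown in reverse acquisition order means a failure mid-acquire only has to
undo the steps that already succeeded. Generic sketch, hypothetical names:

        err = acquire_outer(obj);               /* e.g. pin */
        if (err)
                return err;

        err = acquire_inner(obj);               /* e.g. active-tracker acquire */
        if (err) {
                release_outer(obj);             /* undo only what succeeded */
                return err;
        }
        ...
        release_inner(obj);                     /* release in reverse order */
        release_outer(obj);
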
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index da5b61085257..8691eb61e185 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -646,7 +646,7 @@ static int engine_setup_common(struct intel_engine_cs *engine)
struct measure_breadcrumb {
struct i915_request rq;
struct intel_ring ring;
- u32 cs[1024];
+ u32 cs[2048];
};
static int measure_breadcrumb_dw(struct intel_context *ce)
@@ -668,6 +668,8 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
+ frame->ring.wrap =
+ BITS_PER_TYPE(frame->ring.size) - ilog2(frame->ring.size);
frame->ring.effective_size = frame->ring.size;
intel_ring_update_space(&frame->ring);
frame->rq.ring = &frame->ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 87e6c5bdd2dc..cb07e1d2a353 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1134,6 +1134,13 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
list_move(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ /* Check in case we rolled back so far that we wrap past [size/2] */
+ if (intel_ring_direction(rq->ring,
+ intel_ring_wrap(rq->ring,
+ rq->tail),
+ rq->ring->tail) > 0)
+ rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+
active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
@@ -1498,8 +1505,9 @@ static u64 execlists_update_context(struct i915_request *rq)
* HW has a tendency to ignore us rewinding the TAIL to the end of
* an earlier request.
*/
+ GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
+ prev = rq->ring->tail;
tail = intel_ring_set_tail(rq->ring, rq->tail);
- prev = ce->lrc_reg_state[CTX_RING_TAIL];
if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
desc |= CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state[CTX_RING_TAIL] = tail;
@@ -1895,7 +1903,8 @@ static void defer_active(struct intel_engine_cs *engine)
static bool
need_timeslice(const struct intel_engine_cs *engine,
- const struct i915_request *rq)
+ const struct i915_request *rq,
+ const struct rb_node *rb)
{
int hint;
@@ -1903,9 +1912,28 @@ need_timeslice(const struct intel_engine_cs *engine,
return false;
hint = engine->execlists.queue_priority_hint;
+
+ if (rb) {
+ const struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ const struct intel_engine_cs *inflight =
+ intel_context_inflight(&ve->context);
+
+ if (!inflight || inflight == engine) {
+ struct i915_request *next;
+
+ rcu_read_lock();
+ next = READ_ONCE(ve->request);
+ if (next)
+ hint = max(hint, rq_prio(next));
+ rcu_read_unlock();
+ }
+ }
+
if (!list_is_last(&rq->sched.link, &engine->active.requests))
hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
+ GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
return hint >= effective_prio(rq);
}
@@ -1977,10 +2005,9 @@ static void set_timeslice(struct intel_engine_cs *engine)
set_timer_ms(&engine->execlists.timer, duration);
}
-static void start_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists *execlists = &engine->execlists;
- const int prio = queue_prio(execlists);
unsigned long duration;
if (!intel_engine_has_timeslices(engine))
@@ -2140,7 +2167,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
__unwind_incomplete_requests(engine);
last = NULL;
- } else if (need_timeslice(engine, last) &&
+ } else if (need_timeslice(engine, last, rb) &&
timeslice_expired(execlists, last)) {
if (i915_request_completed(last)) {
tasklet_hi_schedule(&execlists->tasklet);
@@ -2188,7 +2215,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- start_timeslice(engine);
+ start_timeslice(engine, queue_prio(execlists));
return;
}
}
@@ -2223,7 +2250,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- start_timeslice(engine);
+ start_timeslice(engine, rq_prio(rq));
return; /* leave this for another sibling */
}
@@ -4739,6 +4766,14 @@ static int gen12_emit_flush(struct i915_request *request, u32 mode)
return 0;
}
+static void assert_request_valid(struct i915_request *rq)
+{
+ struct intel_ring *ring __maybe_unused = rq->ring;
+
+ /* Can we unwind this request without appearing to go forwards? */
+ GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
+}
+
/*
* Reserve space for 2 NOOPs at the end of each request to be
* used as a workaround for not being allowed to do lite
@@ -4751,6 +4786,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
*cs++ = MI_NOOP;
request->wa_tail = intel_ring_offset(request, cs);
+ /* Check that entire request is less than half the ring */
+ assert_request_valid(request);
+
return cs;
}
@@ -5358,13 +5396,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
* typically be the first we inspect for submission.
*/
swp = prandom_u32_max(ve->num_siblings);
- if (!swp)
- return;
-
- swap(ve->siblings[swp], ve->siblings[0]);
- if (!intel_engine_has_relative_mmio(ve->siblings[0]))
- virtual_update_register_offsets(ve->context.lrc_reg_state,
- ve->siblings[0]);
+ if (swp)
+ swap(ve->siblings[swp], ve->siblings[0]);
}
static int virtual_context_alloc(struct intel_context *ce)
@@ -5377,15 +5410,9 @@ static int virtual_context_alloc(struct intel_context *ce)
static int virtual_context_pin(struct intel_context *ce)
{
struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
- int err;
/* Note: we must use a real engine class for setting up reg state */
- err = __execlists_context_pin(ce, ve->siblings[0]);
- if (err)
- return err;
-
- virtual_engine_initial_hint(ve);
- return 0;
+ return __execlists_context_pin(ce, ve->siblings[0]);
}
static void virtual_context_enter(struct intel_context *ce)
@@ -5650,6 +5677,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_breadcrumbs(&ve->base);
intel_engine_init_execlists(&ve->base);
+ ve->base.breadcrumbs.irq_armed = true; /* fake HW, used for irq_work */
ve->base.cops = &virtual_context_ops;
ve->base.request_alloc = execlists_request_alloc;
@@ -5731,6 +5759,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+ virtual_engine_initial_hint(ve);
return &ve->context;
err_put:
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 8cda1b7e17ba..bdb324167ef3 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -315,3 +315,7 @@ int intel_ring_cacheline_align(struct i915_request *rq)
GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
return 0;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 90a2b9e399b0..85d2bef51524 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -179,6 +179,12 @@ wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
}
static void
+wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
+{
+ wa_write_masked_or(wal, reg, clr, 0);
+}
+
+static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
@@ -687,6 +693,227 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
}
static void
+gen4_gt_workarounds_init(struct drm_i915_private *i915,
+ struct i915_wa_list *wal)
+{
+ /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+}
+
+static void
+g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ gen4_gt_workarounds_init(i915, wal);
+
+ /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
+ wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
+}
+
+static void
+ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ g4x_gt_workarounds_init(i915, wal);
+
+ wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
+}
+
+static void
+snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
+ wa_masked_en(wal,
+ _3D_CHICKEN,
+ _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
+
+ /* WaDisable_RenderCache_OperationalFlush:snb */
+ wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal,
+ GEN6_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB);
+
+ wa_masked_en(wal,
+ _3D_CHICKEN3,
+ /* WaStripsFansDisableFastClipPerformanceFix:snb */
+ _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
+ /*
+ * Bspec says:
+ * "This bit must be set if 3DSTATE_CLIP clip mode is set
+ * to normal and 3DSTATE_SF number of SF output attributes
+ * is more than 16."
+ */
+ _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
+}
+
+static void
+ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ /* WaDisableEarlyCull:ivb */
+ wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /* WaDisablePSDDualDispatchEnable:ivb */
+ if (IS_IVB_GT1(i915))
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+ /* WaDisable_RenderCache_OperationalFlush:ivb */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
+ wa_masked_dis(wal,
+ GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode:ivb */
+ wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
+ wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
+
+ /* WaForceL3Serialization:ivb */
+ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_masked_or(wal, GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ if (0) { /* causes HiZ corruption on ivb:gt1 */
+ /* enable HiZ Raw Stall Optimization */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
+ }
+
+ /* WaDisable4x2SubspanOptimization:ivb */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+}
+
+static void
+vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ /* WaDisableEarlyCull:vlv */
+ wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
+
+ /* WaPsdDispatchEnable:vlv */
+ /* WaDisablePSDDualDispatchEnable:vlv */
+ wa_masked_en(wal,
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_MAX_PS_THREAD_DEP |
+ GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
+
+ /* WaDisable_RenderCache_OperationalFlush:vlv */
+ wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
+
+ /* WaForceL3Serialization:vlv */
+ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
+
+ /*
+ * WaVSThreadDispatchOverride:ivb,vlv
+ *
+ * This actually overrides the dispatch
+ * mode for all thread types.
+ */
+ wa_write_masked_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN7_FF_SCHED_MASK,
+ GEN7_FF_TS_SCHED_HW |
+ GEN7_FF_VS_SCHED_HW |
+ GEN7_FF_DS_SCHED_HW);
+
+ /*
+ * BSpec says this must be set, even though
+ * WaDisable4x2SubspanOptimization isn't listed for VLV.
+ */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ /*
+ * WaIncreaseL3CreditsForVLVB0:vlv
+ * This is the hardware default actually.
+ */
+ wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
+}
+
+static void
+hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+{
+ /* L3 caching of data atomics doesn't work -- disable it. */
+ wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
+
+ wa_add(wal,
+ HSW_ROW_CHICKEN3, 0,
+ _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ 0 /* XXX does this reg exist? */);
+
+ /* WaVSRefCountFullforceMissDisable:hsw */
+ wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
+
+ wa_masked_dis(wal,
+ CACHE_MODE_0_GEN7,
+ /* WaDisable_RenderCache_OperationalFlush:hsw */
+ RC_OP_FLUSH_ENABLE |
+ /* enable HiZ Raw Stall Optimization */
+ HIZ_RAW_STALL_OPT_DISABLE);
+
+ /* WaDisable4x2SubspanOptimization:hsw */
+ wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
+
+ /*
+ * BSpec recommends 8x4 when MSAA is used,
+ * however in practice 16x4 seems fastest.
+ *
+ * Note that PS/WM thread counts depend on the WIZ hashing
+ * disable bit, which we don't touch here, but it's good
+ * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
+ */
+ wa_add(wal, GEN7_GT_MODE, 0,
+ _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4),
+ GEN6_WIZ_HASHING_16x4);
+
+ /* WaSampleCChickenBitEnable:hsw */
+ wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
+}
+
+static void
gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
{
/* WaDisableKillLogic:bxt,skl,kbl */
@@ -963,6 +1190,20 @@ gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
bxt_gt_workarounds_init(i915, wal);
else if (IS_SKYLAKE(i915))
skl_gt_workarounds_init(i915, wal);
+ else if (IS_HASWELL(i915))
+ hsw_gt_workarounds_init(i915, wal);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_gt_workarounds_init(i915, wal);
+ else if (IS_IVYBRIDGE(i915))
+ ivb_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 6))
+ snb_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 5))
+ ilk_gt_workarounds_init(i915, wal);
+ else if (IS_G4X(i915))
+ g4x_gt_workarounds_init(i915, wal);
+ else if (IS_GEN(i915, 4))
+ gen4_gt_workarounds_init(i915, wal);
else if (INTEL_GEN(i915) <= 8)
return;
else
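
The new wa_write_clr() helper above is shorthand for clearing bits via the
existing masked-write primitive: mask out `clr` and OR in nothing. Taken
straight from the hunks, the two forms below are equivalent:

        /* WaForceL3Serialization:ivb,vlv */
        wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);

        /* open-coded equivalent */
        wa_write_masked_or(wal, GEN7_L3SQCREG4,
                           L3SQ_URB_READ_CAM_MATCH_DISABLE, 0);

The rest of the added block defines GT workaround lists for gen4 through
Haswell and wires them into gt_init_workarounds() in newest-to-oldest platform
order.
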
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 2b2efff6e19d..4aa4cc917d8b 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -310,22 +310,20 @@ static bool wait_until_running(struct hang *h, struct i915_request *rq)
1000));
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
- unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
- *saved = engine->props.heartbeat_interval_ms;
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
- unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
- engine->props.heartbeat_interval_ms = saved;
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
}
static int igt_hang_sanitycheck(void *arg)
@@ -473,7 +471,6 @@ static int igt_reset_nop_engine(void *arg)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count, count;
struct intel_context *ce;
- unsigned long heartbeat;
IGT_TIMEOUT(end_time);
int err;
@@ -485,7 +482,7 @@ static int igt_reset_nop_engine(void *arg)
reset_engine_count = i915_reset_engine_count(global, engine);
count = 0;
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
int i;
@@ -529,7 +526,7 @@ static int igt_reset_nop_engine(void *arg)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
@@ -564,7 +561,6 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
for_each_engine(engine, gt, id) {
unsigned int reset_count, reset_engine_count;
- unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (active && !intel_engine_can_store_dword(engine))
@@ -580,7 +576,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
reset_count = i915_reset_count(global);
reset_engine_count = i915_reset_engine_count(global, engine);
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
if (active) {
@@ -632,7 +628,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
if (err)
break;
@@ -789,7 +785,6 @@ static int __igt_reset_engines(struct intel_gt *gt,
struct active_engine threads[I915_NUM_ENGINES] = {};
unsigned long device = i915_reset_count(global);
unsigned long count = 0, reported;
- unsigned long heartbeat;
IGT_TIMEOUT(end_time);
if (flags & TEST_ACTIVE &&
@@ -832,7 +827,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
yield(); /* start all threads before we begin */
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
do {
struct i915_request *rq = NULL;
@@ -906,7 +901,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
}
} while (time_before(jiffies, end_time));
clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
pr_info("i915_reset_engine(%s:%s): %lu resets\n",
engine->name, test_name, count);
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 824f99c4cc7c..924bc01ef526 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -51,22 +51,20 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
return vma;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
- unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
- *saved = engine->props.heartbeat_interval_ms;
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
- unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
- engine->props.heartbeat_interval_ms = saved;
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
}
static bool is_active(struct i915_request *rq)
@@ -224,7 +222,6 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
struct intel_context *ce[2] = {};
struct i915_request *rq[2];
struct igt_live_test t;
- unsigned long saved;
int n;
if (prio && !intel_engine_has_preemption(engine))
@@ -237,7 +234,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
err = -EIO;
break;
}
- engine_heartbeat_disable(engine, &saved);
+ engine_heartbeat_disable(engine);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
struct intel_context *tmp;
@@ -345,7 +342,7 @@ err_ce:
intel_context_put(ce[n]);
}
- engine_heartbeat_enable(engine, saved);
+ engine_heartbeat_enable(engine);
if (igt_live_test_end(&t))
err = -EIO;
if (err)
@@ -466,7 +463,6 @@ static int live_hold_reset(void *arg)
for_each_engine(engine, gt, id) {
struct intel_context *ce;
- unsigned long heartbeat;
struct i915_request *rq;
ce = intel_context_create(engine);
@@ -475,7 +471,7 @@ static int live_hold_reset(void *arg)
break;
}
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -535,7 +531,7 @@ static int live_hold_reset(void *arg)
i915_request_put(rq);
out:
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
intel_context_put(ce);
if (err)
break;
@@ -580,10 +576,9 @@ static int live_error_interrupt(void *arg)
for_each_engine(engine, gt, id) {
const struct error_phase *p;
- unsigned long heartbeat;
int err = 0;
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
for (p = phases; p->error[0] != GOOD; p++) {
struct i915_request *client[ARRAY_SIZE(phases->error)];
@@ -682,7 +677,7 @@ out:
}
}
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
if (err) {
intel_gt_set_wedged(gt);
return err;
@@ -828,7 +823,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer,
}
}
- err = release_queue(outer, vma, n, INT_MAX);
+ err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER);
if (err)
goto out;
@@ -895,16 +890,14 @@ static int live_timeslice_preempt(void *arg)
enum intel_engine_id id;
for_each_engine(engine, gt, id) {
- unsigned long saved;
-
if (!intel_engine_has_preemption(engine))
continue;
memset(vaddr, 0, PAGE_SIZE);
- engine_heartbeat_disable(engine, &saved);
+ engine_heartbeat_disable(engine);
err = slice_semaphore_queue(engine, vma, count);
- engine_heartbeat_enable(engine, saved);
+ engine_heartbeat_enable(engine);
if (err)
goto err_pin;
@@ -1009,7 +1002,6 @@ static int live_timeslice_rewind(void *arg)
enum { X = 1, Z, Y };
struct i915_request *rq[3] = {};
struct intel_context *ce;
- unsigned long heartbeat;
unsigned long timeslice;
int i, err = 0;
u32 *slot;
@@ -1028,7 +1020,7 @@ static int live_timeslice_rewind(void *arg)
* Expect execution/evaluation order XZY
*/
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
slot = memset32(engine->status_page.addr + 1000, 0, 4);
@@ -1122,7 +1114,7 @@ err:
wmb();
engine->props.timeslice_duration_ms = timeslice;
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
for (i = 0; i < 3; i++)
i915_request_put(rq[i]);
if (igt_flush_test(gt->i915))
@@ -1202,12 +1194,11 @@ static int live_timeslice_queue(void *arg)
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
};
struct i915_request *rq, *nop;
- unsigned long saved;
if (!intel_engine_has_preemption(engine))
continue;
- engine_heartbeat_disable(engine, &saved);
+ engine_heartbeat_disable(engine);
memset(vaddr, 0, PAGE_SIZE);
/* ELSP[0]: semaphore wait */
@@ -1284,7 +1275,7 @@ static int live_timeslice_queue(void *arg)
err_rq:
i915_request_put(rq);
err_heartbeat:
- engine_heartbeat_enable(engine, saved);
+ engine_heartbeat_enable(engine);
if (err)
break;
}
@@ -1298,6 +1289,121 @@ err_obj:
return err;
}
+static int live_timeslice_nopreempt(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * We should not timeslice into a request that is marked with
+ * I915_REQUEST_NOPREEMPT.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ unsigned long timeslice;
+
+ if (!intel_engine_has_preemption(engine))
+ continue;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ engine_heartbeat_disable(engine);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ /* Create an unpreemptible spinner */
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
+ i915_request_put(rq);
+
+ /* Followed by a maximum priority barrier (heartbeat) */
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out_spin;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_spin;
+ }
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ /*
+ * Wait until the barrier is in ELSP, and we know timeslicing
+ * will have been activated.
+ */
+ if (wait_for_submit(engine, rq, HZ / 2)) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto out_spin;
+ }
+
+ /*
+ * Since the ELSP[0] request is unpreemptible, it should not
+ * allow the maximum priority barrier through. Wait long
+ * enough to see if it is timesliced in by mistake.
+ */
+ if (i915_request_wait(rq, 0, timeslice_threshold(engine)) >= 0) {
+ pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n",
+ engine->name);
+ err = -EINVAL;
+ }
+ i915_request_put(rq);
+
+out_spin:
+ igt_spinner_end(&spin);
+out_heartbeat:
+ xchg(&engine->props.timeslice_duration_ms, timeslice);
+ engine_heartbeat_enable(engine);
+ if (err)
+ break;
+
+ if (igt_flush_test(gt->i915)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
static int live_busywait_preempt(void *arg)
{
struct intel_gt *gt = arg;
@@ -4153,7 +4259,6 @@ static int reset_virtual_engine(struct intel_gt *gt,
{
struct intel_engine_cs *engine;
struct intel_context *ve;
- unsigned long *heartbeat;
struct igt_spinner spin;
struct i915_request *rq;
unsigned int n;
@@ -4165,15 +4270,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
* descendents are not executed while the capture is in progress.
*/
- heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
- if (!heartbeat)
+ if (igt_spinner_init(&spin, gt))
return -ENOMEM;
- if (igt_spinner_init(&spin, gt)) {
- err = -ENOMEM;
- goto out_free;
- }
-
ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
@@ -4181,7 +4280,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
}
for (n = 0; n < nsibling; n++)
- engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+ engine_heartbeat_disable(siblings[n]);
rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
if (IS_ERR(rq)) {
@@ -4252,13 +4351,11 @@ out_rq:
i915_request_put(rq);
out_heartbeat:
for (n = 0; n < nsibling; n++)
- engine_heartbeat_enable(siblings[n], heartbeat[n]);
+ engine_heartbeat_enable(siblings[n]);
intel_context_put(ve);
out_spin:
igt_spinner_fini(&spin);
-out_free:
- kfree(heartbeat);
return err;
}
@@ -4314,6 +4411,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_timeslice_preempt),
SUBTEST(live_timeslice_rewind),
SUBTEST(live_timeslice_queue),
+ SUBTEST(live_timeslice_nopreempt),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
SUBTEST(live_late_preempt),
@@ -4932,9 +5030,7 @@ static int live_lrc_gpr(void *arg)
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- unsigned long heartbeat;
-
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
err = __live_lrc_gpr(engine, scratch, false);
if (err)
@@ -4945,7 +5041,7 @@ static int live_lrc_gpr(void *arg)
goto err;
err:
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -5092,10 +5188,9 @@ static int live_lrc_timestamp(void *arg)
*/
for_each_engine(data.engine, gt, id) {
- unsigned long heartbeat;
int i, err = 0;
- engine_heartbeat_disable(data.engine, &heartbeat);
+ engine_heartbeat_disable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
struct intel_context *tmp;
@@ -5128,7 +5223,7 @@ static int live_lrc_timestamp(void *arg)
}
err:
- engine_heartbeat_enable(data.engine, heartbeat);
+ engine_heartbeat_enable(data.engine);
for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
if (!data.ce[i])
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index 8831ffee2061..63f87d8608c3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -18,6 +18,20 @@ struct live_mocs {
void *vaddr;
};
+static struct intel_context *mocs_context_create(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return ce;
+
+ /* We build large requests to read the registers from the ring */
+ ce->ring = __intel_context_ring_size(SZ_16K);
+
+ return ce;
+}
+
static int request_add_sync(struct i915_request *rq, int err)
{
i915_request_get(rq);
@@ -301,7 +315,7 @@ static int live_mocs_clean(void *arg)
for_each_engine(engine, gt, id) {
struct intel_context *ce;
- ce = intel_context_create(engine);
+ ce = mocs_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
@@ -395,7 +409,7 @@ static int live_mocs_reset(void *arg)
for_each_engine(engine, gt, id) {
struct intel_context *ce;
- ce = intel_context_create(engine);
+ ce = mocs_context_create(engine);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring.c b/drivers/gpu/drm/i915/gt/selftest_ring.c
new file mode 100644
index 000000000000..2a8c534dc125
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+static struct intel_ring *mock_ring(unsigned long sz)
+{
+ struct intel_ring *ring;
+
+ ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
+ if (!ring)
+ return NULL;
+
+ kref_init(&ring->ref);
+ ring->size = sz;
+ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(sz);
+ ring->effective_size = sz;
+ ring->vaddr = (void *)(ring + 1);
+ atomic_set(&ring->pin_count, 1);
+
+ intel_ring_update_space(ring);
+
+ return ring;
+}
+
+static void mock_ring_free(struct intel_ring *ring)
+{
+ kfree(ring);
+}
+
+static int check_ring_direction(struct intel_ring *ring,
+ u32 next, u32 prev,
+ int expected)
+{
+ int result;
+
+ result = intel_ring_direction(ring, next, prev);
+ if (result < 0)
+ result = -1;
+ else if (result > 0)
+ result = 1;
+
+ if (result != expected) {
+ pr_err("intel_ring_direction(%u, %u):%d != %d\n",
+ next, prev, result, expected);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int check_ring_step(struct intel_ring *ring, u32 x, u32 step)
+{
+ u32 prev = x, next = intel_ring_wrap(ring, x + step);
+ int err = 0;
+
+ err |= check_ring_direction(ring, next, next, 0);
+ err |= check_ring_direction(ring, prev, prev, 0);
+ err |= check_ring_direction(ring, next, prev, 1);
+ err |= check_ring_direction(ring, prev, next, -1);
+
+ return err;
+}
+
+static int check_ring_offset(struct intel_ring *ring, u32 x, u32 step)
+{
+ int err = 0;
+
+ err |= check_ring_step(ring, x, step);
+ err |= check_ring_step(ring, intel_ring_wrap(ring, x + 1), step);
+ err |= check_ring_step(ring, intel_ring_wrap(ring, x - 1), step);
+
+ return err;
+}
+
+static int igt_ring_direction(void *dummy)
+{
+ struct intel_ring *ring;
+ unsigned int half = 2048;
+ int step, err = 0;
+
+ ring = mock_ring(2 * half);
+ if (!ring)
+ return -ENOMEM;
+
+ GEM_BUG_ON(ring->size != 2 * half);
+
+ /* Precision of wrap detection is limited to ring->size / 2 */
+ for (step = 1; step < half; step <<= 1) {
+ err |= check_ring_offset(ring, 0, step);
+ err |= check_ring_offset(ring, half, step);
+ }
+ err |= check_ring_step(ring, 0, half - 64);
+
+ /* And check unwrapped handling for good measure */
+ err |= check_ring_offset(ring, 0, 2 * half + 64);
+ err |= check_ring_offset(ring, 3 * half, 1);
+
+ mock_ring_free(ring);
+ return err;
+}
+
+int intel_ring_mock_selftests(void)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(igt_ring_direction),
+ };
+
+ return i915_subtests(tests, NULL);
+}
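The mock ring above sets wrap = BITS_PER_TYPE(size) - ilog2(size), i.e. the shift needed to move ring offsets into the top bits of a 32-bit word so that their difference reads as a signed distance. A stand-alone sketch of that technique, assuming a power-of-two ring size (helper name and values are illustrative, not the driver's):

#include <assert.h>
#include <stdint.h>

/* Sign of the wrapped distance from prev to next on a power-of-two ring.
 * Shifting the difference so the ring size fills the 32-bit word makes
 * "ahead by less than half a ring" positive and "behind" negative.
 */
static int ring_direction(uint32_t size, uint32_t next, uint32_t prev)
{
	unsigned int wrap = 32 - __builtin_ctz(size);
	int32_t delta = (int32_t)((next - prev) << wrap);

	return (delta > 0) - (delta < 0);
}

int main(void)
{
	const uint32_t size = 4096;

	assert(ring_direction(size, 100, 100) == 0);
	assert(ring_direction(size, 200, 100) == 1);		/* ahead */
	assert(ring_direction(size, 100, 200) == -1);		/* behind */
	assert(ring_direction(size, 10, size - 10) == 1);	/* ahead across the wrap */
	return 0;
}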
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index 6275d69aa9cc..c91981e75ebf 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -20,24 +20,20 @@
/* Try to isolate the impact of cstates from determing frequency response */
#define CPU_LATENCY 0 /* -1 to disable pm_qos, 0 to disable cstates */
-static unsigned long engine_heartbeat_disable(struct intel_engine_cs *engine)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
- unsigned long old;
-
- old = fetch_and_zero(&engine->props.heartbeat_interval_ms);
+ engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
-
- return old;
}
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
- unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
- engine->props.heartbeat_interval_ms = saved;
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
}
static void dummy_rps_work(struct work_struct *wrk)
@@ -48,9 +44,9 @@ static int cmp_u64(const void *A, const void *B)
{
const u64 *a = A, *b = B;
- if (a < b)
+ if (*a < *b)
return -1;
- else if (a > b)
+ else if (*a > *b)
return 1;
else
return 0;
@@ -60,9 +56,9 @@ static int cmp_u32(const void *A, const void *B)
{
const u32 *a = A, *b = B;
- if (a < b)
+ if (*a < *b)
return -1;
- else if (a > b)
+ else if (*a > *b)
return 1;
else
return 0;
@@ -246,7 +242,6 @@ int live_rps_clock_interval(void *arg)
intel_gt_check_clock_frequency(gt);
for_each_engine(engine, gt, id) {
- unsigned long saved_heartbeat;
struct i915_request *rq;
u32 cycles;
u64 dt;
@@ -254,13 +249,13 @@ int live_rps_clock_interval(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@@ -271,7 +266,7 @@ int live_rps_clock_interval(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -327,7 +322,7 @@ int live_rps_clock_interval(void *arg)
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
if (err == 0) {
u64 time = intel_gt_pm_interval_to_ns(gt, cycles);
@@ -405,7 +400,6 @@ int live_rps_control(void *arg)
intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
- unsigned long saved_heartbeat;
struct i915_request *rq;
ktime_t min_dt, max_dt;
int f, limit;
@@ -414,7 +408,7 @@ int live_rps_control(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
@@ -430,7 +424,7 @@ int live_rps_control(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -440,7 +434,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not set minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@@ -457,7 +451,7 @@ int live_rps_control(void *arg)
pr_err("%s: could not restore minimum frequency [%x], only %x!\n",
engine->name, rps->min_freq, read_cagf(rps));
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
show_pstate_limits(rps);
err = -EINVAL;
break;
@@ -472,7 +466,7 @@ int live_rps_control(void *arg)
min_dt = ktime_sub(ktime_get(), min_dt);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
pr_info("%s: range:[%x:%uMHz, %x:%uMHz] limit:[%x:%uMHz], %x:%x response %lluns:%lluns\n",
engine->name,
@@ -635,7 +629,6 @@ int live_rps_frequency_cs(void *arg)
rps->work.func = dummy_rps_work;
for_each_engine(engine, gt, id) {
- unsigned long saved_heartbeat;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cancel, *cntr;
@@ -644,14 +637,14 @@ int live_rps_frequency_cs(void *arg)
int freq;
} min, max;
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, false,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
break;
}
@@ -732,7 +725,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -778,7 +771,6 @@ int live_rps_frequency_srm(void *arg)
rps->work.func = dummy_rps_work;
for_each_engine(engine, gt, id) {
- unsigned long saved_heartbeat;
struct i915_request *rq;
struct i915_vma *vma;
u32 *cancel, *cntr;
@@ -787,14 +779,14 @@ int live_rps_frequency_srm(void *arg)
int freq;
} min, max;
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
vma = create_spin_counter(engine,
engine->kernel_context->vm, true,
&cancel, &cntr);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
break;
}
@@ -874,7 +866,7 @@ err_vma:
i915_vma_unpin(vma);
i915_vma_put(vma);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
if (igt_flush_test(gt->i915))
err = -EIO;
if (err)
@@ -1066,16 +1058,14 @@ int live_rps_interrupt(void *arg)
for_each_engine(engine, gt, id) {
/* Keep the engine busy with a spinner; expect an UP! */
if (pm_events & GEN6_PM_RP_UP_THRESHOLD) {
- unsigned long saved_heartbeat;
-
intel_gt_pm_wait_for_idle(engine->gt);
GEM_BUG_ON(intel_rps_is_active(rps));
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
err = __rps_up_interrupt(rps, engine, &spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
if (err)
goto out;
@@ -1084,15 +1074,13 @@ int live_rps_interrupt(void *arg)
/* Keep the engine awake but idle and check for DOWN */
if (pm_events & GEN6_PM_RP_DOWN_THRESHOLD) {
- unsigned long saved_heartbeat;
-
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
intel_rc6_disable(&gt->rc6);
err = __rps_down_interrupt(rps, engine);
intel_rc6_enable(&gt->rc6);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
if (err)
goto out;
}
@@ -1168,7 +1156,6 @@ int live_rps_power(void *arg)
rps->work.func = dummy_rps_work;
for_each_engine(engine, gt, id) {
- unsigned long saved_heartbeat;
struct i915_request *rq;
struct {
u64 power;
@@ -1178,13 +1165,13 @@ int live_rps_power(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
- saved_heartbeat = engine_heartbeat_disable(engine);
+ engine_heartbeat_disable(engine);
rq = igt_spinner_create_request(&spin,
engine->kernel_context,
MI_NOOP);
if (IS_ERR(rq)) {
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
err = PTR_ERR(rq);
break;
}
@@ -1195,7 +1182,7 @@ int live_rps_power(void *arg)
pr_err("%s: RPS spinner did not start\n",
engine->name);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
intel_gt_set_wedged(engine->gt);
err = -EIO;
break;
@@ -1208,7 +1195,7 @@ int live_rps_power(void *arg)
min.power = measure_power_at(rps, &min.freq);
igt_spinner_end(&spin);
- engine_heartbeat_enable(engine, saved_heartbeat);
+ engine_heartbeat_enable(engine);
pr_info("%s: min:%llumW @ %uMHz, max:%llumW @ %uMHz\n",
engine->name,
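The cmp_u64()/cmp_u32() fixes above address the classic sort-comparator bug: the old code compared the element pointers A and B rather than the values they point to, so the resulting order was effectively arbitrary. A minimal user-space sketch of the corrected pattern (plain qsort() instead of the kernel's sort(), purely illustrative):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Compare the pointed-to values, not the pointers themselves. */
static int cmp_u64(const void *A, const void *B)
{
	const uint64_t *a = A, *b = B;

	if (*a < *b)
		return -1;
	else if (*a > *b)
		return 1;
	else
		return 0;
}

int main(void)
{
	uint64_t v[] = { 30, 10, 20 };

	qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp_u64);
	printf("%llu %llu %llu\n",		/* prints: 10 20 30 */
	       (unsigned long long)v[0],
	       (unsigned long long)v[1],
	       (unsigned long long)v[2]);
	return 0;
}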
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index c2578a0f2f14..ef1c35073dc0 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -751,22 +751,20 @@ out_free:
return err;
}
-static void engine_heartbeat_disable(struct intel_engine_cs *engine,
- unsigned long *saved)
+static void engine_heartbeat_disable(struct intel_engine_cs *engine)
{
- *saved = engine->props.heartbeat_interval_ms;
engine->props.heartbeat_interval_ms = 0;
intel_engine_pm_get(engine);
intel_engine_park_heartbeat(engine);
}
-static void engine_heartbeat_enable(struct intel_engine_cs *engine,
- unsigned long saved)
+static void engine_heartbeat_enable(struct intel_engine_cs *engine)
{
intel_engine_pm_put(engine);
- engine->props.heartbeat_interval_ms = saved;
+ engine->props.heartbeat_interval_ms =
+ engine->defaults.heartbeat_interval_ms;
}
static int live_hwsp_rollover_kernel(void *arg)
@@ -785,10 +783,9 @@ static int live_hwsp_rollover_kernel(void *arg)
struct intel_context *ce = engine->kernel_context;
struct intel_timeline *tl = ce->timeline;
struct i915_request *rq[3] = {};
- unsigned long heartbeat;
int i;
- engine_heartbeat_disable(engine, &heartbeat);
+ engine_heartbeat_disable(engine);
if (intel_gt_wait_for_idle(gt, HZ / 2)) {
err = -EIO;
goto out;
@@ -839,7 +836,7 @@ static int live_hwsp_rollover_kernel(void *arg)
out:
for (i = 0; i < ARRAY_SIZE(rq); i++)
i915_request_put(rq[i]);
- engine_heartbeat_enable(engine, heartbeat);
+ engine_heartbeat_enable(engine);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index 5ed323254ee1..32785463ec9e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -623,6 +623,8 @@ err_request:
err = -EINVAL;
goto out_unpin;
}
+ } else {
+ rsvd = 0;
}
expect = results[0];
diff --git a/drivers/gpu/drm/i915/gt/shaders/README b/drivers/gpu/drm/i915/gt/shaders/README
new file mode 100644
index 000000000000..e7e96d7073c7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/README
@@ -0,0 +1,46 @@
+ASM sources for auto generated shaders
+======================================
+
+The i915/gt/hsw_clear_kernel.c and i915/gt/ivb_clear_kernel.c files contain
+pre-compiled batch chunks that will clear any residual render cache during
+context switch.
+
+They are generated from their respective platform ASM files present in the
+i915/gt/shaders/clear_kernel directory.
+
+The generated .c files should never be modified directly. Instead, any modification
+needs to be made in the respective ASM files, and the build instructions below
+need to be followed.
+
+Building
+========
+
+Environment
+-----------
+
+Scripts from IGT GPU Tools and Mesa's i965 instruction assembler are used
+for building.
+
+Please make sure Mesa is built with "-Dtools=intel" and
+"-Ddri-drivers=i965", and run the script from the IGT source root directory.
+
+The instructions below assume:
+ * IGT GPU Tools source code is located in your home directory (~) as ~/igt
+ * Mesa source code is located in your home directory (~) as ~/mesa
+ and built under the ~/mesa/build directory
+ * Linux kernel source code is under your home directory (~) as ~/linux
+
+Instructions
+------------
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/ivb.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g ivb \
+ -m ~/mesa/build/src/intel/tools/i965_asm
+
+~ $ cp ~/linux/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm \
+ ~/igt/lib/i915/shaders/clear_kernel/hsw.asm
+~ $ cd ~/igt
+igt $ ./scripts/generate_clear_kernel.sh -g hsw \
+ -m ~/mesa/build/src/intel/tools/i965_asm
\ No newline at end of file
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
new file mode 100644
index 000000000000..5fdf384bb621
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/hsw.asm
@@ -0,0 +1,119 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout:
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 352D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -32D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -64D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
new file mode 100644
index 000000000000..97c7ac9e3854
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/shaders/clear_kernel/ivb.asm
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+/*
+ * Kernel for PAVP buffer clear.
+ *
+ * 1. Clear all 64 GRF registers assigned to the kernel with designated value;
+ * 2. Write 32x16 block of all "0" to render target buffer which indirectly clears
+ * 512 bytes of Render Cache.
+ */
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/**
+ * Curbe Format
+ *
+ * DW 1.0 - Block Offset to write Render Cache
+ * DW 1.1 [15:0] - Clear Word
+ * DW 1.2 - Delay iterations
+ * DW 1.3 - Enable Instrumentation (only for debug)
+ * DW 1.4 - Rsvd (intended for context ID)
+ * DW 1.5 - [31:16]:SliceCount, [15:0]:SubSlicePerSliceCount
+ * DW 1.6 - Rsvd MBZ (intended for Enable Wait on Total Thread Count)
+ * DW 1.7 - Rsvd MBZ (intended for Total Thread Count)
+ *
+ * Binding Table
+ *
+ * BTI 0: 2D Surface to help clear L3 (Render/Data Cache)
+ * BTI 1: Wait/Instrumentation Buffer
+ * Size : (SliceCount * SubSliceCount * 16 EUs/SubSlice) rows * (16 threads/EU) cols (Format R32_UINT)
+ * Expected to be initialized to 0 by driver/another kernel
+ * Layout :
+ * RowN: Histogram for EU-N: (SliceID*SubSlicePerSliceCount + SSID)*16 + EUID [assume max 16 EUs / SS]
+ * Col-k[DW-k]: Threads Executed on ThreadID-k for EU-N
+ */
+add(1) g1.2<1>UD g1.2<0,1,0>UD 0x00000001UD { align1 1N }; /* Loop count to delay kernel: Init to (g1.2 + 1) */
+cmp.z.f0.0(1) null<1>UD g1.3<0,1,0>UD 0x00000000UD { align1 1N };
+(+f0.0) jmpi(1) 44D { align1 WE_all 1N };
+
+/**
+ * State Register has info on where this thread is running
+ * IVB: sr0.0 :: [15:13]: MBZ, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ * HSW: sr0.0 :: 15: MBZ, [14:13]: SliceID, 12: HSID (Half-Slice ID), [11:8]EUID, [2:0] ThreadSlotID
+ */
+mov(8) g3<1>UD 0x00000000UD { align1 1Q };
+shr(1) g3<1>D sr0<0,1,0>D 12D { align1 1N };
+and(1) g3<1>D g3<0,1,0>D 1D { align1 1N }; /* g3 has HSID */
+shr(1) g3.1<1>D sr0<0,1,0>D 13D { align1 1N };
+and(1) g3.1<1>D g3.1<0,1,0>D 3D { align1 1N }; /* g3.1 has sliceID */
+mul(1) g3.5<1>D g3.1<0,1,0>D g1.10<0,1,0>UW { align1 1N };
+add(1) g3<1>D g3<0,1,0>D g3.5<0,1,0>D { align1 1N }; /* g3 = sliceID * SubSlicePerSliceCount + HSID */
+shr(1) g3.2<1>D sr0<0,1,0>D 8D { align1 1N };
+and(1) g3.2<1>D g3.2<0,1,0>D 15D { align1 1N }; /* g3.2 = EUID */
+mul(1) g3.4<1>D g3<0,1,0>D 16D { align1 1N };
+add(1) g3.2<1>D g3.2<0,1,0>D g3.4<0,1,0>D { align1 1N }; /* g3.2 now points to EU row number (Y-pixel = V address ) in instrumentation surf */
+
+mov(8) g5<1>UD 0x00000000UD { align1 1Q };
+and(1) g3.3<1>D sr0<0,1,0>D 7D { align1 1N };
+mul(1) g3.3<1>D g3.3<0,1,0>D 4D { align1 1N };
+
+mov(8) g4<1>UD g0<8,8,1>UD { align1 1Q }; /* Initialize message header with g0 */
+mov(1) g4<1>UD g3.3<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.1<1>UD g3.2<0,1,0>UD { align1 1N }; /* Block offset */
+mov(1) g4.2<1>UD 0x00000003UD { align1 1N }; /* Block size (1 row x 4 bytes) */
+and(1) g4.3<1>UD g4.3<0,1,0>UW 0xffffffffUD { align1 1N };
+
+/* Media block read to fetch current value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x02190001
+ render MsgDesc: media block read MsgCtrl = 0x0 Surface = 1 mlen 1 rlen 1 { align1 1Q };
+add(1) g5<1>D g5<0,1,0>D 1D { align1 1N };
+
+/* Media block write for updated value at specified location in instrumentation buffer */
+sendc(8) g5<1>UD g4<8,8,1>F 0x040a8001
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 1 mlen 2 rlen 0 { align1 1Q };
+/* Delay thread for specified parameter */
+add.nz.f0.0(1) g1.2<1>UD g1.2<0,1,0>UD -1D { align1 1N };
+(+f0.0) jmpi(1) -4D { align1 WE_all 1N };
+
+/* Store designated "clear GRF" value */
+mov(1) f0.1<1>UW g1.2<0,1,0>UW { align1 1N };
+
+/* Initialize looping parameters */
+mov(1) a0<1>D 0D { align1 1N }; /* Initialize a0.0:w=0 */
+mov(1) a0.4<1>W 127W { align1 1N }; /* Loop count. Each loop contains 16 GRF's */
+
+/* Write 32x16 all "0" block */
+mov(8) g2<1>UD g0<8,8,1>UD { align1 1Q };
+mov(8) g127<1>UD g0<8,8,1>UD { align1 1Q };
+mov(2) g2<1>UD g1<2,2,1>UW { align1 1N };
+mov(1) g2.2<1>UD 0x000f000fUD { align1 1N }; /* Block size (16x16) */
+and(1) g2.3<1>UD g2.3<0,1,0>UW 0xffffffefUD { align1 1N };
+mov(16) g3<1>UD 0x00000000UD { align1 1H };
+mov(16) g4<1>UD 0x00000000UD { align1 1H };
+mov(16) g5<1>UD 0x00000000UD { align1 1H };
+mov(16) g6<1>UD 0x00000000UD { align1 1H };
+mov(16) g7<1>UD 0x00000000UD { align1 1H };
+mov(16) g8<1>UD 0x00000000UD { align1 1H };
+mov(16) g9<1>UD 0x00000000UD { align1 1H };
+mov(16) g10<1>UD 0x00000000UD { align1 1H };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+add(1) g2<1>UD g1<0,1,0>UW 0x0010UW { align1 1N };
+sendc(8) null<1>UD g2<8,8,1>F 0x120a8000
+ render MsgDesc: media block write MsgCtrl = 0x0 Surface = 0 mlen 9 rlen 0 { align1 1Q };
+
+/* Now, clear all GRF registers */
+add.nz.f0.0(1) a0.4<1>W a0.4<0,1,0>W -1W { align1 1N };
+mov(16) g[a0]<1>UW f0.1<0,1,0>UW { align1 1H };
+add(1) a0<1>D a0<0,1,0>D 32D { align1 1N };
+(+f0.0) jmpi(1) -8D { align1 WE_all 1N };
+
+/* Terminate the thread */
+sendc(8) null<1>UD g127<8,8,1>F 0x82000010
+ thread_spawner MsgDesc: mlen 1 rlen 0 { align1 1Q EOT };
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index ec47d4114554..62e6a14ad58e 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -66,7 +66,7 @@ static inline int mmio_diff_handler(struct intel_gvt *gvt,
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kmalloc(sizeof(*node), GFP_ATOMIC);
if (!node)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 3e88e3b5c43a..fadd2adb8030 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1726,13 +1726,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(1)) {
+ if (IS_MASKED_BITS_ENABLED(data, 1)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
- data & _MASKED_BIT_ENABLE(2)) {
+ IS_MASKED_BITS_ENABLED(data, 2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
@@ -1741,14 +1741,14 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
* pvinfo, if not, we will treat this guest as non-gvtg-aware
* guest, and stop emulating its cfg space, mmio, gtt, etc.
*/
- if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) ||
- (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)))
- && !vgpu->pv_notified) {
+ if ((IS_MASKED_BITS_ENABLED(data, GFX_PPGTT_ENABLE) ||
+ IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE)) &&
+ !vgpu->pv_notified) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
}
- if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))
- || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
+ if (IS_MASKED_BITS_ENABLED(data, GFX_RUN_LIST_ENABLE) ||
+ IS_MASKED_BITS_DISABLED(data, GFX_RUN_LIST_ENABLE)) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
gvt_dbg_core("EXECLIST %s on ring %s\n",
@@ -1809,7 +1809,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if (data & _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET))
+ if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
@@ -1827,7 +1827,8 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
- if (data & _MASKED_BIT_ENABLE(0x10) || data & _MASKED_BIT_ENABLE(0x8))
+ if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
+ IS_MASKED_BITS_ENABLED(data, 0x8))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -3055,6 +3056,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(_MMIO(0x72380), D_SKL_PLUS);
MMIO_D(_MMIO(0x7239c), D_SKL_PLUS);
MMIO_D(_MMIO(_PLANE_SURF_3_A), D_SKL_PLUS);
+ MMIO_D(_MMIO(_PLANE_SURF_3_B), D_SKL_PLUS);
MMIO_D(CSR_SSP_BASE, D_SKL_PLUS);
MMIO_D(CSR_HTP_SKL, D_SKL_PLUS);
@@ -3131,8 +3133,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_D(GAMT_CHKN_BIT_REG, D_KBL);
- MMIO_D(GEN9_CTX_PREEMPT_REG, D_KBL | D_SKL);
+ MMIO_D(GAMT_CHKN_BIT_REG, D_KBL | D_CFL);
+ MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 970704b18f23..3b25e7fe32f6 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -54,8 +54,8 @@ bool is_inhibit_context(struct intel_context *ce);
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req);
-#define IS_RESTORE_INHIBIT(a) \
- (_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) == \
- ((a) & _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)))
+
+#define IS_RESTORE_INHIBIT(a) \
+ IS_MASKED_BITS_ENABLED(a, CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT)
#endif
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 5b66e14c5b7b..b88e033cbed4 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -94,6 +94,11 @@
#define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
+#define IS_MASKED_BITS_ENABLED(_val, _b) \
+ (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+#define IS_MASKED_BITS_DISABLED(_val, _b) \
+ ((_val) & _MASKED_BIT_DISABLE(_b))
+
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
#define FORCEWAKE_BLITTER_GEN9_REG 0xa188
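The IS_MASKED_BITS_ENABLED()/IS_MASKED_BITS_DISABLED() helpers added above test writes to i915 "masked" registers, where the upper 16 bits of a write act as a per-bit write enable for the lower 16 bits. A self-contained sketch of that convention, using local stand-ins for the _MASKED_BIT_* macros (illustrative only, not the driver headers):

#include <assert.h>
#include <stdint.h>

/* Local stand-ins for the masked-register convention: the high 16 bits of a
 * write select which of the low 16 bits actually change.
 */
#define MASKED_BIT_ENABLE(b)	(((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b)	((b) << 16)

#define IS_MASKED_BITS_ENABLED(val, b) \
	(((val) & MASKED_BIT_ENABLE(b)) == MASKED_BIT_ENABLE(b))
#define IS_MASKED_BITS_DISABLED(val, b) \
	((val) & MASKED_BIT_DISABLE(b))

int main(void)
{
	uint32_t data;

	data = MASKED_BIT_ENABLE(1u << 2);	/* write: set bit 2 */
	assert(IS_MASKED_BITS_ENABLED(data, 1u << 2));

	data = MASKED_BIT_DISABLE(1u << 2);	/* write: clear bit 2 */
	assert(!IS_MASKED_BITS_ENABLED(data, 1u << 2));
	assert(IS_MASKED_BITS_DISABLED(data, 1u << 2));
	return 0;
}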
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bca036ac6621..e7532e7d74e9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -230,7 +230,7 @@ static int per_file_stats(int id, void *ptr, void *data)
struct file_stats *stats = data;
struct i915_vma *vma;
- if (!kref_get_unless_zero(&obj->base.refcount))
+ if (IS_ERR_OR_NULL(obj) || !kref_get_unless_zero(&obj->base.refcount))
return 0;
stats->count++;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index adb9bf34cf97..ae99a9190200 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -410,8 +410,6 @@ struct intel_fbc {
int adjusted_x;
int adjusted_y;
- int y;
-
u16 pixel_blend_mode;
} plane;
@@ -420,6 +418,8 @@ struct intel_fbc {
unsigned int stride;
u64 modifier;
} fb;
+
+ unsigned int fence_y_offset;
u16 gen9_wa_cfb_stride;
s8 fence_id;
} state_cache;
@@ -435,15 +435,16 @@ struct intel_fbc {
struct {
enum pipe pipe;
enum i9xx_plane_id i9xx_plane;
- unsigned int fence_y_offset;
} crtc;
struct {
const struct drm_format_info *format;
unsigned int stride;
+ u64 modifier;
} fb;
int cfb_size;
+ unsigned int fence_y_offset;
u16 gen9_wa_cfb_stride;
s8 fence_id;
bool plane_visible;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 4dc601dffc08..284cf078135a 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3125,6 +3125,7 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv)
val = I915_READ(GEN11_DE_HPD_IMR);
val &= ~hotplug_irqs;
+ val |= ~enabled_irqs & hotplug_irqs;
I915_WRITE(GEN11_DE_HPD_IMR, val);
POSTING_READ(GEN11_DE_HPD_IMR);
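The gen11_hpd_irq_setup() change above stops unmasking hotplug pins that are not actually enabled: after clearing all hotplug bits from the IMR, the bits for disabled pins are written back. A tiny stand-alone sketch of the mask arithmetic (register and bit values are made up for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t hotplug_irqs = 0x00f0;		/* bits owned by hotplug */
	uint32_t enabled_irqs = 0x0030;		/* subset actually enabled */
	uint32_t imr = 0xffff;			/* interrupt mask register */

	imr &= ~hotplug_irqs;			/* unmask all hotplug bits... */
	imr |= ~enabled_irqs & hotplug_irqs;	/* ...then re-mask the disabled ones */

	assert((imr & 0x0030) == 0);		/* enabled pins end up unmasked */
	assert((imr & 0x00c0) == 0x00c0);	/* disabled pins stay masked */
	return 0;
}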
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 25329b7600c9..014f34c047d5 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1592,6 +1592,7 @@ static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs,
u32 d;
cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM;
+ cmd |= MI_SRM_LRM_GLOBAL_GTT;
if (INTEL_GEN(stream->perf->i915) >= 8)
cmd++;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index e991a707bdb7..962ded9ce73f 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -269,12 +269,48 @@ static bool exclusive_mmio_access(const struct drm_i915_private *i915)
return IS_GEN(i915, 7);
}
+static void engine_sample(struct intel_engine_cs *engine, unsigned int period_ns)
+{
+ struct intel_engine_pmu *pmu = &engine->pmu;
+ bool busy;
+ u32 val;
+
+ val = ENGINE_READ_FW(engine, RING_CTL);
+ if (val == 0) /* powerwell off => engine idle */
+ return;
+
+ if (val & RING_WAIT)
+ add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
+ if (val & RING_WAIT_SEMAPHORE)
+ add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
+
+ /* No need to sample when busy stats are supported. */
+ if (intel_engine_supports_stats(engine))
+ return;
+
+ /*
+ * While waiting on a semaphore or event, MI_MODE reports the
+ * ring as idle. However, previously using the seqno, and with
+ * execlists sampling, we account for the ring waiting as the
+ * engine being busy. Therefore, we record the sample as being
+ * busy if either waiting or !idle.
+ */
+ busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
+ if (!busy) {
+ val = ENGINE_READ_FW(engine, RING_MI_MODE);
+ busy = !(val & MODE_IDLE);
+ }
+ if (busy)
+ add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
+}
+
static void
engines_sample(struct intel_gt *gt, unsigned int period_ns)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ unsigned long flags;
if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0)
return;
@@ -283,53 +319,17 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
return;
for_each_engine(engine, gt, id) {
- struct intel_engine_pmu *pmu = &engine->pmu;
- spinlock_t *mmio_lock;
- unsigned long flags;
- bool busy;
- u32 val;
-
if (!intel_engine_pm_get_if_awake(engine))
continue;
- mmio_lock = NULL;
- if (exclusive_mmio_access(i915))
- mmio_lock = &engine->uncore->lock;
-
- if (unlikely(mmio_lock))
- spin_lock_irqsave(mmio_lock, flags);
-
- val = ENGINE_READ_FW(engine, RING_CTL);
- if (val == 0) /* powerwell off => engine idle */
- goto skip;
-
- if (val & RING_WAIT)
- add_sample(&pmu->sample[I915_SAMPLE_WAIT], period_ns);
- if (val & RING_WAIT_SEMAPHORE)
- add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns);
-
- /* No need to sample when busy stats are supported. */
- if (intel_engine_supports_stats(engine))
- goto skip;
-
- /*
- * While waiting on a semaphore or event, MI_MODE reports the
- * ring as idle. However, previously using the seqno, and with
- * execlists sampling, we account for the ring waiting as the
- * engine being busy. Therefore, we record the sample as being
- * busy if either waiting or !idle.
- */
- busy = val & (RING_WAIT_SEMAPHORE | RING_WAIT);
- if (!busy) {
- val = ENGINE_READ_FW(engine, RING_MI_MODE);
- busy = !(val & MODE_IDLE);
+ if (exclusive_mmio_access(i915)) {
+ spin_lock_irqsave(&engine->uncore->lock, flags);
+ engine_sample(engine, period_ns);
+ spin_unlock_irqrestore(&engine->uncore->lock, flags);
+ } else {
+ engine_sample(engine, period_ns);
}
- if (busy)
- add_sample(&pmu->sample[I915_SAMPLE_BUSY], period_ns);
-skip:
- if (unlikely(mmio_lock))
- spin_unlock_irqrestore(mmio_lock, flags);
intel_engine_pm_put_async(engine);
}
}
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 5003a71113cb..8aa7866ec6b6 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -42,7 +42,7 @@ enum {
* active request.
*/
#define I915_PRIORITY_UNPREEMPTABLE INT_MAX
-#define I915_PRIORITY_BARRIER INT_MAX
+#define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
struct i915_priolist {
struct list_head requests[I915_PRIORITY_COUNT];
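Lowering I915_PRIORITY_BARRIER to one step below I915_PRIORITY_UNPREEMPTABLE is what the new live_timeslice_nopreempt selftest relies on: a heartbeat barrier may outrank everything else, but it must not be able to timeslice past a request flagged no-preempt. A compile-time sketch of the intended ordering (values copied from the hunk above, the check itself is illustrative):

#include <limits.h>

#define I915_PRIORITY_UNPREEMPTABLE	INT_MAX
#define I915_PRIORITY_BARRIER		(I915_PRIORITY_UNPREEMPTABLE - 1)

/* A barrier must never outrank an unpreemptable (no-preempt) request. */
_Static_assert(I915_PRIORITY_BARRIER < I915_PRIORITY_UNPREEMPTABLE,
	       "barrier must rank below unpreemptable requests");

int main(void) { return 0; }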
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 7717581350bd..06cd1d28a176 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -7896,7 +7896,7 @@ enum {
/* GEN7 chicken */
#define GEN7_COMMON_SLICE_CHICKEN1 _MMIO(0x7010)
- #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1 << 10) | (1 << 26))
+ #define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC (1 << 10)
#define GEN9_RHWO_OPTIMIZATION_DISABLE (1 << 14)
#define COMMON_SLICE_CHICKEN2 _MMIO(0x7014)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index fc14ebf9a0b7..1f9cd33b35cb 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -104,6 +104,7 @@ vma_create(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
const struct i915_ggtt_view *view)
{
+ struct i915_vma *pos = ERR_PTR(-E2BIG);
struct i915_vma *vma;
struct rb_node *rb, **p;
@@ -184,7 +185,6 @@ vma_create(struct drm_i915_gem_object *obj,
rb = NULL;
p = &obj->vma.tree.rb_node;
while (*p) {
- struct i915_vma *pos;
long cmp;
rb = *p;
@@ -196,16 +196,12 @@ vma_create(struct drm_i915_gem_object *obj,
* and dispose of ours.
*/
cmp = i915_vma_compare(pos, vm, view);
- if (cmp == 0) {
- spin_unlock(&obj->vma.lock);
- i915_vma_free(vma);
- return pos;
- }
-
if (cmp < 0)
p = &rb->rb_right;
- else
+ else if (cmp > 0)
p = &rb->rb_left;
+ else
+ goto err_unlock;
}
rb_link_node(&vma->obj_node, rb, p);
rb_insert_color(&vma->obj_node, &obj->vma.tree);
@@ -228,8 +224,9 @@ vma_create(struct drm_i915_gem_object *obj,
err_unlock:
spin_unlock(&obj->vma.lock);
err_vma:
+ i915_vm_put(vm);
i915_vma_free(vma);
- return ERR_PTR(-E2BIG);
+ return pos;
}
static struct i915_vma *
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 696491d71a1d..07f663cd2d1c 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -6830,16 +6830,6 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
- I915_WRITE(_3D_CHICKEN2,
- _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
- _3D_CHICKEN2_WM_READ_PIPELINED);
-
- /* WaDisableRenderCachePipelinedFlush:ilk */
- I915_WRITE(CACHE_MODE_0,
- _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
- /* WaDisable_RenderCache_OperationalFlush:ilk */
- I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
g4x_disable_trickle_feed(dev_priv);
@@ -6902,27 +6892,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
- /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
- I915_WRITE(_3D_CHICKEN,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
-
- /* WaDisable_RenderCache_OperationalFlush:snb */
- I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
- /*
- * BSpec recoomends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- I915_WRITE(GEN6_GT_MODE,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
- I915_WRITE(CACHE_MODE_0,
- _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
-
I915_WRITE(GEN6_UCGCTL1,
I915_READ(GEN6_UCGCTL1) |
GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
@@ -6945,18 +6914,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
- /* WaStripsFansDisableFastClipPerformanceFix:snb */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
-
- /*
- * Bspec says:
- * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
- * 3DSTATE_SF number of SF output attributes is more than 16."
- */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
-
/*
* According to the spec the following bits should be
* set in order to enable memory self-refresh and fbc:
@@ -6986,24 +6943,6 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv)
gen6_check_mch_setup(dev_priv);
}
-static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
-{
- u32 reg = I915_READ(GEN7_FF_THREAD_MODE);
-
- /*
- * WaVSThreadDispatchOverride:ivb,vlv
- *
- * This actually overrides the dispatch
- * mode for all thread types.
- */
- reg &= ~GEN7_FF_SCHED_MASK;
- reg |= GEN7_FF_TS_SCHED_HW;
- reg |= GEN7_FF_VS_SCHED_HW;
- reg |= GEN7_FF_DS_SCHED_HW;
-
- I915_WRITE(GEN7_FF_THREAD_MODE, reg);
-}
-
static void lpt_init_clock_gating(struct drm_i915_private *dev_priv)
{
/*
@@ -7230,45 +7169,10 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* L3 caching of data atomics doesn't work -- disable it. */
- I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
- I915_WRITE(HSW_ROW_CHICKEN3,
- _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
-
/* This is required by WaCatErrorRejectionIssue:hsw */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
- I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
- GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
-
- /* WaVSRefCountFullforceMissDisable:hsw */
- I915_WRITE(GEN7_FF_THREAD_MODE,
- I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
-
- /* WaDisable_RenderCache_OperationalFlush:hsw */
- I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
- /* enable HiZ Raw Stall Optimization */
- I915_WRITE(CACHE_MODE_0_GEN7,
- _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
-
- /* WaDisable4x2SubspanOptimization:hsw */
- I915_WRITE(CACHE_MODE_1,
- _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- I915_WRITE(GEN7_GT_MODE,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
- /* WaSampleCChickenBitEnable:hsw */
- I915_WRITE(HALF_SLICE_CHICKEN3,
- _MASKED_BIT_ENABLE(HSW_SAMPLE_C_PERFORMANCE));
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -7282,32 +7186,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
- /* WaDisableEarlyCull:ivb */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
-
/* WaDisableBackToBackFlipFix:ivb */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
- /* WaDisablePSDDualDispatchEnable:ivb */
- if (IS_IVB_GT1(dev_priv))
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
- /* WaDisable_RenderCache_OperationalFlush:ivb */
- I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
- /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
- I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
- GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
-
- /* WaApplyL3ControlAndL3ChickenMode:ivb */
- I915_WRITE(GEN7_L3CNTLREG1,
- GEN7_WA_FOR_GEN7_L3_CONTROL);
- I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
- GEN7_WA_L3_CHICKEN_MODE);
if (IS_IVB_GT1(dev_priv))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
@@ -7319,10 +7202,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
- /* WaForceL3Serialization:ivb */
- I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
- ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:ivb workaround.
@@ -7337,29 +7216,6 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
g4x_disable_trickle_feed(dev_priv);
- gen7_setup_fixed_func_scheduler(dev_priv);
-
- if (0) { /* causes HiZ corruption on ivb:gt1 */
- /* enable HiZ Raw Stall Optimization */
- I915_WRITE(CACHE_MODE_0_GEN7,
- _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
- }
-
- /* WaDisable4x2SubspanOptimization:ivb */
- I915_WRITE(CACHE_MODE_1,
- _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- I915_WRITE(GEN7_GT_MODE,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
@@ -7373,28 +7229,11 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
{
- /* WaDisableEarlyCull:vlv */
- I915_WRITE(_3D_CHICKEN3,
- _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
-
/* WaDisableBackToBackFlipFix:vlv */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
- /* WaPsdDispatchEnable:vlv */
- /* WaDisablePSDDualDispatchEnable:vlv */
- I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
- GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
-
- /* WaDisable_RenderCache_OperationalFlush:vlv */
- I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
- /* WaForceL3Serialization:vlv */
- I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
- ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
-
/* WaDisableDopClockGating:vlv */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
@@ -7404,8 +7243,6 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- gen7_setup_fixed_func_scheduler(dev_priv);
-
/*
* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
* This implements the WaDisableRCZUnitClockGating:vlv workaround.
@@ -7420,30 +7257,6 @@ static void vlv_init_clock_gating(struct drm_i915_private *dev_priv)
I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
/*
- * BSpec says this must be set, even though
- * WaDisable4x2SubspanOptimization isn't listed for VLV.
- */
- I915_WRITE(CACHE_MODE_1,
- _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- I915_WRITE(GEN7_GT_MODE,
- _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
-
- /*
- * WaIncreaseL3CreditsForVLVB0:vlv
- * This is the hardware default actually.
- */
- I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
-
- /*
* WaDisableVLVClockGating_VBIIssue:vlv
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
@@ -7495,13 +7308,6 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv)
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
- /* WaDisableRenderCachePipelinedFlush */
- I915_WRITE(CACHE_MODE_0,
- _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
-
- /* WaDisable_RenderCache_OperationalFlush:g4x */
- I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
-
g4x_disable_trickle_feed(dev_priv);
}
@@ -7517,11 +7323,6 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(uncore,
MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-
- /* WaDisable_RenderCache_OperationalFlush:gen4 */
- intel_uncore_write(uncore,
- CACHE_MODE_0,
- _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7534,9 +7335,6 @@ static void i965g_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(RENCLK_GATE_D2, 0);
I915_WRITE(MI_ARB_STATE,
_MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
-
- /* WaDisable_RenderCache_OperationalFlush:gen4 */
- I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 6a2be7d0dd95..6090ce35226b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -21,6 +21,7 @@ selftest(fence, i915_sw_fence_mock_selftests)
selftest(scatterlist, scatterlist_mock_selftests)
selftest(syncmap, i915_syncmap_mock_selftests)
selftest(uncore, intel_uncore_mock_selftests)
+selftest(ring, intel_ring_mock_selftests)
selftest(engine, intel_engine_cs_mock_selftests)
selftest(timelines, intel_timeline_mock_selftests)
selftest(requests, i915_request_mock_selftests)
diff --git a/drivers/gpu/drm/lima/lima_pp.c b/drivers/gpu/drm/lima/lima_pp.c
index 33f01383409c..a5c95bed08c0 100644
--- a/drivers/gpu/drm/lima/lima_pp.c
+++ b/drivers/gpu/drm/lima/lima_pp.c
@@ -271,6 +271,8 @@ void lima_pp_fini(struct lima_ip *ip)
int lima_pp_bcast_resume(struct lima_ip *ip)
{
+ /* PP has been reset by individual PP resume */
+ ip->data.async_reset = false;
return 0;
}
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index 04e1d38d41f7..08802e5177f6 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -812,7 +812,7 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc *crtc = &pipe->crtc;
struct drm_plane *plane = &pipe->plane;
struct drm_device *drm = crtc->dev;
- struct mcde *mcde = drm->dev_private;
+ struct mcde *mcde = to_mcde(drm);
const struct drm_display_mode *mode = &cstate->mode;
struct drm_framebuffer *fb = plane->state->fb;
u32 format = fb->format->format;
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 84f3e2dbd77b..80082d6dce3a 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -209,7 +209,6 @@ static int mcde_modeset_init(struct drm_device *drm)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- drm_fbdev_generic_setup(drm, 32);
return 0;
}
@@ -264,6 +263,8 @@ static int mcde_drm_bind(struct device *dev)
if (ret < 0)
goto unbind;
+ drm_fbdev_generic_setup(drm, 32);
+
return 0;
unbind:
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index c420f5a3d33b..aa74aac3cbcc 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -6,12 +6,12 @@ config DRM_MEDIATEK
depends on COMMON_CLK
depends on HAVE_ARM_SMCCC
depends on OF
+ depends on MTK_MMSYS
select DRM_GEM_CMA_HELPER
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
- select MTK_MMSYS
select MTK_SMI
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index fe46c4bac64d..7cd8f415fd02 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -193,7 +193,6 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
int ret;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
if (ret) {
@@ -213,7 +212,6 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
{
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
}
@@ -258,7 +256,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
int ret;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
if (WARN_ON(!crtc->state))
return -EINVAL;
@@ -299,7 +296,6 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
goto err_mutex_unprepare;
}
- DRM_DEBUG_DRIVER("mediatek_ddp_ddp_path_setup\n");
for (i = 0; i < mtk_crtc->ddp_comp_nr - 1; i++) {
mtk_mmsys_ddp_connect(mtk_crtc->mmsys_dev,
mtk_crtc->ddp_comp[i]->id,
@@ -349,7 +345,6 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
struct drm_crtc *crtc = &mtk_crtc->base;
int i;
- DRM_DEBUG_DRIVER("%s\n", __func__);
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
if (i == 1)
@@ -831,7 +826,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_crtc->cmdq_client =
- cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+ cmdq_mbox_create(mtk_crtc->mmsys_dev,
+ drm_crtc_index(&mtk_crtc->base),
2000);
if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 6bd369434d9d..040a8f393fe2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -444,7 +444,6 @@ static int mtk_drm_probe(struct platform_device *pdev)
if (!private)
return -ENOMEM;
- private->data = of_device_get_match_data(dev);
private->mmsys_dev = dev->parent;
if (!private->mmsys_dev) {
dev_err(dev, "Failed to get MMSYS device\n");
@@ -514,7 +513,8 @@ static int mtk_drm_probe(struct platform_device *pdev)
goto err_node;
}
- ret = mtk_ddp_comp_init(dev, node, comp, comp_id, NULL);
+ ret = mtk_ddp_comp_init(dev->parent, node, comp,
+ comp_id, NULL);
if (ret) {
of_node_put(node);
goto err_node;
@@ -571,7 +571,6 @@ static int mtk_drm_sys_suspend(struct device *dev)
int ret;
ret = drm_mode_config_helper_suspend(drm);
- DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
return ret;
}
@@ -583,7 +582,6 @@ static int mtk_drm_sys_resume(struct device *dev)
int ret;
ret = drm_mode_config_helper_resume(drm);
- DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
return ret;
}
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index c2bd683a87c8..92141a19681b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -164,6 +164,16 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
true, true);
}
+static void mtk_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
+
+ state->pending.enable = false;
+ wmb(); /* Make sure the above parameter is set before update */
+ state->pending.dirty = true;
+}
+
static void mtk_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
@@ -178,6 +188,11 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
if (!crtc || WARN_ON(!fb))
return;
+ if (!plane->state->visible) {
+ mtk_plane_atomic_disable(plane, old_state);
+ return;
+ }
+
gem = fb->obj[0];
mtk_gem = to_mtk_gem_obj(gem);
addr = mtk_gem->dma_addr;
@@ -200,16 +215,6 @@ static void mtk_plane_atomic_update(struct drm_plane *plane,
state->pending.dirty = true;
}
-static void mtk_plane_atomic_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct mtk_plane_state *state = to_mtk_plane_state(plane->state);
-
- state->pending.enable = false;
- wmb(); /* Make sure the above parameter is set before update */
- state->pending.dirty = true;
-}
-
static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
.prepare_fb = drm_gem_fb_prepare_fb,
.atomic_check = mtk_plane_atomic_check,
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 270bf22c98fe..02ac55c13a80 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -316,10 +316,7 @@ static void mtk_dsi_lane0_ulp_mode_leave(struct mtk_dsi *dsi)
static bool mtk_dsi_clk_hs_state(struct mtk_dsi *dsi)
{
- u32 tmp_reg1;
-
- tmp_reg1 = readl(dsi->regs + DSI_PHY_LCCON);
- return ((tmp_reg1 & LC_HS_TX_EN) == 1) ? true : false;
+ return readl(dsi->regs + DSI_PHY_LCCON) & LC_HS_TX_EN;
}
static void mtk_dsi_clk_hs_mode(struct mtk_dsi *dsi, bool enter)
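
The hunk above replaces an equality test against 1 with a plain bitwise test. A minimal standalone sketch of why that matters; the bit position used here is only illustrative, not the real LC_HS_TX_EN layout:

#include <stdio.h>
#include <stdint.h>

#define LC_HS_TX_EN (1U << 2)   /* assumed position, for illustration only */

int main(void)
{
        uint32_t reg = LC_HS_TX_EN;                   /* flag set in hardware */

        printf("old test: %d\n", (reg & LC_HS_TX_EN) == 1);  /* 0 - misses it */
        printf("new test: %d\n", !!(reg & LC_HS_TX_EN));     /* 1 - correct   */
        return 0;
}
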
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5feb760617cb..1eebe310470a 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1630,8 +1630,6 @@ static int mtk_hdmi_audio_startup(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mtk_hdmi_audio_enable(hdmi);
return 0;
@@ -1641,8 +1639,6 @@ static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
mtk_hdmi_audio_disable(hdmi);
}
@@ -1651,8 +1647,6 @@ mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s(%d)\n", __func__, enable);
-
if (enable)
mtk_hdmi_hw_aud_mute(hdmi);
else
@@ -1665,8 +1659,6 @@ static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf,
{
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
- dev_dbg(dev, "%s\n", __func__);
-
memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
return 0;
@@ -1766,7 +1758,6 @@ static int mtk_drm_hdmi_probe(struct platform_device *pdev)
goto err_bridge_remove;
}
- dev_dbg(dev, "mediatek hdmi probe success\n");
return 0;
err_bridge_remove:
@@ -1789,7 +1780,7 @@ static int mtk_hdmi_suspend(struct device *dev)
struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
mtk_hdmi_clk_disable_audio(hdmi);
- dev_dbg(dev, "hdmi suspend success!\n");
+
return 0;
}
@@ -1804,7 +1795,6 @@ static int mtk_hdmi_resume(struct device *dev)
return ret;
}
- dev_dbg(dev, "hdmi resume success!\n");
return 0;
}
#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
index b55f51675205..827b93786fac 100644
--- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -107,60 +107,10 @@
#define RGS_HDMITX_5T1_EDG (0xf << 4)
#define RGS_HDMITX_PLUG_TST BIT(0)
-static const u8 PREDIV[3][4] = {
- {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */
- {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x1, 0x1, 0x1} /* 148Mhz */
-};
-
-static const u8 TXDIV[3][4] = {
- {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */
- {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x0, 0x0, 0x0} /* 148Mhz */
-};
-
-static const u8 FBKSEL[3][4] = {
- {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */
- {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */
- {0x1, 0x0, 0x1, 0x1} /* 148Mhz */
-};
-
-static const u8 FBKDIV[3][4] = {
- {19, 24, 29, 19}, /* 27Mhz */
- {19, 24, 14, 19}, /* 74Mhz */
- {19, 24, 14, 19} /* 148Mhz */
-};
-
-static const u8 DIVEN[3][4] = {
- {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */
- {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */
- {0x2, 0x2, 0x2, 0x2} /* 148Mhz */
-};
-
-static const u8 HTPLLBP[3][4] = {
- {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */
- {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */
- {0xc, 0xf, 0xf, 0xc} /* 148Mhz */
-};
-
-static const u8 HTPLLBC[3][4] = {
- {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */
- {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */
- {0x2, 0x3, 0x3, 0x2} /* 148Mhz */
-};
-
-static const u8 HTPLLBR[3][4] = {
- {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */
- {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */
- {0x1, 0x2, 0x2, 0x1} /* 148Mhz */
-};
-
static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
- dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
@@ -178,8 +128,6 @@ static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
{
struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
- dev_dbg(hdmi_phy->dev, "%s\n", __func__);
-
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
usleep_range(100, 150);
diff --git a/drivers/gpu/drm/meson/meson_registers.h b/drivers/gpu/drm/meson/meson_registers.h
index 8ea00546cd4e..049c4bfe2a3a 100644
--- a/drivers/gpu/drm/meson/meson_registers.h
+++ b/drivers/gpu/drm/meson/meson_registers.h
@@ -261,6 +261,12 @@
#define VIU_OSD_FIFO_DEPTH_VAL(val) ((val & 0x7f) << 12)
#define VIU_OSD_WORDS_PER_BURST(words) (((words & 0x4) >> 1) << 22)
#define VIU_OSD_FIFO_LIMITS(size) ((size & 0xf) << 24)
+#define VIU_OSD_BURST_LENGTH_24 (0x0 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_32 (0x0 << 31 | 0x1 << 10)
+#define VIU_OSD_BURST_LENGTH_48 (0x0 << 31 | 0x2 << 10)
+#define VIU_OSD_BURST_LENGTH_64 (0x0 << 31 | 0x3 << 10)
+#define VIU_OSD_BURST_LENGTH_96 (0x1 << 31 | 0x0 << 10)
+#define VIU_OSD_BURST_LENGTH_128 (0x1 << 31 | 0x1 << 10)
#define VD1_IF0_GEN_REG 0x1a50
#define VD1_IF0_CANVAS0 0x1a51
diff --git a/drivers/gpu/drm/meson/meson_viu.c b/drivers/gpu/drm/meson/meson_viu.c
index 304f8ff1339c..aede0c67a57f 100644
--- a/drivers/gpu/drm/meson/meson_viu.c
+++ b/drivers/gpu/drm/meson/meson_viu.c
@@ -411,13 +411,6 @@ void meson_viu_gxm_disable_osd1_afbc(struct meson_drm *priv)
priv->io_base + _REG(VIU_MISC_CTRL1));
}
-static inline uint32_t meson_viu_osd_burst_length_reg(uint32_t length)
-{
- uint32_t val = (((length & 0x80) % 24) / 12);
-
- return (((val & 0x3) << 10) | (((val & 0x4) >> 2) << 31));
-}
-
void meson_viu_init(struct meson_drm *priv)
{
uint32_t reg;
@@ -444,9 +437,9 @@ void meson_viu_init(struct meson_drm *priv)
VIU_OSD_FIFO_LIMITS(2); /* fifo_lim: 2*16=32 */
if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A))
- reg |= meson_viu_osd_burst_length_reg(32);
+ reg |= VIU_OSD_BURST_LENGTH_32;
else
- reg |= meson_viu_osd_burst_length_reg(64);
+ reg |= VIU_OSD_BURST_LENGTH_64;
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD1_FIFO_CTRL_STAT));
writel_relaxed(reg, priv->io_base + _REG(VIU_OSD2_FIFO_CTRL_STAT));
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 60f6472a3e58..6021f8d9efd1 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -408,7 +408,7 @@ a2xx_create_address_space(struct msm_gpu *gpu, struct platform_device *pdev)
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- SZ_16M + 0xfff * SZ_64K);
+ 0xfff * SZ_64K);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 096be97ce9f9..21e77d67151f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1121,7 +1121,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
return -ENODEV;
mmu = msm_iommu_new(gmu->dev, domain);
- gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x7fffffff);
+ gmu->aspace = msm_gem_address_space_create(mmu, "gmu", 0x0, 0x80000000);
if (IS_ERR(gmu->aspace)) {
iommu_domain_free(domain);
return PTR_ERR(gmu->aspace);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index a1589e040c57..7768557cdfb2 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -893,8 +893,8 @@ static const struct adreno_gpu_funcs funcs = {
#if defined(CONFIG_DRM_MSM_GPU_STATE)
.gpu_state_get = a6xx_gpu_state_get,
.gpu_state_put = a6xx_gpu_state_put,
- .create_address_space = adreno_iommu_create_address_space,
#endif
+ .create_address_space = adreno_iommu_create_address_space,
},
.get_timestamp = a6xx_get_timestamp,
};
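
The a6xx hunk above moves .create_address_space out of the #if block so it is always initialized. A small stand-alone sketch of the failure mode this avoids (the struct and config symbol are made up, not the msm/adreno API): a designated initializer left inside a disabled conditional never runs and the pointer silently stays NULL.

#include <stdio.h>

struct ops { void (*state_get)(void); void (*create)(void); };

static void state_get_fn(void) { }
static void create_fn(void)    { }

static const struct ops funcs = {
#if defined(CONFIG_FOO)                  /* hypothetical config switch   */
        .state_get = state_get_fn,       /* only present when FOO is on  */
#endif
        .create = create_fn,             /* unconditional, as in the fix */
};

int main(void)
{
        printf("create is %s\n", funcs.create ? "set" : "NULL");
        return 0;
}
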
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 89673c7ed473..5db06b590943 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -194,7 +194,7 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct msm_gem_address_space *aspace;
aspace = msm_gem_address_space_create(mmu, "gpu", SZ_16M,
- 0xfffffff);
+ 0xffffffff - SZ_16M);
if (IS_ERR(aspace) && !IS_ERR(mmu))
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 63976dcd2ac8..0946a86b37b2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -521,7 +521,7 @@ static struct msm_display_topology dpu_encoder_get_topology(
struct dpu_kms *dpu_kms,
struct drm_display_mode *mode)
{
- struct msm_display_topology topology;
+ struct msm_display_topology topology = {0};
int i, intf_count = 0;
for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
@@ -537,7 +537,8 @@ static struct msm_display_topology dpu_encoder_get_topology(
* 1 LM, 1 INTF
* 2 LM, 1 INTF (stream merge to support high resolution interfaces)
*
- * Adding color blocks only to primary interface
+ * Adding color blocks only to primary interface if available in
+ * sufficient number
*/
if (intf_count == 2)
topology.num_lm = 2;
@@ -546,8 +547,11 @@ static struct msm_display_topology dpu_encoder_get_topology(
else
topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
- if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI)
- topology.num_dspp = topology.num_lm;
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_ENCODER_DSI) {
+ if (dpu_kms->catalog->dspp &&
+ (dpu_kms->catalog->dspp_count >= topology.num_lm))
+ topology.num_dspp = topology.num_lm;
+ }
topology.num_enc = 0;
topology.num_intf = intf_count;
@@ -2136,7 +2140,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
dpu_enc = to_dpu_encoder_virt(enc);
- mutex_init(&dpu_enc->enc_lock);
ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info);
if (ret)
goto fail;
@@ -2151,7 +2154,6 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
0);
- mutex_init(&dpu_enc->rc_lock);
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
dpu_enc->idle_timeout = IDLE_TIMEOUT;
@@ -2183,7 +2185,7 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
if (!dpu_enc)
- return ERR_PTR(ENOMEM);
+ return ERR_PTR(-ENOMEM);
rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
drm_enc_mode, NULL);
@@ -2196,6 +2198,8 @@ struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
spin_lock_init(&dpu_enc->enc_spinlock);
dpu_enc->enabled = false;
+ mutex_init(&dpu_enc->enc_lock);
+ mutex_init(&dpu_enc->rc_lock);
return &dpu_enc->base;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index b8615d4fe8a3..680527e28d09 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -780,7 +780,7 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
aspace = msm_gem_address_space_create(mmu, "dpu1",
- 0x1000, 0xfffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
mmu->funcs->destroy(mmu);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 08897184b1d9..fc6a3f8134c7 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -514,7 +514,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
config->iommu);
aspace = msm_gem_address_space_create(mmu,
- "mdp4", 0x1000, 0xffffffff);
+ "mdp4", 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 19ec48695ffb..e193865ce9a2 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mmu = msm_iommu_new(iommu_dev, config->platform.iommu);
aspace = msm_gem_address_space_create(mmu, "mdp5",
- 0x1000, 0xffffffff);
+ 0x1000, 0x100000000 - 0x1000);
if (IS_ERR(aspace)) {
if (!IS_ERR(mmu))
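
Several hunks above change the last argument of msm_gem_address_space_create() from what looks like an inclusive end address to a size. The following standalone check (the start/size interpretation is inferred from the values, not taken from the msm headers) shows the new numbers describe the same range the old end values did:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t start = 0x1000;
        uint64_t size  = 0x100000000ULL - 0x1000;    /* new-style argument  */
        uint64_t end   = start + size - 1;           /* old-style end value */

        printf("start=%#llx size=%#llx end=%#llx\n",
               (unsigned long long)start,
               (unsigned long long)size,
               (unsigned long long)end);             /* end == 0xffffffff   */
        return 0;
}
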
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index 001fbf537440..a1d94be7883a 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -71,8 +71,10 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
queue->flags = flags;
if (priv->gpu) {
- if (prio >= priv->gpu->nr_rings)
+ if (prio >= priv->gpu->nr_rings) {
+ kfree(queue);
return -EINVAL;
+ }
queue->prio = prio;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index d472942102f5..519f99868e35 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -601,6 +601,9 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
(0x0100 << nv_crtc->index),
};
+ if (!nv_encoder->audio)
+ return;
+
nv_encoder->audio = false;
nvif_mthd(&disp->disp->object, 0, &args, sizeof(args));
diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index e5c230d9ae24..cc9993837508 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -550,7 +550,7 @@ static unsigned long nouveau_dmem_migrate_copy_one(struct nouveau_drm *drm,
DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr))
goto out_free_page;
- if (drm->dmem->migrate.copy_func(drm, page_size(spage),
+ if (drm->dmem->migrate.copy_func(drm, 1,
NOUVEAU_APER_VRAM, paddr, NOUVEAU_APER_HOST, *dma_addr))
goto out_dma_unmap;
} else {
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index ba9f9359c30e..6586d9d39874 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -562,6 +562,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
.end = notifier->notifier.interval_tree.last + 1,
.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE,
.hmm_pfns = hmm_pfns,
+ .dev_private_owner = drm->dev,
};
struct mm_struct *mm = notifier->notifier.mm;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
index c8ab1b5741a3..db7769cb33eb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
@@ -118,10 +118,10 @@ g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00e4e4 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00e4e4 + base);
udelay(1);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
index 7ef60895f43a..edb6148cbca0 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
@@ -118,10 +118,10 @@ gm200_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry,
if (retries)
udelay(400);
- /* transaction request, wait up to 1ms for it to complete */
+ /* transaction request, wait up to 2ms for it to complete */
nvkm_wr32(device, 0x00d954 + base, 0x00010000 | ctrl);
- timeout = 1000;
+ timeout = 2000;
do {
ctrl = nvkm_rd32(device, 0x00d954 + base);
udelay(1);
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index b6ecd1552132..5178f87d6574 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2495,6 +2495,7 @@ static const struct panel_desc logicpd_type_28 = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct panel_desc mitsubishi_aa070mc01 = {
@@ -2663,6 +2664,7 @@ static const struct panel_desc newhaven_nhd_43_480272ef_atxl = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct display_timing nlt_nl192108ac18_02d_timing = {
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 134aa2b01f90..f434efdeca44 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5563,6 +5563,7 @@ static int ci_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.dpm.ps)
return -ENOMEM;
power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.num_ps = 0;
for (i = 0; i < state_array->ucNumEntries; i++) {
u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
@@ -5572,10 +5573,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
if (!rdev->pm.power_state[i].clock_info)
return -EINVAL;
ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
- if (ps == NULL) {
- kfree(rdev->pm.dpm.ps);
+ if (ps == NULL)
return -ENOMEM;
- }
rdev->pm.dpm.ps[i].ps_priv = ps;
ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
@@ -5597,8 +5596,8 @@ static int ci_parse_power_table(struct radeon_device *rdev)
k++;
}
power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ rdev->pm.dpm.num_ps = i + 1;
}
- rdev->pm.dpm.num_ps = state_array->ucNumEntries;
/* fill in the vce power states */
for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index b57c37ddd164..c7fbb7932f37 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2127,7 +2127,7 @@ static int ni_init_smc_spll_table(struct radeon_device *rdev)
if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
ret = -EINVAL;
- if (clk_s & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
+ if (fb_div & ~(SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
ret = -EINVAL;
if (clk_v & ~(SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_NISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 0919f1f159a4..f65d1489dc50 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -31,6 +31,7 @@ config DRM_RCAR_DW_HDMI
config DRM_RCAR_LVDS
tristate "R-Car DU LVDS Encoder Support"
depends on DRM && DRM_BRIDGE && OF
+ select DRM_KMS_HELPER
select DRM_PANEL
select OF_FLATTREE
select OF_OVERLAY
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index ce07ddc3e058..2f2c9f0a1071 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -259,9 +259,8 @@ sun4i_hdmi_connector_detect(struct drm_connector *connector, bool force)
struct sun4i_hdmi *hdmi = drm_connector_to_sun4i_hdmi(connector);
unsigned long reg;
- if (readl_poll_timeout(hdmi->base + SUN4I_HDMI_HPD_REG, reg,
- reg & SUN4I_HDMI_HPD_HIGH,
- 0, 500000)) {
+ reg = readl(hdmi->base + SUN4I_HDMI_HPD_REG);
+ if (!(reg & SUN4I_HDMI_HPD_HIGH)) {
cec_phys_addr_invalidate(hdmi->cec_adap);
return connector_status_disconnected;
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 56cc037fd312..cc4fb916318f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -363,6 +363,19 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
mixer->engine.ops = &sun8i_engine_ops;
mixer->engine.node = dev->of_node;
+ if (of_find_property(dev->of_node, "iommus", NULL)) {
+ /*
+ * This assumes we have the same DMA constraints for
+ * all the mixers in our pipeline. This sounds
+ * bad, but it has always been the case for us, and
+ * DRM doesn't do per-device allocation either, so we
+ * would need to fix DRM first...
+ */
+ ret = of_dma_configure(drm->dev, dev->of_node, true);
+ if (ret)
+ return ret;
+ }
+
/*
* While this function can fail, we shouldn't do anything
* if this happens. Some early DE2 DT entries don't provide
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 83f31c6e891c..04d6848d19fc 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -957,6 +957,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
}
drm_plane_helper_add(&plane->base, &tegra_cursor_plane_helper_funcs);
+ drm_plane_create_zpos_immutable_property(&plane->base, 255);
return &plane->base;
}
diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c
index 8183e617bf6b..22a03f7ffdc1 100644
--- a/drivers/gpu/drm/tegra/hub.c
+++ b/drivers/gpu/drm/tegra/hub.c
@@ -149,7 +149,9 @@ int tegra_display_hub_prepare(struct tegra_display_hub *hub)
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_enable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_enable(wgrp);
}
return 0;
@@ -166,7 +168,9 @@ void tegra_display_hub_cleanup(struct tegra_display_hub *hub)
for (i = 0; i < hub->soc->num_wgrps; i++) {
struct tegra_windowgroup *wgrp = &hub->wgrps[i];
- tegra_windowgroup_disable(wgrp);
+ /* Skip orphaned window group whose parent DC is disabled */
+ if (wgrp->parent)
+ tegra_windowgroup_disable(wgrp);
}
}
@@ -944,6 +948,15 @@ static int tegra_display_hub_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
+
+ return err;
+
+unregister:
+ host1x_client_unregister(&hub->client);
+ pm_runtime_disable(&pdev->dev);
return err;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index f73b81c2576e..0f20e14a4cfd 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -883,8 +883,10 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
if (!fence)
return 0;
- if (no_wait_gpu)
+ if (no_wait_gpu) {
+ dma_fence_put(fence);
return -EBUSY;
+ }
dma_resv_add_shared_fence(bo->base.resv, fence);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index a43aa7275f12..fa03fab02076 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -300,8 +300,10 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
break;
case -EBUSY:
case -ERESTARTSYS:
+ dma_fence_put(moving);
return VM_FAULT_NOPAGE;
default:
+ dma_fence_put(moving);
return VM_FAULT_SIGBUS;
}
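
Both TTM hunks above add a dma_fence_put() on early-return paths. A toy, userspace-only analogue of the refcounting rule being enforced (fake_fence and friends are illustrative stand-ins, not the dma-fence API):

#include <stdio.h>

struct fake_fence { int refcount; };

static void fence_get(struct fake_fence *f) { f->refcount++; }
static void fence_put(struct fake_fence *f) { f->refcount--; }

static int wait_or_bail(struct fake_fence *f, int no_wait)
{
        fence_get(f);                    /* reference taken up front        */
        if (no_wait) {
                fence_put(f);            /* the fix: balance before bailing */
                return -1;               /* stands in for -EBUSY            */
        }
        fence_put(f);                    /* normal path drops it as well    */
        return 0;
}

int main(void)
{
        struct fake_fence f = { .refcount = 1 };

        wait_or_bail(&f, 1);
        printf("refcount after early return: %d\n", f.refcount);  /* still 1 */
        return 0;
}
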
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 9ffa9c75a5da..16b385629688 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -1069,10 +1069,6 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
if (new_content_type != SAME_AS_DISPLAY) {
struct vmw_surface_metadata metadata = {0};
- metadata.base_size.width = hdisplay;
- metadata.base_size.height = vdisplay;
- metadata.base_size.depth = 1;
-
/*
* If content buffer is a buffer object, then we have to
* construct surface info
@@ -1104,6 +1100,10 @@ vmw_stdu_primary_plane_prepare_fb(struct drm_plane *plane,
metadata = new_vfbs->surface->metadata;
}
+ metadata.base_size.width = hdisplay;
+ metadata.base_size.height = vdisplay;
+ metadata.base_size.depth = 1;
+
if (vps->surf) {
struct drm_vmw_size cur_base_size =
vps->surf->metadata.base_size;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 6a995db51d6d..e201f62d62c0 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -686,8 +686,17 @@ EXPORT_SYMBOL(host1x_driver_register_full);
*/
void host1x_driver_unregister(struct host1x_driver *driver)
{
+ struct host1x *host1x;
+
driver_unregister(&driver->driver);
+ mutex_lock(&devices_lock);
+
+ list_for_each_entry(host1x, &devices, list)
+ host1x_detach_driver(host1x, driver);
+
+ mutex_unlock(&devices_lock);
+
mutex_lock(&drivers_lock);
list_del_init(&driver->list);
mutex_unlock(&drivers_lock);
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index d24344e91922..d0ebb70e2fdd 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -468,11 +468,18 @@ static int host1x_probe(struct platform_device *pdev)
err = host1x_register(host);
if (err < 0)
- goto deinit_intr;
+ goto deinit_debugfs;
+
+ err = devm_of_platform_populate(&pdev->dev);
+ if (err < 0)
+ goto unregister;
return 0;
-deinit_intr:
+unregister:
+ host1x_unregister(host);
+deinit_debugfs:
+ host1x_debug_deinit(host);
host1x_intr_deinit(host);
deinit_syncpt:
host1x_syncpt_deinit(host);
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index 6f1fe7248d81..a9c2de95c5e2 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -25,6 +25,7 @@
#define U1_MOUSE_REPORT_ID 0x01 /* Mouse data ReportID */
#define U1_ABSOLUTE_REPORT_ID 0x03 /* Absolute data ReportID */
+#define U1_ABSOLUTE_REPORT_ID_SECD 0x02 /* FW-PTP Absolute data ReportID */
#define U1_FEATURE_REPORT_ID 0x05 /* Feature ReportID */
#define U1_SP_ABSOLUTE_REPORT_ID 0x06 /* Feature ReportID */
@@ -368,6 +369,7 @@ static int u1_raw_event(struct alps_dev *hdata, u8 *data, int size)
case U1_FEATURE_REPORT_ID:
break;
case U1_ABSOLUTE_REPORT_ID:
+ case U1_ABSOLUTE_REPORT_ID_SECD:
for (i = 0; i < hdata->max_fingers; i++) {
u8 *contact = &data[i * 5];
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 359bdfbe3701..e82f604d33e9 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -60,6 +60,7 @@ MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
+ unsigned int fn_found;
DECLARE_BITMAP(pressed_numlock, KEY_CNT);
};
@@ -365,12 +366,15 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
+ asc->fn_found = true;
apple_setup_input(hi->input);
return 1;
}
@@ -397,6 +401,19 @@ static int apple_input_mapped(struct hid_device *hdev, struct hid_input *hi,
return 0;
}
+static int apple_input_configured(struct hid_device *hdev,
+ struct hid_input *hidinput)
+{
+ struct apple_sc *asc = hid_get_drvdata(hdev);
+
+ if ((asc->quirks & APPLE_HAS_FN) && !asc->fn_found) {
+ hid_info(hdev, "Fn key not found (Apple Wireless Keyboard clone?), disabling Fn key handling\n");
+ asc->quirks = 0;
+ }
+
+ return 0;
+}
+
static int apple_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -611,6 +628,7 @@ static struct hid_driver apple_driver = {
.event = apple_event,
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
+ .input_configured = apple_input_configured,
};
module_hid_driver(apple_driver);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 874fc3791f3b..6f370e020feb 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -618,6 +618,7 @@
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081 0xa081
#define USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2 0xa0c2
#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096 0xa096
+#define USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293 0xa293
#define USB_VENDOR_ID_IMATION 0x0718
#define USB_DEVICE_ID_DISC_STAKKA 0xd000
@@ -998,6 +999,8 @@
#define USB_DEVICE_ID_ROCCAT_RYOS_MK_PRO 0x3232
#define USB_DEVICE_ID_ROCCAT_SAVU 0x2d5a
+#define USB_VENDOR_ID_SAI 0x17dd
+
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 48dff5d6b605..a78c13cc9f47 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -1153,7 +1153,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
if (!dj_report)
return -ENOMEM;
dj_report->report_id = REPORT_ID_DJ_SHORT;
- dj_report->device_index = 0xFF;
+ dj_report->device_index = HIDPP_RECEIVER_INDEX;
dj_report->report_type = REPORT_TYPE_CMD_GET_PAIRED_DEVICES;
retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
kfree(dj_report);
@@ -1175,7 +1175,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
if (djrcv_dev->type == recvr_type_dj) {
dj_report->report_id = REPORT_ID_DJ_SHORT;
- dj_report->device_index = 0xFF;
+ dj_report->device_index = HIDPP_RECEIVER_INDEX;
dj_report->report_type = REPORT_TYPE_CMD_SWITCH;
dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] =
@@ -1204,7 +1204,7 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH);
buf[0] = REPORT_ID_HIDPP_SHORT;
- buf[1] = 0xFF;
+ buf[1] = HIDPP_RECEIVER_INDEX;
buf[2] = 0x80;
buf[3] = 0x00;
buf[4] = 0x00;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 1e1cf8eae649..b8b53dc95e86 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -3146,7 +3146,7 @@ static int hi_res_scroll_enable(struct hidpp_device *hidpp)
multiplier = 1;
hidpp->vertical_wheel_counter.wheel_multiplier = multiplier;
- hid_info(hidpp->hid_dev, "multiplier = %d\n", multiplier);
+ hid_dbg(hidpp->hid_dev, "wheel multiplier = %d\n", multiplier);
return 0;
}
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 34138667f8af..abd86903875f 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -535,6 +535,12 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd
__set_bit(MSC_RAW, input->mscbit);
}
+ /*
+ * hid-input may mark the device as using autorepeat, but neither
+ * the trackpad nor the mouse actually wants it.
+ */
+ __clear_bit(EV_REP, input->evbit);
+
return 0;
}
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index ca8b5c261c7c..934fc0a798d4 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -88,6 +88,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING), HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
@@ -832,6 +833,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAI, USB_DEVICE_ID_CYPRESS_HIDCOM) },
#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_TP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_INT_TP) },
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 6286204d4c56..a3b151b29bd7 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -526,7 +526,8 @@ static int steam_register(struct steam_device *steam)
steam_battery_register(steam);
mutex_lock(&steam_devices_lock);
- list_add(&steam->list, &steam_devices);
+ if (list_empty(&steam->list))
+ list_add(&steam->list, &steam_devices);
mutex_unlock(&steam_devices_lock);
}
@@ -552,7 +553,7 @@ static void steam_unregister(struct steam_device *steam)
hid_info(steam->hdev, "Steam Controller '%s' disconnected",
steam->serial_no);
mutex_lock(&steam_devices_lock);
- list_del(&steam->list);
+ list_del_init(&steam->list);
mutex_unlock(&steam_devices_lock);
steam->serial_no[0] = 0;
}
@@ -738,6 +739,7 @@ static int steam_probe(struct hid_device *hdev,
mutex_init(&steam->mutex);
steam->quirks = id->driver_data;
INIT_WORK(&steam->work_connect, steam_work_connect_cb);
+ INIT_LIST_HEAD(&steam->list);
steam->client_hdev = steam_create_client_hid(hdev);
if (IS_ERR(steam->client_hdev)) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index ec142bc8c1da..35f3bfc3e6f5 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -374,6 +374,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ .ident = "Mediacom FlexBook edge 13",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "MEDIACOM"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "FlexBook_edge13-M-FBE13"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Odys Winbook 13",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AXDIA International GmbH"),
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 9147ee9d5f7d..d69f4efa3719 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -1368,7 +1368,7 @@ static void hv_kmsg_dump(struct kmsg_dumper *dumper,
* Write dump contents to the page. No need to synchronize; panic should
* be single-threaded.
*/
- kmsg_dump_get_buffer(dumper, true, hv_panic_page, HV_HYP_PAGE_SIZE,
+ kmsg_dump_get_buffer(dumper, false, hv_panic_page, HV_HYP_PAGE_SIZE,
&bytes_written);
if (bytes_written)
hyperv_report_panic_msg(panic_pa, bytes_written);
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 0db8ef4fd6e1..a270b975e90b 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -883,7 +883,7 @@ static int acpi_power_meter_add(struct acpi_device *device)
res = setup_attrs(resource);
if (res)
- goto exit_free;
+ goto exit_free_capability;
resource->hwmon_dev = hwmon_device_register(&device->dev);
if (IS_ERR(resource->hwmon_dev)) {
@@ -896,6 +896,8 @@ static int acpi_power_meter_add(struct acpi_device *device)
exit_remove:
remove_attrs(resource);
+exit_free_capability:
+ free_capabilities(resource);
exit_free:
kfree(resource);
exit:
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index e95b7426106e..29603742c858 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -362,7 +362,7 @@ static struct platform_driver amd_energy_driver = {
static struct platform_device *amd_energy_platdev;
static const struct x86_cpu_id cpu_ids[] __initconst = {
- X86_MATCH_VENDOR_FAM(AMD, 0x17, NULL),
+ X86_MATCH_VENDOR_FAM_MODEL(AMD, 0x17, 0x31, NULL),
{}
};
MODULE_DEVICE_TABLE(x86cpu, cpu_ids);
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c
index 33fb54845bf6..3d8239fd66ed 100644
--- a/drivers/hwmon/aspeed-pwm-tacho.c
+++ b/drivers/hwmon/aspeed-pwm-tacho.c
@@ -851,6 +851,8 @@ static int aspeed_create_fan(struct device *dev,
ret = of_property_read_u32(child, "reg", &pwm_port);
if (ret)
return ret;
+ if (pwm_port >= ARRAY_SIZE(pwm_port_params))
+ return -EINVAL;
aspeed_create_pwm_port(priv, (u8)pwm_port);
ret = of_property_count_u8_elems(child, "cooling-levels");
diff --git a/drivers/hwmon/bt1-pvt.c b/drivers/hwmon/bt1-pvt.c
index 1a9772fb1f73..94698cae0497 100644
--- a/drivers/hwmon/bt1-pvt.c
+++ b/drivers/hwmon/bt1-pvt.c
@@ -64,7 +64,7 @@ static const struct pvt_sensor_info pvt_info[] = {
* 48380,
* where T = [-48380, 147438] mC and N = [0, 1023].
*/
-static const struct pvt_poly poly_temp_to_N = {
+static const struct pvt_poly __maybe_unused poly_temp_to_N = {
.total_divider = 10000,
.terms = {
{4, 18322, 10000, 10000},
@@ -96,7 +96,7 @@ static const struct pvt_poly poly_N_to_temp = {
* N = (18658e-3*V - 11572) / 10,
* V = N * 10^5 / 18658 + 11572 * 10^4 / 18658.
*/
-static const struct pvt_poly poly_volt_to_N = {
+static const struct pvt_poly __maybe_unused poly_volt_to_N = {
.total_divider = 10,
.terms = {
{1, 18658, 1000, 1},
@@ -300,12 +300,12 @@ static irqreturn_t pvt_soft_isr(int irq, void *data)
return IRQ_HANDLED;
}
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
{
return 0644;
}
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
{
return 0444;
}
@@ -462,12 +462,12 @@ static irqreturn_t pvt_hard_isr(int irq, void *data)
#define pvt_soft_isr NULL
-inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_limit_is_visible(enum pvt_sensor_type type)
{
return 0;
}
-inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
+static inline umode_t pvt_alarm_is_visible(enum pvt_sensor_type type)
{
return 0;
}
diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
index 0d4f3d97ffc6..72c760373957 100644
--- a/drivers/hwmon/drivetemp.c
+++ b/drivers/hwmon/drivetemp.c
@@ -285,6 +285,42 @@ static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val)
return err;
}
+static const char * const sct_avoid_models[] = {
+/*
+ * These drives will have WRITE FPDMA QUEUED command timeouts and sometimes just
+ * freeze until power-cycled under heavy write loads when their temperature is
+ * getting polled in SCT mode. The SMART mode seems to be fine, though.
+ *
+ * While only the 3 TB model (DT01ACA3) was actually caught exhibiting the
+ * problem, let's play it safe here to avoid data corruption and ban the whole
+ * DT01ACAx family.
+ *
+ * The models from this array are prefix-matched.
+ */
+ "TOSHIBA DT01ACA",
+};
+
+static bool drivetemp_sct_avoid(struct drivetemp_data *st)
+{
+ struct scsi_device *sdev = st->sdev;
+ unsigned int ctr;
+
+ if (!sdev->model)
+ return false;
+
+ /*
+ * The "model" field contains just the raw SCSI INQUIRY response
+ * "product identification" field, which has a width of 16 bytes.
+ * This field is space-filled, but is NOT NULL-terminated.
+ */
+ for (ctr = 0; ctr < ARRAY_SIZE(sct_avoid_models); ctr++)
+ if (!strncmp(sdev->model, sct_avoid_models[ctr],
+ strlen(sct_avoid_models[ctr])))
+ return true;
+
+ return false;
+}
+
static int drivetemp_identify_sata(struct drivetemp_data *st)
{
struct scsi_device *sdev = st->sdev;
@@ -326,6 +362,13 @@ static int drivetemp_identify_sata(struct drivetemp_data *st)
/* bail out if this is not a SATA device */
if (!is_ata || !is_sata)
return -ENODEV;
+
+ if (have_sct && drivetemp_sct_avoid(st)) {
+ dev_notice(&sdev->sdev_gendev,
+ "will avoid using SCT for temperature monitoring\n");
+ have_sct = false;
+ }
+
if (!have_sct)
goto skip_sct;
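
The new drivetemp_sct_avoid() relies on prefix matching against a space-padded, non-NUL-terminated INQUIRY field. A standalone illustration of that matching; the sample field mirrors the DT01ACA3 model named in the comment above:

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* 16 bytes, space-filled, no terminating NUL - as the comment in the
         * hunk describes the INQUIRY "product identification" field */
        static const char model[16] = "TOSHIBA DT01ACA3";
        static const char * const avoid[] = { "TOSHIBA DT01ACA" };
        size_t i;

        for (i = 0; i < sizeof(avoid) / sizeof(avoid[0]); i++)
                if (!strncmp(model, avoid[i], strlen(avoid[i])))
                        printf("prefix match: avoid SCT for this drive\n");
        return 0;
}
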
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 491a570e8e50..924c02c1631d 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -443,7 +443,7 @@ static ssize_t pwm1_enable_store(struct device *dev,
}
result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
- if (result) {
+ if (result < 0) {
count = result;
goto err;
}
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 743752a2467a..64122eb38060 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -38,8 +38,9 @@ static const u8 MAX6697_REG_CRIT[] = {
* Map device tree / platform data register bit map to chip bit map.
* Applies to alert register and over-temperature register.
*/
-#define MAX6697_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
+#define MAX6697_ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
(((reg) & 0x01) << 6) | ((reg) & 0x80))
+#define MAX6697_OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))
#define MAX6697_REG_STAT(n) (0x44 + (n))
@@ -562,12 +563,12 @@ static int max6697_init_chip(struct max6697_data *data,
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_ALERT_MASK,
- MAX6697_MAP_BITS(pdata->alert_mask));
+ MAX6697_ALERT_MAP_BITS(pdata->alert_mask));
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6697_REG_OVERT_MASK,
- MAX6697_MAP_BITS(pdata->over_temperature_mask));
+ MAX6697_OVERT_MAP_BITS(pdata->over_temperature_mask));
if (ret < 0)
return ret;
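
The split of MAX6697_MAP_BITS into separate alert and over-temperature variants changes only where each source bit ends up. A quick standalone check with a made-up platform-data mask:

#include <stdio.h>

#define ALERT_MAP_BITS(reg) ((((reg) & 0x7e) >> 1) | \
                             (((reg) & 0x01) << 6) | ((reg) & 0x80))
#define OVERT_MAP_BITS(reg) (((reg) >> 1) | (((reg) & 0x01) << 7))

int main(void)
{
        unsigned int mask = 0x03;       /* hypothetical platform-data value */

        /* alert: bits 1-6 shift down, bit 0 moves to bit 6, bit 7 stays put
         * overt: all bits shift down, bit 0 wraps around to bit 7 */
        printf("alert: %#x -> %#x\n", mask, ALERT_MAP_BITS(mask));
        printf("overt: %#x -> %#x\n", mask, OVERT_MAP_BITS(mask));
        return 0;
}
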
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index e7e1ddc1d631..750b08713dee 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -786,13 +786,13 @@ static const char *const nct6798_temp_label[] = {
"Agent1 Dimm1",
"BYTE_TEMP0",
"BYTE_TEMP1",
- "",
- "",
+ "PECI Agent 0 Calibration", /* undocumented */
+ "PECI Agent 1 Calibration", /* undocumented */
"",
"Virtual_TEMP"
};
-#define NCT6798_TEMP_MASK 0x8fff0ffe
+#define NCT6798_TEMP_MASK 0xbfff0ffe
#define NCT6798_VIRT_TEMP_MASK 0x80000c00
/* NCT6102D/NCT6106D specific data */
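
The temperature-source mask widens from 0x8fff0ffe to 0xbfff0ffe in step with the two newly named label entries. A tiny standalone check of which bits that adds; the mapping of bit index to label position is an inference, not taken from the datasheet:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t old_mask = 0x8fff0ffeU;
        uint32_t new_mask = 0xbfff0ffeU;
        uint32_t added = old_mask ^ new_mask;
        int bit;

        for (bit = 0; bit < 32; bit++)
                if (added & (1U << bit))
                        printf("newly enabled temp source: bit %d\n", bit);
        /* prints 28 and 29, presumably the two "PECI Agent Calibration" inputs */
        return 0;
}
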
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index a337195b1c39..ea516cec1d35 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -71,7 +71,7 @@ config SENSORS_IR35221
Infineon IR35221 controller.
This driver can also be built as a module. If so, the module will
- be called ir35521.
+ be called ir35221.
config SENSORS_IR38064
tristate "Infineon IR38064"
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index e25f541227da..19317575d1c6 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -465,6 +465,7 @@ MODULE_DEVICE_TABLE(i2c, adm1275_id);
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ s32 (*config_read_fn)(const struct i2c_client *client, u8 reg);
u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
@@ -510,11 +511,16 @@ static int adm1275_probe(struct i2c_client *client,
"Device mismatch: Configured %s, detected %s\n",
id->name, mid->name);
- config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ if (mid->driver_data == adm1272 || mid->driver_data == adm1278 ||
+ mid->driver_data == adm1293 || mid->driver_data == adm1294)
+ config_read_fn = i2c_smbus_read_word_data;
+ else
+ config_read_fn = i2c_smbus_read_byte_data;
+ config = config_read_fn(client, ADM1275_PMON_CONFIG);
if (config < 0)
return config;
- device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
+ device_config = config_read_fn(client, ADM1275_DEVICE_CONFIG);
if (device_config < 0)
return device_config;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index a420877ba533..2191575a448b 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1869,7 +1869,7 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
struct pmbus_sensor *sensor;
sensor = pmbus_add_sensor(data, "fan", "target", index, page,
- PMBUS_VIRT_FAN_TARGET_1 + id, 0xff, PSC_FAN,
+ 0xff, PMBUS_VIRT_FAN_TARGET_1 + id, PSC_FAN,
false, false, true);
if (!sensor)
@@ -1880,14 +1880,14 @@ static int pmbus_add_fan_ctrl(struct i2c_client *client,
return 0;
sensor = pmbus_add_sensor(data, "pwm", NULL, index, page,
- PMBUS_VIRT_PWM_1 + id, 0xff, PSC_PWM,
+ 0xff, PMBUS_VIRT_PWM_1 + id, PSC_PWM,
false, false, true);
if (!sensor)
return -ENOMEM;
sensor = pmbus_add_sensor(data, "pwm", "enable", index, page,
- PMBUS_VIRT_PWM_ENABLE_1 + id, 0xff, PSC_PWM,
+ 0xff, PMBUS_VIRT_PWM_ENABLE_1 + id, PSC_PWM,
true, false, false);
if (!sensor)
@@ -1929,7 +1929,7 @@ static int pmbus_add_fan_attributes(struct i2c_client *client,
continue;
if (pmbus_add_sensor(data, "fan", "input", index,
- page, pmbus_fan_registers[f], 0xff,
+ page, 0xff, pmbus_fan_registers[f],
PSC_FAN, true, true, true) == NULL)
return -ENOMEM;
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 286d3cfda7de..d421e691318b 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -147,7 +147,7 @@ static enum hwmon_sensor_types scmi_types[] = {
[ENERGY] = hwmon_energy,
};
-static u32 hwmon_attributes[] = {
+static u32 hwmon_attributes[hwmon_max] = {
[hwmon_chip] = HWMON_C_REGISTER_TZ,
[hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL,
[hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL,
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
index 40387d58c8e7..3ccc703dc940 100644
--- a/drivers/hwtracing/coresight/coresight-cti.c
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -747,17 +747,50 @@ static int cti_dying_cpu(unsigned int cpu)
return 0;
}
+static int cti_pm_setup(struct cti_drvdata *drvdata)
+{
+ int ret;
+
+ if (drvdata->ctidev.cpu == -1)
+ return 0;
+
+ if (nr_cti_cpu)
+ goto done;
+
+ cpus_read_lock();
+ ret = cpuhp_setup_state_nocalls_cpuslocked(
+ CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
+ "arm/coresight_cti:starting",
+ cti_starting_cpu, cti_dying_cpu);
+ if (ret) {
+ cpus_read_unlock();
+ return ret;
+ }
+
+ ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
+ cpus_read_unlock();
+ if (ret) {
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
+ return ret;
+ }
+
+done:
+ nr_cti_cpu++;
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
+
+ return 0;
+}
+
/* release PM registrations */
static void cti_pm_release(struct cti_drvdata *drvdata)
{
- if (drvdata->ctidev.cpu >= 0) {
- if (--nr_cti_cpu == 0) {
- cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+ if (drvdata->ctidev.cpu == -1)
+ return;
- cpuhp_remove_state_nocalls(
- CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
- }
- cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
+ cti_cpu_drvdata[drvdata->ctidev.cpu] = NULL;
+ if (--nr_cti_cpu == 0) {
+ cpu_pm_unregister_notifier(&cti_cpu_pm_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_CTI_STARTING);
}
}
@@ -823,19 +856,14 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
/* driver data*/
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata) {
- ret = -ENOMEM;
- dev_info(dev, "%s, mem err\n", __func__);
- goto err_out;
- }
+ if (!drvdata)
+ return -ENOMEM;
/* Validity for the resource is already checked by the AMBA core */
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base)) {
- ret = PTR_ERR(base);
- dev_err(dev, "%s, remap err\n", __func__);
- goto err_out;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
drvdata->base = base;
dev_set_drvdata(dev, drvdata);
@@ -854,8 +882,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
pdata = coresight_cti_get_platform_data(dev);
if (IS_ERR(pdata)) {
dev_err(dev, "coresight_cti_get_platform_data err\n");
- ret = PTR_ERR(pdata);
- goto err_out;
+ return PTR_ERR(pdata);
}
/* default to powered - could change on PM notifications */
@@ -867,35 +894,20 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->ctidev.cpu);
else
cti_desc.name = coresight_alloc_device_name(&cti_sys_devs, dev);
- if (!cti_desc.name) {
- ret = -ENOMEM;
- goto err_out;
- }
+ if (!cti_desc.name)
+ return -ENOMEM;
/* setup CPU power management handling for CPU bound CTI devices. */
- if (drvdata->ctidev.cpu >= 0) {
- cti_cpu_drvdata[drvdata->ctidev.cpu] = drvdata;
- if (!nr_cti_cpu++) {
- cpus_read_lock();
- ret = cpuhp_setup_state_nocalls_cpuslocked(
- CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
- "arm/coresight_cti:starting",
- cti_starting_cpu, cti_dying_cpu);
-
- if (!ret)
- ret = cpu_pm_register_notifier(&cti_cpu_pm_nb);
- cpus_read_unlock();
- if (ret)
- goto err_out;
- }
- }
+ ret = cti_pm_setup(drvdata);
+ if (ret)
+ return ret;
/* create dynamic attributes for connections */
ret = cti_create_cons_sysfs(dev, drvdata);
if (ret) {
dev_err(dev, "%s: create dynamic sysfs entries failed\n",
cti_desc.name);
- goto err_out;
+ goto pm_release;
}
/* set up coresight component description */
@@ -908,7 +920,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->csdev = coresight_register(&cti_desc);
if (IS_ERR(drvdata->csdev)) {
ret = PTR_ERR(drvdata->csdev);
- goto err_out;
+ goto pm_release;
}
/* add to list of CTI devices */
@@ -927,7 +939,7 @@ static int cti_probe(struct amba_device *adev, const struct amba_id *id)
dev_info(&drvdata->csdev->dev, "CTI initialized\n");
return 0;
-err_out:
+pm_release:
cti_pm_release(drvdata);
return ret;
}
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 747afc875f91..0c35cd5e0d1d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1388,18 +1388,57 @@ static struct notifier_block etm4_cpu_pm_nb = {
.notifier_call = etm4_cpu_pm_notify,
};
-static int etm4_cpu_pm_register(void)
+/* Setup PM. Called with cpus locked. Deals with error conditions and counts */
+static int etm4_pm_setup_cpuslocked(void)
{
- if (IS_ENABLED(CONFIG_CPU_PM))
- return cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ int ret;
- return 0;
+ if (etm4_count++)
+ return 0;
+
+ ret = cpu_pm_register_notifier(&etm4_cpu_pm_nb);
+ if (ret)
+ goto reduce_count;
+
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
+ "arm/coresight4:starting",
+ etm4_starting_cpu, etm4_dying_cpu);
+
+ if (ret)
+ goto unregister_notifier;
+
+ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
+ "arm/coresight4:online",
+ etm4_online_cpu, NULL);
+
+ /* HP dyn state ID returned in ret on success */
+ if (ret > 0) {
+ hp_online = ret;
+ return 0;
+ }
+
+ /* failed dyn state - remove others */
+ cpuhp_remove_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING);
+
+unregister_notifier:
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+
+reduce_count:
+ --etm4_count;
+ return ret;
}
-static void etm4_cpu_pm_unregister(void)
+static void etm4_pm_clear(void)
{
- if (IS_ENABLED(CONFIG_CPU_PM))
- cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ if (--etm4_count != 0)
+ return;
+
+ cpu_pm_unregister_notifier(&etm4_cpu_pm_nb);
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
+ if (hp_online) {
+ cpuhp_remove_state_nocalls(hp_online);
+ hp_online = 0;
+ }
}
static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
@@ -1453,24 +1492,15 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
etm4_init_arch_data, drvdata, 1))
dev_err(dev, "ETM arch init failed\n");
- if (!etm4_count++) {
- cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
- "arm/coresight4:starting",
- etm4_starting_cpu, etm4_dying_cpu);
- ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
- "arm/coresight4:online",
- etm4_online_cpu, NULL);
- if (ret < 0)
- goto err_arch_supported;
- hp_online = ret;
+ ret = etm4_pm_setup_cpuslocked();
+ cpus_read_unlock();
- ret = etm4_cpu_pm_register();
- if (ret)
- goto err_arch_supported;
+ /* etm4_pm_setup_cpuslocked() does its own cleanup - exit on error */
+ if (ret) {
+ etmdrvdata[drvdata->cpu] = NULL;
+ return ret;
}
- cpus_read_unlock();
-
if (etm4_arch_supported(drvdata->arch) == false) {
ret = -EINVAL;
goto err_arch_supported;
@@ -1517,13 +1547,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
err_arch_supported:
etmdrvdata[drvdata->cpu] = NULL;
- if (--etm4_count == 0) {
- etm4_cpu_pm_unregister();
-
- cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING);
- if (hp_online)
- cpuhp_remove_state_nocalls(hp_online);
- }
+ etm4_pm_clear();
return ret;
}
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index ca232ec565e8..c9ac3dc65113 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -1021,15 +1021,30 @@ int intel_th_set_output(struct intel_th_device *thdev,
{
struct intel_th_device *hub = to_intel_th_hub(thdev);
struct intel_th_driver *hubdrv = to_intel_th_driver(hub->dev.driver);
+ int ret;
/* In host mode, this is up to the external debugger, do nothing. */
if (hub->host_mode)
return 0;
- if (!hubdrv->set_output)
- return -ENOTSUPP;
+ /*
+ * hub is instantiated together with the source device that
+ * calls here, so guaranteed to be present.
+ */
+ hubdrv = to_intel_th_driver(hub->dev.driver);
+ if (!hubdrv || !try_module_get(hubdrv->driver.owner))
+ return -EINVAL;
+
+ if (!hubdrv->set_output) {
+ ret = -ENOTSUPP;
+ goto out;
+ }
+
+ ret = hubdrv->set_output(hub, master);
- return hubdrv->set_output(hub, master);
+out:
+ module_put(hubdrv->driver.owner);
+ return ret;
}
EXPORT_SYMBOL_GPL(intel_th_set_output);
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index 7ccac74553a6..21fdf0b93516 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -234,11 +234,21 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Tiger Lake PCH-H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x43a6),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Jasper Lake PCH */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4da6),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
{
+ /* Jasper Lake CPU */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4e29),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
+ {
/* Elkhart Lake CPU */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4529),
.driver_data = (kernel_ulong_t)&intel_th_2x,
@@ -248,6 +258,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4b26),
.driver_data = (kernel_ulong_t)&intel_th_2x,
},
+ {
+ /* Emmitsburg PCH */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1bcc),
+ .driver_data = (kernel_ulong_t)&intel_th_2x,
+ },
{ 0 },
};
diff --git a/drivers/hwtracing/intel_th/sth.c b/drivers/hwtracing/intel_th/sth.c
index 3a1f4e650378..a1529f571491 100644
--- a/drivers/hwtracing/intel_th/sth.c
+++ b/drivers/hwtracing/intel_th/sth.c
@@ -161,9 +161,7 @@ static int sth_stm_link(struct stm_data *stm_data, unsigned int master,
{
struct sth_device *sth = container_of(stm_data, struct sth_device, stm);
- intel_th_set_output(to_intel_th_device(sth->dev), master);
-
- return 0;
+ return intel_th_set_output(to_intel_th_device(sth->dev), master);
}
static int intel_th_sw_init(struct sth_device *sth)
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index ef39c83aaf33..bae1dc08ec9a 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -113,11 +113,18 @@ config I2C_STUB
config I2C_SLAVE
bool "I2C slave support"
+ help
+ This enables Linux to act as an I2C slave device. Note that your I2C
+ bus master driver also needs to support this functionality. Please
+ read Documentation/i2c/slave-interface.rst for further details.
if I2C_SLAVE
config I2C_SLAVE_EEPROM
tristate "I2C eeprom slave driver"
+ help
+ This backend makes Linux behave like an I2C EEPROM. Please read
+ Documentation/i2c/slave-eeprom-backend.rst for further details.
endif
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 7f10312d1b88..388978775be0 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -314,7 +314,8 @@ static int pca_xfer(struct i2c_adapter *i2c_adap,
DEB2("BUS ERROR - SDA Stuck low\n");
pca_reset(adap);
goto out;
- case 0x90: /* Bus error - SCL stuck low */
+ case 0x78: /* Bus error - SCL stuck low (PCA9665) */
+ case 0x90: /* Bus error - SCL stuck low (PCA9564) */
DEB2("BUS ERROR - SCL Stuck low\n");
pca_reset(adap);
goto out;
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 4b72398af505..e4b7f2a951ad 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -421,20 +421,21 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr)
/* Read data if receive data valid is set */
while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
CDNS_I2C_SR_RXDV) {
- /*
- * Clear hold bit that was set for FIFO control if
- * RX data left is less than FIFO depth, unless
- * repeated start is selected.
- */
- if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
- !id->bus_hold_flag)
- cdns_i2c_clear_bus_hold(id);
-
if (id->recv_count > 0) {
*(id->p_recv_buf)++ =
cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
id->recv_count--;
id->curr_recv_count--;
+
+ /*
+ * Clear hold bit that was set for FIFO control
+ * if RX data left is less than or equal to
+ * FIFO DEPTH unless repeated start is selected
+ */
+ if (id->recv_count <= CDNS_I2C_FIFO_DEPTH &&
+ !id->bus_hold_flag)
+ cdns_i2c_clear_bus_hold(id);
+
} else {
dev_err(id->adap.dev.parent,
"xfer_size reg rollover. xfer aborted!\n");
@@ -594,10 +595,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+ if (id->recv_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
- else
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
@@ -654,11 +653,8 @@ static void cdns_i2c_msend(struct cdns_i2c *id)
* Check for the message size against FIFO depth and set the
* 'hold bus' bit if it is greater than FIFO depth.
*/
- if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag)
+ if (id->send_count > CDNS_I2C_FIFO_DEPTH)
ctrl_reg |= CDNS_I2C_CR_HOLD;
- else
- ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD;
-
cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET);
/* Clear the interrupts in interrupt status register. */
diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c
index e3a8640db7da..3c19aada4b30 100644
--- a/drivers/i2c/busses/i2c-designware-common.c
+++ b/drivers/i2c/busses/i2c-designware-common.c
@@ -286,10 +286,8 @@ int i2c_dw_acpi_configure(struct device *device)
}
EXPORT_SYMBOL_GPL(i2c_dw_acpi_configure);
-void i2c_dw_acpi_adjust_bus_speed(struct device *device)
+static u32 i2c_dw_acpi_round_bus_speed(struct device *device)
{
- struct dw_i2c_dev *dev = dev_get_drvdata(device);
- struct i2c_timings *t = &dev->timings;
u32 acpi_speed;
int i;
@@ -300,9 +298,22 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device)
*/
for (i = 0; i < ARRAY_SIZE(supported_speeds); i++) {
if (acpi_speed >= supported_speeds[i])
- break;
+ return supported_speeds[i];
}
- acpi_speed = i < ARRAY_SIZE(supported_speeds) ? supported_speeds[i] : 0;
+
+ return 0;
+}
+
+#else /* CONFIG_ACPI */
+
+static inline u32 i2c_dw_acpi_round_bus_speed(struct device *device) { return 0; }
+
+#endif /* CONFIG_ACPI */
+
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev)
+{
+ u32 acpi_speed = i2c_dw_acpi_round_bus_speed(dev->dev);
+ struct i2c_timings *t = &dev->timings;
/*
* Find bus speed from the "clock-frequency" device property, ACPI
@@ -315,9 +326,7 @@ void i2c_dw_acpi_adjust_bus_speed(struct device *device)
else
t->bus_freq_hz = I2C_MAX_FAST_MODE_FREQ;
}
-EXPORT_SYMBOL_GPL(i2c_dw_acpi_adjust_bus_speed);
-
-#endif /* CONFIG_ACPI */
+EXPORT_SYMBOL_GPL(i2c_dw_adjust_bus_speed);
u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
{
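The refactor above separates the ACPI-specific step (rounding the ACPI-reported speed down to a supported bus speed) from the generic speed selection, so i2c_dw_adjust_bus_speed() can be called on every probe path. The rounding loop relies on the speed table being sorted in descending order. A minimal user-space sketch of the idiom, with illustrative table values rather than the driver's own constants:

    #include <stdio.h>

    /* Illustrative descending table; the driver keeps its own. */
    static const unsigned int speeds[] = { 3400000, 1000000, 400000, 100000 };

    /* Return the highest supported speed not above 'speed', or 0 if none. */
    static unsigned int round_bus_speed(unsigned int speed)
    {
        size_t i;

        for (i = 0; i < sizeof(speeds) / sizeof(speeds[0]); i++)
            if (speed >= speeds[i])
                return speeds[i];
        return 0;
    }

    int main(void)
    {
        printf("%u\n", round_bus_speed(950000)); /* prints 400000 */
        return 0;
    }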
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 556673a1f61b..eb5ef4d0f463 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -361,11 +361,10 @@ static inline int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) { return 0;
#endif
int i2c_dw_validate_speed(struct dw_i2c_dev *dev);
+void i2c_dw_adjust_bus_speed(struct dw_i2c_dev *dev);
#if IS_ENABLED(CONFIG_ACPI)
int i2c_dw_acpi_configure(struct device *device);
-void i2c_dw_acpi_adjust_bus_speed(struct device *device);
#else
static inline int i2c_dw_acpi_configure(struct device *device) { return -ENODEV; }
-static inline void i2c_dw_acpi_adjust_bus_speed(struct device *device) {}
#endif
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 947c096f86e3..8522134f9ea9 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -240,7 +240,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
}
}
- i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+ i2c_dw_adjust_bus_speed(dev);
if (has_acpi_companion(&pdev->dev))
i2c_dw_acpi_configure(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0de4e302fc6a..a71bc58fc03c 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -12,6 +12,7 @@
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/i2c.h>
@@ -191,6 +192,17 @@ static int dw_i2c_plat_request_regs(struct dw_i2c_dev *dev)
return ret;
}
+static const struct dmi_system_id dw_i2c_hwmon_class_dmi[] = {
+ {
+ .ident = "Qtechnology QT5222",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Qtechnology"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "QT5222"),
+ },
+ },
+ { } /* terminate list */
+};
+
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -228,7 +240,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
else
i2c_parse_fw_timings(&pdev->dev, t, false);
- i2c_dw_acpi_adjust_bus_speed(&pdev->dev);
+ i2c_dw_adjust_bus_speed(dev);
if (pdev->dev.of_node)
dw_i2c_of_configure(pdev);
@@ -267,7 +279,8 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
adap = &dev->adapter;
adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DEPRECATED;
+ adap->class = dmi_check_system(dw_i2c_hwmon_class_dmi) ?
+ I2C_CLASS_HWMON : I2C_CLASS_DEPRECATED;
ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
adap->dev.of_node = pdev->dev.of_node;
adap->nr = -1;
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index bb810dee8fb5..73f139690e4e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -180,6 +180,7 @@ static const struct pci_device_id pch_pcidev_id[] = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
+MODULE_DEVICE_TABLE(pci, pch_pcidev_id);
static irqreturn_t pch_i2c_handler(int irq, void *pData);
diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c
index e0c256922d4f..977d6f524649 100644
--- a/drivers/i2c/busses/i2c-fsi.c
+++ b/drivers/i2c/busses/i2c-fsi.c
@@ -98,7 +98,7 @@
#define I2C_STAT_DAT_REQ BIT(25)
#define I2C_STAT_CMD_COMP BIT(24)
#define I2C_STAT_STOP_ERR BIT(23)
-#define I2C_STAT_MAX_PORT GENMASK(19, 16)
+#define I2C_STAT_MAX_PORT GENMASK(22, 16)
#define I2C_STAT_ANY_INT BIT(15)
#define I2C_STAT_SCL_IN BIT(11)
#define I2C_STAT_SDA_IN BIT(10)
diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c
index 2fd717d8dd30..71d7bae2cbca 100644
--- a/drivers/i2c/busses/i2c-mlxcpld.c
+++ b/drivers/i2c/busses/i2c-mlxcpld.c
@@ -337,9 +337,9 @@ static int mlxcpld_i2c_wait_for_tc(struct mlxcpld_i2c_priv *priv)
if (priv->smbus_block && (val & MLXCPLD_I2C_SMBUS_BLK_BIT)) {
mlxcpld_i2c_read_comm(priv, MLXCPLD_LPCI2C_NUM_DAT_REG,
&datalen, 1);
- if (unlikely(datalen > (I2C_SMBUS_BLOCK_MAX + 1))) {
+ if (unlikely(datalen > I2C_SMBUS_BLOCK_MAX)) {
dev_err(priv->dev, "Incorrect smbus block read message len\n");
- return -E2BIG;
+ return -EPROTO;
}
} else {
datalen = priv->xfer.data_len;
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index 18d1e4fd4cf3..7f130829bf01 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -367,7 +367,6 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_RX_TRANS_LEN);
- geni_se_setup_m_cmd(se, I2C_READ, m_param);
if (dma_buf && geni_se_rx_dma_prep(se, dma_buf, len, &rx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@@ -375,6 +374,8 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
+ geni_se_setup_m_cmd(se, I2C_READ, m_param);
+
time_left = wait_for_completion_timeout(&gi2c->done, XFER_TIMEOUT);
if (!time_left)
geni_i2c_abort_xfer(gi2c);
@@ -408,7 +409,6 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
geni_se_select_mode(se, GENI_SE_FIFO);
writel_relaxed(len, se->base + SE_I2C_TX_TRANS_LEN);
- geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
if (dma_buf && geni_se_tx_dma_prep(se, dma_buf, len, &tx_dma)) {
geni_se_select_mode(se, GENI_SE_FIFO);
@@ -416,6 +416,8 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
dma_buf = NULL;
}
+ geni_se_setup_m_cmd(se, I2C_WRITE, m_param);
+
if (!dma_buf) /* Get FIFO IRQ */
writel_relaxed(1, se->base + SE_GENI_TX_WATERMARK_REG);
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index a45c4bf1ec01..2e3e1bb75013 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -868,6 +868,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
/* disable irqs and ensure none is running before clearing ptr */
rcar_i2c_write(priv, ICSIER, 0);
rcar_i2c_write(priv, ICSCR, 0);
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
synchronize_irq(priv->irq);
priv->slave = NULL;
@@ -969,6 +970,8 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
+ rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
+
if (priv->devtype == I2C_RCAR_GEN3) {
priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
if (!IS_ERR(priv->rstc)) {
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index d1f278f73011..26f03a14a478 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -816,31 +816,6 @@ out_err_silent:
EXPORT_SYMBOL_GPL(i2c_new_client_device);
/**
- * i2c_new_device - instantiate an i2c device
- * @adap: the adapter managing the device
- * @info: describes one I2C device; bus_num is ignored
- * Context: can sleep
- *
- * This deprecated function has the same functionality as
- * @i2c_new_client_device, it just returns NULL instead of an ERR_PTR in case of
- * an error for compatibility with current I2C API. It will be removed once all
- * users are converted.
- *
- * This returns the new i2c client, which may be saved for later use with
- * i2c_unregister_device(); or NULL to indicate an error.
- */
-struct i2c_client *
-i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
-{
- struct i2c_client *ret;
-
- ret = i2c_new_client_device(adap, info);
- return IS_ERR(ret) ? NULL : ret;
-}
-EXPORT_SYMBOL_GPL(i2c_new_device);
-
-
-/**
* i2c_unregister_device - reverse effect of i2c_new_*_device()
* @client: value returned from i2c_new_*_device()
* Context: can sleep
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index b34d2ff06931..f5c9787992e9 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -4,7 +4,7 @@
*
* This file contains the SMBus functions which are always included in the I2C
* core because they can be emulated via I2C. SMBus specific extensions
- * (e.g. smbalert) are handled in a seperate i2c-smbus module.
+ * (e.g. smbalert) are handled in a separate i2c-smbus module.
*
* All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
* SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
@@ -495,6 +495,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_BLOCK_PROC_CALL:
+ if (msg[1].buf[0] > I2C_SMBUS_BLOCK_MAX) {
+ dev_err(&adapter->dev,
+ "Invalid block size returned: %d\n",
+ msg[1].buf[0]);
+ status = -EPROTO;
+ goto cleanup;
+ }
for (i = 0; i < msg[1].buf[0] + 1; i++)
data->block[i] = msg[1].buf[i];
break;
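The new length check guards the copy right after it: the byte count in msg[1].buf[0] comes from the slave device, and without the check a misbehaving device reporting more than I2C_SMBUS_BLOCK_MAX bytes would overflow the fixed-size block buffer. A small user-space model of the guarded copy (function and variable names here are illustrative, not the kernel's):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define I2C_SMBUS_BLOCK_MAX 32

    /* buf[0] is the count reported by the device, buf[1..] the payload. */
    static int copy_smbus_block(uint8_t dst[I2C_SMBUS_BLOCK_MAX + 2],
                                const uint8_t *buf)
    {
        if (buf[0] > I2C_SMBUS_BLOCK_MAX)
            return -EPROTO;            /* reject a bogus length from the wire */
        memcpy(dst, buf, buf[0] + 1);  /* count byte plus payload */
        return 0;
    }

    int main(void)
    {
        uint8_t dst[I2C_SMBUS_BLOCK_MAX + 2];
        uint8_t good[3] = { 2, 0xde, 0xad };
        uint8_t bad[2]  = { 200, 0x00 };

        printf("%d %d\n", copy_smbus_block(dst, good),
               copy_smbus_block(dst, bad)); /* second call fails */
        return 0;
    }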
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 00e100fc845a..813bca7cfc3e 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1685,10 +1685,13 @@ static int mma8452_probe(struct i2c_client *client,
ret = mma8452_set_freefall_mode(data, false);
if (ret < 0)
- goto buffer_cleanup;
+ goto unregister_device;
return 0;
+unregister_device:
+ iio_device_unregister(indio_dev);
+
buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
diff --git a/drivers/iio/adc/ad7780.c b/drivers/iio/adc/ad7780.c
index f47606ebbbbe..b33fe6c3907e 100644
--- a/drivers/iio/adc/ad7780.c
+++ b/drivers/iio/adc/ad7780.c
@@ -329,7 +329,7 @@ static int ad7780_probe(struct spi_device *spi)
ret = ad7780_init_gpios(&spi->dev, st);
if (ret)
- goto error_cleanup_buffer_and_trigger;
+ return ret;
st->reg = devm_regulator_get(&spi->dev, "avdd");
if (IS_ERR(st->reg))
diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c
index c24c8da99eb4..7af8f0510535 100644
--- a/drivers/iio/adc/adi-axi-adc.c
+++ b/drivers/iio/adc/adi-axi-adc.c
@@ -332,12 +332,12 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev)
if (cl->dev->of_node != cln)
continue;
- if (!try_module_get(dev->driver->owner)) {
+ if (!try_module_get(cl->dev->driver->owner)) {
mutex_unlock(&registered_clients_lock);
return ERR_PTR(-ENODEV);
}
- get_device(dev);
+ get_device(cl->dev);
cl->info = info;
mutex_unlock(&registered_clients_lock);
return cl;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index e9f87e42ff4f..a3507624b30f 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -65,6 +65,7 @@ static const struct reg_field afe4403_reg_fields[] = {
* @regulator: Pointer to the regulator for the IC
* @trig: IIO trigger for this device
* @irq: ADC_RDY line interrupt number
+ * @buffer: Used to construct data layout to push into IIO buffer.
*/
struct afe4403_data {
struct device *dev;
@@ -74,6 +75,8 @@ struct afe4403_data {
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
+ /* Ensure suitable alignment for timestamp */
+ s32 buffer[8] __aligned(8);
};
enum afe4403_chan_id {
@@ -309,7 +312,6 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct afe4403_data *afe = iio_priv(indio_dev);
int ret, bit, i = 0;
- s32 buffer[8];
u8 tx[4] = {AFE440X_CONTROL0, 0x0, 0x0, AFE440X_CONTROL0_READ};
u8 rx[3];
@@ -326,7 +328,7 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
if (ret)
goto err;
- buffer[i++] = get_unaligned_be24(&rx[0]);
+ afe->buffer[i++] = get_unaligned_be24(&rx[0]);
}
/* Disable reading from the device */
@@ -335,7 +337,8 @@ static irqreturn_t afe4403_trigger_handler(int irq, void *private)
if (ret)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer,
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
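This change, and the similar ones to afe4404, hdc100x, hts221, ak8974 and ms5611 further down, all address the same problem: iio_push_to_buffers_with_timestamp() appends a naturally aligned s64 timestamp to the scan it is given, so the source buffer must be 8-byte aligned and sized to hold that timestamp. A bare s16/s32 array on the stack guarantees neither, and any uninitialized padding in it can also leak stack contents into the buffer. A plain user-space sketch (illustrative only, mirroring the layout the fixed drivers adopt) shows where the timestamp slot ends up:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <stdalign.h>

    /* Mirrors the driver-private scan struct the patches introduce. */
    struct scan {
        uint16_t channels[2];
        int64_t ts __attribute__((aligned(8)));  /* timestamp slot */
    };

    int main(void)
    {
        /* ts lands at an 8-byte-aligned offset, with padding zeroed when the
         * containing driver state is allocated with kzalloc()/iio_priv(). */
        printf("sizeof  = %zu\n", sizeof(struct scan));          /* 16 */
        printf("ts off  = %zu\n", offsetof(struct scan, ts));    /* 8 */
        printf("alignof = %zu\n", alignof(struct scan));         /* 8 */
        return 0;
    }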
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index e728bbb21ca8..cebb1fd4d0b1 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -83,6 +83,7 @@ static const struct reg_field afe4404_reg_fields[] = {
* @regulator: Pointer to the regulator for the IC
* @trig: IIO trigger for this device
* @irq: ADC_RDY line interrupt number
+ * @buffer: Used to construct a scan to push to the iio buffer.
*/
struct afe4404_data {
struct device *dev;
@@ -91,6 +92,7 @@ struct afe4404_data {
struct regulator *regulator;
struct iio_trigger *trig;
int irq;
+ s32 buffer[10] __aligned(8);
};
enum afe4404_chan_id {
@@ -328,17 +330,17 @@ static irqreturn_t afe4404_trigger_handler(int irq, void *private)
struct iio_dev *indio_dev = pf->indio_dev;
struct afe4404_data *afe = iio_priv(indio_dev);
int ret, bit, i = 0;
- s32 buffer[10];
for_each_set_bit(bit, indio_dev->active_scan_mask,
indio_dev->masklength) {
ret = regmap_read(afe->regmap, afe4404_channel_values[bit],
- &buffer[i++]);
+ &afe->buffer[i++]);
if (ret)
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);
+ iio_push_to_buffers_with_timestamp(indio_dev, afe->buffer,
+ pf->timestamp);
err:
iio_trigger_notify_done(indio_dev->trig);
diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
index 7ecd2ffa3132..665eb7e38293 100644
--- a/drivers/iio/humidity/hdc100x.c
+++ b/drivers/iio/humidity/hdc100x.c
@@ -38,6 +38,11 @@ struct hdc100x_data {
/* integration time of the sensor */
int adc_int_us[2];
+ /* Ensure natural alignment of timestamp */
+ struct {
+ __be16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
/* integration time in us */
@@ -322,7 +327,6 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
struct i2c_client *client = data->client;
int delay = data->adc_int_us[0] + data->adc_int_us[1];
int ret;
- s16 buf[8]; /* 2x s16 + padding + 8 byte timestamp */
/* dual read starts at temp register */
mutex_lock(&data->lock);
@@ -333,13 +337,13 @@ static irqreturn_t hdc100x_trigger_handler(int irq, void *p)
}
usleep_range(delay, delay + 1000);
- ret = i2c_master_recv(client, (u8 *)buf, 4);
+ ret = i2c_master_recv(client, (u8 *)data->scan.channels, 4);
if (ret < 0) {
dev_err(&client->dev, "cannot read sensor data\n");
goto err;
}
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &data->scan,
iio_get_time_ns(indio_dev));
err:
mutex_unlock(&data->lock);
diff --git a/drivers/iio/humidity/hts221.h b/drivers/iio/humidity/hts221.h
index 7d6771f7cf47..b2eb5abeaccd 100644
--- a/drivers/iio/humidity/hts221.h
+++ b/drivers/iio/humidity/hts221.h
@@ -14,8 +14,6 @@
#include <linux/iio/iio.h>
-#define HTS221_DATA_SIZE 2
-
enum hts221_sensor_type {
HTS221_SENSOR_H,
HTS221_SENSOR_T,
@@ -39,6 +37,11 @@ struct hts221_hw {
bool enabled;
u8 odr;
+ /* Ensure natural alignment of timestamp */
+ struct {
+ __le16 channels[2];
+ s64 ts __aligned(8);
+ } scan;
};
extern const struct dev_pm_ops hts221_pm_ops;
diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c
index 9fb3f33614d4..ba7d413d75ba 100644
--- a/drivers/iio/humidity/hts221_buffer.c
+++ b/drivers/iio/humidity/hts221_buffer.c
@@ -160,7 +160,6 @@ static const struct iio_buffer_setup_ops hts221_buffer_ops = {
static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
{
- u8 buffer[ALIGN(2 * HTS221_DATA_SIZE, sizeof(s64)) + sizeof(s64)];
struct iio_poll_func *pf = p;
struct iio_dev *iio_dev = pf->indio_dev;
struct hts221_hw *hw = iio_priv(iio_dev);
@@ -170,18 +169,20 @@ static irqreturn_t hts221_buffer_handler_thread(int irq, void *p)
/* humidity data */
ch = &iio_dev->channels[HTS221_SENSOR_H];
err = regmap_bulk_read(hw->regmap, ch->address,
- buffer, HTS221_DATA_SIZE);
+ &hw->scan.channels[0],
+ sizeof(hw->scan.channels[0]));
if (err < 0)
goto out;
/* temperature data */
ch = &iio_dev->channels[HTS221_SENSOR_T];
err = regmap_bulk_read(hw->regmap, ch->address,
- buffer + HTS221_DATA_SIZE, HTS221_DATA_SIZE);
+ &hw->scan.channels[1],
+ sizeof(hw->scan.channels[1]));
if (err < 0)
goto out;
- iio_push_to_buffers_with_timestamp(iio_dev, buffer,
+ iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
iio_get_time_ns(iio_dev));
out:
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1527f01a44f1..352533342702 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -130,6 +130,8 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_PM2P5] = "pm2p5",
[IIO_MOD_PM4] = "pm4",
[IIO_MOD_PM10] = "pm10",
+ [IIO_MOD_ETHANOL] = "ethanol",
+ [IIO_MOD_H2] = "h2",
};
/* relies on pairs of these shared then separate */
diff --git a/drivers/iio/magnetometer/ak8974.c b/drivers/iio/magnetometer/ak8974.c
index 810fdfd37c88..91c39352fba2 100644
--- a/drivers/iio/magnetometer/ak8974.c
+++ b/drivers/iio/magnetometer/ak8974.c
@@ -192,6 +192,11 @@ struct ak8974 {
bool drdy_irq;
struct completion drdy_complete;
bool drdy_active_low;
+ /* Ensure timestamp is naturally aligned */
+ struct {
+ __le16 channels[3];
+ s64 ts __aligned(8);
+ } scan;
};
static const char ak8974_reg_avdd[] = "avdd";
@@ -657,7 +662,6 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
{
struct ak8974 *ak8974 = iio_priv(indio_dev);
int ret;
- __le16 hw_values[8]; /* Three axes + 64bit padding */
pm_runtime_get_sync(&ak8974->i2c->dev);
mutex_lock(&ak8974->lock);
@@ -667,13 +671,13 @@ static void ak8974_fill_buffer(struct iio_dev *indio_dev)
dev_err(&ak8974->i2c->dev, "error triggering measure\n");
goto out_unlock;
}
- ret = ak8974_getresult(ak8974, hw_values);
+ ret = ak8974_getresult(ak8974, ak8974->scan.channels);
if (ret) {
dev_err(&ak8974->i2c->dev, "error getting measures\n");
goto out_unlock;
}
- iio_push_to_buffers_with_timestamp(indio_dev, hw_values,
+ iio_push_to_buffers_with_timestamp(indio_dev, &ak8974->scan,
iio_get_time_ns(indio_dev));
out_unlock:
@@ -862,19 +866,21 @@ static int ak8974_probe(struct i2c_client *i2c,
ak8974->map = devm_regmap_init_i2c(i2c, &ak8974_regmap_config);
if (IS_ERR(ak8974->map)) {
dev_err(&i2c->dev, "failed to allocate register map\n");
+ pm_runtime_put_noidle(&i2c->dev);
+ pm_runtime_disable(&i2c->dev);
return PTR_ERR(ak8974->map);
}
ret = ak8974_set_power(ak8974, AK8974_PWR_ON);
if (ret) {
dev_err(&i2c->dev, "could not power on\n");
- goto power_off;
+ goto disable_pm;
}
ret = ak8974_detect(ak8974);
if (ret) {
dev_err(&i2c->dev, "neither AK8974 nor AMI30x found\n");
- goto power_off;
+ goto disable_pm;
}
ret = ak8974_selftest(ak8974);
@@ -884,14 +890,9 @@ static int ak8974_probe(struct i2c_client *i2c,
ret = ak8974_reset(ak8974);
if (ret) {
dev_err(&i2c->dev, "AK8974 reset failed\n");
- goto power_off;
+ goto disable_pm;
}
- pm_runtime_set_autosuspend_delay(&i2c->dev,
- AK8974_AUTOSUSPEND_DELAY);
- pm_runtime_use_autosuspend(&i2c->dev);
- pm_runtime_put(&i2c->dev);
-
indio_dev->dev.parent = &i2c->dev;
switch (ak8974->variant) {
case AK8974_WHOAMI_VALUE_AMI306:
@@ -957,6 +958,11 @@ no_irq:
goto cleanup_buffer;
}
+ pm_runtime_set_autosuspend_delay(&i2c->dev,
+ AK8974_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(&i2c->dev);
+ pm_runtime_put(&i2c->dev);
+
return 0;
cleanup_buffer:
@@ -965,7 +971,6 @@ disable_pm:
pm_runtime_put_noidle(&i2c->dev);
pm_runtime_disable(&i2c->dev);
ak8974_set_power(ak8974, AK8974_PWR_OFF);
-power_off:
regulator_bulk_disable(ARRAY_SIZE(ak8974->regs), ak8974->regs);
return ret;
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 2f598ad91621..f5db9fa086f3 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -212,16 +212,21 @@ static irqreturn_t ms5611_trigger_handler(int irq, void *p)
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct ms5611_state *st = iio_priv(indio_dev);
- s32 buf[4]; /* s32 (pressure) + s32 (temp) + 2 * s32 (timestamp) */
+ /* Ensure buffer elements are naturally aligned */
+ struct {
+ s32 channels[2];
+ s64 ts __aligned(8);
+ } scan;
int ret;
mutex_lock(&st->lock);
- ret = ms5611_read_temp_and_pressure(indio_dev, &buf[1], &buf[0]);
+ ret = ms5611_read_temp_and_pressure(indio_dev, &scan.channels[1],
+ &scan.channels[0]);
mutex_unlock(&st->lock);
if (ret < 0)
goto err;
- iio_push_to_buffers_with_timestamp(indio_dev, buf,
+ iio_push_to_buffers_with_timestamp(indio_dev, &scan,
iio_get_time_ns(indio_dev));
err:
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c
index 37fe851f89af..799a8dc3e248 100644
--- a/drivers/iio/pressure/zpa2326.c
+++ b/drivers/iio/pressure/zpa2326.c
@@ -665,8 +665,10 @@ static int zpa2326_resume(const struct iio_dev *indio_dev)
int err;
err = pm_runtime_get_sync(indio_dev->dev.parent);
- if (err < 0)
+ if (err < 0) {
+ pm_runtime_put(indio_dev->dev.parent);
return err;
+ }
if (err > 0) {
/*
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 9ce787e37e22..dc0558b23158 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -918,6 +918,7 @@ static void cm_free_work(struct cm_work *work)
static void cm_queue_work_unlock(struct cm_id_private *cm_id_priv,
struct cm_work *work)
+ __releases(&cm_id_priv->lock)
{
bool immediate;
@@ -3675,10 +3676,12 @@ static int cm_send_sidr_rep_locked(struct cm_id_private *cm_id_priv,
return ret;
}
cm_id_priv->id.state = IB_CM_IDLE;
+ spin_lock_irq(&cm.lock);
if (!RB_EMPTY_NODE(&cm_id_priv->sidr_id_node)) {
rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
RB_CLEAR_NODE(&cm_id_priv->sidr_id_node);
}
+ spin_unlock_irq(&cm.lock);
return 0;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3d7cc9f0f3d4..c30cf5307ce3 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1624,6 +1624,8 @@ static struct rdma_id_private *cma_find_listener(
{
struct rdma_id_private *id_priv, *id_priv_dev;
+ lockdep_assert_held(&lock);
+
if (!bind_list)
return ERR_PTR(-EINVAL);
@@ -1670,6 +1672,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
}
}
+ mutex_lock(&lock);
/*
* Net namespace might be getting deleted while route lookup,
* cm_id lookup is in progress. Therefore, perform netdevice
@@ -1711,6 +1714,7 @@ cma_ib_id_from_event(struct ib_cm_id *cm_id,
id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
err:
rcu_read_unlock();
+ mutex_unlock(&lock);
if (IS_ERR(id_priv) && *net_dev) {
dev_put(*net_dev);
*net_dev = NULL;
@@ -2492,6 +2496,8 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
struct net *net = id_priv->id.route.addr.dev_addr.net;
int ret;
+ lockdep_assert_held(&lock);
+
if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
return;
@@ -3342,6 +3348,8 @@ static void cma_bind_port(struct rdma_bind_list *bind_list,
u64 sid, mask;
__be16 port;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
port = htons(bind_list->port);
@@ -3370,6 +3378,8 @@ static int cma_alloc_port(enum rdma_ucm_port_space ps,
struct rdma_bind_list *bind_list;
int ret;
+ lockdep_assert_held(&lock);
+
bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
if (!bind_list)
return -ENOMEM;
@@ -3396,6 +3406,8 @@ static int cma_port_is_unique(struct rdma_bind_list *bind_list,
struct sockaddr *saddr = cma_src_addr(id_priv);
__be16 dport = cma_port(daddr);
+ lockdep_assert_held(&lock);
+
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
struct sockaddr *cur_saddr = cma_src_addr(cur_id);
@@ -3435,6 +3447,8 @@ static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
unsigned int rover;
struct net *net = id_priv->id.route.addr.dev_addr.net;
+ lockdep_assert_held(&lock);
+
inet_get_local_port_range(net, &low, &high);
remaining = (high - low) + 1;
rover = prandom_u32() % remaining + low;
@@ -3482,6 +3496,8 @@ static int cma_check_port(struct rdma_bind_list *bind_list,
struct rdma_id_private *cur_id;
struct sockaddr *addr, *cur_addr;
+ lockdep_assert_held(&lock);
+
addr = cma_src_addr(id_priv);
hlist_for_each_entry(cur_id, &bind_list->owners, node) {
if (id_priv == cur_id)
@@ -3512,6 +3528,8 @@ static int cma_use_port(enum rdma_ucm_port_space ps,
unsigned short snum;
int ret;
+ lockdep_assert_held(&lock);
+
snum = ntohs(cma_port(cma_src_addr(id_priv)));
if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return -EACCES;
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index 2257d7f7810f..738d1faf4bba 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -202,7 +202,7 @@ static int __rdma_counter_unbind_qp(struct ib_qp *qp)
return ret;
}
-static void counter_history_stat_update(const struct rdma_counter *counter)
+static void counter_history_stat_update(struct rdma_counter *counter)
{
struct ib_device *dev = counter->device;
struct rdma_port_counter *port_counter;
@@ -212,6 +212,8 @@ static void counter_history_stat_update(const struct rdma_counter *counter)
if (!port_counter->hstats)
return;
+ rdma_counter_query_stats(counter);
+
for (i = 0; i < counter->stats->num_counters; i++)
port_counter->hstats->value[i] += counter->stats->value[i];
}
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 186e0d652e8b..a09f8e3c7f3f 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -509,10 +509,10 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
flush_workqueue(port_priv->wq);
- ib_cancel_rmpp_recvs(mad_agent_priv);
deref_mad_agent(mad_agent_priv);
wait_for_completion(&mad_agent_priv->comp);
+ ib_cancel_rmpp_recvs(mad_agent_priv);
ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
@@ -2718,6 +2718,7 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
DMA_FROM_DEVICE);
if (unlikely(ib_dma_mapping_error(qp_info->port_priv->device,
sg_list.addr))) {
+ kfree(mad_priv);
ret = -ENOMEM;
break;
}
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 38de4942c682..6d3ed7c6e19e 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -470,40 +470,46 @@ static struct ib_uobject *
alloc_begin_fd_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs)
{
- const struct uverbs_obj_fd_type *fd_type =
- container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
+ const struct uverbs_obj_fd_type *fd_type;
int new_fd;
- struct ib_uobject *uobj;
+ struct ib_uobject *uobj, *ret;
struct file *filp;
+ uobj = alloc_uobj(attrs, obj);
+ if (IS_ERR(uobj))
+ return uobj;
+
+ fd_type =
+ container_of(obj->type_attrs, struct uverbs_obj_fd_type, type);
if (WARN_ON(fd_type->fops->release != &uverbs_uobject_fd_release &&
- fd_type->fops->release != &uverbs_async_event_release))
- return ERR_PTR(-EINVAL);
+ fd_type->fops->release != &uverbs_async_event_release)) {
+ ret = ERR_PTR(-EINVAL);
+ goto err_fd;
+ }
new_fd = get_unused_fd_flags(O_CLOEXEC);
- if (new_fd < 0)
- return ERR_PTR(new_fd);
-
- uobj = alloc_uobj(attrs, obj);
- if (IS_ERR(uobj))
+ if (new_fd < 0) {
+ ret = ERR_PTR(new_fd);
goto err_fd;
+ }
/* Note that uverbs_uobject_fd_release() is called during abort */
filp = anon_inode_getfile(fd_type->name, fd_type->fops, NULL,
fd_type->flags);
if (IS_ERR(filp)) {
- uverbs_uobject_put(uobj);
- uobj = ERR_CAST(filp);
- goto err_fd;
+ ret = ERR_CAST(filp);
+ goto err_getfile;
}
uobj->object = filp;
uobj->id = new_fd;
return uobj;
-err_fd:
+err_getfile:
put_unused_fd(new_fd);
- return uobj;
+err_fd:
+ uverbs_uobject_put(uobj);
+ return ret;
}
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
@@ -643,9 +649,6 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
{
struct ib_uverbs_file *ufile = attrs->ufile;
- /* alloc_commit consumes the uobj kref */
- uobj->uapi_object->type_class->alloc_commit(uobj);
-
/* kref is held so long as the uobj is on the uobj list. */
uverbs_uobject_get(uobj);
spin_lock_irq(&ufile->uobjects_lock);
@@ -655,6 +658,9 @@ void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
/* matches atomic_set(-1) in alloc_uobj */
atomic_set(&uobj->usecnt, 0);
+ /* alloc_commit consumes the uobj kref */
+ uobj->uapi_object->type_class->alloc_commit(uobj);
+
/* Matches the down_read in rdma_alloc_begin_uobject */
up_read(&ufile->hw_destroy_rwsem);
}
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a2ed09a3c714..8c930bf1df89 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -829,13 +829,20 @@ static int ib_nl_get_path_rec_attrs_len(ib_sa_comp_mask comp_mask)
return len;
}
-static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
+static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
void *data;
struct ib_sa_mad *mad;
int len;
+ unsigned long flags;
+ unsigned long delay;
+ gfp_t gfp_flag;
+ int ret;
+
+ INIT_LIST_HEAD(&query->list);
+ query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
mad = query->mad_buf->mad;
len = ib_nl_get_path_rec_attrs_len(mad->sa_hdr.comp_mask);
@@ -860,36 +867,25 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask)
/* Repair the nlmsg header length */
nlmsg_end(skb, nlh);
- return rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_mask);
-}
+ gfp_flag = ((gfp_mask & GFP_ATOMIC) == GFP_ATOMIC) ? GFP_ATOMIC :
+ GFP_NOWAIT;
-static int ib_nl_make_request(struct ib_sa_query *query, gfp_t gfp_mask)
-{
- unsigned long flags;
- unsigned long delay;
- int ret;
+ spin_lock_irqsave(&ib_nl_request_lock, flags);
+ ret = rdma_nl_multicast(&init_net, skb, RDMA_NL_GROUP_LS, gfp_flag);
- INIT_LIST_HEAD(&query->list);
- query->seq = (u32)atomic_inc_return(&ib_nl_sa_request_seq);
+ if (ret)
+ goto out;
- /* Put the request on the list first.*/
- spin_lock_irqsave(&ib_nl_request_lock, flags);
+ /* Put the request on the list.*/
delay = msecs_to_jiffies(sa_local_svc_timeout_ms);
query->timeout = delay + jiffies;
list_add_tail(&query->list, &ib_nl_request_list);
/* Start the timeout if this is the only request */
if (ib_nl_request_list.next == &query->list)
queue_delayed_work(ib_nl_wq, &ib_nl_timed_work, delay);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- ret = ib_nl_send_msg(query, gfp_mask);
- if (ret) {
- ret = -EIO;
- /* Remove the request */
- spin_lock_irqsave(&ib_nl_request_lock, flags);
- list_del(&query->list);
- spin_unlock_irqrestore(&ib_nl_request_lock, flags);
- }
+out:
+ spin_unlock_irqrestore(&ib_nl_request_lock, flags);
return ret;
}
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index 08313f7c73bc..7dd082441333 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+ props->max_pkeys = 1;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 4633a0ce1a8c..2ced236e1553 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 5eed4360695f..cb7ad1288821 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -831,6 +831,29 @@ wq_error:
}
/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+}
+
+/**
* enable_general_intr() - Enable the IRQs that will be handled by the
* general interrupt handler.
* @dd: valid devdata
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
* We can't count on interrupts since we are stopping.
*/
hfi1_quiet_serdes(ppd);
-
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- if (ppd->link_wq) {
- destroy_workqueue(ppd->link_wq);
- ppd->link_wq = NULL;
- }
+ if (ppd->hfi1_wq)
+ flush_workqueue(ppd->hfi1_wq);
+ if (ppd->link_wq)
+ flush_workqueue(ppd->link_wq);
}
sdma_exit(dd);
}
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
* clear dma engines, etc.
*/
shutdown_device(dd);
+ destroy_workqueues(dd);
stop_timers(dd);
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 07847cb72169..d580aa17ae37 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
* @wait_head: the wait queue
*
* This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
* buffer) is run out.
*/
static inline void iowait_queue(bool pkts_sent, struct iowait *w,
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 185c9b02c974..b8c9d0a003fb 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
struct sdma_engine *sde;
struct list_head tx_list;
u64 sent_txreqs;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 883cb9d48022..9df292b51a05 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
return sent - completed;
}
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
- if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs)) >=
- min_t(unsigned int, txq->priv->netdev->tx_queue_len,
- txq->tx_ring.max_items - 1)))
+ return hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_inc_return(&txq->stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_dec_and_test(&txq->stops))
+ netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ ++txq->sent_txreqs;
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+ !atomic_xchg(&txq->ring_full, 1))
+ hfi1_ipoib_stop_txq(txq);
+}
+
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
struct net_device *dev = txq->priv->netdev;
- /* If the queue is already running just return */
- if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
- return;
-
/* If shutting down just return as queue state is irrelevant */
if (unlikely(dev->reg_state != NETREG_REGISTERED))
return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* Use the minimum of the current tx_queue_len or the rings max txreqs
* to protect against ring overflow.
*/
- if (hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs))
- < min_t(unsigned int, dev->tx_queue_len,
- txq->tx_ring.max_items) >> 1)
- netif_wake_subqueue(dev, txq->q_idx);
+ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+ atomic_xchg(&txq->ring_full, 0))
+ hfi1_ipoib_wake_txq(txq);
}
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
if (unlikely(!tx))
return ERR_PTR(-ENOMEM);
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
tx->priv = priv;
tx->txq = txp->txq;
tx->skb = skb;
+ INIT_LIST_HEAD(&tx->txreq.list);
hfi1_ipoib_build_ib_tx_headers(tx, txp);
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
ret = hfi1_ipoib_submit_tx(txq, tx);
if (likely(!ret)) {
+tx_ok:
trace_sdma_output_ibhdr(tx->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
txq->pkts_sent = false;
- if (ret == -EBUSY) {
- list_add_tail(&tx->txreq.list, &txq->tx_list);
-
- trace_sdma_output_ibhdr(tx->priv->dd,
- &tx->sdma_hdr.hdr,
- ib_is_sc5(txp->flow.sc5));
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
-
- if (ret == -ECOMM) {
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
+ if (ret == -EBUSY || ret == -ECOMM)
+ goto tx_ok;
sdma_txclean(priv->dd, &tx->txreq);
dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
struct ipoib_txreq *tx;
/* Has the flow change ? */
- if (txq->flow.as_int != txp->flow.as_int)
- (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+ if (txq->flow.as_int != txp->flow.as_int) {
+ int ret;
+
+ ret = hfi1_ipoib_flush_tx_list(dev, txq);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY)
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
if (IS_ERR(tx)) {
int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
return -EAGAIN;
}
- netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
- if (list_empty(&txq->wait.list))
+ if (list_empty(&txreq->list))
+ /* came from non-list submit */
+ list_add_tail(&txreq->list, &txq->tx_list);
+ if (list_empty(&txq->wait.list)) {
+ if (!atomic_xchg(&txq->no_desc, 1))
+ hfi1_ipoib_stop_txq(txq);
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
write_sequnlock(&sde->waitlock);
return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
struct net_device *dev = txq->priv->netdev;
if (likely(dev->reg_state == NETREG_REGISTERED) &&
- likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
- netif_wake_subqueue(dev, txq->q_idx);
+ if (atomic_xchg(&txq->no_desc, 0))
+ hfi1_ipoib_wake_txq(txq);
}
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->sde = NULL;
INIT_LIST_HEAD(&txq->tx_list);
atomic64_set(&txq->complete_txreqs, 0);
+ atomic_set(&txq->stops, 0);
+ atomic_set(&txq->ring_full, 0);
+ atomic_set(&txq->no_desc, 0);
txq->q_idx = i;
txq->flow.tx_queue = 0xff;
txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
atomic64_inc(complete_txreqs);
}
- if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ if (hfi1_ipoib_used(txq))
dd_dev_warn(txq->priv->dd,
"txq %d not empty found %llu requests\n",
txq->q_idx,
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 63688e85e8da..6d263c9749b3 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
{
if (dd->dummy_netdev) {
dd_dev_info(dd, "hfi1 netdev freed\n");
- free_netdev(dd->dummy_netdev);
+ kfree(dd->dummy_netdev);
dd->dummy_netdev = NULL;
}
}
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 0c2ae9f7b3e8..be62284e42d9 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
- mtu = OPA_MTU_8192;
+ mtu = (enum ib_mtu)OPA_MTU_8192;
return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 243b4ba0b6f6..facff133139a 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((dd->flags & HFI1_SHUTDOWN))
+ return true;
return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index bfa6e081cb56..d2d526c5a756 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->mr = NULL;
tx->sde = priv->s_sde;
tx->psc = priv->s_sendcontext;
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index a77fa6730b2d..479fa557993e 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -898,13 +898,14 @@ struct hns_roce_hw {
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
- int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx);
+ int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr, unsigned long mtpt_idx);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
- int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index d02207cd30df..cf39f560b800 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
val);
}
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index c597d7281629..0618ced45bf8 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -910,7 +910,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
+ hw_resetting = ops->get_cmdq_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt)
@@ -2529,10 +2529,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
@@ -2571,7 +2571,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
return 0;
}
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2620,7 +2621,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
return ret;
}
@@ -2666,15 +2667,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
mr->iova = iova;
mr->size = size;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
}
return ret;
}
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
dma_addr_t pbl_ba = 0;
@@ -3953,6 +3954,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ return IB_MTU_4096;
+
+ return attr->path_mtu;
+}
+
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@@ -3964,6 +3974,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
+ enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4061,23 +4072,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
- /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
+ mtu = get_mtu(ibqp, attr);
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+ }
+
+#define MAX_LP_MSG_LEN 65536
+ /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
- ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
+ ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
- else if (attr_mask & IB_QP_PATH_MTU)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
-
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, 0);
-
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 4c0bbb12770d..6b226a5eb7db 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
- hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+ mtpt_idx);
else
- ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+ ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 343a8b8361e7..6f99ed03d88e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -511,7 +511,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
mdev_port_num);
if (err)
goto out;
- ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 7d2ec9ee5097..1ab676b66894 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
+ /*
+ * All work on the prefetch list must be completed, xa_erase() prevented
+ * new work from being created.
+ */
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * At this point it is forbidden for any other thread to enter
+ * pagefault_mr() on this imr. It is already forbidden to call
+	 * pagefault_mr() on an implicit child. Due to this, additions to
+ * implicit_children are prevented.
+ */
+
+ /*
+ * Block destroy_unused_implicit_child_mr() from incrementing
+ * num_deferred_work.
+ */
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
- * num_deferred_work can only be incremented inside the odp_srcu, or
- * under xa_lock while the child is in the xarray. Thus at this point
- * it is only decreasing, and all work holding it is now on the wq.
+ * Wait for any concurrent destroy_unused_implicit_child_mr() to
+ * complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 81bf6b975e0e..e050eade97a1 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1862,7 +1862,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
- if (MLX5_CAP_GEN(mdev, ece_support))
+ if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
@@ -2341,18 +2341,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
unsigned long flags;
int err;
- if (qp->ibqp.rwq_ind_tbl) {
+ if (qp->is_rss) {
destroy_rss_raw_qp_tir(dev, qp);
return;
}
- base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ base = (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
- &qp->raw_packet_qp.rq.base :
- &qp->trans_qp.base;
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
- if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+ if (qp->type != IB_QPT_RAW_PACKET &&
!(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp, NULL);
@@ -2368,8 +2368,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
base->mqp.qpn);
}
- get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
- &send_cq, &recv_cq);
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+ &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2391,7 +2391,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
@@ -2668,6 +2668,13 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
return (create_flags) ? -EINVAL : 0;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_INTEGRITY_EN,
+ MLX5_CAP_GEN(mdev, sho), qp);
process_create_flag(dev, &create_flags,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX5_CAP_GEN(mdev, block_lb_mc), qp);
@@ -2873,7 +2880,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
static int check_ucmd_data(struct mlx5_ib_dev *dev,
struct mlx5_create_qp_params *params)
{
- struct ib_qp_init_attr *attr = params->attr;
struct ib_udata *udata = params->udata;
size_t size, last;
int ret;
@@ -2885,14 +2891,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
*/
last = sizeof(struct mlx5_ib_create_qp_rss);
else
- /* IB_QPT_RAW_PACKET doesn't have ECE data */
- switch (attr->qp_type) {
- case IB_QPT_RAW_PACKET:
- last = offsetof(struct mlx5_ib_create_qp, ece_options);
- break;
- default:
- last = offsetof(struct mlx5_ib_create_qp, reserved);
- }
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
if (udata->inlen <= last)
return 0;
@@ -2907,7 +2906,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
if (!ret)
mlx5_ib_dbg(
dev,
- "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
udata->inlen, params->ucmd_size, last, size);
return ret ? 0 : -EINVAL;
}
@@ -3002,10 +3001,19 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
return &qp->ibqp;
destroy_qp:
- if (qp->type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT) {
mlx5_ib_destroy_dct(qp);
- else
+ } else {
+ /*
+		 * These lines below are a temporary solution until QP allocation
+		 * is moved under IB/core responsibility.
+ */
+ qp->ibqp.send_cq = attr->send_cq;
+ qp->ibqp.recv_cq = attr->recv_cq;
+ qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata);
+ }
+
qp = NULL;
free_qp:
kfree(qp);
@@ -4162,8 +4170,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (udata->outlen < min_resp_len)
return -EINVAL;
- resp.response_length = min_resp_len;
-
/*
* If we don't have enough space for the ECE options,
* simply indicate it with resp.response_length.
@@ -4384,8 +4390,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
MLX5_GET(ads, path, src_addr_index),
MLX5_GET(ads, path, hop_limit),
MLX5_GET(ads, path, tclass));
- memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
- MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
}
}
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index c19d91d6dce8..7c3968ef9cd1 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
int ece = 0;
switch (opcode) {
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ ece = MLX5_GET(init2init_qp_out, out, ece);
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
ece = MLX5_GET(init2rtr_qp_out, out, ece);
break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
case MLX5_CMD_OP_RTS2RTS_QP:
ece = MLX5_GET(rts2rts_qp_out, out, ece);
break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ ece = MLX5_GET(rst2init_qp_out, out, ece);
+ break;
default:
break;
}
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
break;
default:
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 6f5eadc4d183..37aaacebd3f2 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
- xa_lock(&table->array);
+ xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
- xa_unlock(&table->array);
+ xa_unlock_irq(&table->array);
return srq;
}
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 792eecd206b6..97fc7dd353b0 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+		/* Only connect_request and reply have valid private data;
+		 * for the rest of the events it may be left over from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}
if (ep->cm_id)
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 511b72809e14..7db35dd6ad74 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1204,7 +1204,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
err = alloc_ud_wq_attr(qp, rdi->dparms.node);
if (err) {
ret = (ERR_PTR(err));
- goto bail_driver_priv;
+ goto bail_rq_rvt;
}
if (init_attr->create_flags & IB_QP_CREATE_NETDEV_USE)
@@ -1314,9 +1314,11 @@ bail_qpn:
rvt_free_qpn(&rdi->qp_dev->qpn_table, qp->ibqp.qp_num);
bail_rq_wq:
- rvt_free_rq(&qp->r_rq);
free_ud_wq_attr(qp);
+bail_rq_rvt:
+ rvt_free_rq(&qp->r_rq);
+
bail_driver_priv:
rdi->driver_f.qp_priv_free(rdi, qp);
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index a0b8cc643c5c..ed60c9e4643e 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -67,12 +67,13 @@ static int siw_device_register(struct siw_device *sdev, const char *name)
static int dev_id = 1;
int rv;
+ sdev->vendor_part_id = dev_id++;
+
rv = ib_register_device(base_dev, name);
if (rv) {
pr_warn("siw: device registration error %d\n", rv);
return rv;
}
- sdev->vendor_part_id = dev_id++;
siw_dbg(base_dev, "HWaddr=%pM\n", sdev->netdev->dev_addr);
diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c
index 650520244ed0..7271d705f4b0 100644
--- a/drivers/infiniband/sw/siw/siw_qp_rx.c
+++ b/drivers/infiniband/sw/siw/siw_qp_rx.c
@@ -139,7 +139,8 @@ static int siw_rx_pbl(struct siw_rx_stream *srx, int *pbl_idx,
break;
bytes = min(bytes, len);
- if (siw_rx_kva(srx, (void *)buf_addr, bytes) == bytes) {
+ if (siw_rx_kva(srx, (void *)(uintptr_t)buf_addr, bytes) ==
+ bytes) {
copied += bytes;
offset += bytes;
len -= bytes;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 3f9354baac4b..6291fb5fa015 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -951,6 +951,8 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet)
u8 hover_info = packet[ETP_HOVER_INFO_OFFSET];
bool contact_valid, hover_event;
+ pm_wakeup_event(&data->client->dev, 0);
+
hover_event = hover_info & 0x40;
for (i = 0; i < ETP_MAX_FINGERS; i++) {
contact_valid = tp_info & (1U << (3 + i));
@@ -974,6 +976,8 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1];
int x, y;
+ pm_wakeup_event(&data->client->dev, 0);
+
if (!data->tp_input) {
dev_warn_once(&data->client->dev,
"received a trackpoint report while no trackpoint device has been created. Please report upstream.\n");
@@ -998,7 +1002,6 @@ static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report)
static irqreturn_t elan_isr(int irq, void *dev_id)
{
struct elan_tp_data *data = dev_id;
- struct device *dev = &data->client->dev;
int error;
u8 report[ETP_MAX_REPORT_LEN];
@@ -1016,8 +1019,6 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
if (error)
goto out;
- pm_wakeup_event(dev, 0);
-
switch (report[ETP_REPORT_ID_OFFSET]) {
case ETP_REPORT_ID:
elan_report_absolute(data, report);
@@ -1026,7 +1027,7 @@ static irqreturn_t elan_isr(int irq, void *dev_id)
elan_report_trackpoint(data, report);
break;
default:
- dev_err(dev, "invalid report id data (%x)\n",
+ dev_err(&data->client->dev, "invalid report id data (%x)\n",
report[ETP_REPORT_ID_OFFSET]);
}
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 758dae8d6500..4b81b2d0fe06 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -179,6 +179,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0093", /* T480 */
"LEN0096", /* X280 */
"LEN0097", /* X280 -> ALPS trackpoint */
+ "LEN0099", /* X1 Extreme 1st */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 654252361653..13eacf6ab431 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -1021,7 +1021,7 @@ static int __init hp_sdc_register(void)
hp_sdc.base_io = (unsigned long) 0xf0428000;
hp_sdc.data_io = (unsigned long) hp_sdc.base_io + 1;
hp_sdc.status_io = (unsigned long) hp_sdc.base_io + 3;
- if (!probe_kernel_read(&i, (unsigned char *)hp_sdc.data_io, 1))
+ if (!copy_from_kernel_nofault(&i, (unsigned char *)hp_sdc.data_io, 1))
hp_sdc.dev = (void *)1;
hp_sdc.dev_err = hp_sdc_init();
#endif
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 7b08ff8ddf35..7d7f73702726 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -426,6 +426,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Lenovo XiaoXin Air 12 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "80UN"),
+ },
+ },
+ {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 233cb1085bbd..5477a5718202 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1325,7 +1325,6 @@ static int elants_i2c_probe(struct i2c_client *client,
0, MT_TOOL_PALM, 0, 0);
input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res);
input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res);
- input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1);
touchscreen_parse_properties(ts->input, true, &ts->prop);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index b510f67dfa49..b0f308cb7f7c 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS
config INTEL_IOMMU_SVM
bool "Support for Shared Virtual Memory with Intel IOMMU"
- depends on INTEL_IOMMU && X86
+ depends on INTEL_IOMMU && X86_64
select PCI_PASID
select PCI_PRI
select MMU_NOTIFIER
@@ -305,6 +305,7 @@ config ROCKCHIP_IOMMU
config SUN50I_IOMMU
bool "Allwinner H6 IOMMU Support"
+ depends on HAS_DMA
depends on ARCH_SUNXI || COMPILE_TEST
select ARM_DMA_USE_IOMMU
select IOMMU_API
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index f892992c8744..57309716fd18 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -102,7 +102,7 @@ extern int __init add_special_device(u8 type, u8 id, u16 *devid,
#ifdef CONFIG_DMI
void amd_iommu_apply_ivrs_quirks(void);
#else
-static void amd_iommu_apply_ivrs_quirks(void) { }
+static inline void amd_iommu_apply_ivrs_quirks(void) { }
#endif
#endif
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 74cca1757172..2f22326ee4df 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3985,9 +3985,10 @@ int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
if (!fn)
return -ENOMEM;
iommu->ir_domain = irq_domain_create_tree(fn, &amd_ir_domain_ops, iommu);
- irq_domain_free_fwnode(fn);
- if (!iommu->ir_domain)
+ if (!iommu->ir_domain) {
+ irq_domain_free_fwnode(fn);
return -ENOMEM;
+ }
iommu->ir_domain->parent = arch_get_ir_parent_domain();
iommu->msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain,
diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm-smmu-qcom.c
index cf01d0215a39..be4318044f96 100644
--- a/drivers/iommu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm-smmu-qcom.c
@@ -12,7 +12,7 @@ struct qcom_smmu {
struct arm_smmu_device smmu;
};
-static const struct of_device_id qcom_smmu_client_of_match[] = {
+static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
{ .compatible = "qcom,adreno" },
{ .compatible = "qcom,mdp4" },
{ .compatible = "qcom,mdss" },
diff --git a/drivers/iommu/hyperv-iommu.c b/drivers/iommu/hyperv-iommu.c
index 3c0c67a99c7b..8919c1c70b68 100644
--- a/drivers/iommu/hyperv-iommu.c
+++ b/drivers/iommu/hyperv-iommu.c
@@ -155,7 +155,10 @@ static int __init hyperv_prepare_irq_remapping(void)
0, IOAPIC_REMAPPING_ENTRY, fn,
&hyperv_ir_domain_ops, NULL);
- irq_domain_free_fwnode(fn);
+ if (!ioapic_ir_domain) {
+ irq_domain_free_fwnode(fn);
+ return -ENOMEM;
+ }
/*
* Hyper-V doesn't provide irq remapping function for
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index cc46dff98fa0..683b812c5c47 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
if (!ret)
ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
&validate_drhd_cb);
- if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+ if (!ret && !no_iommu && !iommu_detected &&
+ (!dmar_disabled || dmar_platform_optin())) {
iommu_detected = 1;
/* Make sure ACS will be enabled */
pci_request_acs();
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 9129663a7406..d759e7234e98 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
return g_iommus[iommu_id];
}
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+ return sm_supported(iommu) ?
+ ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
static void domain_update_iommu_coherency(struct dmar_domain *domain)
{
struct dmar_drhd_unit *drhd;
@@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
for_each_domain_iommu(i, domain) {
found = true;
- if (!ecap_coherent(g_iommus[i]->ecap)) {
+ if (!iommu_paging_structure_coherency(g_iommus[i])) {
domain->iommu_coherency = 0;
break;
}
@@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
/* No hardware attached; use lowest common denominator */
rcu_read_lock();
for_each_active_iommu(iommu, drhd) {
- if (!ecap_coherent(iommu->ecap)) {
+ if (!iommu_paging_structure_coherency(iommu)) {
domain->iommu_coherency = 0;
break;
}
@@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
if (domain_use_first_level(domain))
- pteval |= DMA_FL_PTE_XD;
+ pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (cmpxchg64(&pte->val, 0ULL, pteval))
/* Someone else set it while we were thinking; use theirs. */
free_pgtable_page(tmp_page);
@@ -1951,7 +1957,6 @@ static inline void
context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
{
context->hi |= pasid & ((1 << 20) - 1);
- context->hi |= (1 << 20);
}
/*
@@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
context_set_fault_enable(context);
context_set_present(context);
- domain_flush_cache(domain, context, sizeof(*context));
+ if (!ecap_coherent(iommu->ecap))
+ clflush_cache_range(context, sizeof(*context));
/*
* It's a non-present to present mapping. If hardware doesn't cache
@@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
if (domain_use_first_level(domain))
- attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+ attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
if (!sg) {
sg_res = nr_pages;
@@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw)
end >> agaw_to_width(si_domain->agaw)))
continue;
- ret = iommu_domain_identity_map(si_domain, start, end);
+ ret = iommu_domain_identity_map(si_domain,
+ mm_to_dma_pfn(start >> PAGE_SHIFT),
+ mm_to_dma_pfn(end >> PAGE_SHIFT));
if (ret)
return ret;
}
@@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
return ret;
}
+/*
+ * Check that the device does not live on an external facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+ if (pdev->untrusted) {
+ pci_info(pdev,
+ "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+ pdev->vendor, pdev->device);
+ pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+ return true;
+ }
+ return false;
+}
+
const struct iommu_ops intel_iommu_ops = {
.capable = intel_iommu_capable,
.domain_alloc = intel_iommu_domain_alloc,
@@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = {
static void quirk_iommu_igfx(struct pci_dev *dev)
{
+ if (risky_device(dev))
+ return;
+
pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
dmar_map_gfx = 0;
}
@@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
static void quirk_iommu_rwbf(struct pci_dev *dev)
{
+ if (risky_device(dev))
+ return;
+
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it. Same seems to hold for the desktop versions.
@@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
{
unsigned short ggc;
+ if (risky_device(dev))
+ return;
+
if (pci_read_config_word(dev, GGC, &ggc))
return;
@@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void)
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
if (!pdev)
return;
+
+ if (risky_device(pdev)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
pci_dev_put(pdev);
/* System Management Registers. Might be hidden, in which case
@@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void)
if (!pdev)
return;
+ if (risky_device(pdev)) {
+ pci_dev_put(pdev);
+ return;
+ }
+
if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
pci_dev_put(pdev);
return;
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 7f8769800815..9564d23d094f 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -563,8 +563,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
0, INTR_REMAP_TABLE_ENTRIES,
fn, &intel_ir_domain_ops,
iommu);
- irq_domain_free_fwnode(fn);
if (!iommu->ir_domain) {
+ irq_domain_free_fwnode(fn);
pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
goto out_free_bitmap;
}
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index d43120eb1dc5..b6858adc4f17 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -295,10 +295,10 @@ void iommu_release_device(struct device *dev)
return;
iommu_device_unlink(dev->iommu->iommu_dev, dev);
- iommu_group_remove_device(dev);
ops->release_device(dev);
+ iommu_group_remove_device(dev);
module_put(ops->owner);
dev_iommu_free(dev);
}
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index c3e1fbd1988c..d176df569af8 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -65,6 +65,7 @@ struct qcom_iommu_domain {
struct mutex init_mutex; /* Protects iommu pointer */
struct iommu_domain domain;
struct qcom_iommu_dev *iommu;
+ struct iommu_fwspec *fwspec;
};
static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom)
@@ -84,9 +85,9 @@ static struct qcom_iommu_dev * to_iommu(struct device *dev)
return dev_iommu_priv_get(dev);
}
-static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid)
+static struct qcom_iommu_ctx * to_ctx(struct qcom_iommu_domain *d, unsigned asid)
{
- struct qcom_iommu_dev *qcom_iommu = to_iommu(dev);
+ struct qcom_iommu_dev *qcom_iommu = d->iommu;
if (!qcom_iommu)
return NULL;
return qcom_iommu->ctxs[asid - 1];
@@ -118,14 +119,12 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg)
static void qcom_iommu_tlb_sync(void *cookie)
{
- struct iommu_fwspec *fwspec;
- struct device *dev = cookie;
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
- fwspec = dev_iommu_fwspec_get(dev);
-
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
unsigned int val, ret;
iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0);
@@ -139,14 +138,12 @@ static void qcom_iommu_tlb_sync(void *cookie)
static void qcom_iommu_tlb_inv_context(void *cookie)
{
- struct device *dev = cookie;
- struct iommu_fwspec *fwspec;
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i;
- fwspec = dev_iommu_fwspec_get(dev);
-
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid);
}
@@ -156,16 +153,14 @@ static void qcom_iommu_tlb_inv_context(void *cookie)
static void qcom_iommu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{
- struct device *dev = cookie;
- struct iommu_fwspec *fwspec;
+ struct qcom_iommu_domain *qcom_domain = cookie;
+ struct iommu_fwspec *fwspec = qcom_domain->fwspec;
unsigned i, reg;
reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
- fwspec = dev_iommu_fwspec_get(dev);
-
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
size_t s = size;
iova = (iova >> 12) << 12;
@@ -256,7 +251,9 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
};
qcom_domain->iommu = qcom_iommu;
- pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev);
+ qcom_domain->fwspec = fwspec;
+
+ pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, qcom_domain);
if (!pgtbl_ops) {
dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n");
ret = -ENOMEM;
@@ -269,7 +266,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
domain->geometry.force_aperture = true;
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
if (!ctx->secure_init) {
ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid);
@@ -419,7 +416,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
pm_runtime_get_sync(qcom_iommu->dev);
for (i = 0; i < fwspec->num_ids; i++) {
- struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]);
+ struct qcom_iommu_ctx *ctx = to_ctx(qcom_domain, fwspec->ids[i]);
/* Disable the context bank: */
iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
diff --git a/drivers/iommu/sun50i-iommu.c b/drivers/iommu/sun50i-iommu.c
index fce605e96aa2..3b1bf2fb94f5 100644
--- a/drivers/iommu/sun50i-iommu.c
+++ b/drivers/iommu/sun50i-iommu.c
@@ -313,9 +313,9 @@ static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu)
IOMMU_TLB_FLUSH_MICRO_TLB(1) |
IOMMU_TLB_FLUSH_MICRO_TLB(0));
- ret = readl_poll_timeout(iommu->base + IOMMU_TLB_FLUSH_REG,
- reg, !reg,
- 1, 2000);
+ ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG,
+ reg, !reg,
+ 1, 2000);
if (ret)
dev_warn(iommu->dev, "TLB Flush timed out!\n");
@@ -556,7 +556,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
{
struct sun50i_iommu_domain *sun50i_domain = to_sun50i_domain(domain);
phys_addr_t pt_phys;
- dma_addr_t pte_dma;
u32 *pte_addr;
u32 dte;
@@ -566,7 +565,6 @@ static size_t sun50i_iommu_unmap(struct iommu_domain *domain, unsigned long iova
pt_phys = sun50i_dte_get_pt_address(dte);
pte_addr = (u32 *)phys_to_virt(pt_phys) + sun50i_iova_get_pte_index(iova);
- pte_dma = pt_phys + sun50i_iova_get_pte_index(iova) * PT_ENTRY_SIZE;
if (!sun50i_pte_is_page_valid(*pte_addr))
return 0;
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 29fead208cad..216b3b8392b5 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -563,7 +563,7 @@ config LOONGSON_PCH_PIC
Support for the Loongson PCH PIC Controller.
config LOONGSON_PCH_MSI
- bool "Loongson PCH PIC Controller"
+ bool "Loongson PCH MSI Controller"
depends on MACH_LOONGSON64 || COMPILE_TEST
depends on PCI
default MACH_LOONGSON64
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cd685f521c77..beac4caefad9 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3797,10 +3797,10 @@ static void its_wait_vpt_parse_complete(void)
if (!gic_rdists->has_vpend_valid_dirty)
return;
- WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
- val,
- !(val & GICR_VPENDBASER_Dirty),
- 10, 500));
+ WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER,
+ val,
+ !(val & GICR_VPENDBASER_Dirty),
+ 10, 500));
}
static void its_vpe_schedule(struct its_vpe *vpe)
@@ -4054,16 +4054,24 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
u64 val;
if (info->req_db) {
+ unsigned long flags;
+
/*
* vPE is going to block: make the vPE non-resident with
* PendingLast clear and DB set. The GIC guarantees that if
* we read-back PendingLast clear, then a doorbell will be
* delivered when an interrupt comes.
+ *
+ * Note the locking to deal with the concurrent update of
+ * pending_last from the doorbell interrupt handler that can
+ * run concurrently.
*/
+ raw_spin_lock_irqsave(&vpe->vpe_lock, flags);
val = its_clear_vpend_valid(vlpi_base,
GICR_VPENDBASER_PendingLast,
GICR_VPENDBASER_4_1_DB);
vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
+ raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
} else {
/*
* We're not blocking, so just make the vPE non-resident
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 00de05abd3c3..c17fabd6741e 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -329,10 +329,8 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
- unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
- u32 val, mask, bit;
- unsigned long flags;
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ unsigned int cpu;
if (!force)
cpu = cpumask_any_and(mask_val, cpu_online_mask);
@@ -342,13 +340,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
return -EINVAL;
- gic_lock_irqsave(flags);
- mask = 0xff << shift;
- bit = gic_cpu_map[cpu] << shift;
- val = readl_relaxed(reg) & ~mask;
- writel_relaxed(val | bit, reg);
- gic_unlock_irqrestore(flags);
-
+ writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
return IRQ_SET_MASK_OK_DONE;
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index a6f97fa6ff69..8017f6d32d52 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -99,7 +99,7 @@ static int __init riscv_intc_init(struct device_node *node,
hartid = riscv_of_parent_hartid(node);
if (hartid < 0) {
- pr_warn("unable to fine hart id for %pOF\n", node);
+ pr_warn("unable to find hart id for %pOF\n", node);
return 0;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 39de94edd73a..6548a601edf0 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1389,7 +1389,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
if (__set_blocks(n1, n1->keys + n2->keys,
block_bytes(b->c)) >
btree_blocks(new_nodes[i]))
- goto out_nocoalesce;
+ goto out_unlock_nocoalesce;
keys = n2->keys;
/* Take the key of the node we're getting rid of */
@@ -1418,7 +1418,7 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
if (__bch_keylist_realloc(&keylist,
bkey_u64s(&new_nodes[i]->key)))
- goto out_nocoalesce;
+ goto out_unlock_nocoalesce;
bch_btree_node_write(new_nodes[i], &cl);
bch_keylist_add(&keylist, &new_nodes[i]->key);
@@ -1464,6 +1464,10 @@ static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
/* Invalidated our iterator */
return -EINTR;
+out_unlock_nocoalesce:
+ for (i = 0; i < nodes; i++)
+ mutex_unlock(&new_nodes[i]->write_lock);
+
out_nocoalesce:
closure_sync(&cl);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f9975c22bf7e..2014016f9a60 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -19,6 +19,7 @@
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
+#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
@@ -819,7 +820,8 @@ static void bcache_device_free(struct bcache_device *d)
}
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
- sector_t sectors, make_request_fn make_request_fn)
+ sector_t sectors, make_request_fn make_request_fn,
+ struct block_device *cached_bdev)
{
struct request_queue *q;
const size_t max_stripes = min_t(size_t, INT_MAX,
@@ -885,6 +887,20 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
q->limits.io_min = block_size;
q->limits.logical_block_size = block_size;
q->limits.physical_block_size = block_size;
+
+ if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
+ /*
+ * This should only happen with BCACHE_SB_VERSION_BDEV.
+ * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
+ */
+ pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
+ d->disk->disk_name, q->limits.logical_block_size,
+ PAGE_SIZE, bdev_logical_block_size(cached_bdev));
+
+ /* This also adjusts physical block size/min io size if needed */
+ blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
+ }
+
blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
@@ -1340,7 +1356,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
ret = bcache_device_init(&dc->disk, block_size,
dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
- cached_dev_make_request);
+ cached_dev_make_request, dc->bdev);
if (ret)
return ret;
@@ -1453,7 +1469,7 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
kobject_init(&d->kobj, &bch_flash_dev_ktype);
if (bcache_device_init(d, block_bytes(c), u->sectors,
- flash_dev_make_request))
+ flash_dev_make_request, NULL))
goto err;
bcache_device_attach(d, c, u - c->uuids);
@@ -2364,7 +2380,7 @@ static bool bch_is_open(struct block_device *bdev)
}
struct async_reg_args {
- struct work_struct reg_work;
+ struct delayed_work reg_work;
char *path;
struct cache_sb *sb;
struct cache_sb_disk *sb_disk;
@@ -2375,7 +2391,7 @@ static void register_bdev_worker(struct work_struct *work)
{
int fail = false;
struct async_reg_args *args =
- container_of(work, struct async_reg_args, reg_work);
+ container_of(work, struct async_reg_args, reg_work.work);
struct cached_dev *dc;
dc = kzalloc(sizeof(*dc), GFP_KERNEL);
@@ -2405,7 +2421,7 @@ static void register_cache_worker(struct work_struct *work)
{
int fail = false;
struct async_reg_args *args =
- container_of(work, struct async_reg_args, reg_work);
+ container_of(work, struct async_reg_args, reg_work.work);
struct cache *ca;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -2433,11 +2449,12 @@ out:
static void register_device_aync(struct async_reg_args *args)
{
if (SB_IS_BDEV(args->sb))
- INIT_WORK(&args->reg_work, register_bdev_worker);
+ INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
else
- INIT_WORK(&args->reg_work, register_cache_worker);
+ INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
- queue_work(system_wq, &args->reg_work);
+ /* 10 jiffies is enough for a delay */
+ queue_delayed_work(system_wq, &args->reg_work, 10);
}
static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 81dc5ff08909..a83a1de1e03f 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2420,7 +2420,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
+ if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2481,7 +2481,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
- if (unlikely(dm_suspended(ic->ti)))
+ if (unlikely(dm_post_suspending(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index ac83f5002ce5..489935d5f22d 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1471,7 +1471,7 @@ static void retrieve_deps(struct dm_table *table,
/*
* Check we have enough space.
*/
- needed = sizeof(*deps) + (sizeof(*deps->dev) * count);
+ needed = struct_size(deps, dev, count);
if (len < needed) {
param->flags |= DM_BUFFER_FULL_FLAG;
return;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index f60c02512121..85e0daabad49 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -146,10 +146,6 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig)
*/
static void rq_completed(struct mapped_device *md)
{
- /* nudge anyone waiting on suspend queue */
- if (unlikely(wq_has_sleeper(&md->wait)))
- wake_up(&md->wait);
-
/*
* dm_put() must be at the end of this function. See the comment above
*/
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 74f3c506f084..5358894bb9fd 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -282,6 +282,8 @@ static int persistent_memory_claim(struct dm_writecache *wc)
while (daa-- && i < p) {
pages[i++] = pfn_t_to_page(pfn);
pfn.val++;
+ if (!(i & 15))
+ cond_resched();
}
} while (i < p);
wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
@@ -849,10 +851,14 @@ static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_
if (likely(!e->write_in_progress)) {
if (!discarded_something) {
- writecache_wait_for_ios(wc, READ);
- writecache_wait_for_ios(wc, WRITE);
+ if (!WC_MODE_PMEM(wc)) {
+ writecache_wait_for_ios(wc, READ);
+ writecache_wait_for_ios(wc, WRITE);
+ }
discarded_something = true;
}
+ if (!writecache_entry_is_committed(wc, e))
+ wc->uncommitted_blocks--;
writecache_free_entry(wc, e);
}
@@ -2260,6 +2266,12 @@ invalid_optional:
}
if (WC_MODE_PMEM(wc)) {
+ if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
+ r = -EOPNOTSUPP;
+ ti->error = "Asynchronous persistent memory not supported as pmem cache";
+ goto bad;
+ }
+
r = persistent_memory_claim(wc);
if (r) {
ti->error = "Unable to map persistent memory for cache";
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 130b5a6d9f12..b298fefb022e 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -1078,7 +1078,8 @@ static int dmz_check_sb(struct dmz_metadata *zmd, struct dmz_sb *dsb,
nr_meta_zones = (le32_to_cpu(sb->nr_meta_blocks) + zmd->zone_nr_blocks - 1)
>> zmd->zone_nr_blocks_shift;
if (!nr_meta_zones ||
- nr_meta_zones >= zmd->nr_rnd_zones) {
+ (zmd->nr_devs <= 1 && nr_meta_zones >= zmd->nr_rnd_zones) ||
+ (zmd->nr_devs > 1 && nr_meta_zones >= zmd->nr_cache_zones)) {
dmz_dev_err(dev, "Invalid number of metadata blocks");
return -ENXIO;
}
@@ -1949,7 +1950,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int idx, bool idle)
{
struct dm_zone *dzone = NULL;
- struct dm_zone *zone, *last = NULL;
+ struct dm_zone *zone, *maxw_z = NULL;
struct list_head *zone_list;
/* If we have cache zones select from the cache zone list */
@@ -1961,18 +1962,37 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd,
} else
zone_list = &zmd->dev[idx].map_rnd_list;
+ /*
+ * Find the buffer zone with the heaviest weight or the first (oldest)
+ * data zone that can be reclaimed.
+ */
list_for_each_entry(zone, zone_list, link) {
if (dmz_is_buf(zone)) {
dzone = zone->bzone;
- if (dzone->dev->dev_idx != idx)
- continue;
- if (!last) {
- last = dzone;
+ if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
continue;
- }
- if (last->weight < dzone->weight)
+ if (!maxw_z || maxw_z->weight < dzone->weight)
+ maxw_z = dzone;
+ } else {
+ dzone = zone;
+ if (dmz_lock_zone_reclaim(dzone))
+ return dzone;
+ }
+ }
+
+ if (maxw_z && dmz_lock_zone_reclaim(maxw_z))
+ return maxw_z;
+
+ /*
+ * If we come here, none of the zones inspected could be locked for
+ * reclaim. Try again, being more aggressive, that is, find the
+	 * first zone that can be reclaimed regardless of its weight.
+ */
+ list_for_each_entry(zone, zone_list, link) {
+ if (dmz_is_buf(zone)) {
+ dzone = zone->bzone;
+ if (dmz_is_rnd(dzone) && dzone->dev->dev_idx != idx)
continue;
- dzone = last;
} else
dzone = zone;
if (dmz_lock_zone_reclaim(dzone))
@@ -2006,7 +2026,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd,
struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
unsigned int dev_idx, bool idle)
{
- struct dm_zone *zone;
+ struct dm_zone *zone = NULL;
/*
* Search for a zone candidate to reclaim: 2 cases are possible.
@@ -2019,7 +2039,7 @@ struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd,
dmz_lock_map(zmd);
if (list_empty(&zmd->reserved_seq_zones_list))
zone = dmz_get_seq_zone_for_reclaim(zmd, dev_idx);
- else
+ if (!zone)
zone = dmz_get_rnd_zone_for_reclaim(zmd, dev_idx, idle);
dmz_unlock_map(zmd);
@@ -2197,8 +2217,15 @@ struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
{
struct list_head *list;
struct dm_zone *zone;
- int i = 0;
+ int i;
+
+ /* Schedule reclaim to ensure free zones are available */
+ if (!(flags & DMZ_ALLOC_RECLAIM)) {
+ for (i = 0; i < zmd->nr_devs; i++)
+ dmz_schedule_reclaim(zmd->dev[i].reclaim);
+ }
+ i = 0;
again:
if (flags & DMZ_ALLOC_CACHE)
list = &zmd->unmap_cache_list;
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index 2261b4dd60b7..9c0ecc9568a4 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -377,6 +377,7 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
dmz_metadata_label(zmd), zrc->dev_idx);
return -EBUSY;
}
+ rzone = dzone;
start = jiffies;
if (dmz_is_cache(dzone) || dmz_is_rnd(dzone)) {
@@ -391,8 +392,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
*/
ret = dmz_reclaim_rnd_data(zrc, dzone);
}
- rzone = dzone;
-
} else {
struct dm_zone *bzone = dzone->bzone;
sector_t chunk_block = 0;
@@ -415,7 +414,6 @@ static int dmz_do_reclaim(struct dmz_reclaim *zrc)
* be later reclaimed.
*/
ret = dmz_reclaim_seq_data(zrc, dzone);
- rzone = dzone;
}
}
out:
@@ -458,6 +456,8 @@ static unsigned int dmz_reclaim_percentage(struct dmz_reclaim *zrc)
nr_zones = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
nr_unmap = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
}
+ if (nr_unmap <= 1)
+ return 0;
return nr_unmap * 100 / nr_zones;
}
@@ -503,7 +503,7 @@ static void dmz_reclaim_work(struct work_struct *work)
{
struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
struct dmz_metadata *zmd = zrc->metadata;
- unsigned int p_unmap, nr_unmap_rnd = 0, nr_rnd = 0;
+ unsigned int p_unmap;
int ret;
if (dmz_dev_is_dying(zmd))
@@ -529,9 +529,6 @@ static void dmz_reclaim_work(struct work_struct *work)
zrc->kc_throttle.throttle = min(75U, 100U - p_unmap / 2);
}
- nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd, zrc->dev_idx);
- nr_rnd = dmz_nr_rnd_zones(zmd, zrc->dev_idx);
-
DMDEBUG("(%s/%u): Reclaim (%u): %s, %u%% free zones (%u/%u cache %u/%u random)",
dmz_metadata_label(zmd), zrc->dev_idx,
zrc->kc_throttle.throttle,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index a907a9446c0b..42aa5139df7c 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -400,15 +400,7 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
struct dmz_metadata *zmd = dmz->metadata;
struct dm_zone *zone;
- int i, ret;
-
- /*
- * Write may trigger a zone allocation. So make sure the
- * allocation can succeed.
- */
- if (bio_op(bio) == REQ_OP_WRITE)
- for (i = 0; i < dmz->nr_ddevs; i++)
- dmz_schedule_reclaim(dmz->dev[i].reclaim);
+ int ret;
dmz_lock_metadata(zmd);
@@ -890,7 +882,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
/* Set target (no write same support) */
- ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata) << 9;
+ ti->max_io_len = dmz_zone_nr_sectors(dmz->metadata);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_zeroes_bios = 1;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 109e81f33edb..5b9de2f71bb0 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
@@ -142,6 +143,7 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
+#define DMF_POST_SUSPENDING 8
#define DM_NUMA_NODE NUMA_NO_NODE
static int dm_numa_node = DM_NUMA_NODE;
@@ -654,28 +656,6 @@ static void free_tio(struct dm_target_io *tio)
bio_put(&tio->clone);
}
-static bool md_in_flight_bios(struct mapped_device *md)
-{
- int cpu;
- struct hd_struct *part = &dm_disk(md)->part0;
- long sum = 0;
-
- for_each_possible_cpu(cpu) {
- sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
- sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
- }
-
- return sum != 0;
-}
-
-static bool md_in_flight(struct mapped_device *md)
-{
- if (queue_is_mq(md->queue))
- return blk_mq_queue_inflight(md->queue);
- else
- return md_in_flight_bios(md);
-}
-
u64 dm_start_time_ns_from_clone(struct bio *bio)
{
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
@@ -1009,6 +989,7 @@ static void clone_endio(struct bio *bio)
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
dm_endio_fn endio = tio->ti->type->end_io;
+ struct bio *orig_bio = io->orig_bio;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
if (bio_op(bio) == REQ_OP_DISCARD &&
@@ -1022,6 +1003,18 @@ static void clone_endio(struct bio *bio)
disable_write_zeroes(md);
}
+ /*
+ * For zone-append bios get offset in zone of the written
+ * sector and add that to the original bio sector pos.
+ */
+ if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) {
+ sector_t written_sector = bio->bi_iter.bi_sector;
+ struct request_queue *q = orig_bio->bi_disk->queue;
+ u64 mask = (u64)blk_queue_zone_sectors(q) - 1;
+
+ orig_bio->bi_iter.bi_sector += written_sector & mask;
+ }
+
if (endio) {
int r = endio(tio->ti, bio, &error);
switch (r) {
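The zone-append fix-up added in the hunk above relies on the zone size being a power of two, so masking the completed sector with (zone_sectors - 1) yields the offset within the zone, which is then added back onto the original bio's start sector. A minimal, self-contained sketch of that arithmetic follows; the 524288-sector zone and the example sectors are made-up values, not taken from the driver.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t zone_sectors = 524288;          /* example: 256 MiB zone, 512 B sectors */
        uint64_t written_sector = 1048576 + 96;  /* sector reported back by the device */
        uint64_t orig_sector = 1048576;          /* zone start the caller submitted to */

        /* Same mask trick as in clone_endio() for REQ_OP_ZONE_APPEND. */
        uint64_t mask = zone_sectors - 1;
        orig_sector += written_sector & mask;

        printf("data landed at sector %llu (offset %llu within the zone)\n",
               (unsigned long long)orig_sector,
               (unsigned long long)(written_sector & mask));
        return 0;
    }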
@@ -1452,9 +1445,6 @@ static int __send_empty_flush(struct clone_info *ci)
BUG_ON(bio_has_data(ci->bio));
while ((ti = dm_table_get_target(ci->map, target_nr++)))
__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
-
- bio_disassociate_blkg(ci->bio);
-
return 0;
}
@@ -1642,6 +1632,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
+ bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else if (op_is_zone_mgmt(bio_op(bio))) {
ci.bio = bio;
@@ -1716,6 +1707,7 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
ci.bio = &flush_bio;
ci.sector_count = 0;
error = __send_empty_flush(&ci);
+ bio_uninit(ci.bio);
/* dec_pending submits any data associated with flush */
} else {
struct dm_target_io *tio;
@@ -2417,6 +2409,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
set_bit(DMF_SUSPENDED, &md->flags);
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
@@ -2457,15 +2450,29 @@ void dm_put(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_put);
-static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+static bool md_in_flight_bios(struct mapped_device *md)
+{
+ int cpu;
+ struct hd_struct *part = &dm_disk(md)->part0;
+ long sum = 0;
+
+ for_each_possible_cpu(cpu) {
+ sum += part_stat_local_read_cpu(part, in_flight[0], cpu);
+ sum += part_stat_local_read_cpu(part, in_flight[1], cpu);
+ }
+
+ return sum != 0;
+}
+
+static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
{
int r = 0;
DEFINE_WAIT(wait);
- while (1) {
+ while (true) {
prepare_to_wait(&md->wait, &wait, task_state);
- if (!md_in_flight(md))
+ if (!md_in_flight_bios(md))
break;
if (signal_pending_state(task_state, current)) {
@@ -2480,6 +2487,28 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
return r;
}
+static int dm_wait_for_completion(struct mapped_device *md, long task_state)
+{
+ int r = 0;
+
+ if (!queue_is_mq(md->queue))
+ return dm_wait_for_bios_completion(md, task_state);
+
+ while (true) {
+ if (!blk_mq_queue_inflight(md->queue))
+ break;
+
+ if (signal_pending_state(task_state, current)) {
+ r = -EINTR;
+ break;
+ }
+
+ msleep(5);
+ }
+
+ return r;
+}
+
/*
* Process the deferred bios
*/
@@ -2739,7 +2768,9 @@ retry:
if (r)
goto out_unlock;
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
+ clear_bit(DMF_POST_SUSPENDING, &md->flags);
out_unlock:
mutex_unlock(&md->suspend_lock);
@@ -2836,7 +2867,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
DMF_SUSPENDED_INTERNALLY);
+ set_bit(DMF_POST_SUSPENDING, &md->flags);
dm_table_postsuspend_targets(map);
+ clear_bit(DMF_POST_SUSPENDING, &md->flags);
}
static void __dm_internal_resume(struct mapped_device *md)
@@ -2913,17 +2946,25 @@ EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
+ int r;
+ unsigned noio_flag;
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
+ noio_flag = memalloc_noio_save();
+
if (!cookie)
- return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
- action, envp);
+ r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+ action, envp);
}
+
+ memalloc_noio_restore(noio_flag);
+
+ return r;
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
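The dm_kobject_uevent() change above brackets the uevent emission with memalloc_noio_save()/memalloc_noio_restore(), so allocations made on its behalf behave as if GFP_NOIO had been requested and cannot recurse into I/O while the device is being reconfigured. A minimal kernel-context sketch of the pattern; this is not standalone userspace code, and do_work_that_may_allocate() is a placeholder name, not a real helper.

    #include <linux/sched/mm.h>

    static int do_work_that_may_allocate(void)
    {
        return 0;  /* placeholder for e.g. the uevent emission */
    }

    static int run_in_noio_scope(void)
    {
        unsigned int noio_flag;
        int r;

        /* Allocations in this scope implicitly drop the I/O reclaim flags. */
        noio_flag = memalloc_noio_save();
        r = do_work_that_may_allocate();
        memalloc_noio_restore(noio_flag);

        return r;
    }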
@@ -2989,6 +3030,11 @@ int dm_suspended_md(struct mapped_device *md)
return test_bit(DMF_SUSPENDED, &md->flags);
}
+static int dm_post_suspending_md(struct mapped_device *md)
+{
+ return test_bit(DMF_POST_SUSPENDING, &md->flags);
+}
+
int dm_suspended_internally_md(struct mapped_device *md)
{
return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
@@ -3005,6 +3051,12 @@ int dm_suspended(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(dm_suspended);
+int dm_post_suspending(struct dm_target *ti)
+{
+ return dm_post_suspending_md(dm_table_get_md(ti->table));
+}
+EXPORT_SYMBOL_GPL(dm_post_suspending);
+
int dm_noflush_suspending(struct dm_target *ti)
{
return __noflush_suspending(dm_table_get_md(ti->table));
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index a4ee6b86663e..b91e472ee764 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -39,8 +39,6 @@
* Troy Laramy <t-laramy@ti.com>
*/
-#include <asm/cacheflush.h>
-
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index 10c214bd0903..1ac9aef70dff 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -18,7 +18,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm/cacheflush.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 68aea22f2b89..5216487db4fb 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1324,13 +1324,13 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
return 0; /* fw doesn't need any host buffers */
/* spin till we get enough memory */
- while(host_page_buffer_sz > 0) {
-
- if((ioc->HostPageBuffer = pci_alloc_consistent(
- ioc->pcidev,
- host_page_buffer_sz,
- &ioc->HostPageBuffer_dma)) != NULL) {
-
+ while (host_page_buffer_sz > 0) {
+ ioc->HostPageBuffer =
+ dma_alloc_coherent(&ioc->pcidev->dev,
+ host_page_buffer_sz,
+ &ioc->HostPageBuffer_dma,
+ GFP_KERNEL);
+ if (ioc->HostPageBuffer) {
dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
"host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
@@ -2741,8 +2741,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
sz = ioc->alloc_sz;
dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "free @ %p, sz=%d bytes\n",
ioc->name, ioc->alloc, ioc->alloc_sz));
- pci_free_consistent(ioc->pcidev, sz,
- ioc->alloc, ioc->alloc_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+ ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc = NULL;
@@ -2751,8 +2751,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- pci_free_consistent(ioc->pcidev, sz,
- ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+ ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
ioc->alloc_total -= sz;
}
@@ -2802,7 +2802,7 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
"HostPageBuffer free @ %p, sz=%d bytes\n",
ioc->name, ioc->HostPageBuffer,
ioc->HostPageBuffer_sz));
- pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
+ dma_free_coherent(&ioc->pcidev->dev, ioc->HostPageBuffer_sz,
ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
ioc->HostPageBuffer = NULL;
ioc->HostPageBuffer_sz = 0;
@@ -4497,7 +4497,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
ioc->name, sz, sz, num_chain));
total_size += sz;
- mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+ mem = dma_alloc_coherent(&ioc->pcidev->dev, total_size,
+ &alloc_dma, GFP_KERNEL);
if (mem == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
ioc->name);
@@ -4574,8 +4575,8 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
spin_unlock_irqrestore(&ioc->FreeQlock, flags);
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- ioc->sense_buf_pool =
- pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+ ioc->sense_buf_pool = dma_alloc_coherent(&ioc->pcidev->dev, sz,
+ &ioc->sense_buf_pool_dma, GFP_KERNEL);
if (ioc->sense_buf_pool == NULL) {
printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
ioc->name);
@@ -4613,18 +4614,16 @@ out_fail:
if (ioc->alloc != NULL) {
sz = ioc->alloc_sz;
- pci_free_consistent(ioc->pcidev,
- sz,
- ioc->alloc, ioc->alloc_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->alloc,
+ ioc->alloc_dma);
ioc->reply_frames = NULL;
ioc->req_frames = NULL;
ioc->alloc_total -= sz;
}
if (ioc->sense_buf_pool != NULL) {
sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
- pci_free_consistent(ioc->pcidev,
- sz,
- ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+ dma_free_coherent(&ioc->pcidev->dev, sz, ioc->sense_buf_pool,
+ ioc->sense_buf_pool_dma);
ioc->sense_buf_pool = NULL;
}
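The mptbase hunks above are a mechanical conversion from the legacy PCI DMA wrappers to the generic DMA API. A rough sketch of the mapping is shown below for reference; alloc_fw_buffer()/free_fw_buffer() are illustrative names only, and the notable behavioural detail is the explicit GFP_KERNEL, whereas pci_alloc_consistent() historically implied an atomic allocation.

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    /* Illustrative only: how a pci_alloc_consistent()/pci_free_consistent()
     * pair maps onto dma_alloc_coherent()/dma_free_coherent().
     */
    static void *alloc_fw_buffer(struct pci_dev *pdev, size_t sz, dma_addr_t *dma)
    {
        /* These call sites may sleep, so GFP_KERNEL is used explicitly. */
        return dma_alloc_coherent(&pdev->dev, sz, dma, GFP_KERNEL);
    }

    static void free_fw_buffer(struct pci_dev *pdev, size_t sz, void *buf,
                               dma_addr_t dma)
    {
        dma_free_coherent(&pdev->dev, sz, buf, dma);
    }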
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index f0737c57ed5f..1491561d2e5c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -118,8 +118,6 @@ int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
int mptscsih_resume(struct pci_dev *pdev);
#endif
-#define SNS_LEN(scp) SCSI_SENSE_BUFFERSIZE
-
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
@@ -2422,7 +2420,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
/* Copy the sense received into the scsi command block. */
req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
sense_data = ((u8 *)ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
- memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+ memcpy(sc->sense_buffer, sense_data, MPT_SENSE_BUFFER_ALLOC);
/* Log SMART data (asc = 0x5D, non-IM case only) if required.
*/
diff --git a/drivers/mfd/ioc3.c b/drivers/mfd/ioc3.c
index 02998d4eb74b..74cee7cb0afc 100644
--- a/drivers/mfd/ioc3.c
+++ b/drivers/mfd/ioc3.c
@@ -142,10 +142,11 @@ static int ioc3_irq_domain_setup(struct ioc3_priv_data *ipd, int irq)
goto err;
domain = irq_domain_create_linear(fn, 24, &ioc3_irq_domain_ops, ipd);
- if (!domain)
+ if (!domain) {
+ irq_domain_free_fwnode(fn);
goto err;
+ }
- irq_domain_free_fwnode(fn);
ipd->domain = domain;
irq_set_chained_handler_and_data(irq, ioc3_irq_handler, domain);
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index ab4144ea1f11..d6cd5537126c 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -10,7 +10,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/atmel-ssc.h>
#include <linux/slab.h>
#include <linux/module.h>
@@ -20,7 +20,7 @@
#include "../../sound/soc/atmel/atmel_ssc_dai.h"
/* Serialize access to ssc_list and user count */
-static DEFINE_SPINLOCK(user_lock);
+static DEFINE_MUTEX(user_lock);
static LIST_HEAD(ssc_list);
struct ssc_device *ssc_request(unsigned int ssc_num)
@@ -28,7 +28,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
int ssc_valid = 0;
struct ssc_device *ssc;
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_for_each_entry(ssc, &ssc_list, list) {
if (ssc->pdev->dev.of_node) {
if (of_alias_get_id(ssc->pdev->dev.of_node, "ssc")
@@ -44,18 +44,18 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
}
if (!ssc_valid) {
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
pr_err("ssc: ssc%d platform device is missing\n", ssc_num);
return ERR_PTR(-ENODEV);
}
if (ssc->user) {
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
dev_dbg(&ssc->pdev->dev, "module busy\n");
return ERR_PTR(-EBUSY);
}
ssc->user++;
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
clk_prepare(ssc->clk);
@@ -67,14 +67,14 @@ void ssc_free(struct ssc_device *ssc)
{
bool disable_clk = true;
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
if (ssc->user)
ssc->user--;
else {
disable_clk = false;
dev_dbg(&ssc->pdev->dev, "device already free\n");
}
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
if (disable_clk)
clk_unprepare(ssc->clk);
@@ -237,9 +237,9 @@ static int ssc_probe(struct platform_device *pdev)
return -ENXIO;
}
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_add_tail(&ssc->list, &ssc_list);
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
platform_set_drvdata(pdev, ssc);
@@ -258,9 +258,9 @@ static int ssc_remove(struct platform_device *pdev)
ssc_sound_dai_remove(ssc);
- spin_lock(&user_lock);
+ mutex_lock(&user_lock);
list_del(&ssc->list);
- spin_unlock(&user_lock);
+ mutex_unlock(&user_lock);
return 0;
}
diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c
index f82974a916c3..b0f62cbbdc87 100644
--- a/drivers/misc/habanalabs/command_submission.c
+++ b/drivers/misc/habanalabs/command_submission.c
@@ -62,6 +62,12 @@ static void hl_fence_release(struct dma_fence *fence)
container_of(fence, struct hl_cs_compl, base_fence);
struct hl_device *hdev = hl_cs_cmpl->hdev;
+ /* EBUSY means the CS was never submitted and hence we don't have
+ * an attached hw_sob object that we should handle here
+ */
+ if (fence->error == -EBUSY)
+ goto free;
+
if ((hl_cs_cmpl->type == CS_TYPE_SIGNAL) ||
(hl_cs_cmpl->type == CS_TYPE_WAIT)) {
@@ -92,6 +98,7 @@ static void hl_fence_release(struct dma_fence *fence)
kref_put(&hl_cs_cmpl->hw_sob->kref, hl_sob_reset);
}
+free:
kfree_rcu(hl_cs_cmpl, base_fence.rcu);
}
@@ -328,10 +335,16 @@ static void cs_do_release(struct kref *ref)
hl_ctx_put(cs->ctx);
+ /* We need to mark an error for a CS that was not submitted because in
+ * that case the dma fence release flow is different. Mainly, we don't
+ * need to handle hw_sob for signal/wait

+ */
if (cs->timedout)
dma_fence_set_error(cs->fence, -ETIMEDOUT);
else if (cs->aborted)
dma_fence_set_error(cs->fence, -EIO);
+ else if (!cs->submitted)
+ dma_fence_set_error(cs->fence, -EBUSY);
dma_fence_signal(cs->fence);
dma_fence_put(cs->fence);
diff --git a/drivers/misc/habanalabs/debugfs.c b/drivers/misc/habanalabs/debugfs.c
index 3c8dcdfba20c..fc4372c18ce2 100644
--- a/drivers/misc/habanalabs/debugfs.c
+++ b/drivers/misc/habanalabs/debugfs.c
@@ -480,7 +480,7 @@ out:
return 0;
}
-static ssize_t mmu_write(struct file *file, const char __user *buf,
+static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct seq_file *s = file->private_data;
@@ -1125,7 +1125,7 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"command_submission_jobs", command_submission_jobs_show, NULL},
{"userptr", userptr_show, NULL},
{"vm", vm_show, NULL},
- {"mmu", mmu_show, mmu_write},
+ {"mmu", mmu_show, mmu_asid_va_write},
{"engines", engines_show, NULL}
};
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 61f88e9884ce..834470d10b46 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -96,7 +96,7 @@
#define GAUDI_NUM_OF_QM_ARB_ERR_CAUSE 3
-#define GAUDI_ARB_WDT_TIMEOUT 0x400000
+#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
@@ -1893,6 +1893,8 @@ static void gaudi_init_pci_dma_qman(struct hl_device *hdev, int dma_id,
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 + q_off, so_base_ws_lo);
WREG32(mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 + q_off, so_base_ws_hi);
+ WREG32(mmDMA0_QM_CP_BARRIER_CFG_0 + q_off, 0x100);
+
/* The following configuration is needed only once per QMAN */
if (qman_id == 0) {
/* Configure RAZWI IRQ */
@@ -2725,6 +2727,12 @@ static int gaudi_mmu_init(struct hl_device *hdev)
WREG32(mmSTLB_HOP_CONFIGURATION,
hdev->mmu_huge_page_opt ? 0x30440 : 0x40440);
+ /*
+ * The H/W expects the first PI after init to be 1. After wraparound
+ * we'll write 0.
+ */
+ gaudi->mmu_cache_inv_pi = 1;
+
gaudi->hw_cap_initialized |= HW_CAP_MMU;
return 0;
@@ -3790,6 +3798,25 @@ static int gaudi_validate_dma_pkt_no_mmu(struct hl_device *hdev,
src_in_host);
}
+static int gaudi_validate_load_and_exe_pkt(struct hl_device *hdev,
+ struct hl_cs_parser *parser,
+ struct packet_load_and_exe *user_pkt)
+{
+ u32 cfg;
+
+ cfg = le32_to_cpu(user_pkt->cfg);
+
+ if (cfg & GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK) {
+ dev_err(hdev->dev,
+ "User not allowed to use Load and Execute\n");
+ return -EPERM;
+ }
+
+ parser->patched_cb_size += sizeof(struct packet_load_and_exe);
+
+ return 0;
+}
+
static int gaudi_validate_cb(struct hl_device *hdev,
struct hl_cs_parser *parser, bool is_mmu)
{
@@ -3838,6 +3865,11 @@ static int gaudi_validate_cb(struct hl_device *hdev,
rc = -EPERM;
break;
+ case PACKET_LOAD_AND_EXE:
+ rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
+ (struct packet_load_and_exe *) user_pkt);
+ break;
+
case PACKET_LIN_DMA:
parser->contains_dma_pkt = true;
if (is_mmu)
@@ -3855,7 +3887,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
case PACKET_FENCE:
case PACKET_NOP:
case PACKET_ARB_POINT:
- case PACKET_LOAD_AND_EXE:
parser->patched_cb_size += pkt_size;
break;
@@ -5994,6 +6025,8 @@ static int gaudi_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard,
mutex_lock(&hdev->mmu_cache_lock);
/* L0 & L1 invalidation */
+ WREG32(mmSTLB_INV_PS, 3);
+ WREG32(mmSTLB_CACHE_INV, gaudi->mmu_cache_inv_pi++);
WREG32(mmSTLB_INV_PS, 2);
rc = hl_poll_timeout(
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index a46530d375fa..41a8d9bff6bf 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -229,6 +229,8 @@ struct gaudi_internal_qman_info {
* @multi_msi_mode: whether we are working in multi MSI or single MSI mode.
* Multi MSI is possible only with IOMMU enabled.
* @ext_queue_idx: helper index for external queues initialization.
+ * @mmu_cache_inv_pi: PI for MMU cache invalidation flow. The H/W expects an
+ * 8-bit value so use u8.
*/
struct gaudi_device {
int (*armcp_info_get)(struct hl_device *hdev);
@@ -248,6 +250,7 @@ struct gaudi_device {
u32 hw_cap_initialized;
u8 multi_msi_mode;
u8 ext_queue_idx;
+ u8 mmu_cache_inv_pi;
};
void gaudi_init_security(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
index 9a5800b0086b..0f0cd067bb43 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_packets.h
@@ -197,6 +197,9 @@ struct packet_wait {
__le32 ctl;
};
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_SHIFT 0
+#define GAUDI_PKT_LOAD_AND_EXE_CFG_DST_MASK 0x00000001
+
struct packet_load_and_exe {
__le32 cfg;
__le32 ctl;
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index bccd341e9ae1..d5d2af4d10e6 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -828,7 +828,7 @@ static void run_plant_and_detach_test(int is_early)
char before[BREAK_INSTR_SIZE];
char after[BREAK_INSTR_SIZE];
- probe_kernel_read(before, (char *)kgdbts_break_test,
+ copy_from_kernel_nofault(before, (char *)kgdbts_break_test,
BREAK_INSTR_SIZE);
init_simple_test();
ts.tst = plant_and_detach_test;
@@ -836,8 +836,8 @@ static void run_plant_and_detach_test(int is_early)
/* Activate test with initial breakpoint */
if (!is_early)
kgdb_breakpoint();
- probe_kernel_read(after, (char *)kgdbts_break_test,
- BREAK_INSTR_SIZE);
+ copy_from_kernel_nofault(after, (char *)kgdbts_break_test,
+ BREAK_INSTR_SIZE);
if (memcmp(before, after, BREAK_INSTR_SIZE)) {
printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
panic("kgdb memory corruption");
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 8d468e0a950a..f476dbc7252b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -745,9 +745,8 @@ static int mei_cl_device_remove(struct device *dev)
mei_cl_bus_module_put(cldev);
module_put(THIS_MODULE);
- dev->driver = NULL;
- return ret;
+ return ret;
}
static ssize_t name_show(struct device *dev, struct device_attribute *a,
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 9392934e3a06..7becfc768bbc 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -94,6 +94,7 @@
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
#define MEI_DEV_ID_TGP_LP 0xA0E0 /* Tiger Lake Point LP */
+#define MEI_DEV_ID_TGP_H 0x43E0 /* Tiger Lake Point H */
#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
@@ -107,6 +108,8 @@
# define PCI_CFG_HFS_1_D0I3_MSK 0x80000000
#define PCI_CFG_HFS_2 0x48
#define PCI_CFG_HFS_3 0x60
+# define PCI_CFG_HFS_3_FW_SKU_MSK 0x00000070
+# define PCI_CFG_HFS_3_FW_SKU_SPS 0x00000060
#define PCI_CFG_HFS_4 0x64
#define PCI_CFG_HFS_5 0x68
#define PCI_CFG_HFS_6 0x6C
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index f620442addf5..7649710a2ab9 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -1366,7 +1366,7 @@ static bool mei_me_fw_type_nm(struct pci_dev *pdev)
#define MEI_CFG_FW_NM \
.quirk_probe = mei_me_fw_type_nm
-static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+static bool mei_me_fw_type_sps_4(struct pci_dev *pdev)
{
u32 reg;
unsigned int devfn;
@@ -1382,7 +1382,36 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
return (reg & 0xf0000) == 0xf0000;
}
-#define MEI_CFG_FW_SPS \
+#define MEI_CFG_FW_SPS_4 \
+ .quirk_probe = mei_me_fw_type_sps_4
+
+/**
+ * mei_me_fw_sku_sps() - check for sps sku
+ *
+ * Read ME FW Status register to check for SPS Firmware.
+ * The SPS FW is only signaled in pci function 0
+ *
+ * @pdev: pci device
+ *
+ * Return: true in case of SPS firmware
+ */
+static bool mei_me_fw_type_sps(struct pci_dev *pdev)
+{
+ u32 reg;
+ u32 fw_type;
+ unsigned int devfn;
+
+ devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
+ pci_bus_read_config_dword(pdev->bus, devfn, PCI_CFG_HFS_3, &reg);
+ trace_mei_pci_cfg_read(&pdev->dev, "PCI_CFG_HFS_3", PCI_CFG_HFS_3, reg);
+ fw_type = (reg & PCI_CFG_HFS_3_FW_SKU_MSK);
+
+ dev_dbg(&pdev->dev, "fw type is %d\n", fw_type);
+
+ return fw_type == PCI_CFG_HFS_3_FW_SKU_SPS;
+}
+
+#define MEI_CFG_FW_SPS \
.quirk_probe = mei_me_fw_type_sps
#define MEI_CFG_FW_VER_SUPP \
@@ -1452,10 +1481,17 @@ static const struct mei_cfg mei_me_pch8_cfg = {
};
/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
-static const struct mei_cfg mei_me_pch8_sps_cfg = {
+static const struct mei_cfg mei_me_pch8_sps_4_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
- MEI_CFG_FW_SPS,
+ MEI_CFG_FW_SPS_4,
+};
+
+/* LBG with quirk for SPS (4.0) Firmware exclusion */
+static const struct mei_cfg mei_me_pch12_sps_4_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_FW_SPS_4,
};
/* Cannon Lake and newer devices */
@@ -1465,10 +1501,20 @@ static const struct mei_cfg mei_me_pch12_cfg = {
MEI_CFG_DMA_128,
};
-/* LBG with quirk for SPS Firmware exclusion */
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion */
static const struct mei_cfg mei_me_pch12_sps_cfg = {
MEI_CFG_PCH8_HFS,
MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_FW_SPS,
+};
+
+/* Cannon Lake with quirk for SPS 5.0 and newer Firmware exclusion
+ * w/o DMA support
+ */
+static const struct mei_cfg mei_me_pch12_nodma_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
MEI_CFG_FW_SPS,
};
@@ -1480,6 +1526,15 @@ static const struct mei_cfg mei_me_pch15_cfg = {
MEI_CFG_TRC,
};
+/* Tiger Lake with quirk for SPS 5.0 and newer Firmware exclusion */
+static const struct mei_cfg mei_me_pch15_sps_cfg = {
+ MEI_CFG_PCH8_HFS,
+ MEI_CFG_FW_VER_SUPP,
+ MEI_CFG_DMA_128,
+ MEI_CFG_TRC,
+ MEI_CFG_FW_SPS,
+};
+
/*
* mei_cfg_list - A list of platform specific configurations.
* Note: has to be synchronized with enum mei_cfg_idx.
@@ -1492,10 +1547,13 @@ static const struct mei_cfg *const mei_cfg_list[] = {
[MEI_ME_PCH7_CFG] = &mei_me_pch7_cfg,
[MEI_ME_PCH_CPT_PBG_CFG] = &mei_me_pch_cpt_pbg_cfg,
[MEI_ME_PCH8_CFG] = &mei_me_pch8_cfg,
- [MEI_ME_PCH8_SPS_CFG] = &mei_me_pch8_sps_cfg,
+ [MEI_ME_PCH8_SPS_4_CFG] = &mei_me_pch8_sps_4_cfg,
[MEI_ME_PCH12_CFG] = &mei_me_pch12_cfg,
+ [MEI_ME_PCH12_SPS_4_CFG] = &mei_me_pch12_sps_4_cfg,
[MEI_ME_PCH12_SPS_CFG] = &mei_me_pch12_sps_cfg,
+ [MEI_ME_PCH12_SPS_NODMA_CFG] = &mei_me_pch12_nodma_sps_cfg,
[MEI_ME_PCH15_CFG] = &mei_me_pch15_cfg,
+ [MEI_ME_PCH15_SPS_CFG] = &mei_me_pch15_sps_cfg,
};
const struct mei_cfg *mei_me_get_cfg(kernel_ulong_t idx)
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index b6b94e211464..6a8973649c49 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
+ * Copyright (c) 2012-2020, Intel Corporation. All rights reserved.
* Intel Management Engine Interface (Intel MEI) Linux driver
*/
@@ -76,14 +76,20 @@ struct mei_me_hw {
* with quirk for Node Manager exclusion.
* @MEI_ME_PCH8_CFG: Platform Controller Hub Gen8 and newer
* client platforms.
- * @MEI_ME_PCH8_SPS_CFG: Platform Controller Hub Gen8 and newer
+ * @MEI_ME_PCH8_SPS_4_CFG: Platform Controller Hub Gen8 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH12_CFG: Platform Controller Hub Gen12 and newer
- * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 and newer
+ * @MEI_ME_PCH12_SPS_4_CFG: Platform Controller Hub Gen12 up to 4.0
+ * servers platforms with quirk for
+ * SPS firmware exclusion.
+ * @MEI_ME_PCH12_SPS_CFG: Platform Controller Hub Gen12 5.0 and newer
* servers platforms with quirk for
* SPS firmware exclusion.
* @MEI_ME_PCH15_CFG: Platform Controller Hub Gen15 and newer
+ * @MEI_ME_PCH15_SPS_CFG: Platform Controller Hub Gen15 and newer
+ * servers platforms with quirk for
+ * SPS firmware exclusion.
* @MEI_ME_NUM_CFG: Upper Sentinel.
*/
enum mei_cfg_idx {
@@ -94,10 +100,13 @@ enum mei_cfg_idx {
MEI_ME_PCH7_CFG,
MEI_ME_PCH_CPT_PBG_CFG,
MEI_ME_PCH8_CFG,
- MEI_ME_PCH8_SPS_CFG,
+ MEI_ME_PCH8_SPS_4_CFG,
MEI_ME_PCH12_CFG,
+ MEI_ME_PCH12_SPS_4_CFG,
MEI_ME_PCH12_SPS_CFG,
+ MEI_ME_PCH12_SPS_NODMA_CFG,
MEI_ME_PCH15_CFG,
+ MEI_ME_PCH15_SPS_CFG,
MEI_ME_NUM_CFG,
};
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 71f795b510ce..2a3f2fd5df50 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -59,18 +59,18 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, MEI_ME_PCH7_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, MEI_ME_PCH7_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, MEI_ME_PCH8_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, MEI_ME_PCH8_SPS_4_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, MEI_ME_PCH12_SPS_4_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)},
@@ -84,8 +84,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_3, MEI_ME_PCH8_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)},
- {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_SPS_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_3, MEI_ME_PCH12_SPS_NODMA_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_LP_3, MEI_ME_PCH8_CFG)},
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_JSP_N, MEI_ME_PCH15_CFG)},
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 7eb38d7482c6..08a3b1c05acb 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1146,9 +1146,11 @@ static int meson_mmc_probe(struct platform_device *pdev)
mmc->caps |= MMC_CAP_CMD23;
if (host->dram_access_quirk) {
+ /* Limit segments to 1 due to low available sram memory */
+ mmc->max_segs = 1;
/* Limit to the available sram memory */
- mmc->max_segs = SD_EMMC_SRAM_DATA_BUF_LEN / mmc->max_blk_size;
- mmc->max_blk_count = mmc->max_segs;
+ mmc->max_blk_count = SD_EMMC_SRAM_DATA_BUF_LEN /
+ mmc->max_blk_size;
} else {
mmc->max_blk_count = CMD_CFG_LENGTH_MASK;
mmc->max_segs = SD_EMMC_DESC_BUF_LEN /
diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c
index 5e20c099fe03..df43f42855e2 100644
--- a/drivers/mmc/host/owl-mmc.c
+++ b/drivers/mmc/host/owl-mmc.c
@@ -689,7 +689,7 @@ MODULE_DEVICE_TABLE(of, owl_mmc_of_match);
static struct platform_driver owl_mmc_driver = {
.driver = {
.name = "owl_mmc",
- .of_match_table = of_match_ptr(owl_mmc_of_match),
+ .of_match_table = owl_mmc_of_match,
},
.probe = owl_mmc_probe,
.remove = owl_mmc_remove,
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index b277dd7fbdb5..c0d58e9fcc33 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -618,8 +618,9 @@ static int msm_init_cm_dll(struct sdhci_host *host)
config &= ~CORE_CLK_PWRSAVE;
writel_relaxed(config, host->ioaddr + msm_offset->core_vendor_spec);
- config = msm_host->dll_config;
- writel_relaxed(config, host->ioaddr + msm_offset->core_dll_config);
+ if (msm_host->dll_config)
+ writel_relaxed(msm_host->dll_config,
+ host->ioaddr + msm_offset->core_dll_config);
if (msm_host->use_14lpp_dll_reset) {
config = readl_relaxed(host->ioaddr +
diff --git a/drivers/mmc/host/sdhci-of-aspeed.c b/drivers/mmc/host/sdhci-of-aspeed.c
index 56912e30c47e..a1bcc0f4ba9e 100644
--- a/drivers/mmc/host/sdhci-of-aspeed.c
+++ b/drivers/mmc/host/sdhci-of-aspeed.c
@@ -68,7 +68,7 @@ static void aspeed_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if (WARN_ON(clock > host->max_clk))
clock = host->max_clk;
- for (div = 1; div < 256; div *= 2) {
+ for (div = 2; div < 256; div *= 2) {
if ((parent / div) <= clock)
break;
}
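The loop above now starts the search at a divider of 2 and keeps doubling until parent/div no longer exceeds the requested clock. A standalone arithmetic check of that selection; the 200 MHz parent and 50 MHz request are example values only.

    #include <stdio.h>

    int main(void)
    {
        unsigned long parent = 200000000;  /* example parent clock, in Hz */
        unsigned long clock = 50000000;    /* example requested clock, in Hz */
        int div;

        /* Same search as aspeed_sdhci_set_clock(): smallest power-of-two
         * divider, starting at 2, that brings parent/div down to the request.
         */
        for (div = 2; div < 256; div *= 2) {
            if (parent / div <= clock)
                break;
        }

        printf("div = %d -> %lu Hz\n", div, parent / div);  /* div = 4 -> 50000000 Hz */
        return 0;
    }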
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 76d832a88e0c..7d930569a7df 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -1273,8 +1273,8 @@ int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
return -EROFS;
if (!len)
return 0;
- if (!mtd->oops_panic_write)
- mtd->oops_panic_write = true;
+ if (!master->oops_panic_write)
+ master->oops_panic_write = true;
return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
retlen, buf);
diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c
index 0a5cb77966cc..f5a53aac3c5f 100644
--- a/drivers/mtd/nand/raw/nandsim.c
+++ b/drivers/mtd/nand/raw/nandsim.c
@@ -1761,7 +1761,7 @@ static void ns_switch_state(struct nandsim *ns)
NS_DBG("switch_state: operation is unknown, try to find it\n");
- if (!ns_find_operation(ns, 0))
+ if (ns_find_operation(ns, 0))
return;
if ((ns->state & ACTION_MASK) &&
diff --git a/drivers/mtd/nand/raw/xway_nand.c b/drivers/mtd/nand/raw/xway_nand.c
index 94bfba994326..29255476afdb 100644
--- a/drivers/mtd/nand/raw/xway_nand.c
+++ b/drivers/mtd/nand/raw/xway_nand.c
@@ -224,7 +224,7 @@ static int xway_nand_remove(struct platform_device *pdev)
struct nand_chip *chip = &data->chip;
int ret;
- ret = mtd_device_unregister(mtd);
+ ret = mtd_device_unregister(nand_to_mtd(chip));
WARN_ON(ret);
nand_cleanup(chip);
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 3dd46cd55114..88e7900853db 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -407,19 +407,34 @@ free_dst:
return err;
}
+static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
+{
+ if (bareudp->ethertype == proto)
+ return true;
+
+ if (!bareudp->multi_proto_mode)
+ return false;
+
+ if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
+ proto == htons(ETH_P_MPLS_MC))
+ return true;
+
+ if (bareudp->ethertype == htons(ETH_P_IP) &&
+ proto == htons(ETH_P_IPV6))
+ return true;
+
+ return false;
+}
+
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
struct ip_tunnel_info *info = NULL;
int err;
- if (skb->protocol != bareudp->ethertype) {
- if (!bareudp->multi_proto_mode ||
- (skb->protocol != htons(ETH_P_MPLS_MC) &&
- skb->protocol != htons(ETH_P_IPV6))) {
- err = -EINVAL;
- goto tx_error;
- }
+ if (!bareudp_proto_valid(bareudp, skb->protocol)) {
+ err = -EINVAL;
+ goto tx_error;
}
info = skb_tunnel_info(skb);
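The new bareudp_proto_valid() helper above accepts the configured ethertype, and in multi-proto mode additionally accepts MPLS multicast on an MPLS unicast device and IPv6 on an IPv4 device. A small standalone illustration of the same acceptance logic, using host-order ethertype constants for readability:

    #include <stdbool.h>
    #include <stdio.h>

    #define ETH_P_IP       0x0800
    #define ETH_P_IPV6     0x86DD
    #define ETH_P_MPLS_UC  0x8847
    #define ETH_P_MPLS_MC  0x8848

    /* Mirrors the acceptance rules of bareudp_proto_valid(). */
    static bool proto_valid(unsigned short ethertype, bool multi_proto_mode,
                            unsigned short proto)
    {
        if (ethertype == proto)
            return true;
        if (!multi_proto_mode)
            return false;
        if (ethertype == ETH_P_MPLS_UC && proto == ETH_P_MPLS_MC)
            return true;
        if (ethertype == ETH_P_IP && proto == ETH_P_IPV6)
            return true;
        return false;
    }

    int main(void)
    {
        printf("%d\n", proto_valid(ETH_P_IP, true, ETH_P_IPV6));         /* 1 */
        printf("%d\n", proto_valid(ETH_P_IP, false, ETH_P_IPV6));        /* 0 */
        printf("%d\n", proto_valid(ETH_P_MPLS_UC, true, ETH_P_MPLS_MC)); /* 1 */
        return 0;
    }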
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 004919aea5fb..f88cb097b022 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5053,15 +5053,19 @@ int bond_create(struct net *net, const char *name)
bond_dev->rtnl_link_ops = &bond_link_ops;
res = register_netdevice(bond_dev);
+ if (res < 0) {
+ free_netdev(bond_dev);
+ rtnl_unlock();
+
+ return res;
+ }
netif_carrier_off(bond_dev);
bond_work_init_all(bond);
rtnl_unlock();
- if (res < 0)
- free_netdev(bond_dev);
- return res;
+ return 0;
}
static int __net_init bond_net_init(struct net *net)
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index b43b51646b11..f0f9138e967f 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -456,11 +456,10 @@ static int bond_newlink(struct net *src_net, struct net_device *bond_dev,
return err;
err = register_netdevice(bond_dev);
-
- netif_carrier_off(bond_dev);
if (!err) {
struct bonding *bond = netdev_priv(bond_dev);
+ netif_carrier_off(bond_dev);
bond_work_init_all(bond);
}
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 47d65b77caf7..7c17b0f705ec 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1268,6 +1268,9 @@ static int ksz8795_switch_init(struct ksz_device *dev)
return -ENOMEM;
}
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->port_cnt;
+
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 9a51b8a4de5d..4a9239b2c2e4 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -974,23 +974,6 @@ static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port,
PORT_MIRROR_SNIFFER, false);
}
-static void ksz9477_phy_setup(struct ksz_device *dev, int port,
- struct phy_device *phy)
-{
- /* Only apply to port with PHY. */
- if (port >= dev->phy_port_cnt)
- return;
-
- /* The MAC actually cannot run in 1000 half-duplex mode. */
- phy_remove_link_mode(phy,
- ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
-
- /* PHY does not support gigabit. */
- if (!(dev->features & GBIT_SUPPORT))
- phy_remove_link_mode(phy,
- ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
-}
-
static bool ksz9477_get_gbit(struct ksz_device *dev, u8 data)
{
bool gbit;
@@ -1588,6 +1571,9 @@ static int ksz9477_switch_init(struct ksz_device *dev)
return -ENOMEM;
}
+ /* set the real number of ports */
+ dev->ds->num_ports = dev->port_cnt;
+
return 0;
}
@@ -1600,7 +1586,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
- .phy_setup = ksz9477_phy_setup,
.port_setup = ksz9477_port_setup,
.r_mib_cnt = ksz9477_r_mib_cnt,
.r_mib_pkt = ksz9477_r_mib_pkt,
@@ -1614,7 +1599,29 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
int ksz9477_switch_register(struct ksz_device *dev)
{
- return ksz_switch_register(dev, &ksz9477_dev_ops);
+ int ret, i;
+ struct phy_device *phydev;
+
+ ret = ksz_switch_register(dev, &ksz9477_dev_ops);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dev->phy_port_cnt; ++i) {
+ if (!dsa_is_user_port(dev->ds, i))
+ continue;
+
+ phydev = dsa_to_port(dev->ds, i)->slave->phydev;
+
+ /* The MAC actually cannot run in 1000 half-duplex mode. */
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ /* PHY does not support gigabit. */
+ if (!(dev->features & GBIT_SUPPORT))
+ phy_remove_link_mode(phydev,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT);
+ }
+ return ret;
}
EXPORT_SYMBOL(ksz9477_switch_register);
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 7d050fab0889..7951f52d860d 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -79,6 +79,7 @@ MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
static const struct of_device_id ksz9477_dt_ids[] = {
{ .compatible = "microchip,ksz9477" },
{ .compatible = "microchip,ksz9897" },
+ { .compatible = "microchip,ksz9893" },
{ .compatible = "microchip,ksz9567" },
{},
};
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index fd1d6676ae4f..7b6c0dce7536 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -358,8 +358,6 @@ int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy)
/* setup slave port */
dev->dev_ops->port_setup(dev, port, false);
- if (dev->dev_ops->phy_setup)
- dev->dev_ops->phy_setup(dev, port, phy);
/* port_stp_state_set() will be called afterwards to enable the port so
* there is no need to do anything.
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index f2c9bb68fd33..7d11dd32ec0d 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -119,8 +119,6 @@ struct ksz_dev_ops {
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
- void (*phy_setup)(struct ksz_device *dev, int port,
- struct phy_device *phy);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 7627ea61e0ea..fee16c947c2e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -664,8 +664,11 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
const struct phylink_link_state *state)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port *p;
int err;
+ p = &chip->ports[port];
+
/* FIXME: is this the correct test? If we're in fixed mode on an
* internal port, why should we process this any different from
* PHY mode? On the other hand, the port may be automedia between
@@ -675,10 +678,14 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
return;
mv88e6xxx_reg_lock(chip);
- /* FIXME: should we force the link down here - but if we do, how
- * do we restore the link force/unforce state? The driver layering
- * gets in the way.
+ /* In inband mode, the link may come up at any time while the link
+ * is not forced down. Force the link down while we reconfigure the
+ * interface mode.
*/
+ if (mode == MLO_AN_INBAND && p->interface != state->interface &&
+ chip->info->ops->port_set_link)
+ chip->info->ops->port_set_link(chip, port, LINK_FORCED_DOWN);
+
err = mv88e6xxx_port_config_interface(chip, port, state->interface);
if (err && err != -EOPNOTSUPP)
goto err_unlock;
@@ -691,6 +698,15 @@ static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
if (err > 0)
err = 0;
+ /* Undo the forced down state above after completing configuration
+ * irrespective of its state on entry, which allows the link to come up.
+ */
+ if (mode == MLO_AN_INBAND && p->interface != state->interface &&
+ chip->info->ops->port_set_link)
+ chip->info->ops->port_set_link(chip, port, LINK_UNFORCED);
+
+ p->interface = state->interface;
+
err_unlock:
mv88e6xxx_reg_unlock(chip);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index e5430cf2ad71..6476524e8239 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -232,6 +232,7 @@ struct mv88e6xxx_port {
u64 atu_full_violation;
u64 vtu_member_violation;
u64 vtu_miss_violation;
+ phy_interface_t interface;
u8 cmode;
bool mirror_ingress;
bool mirror_egress;
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index 0056f9c1e471..af3565160db6 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -7,6 +7,165 @@
#define SJA1105_SIZE_VL_STATUS 8
+/* Insert into the global gate list, sorted by gate action time. */
+static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
+ struct sja1105_rule *rule,
+ u8 gate_state, s64 entry_time,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gate_entry *e;
+ int rc;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->rule = rule;
+ e->gate_state = gate_state;
+ e->interval = entry_time;
+
+ if (list_empty(&gating_cfg->entries)) {
+ list_add(&e->list, &gating_cfg->entries);
+ } else {
+ struct sja1105_gate_entry *p;
+
+ list_for_each_entry(p, &gating_cfg->entries, list) {
+ if (p->interval == e->interval) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Gate conflict");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ if (e->interval < p->interval)
+ break;
+ }
+ list_add(&e->list, p->list.prev);
+ }
+
+ gating_cfg->num_entries++;
+
+ return 0;
+err:
+ kfree(e);
+ return rc;
+}
+
+/* The gate entries contain absolute times in their e->interval field. Convert
+ * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
+ */
+static void
+sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
+ u64 cycle_time)
+{
+ struct sja1105_gate_entry *last_e;
+ struct sja1105_gate_entry *e;
+ struct list_head *prev;
+
+ list_for_each_entry(e, &gating_cfg->entries, list) {
+ struct sja1105_gate_entry *p;
+
+ prev = e->list.prev;
+
+ if (prev == &gating_cfg->entries)
+ continue;
+
+ p = list_entry(prev, struct sja1105_gate_entry, list);
+ p->interval = e->interval - p->interval;
+ }
+ last_e = list_last_entry(&gating_cfg->entries,
+ struct sja1105_gate_entry, list);
+ last_e->interval = cycle_time - last_e->interval;
+}
+
+static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
+{
+ struct sja1105_gate_entry *e, *n;
+
+ list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
+ list_del(&e->list);
+ kfree(e);
+ }
+}
+
+static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
+ struct netlink_ext_ack *extack)
+{
+ struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
+ struct sja1105_rule *rule;
+ s64 max_cycle_time = 0;
+ s64 its_base_time = 0;
+ int i, rc = 0;
+
+ sja1105_free_gating_config(gating_cfg);
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ if (max_cycle_time < rule->vl.cycle_time) {
+ max_cycle_time = rule->vl.cycle_time;
+ its_base_time = rule->vl.base_time;
+ }
+ }
+
+ if (!max_cycle_time)
+ return 0;
+
+ dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
+ max_cycle_time, its_base_time);
+
+ gating_cfg->base_time = its_base_time;
+ gating_cfg->cycle_time = max_cycle_time;
+ gating_cfg->num_entries = 0;
+
+ list_for_each_entry(rule, &priv->flow_block.rules, list) {
+ s64 time;
+ s64 rbt;
+
+ if (rule->type != SJA1105_RULE_VL)
+ continue;
+ if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
+ continue;
+
+ /* Calculate the difference between this gating schedule's
+ * base time, and the base time of the gating schedule with the
+ * longest cycle time. We call it the relative base time (rbt).
+ */
+ rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
+ its_base_time);
+ rbt -= its_base_time;
+
+ time = rbt;
+
+ for (i = 0; i < rule->vl.num_entries; i++) {
+ u8 gate_state = rule->vl.entries[i].gate_state;
+ s64 entry_time = time;
+
+ while (entry_time < max_cycle_time) {
+ rc = sja1105_insert_gate_entry(gating_cfg, rule,
+ gate_state,
+ entry_time,
+ extack);
+ if (rc)
+ goto err;
+
+ entry_time += rule->vl.cycle_time;
+ }
+ time += rule->vl.entries[i].interval;
+ }
+ }
+
+ sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+
+ return 0;
+err:
+ sja1105_free_gating_config(gating_cfg);
+ return rc;
+}
+
/* The switch flow classification core implements TTEthernet, which 'thinks' in
* terms of Virtual Links (VL), a concept borrowed from ARINC 664 part 7.
* However it also has one other operating mode (VLLUPFORMAT=0) where it acts
@@ -390,171 +549,19 @@ int sja1105_vl_delete(struct sja1105_private *priv, int port,
kfree(rule);
}
- rc = sja1105_init_virtual_links(priv, extack);
+ rc = sja1105_compose_gating_subschedule(priv, extack);
if (rc)
return rc;
- return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
-}
-
-/* Insert into the global gate list, sorted by gate action time. */
-static int sja1105_insert_gate_entry(struct sja1105_gating_config *gating_cfg,
- struct sja1105_rule *rule,
- u8 gate_state, s64 entry_time,
- struct netlink_ext_ack *extack)
-{
- struct sja1105_gate_entry *e;
- int rc;
-
- e = kzalloc(sizeof(*e), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- e->rule = rule;
- e->gate_state = gate_state;
- e->interval = entry_time;
-
- if (list_empty(&gating_cfg->entries)) {
- list_add(&e->list, &gating_cfg->entries);
- } else {
- struct sja1105_gate_entry *p;
-
- list_for_each_entry(p, &gating_cfg->entries, list) {
- if (p->interval == e->interval) {
- NL_SET_ERR_MSG_MOD(extack,
- "Gate conflict");
- rc = -EBUSY;
- goto err;
- }
-
- if (e->interval < p->interval)
- break;
- }
- list_add(&e->list, p->list.prev);
- }
-
- gating_cfg->num_entries++;
-
- return 0;
-err:
- kfree(e);
- return rc;
-}
-
-/* The gate entries contain absolute times in their e->interval field. Convert
- * that to proper intervals (i.e. "0, 5, 10, 15" to "5, 5, 5, 5").
- */
-static void
-sja1105_gating_cfg_time_to_interval(struct sja1105_gating_config *gating_cfg,
- u64 cycle_time)
-{
- struct sja1105_gate_entry *last_e;
- struct sja1105_gate_entry *e;
- struct list_head *prev;
-
- list_for_each_entry(e, &gating_cfg->entries, list) {
- struct sja1105_gate_entry *p;
-
- prev = e->list.prev;
-
- if (prev == &gating_cfg->entries)
- continue;
-
- p = list_entry(prev, struct sja1105_gate_entry, list);
- p->interval = e->interval - p->interval;
- }
- last_e = list_last_entry(&gating_cfg->entries,
- struct sja1105_gate_entry, list);
- if (last_e->list.prev != &gating_cfg->entries)
- last_e->interval = cycle_time - last_e->interval;
-}
-
-static void sja1105_free_gating_config(struct sja1105_gating_config *gating_cfg)
-{
- struct sja1105_gate_entry *e, *n;
-
- list_for_each_entry_safe(e, n, &gating_cfg->entries, list) {
- list_del(&e->list);
- kfree(e);
- }
-}
-
-static int sja1105_compose_gating_subschedule(struct sja1105_private *priv,
- struct netlink_ext_ack *extack)
-{
- struct sja1105_gating_config *gating_cfg = &priv->tas_data.gating_cfg;
- struct sja1105_rule *rule;
- s64 max_cycle_time = 0;
- s64 its_base_time = 0;
- int i, rc = 0;
-
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- if (rule->type != SJA1105_RULE_VL)
- continue;
- if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
- continue;
-
- if (max_cycle_time < rule->vl.cycle_time) {
- max_cycle_time = rule->vl.cycle_time;
- its_base_time = rule->vl.base_time;
- }
- }
-
- if (!max_cycle_time)
- return 0;
-
- dev_dbg(priv->ds->dev, "max_cycle_time %lld its_base_time %lld\n",
- max_cycle_time, its_base_time);
-
- sja1105_free_gating_config(gating_cfg);
-
- gating_cfg->base_time = its_base_time;
- gating_cfg->cycle_time = max_cycle_time;
- gating_cfg->num_entries = 0;
-
- list_for_each_entry(rule, &priv->flow_block.rules, list) {
- s64 time;
- s64 rbt;
-
- if (rule->type != SJA1105_RULE_VL)
- continue;
- if (rule->vl.type != SJA1105_VL_TIME_TRIGGERED)
- continue;
-
- /* Calculate the difference between this gating schedule's
- * base time, and the base time of the gating schedule with the
- * longest cycle time. We call it the relative base time (rbt).
- */
- rbt = future_base_time(rule->vl.base_time, rule->vl.cycle_time,
- its_base_time);
- rbt -= its_base_time;
-
- time = rbt;
-
- for (i = 0; i < rule->vl.num_entries; i++) {
- u8 gate_state = rule->vl.entries[i].gate_state;
- s64 entry_time = time;
-
- while (entry_time < max_cycle_time) {
- rc = sja1105_insert_gate_entry(gating_cfg, rule,
- gate_state,
- entry_time,
- extack);
- if (rc)
- goto err;
-
- entry_time += rule->vl.cycle_time;
- }
- time += rule->vl.entries[i].interval;
- }
- }
+ rc = sja1105_init_virtual_links(priv, extack);
+ if (rc)
+ return rc;
- sja1105_gating_cfg_time_to_interval(gating_cfg, max_cycle_time);
+ rc = sja1105_init_scheduling(priv);
+ if (rc < 0)
+ return rc;
- return 0;
-err:
- sja1105_free_gating_config(gating_cfg);
- return rc;
+ return sja1105_static_config_reload(priv, SJA1105_VIRTUAL_LINKS);
}
int sja1105_vl_gate(struct sja1105_private *priv, int port,
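The sja1105_gating_cfg_time_to_interval() helper moved earlier in this file's diff converts a sorted list of absolute gate action times into per-entry durations, with the last entry padded out to the cycle time. A small standalone illustration of the same arithmetic over an array; the times and the 20-tick cycle are made-up values matching the "0, 5, 10, 15" to "5, 5, 5, 5" comment above.

    #include <stdio.h>

    int main(void)
    {
        /* Absolute action times within one cycle, already sorted. */
        long long t[] = { 0, 5, 10, 15 };
        long long cycle_time = 20;
        int n = sizeof(t) / sizeof(t[0]);
        long long interval[4];
        int i;

        for (i = 0; i < n - 1; i++)
            interval[i] = t[i + 1] - t[i];
        /* The last interval runs until the end of the cycle. */
        interval[n - 1] = cycle_time - t[n - 1];

        for (i = 0; i < n; i++)
            printf("entry %d: interval %lld\n", i, interval[i]);
        return 0;  /* prints 5, 5, 5, 5 */
    }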
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index ed5b465bc664..992fedbe4ce3 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -64,6 +64,7 @@ struct aq_hw_caps_s {
u8 rx_rings;
bool flow_control;
bool is_64_dma;
+ u32 quirks;
u32 priv_data_len;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 4435c6374f7e..7c7bf6bf163f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -415,6 +415,15 @@ int aq_nic_init(struct aq_nic_s *self)
self->aq_nic_cfg.aq_hw_caps->media_type == AQ_HW_MEDIA_TYPE_TP) {
self->aq_hw->phy_id = HW_ATL_PHY_ID_MAX;
err = aq_phy_init(self->aq_hw);
+
+ /* Disable the PTP on NICs where it's known to cause datapath
+ * problems.
+ * Ideally this should have been done by PHY provisioning, but
+ * many units have been shipped with enabled PTP block already.
+ */
+ if (self->aq_nic_cfg.aq_hw_caps->quirks & AQ_NIC_QUIRK_BAD_PTP)
+ if (self->aq_hw->phy_id != HW_ATL_PHY_ID_MAX)
+ aq_phy_disable_ptp(self->aq_hw);
}
for (i = 0U; i < self->aq_vecs; i++) {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index 2ab003065e62..439ce9692dac 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -81,6 +81,8 @@ struct aq_nic_cfg_s {
#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
#define AQ_NIC_FLAG_ERR_HW 0x80000000U
+#define AQ_NIC_QUIRK_BAD_PTP BIT(0)
+
#define AQ_NIC_WOL_MODES (WAKE_MAGIC |\
WAKE_PHY)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
index 51ae921e3e1f..949ac2351701 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.c
@@ -1,10 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* aQuantia Corporation Network Driver
- * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2018-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
#include "aq_phy.h"
+#define HW_ATL_PTP_DISABLE_MSK BIT(10)
+
bool aq_mdio_busy_wait(struct aq_hw_s *aq_hw)
{
int err = 0;
@@ -145,3 +149,24 @@ bool aq_phy_init(struct aq_hw_s *aq_hw)
return true;
}
+
+void aq_phy_disable_ptp(struct aq_hw_s *aq_hw)
+{
+ static const u16 ptp_registers[] = {
+ 0x031e,
+ 0x031d,
+ 0x031c,
+ 0x031b,
+ };
+ u16 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ptp_registers); i++) {
+ val = aq_phy_read_reg(aq_hw, MDIO_MMD_VEND1,
+ ptp_registers[i]);
+
+ aq_phy_write_reg(aq_hw, MDIO_MMD_VEND1,
+ ptp_registers[i],
+ val & ~HW_ATL_PTP_DISABLE_MSK);
+ }
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
index 84b72ad04a4a..86cc1ee836e2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_phy.h
@@ -1,6 +1,8 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* aQuantia Corporation Network Driver
- * Copyright (C) 2018-2019 aQuantia Corporation. All rights reserved
+/* Atlantic Network Driver
+ *
+ * Copyright (C) 2018-2019 aQuantia Corporation
+ * Copyright (C) 2019-2020 Marvell International Ltd.
*/
#ifndef AQ_PHY_H
@@ -29,4 +31,6 @@ bool aq_phy_init_phy_id(struct aq_hw_s *aq_hw);
bool aq_phy_init(struct aq_hw_s *aq_hw);
+void aq_phy_disable_ptp(struct aq_hw_s *aq_hw);
+
#endif /* AQ_PHY_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 14d79f70cad7..2125bc20ab6a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -93,6 +93,25 @@ const struct aq_hw_caps_s hw_atl_b0_caps_aqc109 = {
AQ_NIC_RATE_100M,
};
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc111 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_5G |
+ AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+ .quirks = AQ_NIC_QUIRK_BAD_PTP,
+};
+
+const struct aq_hw_caps_s hw_atl_b0_caps_aqc112 = {
+ DEFAULT_B0_BOARD_BASIC_CAPABILITIES,
+ .media_type = AQ_HW_MEDIA_TYPE_TP,
+ .link_speed_msk = AQ_NIC_RATE_2G5 |
+ AQ_NIC_RATE_1G |
+ AQ_NIC_RATE_100M,
+ .quirks = AQ_NIC_QUIRK_BAD_PTP,
+};
+
static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
{
int err = 0;
@@ -354,8 +373,13 @@ static int hw_atl_b0_hw_init_tx_tc_rate_limit(struct aq_hw_s *self)
/* WSP, if min_rate is set for at least one TC.
* RR otherwise.
+ *
+ * NB! MAC FW sets arb mode itself if PTP is enabled. We shouldn't
+ * overwrite it here in that case.
*/
- hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+ if (!nic_cfg->is_ptp)
+ hw_atl_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U);
+
/* Data TC Arbiter takes precedence over Descriptor TC Arbiter,
* leave Descriptor TC Arbiter as RR.
*/
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index 30f468f2084d..16091af17980 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -18,17 +18,15 @@ extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc100;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc107;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc108;
extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc109;
-
-#define hw_atl_b0_caps_aqc111 hw_atl_b0_caps_aqc108
-#define hw_atl_b0_caps_aqc112 hw_atl_b0_caps_aqc109
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc111;
+extern const struct aq_hw_caps_s hw_atl_b0_caps_aqc112;
#define hw_atl_b0_caps_aqc100s hw_atl_b0_caps_aqc100
#define hw_atl_b0_caps_aqc107s hw_atl_b0_caps_aqc107
#define hw_atl_b0_caps_aqc108s hw_atl_b0_caps_aqc108
#define hw_atl_b0_caps_aqc109s hw_atl_b0_caps_aqc109
-
-#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc108
-#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc109
+#define hw_atl_b0_caps_aqc111s hw_atl_b0_caps_aqc111
+#define hw_atl_b0_caps_aqc112s hw_atl_b0_caps_aqc112
extern const struct aq_hw_ops hw_atl_ops_b0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
index 3c8e8047ea1e..d775b23025c1 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -1700,7 +1700,7 @@ void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location,
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_SRCA_ADR(location + i),
- ipv6_src[i]);
+ ipv6_src[3 - i]);
}
void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
@@ -1711,7 +1711,7 @@ void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location,
for (i = 0; i < 4; ++i)
aq_hw_write_reg(aq_hw,
HW_ATL_RPF_L3_DSTA_ADR(location + i),
- ipv6_dest[i]);
+ ipv6_dest[3 - i]);
}
u32 hw_atl_sem_ram_get(struct aq_hw_s *self)
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
index 06220792daf1..7430ff025134 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -1360,7 +1360,7 @@
*/
/* Register address for bitfield pif_rpf_l3_da0_i[31:0] */
-#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053B0 + (filter) * 0x4)
+#define HW_ATL_RPF_L3_DSTA_ADR(filter) (0x000053D0 + (filter) * 0x4)
/* Bitmask for bitfield l3_da0[1F:0] */
#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu
/* Inverted bitmask for bitfield l3_da0[1F:0] */
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 112edbd30823..38cce66ef212 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -556,7 +556,8 @@ static int ag71xx_mdio_probe(struct ag71xx *ag)
ag->mdio_reset = of_reset_control_get_exclusive(np, "mdio");
if (IS_ERR(ag->mdio_reset)) {
netif_err(ag, probe, ndev, "Failed to get reset mdio.\n");
- return PTR_ERR(ag->mdio_reset);
+ err = PTR_ERR(ag->mdio_reset);
+ goto mdio_err_put_clk;
}
mii_bus->name = "ag71xx_mdio";
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 6a884df44612..7463a1847ceb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -3418,7 +3418,7 @@ void bnxt_set_tpa_flags(struct bnxt *bp)
*/
void bnxt_set_ring_params(struct bnxt *bp)
{
- u32 ring_size, rx_size, rx_space;
+ u32 ring_size, rx_size, rx_space, max_rx_cmpl;
u32 agg_factor = 0, agg_ring_size = 0;
/* 8 for CRC and VLAN */
@@ -3474,7 +3474,15 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
- ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
+ max_rx_cmpl = bp->rx_ring_size;
+ /* MAX TPA needs to be added because TPA_START completions are
+ * immediately recycled, so the TPA completions are not bound by
+ * the RX ring size.
+ */
+ if (bp->flags & BNXT_FLAG_TPA)
+ max_rx_cmpl += bp->max_tpa;
+ /* RX and TPA completions are 32-byte, all others are 16-byte */
+ ring_size = max_rx_cmpl * 2 + agg_ring_size + bp->tx_ring_size;
bp->cp_ring_size = ring_size;
bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
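To make the new completion-ring sizing concrete, a worked example with purely illustrative numbers (not taken from the driver): with rx_ring_size = 2048 and max_tpa = 64, enabling TPA gives max_rx_cmpl = 2048 + 64 = 2112; assuming agg_ring_size = 4096 and tx_ring_size = 512, ring_size = 2112 * 2 + 4096 + 512 = 8832 entries. The previous formula, which counted only rx_ring_size, could leave the completion ring too small once TPA completions are included.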
@@ -10385,15 +10393,15 @@ static void bnxt_sp_task(struct work_struct *work)
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
- if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
- &bp->sp_event))
- bnxt_init_ethtool_link_settings(bp);
-
rc = bnxt_update_link(bp, true);
- mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
+
+ if (test_and_clear_bit(BNXT_LINK_CFG_CHANGE_SP_EVENT,
+ &bp->sp_event))
+ bnxt_init_ethtool_link_settings(bp);
+ mutex_unlock(&bp->link_lock);
}
if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6b88143af5ea..b4aa56dc4f9f 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1765,8 +1765,11 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- if (netif_running(dev))
+ if (netif_running(dev)) {
+ mutex_lock(&bp->link_lock);
rc = bnxt_hwrm_set_pause(bp);
+ mutex_unlock(&bp->link_lock);
+ }
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 3a9a51f7063a..392e32c7122a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -396,6 +396,7 @@ static void bnxt_free_vf_resources(struct bnxt *bp)
}
}
+ bp->pf.active_vfs = 0;
kfree(bp->pf.vf);
bp->pf.vf = NULL;
}
@@ -835,7 +836,6 @@ void bnxt_sriov_disable(struct bnxt *bp)
bnxt_free_vf_resources(bp);
- bp->pf.active_vfs = 0;
/* Reclaim all resources for the PF. */
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index ff31da0ed846..e471b14fc6e9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -459,17 +459,6 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
genet_dma_ring_regs[r]);
}
-static bool bcmgenet_hfb_is_filter_enabled(struct bcmgenet_priv *priv,
- u32 f_index)
-{
- u32 offset;
- u32 reg;
-
- offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
- reg = bcmgenet_hfb_reg_readl(priv, offset);
- return !!(reg & (1 << (f_index % 32)));
-}
-
static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
{
u32 offset;
@@ -533,19 +522,6 @@ static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
bcmgenet_hfb_reg_writel(priv, reg, offset);
}
-static int bcmgenet_hfb_find_unused_filter(struct bcmgenet_priv *priv)
-{
- u32 f_index;
-
- /* First MAX_NUM_OF_FS_RULES are reserved for Rx NFC filters */
- for (f_index = MAX_NUM_OF_FS_RULES;
- f_index < priv->hw_params->hfb_filter_cnt; f_index++)
- if (!bcmgenet_hfb_is_filter_enabled(priv, f_index))
- return f_index;
-
- return -ENOMEM;
-}
-
static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
{
while (size) {
@@ -567,14 +543,14 @@ static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
#define VALIDATE_MASK(x) \
bcmgenet_hfb_validate_mask(&(x), sizeof(x))
-static int bcmgenet_hfb_insert_data(u32 *f, int offset,
- void *val, void *mask, size_t size)
+static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
+ u32 offset, void *val, void *mask,
+ size_t size)
{
- int index;
- u32 tmp;
+ u32 index, tmp;
- index = offset / 2;
- tmp = f[index];
+ index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
+ tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
while (size--) {
if (offset++ & 1) {
@@ -591,9 +567,10 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
tmp |= 0x10000;
break;
}
- f[index++] = tmp;
+ bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
if (size)
- tmp = f[index];
+ tmp = bcmgenet_hfb_readl(priv,
+ index * sizeof(u32));
} else {
tmp &= ~0xCFF00;
tmp |= (*(unsigned char *)val++) << 8;
@@ -609,43 +586,26 @@ static int bcmgenet_hfb_insert_data(u32 *f, int offset,
break;
}
if (!size)
- f[index] = tmp;
+ bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
}
}
return 0;
}
-static void bcmgenet_hfb_set_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue, int f_index)
-{
- u32 base = f_index * priv->hw_params->hfb_filter_size;
- int i;
-
- for (i = 0; i < f_length; i++)
- bcmgenet_hfb_writel(priv, f_data[i], (base + i) * sizeof(u32));
-
- bcmgenet_hfb_set_filter_length(priv, f_index, 2 * f_length);
- bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f_index, rx_queue);
-}
-
-static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
- struct bcmgenet_rxnfc_rule *rule)
+static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
+ struct bcmgenet_rxnfc_rule *rule)
{
struct ethtool_rx_flow_spec *fs = &rule->fs;
- int err = 0, offset = 0, f_length = 0;
- u16 val_16, mask_16;
+ u32 offset = 0, f_length = 0, f;
u8 val_8, mask_8;
+ __be16 val_16;
+ u16 mask_16;
size_t size;
- u32 *f_data;
-
- f_data = kcalloc(priv->hw_params->hfb_filter_size, sizeof(u32),
- GFP_KERNEL);
- if (!f_data)
- return -ENOMEM;
+ f = fs->location;
if (fs->flow_type & FLOW_MAC_EXT) {
- bcmgenet_hfb_insert_data(f_data, 0,
+ bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_ext.h_dest, &fs->m_ext.h_dest,
sizeof(fs->h_ext.h_dest));
}
@@ -653,11 +613,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
if (fs->flow_type & FLOW_EXT) {
if (fs->m_ext.vlan_etype ||
fs->m_ext.vlan_tci) {
- bcmgenet_hfb_insert_data(f_data, 12,
+ bcmgenet_hfb_insert_data(priv, f, 12,
&fs->h_ext.vlan_etype,
&fs->m_ext.vlan_etype,
sizeof(fs->h_ext.vlan_etype));
- bcmgenet_hfb_insert_data(f_data, 14,
+ bcmgenet_hfb_insert_data(priv, f, 14,
&fs->h_ext.vlan_tci,
&fs->m_ext.vlan_tci,
sizeof(fs->h_ext.vlan_tci));
@@ -669,15 +629,15 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW:
f_length += DIV_ROUND_UP(ETH_HLEN, 2);
- bcmgenet_hfb_insert_data(f_data, 0,
+ bcmgenet_hfb_insert_data(priv, f, 0,
&fs->h_u.ether_spec.h_dest,
&fs->m_u.ether_spec.h_dest,
sizeof(fs->h_u.ether_spec.h_dest));
- bcmgenet_hfb_insert_data(f_data, ETH_ALEN,
+ bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
&fs->h_u.ether_spec.h_source,
&fs->m_u.ether_spec.h_source,
sizeof(fs->h_u.ether_spec.h_source));
- bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
&fs->h_u.ether_spec.h_proto,
&fs->m_u.ether_spec.h_proto,
sizeof(fs->h_u.ether_spec.h_proto));
@@ -687,21 +647,21 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
/* Specify IP Ether Type */
val_16 = htons(ETH_P_IP);
mask_16 = 0xFFFF;
- bcmgenet_hfb_insert_data(f_data, (2 * ETH_ALEN) + offset,
+ bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
&val_16, &mask_16, sizeof(val_16));
- bcmgenet_hfb_insert_data(f_data, 15 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 15 + offset,
&fs->h_u.usr_ip4_spec.tos,
&fs->m_u.usr_ip4_spec.tos,
sizeof(fs->h_u.usr_ip4_spec.tos));
- bcmgenet_hfb_insert_data(f_data, 23 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 23 + offset,
&fs->h_u.usr_ip4_spec.proto,
&fs->m_u.usr_ip4_spec.proto,
sizeof(fs->h_u.usr_ip4_spec.proto));
- bcmgenet_hfb_insert_data(f_data, 26 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 26 + offset,
&fs->h_u.usr_ip4_spec.ip4src,
&fs->m_u.usr_ip4_spec.ip4src,
sizeof(fs->h_u.usr_ip4_spec.ip4src));
- bcmgenet_hfb_insert_data(f_data, 30 + offset,
+ bcmgenet_hfb_insert_data(priv, f, 30 + offset,
&fs->h_u.usr_ip4_spec.ip4dst,
&fs->m_u.usr_ip4_spec.ip4dst,
sizeof(fs->h_u.usr_ip4_spec.ip4dst));
@@ -711,11 +671,11 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
/* Only supports 20 byte IPv4 header */
val_8 = 0x45;
mask_8 = 0xFF;
- bcmgenet_hfb_insert_data(f_data, ETH_HLEN + offset,
+ bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
&val_8, &mask_8,
sizeof(val_8));
size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
- bcmgenet_hfb_insert_data(f_data,
+ bcmgenet_hfb_insert_data(priv, f,
ETH_HLEN + 20 + offset,
&fs->h_u.usr_ip4_spec.l4_4_bytes,
&fs->m_u.usr_ip4_spec.l4_4_bytes,
@@ -724,87 +684,42 @@ static int bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
break;
}
+ bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
/* Ring 0 flows can be handled by the default Descriptor Ring
* We'll map them to ring 0, but don't enable the filter
*/
- bcmgenet_hfb_set_filter(priv, f_data, f_length, 0,
- fs->location);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
rule->state = BCMGENET_RXNFC_STATE_DISABLED;
} else {
/* Other Rx rings are direct mapped here */
- bcmgenet_hfb_set_filter(priv, f_data, f_length,
- fs->ring_cookie, fs->location);
- bcmgenet_hfb_enable_filter(priv, fs->location);
+ bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
+ fs->ring_cookie);
+ bcmgenet_hfb_enable_filter(priv, f);
rule->state = BCMGENET_RXNFC_STATE_ENABLED;
}
-
- kfree(f_data);
-
- return err;
}
-/* bcmgenet_hfb_add_filter
- *
- * Add new filter to Hardware Filter Block to match and direct Rx traffic to
- * desired Rx queue.
- *
- * f_data is an array of unsigned 32-bit integers where each 32-bit integer
- * provides filter data for 2 bytes (4 nibbles) of Rx frame:
- *
- * bits 31:20 - unused
- * bit 19 - nibble 0 match enable
- * bit 18 - nibble 1 match enable
- * bit 17 - nibble 2 match enable
- * bit 16 - nibble 3 match enable
- * bits 15:12 - nibble 0 data
- * bits 11:8 - nibble 1 data
- * bits 7:4 - nibble 2 data
- * bits 3:0 - nibble 3 data
- *
- * Example:
- * In order to match:
- * - Ethernet frame type = 0x0800 (IP)
- * - IP version field = 4
- * - IP protocol field = 0x11 (UDP)
- *
- * The following filter is needed:
- * u32 hfb_filter_ipv4_udp[] = {
- * Rx frame offset 0x00: 0x00000000, 0x00000000, 0x00000000, 0x00000000,
- * Rx frame offset 0x08: 0x00000000, 0x00000000, 0x000F0800, 0x00084000,
- * Rx frame offset 0x10: 0x00000000, 0x00000000, 0x00000000, 0x00030011,
- * };
+/* bcmgenet_hfb_clear
*
- * To add the filter to HFB and direct the traffic to Rx queue 0, call:
- * bcmgenet_hfb_add_filter(priv, hfb_filter_ipv4_udp,
- * ARRAY_SIZE(hfb_filter_ipv4_udp), 0);
+ * Clear Hardware Filter Block and disable all filtering.
*/
-int bcmgenet_hfb_add_filter(struct bcmgenet_priv *priv, u32 *f_data,
- u32 f_length, u32 rx_queue)
+static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
{
- int f_index;
-
- f_index = bcmgenet_hfb_find_unused_filter(priv);
- if (f_index < 0)
- return -ENOMEM;
+ u32 base, i;
- if (f_length > priv->hw_params->hfb_filter_size)
- return -EINVAL;
-
- bcmgenet_hfb_set_filter(priv, f_data, f_length, rx_queue, f_index);
- bcmgenet_hfb_enable_filter(priv, f_index);
-
- return 0;
+ base = f_index * priv->hw_params->hfb_filter_size;
+ for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
+ bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
}
-/* bcmgenet_hfb_clear
- *
- * Clear Hardware Filter Block and disable all filtering.
- */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
u32 i;
+ if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
+ return;
+
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
@@ -816,19 +731,18 @@ static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
bcmgenet_hfb_reg_writel(priv, 0x0,
HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
- for (i = 0; i < priv->hw_params->hfb_filter_cnt *
- priv->hw_params->hfb_filter_size; i++)
- bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
+ for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
+ bcmgenet_hfb_clear_filter(priv, i);
}
static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
int i;
+ INIT_LIST_HEAD(&priv->rxnfc_list);
if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
return;
- INIT_LIST_HEAD(&priv->rxnfc_list);
for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
@@ -1513,18 +1427,15 @@ static int bcmgenet_insert_flow(struct net_device *dev,
loc_rule = &priv->rxnfc_rules[cmd->fs.location];
if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
- if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&loc_rule->list);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ }
loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memcpy(&loc_rule->fs, &cmd->fs,
sizeof(struct ethtool_rx_flow_spec));
- err = bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
- if (err) {
- netdev_err(dev, "rxnfc: Could not install rule (%d)\n",
- err);
- return err;
- }
+ bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
list_add_tail(&loc_rule->list, &priv->rxnfc_list);
@@ -1549,8 +1460,10 @@ static int bcmgenet_delete_flow(struct net_device *dev,
if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
- if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
list_del(&rule->list);
+ bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
+ }
rule->state = BCMGENET_RXNFC_STATE_UNUSED;
memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
@@ -2118,11 +2031,6 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
goto out;
}
- if (skb_padto(skb, ETH_ZLEN)) {
- ret = NETDEV_TX_OK;
- goto out;
- }
-
/* Retain how many bytes will be sent on the wire, without TSB inserted
* by transmit checksum offload
*/
@@ -2169,6 +2077,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
len_stat = (size << DMA_BUFLENGTH_SHIFT) |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
+ /* Note: if we ever change from DMA_TX_APPEND_CRC below we
+ * will need to restore software padding of "runt" packets
+ */
if (!i) {
len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -4077,7 +3988,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (err)
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err)
- goto err;
+ goto err_clk_disable;
/* Mii wait queue */
init_waitqueue_head(&priv->wq);
@@ -4089,14 +4000,14 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk_wol)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
err = PTR_ERR(priv->clk_wol);
- goto err;
+ goto err_clk_disable;
}
priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
if (IS_ERR(priv->clk_eee)) {
dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
err = PTR_ERR(priv->clk_eee);
- goto err;
+ goto err_clk_disable;
}
/* If this is an internal GPHY, power it on now, before UniMAC is
@@ -4207,8 +4118,9 @@ static int bcmgenet_resume(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
- u32 offset, reg;
+ u32 reg;
int ret;
if (!netif_running(dev))
@@ -4239,10 +4151,11 @@ static int bcmgenet_resume(struct device *d)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- offset = HFB_FLT_ENABLE_V3PLUS;
- bcmgenet_hfb_reg_writel(priv, priv->hfb_en[1], offset);
- bcmgenet_hfb_reg_writel(priv, priv->hfb_en[2], offset + sizeof(u32));
- bcmgenet_hfb_reg_writel(priv, priv->hfb_en[0], HFB_CTRL);
+ /* Restore hardware filters */
+ bcmgenet_hfb_clear(priv);
+ list_for_each_entry(rule, &priv->rxnfc_list, list)
+ if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
+ bcmgenet_hfb_create_rxnfc_filter(priv, rule);
if (priv->internal_phy) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
@@ -4286,7 +4199,6 @@ static int bcmgenet_suspend(struct device *d)
{
struct net_device *dev = dev_get_drvdata(d);
struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 offset;
if (!netif_running(dev))
return 0;
@@ -4298,11 +4210,7 @@ static int bcmgenet_suspend(struct device *d)
if (!device_may_wakeup(d))
phy_suspend(dev->phydev);
- /* Preserve filter state and disable filtering */
- priv->hfb_en[0] = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- offset = HFB_FLT_ENABLE_V3PLUS;
- priv->hfb_en[1] = bcmgenet_hfb_reg_readl(priv, offset);
- priv->hfb_en[2] = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
+ /* Disable filtering */
bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
return 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index a12cb59298f4..f6ca01da141d 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -696,7 +696,6 @@ struct bcmgenet_priv {
u32 wolopts;
u8 sopass[SOPASS_MAX];
bool wol_active;
- u32 hfb_en[3];
struct bcmgenet_mib_counters mib;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 4ea6a26b04f7..1c86eddb1b51 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -217,20 +217,28 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
priv->wol_active = 0;
clk_disable_unprepare(priv->clk_wol);
+ priv->crc_fwd_en = 0;
/* Disable Magic Packet Detection */
- reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
- reg &= ~(MPD_EN | MPD_PW_EN);
- bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) {
+ reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+ if (!(reg & MPD_EN))
+ return; /* already reset so skip the rest */
+ reg &= ~(MPD_EN | MPD_PW_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+ }
/* Disable WAKE_FILTER Detection */
- reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
- reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
- bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ if (priv->wolopts & WAKE_FILTER) {
+ reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
+ if (!(reg & RBUF_ACPI_EN))
+ return; /* already reset so skip the rest */
+ reg &= ~(RBUF_HFB_EN | RBUF_ACPI_EN);
+ bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
+ }
/* Disable CRC Forward */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
reg &= ~CMD_CRC_FWD;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- priv->crc_fwd_en = 0;
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 257c4920cb88..2213e6ab8151 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -2821,11 +2821,13 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- wol->supported = 0;
- wol->wolopts = 0;
-
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET)
+ if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= WAKE_MAGIC;
+
+ if (bp->wol & MACB_WOL_ENABLED)
+ wol->wolopts |= WAKE_MAGIC;
+ }
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2833,9 +2835,13 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct macb *bp = netdev_priv(netdev);
int ret;
+ /* Pass the request to the phylink layer first */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- if (!ret)
- return 0;
+ /* Don't manage WoL on MAC if handled by the PHY
+ * or if there's a failure in talking to the PHY
+ */
+ if (!ret || ret != -EOPNOTSUPP)
+ return ret;
if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
(wol->wolopts & ~WAKE_MAGIC))
@@ -3730,7 +3736,7 @@ static int macb_init(struct platform_device *pdev)
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
val = 0;
- if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_mode_is_rgmii(bp->phy_interface))
val = GEM_BIT(RGMII);
else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
@@ -3762,15 +3768,9 @@ static int macb_init(struct platform_device *pdev)
static struct sifive_fu540_macb_mgmt *mgmt;
-/* Initialize and start the Receiver and Transmit subsystems */
-static int at91ether_start(struct net_device *dev)
+static int at91ether_alloc_coherent(struct macb *lp)
{
- struct macb *lp = netdev_priv(dev);
struct macb_queue *q = &lp->queues[0];
- struct macb_dma_desc *desc;
- dma_addr_t addr;
- u32 ctl;
- int i;
q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
@@ -3792,6 +3792,43 @@ static int at91ether_start(struct net_device *dev)
return -ENOMEM;
}
+ return 0;
+}
+
+static void at91ether_free_coherent(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+
+ if (q->rx_ring) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ macb_dma_desc_get_size(lp),
+ q->rx_ring, q->rx_ring_dma);
+ q->rx_ring = NULL;
+ }
+
+ if (q->rx_buffers) {
+ dma_free_coherent(&lp->pdev->dev,
+ AT91ETHER_MAX_RX_DESCR *
+ AT91ETHER_MAX_RBUFF_SZ,
+ q->rx_buffers, q->rx_buffers_dma);
+ q->rx_buffers = NULL;
+ }
+}
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct macb *lp)
+{
+ struct macb_queue *q = &lp->queues[0];
+ struct macb_dma_desc *desc;
+ dma_addr_t addr;
+ u32 ctl;
+ int i, ret;
+
+ ret = at91ether_alloc_coherent(lp);
+ if (ret)
+ return ret;
+
addr = q->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
desc = macb_rx_desc(q, i);
@@ -3813,9 +3850,39 @@ static int at91ether_start(struct net_device *dev)
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+ /* Enable MAC interrupts */
+ macb_writel(lp, IER, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
return 0;
}
+static void at91ether_stop(struct macb *lp)
+{
+ u32 ctl;
+
+ /* Disable MAC interrupts */
+ macb_writel(lp, IDR, MACB_BIT(RCOMP) |
+ MACB_BIT(RXUBR) |
+ MACB_BIT(ISR_TUND) |
+ MACB_BIT(ISR_RLE) |
+ MACB_BIT(TCOMP) |
+ MACB_BIT(ISR_ROVR) |
+ MACB_BIT(HRESP));
+
+ /* Disable Receiver and Transmitter */
+ ctl = macb_readl(lp, NCR);
+ macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+ /* Free resources. */
+ at91ether_free_coherent(lp);
+}
+
/* Open the ethernet interface */
static int at91ether_open(struct net_device *dev)
{
@@ -3835,63 +3902,36 @@ static int at91ether_open(struct net_device *dev)
macb_set_hwaddr(lp);
- ret = at91ether_start(dev);
+ ret = at91ether_start(lp);
if (ret)
- return ret;
-
- /* Enable MAC interrupts */
- macb_writel(lp, IER, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
+ goto pm_exit;
ret = macb_phylink_connect(lp);
if (ret)
- return ret;
+ goto stop;
netif_start_queue(dev);
return 0;
+
+stop:
+ at91ether_stop(lp);
+pm_exit:
+ pm_runtime_put_sync(&lp->pdev->dev);
+ return ret;
}
/* Close the interface */
static int at91ether_close(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
- struct macb_queue *q = &lp->queues[0];
- u32 ctl;
-
- /* Disable Receiver and Transmitter */
- ctl = macb_readl(lp, NCR);
- macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
-
- /* Disable MAC interrupts */
- macb_writel(lp, IDR, MACB_BIT(RCOMP) |
- MACB_BIT(RXUBR) |
- MACB_BIT(ISR_TUND) |
- MACB_BIT(ISR_RLE) |
- MACB_BIT(TCOMP) |
- MACB_BIT(ISR_ROVR) |
- MACB_BIT(HRESP));
netif_stop_queue(dev);
phylink_stop(lp->phylink);
phylink_disconnect_phy(lp->phylink);
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR *
- macb_dma_desc_get_size(lp),
- q->rx_ring, q->rx_ring_dma);
- q->rx_ring = NULL;
-
- dma_free_coherent(&lp->pdev->dev,
- AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
- q->rx_buffers, q->rx_buffers_dma);
- q->rx_buffers = NULL;
+ at91ether_stop(lp);
return pm_runtime_put(&lp->pdev->dev);
}
@@ -4388,7 +4428,7 @@ static int macb_probe(struct platform_device *pdev)
bp->wol = 0;
if (of_get_property(np, "magic-packet", NULL))
bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
spin_lock_init(&bp->lock);
@@ -4564,10 +4604,10 @@ static int __maybe_unused macb_suspend(struct device *dev)
bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
}
- netif_carrier_off(netdev);
if (bp->ptp_info)
bp->ptp_info->ptp_remove(netdev);
- pm_runtime_force_suspend(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_suspend(dev);
return 0;
}
@@ -4582,7 +4622,8 @@ static int __maybe_unused macb_resume(struct device *dev)
if (!netif_running(netdev))
return 0;
- pm_runtime_force_resume(dev);
+ if (!device_may_wakeup(dev))
+ pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IDR, MACB_BIT(WOL));
@@ -4620,7 +4661,7 @@ static int __maybe_unused macb_runtime_suspend(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_disable_unprepare(bp->tx_clk);
clk_disable_unprepare(bp->hclk);
clk_disable_unprepare(bp->pclk);
@@ -4636,7 +4677,7 @@ static int __maybe_unused macb_runtime_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
- if (!(device_may_wakeup(&bp->dev->dev))) {
+ if (!(device_may_wakeup(dev))) {
clk_prepare_enable(bp->pclk);
clk_prepare_enable(bp->hclk);
clk_prepare_enable(bp->tx_clk);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 7a7f61a8cdf4..d02d346629b3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1112,16 +1112,16 @@ static bool is_addr_all_mask(u8 *ipmask, int family)
struct in_addr *addr;
addr = (struct in_addr *)ipmask;
- if (ntohl(addr->s_addr) == 0xffffffff)
+ if (addr->s_addr == htonl(0xffffffff))
return true;
} else if (family == AF_INET6) {
struct in6_addr *addr6;
addr6 = (struct in6_addr *)ipmask;
- if (ntohl(addr6->s6_addr32[0]) == 0xffffffff &&
- ntohl(addr6->s6_addr32[1]) == 0xffffffff &&
- ntohl(addr6->s6_addr32[2]) == 0xffffffff &&
- ntohl(addr6->s6_addr32[3]) == 0xffffffff)
+ if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
+ addr6->s6_addr32[1] == htonl(0xffffffff) &&
+ addr6->s6_addr32[2] == htonl(0xffffffff) &&
+ addr6->s6_addr32[3] == htonl(0xffffffff))
return true;
}
return false;
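The rewritten checks keep both sides of each comparison in network byte order, so no per-word byte swap is needed. A minimal helper in the same spirit (hypothetical name, shown only as a sketch of the idiom):

	static inline bool addr_word_is_all_ones(__be32 word)
	{
		/* both operands are big-endian, so the comparison is valid
		 * without ntohl() and stays sparse-clean
		 */
		return word == htonl(0xffffffff);
	}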
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 32a45dc51ed7..92eee66cbc84 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2938,6 +2938,7 @@ static inline int uld_send(struct adapter *adap, struct sk_buff *skb,
txq_info = adap->sge.uld_txq_info[tx_uld_type];
if (unlikely(!txq_info)) {
WARN_ON(true);
+ kfree_skb(skb);
return NET_XMIT_DROP;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 1aa6dc10dc0b..ad522f822cc2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3493,7 +3493,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
drv_fw = &fw_info->fw_hdr;
/* Read the header of the firmware on the card */
- ret = -t4_read_flash(adap, FLASH_FW_START,
+ ret = t4_read_flash(adap, FLASH_FW_START,
sizeof(*card_fw) / sizeof(uint32_t),
(uint32_t *)card_fw, 1);
if (ret == 0) {
@@ -3522,8 +3522,8 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
should_install_fs_fw(adap, card_fw_usable,
be32_to_cpu(fs_fw->fw_ver),
be32_to_cpu(card_fw->fw_ver))) {
- ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
- fw_size, 0);
+ ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
+ fw_size, 0);
if (ret != 0) {
dev_err(adap->pdev_dev,
"failed to install firmware: %d\n", ret);
@@ -3554,7 +3554,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 8d13ea370db1..66e67b24a887 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
+ clk_disable_unprepare(port->pclk);
return PTR_ERR(port->reset);
}
reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
- if (ret)
+ if (ret) {
+ clk_disable_unprepare(port->pclk);
return ret;
+ }
ret = register_netdev(netdev);
if (!ret) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 2972244e6eb0..43570f4911ea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2938,7 +2938,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
DMA_BIT_MASK(40));
if (err) {
netdev_err(net_dev, "dma_coerce_mask_and_coherent() failed\n");
- return err;
+ goto free_netdev;
}
/* If fsl_fm_max_frm is set to a higher value than the all-common 1500,
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index f150cd454fa4..0998ceb1a26e 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -3632,7 +3632,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
- if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
+ if (IS_ERR_OR_NULL(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
return 0;
if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 96831f49925c..22105d09bc89 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -266,7 +266,7 @@ static irqreturn_t enetc_msix(int irq, void *data)
/* disable interrupts */
enetc_wr_reg(v->rbier, 0);
- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i), 0);
napi_schedule_irqoff(&v->napi);
@@ -302,7 +302,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
/* enable interrupts */
enetc_wr_reg(v->rbier, ENETC_RBIER_RXTIE);
- for_each_set_bit(i, &v->tx_rings_map, v->count_tx_rings)
+ for_each_set_bit(i, &v->tx_rings_map, ENETC_MAX_NUM_TXQS)
enetc_wr_reg(v->tbier_base + ENETC_BDR_OFF(i),
ENETC_TBIER_TXTIE);
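The point of the two enetc hunks above is that for_each_set_bit() only walks set bits strictly below its size argument, so the bound must be the bitmap width rather than the number of rings in use. A self-contained sketch, with illustrative values only:

	/* tx_rings_map might have bits 0 and 5 set while count_tx_rings is 2;
	 * bounding the walk by the count would skip ring 5, bounding by the
	 * bitmap width (ENETC_MAX_NUM_TXQS) visits every set bit.
	 */
	unsigned long map = BIT(0) | BIT(5);
	unsigned int i;

	for_each_set_bit(i, &map, ENETC_MAX_NUM_TXQS)
		pr_debug("TX BDR %u would be masked\n", i);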
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 4fac57dbb3c8..7a9675bd36e8 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -906,6 +906,7 @@ static int enetc_pf_probe(struct pci_dev *pdev,
return 0;
err_reg_netdev:
+ enetc_mdio_remove(pf);
enetc_of_put_phy(priv);
enetc_free_msix(priv);
err_alloc_msix:
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a6cdd5b61921..832a2175636d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -525,11 +525,6 @@ struct fec_enet_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
- unsigned long work_tx;
- unsigned long work_rx;
- unsigned long work_ts;
- unsigned long work_mdio;
-
struct platform_device *pdev;
int dev_id;
@@ -595,6 +590,7 @@ struct fec_enet_private {
void fec_ptp_init(struct platform_device *pdev, int irq_idx);
void fec_ptp_stop(struct platform_device *pdev);
void fec_ptp_start_cyclecounter(struct net_device *ndev);
+void fec_ptp_disable_hwts(struct net_device *ndev);
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2d0d313ee7c5..cc7fbfc09354 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -75,8 +75,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev);
#define DRIVER_NAME "fec"
-#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
-
/* Pause frame feild and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
#define FEC_ENET_RSEM_V 0x84
@@ -1248,8 +1246,6 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
fep = netdev_priv(ndev);
- queue_id = FEC_ENET_GET_QUQUE(queue_id);
-
txq = fep->tx_queue[queue_id];
/* get next bdp of dirty_tx */
nq = netdev_get_tx_queue(ndev, queue_id);
@@ -1298,8 +1294,13 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
ndev->stats.tx_bytes += skb->len;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
- fep->bufdesc_ex) {
+ /* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are the
+ * ones expected to time stamp this packet, so the time stamping
+ * enabled flag still needs to be checked here.
+ */
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+ fep->hwts_tx_en) &&
+ fep->bufdesc_ex) {
struct skb_shared_hwtstamps shhwtstamps;
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
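A small aside on the condition added above: bitwise & binds tighter than &&, so the expression already groups as intended; the equivalent spelling with explicit parentheses, shown purely for readability:

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		     fep->hwts_tx_en) &&
	    fep->bufdesc_ex) {
		/* fill in the hardware timestamp only when the FEC itself is
		 * doing the TX time stamping
		 */
	}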
@@ -1340,17 +1341,14 @@ skb_done:
writel(0, txq->bd.reg_desc_active);
}
-static void
-fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- u16 queue_id;
- /* First process class A queue, then Class B and Best Effort queue */
- for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
- clear_bit(queue_id, &fep->work_tx);
- fec_enet_tx_queue(ndev, queue_id);
- }
- return;
+ int i;
+
+ /* Make sure that AVB queues are processed first. */
+ for (i = fep->num_tx_queues - 1; i >= 0; i--)
+ fec_enet_tx_queue(ndev, i);
}
static int
@@ -1426,7 +1424,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
#ifdef CONFIG_M532x
flush_cache_all();
#endif
- queue_id = FEC_ENET_GET_QUQUE(queue_id);
rxq = fep->rx_queue[queue_id];
/* First, grab all of the stats for the incoming packet.
@@ -1550,6 +1547,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
htons(ETH_P_8021Q),
vlan_tag);
+ skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
if (is_copybreak) {
@@ -1595,48 +1593,30 @@ rx_processing_done:
return pkt_received;
}
-static int
-fec_enet_rx(struct net_device *ndev, int budget)
+static int fec_enet_rx(struct net_device *ndev, int budget)
{
- int pkt_received = 0;
- u16 queue_id;
struct fec_enet_private *fep = netdev_priv(ndev);
+ int i, done = 0;
- for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
- int ret;
+ /* Make sure that AVB queues are processed first. */
+ for (i = fep->num_rx_queues - 1; i >= 0; i--)
+ done += fec_enet_rx_queue(ndev, budget - done, i);
- ret = fec_enet_rx_queue(ndev,
- budget - pkt_received, queue_id);
-
- if (ret < budget - pkt_received)
- clear_bit(queue_id, &fep->work_rx);
-
- pkt_received += ret;
- }
- return pkt_received;
+ return done;
}
-static bool
-fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
{
- if (int_events == 0)
- return false;
+ uint int_events;
+
+ int_events = readl(fep->hwp + FEC_IEVENT);
- if (int_events & FEC_ENET_RXF_0)
- fep->work_rx |= (1 << 2);
- if (int_events & FEC_ENET_RXF_1)
- fep->work_rx |= (1 << 0);
- if (int_events & FEC_ENET_RXF_2)
- fep->work_rx |= (1 << 1);
+ /* Don't clear MDIO events, we poll for those */
+ int_events &= ~FEC_ENET_MII;
- if (int_events & FEC_ENET_TXF_0)
- fep->work_tx |= (1 << 2);
- if (int_events & FEC_ENET_TXF_1)
- fep->work_tx |= (1 << 0);
- if (int_events & FEC_ENET_TXF_2)
- fep->work_tx |= (1 << 1);
+ writel(int_events, fep->hwp + FEC_IEVENT);
- return true;
+ return int_events != 0;
}
static irqreturn_t
@@ -1644,18 +1624,9 @@ fec_enet_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct fec_enet_private *fep = netdev_priv(ndev);
- uint int_events;
irqreturn_t ret = IRQ_NONE;
- int_events = readl(fep->hwp + FEC_IEVENT);
-
- /* Don't clear MDIO events, we poll for those */
- int_events &= ~FEC_ENET_MII;
-
- writel(int_events, fep->hwp + FEC_IEVENT);
- fec_enet_collect_events(fep, int_events);
-
- if ((fep->work_tx || fep->work_rx) && fep->link) {
+ if (fec_enet_collect_events(fep) && fep->link) {
ret = IRQ_HANDLED;
if (napi_schedule_prep(&fep->napi)) {
@@ -1672,17 +1643,19 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
struct fec_enet_private *fep = netdev_priv(ndev);
- int pkts;
+ int done = 0;
- pkts = fec_enet_rx(ndev, budget);
-
- fec_enet_tx(ndev);
+ do {
+ done += fec_enet_rx(ndev, budget - done);
+ fec_enet_tx(ndev);
+ } while ((done < budget) && fec_enet_collect_events(fep));
- if (pkts < budget) {
- napi_complete_done(napi, pkts);
+ if (done < budget) {
+ napi_complete_done(napi, done);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
- return pkts;
+
+ return done;
}
/* ------------------------------------------------------------------------- */
@@ -2755,10 +2728,16 @@ static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
return -ENODEV;
if (fep->bufdesc_ex) {
- if (cmd == SIOCSHWTSTAMP)
- return fec_ptp_set(ndev, rq);
- if (cmd == SIOCGHWTSTAMP)
- return fec_ptp_get(ndev, rq);
+ bool use_fec_hwts = !phy_has_hwtstamp(phydev);
+
+ if (cmd == SIOCSHWTSTAMP) {
+ if (use_fec_hwts)
+ return fec_ptp_set(ndev, rq);
+ fec_ptp_disable_hwts(ndev);
+ } else if (cmd == SIOCGHWTSTAMP) {
+ if (use_fec_hwts)
+ return fec_ptp_get(ndev, rq);
+ }
}
return phy_mii_ioctl(phydev, rq, cmd);
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 945643c02615..f8a592c96beb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -452,6 +452,18 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
return -EOPNOTSUPP;
}
+/**
+ * fec_ptp_disable_hwts - disable hardware time stamping
+ * @ndev: pointer to net_device
+ */
+void fec_ptp_disable_hwts(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ fep->hwts_tx_en = 0;
+ fep->hwts_rx_en = 0;
+}
+
int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
{
struct fec_enet_private *fep = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index b3c69e9038ea..b513b8c5c3b5 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -779,8 +779,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
- if (!IS_ERR(mac_addr))
+ if (!IS_ERR(mac_addr)) {
ether_addr_copy(dev->dev_addr, mac_addr);
+ } else {
+ eth_hw_addr_random(dev);
+ dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
+ }
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index c117074c16e3..23f278e46975 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -699,7 +699,7 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
struct net_device *ndev = ring_data->napi.dev;
skb->protocol = eth_type_trans(skb, ndev);
- (void)napi_gro_receive(&ring_data->napi, skb);
+ napi_gro_receive(&ring_data->napi, skb);
}
static int hns_desc_unused(struct hnae_ring *ring)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index d041cac9a487..088550db2de7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -77,6 +77,7 @@
((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
enum hns_desc_type {
+ DESC_TYPE_UNKNOWN,
DESC_TYPE_SKB,
DESC_TYPE_FRAGLIST_SKB,
DESC_TYPE_PAGE,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b14f2abc2425..71ed4c54f6d5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int k, sizeoflast;
dma_addr_t dma;
- if (type == DESC_TYPE_SKB) {
- struct sk_buff *skb = (struct sk_buff *)priv;
- int ret;
-
- ret = hns3_fill_skb_desc(ring, skb, desc);
- if (unlikely(ret < 0))
- return ret;
-
- dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
- } else if (type == DESC_TYPE_FRAGLIST_SKB) {
+ if (type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1118,12 +1110,12 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
return -ENOMEM;
}
+ desc_cb->priv = priv;
desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
if (likely(size <= HNS3_MAX_BD_SIZE)) {
- desc_cb->priv = priv;
- desc_cb->dma = dma;
- desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16(size);
desc->tx.bdtp_fe_sc_vld_ra_ri =
@@ -1135,18 +1127,11 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
}
frag_buf_num = hns3_tx_bd_count(size);
- sizeoflast = size & HNS3_TX_LAST_SIZE_M;
+ sizeoflast = size % HNS3_MAX_BD_SIZE;
sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
/* When frag size is bigger than hardware limit, split this frag */
for (k = 0; k < frag_buf_num; k++) {
- /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
- desc_cb->priv = priv;
- desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
- desc_cb->type = ((type == DESC_TYPE_FRAGLIST_SKB ||
- type == DESC_TYPE_SKB) && !k) ?
- type : DESC_TYPE_PAGE;
-
/* now, fill the descriptor */
desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
@@ -1158,7 +1143,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
/* move ring pointer to next */
ring_ptr_move_fw(ring, next_to_use);
- desc_cb = &ring->desc_cb[ring->next_to_use];
desc = &ring->desc[ring->next_to_use];
}
@@ -1346,6 +1330,10 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
unsigned int i;
for (i = 0; i < ring->desc_num; i++) {
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+
+ memset(desc, 0, sizeof(*desc));
+
/* check if this is where we started */
if (ring->next_to_use == next_to_use_orig)
break;
@@ -1353,6 +1341,9 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
/* rollback one */
ring_ptr_move_bw(ring, next_to_use);
+ if (!ring->desc_cb[ring->next_to_use].dma)
+ continue;
+
/* unmap the descriptor dma address */
if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
ring->desc_cb[ring->next_to_use].type ==
@@ -1369,6 +1360,7 @@ static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
ring->desc_cb[ring->next_to_use].length = 0;
ring->desc_cb[ring->next_to_use].dma = 0;
+ ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
}
}
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -4127,9 +4123,8 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
hns3_put_ring_config(priv);
- hns3_dbg_uninit(handle);
-
out_netdev_free:
+ hns3_dbg_uninit(handle);
free_netdev(netdev);
}
@@ -4141,8 +4136,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
return;
if (linkup) {
- netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 66cd4395f781..a8776620acbc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -165,8 +165,6 @@ enum hns3_nic_state {
#define HNS3_TXD_MSS_S 0
#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
-#define HNS3_TX_LAST_SIZE_M 0xffff
-
#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 6b1545f982aa..2622e04e8eed 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -180,18 +180,21 @@ static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
{
struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
unsigned char *packet = skb->data;
+ u32 len = skb_headlen(skb);
u32 i;
- for (i = 0; i < skb->len; i++)
+ len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ for (i = 0; i < len; i++)
if (packet[i] != (unsigned char)(i & 0xff))
break;
/* The packet is correctly received */
- if (i == skb->len)
+ if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
tqp_vector->rx_group.total_packets++;
else
print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
- skb->data, skb->len, true);
+ skb->data, len, true);
dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 96bfad52630d..36575e72a915 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -2673,11 +2673,10 @@ void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
delay_time);
}
-static int hclge_get_mac_link_status(struct hclge_dev *hdev)
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
{
struct hclge_link_status_cmd *req;
struct hclge_desc desc;
- int link_status;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
@@ -2689,33 +2688,25 @@ static int hclge_get_mac_link_status(struct hclge_dev *hdev)
}
req = (struct hclge_link_status_cmd *)desc.data;
- link_status = req->status & HCLGE_LINK_STATUS_UP_M;
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
- return !!link_status;
+ return 0;
}
-static int hclge_get_mac_phy_link(struct hclge_dev *hdev)
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
{
- unsigned int mac_state;
- int link_stat;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ *link_status = HCLGE_LINK_STATUS_DOWN;
if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
return 0;
- mac_state = hclge_get_mac_link_status(hdev);
-
- if (hdev->hw.mac.phydev) {
- if (hdev->hw.mac.phydev->state == PHY_RUNNING)
- link_stat = mac_state &
- hdev->hw.mac.phydev->link;
- else
- link_stat = 0;
-
- } else {
- link_stat = mac_state;
- }
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
+ return 0;
- return !!link_stat;
+ return hclge_get_mac_link_status(hdev, link_status);
}
static void hclge_update_link_status(struct hclge_dev *hdev)
@@ -2725,6 +2716,7 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
struct hnae3_handle *rhandle;
struct hnae3_handle *handle;
int state;
+ int ret;
int i;
if (!client)
@@ -2733,7 +2725,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
return;
- state = hclge_get_mac_phy_link(hdev);
+ ret = hclge_get_mac_phy_link(hdev, &state);
+ if (ret) {
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+ return;
+ }
+
if (state != hdev->hw.mac.link) {
for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
handle = &hdev->vport[i].nic;
@@ -5809,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
/* to avoid rule conflict, when user configure rule by ethtool,
* we need to clear all arfs rules
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
- spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_config_rule(hdev, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5854,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
return ret;
}
+/* make sure this function is only called with fd_rule_lock held */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list)
{
@@ -5866,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;
- spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5883,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
-
- spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6266,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
- struct hclge_fd_rule_tuples new_tuples;
+ struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
u16 tmp_queue_id;
@@ -6276,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
- memset(&new_tuples, 0, sizeof(new_tuples));
- hclge_fd_get_flow_tuples(fkeys, &new_tuples);
-
- spin_lock_bh(&hdev->fd_rule_lock);
-
/* when there is already fd rule existed add by user,
* arfs should not work
*/
+ spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
/* check is there flow director filter existed for this flow,
* if not, create a new filter for it;
* if filter exist with different queue id, modify the filter;
@@ -6371,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
#endif
}
+/* make sure this function is only called with fd_rule_lock held */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6423,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
hdev->fd_en = enable;
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
- if (!enable)
+
+ if (!enable) {
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_del_all_fd_entries(handle, clear);
- else
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ } else {
hclge_restore_fd_entries(handle);
+ }
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6524,14 +6522,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
{
#define HCLGE_MAC_LINK_STATUS_NUM 100
+ int link_status;
int i = 0;
int ret;
do {
- ret = hclge_get_mac_link_status(hdev);
- if (ret < 0)
+ ret = hclge_get_mac_link_status(hdev, &link_status);
+ if (ret)
return ret;
- else if (ret == link_ret)
+ if (link_status == link_ret)
return 0;
msleep(HCLGE_LINK_STATUS_MS);
@@ -6542,9 +6541,6 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
-#define HCLGE_LINK_STATUS_DOWN 0
-#define HCLGE_LINK_STATUS_UP 1
-
int link_ret;
link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
@@ -6891,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
-
+ spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
+ spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
* so it only need to stop phy here.
@@ -9045,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -9859,7 +9857,7 @@ retry:
set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
hdev->reset_type = HNAE3_FLR_RESET;
ret = hclge_reset_prepare(hdev);
- if (ret) {
+ if (ret || hdev->reset_pending) {
dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
ret);
if (hdev->reset_pending ||
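The hclge hunks above all push fd_rule_lock out of the helpers and into the entry points, so each helper can assume the lock is already held. A minimal sketch of that convention follows; the names are hypothetical, not the hclge code, and lockdep_assert_held() is optional but makes the contract checkable:

	/* Sketch only -- hypothetical names, not part of the hclge driver. */
	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct dev_ctx {
		spinlock_t rule_lock;
		/* ... rule state ... */
	};

	/* Helper: caller must hold rule_lock (the new comments above). */
	static void clear_rules_locked(struct dev_ctx *ctx)
	{
		lockdep_assert_held(&ctx->rule_lock);
		/* walk and delete rules without re-taking the lock */
	}

	/* Entry point: take the lock once, covering the clear and the add. */
	static int add_rule(struct dev_ctx *ctx)
	{
		int ret;

		spin_lock_bh(&ctx->rule_lock);
		clear_rules_locked(ctx);
		ret = 0;	/* configure the new rule here */
		spin_unlock_bh(&ctx->rule_lock);
		return ret;
	}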
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 46e6e0fef3ba..9bbdd4557c27 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -317,6 +317,9 @@ enum hclge_link_fail_code {
HCLGE_LF_XSFP_ABSENT,
};
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
#define HCLGE_PG_NUM 4
#define HCLGE_SCH_MODE_SP 0
#define HCLGE_SCH_MODE_DWRR 1
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 1b9578d0bd80..9162856de1b1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
- /* When device is resetting, firmware is unable to handle
- * mailbox. Just record the vlan id, and remove it after
+ /* When device is resetting or reset failed, firmware is unable to
+ * handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
- if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -1793,6 +1794,11 @@ static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to assert VF reset, ret = %d\n", ret);
+ return ret;
+ }
hdev->rst_stats.vf_func_rst_cnt++;
}
@@ -3434,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
+ int ret;
rtnl_lock();
- hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
- rtnl_unlock();
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "is resetting when updating port based vlan info\n");
+ rtnl_unlock();
+ return;
+ }
+
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return;
+ }
/* send msg to PF and wait for the port based vlan info to be updated */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
memcpy(send_msg.data, port_base_vlan_info, data_size);
- hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
-
- if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
- else
- nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (!ret) {
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = state;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
- rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}
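The hclgevf hunks share one guard: configuration paths that would touch the mailbox first check the reset state bits under rtnl and bail out. A small sketch of that shape, using hypothetical state-bit names rather than the hclgevf definitions:

	/* Sketch only -- hypothetical state bits, not the hclgevf ones. */
	#include <linux/bitops.h>
	#include <linux/rtnetlink.h>

	#define STATE_RST_HANDLING	0
	#define STATE_RST_FAIL		1

	static int update_config(unsigned long *state)
	{
		rtnl_lock();
		/* Refuse to touch firmware while a reset is running or failed. */
		if (test_bit(STATE_RST_HANDLING, state) ||
		    test_bit(STATE_RST_FAIL, state)) {
			rtnl_unlock();
			return -EBUSY;
		}
		/* safe to notify the client and talk to the mailbox here */
		rtnl_unlock();
		return 0;
	}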
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 0245da02efbb..b735bc537508 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -814,6 +814,8 @@ err_aeqs_init:
err_init_msix:
err_pfhwdev_alloc:
hinic_free_hwif(hwif);
+ if (err > 0)
+ err = -EIO;
return ERR_PTR(err);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
index c33eb1147055..e0f5a81d8620 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c
@@ -370,48 +370,89 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt,
MSG_NOT_RESP, timeout);
}
-/**
- * mgmt_recv_msg_handler - handler for message from mgmt cpu
- * @pf_to_mgmt: PF to MGMT channel
- * @recv_msg: received message details
- **/
-static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
- struct hinic_recv_msg *recv_msg)
+static void recv_mgmt_msg_work_handler(struct work_struct *work)
{
- struct hinic_hwif *hwif = pf_to_mgmt->hwif;
- struct pci_dev *pdev = hwif->pdev;
- u8 *buf_out = recv_msg->buf_out;
+ struct hinic_mgmt_msg_handle_work *mgmt_work =
+ container_of(work, struct hinic_mgmt_msg_handle_work, work);
+ struct hinic_pf_to_mgmt *pf_to_mgmt = mgmt_work->pf_to_mgmt;
+ struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+ u8 *buf_out = pf_to_mgmt->mgmt_ack_buf;
struct hinic_mgmt_cb *mgmt_cb;
unsigned long cb_state;
u16 out_size = 0;
- if (recv_msg->mod >= HINIC_MOD_MAX) {
+ memset(buf_out, 0, MAX_PF_MGMT_BUF_SIZE);
+
+ if (mgmt_work->mod >= HINIC_MOD_MAX) {
dev_err(&pdev->dev, "Unknown MGMT MSG module = %d\n",
- recv_msg->mod);
+ mgmt_work->mod);
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
return;
}
- mgmt_cb = &pf_to_mgmt->mgmt_cb[recv_msg->mod];
+ mgmt_cb = &pf_to_mgmt->mgmt_cb[mgmt_work->mod];
cb_state = cmpxchg(&mgmt_cb->state,
HINIC_MGMT_CB_ENABLED,
HINIC_MGMT_CB_ENABLED | HINIC_MGMT_CB_RUNNING);
if ((cb_state == HINIC_MGMT_CB_ENABLED) && (mgmt_cb->cb))
- mgmt_cb->cb(mgmt_cb->handle, recv_msg->cmd,
- recv_msg->msg, recv_msg->msg_len,
+ mgmt_cb->cb(mgmt_cb->handle, mgmt_work->cmd,
+ mgmt_work->msg, mgmt_work->msg_len,
buf_out, &out_size);
else
dev_err(&pdev->dev, "No MGMT msg handler, mod: %d, cmd: %d\n",
- recv_msg->mod, recv_msg->cmd);
+ mgmt_work->mod, mgmt_work->cmd);
mgmt_cb->state &= ~HINIC_MGMT_CB_RUNNING;
- if (!recv_msg->async_mgmt_to_pf)
+ if (!mgmt_work->async_mgmt_to_pf)
/* MGMT sent sync msg, send the response */
- msg_to_mgmt_async(pf_to_mgmt, recv_msg->mod, recv_msg->cmd,
+ msg_to_mgmt_async(pf_to_mgmt, mgmt_work->mod, mgmt_work->cmd,
buf_out, out_size, MGMT_RESP,
- recv_msg->msg_id);
+ mgmt_work->msg_id);
+
+ kfree(mgmt_work->msg);
+ kfree(mgmt_work);
+}
+
+/**
+ * mgmt_recv_msg_handler - handler for message from mgmt cpu
+ * @pf_to_mgmt: PF to MGMT channel
+ * @recv_msg: received message details
+ **/
+static void mgmt_recv_msg_handler(struct hinic_pf_to_mgmt *pf_to_mgmt,
+ struct hinic_recv_msg *recv_msg)
+{
+ struct hinic_mgmt_msg_handle_work *mgmt_work = NULL;
+ struct pci_dev *pdev = pf_to_mgmt->hwif->pdev;
+
+ mgmt_work = kzalloc(sizeof(*mgmt_work), GFP_KERNEL);
+ if (!mgmt_work) {
+ dev_err(&pdev->dev, "Allocate mgmt work memory failed\n");
+ return;
+ }
+
+ if (recv_msg->msg_len) {
+ mgmt_work->msg = kzalloc(recv_msg->msg_len, GFP_KERNEL);
+ if (!mgmt_work->msg) {
+ dev_err(&pdev->dev, "Allocate mgmt msg memory failed\n");
+ kfree(mgmt_work);
+ return;
+ }
+ }
+
+ mgmt_work->pf_to_mgmt = pf_to_mgmt;
+ mgmt_work->msg_len = recv_msg->msg_len;
+ memcpy(mgmt_work->msg, recv_msg->msg, recv_msg->msg_len);
+ mgmt_work->msg_id = recv_msg->msg_id;
+ mgmt_work->mod = recv_msg->mod;
+ mgmt_work->cmd = recv_msg->cmd;
+ mgmt_work->async_mgmt_to_pf = recv_msg->async_mgmt_to_pf;
+
+ INIT_WORK(&mgmt_work->work, recv_mgmt_msg_work_handler);
+ queue_work(pf_to_mgmt->workq, &mgmt_work->work);
}
/**
@@ -546,6 +587,12 @@ static int alloc_msg_buf(struct hinic_pf_to_mgmt *pf_to_mgmt)
if (!pf_to_mgmt->sync_msg_buf)
return -ENOMEM;
+ pf_to_mgmt->mgmt_ack_buf = devm_kzalloc(&pdev->dev,
+ MAX_PF_MGMT_BUF_SIZE,
+ GFP_KERNEL);
+ if (!pf_to_mgmt->mgmt_ack_buf)
+ return -ENOMEM;
+
return 0;
}
@@ -571,6 +618,11 @@ int hinic_pf_to_mgmt_init(struct hinic_pf_to_mgmt *pf_to_mgmt,
return 0;
sema_init(&pf_to_mgmt->sync_msg_lock, 1);
+ pf_to_mgmt->workq = create_singlethread_workqueue("hinic_mgmt");
+ if (!pf_to_mgmt->workq) {
+ dev_err(&pdev->dev, "Failed to initialize MGMT workqueue\n");
+ return -ENOMEM;
+ }
pf_to_mgmt->sync_msg_id = 0;
err = alloc_msg_buf(pf_to_mgmt);
@@ -605,4 +657,5 @@ void hinic_pf_to_mgmt_free(struct hinic_pf_to_mgmt *pf_to_mgmt)
hinic_aeq_unregister_hw_cb(&hwdev->aeqs, HINIC_MSG_FROM_MGMT_CPU);
hinic_api_cmd_free(pf_to_mgmt->cmd_chain);
+ destroy_workqueue(pf_to_mgmt->workq);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
index c2b142c08b0e..a824fbda59db 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.h
@@ -119,6 +119,7 @@ struct hinic_pf_to_mgmt {
struct semaphore sync_msg_lock;
u16 sync_msg_id;
u8 *sync_msg_buf;
+ void *mgmt_ack_buf;
struct hinic_recv_msg recv_resp_msg_from_mgmt;
struct hinic_recv_msg recv_msg_from_mgmt;
@@ -126,6 +127,21 @@ struct hinic_pf_to_mgmt {
struct hinic_api_cmd_chain *cmd_chain[HINIC_API_CMD_MAX];
struct hinic_mgmt_cb mgmt_cb[HINIC_MOD_MAX];
+
+ struct workqueue_struct *workq;
+};
+
+struct hinic_mgmt_msg_handle_work {
+ struct work_struct work;
+ struct hinic_pf_to_mgmt *pf_to_mgmt;
+
+ void *msg;
+ u16 msg_len;
+
+ enum hinic_mod_type mod;
+ u8 cmd;
+ u16 msg_id;
+ int async_mgmt_to_pf;
};
void hinic_register_mgmt_msg_cb(struct hinic_pf_to_mgmt *pf_to_mgmt,
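The hinic change moves mgmt-message handling off the receive path: the payload is copied into a heap-allocated work item and processed later on a dedicated workqueue. A compact sketch of that copy-and-defer pattern, with hypothetical structures rather than the hinic ones:

	/* Sketch only -- generic "copy message and defer to a workqueue". */
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>
	#include <linux/workqueue.h>

	struct msg_work {
		struct work_struct work;
		void *msg;
		u16 msg_len;
	};

	static void msg_work_handler(struct work_struct *work)
	{
		struct msg_work *mw = container_of(work, struct msg_work, work);

		/* process mw->msg / mw->msg_len in process context */
		kfree(mw->msg);
		kfree(mw);
	}

	/* Called from the receive path: copy the payload so the original
	 * buffer can be reused, then hand the work to the queue. */
	static int defer_msg(struct workqueue_struct *wq, const void *msg, u16 len)
	{
		struct msg_work *mw = kzalloc(sizeof(*mw), GFP_KERNEL);

		if (!mw)
			return -ENOMEM;
		mw->msg = kmemdup(msg, len, GFP_KERNEL);
		if (!mw->msg) {
			kfree(mw);
			return -ENOMEM;
		}
		mw->msg_len = len;
		INIT_WORK(&mw->work, msg_work_handler);
		queue_work(wq, &mw->work);
		return 0;
	}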
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 0fd7eae25fe9..5afb3c9c52d2 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -3206,7 +3206,7 @@ req_rx_irq_failed:
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f999cca37a8a..489bb5b59475 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
- if (ret_val) {
+ if (ret_val)
e_warn("Failed to disable ULP\n");
- goto out;
- }
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 8bb3db2cbd41..6e5861bfb0fa 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -6224,9 +6224,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
+ rtnl_lock();
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IGB_DOWN, &adapter->state) ||
+ test_bit(__IGB_RESETTING, &adapter->state)) {
+ rtnl_unlock();
+ return;
+ }
+
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
+ rtnl_unlock();
}
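The igb hunk takes the rtnl lock around the reset work and returns early when the adapter is already down or mid-reset. A stripped-down sketch of that reset-task shape, with made-up flag and structure names:

	/* Sketch only -- hypothetical adapter/state names, not igb's. */
	#include <linux/bitops.h>
	#include <linux/rtnetlink.h>
	#include <linux/workqueue.h>

	#define FLAG_DOWN	0
	#define FLAG_RESETTING	1

	struct adapter {
		struct work_struct reset_task;
		unsigned long state;
	};

	static void reset_task(struct work_struct *work)
	{
		struct adapter *ad = container_of(work, struct adapter, reset_task);

		rtnl_lock();
		/* Nothing to do if the interface is already down or resetting. */
		if (test_bit(FLAG_DOWN, &ad->state) ||
		    test_bit(FLAG_RESETTING, &ad->state)) {
			rtnl_unlock();
			return;
		}
		/* reinitialize the device under rtnl here */
		rtnl_unlock();
	}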
/**
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index af6000172848..7d5d9d34f4e4 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3571,7 +3571,7 @@ static int mvneta_config_interface(struct mvneta_port *pp,
MVNETA_HSGMII_SERDES_PROTO);
break;
default:
- return -EINVAL;
+ break;
}
}
@@ -3959,7 +3959,7 @@ static void mvneta_mac_config(struct phylink_config *config, unsigned int mode,
/* When at 2.5G, the link partner can send frames with shortened
* preambles.
*/
- if (state->speed == SPEED_2500)
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
new_ctrl4 |= MVNETA_GMAC4_SHORT_PREAMBLE_ENABLE;
if (pp->phy_interface != state->interface) {
@@ -5009,10 +5009,18 @@ static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
}
/* Power up the port */
-static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
+static int mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
{
/* MAC Cause register should be cleared */
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
+
+ if (phy_mode != PHY_INTERFACE_MODE_QSGMII &&
+ phy_mode != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(phy_mode) &&
+ !phy_interface_mode_is_rgmii(phy_mode))
+ return -EINVAL;
+
+ return 0;
}
/* Device initialization routine */
@@ -5198,7 +5206,11 @@ static int mvneta_probe(struct platform_device *pdev)
if (err < 0)
goto err_netdev;
- mvneta_port_power_up(pp, phy_mode);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
+ if (err < 0) {
+ dev_err(&pdev->dev, "can't power up port\n");
+ return err;
+ }
/* Armada3700 network controller does not support per-cpu
* operation, so only single NAPI should be initialized.
@@ -5352,7 +5364,11 @@ static int mvneta_resume(struct device *device)
}
}
mvneta_defaults_set(pp);
- mvneta_port_power_up(pp, pp->phy_interface);
+ err = mvneta_port_power_up(pp, pp->phy_interface);
+ if (err < 0) {
+ dev_err(device, "can't power up port\n");
+ return err;
+ }
netif_device_attach(dev);
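mvneta_port_power_up() now rejects PHY modes the MAC cannot drive and both probe and resume propagate the error. A hedged sketch of that validate-then-propagate shape, with generic names and none of the mvneta register programming:

	/* Sketch only -- generic validation, not the mvneta code. */
	#include <linux/errno.h>
	#include <linux/phy.h>

	static int port_power_up(phy_interface_t mode)
	{
		/* Accept only the modes the MAC can actually drive. */
		if (mode != PHY_INTERFACE_MODE_QSGMII &&
		    mode != PHY_INTERFACE_MODE_SGMII &&
		    !phy_interface_mode_is_8023z(mode) &&
		    !phy_interface_mode_is_rgmii(mode))
			return -EINVAL;

		/* program the serdes / clear interrupt cause here */
		return 0;
	}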
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 64786568af0d..75a8c407e815 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
if (!netif_running(pf->netdev))
return;
+ rtnl_lock();
otx2_stop(pf->netdev);
pf->reset_count++;
otx2_open(pf->netdev);
netif_trans_update(pf->netdev);
+ rtnl_unlock();
}
static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
+ cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);
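Both octeontx2 remove paths now cancel the reset work before tearing the device down, so the handler cannot run against freed resources. A tiny sketch of that ordering, with hypothetical names:

	/* Sketch only -- hypothetical names; the point is the ordering. */
	#include <linux/workqueue.h>

	struct nic {
		struct work_struct reset_task;
		/* resources the reset task touches */
	};

	static void nic_remove(struct nic *n)
	{
		/* Wait for any in-flight reset before the resources go away. */
		cancel_work_sync(&n->reset_task);
		/* now it is safe to unregister and free */
	}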
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index f4227517dc8e..92a3db69a6cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
+ cancel_work_sync(&vf->reset_task);
+ unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 241f00716979..fe54764caea9 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -203,7 +203,7 @@ io_error:
static inline u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
{
- u16 v;
+ u16 v = 0;
__gm_phy_read(hw, port, reg, &v);
return v;
}
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index f6a1f8666f95..a1c45b39a230 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
return 0;
}
-static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface, int speed)
{
u32 val;
int ret;
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ val = 500000000;
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
val = (speed == SPEED_1000) ?
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
- if (state->interface !=
- PHY_INTERFACE_MODE_TRGMII)
- mtk_gmac0_rgmii_adjust(mac->hw,
- state->speed);
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->interface,
+ state->speed);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2882,6 +2891,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+
return 0;
free_netdev:
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 3d9aa7da95e9..2d3e45780719 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4356,12 +4356,14 @@ end:
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
+ struct mlx4_dev *dev = persist->dev;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
+ mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
index 7be6b2d36b60..9976de8b9047 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dcbnl.h
@@ -29,6 +29,7 @@ struct mlx5e_dcbx {
bool manual_buffer;
u32 cable_len;
u32 xoff;
+ u16 port_buff_cell_sz;
};
#define MLX5E_MAX_DSCP (64)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 2a8950b3056f..3cf3e35053f7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -78,11 +78,26 @@ static const u32 mlx5e_ext_link_speed[MLX5E_EXT_LINK_MODES_NUMBER] = {
[MLX5E_400GAUI_8] = 400000,
};
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_port_eth_proto eproto;
+ int err;
+
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet))
+ return true;
+
+ err = mlx5_port_query_eth_proto(mdev, 1, true, &eproto);
+ if (err)
+ return false;
+
+ return !!eproto.cap;
+}
+
static void mlx5e_port_get_speed_arr(struct mlx5_core_dev *mdev,
const u32 **arr, u32 *size,
bool force_legacy)
{
- bool ext = force_legacy ? false : MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = force_legacy ? false : mlx5e_ptys_ext_supported(mdev);
*size = ext ? ARRAY_SIZE(mlx5e_ext_link_speed) :
ARRAY_SIZE(mlx5e_link_speed);
@@ -177,7 +192,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
bool ext;
int err;
- ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext = mlx5e_ptys_ext_supported(mdev);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
goto out;
@@ -205,7 +220,7 @@ int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
int err;
int i;
- ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext = mlx5e_ptys_ext_supported(mdev);
err = mlx5_port_query_eth_proto(mdev, 1, ext, &eproto);
if (err)
return err;
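mlx5e_ptys_ext_supported() falls back from the capability bit to probing whether the extended protocol table is non-empty, and the ethtool/link-speed callers switch to it. A generic sketch of that capability-or-probe fallback, using made-up names and a stubbed firmware query:

	/* Sketch only -- generic fallback pattern, hypothetical names. */
	#include <linux/types.h>

	struct dev_caps {
		bool ext_feature_bit;	/* advertised capability */
	};

	/* Stand-in for the real firmware query; fills *cap on success. */
	static int query_ext_proto_cap(struct dev_caps *caps, u32 *cap)
	{
		*cap = 0;
		return 0;
	}

	static bool ext_mode_supported(struct dev_caps *caps)
	{
		u32 cap;

		/* Trust the capability bit when it is set... */
		if (caps->ext_feature_bit)
			return true;

		/* ...otherwise a non-empty extended table also means support. */
		if (query_ext_proto_cap(caps, &cap))
			return false;

		return cap != 0;
	}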
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
index a2ddd446dd59..7a7defe60792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.h
@@ -54,7 +54,7 @@ int mlx5e_port_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
int mlx5e_port_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
u32 mlx5e_port_speed2linkmodes(struct mlx5_core_dev *mdev, u32 speed,
bool force_legacy);
-
+bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev);
int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out);
int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in);
int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index ae99fac08b53..673f1c82d381 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -34,6 +34,7 @@
int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer)
{
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
u32 total_used = 0;
@@ -57,11 +58,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->buffer[i].epsb =
MLX5_GET(bufferx_reg, buffer, epsb);
port_buffer->buffer[i].size =
- MLX5_GET(bufferx_reg, buffer, size) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(bufferx_reg, buffer, size) * port_buff_cell_sz;
port_buffer->buffer[i].xon =
- MLX5_GET(bufferx_reg, buffer, xon_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(bufferx_reg, buffer, xon_threshold) * port_buff_cell_sz;
port_buffer->buffer[i].xoff =
- MLX5_GET(bufferx_reg, buffer, xoff_threshold) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
total_used += port_buffer->buffer[i].size;
mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
@@ -73,7 +74,7 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
}
port_buffer->port_buffer_size =
- MLX5_GET(pbmc_reg, out, port_buffer_size) << MLX5E_BUFFER_CELL_SHIFT;
+ MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz;
port_buffer->spare_buffer_size =
port_buffer->port_buffer_size - total_used;
@@ -88,9 +89,9 @@ out:
static int port_set_buffer(struct mlx5e_priv *priv,
struct mlx5e_port_buffer *port_buffer)
{
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5_core_dev *mdev = priv->mdev;
int sz = MLX5_ST_SZ_BYTES(pbmc_reg);
- void *buffer;
void *in;
int err;
int i;
@@ -104,16 +105,18 @@ static int port_set_buffer(struct mlx5e_priv *priv,
goto out;
for (i = 0; i < MLX5E_MAX_BUFFER; i++) {
- buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
-
- MLX5_SET(bufferx_reg, buffer, size,
- port_buffer->buffer[i].size >> MLX5E_BUFFER_CELL_SHIFT);
- MLX5_SET(bufferx_reg, buffer, lossy,
- port_buffer->buffer[i].lossy);
- MLX5_SET(bufferx_reg, buffer, xoff_threshold,
- port_buffer->buffer[i].xoff >> MLX5E_BUFFER_CELL_SHIFT);
- MLX5_SET(bufferx_reg, buffer, xon_threshold,
- port_buffer->buffer[i].xon >> MLX5E_BUFFER_CELL_SHIFT);
+ void *buffer = MLX5_ADDR_OF(pbmc_reg, in, buffer[i]);
+ u64 size = port_buffer->buffer[i].size;
+ u64 xoff = port_buffer->buffer[i].xoff;
+ u64 xon = port_buffer->buffer[i].xon;
+
+ do_div(size, port_buff_cell_sz);
+ do_div(xoff, port_buff_cell_sz);
+ do_div(xon, port_buff_cell_sz);
+ MLX5_SET(bufferx_reg, buffer, size, size);
+ MLX5_SET(bufferx_reg, buffer, lossy, port_buffer->buffer[i].lossy);
+ MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff);
+ MLX5_SET(bufferx_reg, buffer, xon_threshold, xon);
}
err = mlx5e_port_set_pbmc(mdev, in);
@@ -143,7 +146,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int max_mtu)
+ u32 xoff, unsigned int max_mtu, u16 port_buff_cell_sz)
{
int i;
@@ -155,7 +158,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
- (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) {
+ (xoff + max_mtu + port_buff_cell_sz)) {
pr_err("buffer_size[%d]=%d is not enough for lossless buffer\n",
i, port_buffer->buffer[i].size);
return -ENOMEM;
@@ -175,6 +178,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* @pfc_en: <input> current pfc configuration
* @buffer: <input> current prio to buffer mapping
* @xoff: <input> xoff value
+ * @port_buff_cell_sz: <input> port buffer cell_size
* @port_buffer: <output> port receive buffer configuration
* @change: <output>
*
@@ -189,7 +193,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* sets change to true if buffer configuration was modified.
*/
static int update_buffer_lossy(unsigned int max_mtu,
- u8 pfc_en, u8 *buffer, u32 xoff,
+ u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz,
struct mlx5e_port_buffer *port_buffer,
bool *change)
{
@@ -225,7 +229,7 @@ static int update_buffer_lossy(unsigned int max_mtu,
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
@@ -262,6 +266,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 *buffer_size,
u8 *prio2buffer)
{
+ u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
struct mlx5e_port_buffer port_buffer;
u32 xoff = calculate_xoff(priv, mtu);
bool update_prio2buffer = false;
@@ -282,7 +287,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
}
@@ -292,7 +297,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz,
&port_buffer, &update_buffer);
if (err)
return err;
@@ -304,7 +309,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
- xoff, &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff,
+ port_buff_cell_sz, &port_buffer, &update_buffer);
if (err)
return err;
@@ -329,7 +334,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
}
@@ -337,7 +342,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu, port_buff_cell_sz);
if (err)
return err;
}
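The port-buffer hunks replace the fixed 128-byte cell shift with the cell size queried from firmware, converting bytes to cells with do_div(). A small sketch of that conversion; the 144-byte cell size in the comment is only an example value:

	/* Sketch only -- byte-to-cell conversion, not the mlx5 registers. */
	#include <linux/types.h>
	#include <asm/div64.h>

	static u32 bytes_to_cells(u64 bytes, u16 cell_sz)
	{
		/* do_div() divides the 64-bit value in place and returns the
		 * remainder; the quotient stays in 'bytes'. */
		do_div(bytes, cell_sz);
		return (u32)bytes;
	}

	/* e.g. with a queried cell size of 144 bytes instead of the old
	 * fixed 128 (1 << 7), a 9216-byte buffer maps to 64 cells, not 72. */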
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
index 34f55b81a0de..80af7a5ac604 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
@@ -36,7 +36,6 @@
#include "port.h"
#define MLX5E_MAX_BUFFER 8
-#define MLX5E_BUFFER_CELL_SHIFT 7
#define MLX5E_DEFAULT_CABLE_LEN 7 /* 7 meters */
#define MLX5_BUFFER_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, pcam_reg) && \
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
index bdb71332cbf2..3e44e4d820c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c
@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
- struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5e_rep_priv *rpriv;
+ struct mlx5e_priv *priv;
/* A given netdev is not a representor or not a slave of LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
return false;
+ priv = netdev_priv(netdev);
+ rpriv = priv->ppriv;
+
/* Egress acl forward to vport is supported only on non-uplink representors */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
index baa162432e75..c3d167fa944c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/neigh.c
@@ -6,7 +6,6 @@
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>
-#include <linux/rwlock.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <net/netevent.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index eefeb1cdc2ee..245a99f69641 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
}
}
- tun_dst = tun_rx_dst(enc_opts.key.len);
+ if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
+ key.enc_ip.tos, key.enc_ip.ttl,
+ key.enc_tp.dst, 0, TUNNEL_KEY,
+ key32_to_tunnel_id(key.enc_key_id.keyid),
+ enc_opts.key.len);
+ } else {
+ netdev_dbg(priv->netdev,
+ "Couldn't restore tunnel, unsupported addr_type: %d\n",
+ key.enc_control.addr_type);
+ return false;
+ }
+
if (!tun_dst) {
- WARN_ON_ONCE(true);
+ netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
return false;
}
- ip_tunnel_key_init(&tun_dst->u.tun_info.key,
- key.enc_ipv4.src, key.enc_ipv4.dst,
- key.enc_ip.tos, key.enc_ip.ttl,
- 0, /* label */
- key.enc_tp.src, key.enc_tp.dst,
- key32_to_tunnel_id(key.enc_key_id.keyid),
- TUNNEL_KEY);
+ tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 430025550fad..aad1c29b23db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -1097,6 +1097,7 @@ mlx5_tc_ct_flush_ft_entry(void *ptr, void *arg)
struct mlx5_ct_entry *entry = ptr;
mlx5_tc_ct_entry_del_rules(ct_priv, entry);
+ kfree(entry);
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 951ea26d96bc..e472ed0eacfb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index 58b13192df23..2805416c32a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 37b176801bcc..038a0f1cecec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index bc102d094bbd..d20243d6a032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -1217,6 +1217,24 @@ static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
return 0;
}
+#define MLX5E_BUFFER_CELL_SHIFT 7
+
+static u16 mlx5e_query_port_buffers_cell_size(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(sbcam_reg)] = {};
+
+ if (!MLX5_CAP_GEN(mdev, sbcam_reg))
+ return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+ if (mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_SBCAM, 0, 0))
+ return (1 << MLX5E_BUFFER_CELL_SHIFT);
+
+ return MLX5_GET(sbcam_reg, out, cap_cell_size);
+}
+
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
@@ -1234,6 +1252,7 @@ void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
+ priv->dcbx.port_buff_cell_sz = mlx5e_query_port_buffers_cell_size(priv);
priv->dcbx.manual_buffer = false;
priv->dcbx.cable_len = MLX5E_DEFAULT_CABLE_LEN;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index ec5658bbe3c5..c2464c349117 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -200,7 +200,7 @@ static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
struct ptys2ethtool_config **arr,
u32 *size)
{
- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = mlx5e_ptys_ext_supported(mdev);
*arr = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
*size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
@@ -883,7 +883,7 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
- bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ bool ext = mlx5e_ptys_ext_supported(mdev);
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
@@ -913,7 +913,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
__func__, err);
goto err_query_regs;
}
- ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_proto_cap = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
@@ -1066,7 +1066,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
autoneg = link_ksettings->base.autoneg;
speed = link_ksettings->base.speed;
- ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
+ ext_supported = mlx5e_ptys_ext_supported(mdev);
ext = ext_requested(autoneg, adver, ext_supported);
if (!ext_supported && ext)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a836a02a2116..3b892ec301b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -419,7 +419,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -470,7 +470,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
- return err;
+ goto err_rq_wq_destroy;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -3069,6 +3069,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
+static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
+ enum mlx5_port_status state)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int vport_admin_state;
+
+ mlx5_set_port_admin_status(mdev, state);
+
+ if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
+ return;
+
+ if (state == MLX5_PORT_UP)
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ else
+ vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+
+ mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3101,12 +3120,9 @@ int mlx5e_open(struct net_device *netdev)
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
if (!err)
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
- if (mlx5_vxlan_allowed(priv->mdev->vxlan))
- udp_tunnel_get_rx_info(netdev);
-
return err;
}
@@ -3138,7 +3154,7 @@ int mlx5e_close(struct net_device *netdev)
return -ENODEV;
mutex_lock(&priv->state_lock);
- mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -5121,6 +5137,10 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_flow_steering;
+#ifdef CONFIG_MLX5_EN_ARFS
+ priv->netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(priv->mdev);
+#endif
+
return 0;
err_destroy_flow_steering:
@@ -5181,7 +5201,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+ mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
@@ -5202,6 +5222,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
rtnl_lock();
if (netif_running(netdev))
mlx5e_open(netdev);
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+ udp_tunnel_get_rx_info(netdev);
netif_device_attach(netdev);
rtnl_unlock();
}
@@ -5216,6 +5238,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
+ if (mlx5_vxlan_allowed(priv->mdev->vxlan))
+ udp_tunnel_drop_rx_info(priv->netdev);
netif_device_detach(priv->netdev);
rtnl_unlock();
@@ -5288,10 +5312,6 @@ int mlx5e_netdev_init(struct net_device *netdev,
/* netdev init */
netif_carrier_off(netdev);
-#ifdef CONFIG_MLX5_EN_ARFS
- netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev);
-#endif
-
return 0;
err_free_cpumask:
@@ -5389,6 +5409,8 @@ err_cleanup_tx:
profile->cleanup_tx(priv);
out:
+ set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ cancel_work_sync(&priv->update_stats_work);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 006807e04eda..9519a61bd8ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -936,6 +936,7 @@ err_close_drop_rq:
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
+ mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
+ mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
+ 0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 7fc84f58e28a..fcedb5bdca9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2356,6 +2356,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}
@@ -4670,9 +4671,10 @@ static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
struct mlx5e_rep_priv *rpriv)
{
/* Offloaded flow rule is allowed to duplicate on non-uplink representor
- * sharing tc block with other slaves of a lag device.
+ * sharing tc block with other slaves of a lag device. Rpriv can be NULL if this
+ * function is called from NIC mode.
*/
- return netif_is_lag_port(dev) && rpriv->rep->vport != MLX5_VPORT_UPLINK;
+ return netif_is_lag_port(dev) && rpriv && rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
@@ -4686,13 +4688,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
rcu_read_lock();
flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
- rcu_read_unlock();
if (flow) {
/* Same flow rule offloaded to non-uplink representor sharing tc block,
* just return 0.
*/
if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
- goto out;
+ goto rcu_unlock;
NL_SET_ERR_MSG_MOD(extack,
"flow cookie already exists, ignoring");
@@ -4700,8 +4701,12 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
"flow cookie %lx already exists, ignoring\n",
f->cookie);
err = -EEXIST;
- goto out;
+ goto rcu_unlock;
}
+rcu_unlock:
+ rcu_read_unlock();
+ if (flow)
+ goto out;
trace_mlx5e_configure_flower(f);
err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
index 5dc335e621c5..b68976b378b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
@@ -217,7 +217,6 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw,
}
/* Create ingress allow rule */
- memset(spec, 0, sizeof(*spec));
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1116ab9bea6c..43005caff09e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1608,7 +1608,7 @@ abort:
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
-
+ esw_destroy_tsar(esw);
return err;
}
@@ -1653,8 +1653,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_disable(esw);
- esw_destroy_tsar(esw);
-
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1664,6 +1662,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
+ esw_destroy_tsar(esw);
+
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
@@ -1826,6 +1826,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
+ int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
+ int other_vport = 1;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -1833,15 +1835,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
if (IS_ERR(evport))
return PTR_ERR(evport);
+ if (vport == MLX5_VPORT_UPLINK) {
+ opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
+ other_vport = 0;
+ vport = 0;
+ }
mutex_lock(&esw->state_lock);
- err = mlx5_modify_vport_admin_state(esw->dev,
- MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
- vport, 1, link_state);
+ err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
- mlx5_core_warn(esw->dev,
- "Failed to set vport %d link state, err = %d",
- vport, err);
+ mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
+ vport, opmod, err);
goto unlock;
}
@@ -1883,8 +1887,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
- if (!ESW_ALLOWED(esw))
- return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vlan > 4095 || qos > 7)
@@ -1912,6 +1914,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u8 set_flags = 0;
int err;
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index a5175e98c0b3..5785596f13f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -680,6 +680,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
+static inline
+int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 060354bb211a..ed75353c56b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
+static void
+mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_esw_flow_attr *attr)
+{
+ if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
+ attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
- if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
- spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
-
- if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr->in_rep->vport == MLX5_VPORT_UPLINK)
- spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
+
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
+ mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 13e2fb79c21a..2569bb6228b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -797,7 +797,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
return ft;
}
-/* If reverse if false then return the first flow table in next priority of
+/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
@@ -829,34 +829,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
-static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
- struct mlx5_flow_namespace *ns)
-{
- struct mlx5_flow_namespace *root_ns = &root->ns;
- struct fs_prio *iter_prio;
- struct fs_prio *prio;
-
- fs_get_obj(prio, ns->node.parent);
- list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
- if (iter_prio == prio &&
- !list_is_last(&prio->node.children, &iter_prio->node.list))
- return list_next_entry(iter_prio, node.list);
- }
- return NULL;
-}
-
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
struct mlx5_flow_act *flow_act)
{
- struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct fs_prio *prio;
+ bool next_ns;
- if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
- prio = find_fwd_ns_prio(root, ft->ns);
- else
- fs_get_obj(prio, ft->node.parent);
+ next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
+ fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
- return (prio) ? find_next_chained_ft(prio) : NULL;
+ return find_next_chained_ft(prio);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index ef0706d15a5b..2d55b7c22c03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+
if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
- pin = rq->extts.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
- if (on) {
- pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
- pin = rq->perout.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
+enum {
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
+};
+
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ switch (func) {
+ case PTP_PF_NONE:
+ return 0;
+ case PTP_PF_EXTTS:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
+ case PTP_PF_PEROUT:
+ return !(clock->pps_info.pin_caps[pin] &
+ MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.verify = NULL,
};
+static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
+ u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
+{
+ struct mlx5_core_dev *mdev = clock->mdev;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
+ u8 mode;
+ int err;
+
+ err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
+ if (err || !MLX5_GET(mtpps_reg, out, enable))
+ return PTP_PF_NONE;
+
+ mode = MLX5_GET(mtpps_reg, out, pin_mode);
+
+ if (mode == MLX5_PIN_MODE_IN)
+ return PTP_PF_EXTTS;
+ else if (mode == MLX5_PIN_MODE_OUT)
+ return PTP_PF_PEROUT;
+
+ return PTP_PF_NONE;
+}
+
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
- clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
- clock->ptp_info.pin_config[i].chan = i;
+ clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
+ clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
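The clock change fills in the ptp_clock_info .verify hook against per-pin capability bits: returning 0 accepts the requested function for that pin, non-zero rejects it. A generic sketch of that callback, with made-up capability flags and structure names:

	/* Sketch only -- generic .verify shape, not the mlx5 pin_caps layout. */
	#include <linux/bits.h>
	#include <linux/errno.h>
	#include <linux/ptp_clock_kernel.h>

	#define CAP_PPS_IN	BIT(0)
	#define CAP_PPS_OUT	BIT(1)

	struct my_clock {
		struct ptp_clock_info ptp_info;
		u8 pin_caps[8];
	};

	/* 0 if the pin can take the requested function, nonzero otherwise. */
	static int my_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
				 enum ptp_pin_function func, unsigned int chan)
	{
		struct my_clock *clock = container_of(ptp, struct my_clock,
						      ptp_info);

		switch (func) {
		case PTP_PF_NONE:
			return 0;
		case PTP_PF_EXTTS:
			return !(clock->pin_caps[pin] & CAP_PPS_IN);
		case PTP_PF_PEROUT:
			return !(clock->pin_caps[pin] & CAP_PPS_OUT);
		default:
			return -EOPNOTSUPP;
		}
	}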
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 9f829e68fc73..e4186e84b3ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -293,7 +293,40 @@ static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
return 0;
}
-static int mlx5_eeprom_page(int offset)
+static int mlx5_query_module_id(struct mlx5_core_dev *dev, int module_num,
+ u8 *module_id)
+{
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
+ u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+ int err, status;
+ u8 *ptr;
+
+ MLX5_SET(mcia_reg, in, i2c_device_address, MLX5_I2C_ADDR_LOW);
+ MLX5_SET(mcia_reg, in, module, module_num);
+ MLX5_SET(mcia_reg, in, device_address, 0);
+ MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, size, 1);
+ MLX5_SET(mcia_reg, in, l, 0);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
+ if (err)
+ return err;
+
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+ ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+ *module_id = ptr[0];
+
+ return 0;
+}
+
+static int mlx5_qsfp_eeprom_page(u16 offset)
{
if (offset < MLX5_EEPROM_PAGE_LENGTH)
/* Addresses between 0-255 - page 00 */
@@ -307,7 +340,7 @@ static int mlx5_eeprom_page(int offset)
MLX5_EEPROM_HIGH_PAGE_LENGTH);
}
-static int mlx5_eeprom_high_page_offset(int page_num)
+static int mlx5_qsfp_eeprom_high_page_offset(int page_num)
{
if (!page_num) /* Page 0 always start from low page */
return 0;
@@ -316,35 +349,62 @@ static int mlx5_eeprom_high_page_offset(int page_num)
return page_num * MLX5_EEPROM_HIGH_PAGE_LENGTH;
}
+static void mlx5_qsfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+ *i2c_addr = MLX5_I2C_ADDR_LOW;
+ *page_num = mlx5_qsfp_eeprom_page(*offset);
+ *offset -= mlx5_qsfp_eeprom_high_page_offset(*page_num);
+}
+
+static void mlx5_sfp_eeprom_params_set(u16 *i2c_addr, int *page_num, u16 *offset)
+{
+ *i2c_addr = MLX5_I2C_ADDR_LOW;
+ *page_num = 0;
+
+ if (*offset < MLX5_EEPROM_PAGE_LENGTH)
+ return;
+
+ *i2c_addr = MLX5_I2C_ADDR_HIGH;
+ *offset -= MLX5_EEPROM_PAGE_LENGTH;
+}
+
int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
u16 offset, u16 size, u8 *data)
{
- int module_num, page_num, status, err;
+ int module_num, status, err, page_num = 0;
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)] = {};
u32 out[MLX5_ST_SZ_DW(mcia_reg)];
- u32 in[MLX5_ST_SZ_DW(mcia_reg)];
- u16 i2c_addr;
- void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+ u16 i2c_addr = 0;
+ u8 module_id;
+ void *ptr;
err = mlx5_query_module_num(dev, &module_num);
if (err)
return err;
- memset(in, 0, sizeof(in));
- size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
-
- /* Get the page number related to the given offset */
- page_num = mlx5_eeprom_page(offset);
+ err = mlx5_query_module_id(dev, module_num, &module_id);
+ if (err)
+ return err;
- /* Set the right offset according to the page number,
- * For page_num > 0, relative offset is always >= 128 (high page).
- */
- offset -= mlx5_eeprom_high_page_offset(page_num);
+ switch (module_id) {
+ case MLX5_MODULE_ID_SFP:
+ mlx5_sfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ case MLX5_MODULE_ID_QSFP:
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ case MLX5_MODULE_ID_QSFP28:
+ mlx5_qsfp_eeprom_params_set(&i2c_addr, &page_num, &offset);
+ break;
+ default:
+ mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
+ return -EINVAL;
+ }
if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
/* Cross pages read, read until offset 256 in low page */
size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
- i2c_addr = MLX5_I2C_ADDR_LOW;
+ size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
MLX5_SET(mcia_reg, in, l, 0);
MLX5_SET(mcia_reg, in, module, module_num);
@@ -365,6 +425,7 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
return -EIO;
}
+ ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
memcpy(data, ptr, size);
return size;
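
As an aside, the SFP vs. QSFP split introduced above boils down to two different address translations. Below is a minimal user-space sketch of that mapping; the constants are assumptions for illustration (0x50/0x51 stand in for the low/high I2C addresses, page sizes are taken as 256/128 bytes per SFF-8436/SFF-8472), not the driver's MLX5_* definitions.

#include <stdint.h>
#include <stdio.h>

#define EEPROM_PAGE_LEN       256   /* assumed low-page size */
#define EEPROM_HIGH_PAGE_LEN  128   /* assumed upper-page size */
#define I2C_ADDR_LOW          0x50  /* conventional module addresses, */
#define I2C_ADDR_HIGH         0x51  /* not MLX5_I2C_ADDR_* */

/* QSFP/QSFP+/QSFP28: single I2C address, bytes above 255 live in numbered
 * upper pages whose relative offsets start at 128. */
static void qsfp_params(uint16_t *offset, int *page, uint16_t *i2c)
{
    *i2c = I2C_ADDR_LOW;
    if (*offset < EEPROM_PAGE_LEN) {
        *page = 0;
        return;
    }
    *page = 1 + (*offset - EEPROM_PAGE_LEN) / EEPROM_HIGH_PAGE_LEN;
    *offset -= *page * EEPROM_HIGH_PAGE_LEN;
}

/* SFP: page 0 only, bytes above 255 live behind the high I2C address. */
static void sfp_params(uint16_t *offset, int *page, uint16_t *i2c)
{
    *page = 0;
    *i2c = I2C_ADDR_LOW;
    if (*offset >= EEPROM_PAGE_LEN) {
        *i2c = I2C_ADDR_HIGH;
        *offset -= EEPROM_PAGE_LEN;
    }
}

int main(void)
{
    uint16_t qoff = 300, soff = 300, i2c;
    int page;

    qsfp_params(&qoff, &page, &i2c);
    printf("QSFP: page %d offset %d i2c 0x%x\n", page, qoff, (unsigned int)i2c);
    sfp_params(&soff, &page, &i2c);
    printf("SFP:  page %d offset %d i2c 0x%x\n", page, soff, (unsigned int)i2c);
    return 0;
}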
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index e9ccd333f61d..71b6185b4904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -710,7 +710,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
if (err)
- return err;
+ goto err_trap_register;
err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
if (err)
@@ -722,6 +722,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
err_emad_trap_set:
mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
mlxsw_core);
+err_trap_register:
destroy_workqueue(mlxsw_core->emad_wq);
return err;
}
@@ -1813,7 +1814,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
- kfree(trans);
+ kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -2050,11 +2051,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
- rcu_read_unlock();
- if (!found)
+ if (!found) {
+ rcu_read_unlock();
goto drop;
+ }
rxl->func(skb, local_port, rxl_item->priv);
+ rcu_read_unlock();
return;
drop:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 08215fed193d..a7d86df7123f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -45,7 +45,7 @@ static int mlxsw_env_validate_cable_ident(struct mlxsw_core *core, int id,
static int
mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
u16 offset, u16 size, void *data,
- unsigned int *p_read_size)
+ bool qsfp, unsigned int *p_read_size)
{
char eeprom_tmp[MLXSW_REG_MCIA_EEPROM_SIZE];
char mcia_pl[MLXSW_REG_MCIA_LEN];
@@ -54,6 +54,10 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
int status;
int err;
+ /* MCIA register accepts buffer size <= 48. Page of size 128 should be
+ * read in chunks of size 48, 48, 32. Align the size of the last chunk
+ * to avoid reading past the end of the page.
+ */
size = min_t(u16, size, MLXSW_REG_MCIA_EEPROM_SIZE);
if (offset < MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH &&
@@ -63,18 +67,25 @@ mlxsw_env_query_module_eeprom(struct mlxsw_core *mlxsw_core, int module,
i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_LOW;
if (offset >= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH) {
- page = MLXSW_REG_MCIA_PAGE_GET(offset);
- offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
- /* When reading upper pages 1, 2 and 3 the offset starts at
- * 128. Please refer to "QSFP+ Memory Map" figure in SFF-8436
- * specification for graphical depiction.
- * MCIA register accepts buffer size <= 48. Page of size 128
- * should be read by chunks of size 48, 48, 32. Align the size
- * of the last chunk to avoid reading after the end of the
- * page.
- */
- if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
- size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+ if (qsfp) {
+ /* When reading upper pages 1, 2 and 3 the offset
+ * starts at 128. Please refer to "QSFP+ Memory Map"
+ * figure in SFF-8436 specification for graphical
+ * depiction.
+ */
+ page = MLXSW_REG_MCIA_PAGE_GET(offset);
+ offset -= MLXSW_REG_MCIA_EEPROM_UP_PAGE_LENGTH * page;
+ if (offset + size > MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH)
+ size = MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH - offset;
+ } else {
+ /* When reading upper pages 1, 2 and 3 the offset
+ * starts at 0 and the I2C high address is used. Please
+ * refer to the "Memory Organization" figure in SFF-8472
+ * specification for graphical depiction.
+ */
+ i2c_addr = MLXSW_REG_MCIA_I2C_ADDR_HIGH;
+ offset -= MLXSW_REG_MCIA_EEPROM_PAGE_LENGTH;
+ }
}
mlxsw_reg_mcia_pack(mcia_pl, module, 0, page, offset, size, i2c_addr);
@@ -166,7 +177,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
int err;
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 0, offset,
- module_info, &read_size);
+ module_info, false, &read_size);
if (err)
return err;
@@ -197,7 +208,7 @@ int mlxsw_env_get_module_info(struct mlxsw_core *mlxsw_core, int module,
/* Verify if transceiver provides diagnostic monitoring page */
err = mlxsw_env_query_module_eeprom(mlxsw_core, module,
SFP_DIAGMON, 1, &diag_mon,
- &read_size);
+ false, &read_size);
if (err)
return err;
@@ -225,17 +236,22 @@ int mlxsw_env_get_module_eeprom(struct net_device *netdev,
int offset = ee->offset;
unsigned int read_size;
int i = 0;
+ bool qsfp;
int err;
if (!ee->len)
return -EINVAL;
memset(data, 0, ee->len);
+ /* Validate module identifier value. */
+ err = mlxsw_env_validate_cable_ident(mlxsw_core, module, &qsfp);
+ if (err)
+ return err;
while (i < ee->len) {
err = mlxsw_env_query_module_eeprom(mlxsw_core, module, offset,
ee->len - i, data + i,
- &read_size);
+ qsfp, &read_size);
if (err) {
netdev_err(netdev, "Eeprom query failed\n");
return err;
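
The 48-byte MCIA buffer limit mentioned in the comment above means a 128-byte page is read in chunks of 48, 48 and 32 bytes, with the last chunk clamped so it never crosses the page boundary. A self-contained sketch of that chunking loop follows; the 48/128 values come from the text, while the names and the dummy dev_read() are made up for illustration.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PAGE_LEN   128  /* upper page size */
#define MAX_CHUNK   48  /* register buffer limit */

/* Hypothetical device read: here it just fills the buffer with a pattern. */
static void dev_read(uint16_t offset, uint16_t len, uint8_t *buf)
{
    (void)offset;
    memset(buf, 0xab, len);
}

/* Read "len" bytes starting at "offset" within one page, in chunks of at
 * most MAX_CHUNK, clamping the final chunk so it stays inside the page. */
static size_t read_page(uint16_t offset, uint16_t len, uint8_t *out)
{
    size_t done = 0;

    while (done < len) {
        uint16_t chunk = len - done;

        if (chunk > MAX_CHUNK)
            chunk = MAX_CHUNK;
        if (offset + chunk > PAGE_LEN)
            chunk = PAGE_LEN - offset;  /* 48, 48, 32 for a full page */
        if (!chunk)
            break;
        dev_read(offset, chunk, out + done);
        done += chunk;
        offset += chunk;
    }
    return done;
}

int main(void)
{
    uint8_t buf[PAGE_LEN];

    printf("read %zu bytes\n", read_page(0, PAGE_LEN, buf));
    return 0;
}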
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index fd0e97de44e7..c04ec1a92826 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1414,23 +1414,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
u16 num_pages;
int err;
- mutex_init(&mlxsw_pci->cmd.lock);
- init_waitqueue_head(&mlxsw_pci->cmd.wait);
-
mlxsw_pci->core = mlxsw_core;
mbox = mlxsw_cmd_mbox_alloc();
if (!mbox)
return -ENOMEM;
- err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
- if (err)
- goto mbox_put;
-
- err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
- if (err)
- goto err_out_mbox_alloc;
-
err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
if (err)
goto err_sw_reset;
@@ -1537,9 +1526,6 @@ err_query_fw:
mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
-err_out_mbox_alloc:
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
mlxsw_cmd_mbox_free(mbox);
return err;
@@ -1553,8 +1539,6 @@ static void mlxsw_pci_fini(void *bus_priv)
mlxsw_pci_aqs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
- mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}
static struct mlxsw_pci_queue *
@@ -1776,6 +1760,37 @@ static const struct mlxsw_bus mlxsw_pci_bus = {
.features = MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};
+static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ mutex_init(&mlxsw_pci->cmd.lock);
+ init_waitqueue_head(&mlxsw_pci->cmd.wait);
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ if (err)
+ goto err_in_mbox_alloc;
+
+ err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ if (err)
+ goto err_out_mbox_alloc;
+
+ return 0;
+
+err_out_mbox_alloc:
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+err_in_mbox_alloc:
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+ return err;
+}
+
+static void mlxsw_pci_cmd_fini(struct mlxsw_pci *mlxsw_pci)
+{
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
+ mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
+ mutex_destroy(&mlxsw_pci->cmd.lock);
+}
+
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const char *driver_name = pdev->driver->name;
@@ -1831,6 +1846,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mlxsw_pci->pdev = pdev;
pci_set_drvdata(pdev, mlxsw_pci);
+ err = mlxsw_pci_cmd_init(mlxsw_pci);
+ if (err)
+ goto err_pci_cmd_init;
+
mlxsw_pci->bus_info.device_kind = driver_name;
mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
mlxsw_pci->bus_info.dev = &pdev->dev;
@@ -1848,6 +1867,8 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_bus_device_register:
+ mlxsw_pci_cmd_fini(mlxsw_pci);
+err_pci_cmd_init:
iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
@@ -1865,6 +1886,7 @@ static void mlxsw_pci_remove(struct pci_dev *pdev)
struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
+ mlxsw_pci_cmd_fini(mlxsw_pci);
iounmap(mlxsw_pci->hw_addr);
pci_release_regions(mlxsw_pci->pdev);
pci_disable_device(mlxsw_pci->pdev);
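
The mlxsw_pci_cmd_init()/mlxsw_pci_cmd_fini() split above follows the usual paired init/teardown shape: error paths inside init unwind with goto labels in reverse order, and fini undoes init in reverse order. A generic, compilable sketch of that shape is below; all names are placeholders, not the driver's API.

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static void fini_a(void) { puts("fini a"); }
static void fini_b(void) { puts("fini b"); }

/* Register in one order; error paths and fini unwind in exactly the
 * reverse order, so nothing is ever torn down before its users. */
static int subsystem_init(void)
{
    int err;

    err = init_a();
    if (err)
        goto err_a;
    err = init_b();
    if (err)
        goto err_b;
    return 0;

err_b:
    fini_a();
err_a:
    return err;
}

static void subsystem_fini(void)
{
    fini_b();
    fini_a();
}

int main(void)
{
    if (!subsystem_init())
        subsystem_fini();
    return 0;
}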
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index fcb88d4271bf..8ac987c8c8bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -5536,6 +5536,7 @@ enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST,
MLXSW_REG_HTGT_TRAP_GROUP_SP_NEIGH_DISCOVERY,
MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP,
+ MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME,
MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP,
MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 770de0222e7b..0521e9d48c45 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -5001,15 +5001,6 @@ static void mlxsw_sp_router_fib4_del(struct mlxsw_sp *mlxsw_sp,
static bool mlxsw_sp_fib6_rt_should_ignore(const struct fib6_info *rt)
{
- /* Packets with link-local destination IP arriving to the router
- * are trapped to the CPU, so no need to program specific routes
- * for them. Only allow prefix routes (usually one fe80::/64) so
- * that packets are trapped for the right reason.
- */
- if ((ipv6_addr_type(&rt->fib6_dst.addr) & IPV6_ADDR_LINKLOCAL) &&
- (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)))
- return true;
-
/* Multicast routes aren't supported, so ignore them. Neighbour
* Discovery packets are specifically trapped.
*/
@@ -6262,7 +6253,7 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb,
}
fib_work = kzalloc(sizeof(*fib_work), GFP_ATOMIC);
- if (WARN_ON(!fib_work))
+ if (!fib_work)
return NOTIFY_BAD;
fib_work->mlxsw_sp = router->mlxsw_sp;
@@ -8078,16 +8069,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp->router = router;
router->mlxsw_sp = mlxsw_sp;
- router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
- err = register_inetaddr_notifier(&router->inetaddr_nb);
- if (err)
- goto err_register_inetaddr_notifier;
-
- router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
- err = register_inet6addr_notifier(&router->inet6addr_nb);
- if (err)
- goto err_register_inet6addr_notifier;
-
INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list);
err = __mlxsw_sp_router_init(mlxsw_sp);
if (err)
@@ -8128,12 +8109,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_neigh_init;
- mlxsw_sp->router->netevent_nb.notifier_call =
- mlxsw_sp_router_netevent_event;
- err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
err = mlxsw_sp_mp_hash_init(mlxsw_sp);
if (err)
goto err_mp_hash_init;
@@ -8142,6 +8117,22 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_dscp_init;
+ router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event;
+ err = register_inetaddr_notifier(&router->inetaddr_nb);
+ if (err)
+ goto err_register_inetaddr_notifier;
+
+ router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event;
+ err = register_inet6addr_notifier(&router->inet6addr_nb);
+ if (err)
+ goto err_register_inet6addr_notifier;
+
+ mlxsw_sp->router->netevent_nb.notifier_call =
+ mlxsw_sp_router_netevent_event;
+ err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ if (err)
+ goto err_register_netevent_notifier;
+
mlxsw_sp->router->fib_nb.notifier_call = mlxsw_sp_router_fib_event;
err = register_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb,
@@ -8152,10 +8143,15 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_register_fib_notifier:
-err_dscp_init:
-err_mp_hash_init:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+err_register_inet6addr_notifier:
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
+err_register_inetaddr_notifier:
+ mlxsw_core_flush_owq();
+err_dscp_init:
+err_mp_hash_init:
mlxsw_sp_neigh_fini(mlxsw_sp);
err_neigh_init:
mlxsw_sp_vrs_fini(mlxsw_sp);
@@ -8174,10 +8170,6 @@ err_ipips_init:
err_rifs_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
- unregister_inet6addr_notifier(&router->inet6addr_nb);
-err_register_inet6addr_notifier:
- unregister_inetaddr_notifier(&router->inetaddr_nb);
-err_register_inetaddr_notifier:
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
return err;
@@ -8188,6 +8180,9 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
&mlxsw_sp->router->fib_nb);
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
+ unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ mlxsw_core_flush_owq();
mlxsw_sp_neigh_fini(mlxsw_sp);
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
@@ -8197,8 +8192,6 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_ipips_fini(mlxsw_sp);
mlxsw_sp_rifs_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
mutex_destroy(&mlxsw_sp->router->lock);
kfree(mlxsw_sp->router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
index 157a42c63066..1e38dfe7cf64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c
@@ -328,6 +328,9 @@ mlxsw_sp_trap_policer_items_arr[] = {
{
.policer = MLXSW_SP_TRAP_POLICER(18, 1024, 128),
},
+ {
+ .policer = MLXSW_SP_TRAP_POLICER(19, 1024, 512),
+ },
};
static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
@@ -422,6 +425,11 @@ static const struct mlxsw_sp_trap_group_item mlxsw_sp_trap_group_items_arr[] = {
.priority = 2,
},
{
+ .group = DEVLINK_TRAP_GROUP_GENERIC(EXTERNAL_DELIVERY, 19),
+ .hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_EXTERNAL_ROUTE,
+ .priority = 1,
+ },
+ {
.group = DEVLINK_TRAP_GROUP_GENERIC(IPV6, 15),
.hw_group_id = MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6,
.priority = 2,
@@ -882,11 +890,11 @@ static const struct mlxsw_sp_trap_item mlxsw_sp_trap_items_arr[] = {
},
},
{
- .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, LOCAL_DELIVERY,
+ .trap = MLXSW_SP_TRAP_CONTROL(EXTERNAL_ROUTE, EXTERNAL_DELIVERY,
TRAP),
.listeners_arr = {
- MLXSW_SP_RXL_MARK(RTR_INGRESS0, IP2ME, TRAP_TO_CPU,
- false),
+ MLXSW_SP_RXL_MARK(RTR_INGRESS0, EXTERNAL_ROUTE,
+ TRAP_TO_CPU, false),
},
},
{
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 9cfe1fd98c30..f17da67a4622 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -748,21 +748,21 @@ void ocelot_get_txtstamp(struct ocelot *ocelot)
spin_unlock_irqrestore(&port->tx_skbs.lock, flags);
- /* Next ts */
- ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
+ /* Get the h/w timestamp */
+ ocelot_get_hwtimestamp(ocelot, &ts);
if (unlikely(!skb_match))
continue;
- /* Get the h/w timestamp */
- ocelot_get_hwtimestamp(ocelot, &ts);
-
/* Set the timestamp into the skb */
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
skb_tstamp_tx(skb_match, &shhwtstamps);
dev_kfree_skb_any(skb_match);
+
+ /* Next ts */
+ ocelot_write(ocelot, SYS_PTP_NXT_PTP_NXT, SYS_PTP_NXT);
}
}
EXPORT_SYMBOL(ocelot_get_txtstamp);
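
The reordering above reads the hardware timestamp before deciding what to do with it and only issues the "next ts" command once the entry has been consumed. As a generic aside, a toy FIFO consumer showing the read-then-advance ordering; everything here is invented for illustration, not the ocelot register interface.

#include <stdio.h>

#define FIFO_DEPTH 4

struct ts_fifo {
    unsigned int head;
    unsigned long entries[FIFO_DEPTH];
};

/* Read the current entry first, then advance; advancing before the read
 * (the old ordering) would throw the entry away. */
static unsigned long consume(struct ts_fifo *f)
{
    unsigned long ts = f->entries[f->head];  /* get-hwtimestamp step */

    f->head = (f->head + 1) % FIFO_DEPTH;    /* "next ts" step */
    return ts;
}

int main(void)
{
    struct ts_fifo f = { 0, { 100, 200, 300, 400 } };
    unsigned long first = consume(&f);
    unsigned long second = consume(&f);

    printf("%lu %lu\n", first, second);
    return 0;
}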
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 9b63574b6202..b5f1849fd280 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -98,7 +98,7 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
{
struct sk_buff **skb_ptr = NULL;
struct sk_buff **temp;
-#define NR_SKB_COMPLETED 128
+#define NR_SKB_COMPLETED 16
struct sk_buff *completed[NR_SKB_COMPLETED];
int more;
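
The NR_SKB_COMPLETED change is purely about stack footprint: the completed[] array lives on the stack of VXGE_COMPLETE_VPATH_TX(). A quick arithmetic check, assuming 8-byte pointers on a 64-bit build:

#include <stdio.h>

int main(void)
{
    /* 128 on-stack pointers cost 1 KiB with 8-byte pointers; 16 keep it
     * down to 128 bytes, which matters in a deep TX completion path. */
    printf("old: %zu bytes, new: %zu bytes\n",
           128 * sizeof(void *), 16 * sizeof(void *));
    return 0;
}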
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index d2708a57f2ff..4075f5e59955 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1299,19 +1299,21 @@ static int nixge_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, nixge_poll, NAPI_POLL_WEIGHT);
err = nixge_of_get_resources(pdev);
if (err)
- return err;
+ goto free_netdev;
__nixge_hw_set_mac_address(ndev);
priv->tx_irq = platform_get_irq_byname(pdev, "tx");
if (priv->tx_irq < 0) {
netdev_err(ndev, "could not find 'tx' irq");
- return priv->tx_irq;
+ err = priv->tx_irq;
+ goto free_netdev;
}
priv->rx_irq = platform_get_irq_byname(pdev, "rx");
if (priv->rx_irq < 0) {
netdev_err(ndev, "could not find 'rx' irq");
- return priv->rx_irq;
+ err = priv->rx_irq;
+ goto free_netdev;
}
priv->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index f7e3ce3de04d..095561924bdc 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -103,15 +103,18 @@ static void ionic_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
void *p)
{
struct ionic_lif *lif = netdev_priv(netdev);
+ unsigned int offset;
unsigned int size;
regs->version = IONIC_DEV_CMD_REG_VERSION;
+ offset = 0;
size = IONIC_DEV_INFO_REG_COUNT * sizeof(u32);
- memcpy_fromio(p, lif->ionic->idev.dev_info_regs->words, size);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_info_regs->words, size);
+ offset += size;
size = IONIC_DEV_CMD_REG_COUNT * sizeof(u32);
- memcpy_fromio(p, lif->ionic->idev.dev_cmd_regs->words, size);
+ memcpy_fromio(p + offset, lif->ionic->idev.dev_cmd_regs->words, size);
}
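
The ionic_get_regs() fix is the classic append-at-offset pattern: each register block is copied after the previous one instead of over it. A small user-space model of the same bookkeeping follows; memcpy stands in for memcpy_fromio and all names are hypothetical.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Append consecutive register blocks into one dump buffer, advancing the
 * destination offset after each copy (the bug was reusing offset 0). */
static size_t dump_regs(uint8_t *dst, const uint32_t *info, size_t ninfo,
                        const uint32_t *cmd, size_t ncmd)
{
    size_t offset = 0;

    memcpy(dst + offset, info, ninfo * sizeof(*info));
    offset += ninfo * sizeof(*info);
    memcpy(dst + offset, cmd, ncmd * sizeof(*cmd));
    offset += ncmd * sizeof(*cmd);
    return offset;
}

int main(void)
{
    uint32_t info[2] = { 0x11111111, 0x22222222 };
    uint32_t cmd[2]  = { 0x33333333, 0x44444444 };
    uint8_t buf[16];

    printf("wrote %zu bytes\n", dump_regs(buf, info, 2, cmd, 2));
    return 0;
}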
static int ionic_get_link_ksettings(struct net_device *netdev,
@@ -468,12 +471,18 @@ static void ionic_get_ringparam(struct net_device *netdev,
ring->rx_pending = lif->nrxq_descs;
}
+static void ionic_set_ringsize(struct ionic_lif *lif, void *arg)
+{
+ struct ethtool_ringparam *ring = arg;
+
+ lif->ntxq_descs = ring->tx_pending;
+ lif->nrxq_descs = ring->rx_pending;
+}
+
static int ionic_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ionic_lif *lif = netdev_priv(netdev);
- bool running;
- int err;
if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
netdev_info(netdev, "Changing jumbo or mini descriptors not supported\n");
@@ -491,22 +500,7 @@ static int ionic_set_ringparam(struct net_device *netdev,
ring->rx_pending == lif->nrxq_descs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
- if (err)
- return err;
-
- running = test_bit(IONIC_LIF_F_UP, lif->state);
- if (running)
- ionic_stop(netdev);
-
- lif->ntxq_descs = ring->tx_pending;
- lif->nrxq_descs = ring->rx_pending;
-
- if (running)
- ionic_open(netdev);
- clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
- return 0;
+ return ionic_reset_queues(lif, ionic_set_ringsize, ring);
}
static void ionic_get_channels(struct net_device *netdev,
@@ -521,12 +515,17 @@ static void ionic_get_channels(struct net_device *netdev,
ch->combined_count = lif->nxqs;
}
+static void ionic_set_queuecount(struct ionic_lif *lif, void *arg)
+{
+ struct ethtool_channels *ch = arg;
+
+ lif->nxqs = ch->combined_count;
+}
+
static int ionic_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct ionic_lif *lif = netdev_priv(netdev);
- bool running;
- int err;
if (!ch->combined_count || ch->other_count ||
ch->rx_count || ch->tx_count)
@@ -535,21 +534,7 @@ static int ionic_set_channels(struct net_device *netdev,
if (ch->combined_count == lif->nxqs)
return 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
- if (err)
- return err;
-
- running = test_bit(IONIC_LIF_F_UP, lif->state);
- if (running)
- ionic_stop(netdev);
-
- lif->nxqs = ch->combined_count;
-
- if (running)
- ionic_open(netdev);
- clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
-
- return 0;
+ return ionic_reset_queues(lif, ionic_set_queuecount, ch);
}
static u32 ionic_get_priv_flags(struct net_device *netdev)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index aaa00edd9d5b..e55d41546cff 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -96,8 +96,7 @@ static void ionic_link_status_check(struct ionic_lif *lif)
u16 link_status;
bool link_up;
- if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state) ||
- test_bit(IONIC_LIF_F_QUEUE_RESET, lif->state))
+ if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
return;
link_status = le16_to_cpu(lif->info->status.link_status);
@@ -114,16 +113,22 @@ static void ionic_link_status_check(struct ionic_lif *lif)
netif_carrier_on(netdev);
}
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
+ if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ mutex_lock(&lif->queue_lock);
ionic_start_queues(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
} else {
if (netif_carrier_ok(netdev)) {
netdev_info(netdev, "Link down\n");
netif_carrier_off(netdev);
}
- if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev))
+ if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
+ mutex_unlock(&lif->queue_lock);
+ }
}
clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
@@ -863,8 +868,7 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
if (f)
return 0;
- netdev_dbg(lif->netdev, "rx_filter add ADDR %pM (id %d)\n", addr,
- ctx.comp.rx_filter_add.filter_id);
+ netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
err = ionic_adminq_post_wait(lif, &ctx);
@@ -893,6 +897,9 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
return -ENOENT;
}
+ netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
+ addr, f->filter_id);
+
ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);
spin_unlock_bh(&lif->rx_filters.lock);
@@ -901,9 +908,6 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
if (err && err != -EEXIST)
return err;
- netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr,
- ctx.cmd.rx_filter_del.filter_id);
-
return 0;
}
@@ -1313,7 +1317,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
netdev->mtu = new_mtu;
- err = ionic_reset_queues(lif);
+ err = ionic_reset_queues(lif, NULL, NULL);
return err;
}
@@ -1325,7 +1329,7 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
netdev_info(lif->netdev, "Tx Timeout recovery\n");
rtnl_lock();
- ionic_reset_queues(lif);
+ ionic_reset_queues(lif, NULL, NULL);
rtnl_unlock();
}
@@ -1351,13 +1355,11 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
};
int err;
+ netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
err = ionic_adminq_post_wait(lif, &ctx);
if (err)
return err;
- netdev_dbg(netdev, "rx_filter add VLAN %d (id %d)\n", vid,
- ctx.comp.rx_filter_add.filter_id);
-
return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}
@@ -1382,8 +1384,8 @@ static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
return -ENOENT;
}
- netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n", vid,
- le32_to_cpu(ctx.cmd.rx_filter_del.filter_id));
+ netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
+ vid, f->filter_id);
ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
ionic_rx_filter_free(lif, f);
@@ -1673,6 +1675,14 @@ int ionic_open(struct net_device *netdev)
if (err)
goto err_out;
+ err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
+ if (err)
+ goto err_txrx_deinit;
+
+ err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
+ if (err)
+ goto err_txrx_deinit;
+
/* don't start the queues until we have link */
if (netif_carrier_ok(netdev)) {
err = ionic_start_queues(lif);
@@ -1980,26 +1990,30 @@ static const struct net_device_ops ionic_netdev_ops = {
.ndo_get_vf_stats = ionic_get_vf_stats,
};
-int ionic_reset_queues(struct ionic_lif *lif)
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
{
bool running;
int err = 0;
- err = ionic_wait_for_bit(lif, IONIC_LIF_F_QUEUE_RESET);
- if (err)
- return err;
-
+ mutex_lock(&lif->queue_lock);
running = netif_running(lif->netdev);
if (running) {
netif_device_detach(lif->netdev);
err = ionic_stop(lif->netdev);
+ if (err)
+ goto reset_out;
}
- if (!err && running) {
- ionic_open(lif->netdev);
+
+ if (cb)
+ cb(lif, arg);
+
+ if (running) {
+ err = ionic_open(lif->netdev);
netif_device_attach(lif->netdev);
}
- clear_bit(IONIC_LIF_F_QUEUE_RESET, lif->state);
+reset_out:
+ mutex_unlock(&lif->queue_lock);
return err;
}
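
ionic_reset_queues() now funnels every queue reconfiguration through one stop/apply/start helper guarded by queue_lock, with the actual change injected as a callback. A compilable toy model of that control flow is below; the pthread mutex and the set_ring_size() callback are stand-ins for illustration, not the driver's types.

#include <stdio.h>
#include <stdbool.h>
#include <pthread.h>

/* Toy stand-in for the lif: just enough state to show the pattern. */
struct toy_lif {
    pthread_mutex_t queue_lock;
    bool running;
    int ntxq_descs;
};

typedef void (*reset_cb)(struct toy_lif *lif, void *arg);

/* The "cb" step: apply the requested configuration while queues are down. */
static void set_ring_size(struct toy_lif *lif, void *arg)
{
    lif->ntxq_descs = *(int *)arg;
}

static int reset_queues(struct toy_lif *lif, reset_cb cb, void *arg)
{
    bool was_running;

    pthread_mutex_lock(&lif->queue_lock);
    was_running = lif->running;
    if (was_running)
        lif->running = false;   /* stand-in for ionic_stop() */
    if (cb)
        cb(lif, arg);
    if (was_running)
        lif->running = true;    /* stand-in for ionic_open() */
    pthread_mutex_unlock(&lif->queue_lock);
    return 0;
}

int main(void)
{
    static struct toy_lif lif = {
        .queue_lock = PTHREAD_MUTEX_INITIALIZER,
        .running = true,
        .ntxq_descs = 512,
    };
    int descs = 1024;

    reset_queues(&lif, set_ring_size, &descs);
    printf("ntxq_descs=%d running=%d\n", lif.ntxq_descs, lif.running);
    return 0;
}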
@@ -2146,7 +2160,9 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
+ mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
+ mutex_unlock(&lif->queue_lock);
}
if (netif_running(lif->netdev)) {
@@ -2265,15 +2281,15 @@ static void ionic_lif_deinit(struct ionic_lif *lif)
cancel_work_sync(&lif->deferred.work);
cancel_work_sync(&lif->tx_timeout_work);
ionic_rx_filters_deinit(lif);
+ if (lif->netdev->features & NETIF_F_RXHASH)
+ ionic_lif_rss_deinit(lif);
}
- if (lif->netdev->features & NETIF_F_RXHASH)
- ionic_lif_rss_deinit(lif);
-
napi_disable(&lif->adminqcq->napi);
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
+ mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -2450,6 +2466,7 @@ static int ionic_lif_init(struct ionic_lif *lif)
return err;
lif->hw_index = le16_to_cpu(comp.hw_index);
+ mutex_init(&lif->queue_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index c3428034a17b..8dc2c5d77424 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -135,7 +135,6 @@ enum ionic_lif_state_flags {
IONIC_LIF_F_SW_DEBUG_STATS,
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
- IONIC_LIF_F_QUEUE_RESET,
IONIC_LIF_F_FW_RESET,
/* leave this as last */
@@ -165,6 +164,7 @@ struct ionic_lif {
unsigned int hw_index;
unsigned int kern_pid;
u64 __iomem *kern_dbpage;
+ struct mutex queue_lock; /* lock for queue structures */
spinlock_t adminq_lock; /* lock for AdminQ operations */
struct ionic_qcq *adminqcq;
struct ionic_qcq *notifyqcq;
@@ -213,12 +213,6 @@ struct ionic_lif {
#define lif_to_txq(lif, i) (&lif_to_txqcq((lif), i)->q)
#define lif_to_rxq(lif, i) (&lif_to_txqcq((lif), i)->q)
-/* return 0 if successfully set the bit, else non-zero */
-static inline int ionic_wait_for_bit(struct ionic_lif *lif, int bitname)
-{
- return wait_on_bit_lock(lif->state, bitname, TASK_INTERRUPTIBLE);
-}
-
static inline u32 ionic_coal_usec_to_hw(struct ionic *ionic, u32 usecs)
{
u32 mult = le32_to_cpu(ionic->ident.dev.intr_coal_mult);
@@ -248,6 +242,8 @@ static inline u32 ionic_coal_hw_to_usec(struct ionic *ionic, u32 units)
return (units * div) / mult;
}
+typedef void (*ionic_reset_cb)(struct ionic_lif *lif, void *arg);
+
void ionic_link_status_check_request(struct ionic_lif *lif);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
@@ -267,7 +263,7 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
int ionic_open(struct net_device *netdev);
int ionic_stop(struct net_device *netdev);
-int ionic_reset_queues(struct ionic_lif *lif);
+int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg);
static inline void debug_stats_txq_post(struct ionic_qcq *qcq,
struct ionic_txq_desc *desc, bool dbell)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 80eeb7696e01..cd0076fc3044 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -21,13 +21,16 @@ void ionic_rx_filter_free(struct ionic_lif *lif, struct ionic_rx_filter *f)
void ionic_rx_filter_replay(struct ionic_lif *lif)
{
struct ionic_rx_filter_add_cmd *ac;
+ struct hlist_head new_id_list;
struct ionic_admin_ctx ctx;
struct ionic_rx_filter *f;
struct hlist_head *head;
struct hlist_node *tmp;
+ unsigned int key;
unsigned int i;
int err;
+ INIT_HLIST_HEAD(&new_id_list);
ac = &ctx.cmd.rx_filter_add;
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
@@ -58,9 +61,30 @@ void ionic_rx_filter_replay(struct ionic_lif *lif)
ac->mac.addr);
break;
}
+ spin_lock_bh(&lif->rx_filters.lock);
+ ionic_rx_filter_free(lif, f);
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ continue;
}
+
+ /* remove from old id list, save new id in tmp list */
+ spin_lock_bh(&lif->rx_filters.lock);
+ hlist_del(&f->by_id);
+ spin_unlock_bh(&lif->rx_filters.lock);
+ f->filter_id = le32_to_cpu(ctx.comp.rx_filter_add.filter_id);
+ hlist_add_head(&f->by_id, &new_id_list);
}
}
+
+ /* rebuild the by_id hash lists with the new filter ids */
+ spin_lock_bh(&lif->rx_filters.lock);
+ hlist_for_each_entry_safe(f, tmp, &new_id_list, by_id) {
+ key = f->filter_id & IONIC_RX_FILTER_HLISTS_MASK;
+ head = &lif->rx_filters.by_id[key];
+ hlist_add_head(&f->by_id, head);
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
}
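
The replay path above has to cope with the device handing back new filter ids, so entries are parked on a temporary list and then re-bucketed by the new id. A self-contained sketch of that rehash step follows, using plain singly-linked lists instead of hlists and four buckets for brevity; the names are illustrative only.

#include <stdio.h>

#define NBUCKETS 4
#define BUCKET_MASK (NBUCKETS - 1)

struct filter {
    unsigned int filter_id;
    struct filter *next;
};

/* After replay every filter has a new device-assigned id, so the by-id
 * buckets are rebuilt: entries come in on a temporary list and are
 * re-inserted keyed by the new id. */
static void rehash(struct filter *tmp_list, struct filter *buckets[NBUCKETS])
{
    while (tmp_list) {
        struct filter *f = tmp_list;
        unsigned int key = f->filter_id & BUCKET_MASK;

        tmp_list = f->next;
        f->next = buckets[key];
        buckets[key] = f;
    }
}

int main(void)
{
    struct filter a = { 5, NULL };
    struct filter b = { 10, &a };
    struct filter *buckets[NBUCKETS] = { NULL };

    rehash(&b, buckets);
    for (int i = 0; i < NBUCKETS; i++)
        for (struct filter *f = buckets[i]; f; f = f->next)
            printf("bucket %d: id %u\n", i, f->filter_id);
    return 0;
}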
int ionic_rx_filters_init(struct ionic_lif *lif)
@@ -69,10 +93,12 @@ int ionic_rx_filters_init(struct ionic_lif *lif)
spin_lock_init(&lif->rx_filters.lock);
+ spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
INIT_HLIST_HEAD(&lif->rx_filters.by_hash[i]);
INIT_HLIST_HEAD(&lif->rx_filters.by_id[i]);
}
+ spin_unlock_bh(&lif->rx_filters.lock);
return 0;
}
@@ -84,11 +110,13 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif)
struct hlist_node *tmp;
unsigned int i;
+ spin_lock_bh(&lif->rx_filters.lock);
for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
head = &lif->rx_filters.by_id[i];
hlist_for_each_entry_safe(f, tmp, head, by_id)
ionic_rx_filter_free(lif, f);
}
+ spin_unlock_bh(&lif->rx_filters.lock);
}
int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
@@ -124,6 +152,7 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index,
f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id);
f->rxq_index = rxq_index;
memcpy(&f->cmd, ac, sizeof(f->cmd));
+ netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id);
INIT_HLIST_NODE(&f->by_hash);
INIT_HLIST_NODE(&f->by_id);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index b7f900c11834..85eb8f276a37 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -161,12 +161,6 @@ static void ionic_rx_clean(struct ionic_queue *q,
return;
}
- /* no packet processing while resetting */
- if (unlikely(test_bit(IONIC_LIF_F_QUEUE_RESET, q->lif->state))) {
- stats->dropped++;
- return;
- }
-
stats->pkts++;
stats->bytes += le16_to_cpu(comp->len);
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index a49743d56b9c..6c2f9ff4a53e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -876,6 +876,8 @@ struct qed_dev {
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
u8 engine_for_debug;
bool disable_ilt_dump;
+ bool dbg_bin_dump;
+
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 08ba9d54ab63..d13ec88313c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -2008,8 +2008,8 @@ static void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
enum protocol_type proto;
if (p_hwfn->mcp_info->func_info.protocol == QED_PCI_ETH_RDMA) {
- DP_NOTICE(p_hwfn,
- "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Current day drivers don't support RoCE & iWARP simultaneously on the same PF. Default to RoCE-only\n");
p_hwfn->hw_info.personality = QED_PCI_ETH_ROCE;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 57a0dab88431..3b9bbafafe68 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -5568,7 +5568,8 @@ static const char * const s_status_str[] = {
/* DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS */
"The filter/trigger constraint dword offsets are not enabled for recording",
-
+ /* DBG_STATUS_NO_MATCHING_FRAMING_MODE */
+ "No matching framing mode",
/* DBG_STATUS_VFC_READ_ERROR */
"Error reading from VFC",
@@ -7505,6 +7506,12 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->print_dbg_data)
qed_dbg_print_feature(text_buf, text_size_bytes);
+ /* Just return the original binary buffer if requested */
+ if (p_hwfn->cdev->dbg_bin_dump) {
+ vfree(text_buf);
+ return DBG_STATUS_OK;
+ }
+
/* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
@@ -7732,7 +7739,9 @@ int qed_dbg_mcp_trace_size(struct qed_dev *cdev)
#define REGDUMP_HEADER_SIZE_SHIFT 0
#define REGDUMP_HEADER_SIZE_MASK 0xffffff
#define REGDUMP_HEADER_FEATURE_SHIFT 24
-#define REGDUMP_HEADER_FEATURE_MASK 0x3f
+#define REGDUMP_HEADER_FEATURE_MASK 0x1f
+#define REGDUMP_HEADER_BIN_DUMP_SHIFT 29
+#define REGDUMP_HEADER_BIN_DUMP_MASK 0x1
#define REGDUMP_HEADER_OMIT_ENGINE_SHIFT 30
#define REGDUMP_HEADER_OMIT_ENGINE_MASK 0x1
#define REGDUMP_HEADER_ENGINE_SHIFT 31
@@ -7770,6 +7779,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
feature, feature_size);
SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
@@ -7793,6 +7803,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
omit_engine = 1;
mutex_lock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = true;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7930,6 +7941,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
+ /* Re-populate nvm attribute info */
+ qed_mcp_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_populate(p_hwfn);
+
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
(u8 *)buffer + offset +
@@ -7992,6 +8007,7 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
}
+ cdev->dbg_bin_dump = false;
mutex_unlock(&qed_dbg_lock);
return 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 3aa51374e727..dbdac983ccde 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -3102,7 +3102,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
}
/* Log and clear previous pglue_b errors if such exist */
- qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt);
+ qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_main_ptt, true);
/* Enable the PF's internal FID_enable in the PXP */
rc = qed_pglueb_set_pfid_enable(p_hwfn, p_hwfn->p_main_ptt,
@@ -4472,12 +4472,6 @@ static int qed_get_dev_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
return 0;
}
-static void qed_nvm_info_free(struct qed_hwfn *p_hwfn)
-{
- kfree(p_hwfn->nvm_info.image_att);
- p_hwfn->nvm_info.image_att = NULL;
-}
-
static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
void __iomem *p_regview,
void __iomem *p_doorbells,
@@ -4562,7 +4556,7 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
err3:
if (IS_LEAD_HWFN(p_hwfn))
- qed_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
err2:
if (IS_LEAD_HWFN(p_hwfn))
qed_iov_free_hw_info(p_hwfn->cdev);
@@ -4623,7 +4617,7 @@ int qed_hw_prepare(struct qed_dev *cdev,
if (rc) {
if (IS_PF(cdev)) {
qed_init_free(p_hwfn);
- qed_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
qed_mcp_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
}
@@ -4657,7 +4651,7 @@ void qed_hw_remove(struct qed_dev *cdev)
qed_iov_free_hw_info(cdev);
- qed_nvm_info_free(p_hwfn);
+ qed_mcp_nvm_info_free(p_hwfn);
}
static void qed_chain_free_next_ptr(struct qed_dev *cdev,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b7b974f0ef21..5eec1fc6229d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -257,9 +257,10 @@ out:
#define PGLUE_ATTENTION_ZLR_VALID (1 << 25)
#define PGLUE_ATTENTION_ILT_VALID (1 << 23)
-int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt)
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool hw_init)
{
+ char msg[256];
u32 tmp;
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
@@ -273,22 +274,23 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
details = qed_rd(p_hwfn, p_ptt,
PGLUE_B_REG_TX_ERR_WR_DETAILS);
- DP_NOTICE(p_hwfn,
- "Illegal write by chip to [%08x:%08x] blocked.\n"
- "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
- "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
- addr_hi, addr_lo, details,
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
- (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
- GET_FIELD(details,
- PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0,
- tmp,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_BME) ? 1 : 0,
- GET_FIELD(tmp,
- PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 : 0);
+ snprintf(msg, sizeof(msg),
+ "Illegal write by chip to [%08x:%08x] blocked.\n"
+ "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]\n"
+ "Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]",
+ addr_hi, addr_lo, details,
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_PFID),
+ (u8)GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VFID),
+ !!GET_FIELD(details, PGLUE_ATTENTION_DETAILS_VF_VALID),
+ tmp,
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_WAS_ERR),
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_BME),
+ !!GET_FIELD(tmp, PGLUE_ATTENTION_DETAILS2_FID_EN));
+
+ if (hw_init)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+ else
+ DP_NOTICE(p_hwfn, "%s\n", msg);
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
@@ -321,8 +323,14 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
}
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
- if (tmp & PGLUE_ATTENTION_ICPL_VALID)
- DP_NOTICE(p_hwfn, "ICPL error - %08x\n", tmp);
+ if (tmp & PGLUE_ATTENTION_ICPL_VALID) {
+ snprintf(msg, sizeof(msg), "ICPL error - %08x", tmp);
+
+ if (hw_init)
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "%s\n", msg);
+ else
+ DP_NOTICE(p_hwfn, "%s\n", msg);
+ }
tmp = qed_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
if (tmp & PGLUE_ATTENTION_ZLR_VALID) {
@@ -361,7 +369,7 @@ int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
static int qed_pglueb_rbc_attn_cb(struct qed_hwfn *p_hwfn)
{
- return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
+ return qed_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt, false);
}
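
The pattern introduced here is: format the attention message once into a local buffer, then choose the severity depending on whether the handler runs during init. A user-space analogue using vsnprintf, with fprintf standing in for DP_VERBOSE/DP_NOTICE:

#include <stdio.h>
#include <stdarg.h>
#include <stdbool.h>

/* Format the details once, then pick the severity based on whether we are
 * merely clearing stale state during hardware init. */
static void report(bool hw_init, const char *fmt, ...)
{
    char msg[256];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(msg, sizeof(msg), fmt, ap);
    va_end(ap);

    if (hw_init)
        fprintf(stdout, "debug: %s\n", msg);   /* DP_VERBOSE analogue */
    else
        fprintf(stderr, "NOTICE: %s\n", msg);  /* DP_NOTICE analogue */
}

int main(void)
{
    report(true,  "ICPL error - %08x", 0xdeadbeefu);
    report(false, "ICPL error - %08x", 0xdeadbeefu);
    return 0;
}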
static int qed_fw_assertion(struct qed_hwfn *p_hwfn)
@@ -1193,7 +1201,8 @@ static int qed_int_attentions(struct qed_hwfn *p_hwfn)
index, attn_bits, attn_acks, asserted_bits,
deasserted_bits, p_sb_attn_sw->known_attn);
} else if (asserted_bits == 0x100) {
- DP_INFO(p_hwfn, "MFW indication via attention\n");
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "MFW indication via attention\n");
} else {
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
"MFW indication [deassertion]\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index e09db3386367..110169e90121 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -442,7 +442,7 @@ int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
-int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt);
+int qed_pglueb_rbc_attn_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool hw_init);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 9624616806e7..0fd4520d0666 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -3280,6 +3280,13 @@ err0:
return rc;
}
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
+{
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = NULL;
+ p_hwfn->nvm_info.valid = false;
+}
+
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 5750b4c5ef63..12a705ed4bac 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -1221,6 +1221,13 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
/**
+ * @brief Delete nvm info shadow in the given hardware function
+ *
+ * @param p_hwfn
+ */
+void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
+
+/**
* @brief Get the engine affinity configuration.
*
* @param p_hwfn
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 40efe60eff8d..fcdecddb2812 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -47,15 +47,23 @@ static int rmnet_unregister_real_device(struct net_device *real_dev)
return 0;
}
-static int rmnet_register_real_device(struct net_device *real_dev)
+static int rmnet_register_real_device(struct net_device *real_dev,
+ struct netlink_ext_ack *extack)
{
struct rmnet_port *port;
int rc, entry;
ASSERT_RTNL();
- if (rmnet_is_real_dev_registered(real_dev))
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+ return -EINVAL;
+ }
+
return 0;
+ }
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
@@ -133,7 +141,7 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
- err = rmnet_register_real_device(real_dev);
+ err = rmnet_register_real_device(real_dev, extack);
if (err)
goto err0;
@@ -422,7 +430,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
}
if (port->rmnet_mode != RMNET_EPMODE_VND) {
- NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+ NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
return -EINVAL;
}
@@ -433,7 +441,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
return -EBUSY;
}
- err = rmnet_register_real_device(slave_dev);
+ err = rmnet_register_real_device(slave_dev, extack);
if (err)
return -EBUSY;
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index a442bcf64b9c..99f7aae102ce 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1450,6 +1450,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
struct ravb_private *priv = container_of(work, struct ravb_private,
work);
struct net_device *ndev = priv->ndev;
+ int error;
netif_tx_stop_all_queues(ndev);
@@ -1458,15 +1459,36 @@ static void ravb_tx_timeout_work(struct work_struct *work)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
- ravb_stop_dma(ndev);
+ if (ravb_stop_dma(ndev)) {
+ /* If ravb_stop_dma() fails, the hardware is still operating
+ * for TX and/or RX. So, this should not call the following
+ * functions because ravb_dmac_init() is possible to fail too.
+ * Also, this should not retry ravb_stop_dma() again and again
+ * here because it's possible to wait forever. So, this just
+ * re-enables the TX and RX and skip the following
+ * re-initialization procedure.
+ */
+ ravb_rcv_snd_enable(ndev);
+ goto out;
+ }
ravb_ring_free(ndev, RAVB_BE);
ravb_ring_free(ndev, RAVB_NC);
/* Device init */
- ravb_dmac_init(ndev);
+ error = ravb_dmac_init(ndev);
+ if (error) {
+ /* If ravb_dmac_init() fails, descriptors are freed. So, this
+ * should return here to avoid re-enabling the TX and RX in
+ * ravb_emac_init().
+ */
+ netdev_err(ndev, "%s: ravb_dmac_init() failed, error %d\n",
+ __func__, error);
+ return;
+ }
ravb_emac_init(ndev);
+out:
/* Initialise PTP Clock driver */
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_init(ndev, priv->pdev);
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 90410f9d3b1a..1c4fea9c3ec4 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2274,7 +2274,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
"power", 0, 0, 100);
if (ret)
- return ret;
+ goto out_free_netdev;
/*
* Optional reset GPIO configured? Minimum 100 ns reset needed
@@ -2283,7 +2283,7 @@ static int smc_drv_probe(struct platform_device *pdev)
ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
"reset", 0, 0, 100);
if (ret)
- return ret;
+ goto out_free_netdev;
/*
* Need to wait for optional EEPROM to load, max 750 us according
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 328bc38848bb..0f366cc50b74 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1044,8 +1044,9 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
skb->ip_summed = CHECKSUM_UNNECESSARY;
next:
- if ((skb && napi_gro_receive(&priv->napi, skb) != GRO_DROP) ||
- xdp_result) {
+ if (skb)
+ napi_gro_receive(&priv->napi, skb);
+ if (skb || xdp_result) {
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += xdp.data_end - xdp.data;
}
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index f2638446b62e..81b554dd7221 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1191,7 +1191,7 @@ static int ave_init(struct net_device *ndev)
ret = regmap_update_bits(priv->regmap, SG_ETPINMODE,
priv->pinmode_mask, priv->pinmode_val);
if (ret)
- return ret;
+ goto out_reset_assert;
ave_global_reset(ndev);
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1492648247d9..6d778bc3d012 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1850,7 +1850,8 @@ static int am65_cpsw_nuss_init_ndev_2g(struct am65_cpsw_common *common)
port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
port->ndev->hw_features = NETIF_F_SG |
NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM;
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_TC;
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
port->ndev->vlan_features |= NETIF_F_SG;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 4661ef865807..dec52b763d50 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1615,11 +1615,11 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[],
struct netlink_ext_ack *extack)
{
struct geneve_dev *geneve = netdev_priv(dev);
+ enum ifla_geneve_df df = geneve->df;
struct geneve_sock *gs4, *gs6;
struct ip_tunnel_info info;
bool metadata;
bool use_udp6_rx_checksums;
- enum ifla_geneve_df df;
bool ttl_inherit;
int err;
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 2a6ec5394966..a4b3fce69ecd 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -1242,7 +1242,7 @@ static int rr_open(struct net_device *dev)
rrpriv->info = NULL;
}
if (rrpriv->rx_ctrl) {
- pci_free_consistent(pdev, sizeof(struct ring_ctrl),
+ pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
rrpriv->rx_ctrl = NULL;
}
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 5a37514e4234..c11f32f644db 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -4,7 +4,7 @@
*
* Copyright 2009-2017 Analog Devices Inc.
*
- * http://www.analog.com/ADF7242
+ * https://www.analog.com/ADF7242
*/
#include <linux/kernel.h>
@@ -1262,7 +1262,7 @@ static int adf7242_probe(struct spi_device *spi)
WQ_MEM_RECLAIM);
if (unlikely(!lp->wqueue)) {
ret = -ENOMEM;
- goto err_hw_init;
+ goto err_alloc_wq;
}
ret = adf7242_hw_init(lp);
@@ -1294,6 +1294,8 @@ static int adf7242_probe(struct spi_device *spi)
return ret;
err_hw_init:
+ destroy_workqueue(lp->wqueue);
+err_alloc_wq:
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 55226b264e3c..ac7e5a04c8ac 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -500,6 +500,13 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
int ret;
state = gsi_channel_state(channel);
+
+ /* Channel could have entered STOPPED state since last call
+ * if it timed out. If so, we're done.
+ */
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
+
if (state != GSI_CHANNEL_STATE_STARTED &&
state != GSI_CHANNEL_STATE_STOP_IN_PROC)
return -EINVAL;
@@ -789,20 +796,11 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id)
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- enum gsi_channel_state state;
u32 retries;
int ret;
gsi_channel_freeze(channel);
- /* Channel could have entered STOPPED state since last call if the
- * STOP command timed out. We won't stop a channel if stopping it
- * was successful previously (so we still want the freeze above).
- */
- state = gsi_channel_state(channel);
- if (state == GSI_CHANNEL_STATE_STOPPED)
- return 0;
-
/* RX channels might require a little time to enter STOPPED state */
retries = channel->toward_ipa ? 0 : GSI_CHANNEL_STOP_RX_RETRIES;
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index c9ab865e7290..d92dd3f09b73 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -586,6 +586,21 @@ u32 ipa_cmd_tag_process_count(void)
return 4;
}
+void ipa_cmd_tag_process(struct ipa *ipa)
+{
+ u32 count = ipa_cmd_tag_process_count();
+ struct gsi_trans *trans;
+
+ trans = ipa_cmd_trans_alloc(ipa, count);
+ if (trans) {
+ ipa_cmd_tag_process_add(trans);
+ gsi_trans_commit_wait(trans);
+ } else {
+ dev_err(&ipa->pdev->dev,
+ "error allocating %u entry tag transaction\n", count);
+ }
+}
+
static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index e440aa69c8b5..1a646e0264a0 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -172,6 +172,14 @@ void ipa_cmd_tag_process_add(struct gsi_trans *trans);
u32 ipa_cmd_tag_process_count(void);
/**
+ * ipa_cmd_tag_process() - Perform a tag process
+ *
+ * @ipa: IPA pointer
+ *
+ * Allocate a tag process transaction, commit it, and wait for it to complete.
+ */
+void ipa_cmd_tag_process(struct ipa *ipa);
+
+/**
* ipa_cmd_trans_alloc() - Allocate a transaction for the command TX endpoint
* @ipa: IPA pointer
* @tre_count: Number of elements in the transaction
diff --git a/drivers/net/ipa/ipa_data-sdm845.c b/drivers/net/ipa/ipa_data-sdm845.c
index 52d4b84e0dac..de2768d71ab5 100644
--- a/drivers/net/ipa/ipa_data-sdm845.c
+++ b/drivers/net/ipa/ipa_data-sdm845.c
@@ -44,7 +44,6 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.endpoint = {
.seq_type = IPA_SEQ_INVALID,
.config = {
- .checksum = true,
.aggregation = true,
.status_enable = true,
.rx = {
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 9f50d0d11704..9e58e495d373 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1450,6 +1450,8 @@ void ipa_endpoint_suspend(struct ipa *ipa)
if (ipa->modem_netdev)
ipa_modem_suspend(ipa->modem_netdev);
+ ipa_cmd_tag_process(ipa);
+
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
}
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
index dc4a5c2196ae..d323adb03383 100644
--- a/drivers/net/ipa/ipa_gsi.c
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
+#include "ipa_gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
diff --git a/drivers/net/ipa/ipa_gsi.h b/drivers/net/ipa/ipa_gsi.h
index 3cf18600c68e..0a40f3dc55fc 100644
--- a/drivers/net/ipa/ipa_gsi.h
+++ b/drivers/net/ipa/ipa_gsi.h
@@ -8,7 +8,9 @@
#include <linux/types.h>
+struct gsi;
struct gsi_trans;
+struct ipa_gsi_endpoint_data;
/**
* ipa_gsi_trans_complete() - GSI transaction completion callback
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 03a1d0e55964..73413371e3d3 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -119,7 +119,7 @@ struct qmi_elem_info ipa_driver_init_complete_rsp_ei[] = {
sizeof_field(struct ipa_driver_init_complete_rsp,
rsp),
.tlv_type = 0x02,
- .elem_size = offsetof(struct ipa_driver_init_complete_rsp,
+ .offset = offsetof(struct ipa_driver_init_complete_rsp,
rsp),
.ei_array = qmi_response_type_v01_ei,
},
@@ -137,7 +137,7 @@ struct qmi_elem_info ipa_init_complete_ind_ei[] = {
sizeof_field(struct ipa_init_complete_ind,
status),
.tlv_type = 0x02,
- .elem_size = offsetof(struct ipa_init_complete_ind,
+ .offset = offsetof(struct ipa_init_complete_ind,
status),
.ei_array = qmi_response_type_v01_ei,
},
@@ -218,7 +218,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
sizeof_field(struct ipa_init_modem_driver_req,
platform_type_valid),
.tlv_type = 0x10,
- .elem_size = offsetof(struct ipa_init_modem_driver_req,
+ .offset = offsetof(struct ipa_init_modem_driver_req,
platform_type_valid),
},
{
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index e56547bfdac9..9159846b8b93 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -4052,9 +4052,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
return err;
netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macsec_netdev_addr_lock_key,
- dev->lower_level);
+ lockdep_set_class(&dev->addr_list_lock,
+ &macsec_netdev_addr_lock_key);
err = netdev_upper_dev_link(real_dev, dev, extack);
if (err < 0)
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 6a6cc9f75307..4942f6112e51 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -880,9 +880,8 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
static void macvlan_set_lockdep_class(struct net_device *dev)
{
netdev_lockdep_set_classes(dev);
- lockdep_set_class_and_subclass(&dev->addr_list_lock,
- &macvlan_netdev_addr_lock_key,
- dev->lower_level);
+ lockdep_set_class(&dev->addr_list_lock,
+ &macvlan_netdev_addr_lock_key);
}
static int macvlan_init(struct net_device *dev)
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 2908e0a0d6e1..23950e7a0f81 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -302,7 +302,7 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
rtnl_lock();
err = nsim_bpf_init(ns);
if (err)
- goto err_free_netdev;
+ goto err_rtnl_unlock;
nsim_ipsec_init(ns);
@@ -316,8 +316,8 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
+err_rtnl_unlock:
rtnl_unlock();
-err_free_netdev:
free_netdev(dev);
return ERR_PTR(err);
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index f25702386d83..e351d65533aa 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -480,8 +480,7 @@ config MICROCHIP_T1_PHY
config MICROSEMI_PHY
tristate "Microsemi PHYs"
depends on MACSEC || MACSEC=n
- select CRYPTO_AES
- select CRYPTO_ECB
+ select CRYPTO_LIB_AES if MACSEC
help
Currently supports VSC8514, VSC8530, VSC8531, VSC8540 and VSC8541 PHYs
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index ecbd5e0d685c..acb0aae60755 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1260,6 +1260,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V1;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
@@ -1267,6 +1268,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4;
dp83640->version = PTP_CLASS_V2;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
@@ -1274,6 +1276,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
@@ -1281,6 +1284,7 @@ static int dp83640_hwtstamp(struct mii_timestamper *mii_ts, struct ifreq *ifr)
dp83640->hwts_rx_en = 1;
dp83640->layer = PTP_CLASS_L4 | PTP_CLASS_L2;
dp83640->version = PTP_CLASS_V2;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
default:
return -ERANGE;
diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c
index b4d3dc4068e2..d53ca884b5c9 100644
--- a/drivers/net/phy/mscc/mscc_macsec.c
+++ b/drivers/net/phy/mscc/mscc_macsec.c
@@ -10,7 +10,7 @@
#include <linux/phy.h>
#include <dt-bindings/net/mscc-phy-vsc8531.h>
-#include <crypto/skcipher.h>
+#include <crypto/aes.h>
#include <net/macsec.h>
@@ -500,39 +500,17 @@ static u32 vsc8584_macsec_flow_context_id(struct macsec_flow *flow)
static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
u16 key_len, u8 hkey[16])
{
- struct crypto_skcipher *tfm = crypto_alloc_skcipher("ecb(aes)", 0, 0);
- struct skcipher_request *req = NULL;
- struct scatterlist src, dst;
- DECLARE_CRYPTO_WAIT(wait);
- u32 input[4] = {0};
+ const u8 input[AES_BLOCK_SIZE] = {0};
+ struct crypto_aes_ctx ctx;
int ret;
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- req = skcipher_request_alloc(tfm, GFP_KERNEL);
- if (!req) {
- ret = -ENOMEM;
- goto out;
- }
-
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
- CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done,
- &wait);
- ret = crypto_skcipher_setkey(tfm, key, key_len);
- if (ret < 0)
- goto out;
-
- sg_init_one(&src, input, 16);
- sg_init_one(&dst, hkey, 16);
- skcipher_request_set_crypt(req, &src, &dst, 16, NULL);
-
- ret = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
+ ret = aes_expandkey(&ctx, key, key_len);
+ if (ret)
+ return ret;
-out:
- skcipher_request_free(req);
- crypto_free_skcipher(tfm);
- return ret;
+ aes_encrypt(&ctx, hkey, input);
+ memzero_explicit(&ctx, sizeof(ctx));
+ return 0;
}
static int vsc8584_macsec_transformation(struct phy_device *phydev,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1de3938628f4..56cfae950472 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -840,7 +840,7 @@ static void phy_error(struct phy_device *phydev)
* phy_disable_interrupts - Disable the PHY interrupts from the PHY side
* @phydev: target phy_device struct
*/
-static int phy_disable_interrupts(struct phy_device *phydev)
+int phy_disable_interrupts(struct phy_device *phydev)
{
int err;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 85ba95b598b5..b4978c5fb2ca 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1092,6 +1092,10 @@ int phy_init_hw(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = phy_disable_interrupts(phydev);
+ if (ret)
+ return ret;
+
if (phydev->drv->config_init)
ret = phydev->drv->config_init(phydev);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 858b012074bd..7adeb91bd368 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -62,6 +62,7 @@
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
+#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
@@ -1351,6 +1352,7 @@ static void tun_net_init(struct net_device *dev)
switch (tun->flags & TUN_TYPE_MASK) {
case IFF_TUN:
dev->netdev_ops = &tun_netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
/* Point-to-Point TUN Device */
dev->hard_header_len = 0;
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index 4e514f5d7c6c..fd3a04d98dc1 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -187,6 +187,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
if (ret < ETH_ALEN) {
netdev_err(dev->net, "Failed to read MAC address: %d\n", ret);
+ ret = -EIO;
goto free;
}
memcpy(dev->net->dev_addr, buf, ETH_ALEN);
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bb8c34d746ab..d2fdb5430d27 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1390,8 +1390,9 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
unsigned long flags;
if (old)
- hso_dbg(0x16, "Termios called with: cflags new[%d] - old[%d]\n",
- tty->termios.c_cflag, old->c_cflag);
+ hso_dbg(0x16, "Termios called with: cflags new[%u] - old[%u]\n",
+ (unsigned int)tty->termios.c_cflag,
+ (unsigned int)old->c_cflag);
/* the actual setup */
spin_lock_irqsave(&serial->serial_lock, flags);
@@ -2260,12 +2261,14 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
minor = get_free_serial_index();
if (minor < 0)
- goto exit;
+ goto exit2;
/* register our minor number */
serial->parent->dev = tty_port_register_device_attr(&serial->port,
tty_drv, minor, &serial->parent->interface->dev,
serial->parent, hso_serial_dev_groups);
+ if (IS_ERR(serial->parent->dev))
+ goto exit2;
/* fill in specific data for later use */
serial->minor = minor;
@@ -2310,6 +2313,7 @@ static int hso_serial_common_create(struct hso_serial *serial, int num_urbs,
return 0;
exit:
hso_serial_tty_unregister(serial);
+exit2:
hso_serial_common_free(serial);
return -1;
}
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index eccbf4cd7149..442507f25aad 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -377,10 +377,6 @@ struct lan78xx_net {
struct tasklet_struct bh;
struct delayed_work wq;
- struct usb_host_endpoint *ep_blkin;
- struct usb_host_endpoint *ep_blkout;
- struct usb_host_endpoint *ep_intr;
-
int msg_enable;
struct urb *urb_intr;
@@ -2860,78 +2856,12 @@ lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
return NETDEV_TX_OK;
}
-static int
-lan78xx_get_endpoints(struct lan78xx_net *dev, struct usb_interface *intf)
-{
- int tmp;
- struct usb_host_interface *alt = NULL;
- struct usb_host_endpoint *in = NULL, *out = NULL;
- struct usb_host_endpoint *status = NULL;
-
- for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
- unsigned ep;
-
- in = NULL;
- out = NULL;
- status = NULL;
- alt = intf->altsetting + tmp;
-
- for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
- struct usb_host_endpoint *e;
- int intr = 0;
-
- e = alt->endpoint + ep;
- switch (e->desc.bmAttributes) {
- case USB_ENDPOINT_XFER_INT:
- if (!usb_endpoint_dir_in(&e->desc))
- continue;
- intr = 1;
- /* FALLTHROUGH */
- case USB_ENDPOINT_XFER_BULK:
- break;
- default:
- continue;
- }
- if (usb_endpoint_dir_in(&e->desc)) {
- if (!intr && !in)
- in = e;
- else if (intr && !status)
- status = e;
- } else {
- if (!out)
- out = e;
- }
- }
- if (in && out)
- break;
- }
- if (!alt || !in || !out)
- return -EINVAL;
-
- dev->pipe_in = usb_rcvbulkpipe(dev->udev,
- in->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->pipe_out = usb_sndbulkpipe(dev->udev,
- out->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- dev->ep_intr = status;
-
- return 0;
-}
-
static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
{
struct lan78xx_priv *pdata = NULL;
int ret;
int i;
- ret = lan78xx_get_endpoints(dev, intf);
- if (ret) {
- netdev_warn(dev->net, "lan78xx_get_endpoints failed: %d\n",
- ret);
- return ret;
- }
-
dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
pdata = (struct lan78xx_priv *)(dev->data[0]);
@@ -3700,6 +3630,7 @@ static void lan78xx_stat_monitor(struct timer_list *t)
static int lan78xx_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
struct lan78xx_net *dev;
struct net_device *netdev;
struct usb_device *udev;
@@ -3748,6 +3679,34 @@ static int lan78xx_probe(struct usb_interface *intf,
mutex_init(&dev->stats.access_lock);
+ if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
+ ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
+ if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
+ ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
+ if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ ep_intr = &intf->cur_altsetting->endpoint[2];
+ if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
+ ret = -ENODEV;
+ goto out2;
+ }
+
+ dev->pipe_intr = usb_rcvintpipe(dev->udev,
+ usb_endpoint_num(&ep_intr->desc));
+
ret = lan78xx_bind(dev, intf);
if (ret < 0)
goto out2;
@@ -3759,18 +3718,7 @@ static int lan78xx_probe(struct usb_interface *intf,
netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;
netif_set_gso_max_size(netdev, MAX_SINGLE_PACKET_SIZE - MAX_HEADER);
- dev->ep_blkin = (intf->cur_altsetting)->endpoint + 0;
- dev->ep_blkout = (intf->cur_altsetting)->endpoint + 1;
- dev->ep_intr = (intf->cur_altsetting)->endpoint + 2;
-
- dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
- dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
-
- dev->pipe_intr = usb_rcvintpipe(dev->udev,
- dev->ep_intr->desc.bEndpointAddress &
- USB_ENDPOINT_NUMBER_MASK);
- period = dev->ep_intr->desc.bInterval;
-
+ period = ep_intr->desc.bInterval;
maxp = usb_maxpacket(dev->udev, dev->pipe_intr, 0);
buf = kmalloc(maxp, GFP_KERNEL);
if (buf) {
@@ -3783,6 +3731,7 @@ static int lan78xx_probe(struct usb_interface *intf,
usb_fill_int_urb(dev->urb_intr, dev->udev,
dev->pipe_intr, buf, maxp,
intr_complete, dev, period);
+ dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;
}
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 31b1d4b959f6..07c42c0719f5 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1370,6 +1370,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */
{QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3cf4dc3433f9..bb4ccbda031a 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1287,11 +1287,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
/* Init all registers */
ret = smsc95xx_reset(dev);
+ if (ret)
+ goto free_pdata;
/* detect device revision as different features may be available */
ret = smsc95xx_read_reg(dev, ID_REV, &val);
if (ret < 0)
- return ret;
+ goto free_pdata;
+
val >>= 16;
pdata->chip_id = val;
pdata->mdix_ctrl = get_mdix_status(dev->net);
@@ -1317,6 +1320,10 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
schedule_delayed_work(&pdata->carrier_check, CARRIER_CHECK_DELAY);
return 0;
+
+free_pdata:
+ kfree(pdata);
+ return ret;
}
static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index e8085ab6d484..5efe1e28f270 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1376,17 +1376,24 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct vxlan_fdb *f;
+ rcu_read_lock();
hlist_for_each_entry_rcu(f, &vxlan->fdb_head[h], hlist) {
struct vxlan_rdst *rd;
if (rcu_access_pointer(f->nh)) {
+ if (*idx < cb->args[2])
+ goto skip_nh;
err = vxlan_fdb_info(skb, vxlan, f,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, NULL);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
+skip_nh:
+ *idx += 1;
continue;
}
@@ -1399,12 +1406,15 @@ static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
cb->nlh->nlmsg_seq,
RTM_NEWNEIGH,
NLM_F_MULTI, rd);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto out;
+ }
skip:
*idx += 1;
}
}
+ rcu_read_unlock();
}
out:
return err;
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index c84536b03aa8..f70336bb6f52 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -71,8 +71,10 @@ static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
- if (skb_cow(skb, 1))
+ if (skb_cow(skb, 1)) {
+ kfree_skb(skb);
return NET_RX_DROP;
+ }
skb_push(skb, 1);
skb_reset_network_header(skb);
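The same receive-path pattern recurs in the lapbether.c and x25_asy.c hunks below: guarantee one byte of headroom with skb_cow() before calling skb_push(), and free the skb locally when dropping, because returning NET_RX_DROP alone would leak it (and pushing before the headroom check can underrun the buffer head). A minimal sketch of that pattern, assuming a hypothetical helper name:

#include <linux/if_x25.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper, not part of the patch: prepend the one-byte
 * X.25 pseudo header the way the fixed drivers do it.
 */
static int x25_prepend_iface_byte(struct sk_buff *skb)
{
	unsigned char *ptr;

	if (skb_cow(skb, 1)) {		/* make room; may reallocate */
		kfree_skb(skb);		/* we own the skb on failure */
		return NET_RX_DROP;
	}

	skb_push(skb, 1);		/* safe: headroom guaranteed above */
	ptr = skb->data;
	*ptr = X25_IFACE_DATA;		/* pseudo header for the upper layer */
	return NET_RX_SUCCESS;
}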
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index e30d91a38cfb..b2868433718f 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -128,10 +128,12 @@ static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
{
unsigned char *ptr;
- skb_push(skb, 1);
-
- if (skb_cow(skb, 1))
+ if (skb_cow(skb, 1)) {
+ kfree_skb(skb);
return NET_RX_DROP;
+ }
+
+ skb_push(skb, 1);
ptr = skb->data;
*ptr = X25_IFACE_DATA;
@@ -303,7 +305,6 @@ static void lapbeth_setup(struct net_device *dev)
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
- dev->hard_header_len = 3;
dev->mtu = 1000;
dev->addr_len = 0;
}
@@ -324,6 +325,14 @@ static int lapbeth_new_device(struct net_device *dev)
if (!ndev)
goto out;
+ /* When transmitting data:
+ * first this driver removes a pseudo header of 1 byte,
+ * then the lapb module prepends an LAPB header of at most 3 bytes,
+ * then this driver prepends a length field of 2 bytes,
+ * then the underlying Ethernet device prepends its own header.
+ */
+ ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len;
+
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
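As a rough worked example of the headroom budget in the comment above (assuming an Ethernet lower device, so dev->hard_header_len == 14), the result is -1 + 3 + 2 + 14 = 18 bytes. A small sketch with a hypothetical helper name:

#include <linux/netdevice.h>

/* Hypothetical helper, not part of the patch: spell out the budget from
 * the comment above. One pseudo-header byte is stripped, lapb may prepend
 * up to 3 bytes, this driver prepends a 2-byte length field, and the
 * lower device prepends its own link-layer header.
 */
static unsigned int lapbeth_needed_headroom(const struct net_device *lower)
{
	return -1 + 3 + 2 + lower->hard_header_len;	/* 18 for Ethernet */
}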
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 69773d228ec1..84640a0c13f3 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -183,7 +183,7 @@ static inline void x25_asy_unlock(struct x25_asy *sl)
netif_wake_queue(sl->dev);
}
-/* Send one completely decapsulated IP datagram to the IP layer. */
+/* Send an LAPB frame to the LAPB module to process. */
static void x25_asy_bump(struct x25_asy *sl)
{
@@ -195,13 +195,12 @@ static void x25_asy_bump(struct x25_asy *sl)
count = sl->rcount;
dev->stats.rx_bytes += count;
- skb = dev_alloc_skb(count+1);
+ skb = dev_alloc_skb(count);
if (skb == NULL) {
netdev_warn(sl->dev, "memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
- skb_push(skb, 1); /* LAPB internal control */
skb_put_data(skb, sl->rbuff, count);
skb->protocol = x25_type_trans(skb, sl->dev);
err = lapb_data_received(skb->dev, skb);
@@ -209,7 +208,6 @@ static void x25_asy_bump(struct x25_asy *sl)
kfree_skb(skb);
printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
} else {
- netif_rx(skb);
dev->stats.rx_packets++;
}
}
@@ -356,12 +354,21 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb,
*/
/*
- * Called when I frame data arrives. We did the work above - throw it
- * at the net layer.
+ * Called when I frame data arrives. We add a pseudo header for the
+ * upper layers and pass the frame up the stack.
*/
static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
{
+ if (skb_cow(skb, 1)) {
+ kfree_skb(skb);
+ return NET_RX_DROP;
+ }
+ skb_push(skb, 1);
+ skb->data[0] = X25_IFACE_DATA;
+
+ skb->protocol = x25_type_trans(skb, dev);
+
return netif_rx(skb);
}
@@ -657,7 +664,7 @@ static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
switch (s) {
case X25_END:
if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
- sl->rcount > 2)
+ sl->rcount >= 2)
x25_asy_bump(sl);
clear_bit(SLF_ESCAPE, &sl->flags);
sl->rcount = 0;
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index a8f151b1b5fa..c9f65e96ccb0 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -262,6 +262,7 @@ static void wg_setup(struct net_device *dev)
max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
dev->netdev_ops = &netdev_ops;
+ dev->header_ops = &ip_tunnel_header_ops;
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->needed_headroom = DATA_PACKET_HEAD_ROOM;
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index c58df439dbbe..dfb674e03076 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -11,6 +11,7 @@
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <net/ip_tunnels.h>
struct wg_device;
struct wg_peer;
@@ -65,25 +66,9 @@ struct packet_cb {
#define PACKET_CB(skb) ((struct packet_cb *)((skb)->cb))
#define PACKET_PEER(skb) (PACKET_CB(skb)->keypair->entry.peer)
-/* Returns either the correct skb->protocol value, or 0 if invalid. */
-static inline __be16 wg_examine_packet_protocol(struct sk_buff *skb)
-{
- if (skb_network_header(skb) >= skb->head &&
- (skb_network_header(skb) + sizeof(struct iphdr)) <=
- skb_tail_pointer(skb) &&
- ip_hdr(skb)->version == 4)
- return htons(ETH_P_IP);
- if (skb_network_header(skb) >= skb->head &&
- (skb_network_header(skb) + sizeof(struct ipv6hdr)) <=
- skb_tail_pointer(skb) &&
- ipv6_hdr(skb)->version == 6)
- return htons(ETH_P_IPV6);
- return 0;
-}
-
static inline bool wg_check_packet_protocol(struct sk_buff *skb)
{
- __be16 real_protocol = wg_examine_packet_protocol(skb);
+ __be16 real_protocol = ip_tunnel_parse_protocol(skb);
return real_protocol && skb->protocol == real_protocol;
}
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 91438144e4f7..2c9551ea6dc7 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -387,7 +387,7 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
*/
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = ~0; /* All levels */
- skb->protocol = wg_examine_packet_protocol(skb);
+ skb->protocol = ip_tunnel_parse_protocol(skb);
if (skb->protocol == htons(ETH_P_IP)) {
len = ntohs(ip_hdr(skb)->tot_len);
if (unlikely(len < sizeof(struct iphdr)))
@@ -414,14 +414,8 @@ static void wg_packet_consume_data_done(struct wg_peer *peer,
if (unlikely(routed_peer != peer))
goto dishonest_packet_peer;
- if (unlikely(napi_gro_receive(&peer->napi, skb) == GRO_DROP)) {
- ++dev->stats.rx_dropped;
- net_dbg_ratelimited("%s: Failed to give packet to userspace from peer %llu (%pISpfsc)\n",
- dev->name, peer->internal_id,
- &peer->endpoint.addr);
- } else {
- update_rx_stats(peer, message_data_len(len_before_trim));
- }
+ napi_gro_receive(&peer->napi, skb);
+ update_rx_stats(peer, message_data_len(len_before_trim));
return;
dishonest_packet_peer:
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index 342a7e58018a..05a61975c83f 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -820,7 +820,7 @@ err_free_irq:
ath10k_ahb_release_irq_legacy(ar);
err_free_pipes:
- ath10k_pci_free_pipes(ar);
+ ath10k_pci_release_resource(ar);
err_resource_deinit:
ath10k_ahb_resource_deinit(ar);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 1d941d53fdc9..cfde7791291a 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3473,6 +3473,28 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
timer_setup(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry, 0);
+ ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
+ sizeof(pci_host_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->attr)
+ return -ENOMEM;
+
+ ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
+ sizeof(pci_target_ce_config_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->pipe_config) {
+ ret = -ENOMEM;
+ goto err_free_attr;
+ }
+
+ ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
+ sizeof(pci_target_service_to_ce_map_wlan),
+ GFP_KERNEL);
+ if (!ar_pci->serv_to_pipe) {
+ ret = -ENOMEM;
+ goto err_free_pipe_config;
+ }
+
if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
ath10k_pci_override_ce_config(ar);
@@ -3480,18 +3502,31 @@ int ath10k_pci_setup_resource(struct ath10k *ar)
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
- return ret;
+ goto err_free_serv_to_pipe;
}
return 0;
+
+err_free_serv_to_pipe:
+ kfree(ar_pci->serv_to_pipe);
+err_free_pipe_config:
+ kfree(ar_pci->pipe_config);
+err_free_attr:
+ kfree(ar_pci->attr);
+ return ret;
}
void ath10k_pci_release_resource(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
ath10k_pci_rx_retry_sync(ar);
netif_napi_del(&ar->napi);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_pipes(ar);
+ kfree(ar_pci->attr);
+ kfree(ar_pci->pipe_config);
+ kfree(ar_pci->serv_to_pipe);
}
static const struct ath10k_bus_ops ath10k_pci_bus_ops = {
@@ -3601,30 +3636,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
timer_setup(&ar_pci->ps_timer, ath10k_pci_ps_timer, 0);
- ar_pci->attr = kmemdup(pci_host_ce_config_wlan,
- sizeof(pci_host_ce_config_wlan),
- GFP_KERNEL);
- if (!ar_pci->attr) {
- ret = -ENOMEM;
- goto err_free;
- }
-
- ar_pci->pipe_config = kmemdup(pci_target_ce_config_wlan,
- sizeof(pci_target_ce_config_wlan),
- GFP_KERNEL);
- if (!ar_pci->pipe_config) {
- ret = -ENOMEM;
- goto err_free;
- }
-
- ar_pci->serv_to_pipe = kmemdup(pci_target_service_to_ce_map_wlan,
- sizeof(pci_target_service_to_ce_map_wlan),
- GFP_KERNEL);
- if (!ar_pci->serv_to_pipe) {
- ret = -ENOMEM;
- goto err_free;
- }
-
ret = ath10k_pci_setup_resource(ar);
if (ret) {
ath10k_err(ar, "failed to setup resource: %d\n", ret);
@@ -3705,10 +3716,9 @@ err_unsupported:
err_free_irq:
ath10k_pci_free_irq(ar);
- ath10k_pci_rx_retry_sync(ar);
err_deinit_irq:
- ath10k_pci_deinit_irq(ar);
+ ath10k_pci_release_resource(ar);
err_sleep:
ath10k_pci_sleep_sync(ar);
@@ -3720,29 +3730,18 @@ err_free_pipes:
err_core_destroy:
ath10k_core_destroy(ar);
-err_free:
- kfree(ar_pci->attr);
- kfree(ar_pci->pipe_config);
- kfree(ar_pci->serv_to_pipe);
-
return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
struct ath10k *ar = pci_get_drvdata(pdev);
- struct ath10k_pci *ar_pci;
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
if (!ar)
return;
- ar_pci = ath10k_pci_priv(ar);
-
- if (!ar_pci)
- return;
-
ath10k_core_unregister(ar);
ath10k_pci_free_irq(ar);
ath10k_pci_deinit_irq(ar);
@@ -3750,9 +3749,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_sleep_sync(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
- kfree(ar_pci->attr);
- kfree(ar_pci->pipe_config);
- kfree(ar_pci->serv_to_pipe);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 4ed21dad6a8e..3f563e02d17d 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -733,11 +733,13 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
+ rx_buf->skb = nskb;
+
usb_fill_int_urb(urb, hif_dev->udev,
usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, nskb, 1);
+ ath9k_hif_usb_reg_in_cb, rx_buf, 1);
}
resubmit:
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index bc8c15fb609d..080e5aa60bea 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -897,7 +897,6 @@ static void wil_rx_handle_eapol(struct wil6210_vif *vif, struct sk_buff *skb)
void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
struct wil_net_stats *stats, bool gro)
{
- gro_result_t rc = GRO_NORMAL;
struct wil6210_vif *vif = ndev_to_vif(ndev);
struct wil6210_priv *wil = ndev_to_wil(ndev);
struct wireless_dev *wdev = vif_to_wdev(vif);
@@ -908,22 +907,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
*/
int mcast = is_multicast_ether_addr(da);
struct sk_buff *xmit_skb = NULL;
- static const char * const gro_res_str[] = {
- [GRO_MERGED] = "GRO_MERGED",
- [GRO_MERGED_FREE] = "GRO_MERGED_FREE",
- [GRO_HELD] = "GRO_HELD",
- [GRO_NORMAL] = "GRO_NORMAL",
- [GRO_DROP] = "GRO_DROP",
- [GRO_CONSUMED] = "GRO_CONSUMED",
- };
if (wdev->iftype == NL80211_IFTYPE_STATION) {
sa = wil_skb_get_sa(skb);
if (mcast && ether_addr_equal(sa, ndev->dev_addr)) {
/* mcast packet looped back to us */
- rc = GRO_DROP;
dev_kfree_skb(skb);
- goto stats;
+ ndev->stats.rx_dropped++;
+ stats->rx_dropped++;
+ wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
+ return;
}
} else if (wdev->iftype == NL80211_IFTYPE_AP && !vif->ap_isolate) {
if (mcast) {
@@ -967,26 +960,16 @@ void wil_netif_rx(struct sk_buff *skb, struct net_device *ndev, int cid,
wil_rx_handle_eapol(vif, skb);
if (gro)
- rc = napi_gro_receive(&wil->napi_rx, skb);
+ napi_gro_receive(&wil->napi_rx, skb);
else
netif_rx_ni(skb);
- wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
- len, gro_res_str[rc]);
- }
-stats:
- /* statistics. rc set to GRO_NORMAL for AP bridging */
- if (unlikely(rc == GRO_DROP)) {
- ndev->stats.rx_dropped++;
- stats->rx_dropped++;
- wil_dbg_txrx(wil, "Rx drop %d bytes\n", len);
- } else {
- ndev->stats.rx_packets++;
- stats->rx_packets++;
- ndev->stats.rx_bytes += len;
- stats->rx_bytes += len;
- if (mcast)
- ndev->stats.multicast++;
}
+ ndev->stats.rx_packets++;
+ stats->rx_packets++;
+ ndev->stats.rx_bytes += len;
+ stats->rx_bytes += len;
+ if (mcast)
+ ndev->stats.multicast++;
}
void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 7987a288917b..27116c7d3f4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -271,6 +271,8 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
{
struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
u32 tp = le32_to_cpu(trig->time_point);
+ struct iwl_ucode_tlv *dup = NULL;
+ int ret;
if (le32_to_cpu(tlv->length) < sizeof(*trig))
return -EINVAL;
@@ -283,10 +285,20 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
return -EINVAL;
}
- if (!le32_to_cpu(trig->occurrences))
+ if (!le32_to_cpu(trig->occurrences)) {
+ dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
+ GFP_KERNEL);
+ if (!dup)
+ return -ENOMEM;
+ trig = (void *)dup->data;
trig->occurrences = cpu_to_le32(-1);
+ tlv = dup;
+ }
+
+ ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ kfree(dup);
- return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
+ return ret;
}
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index fee01cbbd3ac..27977992fd7f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1189,17 +1189,15 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
iwl_mvm_change_queue_tid(mvm, i);
+ rcu_read_unlock();
+
if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
alloc_for_sta);
- if (ret) {
- rcu_read_unlock();
+ if (ret)
return ret;
- }
}
- rcu_read_unlock();
-
return free_queue;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 65d65c6baf4c..e02bafb8921f 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -582,6 +582,8 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x30DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0x31DC, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
+ IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index dfe625a53c63..3d7db6ffb599 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -301,6 +301,7 @@ struct mt76_hw_cap {
#define MT_DRV_TX_ALIGNED4_SKBS BIT(1)
#define MT_DRV_SW_RX_AIRTIME BIT(2)
#define MT_DRV_RX_DMA_HDR BIT(3)
+#define MT_DRV_HW_MGMT_TXQ BIT(4)
struct mt76_driver_ops {
u32 drv_flags;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 26cb711b465f..83dfa6da4761 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -642,8 +642,10 @@ mt7603_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7603_dev *dev = hw->priv;
+ mutex_lock(&dev->mt76.mutex);
dev->coverage_class = max_t(s16, coverage_class, 0);
mt7603_mac_set_timing(dev);
+ mutex_unlock(&dev->mt76.mutex);
}
static void mt7603_tx(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index fd3ef483a87c..d06afcf46d67 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -234,10 +234,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
int i;
for (i = 0; i < 16; i++) {
- int j, acs = i / 4, index = i % 4;
+ int j, wmm_idx = i % MT7615_MAX_WMM_SETS;
+ int acs = i / MT7615_MAX_WMM_SETS;
u32 ctrl, val, qlen = 0;
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, wmm_idx));
ctrl = BIT(31) | BIT(15) | (acs << 8);
for (j = 0; j < 32; j++) {
@@ -245,11 +246,11 @@ mt7615_queues_acq(struct seq_file *s, void *data)
continue;
mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
- ctrl | (j + (index << 5)));
+ ctrl | (j + (wmm_idx << 5)));
qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
GENMASK(11, 0));
}
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ seq_printf(s, "AC%d%d: queued=%d\n", wmm_idx, acs, qlen);
}
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index 5a124610d4af..e5a965df899a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -36,10 +36,10 @@ static int
mt7622_init_tx_queues_multi(struct mt7615_dev *dev)
{
static const u8 wmm_queue_map[] = {
- MT7622_TXQ_AC0,
- MT7622_TXQ_AC1,
- MT7622_TXQ_AC2,
- MT7622_TXQ_AC3,
+ [IEEE80211_AC_BK] = MT7622_TXQ_AC0,
+ [IEEE80211_AC_BE] = MT7622_TXQ_AC1,
+ [IEEE80211_AC_VI] = MT7622_TXQ_AC2,
+ [IEEE80211_AC_VO] = MT7622_TXQ_AC3,
};
int ret;
int i;
@@ -100,6 +100,7 @@ mt7615_tx_cleanup(struct mt7615_dev *dev)
int i;
mt76_queue_tx_cleanup(dev, MT_TXQ_MCU, false);
+ mt76_queue_tx_cleanup(dev, MT_TXQ_PSD, false);
if (is_mt7615(&dev->mt76)) {
mt76_queue_tx_cleanup(dev, MT_TXQ_BE, false);
} else {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
index edac37e7847b..22e4eabe6578 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c
@@ -72,8 +72,7 @@ static int mt7615_eeprom_load(struct mt7615_dev *dev, u32 addr)
{
int ret;
- ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_SIZE +
- MT7615_EEPROM_EXTRA_DATA);
+ ret = mt76_eeprom_init(&dev->mt76, MT7615_EEPROM_FULL_SIZE);
if (ret < 0)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
index 40fed7adc58a..a024dee10362 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.h
@@ -17,7 +17,7 @@
#define MT7615_EEPROM_TXDPD_SIZE 216
#define MT7615_EEPROM_TXDPD_COUNT (44 + 3)
-#define MT7615_EEPROM_EXTRA_DATA (MT7615_EEPROM_TXDPD_OFFSET + \
+#define MT7615_EEPROM_FULL_SIZE (MT7615_EEPROM_TXDPD_OFFSET + \
MT7615_EEPROM_TXDPD_COUNT * \
MT7615_EEPROM_TXDPD_SIZE)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index 9f1c6ca7a665..d97315ec7265 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -526,22 +526,16 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
fc_type = (le16_to_cpu(fc) & IEEE80211_FCTL_FTYPE) >> 2;
fc_stype = (le16_to_cpu(fc) & IEEE80211_FCTL_STYPE) >> 4;
- if (ieee80211_is_data(fc) || ieee80211_is_bufferable_mmpdu(fc)) {
- q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
- skb_get_queue_mapping(skb);
- p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
- } else if (beacon) {
- if (ext_phy)
- q_idx = MT_LMAC_BCN1;
- else
- q_idx = MT_LMAC_BCN0;
+ if (beacon) {
p_fmt = MT_TX_TYPE_FW;
+ q_idx = ext_phy ? MT_LMAC_BCN1 : MT_LMAC_BCN0;
+ } else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
+ p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+ q_idx = ext_phy ? MT_LMAC_ALTX1 : MT_LMAC_ALTX0;
} else {
- if (ext_phy)
- q_idx = MT_LMAC_ALTX1;
- else
- q_idx = MT_LMAC_ALTX0;
p_fmt = is_usb ? MT_TX_TYPE_SF : MT_TX_TYPE_CT;
+ q_idx = wmm_idx * MT7615_MAX_WMM_SETS +
+ mt7615_lmac_mapping(dev, skb_get_queue_mapping(skb));
}
val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
index f0d4b29a52a2..81608ab656b8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.h
@@ -124,21 +124,6 @@ enum tx_pkt_type {
MT_TX_TYPE_FW,
};
-enum tx_pkt_queue_idx {
- MT_LMAC_AC00,
- MT_LMAC_AC01,
- MT_LMAC_AC02,
- MT_LMAC_AC03,
- MT_LMAC_ALTX0 = 0x10,
- MT_LMAC_BMC0,
- MT_LMAC_BCN0,
- MT_LMAC_PSMP0,
- MT_LMAC_ALTX1,
- MT_LMAC_BMC1,
- MT_LMAC_BCN1,
- MT_LMAC_PSMP1,
-};
-
enum tx_port_idx {
MT_TX_PORT_IDX_LMAC,
MT_TX_PORT_IDX_MCU
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index c26f99b368d9..beaca8127680 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -397,6 +397,7 @@ mt7615_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct mt7615_dev *dev = mt7615_hw_dev(hw);
+ queue = mt7615_lmac_mapping(dev, queue);
queue += mvif->wmm_idx * MT7615_MAX_WMM_SETS;
return mt7615_mcu_set_wmm(dev, queue, params);
@@ -735,9 +736,12 @@ static void
mt7615_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
+ struct mt7615_dev *dev = phy->dev;
+ mutex_lock(&dev->mt76.mutex);
phy->coverage_class = max_t(s16, coverage_class, 0);
mt7615_mac_set_timing(phy);
+ mutex_unlock(&dev->mt76.mutex);
}
static int
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
index e670393506f0..2e99845b9c96 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mmio.c
@@ -146,7 +146,7 @@ int mt7615_mmio_probe(struct device *pdev, void __iomem *mem_base,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7615_txp_common),
- .drv_flags = MT_DRV_TXWI_NO_FREE,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index d6176d316bee..3e7d51bf42a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -282,6 +282,21 @@ struct mt7615_dev {
struct list_head wd_head;
};
+enum tx_pkt_queue_idx {
+ MT_LMAC_AC00,
+ MT_LMAC_AC01,
+ MT_LMAC_AC02,
+ MT_LMAC_AC03,
+ MT_LMAC_ALTX0 = 0x10,
+ MT_LMAC_BMC0,
+ MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
+ MT_LMAC_ALTX1,
+ MT_LMAC_BMC1,
+ MT_LMAC_BCN1,
+ MT_LMAC_PSMP1,
+};
+
enum {
HW_BSSID_0 = 0x0,
HW_BSSID_1,
@@ -447,6 +462,21 @@ static inline u16 mt7615_wtbl_size(struct mt7615_dev *dev)
return MT7615_WTBL_SIZE;
}
+static inline u8 mt7615_lmac_mapping(struct mt7615_dev *dev, u8 ac)
+{
+ static const u8 lmac_queue_map[] = {
+ [IEEE80211_AC_BK] = MT_LMAC_AC00,
+ [IEEE80211_AC_BE] = MT_LMAC_AC01,
+ [IEEE80211_AC_VI] = MT_LMAC_AC02,
+ [IEEE80211_AC_VO] = MT_LMAC_AC03,
+ };
+
+ if (WARN_ON_ONCE(ac >= ARRAY_SIZE(lmac_queue_map)))
+ return MT_LMAC_AC01; /* BE */
+
+ return lmac_queue_map[ac];
+}
+
void mt7615_dma_reset(struct mt7615_dev *dev);
void mt7615_scan_work(struct work_struct *work);
void mt7615_roc_work(struct work_struct *work);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index a50077eb24d7..5be6704770ad 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -270,7 +270,7 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
{
static const struct mt76_driver_ops drv_ops = {
.txwi_size = MT_USB_TXD_SIZE,
- .drv_flags = MT_DRV_RX_DMA_HDR,
+ .drv_flags = MT_DRV_RX_DMA_HDR | MT_DRV_HW_MGMT_TXQ,
.tx_prepare_skb = mt7663u_tx_prepare_skb,
.tx_complete_skb = mt7663u_tx_complete_skb,
.tx_status_data = mt7663u_tx_status_data,
@@ -329,25 +329,26 @@ static int mt7663u_probe(struct usb_interface *usb_intf,
if (!mt76_poll_msec(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_PWR_ON,
FW_STATE_PWR_ON << 1, 500)) {
dev_err(dev->mt76.dev, "Timeout for power on\n");
- return -EIO;
+ ret = -EIO;
+ goto error;
}
alloc_queues:
ret = mt76u_alloc_mcu_queue(&dev->mt76);
if (ret)
- goto error;
+ goto error_free_q;
ret = mt76u_alloc_queues(&dev->mt76);
if (ret)
- goto error;
+ goto error_free_q;
ret = mt7663u_register_device(dev);
if (ret)
- goto error_freeq;
+ goto error_free_q;
return 0;
-error_freeq:
+error_free_q:
mt76u_queues_deinit(&dev->mt76);
error:
mt76u_deinit(&dev->mt76);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index cbbe986655fe..5fda6e7b120c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -456,8 +456,9 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
tasklet_disable(&dev->mt76.tx_tasklet);
napi_disable(&dev->mt76.tx_napi);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++)
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_disable(&dev->mt76.napi[i]);
+ }
mutex_lock(&dev->mt76.mutex);
@@ -515,7 +516,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
- for (i = 0; i < ARRAY_SIZE(dev->mt76.napi); i++) {
+ mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
napi_schedule(&dev->mt76.napi[i]);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 0575c259f245..05b5650c56c8 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -716,9 +716,12 @@ static void
mt7915_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ mutex_lock(&dev->mt76.mutex);
phy->coverage_class = max_t(s16, coverage_class, 0);
mt7915_mac_set_timing(phy);
+ mutex_unlock(&dev->mt76.mutex);
}
static int
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index fca38ea2441f..f10c98aa883c 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -264,6 +264,13 @@ mt76_tx(struct mt76_phy *phy, struct ieee80211_sta *sta,
skb_set_queue_mapping(skb, qid);
}
+ if ((dev->drv->drv_flags & MT_DRV_HW_MGMT_TXQ) &&
+ !ieee80211_is_data(hdr->frame_control) &&
+ !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+ qid = MT_TXQ_PSD;
+ skb_set_queue_mapping(skb, qid);
+ }
+
if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
ieee80211_get_tx_rates(info->control.vif, sta, skb,
info->control.rates, 1);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index fb97ea25b4d4..87382b2f7443 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1010,17 +1010,18 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
static u8 mt76u_ac_to_hwq(struct mt76_dev *dev, u8 ac)
{
if (mt76_chip(dev) == 0x7663) {
- static const u8 wmm_queue_map[] = {
- [IEEE80211_AC_VO] = 0,
- [IEEE80211_AC_VI] = 1,
- [IEEE80211_AC_BE] = 2,
- [IEEE80211_AC_BK] = 4,
+ static const u8 lmac_queue_map[] = {
+ /* ac to lmac mapping */
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 4,
};
- if (WARN_ON(ac >= ARRAY_SIZE(wmm_queue_map)))
- return 2; /* BE */
+ if (WARN_ON(ac >= ARRAY_SIZE(lmac_queue_map)))
+ return 1; /* BE */
- return wmm_queue_map[ac];
+ return lmac_queue_map[ac];
}
return mt76_ac_to_hwq(ac);
@@ -1066,11 +1067,16 @@ static int mt76u_alloc_tx(struct mt76_dev *dev)
static void mt76u_free_tx(struct mt76_dev *dev)
{
- struct mt76_queue *q;
- int i, j;
+ int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
+ struct mt76_queue *q;
+ int j;
+
q = dev->q_tx[i].q;
+ if (!q)
+ continue;
+
for (j = 0; j < q->ndesc; j++)
usb_free_urb(q->entry[j].urb);
}
@@ -1078,17 +1084,22 @@ static void mt76u_free_tx(struct mt76_dev *dev)
void mt76u_stop_tx(struct mt76_dev *dev)
{
- struct mt76_queue_entry entry;
- struct mt76_queue *q;
- int i, j, ret;
+ int ret;
ret = wait_event_timeout(dev->tx_wait, !mt76_has_tx_pending(&dev->phy),
HZ / 5);
if (!ret) {
+ struct mt76_queue_entry entry;
+ struct mt76_queue *q;
+ int i, j;
+
dev_err(dev->dev, "timed out waiting for pending tx\n");
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->q_tx[i].q;
+ if (!q)
+ continue;
+
for (j = 0; j < q->ndesc; j++)
usb_kill_urb(q->entry[j].urb);
}
@@ -1100,6 +1111,8 @@ void mt76u_stop_tx(struct mt76_dev *dev)
*/
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->q_tx[i].q;
+ if (!q)
+ continue;
/* Assure we are in sync with killed tasklet. */
spin_lock_bh(&q->lock);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 482c6c8b0fb7..88280057e032 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -63,6 +63,8 @@ module_param_named(max_queues, xennet_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
"Maximum number of queues per virtual interface");
+#define XENNET_TIMEOUT (5 * HZ)
+
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
@@ -1334,12 +1336,15 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
- xenbus_switch_state(dev, XenbusStateInitialising);
- wait_event(module_wq,
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateClosed &&
- xenbus_read_driver_state(dev->otherend) !=
- XenbusStateUnknown);
+ do {
+ xenbus_switch_state(dev, XenbusStateInitialising);
+ err = wait_event_timeout(module_wq,
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateClosed &&
+ xenbus_read_driver_state(dev->otherend) !=
+ XenbusStateUnknown, XENNET_TIMEOUT);
+ } while (!err);
+
return netdev;
exit:
@@ -2139,28 +2144,43 @@ static const struct attribute_group xennet_dev_group = {
};
#endif /* CONFIG_SYSFS */
-static int xennet_remove(struct xenbus_device *dev)
+static void xennet_bus_close(struct xenbus_device *dev)
{
- struct netfront_info *info = dev_get_drvdata(&dev->dev);
-
- dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ int ret;
- if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
+ return;
+ do {
xenbus_switch_state(dev, XenbusStateClosing);
- wait_event(module_wq,
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosing ||
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateUnknown);
+ ret = wait_event_timeout(module_wq,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosing ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown,
+ XENNET_TIMEOUT);
+ } while (!ret);
+
+ if (xenbus_read_driver_state(dev->otherend) == XenbusStateClosed)
+ return;
+ do {
xenbus_switch_state(dev, XenbusStateClosed);
- wait_event(module_wq,
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateClosed ||
- xenbus_read_driver_state(dev->otherend) ==
- XenbusStateUnknown);
- }
+ ret = wait_event_timeout(module_wq,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown,
+ XENNET_TIMEOUT);
+ } while (!ret);
+}
+
+static int xennet_remove(struct xenbus_device *dev)
+{
+ struct netfront_info *info = dev_get_drvdata(&dev->dev);
+ xennet_bus_close(dev);
xennet_disconnect_backend(info);
if (info->netdev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c
index 91d4d5b28a7d..ba6c486d6465 100644
--- a/drivers/nfc/s3fwrn5/core.c
+++ b/drivers/nfc/s3fwrn5/core.c
@@ -198,6 +198,7 @@ int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb,
case S3FWRN5_MODE_FW:
return s3fwrn5_fw_recv_frame(ndev, skb);
default:
+ kfree_skb(skb);
return -ENODEV;
}
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ccbb5b43b8b2..4502f9c4708d 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -679,18 +679,8 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return a->mode;
}
- if (a == &dev_attr_align.attr) {
- int i;
-
- for (i = 0; i < nd_region->ndr_mappings; i++) {
- struct nd_mapping *nd_mapping = &nd_region->mapping[i];
- struct nvdimm *nvdimm = nd_mapping->nvdimm;
-
- if (test_bit(NDD_LABELING, &nvdimm->flags))
- return a->mode;
- }
- return 0;
- }
+ if (a == &dev_attr_align.attr)
+ return a->mode;
if (a != &dev_attr_set_cookie.attr
&& a != &dev_attr_available_size.attr)
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 89b85970912d..4cef69bd3c1b 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -95,7 +95,7 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
struct encrypted_key_payload *epayload;
struct device *dev = &nvdimm->dev;
- keyref = lookup_user_key(id, 0, 0);
+ keyref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyref))
return NULL;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c2c5bc4fb702..add040168e67 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1116,10 +1116,16 @@ static int nvme_identify_ns_descs(struct nvme_ctrl *ctrl, unsigned nsid,
dev_warn(ctrl->device,
"Identify Descriptors failed (%d)\n", status);
/*
- * Don't treat an error as fatal, as we potentially already
- * have a NGUID or EUI-64.
+ * Don't treat non-retryable errors as fatal, as we potentially
+ * already have a NGUID or EUI-64. If we failed with DNR set,
+ * we want to silently ignore the error as we can still
+	 * identify the device, but if the status does not have DNR
+	 * set, we want to propagate the error back specifically for
+	 * the disk revalidation flow to make sure we don't abandon
+	 * the device just because of a temporary retryable error
+	 * (such as path or transport errors).
*/
- if (status > 0 && !(status & NVME_SC_DNR))
+ if (status > 0 && (status & NVME_SC_DNR))
status = 0;
goto free_data;
}
@@ -1974,7 +1980,7 @@ static int __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
- revalidate_disk(ns->head->disk);
+ nvme_mpath_update_disk_size(ns->head->disk);
}
#endif
return 0;
@@ -4174,6 +4180,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
ctrl->dev = dev;
ctrl->ops = ops;
ctrl->quirks = quirks;
+ ctrl->numa_node = NUMA_NO_NODE;
INIT_WORK(&ctrl->scan_work, nvme_scan_work);
INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);
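The Identify Descriptors hunk above inverts the DNR test, where NVME_SC_DNR is the controller's "do not retry" status bit: a permanent (DNR) failure is ignored because the namespace can still be identified by NGUID/EUI-64, while a transient (non-DNR) failure is now propagated so revalidation is not abandoned over a path or transport error. A minimal sketch of that decision, using a hypothetical helper name:

#include <linux/nvme.h>

/* Hypothetical helper, not part of the patch: map an Identify
 * Descriptors status to the value the revalidation path should see.
 */
static int nvme_descs_status_to_errno(int status)
{
	if (status > 0 && (status & NVME_SC_DNR))
		return 0;	/* permanent failure: ignore, keep the ns */
	return status;		/* transient failure (or success): pass on */
}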
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index da78e499947a..66509472fe06 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -409,15 +409,14 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
- lockdep_assert_held(&ns->head->lock);
-
if (!head->disk)
return;
- if (!(head->disk->flags & GENHD_FL_UP))
+ if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
device_add_disk(&head->subsys->dev, head->disk,
nvme_ns_id_attr_groups);
+ mutex_lock(&head->lock);
if (nvme_path_is_optimized(ns)) {
int node, srcu_idx;
@@ -426,9 +425,10 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
__nvme_find_path(head, node);
srcu_read_unlock(&head->srcu, srcu_idx);
}
+ mutex_unlock(&head->lock);
- synchronize_srcu(&ns->head->srcu);
- kblockd_schedule_work(&ns->head->requeue_work);
+ synchronize_srcu(&head->srcu);
+ kblockd_schedule_work(&head->requeue_work);
}
static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
@@ -483,14 +483,12 @@ static inline bool nvme_state_is_live(enum nvme_ana_state state)
static void nvme_update_ns_ana_state(struct nvme_ana_group_desc *desc,
struct nvme_ns *ns)
{
- mutex_lock(&ns->head->lock);
ns->ana_grpid = le32_to_cpu(desc->grpid);
ns->ana_state = desc->state;
clear_bit(NVME_NS_ANA_PENDING, &ns->flags);
if (nvme_state_is_live(ns->ana_state))
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
@@ -640,38 +638,45 @@ static ssize_t ana_state_show(struct device *dev, struct device_attribute *attr,
}
DEVICE_ATTR_RO(ana_state);
-static int nvme_set_ns_ana_state(struct nvme_ctrl *ctrl,
+static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
struct nvme_ana_group_desc *desc, void *data)
{
- struct nvme_ns *ns = data;
+ struct nvme_ana_group_desc *dst = data;
- if (ns->ana_grpid == le32_to_cpu(desc->grpid)) {
- nvme_update_ns_ana_state(desc, ns);
- return -ENXIO; /* just break out of the loop */
- }
+ if (desc->grpid != dst->grpid)
+ return 0;
- return 0;
+ *dst = *desc;
+ return -ENXIO; /* just break out of the loop */
}
void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
{
if (nvme_ctrl_use_ana(ns->ctrl)) {
+ struct nvme_ana_group_desc desc = {
+ .grpid = id->anagrpid,
+ .state = 0,
+ };
+
mutex_lock(&ns->ctrl->ana_lock);
ns->ana_grpid = le32_to_cpu(id->anagrpid);
- nvme_parse_ana_log(ns->ctrl, ns, nvme_set_ns_ana_state);
+ nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
mutex_unlock(&ns->ctrl->ana_lock);
+ if (desc.state) {
+ /* found the group desc: update */
+ nvme_update_ns_ana_state(&desc, ns);
+ }
} else {
- mutex_lock(&ns->head->lock);
ns->ana_state = NVME_ANA_OPTIMIZED;
nvme_mpath_set_live(ns);
- mutex_unlock(&ns->head->lock);
}
if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
- struct backing_dev_info *info =
- ns->head->disk->queue->backing_dev_info;
+ struct gendisk *disk = ns->head->disk;
- info->capabilities |= BDI_CAP_STABLE_WRITES;
+ if (disk)
+ disk->queue->backing_dev_info->capabilities |=
+ BDI_CAP_STABLE_WRITES;
}
}
@@ -686,6 +691,14 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work);
blk_cleanup_queue(head->disk->queue);
+ if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
+ /*
+		 * if device_add_disk wasn't called, prevent the
+		 * disk release path from putting a bogus reference
+		 * on the request queue
+ */
+ head->disk->queue = NULL;
+ }
put_disk(head->disk);
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index c0f4226d3299..1de3f9b827aa 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -364,6 +364,8 @@ struct nvme_ns_head {
spinlock_t requeue_lock;
struct work_struct requeue_work;
struct mutex lock;
+ unsigned long flags;
+#define NVME_NSHEAD_DISK_LIVE 0
struct nvme_ns __rcu *current_path[];
#endif
};
@@ -602,6 +604,16 @@ static inline void nvme_trace_bio_complete(struct request *req,
trace_block_bio_complete(ns->head->disk->queue, req->bio);
}
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (bdev) {
+ bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
+ bdput(bdev);
+ }
+}
+
extern struct device_attribute dev_attr_ana_grpid;
extern struct device_attribute dev_attr_ana_state;
extern struct device_attribute subsys_attr_iopolicy;
@@ -677,6 +689,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
{
}
+static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
+{
+}
#endif /* CONFIG_NVME_MULTIPATH */
#ifdef CONFIG_NVM
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index e2bacd369a88..b1d18f0633c7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1593,7 +1593,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
dev->admin_tagset.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
dev->admin_tagset.timeout = ADMIN_TIMEOUT;
- dev->admin_tagset.numa_node = dev_to_node(dev->dev);
+ dev->admin_tagset.numa_node = dev->ctrl.numa_node;
dev->admin_tagset.cmd_size = sizeof(struct nvme_iod);
dev->admin_tagset.flags = BLK_MQ_F_NO_SCHED;
dev->admin_tagset.driver_data = dev;
@@ -1669,6 +1669,8 @@ static int nvme_pci_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
+ dev->ctrl.numa_node = dev_to_node(dev->dev);
+
nvmeq = &dev->queues[0];
aqa = nvmeq->q_depth - 1;
aqa |= aqa << 16;
@@ -2257,7 +2259,7 @@ static void nvme_dev_add(struct nvme_dev *dev)
if (dev->io_queues[HCTX_TYPE_POLL])
dev->tagset.nr_maps++;
dev->tagset.timeout = NVME_IO_TIMEOUT;
- dev->tagset.numa_node = dev_to_node(dev->dev);
+ dev->tagset.numa_node = dev->ctrl.numa_node;
dev->tagset.queue_depth =
min_t(int, dev->q_depth, BLK_MQ_MAX_DEPTH) - 1;
dev->tagset.cmd_size = sizeof(struct nvme_iod);
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f8f856dc0c67..13506a87a444 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -470,7 +470,7 @@ static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
* Spread I/O queue completion vectors according to their queue index.
* Admin queues can always go on completion vector 0.
*/
- comp_vector = idx == 0 ? idx : idx - 1;
+ comp_vector = (idx == 0 ? idx : idx - 1) % ibdev->num_comp_vectors;
/* Polling queues need direct cq polling context */
if (nvme_rdma_poll_queue(queue))
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3345ec7efaff..79ef2b8e2b3c 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -1532,7 +1532,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
set->ops = &nvme_tcp_admin_mq_ops;
set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
set->reserved_tags = 2; /* connect + keep-alive */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
@@ -1544,7 +1544,7 @@ static struct blk_mq_tag_set *nvme_tcp_alloc_tagset(struct nvme_ctrl *nctrl,
set->ops = &nvme_tcp_mq_ops;
set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
- set->numa_node = NUMA_NO_NODE;
+ set->numa_node = nctrl->numa_node;
set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
set->cmd_size = sizeof(struct nvme_tcp_request);
set->driver_data = ctrl;
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0d54e730cbf2..6344e73c9354 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -340,7 +340,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
- ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
ctrl->admin_tag_set.driver_data = ctrl;
@@ -512,7 +512,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
ctrl->tag_set.ops = &nvme_loop_mq_ops;
ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
ctrl->tag_set.reserved_tags = 1; /* fabric connect */
- ctrl->tag_set.numa_node = NUMA_NO_NODE;
+ ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
diff --git a/drivers/opp/of.c b/drivers/opp/of.c
index 9a5873591a40..314f306140a1 100644
--- a/drivers/opp/of.c
+++ b/drivers/opp/of.c
@@ -902,6 +902,10 @@ static int _of_add_opp_table_v1(struct device *dev, struct opp_table *opp_table)
return -EINVAL;
}
+ mutex_lock(&opp_table->lock);
+ opp_table->parsed_static_opps = 1;
+ mutex_unlock(&opp_table->lock);
+
val = prop->value;
while (nr) {
unsigned long freq = be32_to_cpup(val++) * 1000;
diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c
index e386d4eac407..9a64cf90c291 100644
--- a/drivers/pci/controller/vmd.c
+++ b/drivers/pci/controller/vmd.c
@@ -546,9 +546,10 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
x86_vector_domain);
- irq_domain_free_fwnode(fn);
- if (!vmd->irq_domain)
+ if (!vmd->irq_domain) {
+ irq_domain_free_fwnode(fn);
return -ENODEV;
+ }
pci_add_resource(&resources, &vmd->resources[0]);
pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ce096272f52b..c9338f914a0e 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4638,8 +4638,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
* pcie_wait_for_link_delay - Wait until link is active or inactive
* @pdev: Bridge device
* @active: waiting for active or inactive?
- * @delay: Delay to wait after link has become active (in ms). Specify %0
- * for no delay.
+ * @delay: Delay to wait after link has become active (in ms)
*
* Use this to wait till link becomes active or inactive.
*/
@@ -4680,7 +4679,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
msleep(10);
timeout -= 10;
}
- if (active && ret && delay)
+ if (active && ret)
msleep(delay);
else if (ret != active)
pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
@@ -4801,28 +4800,17 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
if (!pcie_downstream_port(dev))
return;
- /*
- * Per PCIe r5.0, sec 6.6.1, for downstream ports that support
- * speeds > 5 GT/s, we must wait for link training to complete
- * before the mandatory delay.
- *
- * We can only tell when link training completes via DLL Link
- * Active, which is required for downstream ports that support
- * speeds > 5 GT/s (sec 7.5.3.6). Unfortunately some common
- * devices do not implement Link Active reporting even when it's
- * required, so we'll check for that directly instead of checking
- * the supported link speed. We assume devices without Link Active
- * reporting can train in 100 ms regardless of speed.
- */
- if (dev->link_active_reporting) {
- pci_dbg(dev, "waiting for link to train\n");
- if (!pcie_wait_for_link_delay(dev, true, 0)) {
+ if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
+ pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
+ msleep(delay);
+ } else {
+ pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
+ delay);
+ if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
return;
}
}
- pci_dbg(child, "waiting %d ms to become accessible\n", delay);
- msleep(delay);
if (!pci_device_is_present(child)) {
pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
index 1b8e337a29ca..87c4be9dd412 100644
--- a/drivers/perf/arm-cci.c
+++ b/drivers/perf/arm-cci.c
@@ -1718,6 +1718,7 @@ static struct platform_driver cci_pmu_driver = {
.driver = {
.name = DRIVER_NAME,
.of_match_table = arm_cci_pmu_matches,
+ .suppress_bind_attrs = true,
},
.probe = cci_pmu_probe,
.remove = cci_pmu_remove,
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index d50edef91f59..7b7d23f25713 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -1545,6 +1545,7 @@ static struct platform_driver arm_ccn_driver = {
.driver = {
.name = "arm-ccn",
.of_match_table = arm_ccn_match,
+ .suppress_bind_attrs = true,
},
.probe = arm_ccn_probe,
.remove = arm_ccn_remove,
diff --git a/drivers/perf/arm_dsu_pmu.c b/drivers/perf/arm_dsu_pmu.c
index 518d0603e24f..96ed93cc78e6 100644
--- a/drivers/perf/arm_dsu_pmu.c
+++ b/drivers/perf/arm_dsu_pmu.c
@@ -757,6 +757,7 @@ static struct platform_driver dsu_pmu_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(dsu_pmu_of_match),
+ .suppress_bind_attrs = true,
},
.probe = dsu_pmu_device_probe,
.remove = dsu_pmu_device_remove,
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index 48e28ef93a70..4cdb35d166ac 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -742,6 +742,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, smmu_pmu);
smmu_pmu->pmu = (struct pmu) {
+ .module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.pmu_enable = smmu_pmu_enable,
.pmu_disable = smmu_pmu_disable,
@@ -859,6 +860,7 @@ static void smmu_pmu_shutdown(struct platform_device *pdev)
static struct platform_driver smmu_pmu_driver = {
.driver = {
.name = "arm-smmu-v3-pmcg",
+ .suppress_bind_attrs = true,
},
.probe = smmu_pmu_probe,
.remove = smmu_pmu_remove,
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index d80f48798bce..e51ddb6d63ed 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -1226,6 +1226,7 @@ static struct platform_driver arm_spe_pmu_driver = {
.driver = {
.name = DRVNAME,
.of_match_table = of_match_ptr(arm_spe_pmu_of_match),
+ .suppress_bind_attrs = true,
},
.probe = arm_spe_pmu_device_probe,
.remove = arm_spe_pmu_device_remove,
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 90884d14f95f..397540a4b799 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -512,6 +512,7 @@ static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
{
*pmu = (struct ddr_pmu) {
.pmu = (struct pmu) {
+ .module = THIS_MODULE,
.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
.task_ctx_nr = perf_invalid_context,
.attr_groups = attr_groups,
@@ -706,6 +707,7 @@ static struct platform_driver imx_ddr_pmu_driver = {
.driver = {
.name = "imx-ddr-pmu",
.of_match_table = imx_ddr_pmu_dt_ids,
+ .suppress_bind_attrs = true,
},
.probe = ddr_perf_probe,
.remove = ddr_perf_remove,
diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
index 15713faaa07e..5e3645c96443 100644
--- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c
@@ -378,6 +378,7 @@ static int hisi_ddrc_pmu_probe(struct platform_device *pdev)
ddrc_pmu->sccl_id, ddrc_pmu->index_id);
ddrc_pmu->pmu = (struct pmu) {
.name = name,
+ .module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = hisi_uncore_pmu_event_init,
.pmu_enable = hisi_uncore_pmu_enable,
@@ -418,6 +419,7 @@ static struct platform_driver hisi_ddrc_pmu_driver = {
.driver = {
.name = "hisi_ddrc_pmu",
.acpi_match_table = ACPI_PTR(hisi_ddrc_pmu_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = hisi_ddrc_pmu_probe,
.remove = hisi_ddrc_pmu_remove,
diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
index dcc5600788a9..5eb8168029c0 100644
--- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c
@@ -390,6 +390,7 @@ static int hisi_hha_pmu_probe(struct platform_device *pdev)
hha_pmu->sccl_id, hha_pmu->index_id);
hha_pmu->pmu = (struct pmu) {
.name = name,
+ .module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = hisi_uncore_pmu_event_init,
.pmu_enable = hisi_uncore_pmu_enable,
@@ -430,6 +431,7 @@ static struct platform_driver hisi_hha_pmu_driver = {
.driver = {
.name = "hisi_hha_pmu",
.acpi_match_table = ACPI_PTR(hisi_hha_pmu_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = hisi_hha_pmu_probe,
.remove = hisi_hha_pmu_remove,
diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
index 7719ae4e2c56..3e8b5eab5514 100644
--- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c
@@ -380,6 +380,7 @@ static int hisi_l3c_pmu_probe(struct platform_device *pdev)
l3c_pmu->sccl_id, l3c_pmu->index_id);
l3c_pmu->pmu = (struct pmu) {
.name = name,
+ .module = THIS_MODULE,
.task_ctx_nr = perf_invalid_context,
.event_init = hisi_uncore_pmu_event_init,
.pmu_enable = hisi_uncore_pmu_enable,
@@ -420,6 +421,7 @@ static struct platform_driver hisi_l3c_pmu_driver = {
.driver = {
.name = "hisi_l3c_pmu",
.acpi_match_table = ACPI_PTR(hisi_l3c_pmu_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = hisi_l3c_pmu_probe,
.remove = hisi_l3c_pmu_remove,
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 21d6991dbe0b..4da37f650f98 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -1028,6 +1028,7 @@ static struct platform_driver l2_cache_pmu_driver = {
.driver = {
.name = "qcom-l2cache-pmu",
.acpi_match_table = ACPI_PTR(l2_cache_pmu_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = l2_cache_pmu_probe,
.remove = l2_cache_pmu_remove,
diff --git a/drivers/perf/qcom_l3_pmu.c b/drivers/perf/qcom_l3_pmu.c
index 656e830798d9..9ddb577c542b 100644
--- a/drivers/perf/qcom_l3_pmu.c
+++ b/drivers/perf/qcom_l3_pmu.c
@@ -814,6 +814,7 @@ static struct platform_driver qcom_l3_cache_pmu_driver = {
.driver = {
.name = "qcom-l3cache-pmu",
.acpi_match_table = ACPI_PTR(qcom_l3_cache_pmu_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = qcom_l3_cache_pmu_probe,
};
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index 51b31d6ff2c4..aac9823b0c6b 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -1017,6 +1017,7 @@ static struct platform_driver tx2_uncore_driver = {
.driver = {
.name = "tx2-uncore-pmu",
.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
+ .suppress_bind_attrs = true,
},
.probe = tx2_uncore_probe,
.remove = tx2_uncore_remove,
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 46ee6807d533..edac28cd25dd 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1975,6 +1975,7 @@ static struct platform_driver xgene_pmu_driver = {
.name = "xgene-pmu",
.of_match_table = xgene_pmu_of_match,
.acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match),
+ .suppress_bind_attrs = true,
},
};
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index 856927382248..e5842e48a5e0 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -545,13 +545,14 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
struct sun4i_usb_phy_data *data =
container_of(work, struct sun4i_usb_phy_data, detect.work);
struct phy *phy0 = data->phys[0].phy;
- struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
+ struct sun4i_usb_phy *phy;
bool force_session_end, id_notify = false, vbus_notify = false;
int id_det, vbus_det;
- if (phy0 == NULL)
+ if (!phy0)
return;
+ phy = phy_get_drvdata(phy0);
id_det = sun4i_usb_phy0_get_id_det(data);
vbus_det = sun4i_usb_phy0_get_vbus_det(data);
diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-combo.c
index c2a35be4cdfb..360b1eb2ebd6 100644
--- a/drivers/phy/intel/phy-intel-combo.c
+++ b/drivers/phy/intel/phy-intel-combo.c
@@ -134,7 +134,7 @@ static inline void combo_phy_w32_off_mask(void __iomem *base, unsigned int reg,
reg_val = readl(base + reg);
reg_val &= ~mask;
- reg_val |= FIELD_PREP(mask, val);
+ reg_val |= val;
writel(reg_val, base + reg);
}
@@ -169,7 +169,7 @@ static int intel_cbphy_pcie_en_pad_refclk(struct intel_cbphy_iphy *iphy)
return 0;
combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
- PCIE_PHY_CLK_PAD, 0);
+ PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 0));
/* Delay for stable clock PLL */
usleep_range(50, 100);
@@ -192,14 +192,14 @@ static int intel_cbphy_pcie_dis_pad_refclk(struct intel_cbphy_iphy *iphy)
return 0;
combo_phy_w32_off_mask(cbphy->app_base, PCIE_PHY_GEN_CTRL,
- PCIE_PHY_CLK_PAD, 1);
+ PCIE_PHY_CLK_PAD, FIELD_PREP(PCIE_PHY_CLK_PAD, 1));
return 0;
}
static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
{
- enum intel_combo_mode cb_mode = PHY_PCIE_MODE;
+ enum intel_combo_mode cb_mode;
enum aggregated_mode aggr = cbphy->aggr_mode;
struct device *dev = cbphy->dev;
enum intel_phy_mode mode;
@@ -224,6 +224,8 @@ static int intel_cbphy_set_mode(struct intel_combo_phy *cbphy)
cb_mode = SATA0_SATA1_MODE;
break;
+ default:
+ return -EINVAL;
}
ret = regmap_write(cbphy->hsiocfg, REG_COMBO_MODE(cbphy->bid), cb_mode);
@@ -385,7 +387,7 @@ static int intel_cbphy_calibrate(struct phy *phy)
/* trigger auto RX adaptation */
combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
- ADAPT_REQ_MSK, 3);
+ ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 3));
/* Wait RX adaptation to finish */
ret = readl_poll_timeout(cr_base + CR_ADDR(PCS_XF_RX_ADAPT_ACK, id),
val, val & RX_ADAPT_ACK_BIT, 10, 5000);
@@ -396,7 +398,7 @@ static int intel_cbphy_calibrate(struct phy *phy)
/* Stop RX adaptation */
combo_phy_w32_off_mask(cr_base, CR_ADDR(PCS_XF_ATE_OVRD_IN_2, id),
- ADAPT_REQ_MSK, 0);
+ ADAPT_REQ_MSK, FIELD_PREP(ADAPT_REQ_MSK, 0));
return ret;
}
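The three hunks above stop passing raw 0/1/3 values into combo_phy_w32_off_mask(); the helper now ORs in the value as-is, so callers must shift it into the field themselves with FIELD_PREP() from <linux/bitfield.h>. A minimal sketch of that idiom with a made-up mask (the real ADAPT_REQ_MSK/PCIE_PHY_CLK_PAD definitions stay in the driver headers):

	#include <linux/bitfield.h>
	#include <linux/bits.h>

	#define EXAMPLE_MSK	GENMASK(5, 4)	/* assumed field position, illustration only */

	static u32 example_update(u32 reg_val, u32 field_val)
	{
		reg_val &= ~EXAMPLE_MSK;
		/* FIELD_PREP() shifts field_val into bits 5:4, e.g. 3 -> 0x30 */
		reg_val |= FIELD_PREP(EXAMPLE_MSK, field_val);
		return reg_val;
	}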
diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
index a7c6c940a3a8..8af8c6c5cc02 100644
--- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
+++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c
@@ -607,8 +607,8 @@ static int inno_dsidphy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, inno);
inno->phy_base = devm_platform_ioremap_resource(pdev, 0);
- if (!inno->phy_base)
- return -ENOMEM;
+ if (IS_ERR(inno->phy_base))
+ return PTR_ERR(inno->phy_base);
inno->ref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(inno->ref_clk)) {
diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c
index 0a166d5a6414..a174b3c3f010 100644
--- a/drivers/phy/ti/phy-am654-serdes.c
+++ b/drivers/phy/ti/phy-am654-serdes.c
@@ -72,7 +72,7 @@ struct serdes_am654_clk_mux {
#define to_serdes_am654_clk_mux(_hw) \
container_of(_hw, struct serdes_am654_clk_mux, hw)
-static struct regmap_config serdes_am654_regmap_config = {
+static const struct regmap_config serdes_am654_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c
index 30ea5b207285..33c4cf0105a4 100644
--- a/drivers/phy/ti/phy-j721e-wiz.c
+++ b/drivers/phy/ti/phy-j721e-wiz.c
@@ -117,7 +117,7 @@ struct wiz_clk_mux {
struct wiz_clk_divider {
struct clk_hw hw;
struct regmap_field *field;
- struct clk_div_table *table;
+ const struct clk_div_table *table;
struct clk_init_data clk_data;
};
@@ -131,7 +131,7 @@ struct wiz_clk_mux_sel {
struct wiz_clk_div_sel {
struct regmap_field *field;
- struct clk_div_table *table;
+ const struct clk_div_table *table;
const char *node_name;
};
@@ -173,7 +173,7 @@ static struct wiz_clk_mux_sel clk_mux_sel_10g[] = {
},
};
-static struct clk_div_table clk_div_table[] = {
+static const struct clk_div_table clk_div_table[] = {
{ .val = 0, .div = 1, },
{ .val = 1, .div = 2, },
{ .val = 2, .div = 4, },
@@ -559,7 +559,7 @@ static const struct clk_ops wiz_clk_div_ops = {
static int wiz_div_clk_register(struct wiz *wiz, struct device_node *node,
struct regmap_field *field,
- struct clk_div_table *table)
+ const struct clk_div_table *table)
{
struct device *dev = wiz->dev;
struct wiz_clk_divider *div;
@@ -756,7 +756,7 @@ static const struct reset_control_ops wiz_phy_reset_ops = {
.deassert = wiz_phy_reset_deassert,
};
-static struct regmap_config wiz_regmap_config = {
+static const struct regmap_config wiz_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c
index cb7e0f08d2cf..1f81569c7ae3 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx.c
@@ -824,13 +824,12 @@ int imx_pinctrl_probe(struct platform_device *pdev,
return -EINVAL;
}
- ipctl->input_sel_base = devm_of_iomap(&pdev->dev, np,
- 0, NULL);
+ ipctl->input_sel_base = of_iomap(np, 0);
of_node_put(np);
- if (IS_ERR(ipctl->input_sel_base)) {
+ if (!ipctl->input_sel_base) {
dev_err(&pdev->dev,
"iomuxc input select base address not found\n");
- return PTR_ERR(ipctl->input_sel_base);
+ return -ENOMEM;
}
}
}
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 0ff7c55173da..615174a9d1e0 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -800,6 +800,21 @@ static void byt_gpio_disable_free(struct pinctrl_dev *pctl_dev,
pm_runtime_put(vg->dev);
}
+static void byt_gpio_direct_irq_check(struct intel_pinctrl *vg,
+ unsigned int offset)
+{
+ void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
+
+ /*
+ * Before making any direction modifications, check whether the GPIO is set
+ * for direct IRQ. On Bay Trail, setting GPIO to output does not make
+ * sense, so let's at least inform the caller before they shoot
+ * themselves in the foot.
+ */
+ if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
+ dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+}
+
static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
struct pinctrl_gpio_range *range,
unsigned int offset,
@@ -807,7 +822,6 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
{
struct intel_pinctrl *vg = pinctrl_dev_get_drvdata(pctl_dev);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
- void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
unsigned long flags;
u32 value;
@@ -817,14 +831,8 @@ static int byt_gpio_set_direction(struct pinctrl_dev *pctl_dev,
value &= ~BYT_DIR_MASK;
if (input)
value |= BYT_OUTPUT_EN;
- else if (readl(conf_reg) & BYT_DIRECT_IRQ_EN)
- /*
- * Before making any direction modifications, do a check if gpio
- * is set for direct IRQ. On baytrail, setting GPIO to output
- * does not make sense, so let's at least inform the caller before
- * they shoot themselves in the foot.
- */
- dev_info_once(vg->dev, "Potential Error: Setting GPIO with direct_irq_en to output");
+ else
+ byt_gpio_direct_irq_check(vg, offset);
writel(value, val_reg);
@@ -1165,19 +1173,50 @@ static int byt_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return pinctrl_gpio_direction_input(chip->base + offset);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 reg;
+
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ reg = readl(val_reg);
+ reg &= ~BYT_DIR_MASK;
+ reg |= BYT_OUTPUT_EN;
+ writel(reg, val_reg);
+
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
+ return 0;
}
+/*
+ * Note: despite the temptation, this MUST NOT be converted into a call to
+ * pinctrl_gpio_direction_output() + byt_gpio_set(); that does not work, this
+ * MUST be done as a single BYT_VAL_REG register write.
+ * See the commit message of the commit adding this comment for details.
+ */
static int byt_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int value)
{
- int ret = pinctrl_gpio_direction_output(chip->base + offset);
+ struct intel_pinctrl *vg = gpiochip_get_data(chip);
+ void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ unsigned long flags;
+ u32 reg;
- if (ret)
- return ret;
+ raw_spin_lock_irqsave(&byt_lock, flags);
+
+ byt_gpio_direct_irq_check(vg, offset);
- byt_gpio_set(chip, offset, value);
+ reg = readl(val_reg);
+ reg &= ~BYT_DIR_MASK;
+ if (value)
+ reg |= BYT_LEVEL;
+ else
+ reg &= ~BYT_LEVEL;
+ writel(reg, val_reg);
+
+ raw_spin_unlock_irqrestore(&byt_lock, flags);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-amd.h b/drivers/pinctrl/pinctrl-amd.h
index 3e5760f1a715..d4a192df5fab 100644
--- a/drivers/pinctrl/pinctrl-amd.h
+++ b/drivers/pinctrl/pinctrl-amd.h
@@ -252,7 +252,7 @@ static const struct amd_pingroup kerncz_groups[] = {
{
.name = "uart0",
.pins = uart0_pins,
- .npins = 9,
+ .npins = 5,
},
{
.name = "uart1",
diff --git a/drivers/pinctrl/pinctrl-mcp23s08_spi.c b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
index e06fb885fd2b..1f47a661b0a7 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08_spi.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08_spi.c
@@ -126,10 +126,7 @@ static int mcp23s08_spi_regmap_init(struct mcp23s08 *mcp, struct device *dev,
copy->name = name;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, copy);
- if (IS_ERR(mcp->regmap))
- return PTR_ERR(mcp->regmap);
-
- return 0;
+ return PTR_ERR_OR_ZERO(mcp->regmap);
}
static int mcp23s08_probe(struct spi_device *spi)
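PTR_ERR_OR_ZERO() used above is simply the usual IS_ERR()/PTR_ERR() tail collapsed into one call; conceptually (a sketch, not the <linux/err.h> definition verbatim):

	static inline int ptr_err_or_zero_sketch(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);
		return 0;
	}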
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 1e0614daee9b..f3a8a465d27e 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -958,7 +958,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np,
}
/**
- * smux_parse_one_pinctrl_entry() - parses a device tree mux entry
+ * pcs_parse_one_pinctrl_entry() - parses a device tree mux entry
* @pctldev: pin controller device
* @pcs: pinctrl driver instance
* @np: device node of the mux entry
diff --git a/drivers/pinctrl/qcom/pinctrl-ipq6018.c b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
index 38c33a778cb8..ec50a3b4bd16 100644
--- a/drivers/pinctrl/qcom/pinctrl-ipq6018.c
+++ b/drivers/pinctrl/qcom/pinctrl-ipq6018.c
@@ -367,7 +367,8 @@ static const char * const wci20_groups[] = {
static const char * const qpic_pad_groups[] = {
"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio9", "gpio10",
- "gpio11", "gpio17",
+ "gpio11", "gpio17", "gpio15", "gpio12", "gpio13", "gpio14", "gpio5",
+ "gpio6", "gpio7", "gpio8",
};
static const char * const burn0_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index fe0be8a6ebb7..092a48e4dff5 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -170,6 +170,7 @@ struct pmic_gpio_state {
struct regmap *map;
struct pinctrl_dev *ctrl;
struct gpio_chip chip;
+ struct irq_chip irq;
};
static const struct pinconf_generic_params pmic_gpio_bindings[] = {
@@ -917,16 +918,6 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state,
return 0;
}
-static struct irq_chip pmic_gpio_irq_chip = {
- .name = "spmi-gpio",
- .irq_ack = irq_chip_ack_parent,
- .irq_mask = irq_chip_mask_parent,
- .irq_unmask = irq_chip_unmask_parent,
- .irq_set_type = irq_chip_set_type_parent,
- .irq_set_wake = irq_chip_set_wake_parent,
- .flags = IRQCHIP_MASK_ON_SUSPEND,
-};
-
static int pmic_gpio_domain_translate(struct irq_domain *domain,
struct irq_fwspec *fwspec,
unsigned long *hwirq,
@@ -1053,8 +1044,16 @@ static int pmic_gpio_probe(struct platform_device *pdev)
if (!parent_domain)
return -ENXIO;
+ state->irq.name = "spmi-gpio",
+ state->irq.irq_ack = irq_chip_ack_parent,
+ state->irq.irq_mask = irq_chip_mask_parent,
+ state->irq.irq_unmask = irq_chip_unmask_parent,
+ state->irq.irq_set_type = irq_chip_set_type_parent,
+ state->irq.irq_set_wake = irq_chip_set_wake_parent,
+ state->irq.flags = IRQCHIP_MASK_ON_SUSPEND,
+
girq = &state->chip.irq;
- girq->chip = &pmic_gpio_irq_chip;
+ girq->chip = &state->irq;
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_level_irq;
girq->fwnode = of_node_to_fwnode(state->dev->of_node);
diff --git a/drivers/pinctrl/tegra/pinctrl-tegra.c b/drivers/pinctrl/tegra/pinctrl-tegra.c
index 21661f6490d6..195cfe557511 100644
--- a/drivers/pinctrl/tegra/pinctrl-tegra.c
+++ b/drivers/pinctrl/tegra/pinctrl-tegra.c
@@ -731,8 +731,8 @@ static int tegra_pinctrl_resume(struct device *dev)
}
const struct dev_pm_ops tegra_pinctrl_pm = {
- .suspend = &tegra_pinctrl_suspend,
- .resume = &tegra_pinctrl_resume
+ .suspend_noirq = &tegra_pinctrl_suspend,
+ .resume_noirq = &tegra_pinctrl_resume
};
static bool tegra_pinctrl_gpio_node_has_range(struct tegra_pmx *pmx)
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 877aade19497..8f4acdc06b13 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -441,6 +441,7 @@ static int asus_wmi_battery_add(struct power_supply *battery)
* battery is named BATT.
*/
if (strcmp(battery->desc->name, "BAT0") != 0 &&
+ strcmp(battery->desc->name, "BAT1") != 0 &&
strcmp(battery->desc->name, "BATT") != 0)
return -ENODEV;
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h
index 1409a5bb5582..4f6f7f0761fc 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.h
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.h
@@ -13,6 +13,9 @@
#define INTEL_RAPL_PRIO_DEVID_0 0x3451
#define INTEL_CFG_MBOX_DEVID_0 0x3459
+#define INTEL_RAPL_PRIO_DEVID_1 0x3251
+#define INTEL_CFG_MBOX_DEVID_1 0x3259
+
/*
* Validate maximum commands in a single request.
* This is enough to handle command to every core in one ioctl, or all
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
index d84e2174cbde..95f01e7a87d5 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mbox_pci.c
@@ -147,6 +147,7 @@ static long isst_if_mbox_proc_cmd(u8 *cmd_ptr, int *write_only, int resume)
static const struct pci_device_id isst_if_mbox_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_0)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_CFG_MBOX_DEVID_1)},
{ 0 },
};
MODULE_DEVICE_TABLE(pci, isst_if_mbox_ids);
diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
index 3584859fcc42..aa17fd7817f8 100644
--- a/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
+++ b/drivers/platform/x86/intel_speed_select_if/isst_if_mmio.c
@@ -72,6 +72,7 @@ static long isst_if_mmio_rd_wr(u8 *cmd_ptr, int *write_only, int resume)
static const struct pci_device_id isst_if_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_0)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, INTEL_RAPL_PRIO_DEVID_1)},
{ 0 },
};
MODULE_DEVICE_TABLE(pci, isst_if_ids);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index ff7f0a4f2475..0f6fceda5fc0 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -885,11 +885,19 @@ static ssize_t dispatch_proc_write(struct file *file,
if (!ibm || !ibm->write)
return -EINVAL;
+ if (count > PAGE_SIZE - 1)
+ return -EINVAL;
+
+ kernbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kernbuf)
+ return -ENOMEM;
- kernbuf = strndup_user(userbuf, PAGE_SIZE);
- if (IS_ERR(kernbuf))
- return PTR_ERR(kernbuf);
+ if (copy_from_user(kernbuf, userbuf, count)) {
+ kfree(kernbuf);
+ return -EFAULT;
+ }
+ kernbuf[count] = 0;
ret = ibm->write(kernbuf);
if (ret == 0)
ret = count;
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 8f677f5d79b4..edb1c4f8b496 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -684,7 +684,7 @@ config REGULATOR_MT6323
config REGULATOR_MT6358
tristate "MediaTek MT6358 PMIC"
- depends on MFD_MT6397 && BROKEN
+ depends on MFD_MT6397
help
Say y here to select this option to enable the power regulator of
MediaTek MT6358 PMIC.
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index e8f163371071..0796e4a47afa 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o
obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o
obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o
obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o
-obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
+obj-$(CONFIG_REGULATOR_DA903X) += da903x-regulator.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
obj-$(CONFIG_REGULATOR_DA9062) += da9062-regulator.o
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x-regulator.c
index 770e694824ac..770e694824ac 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x-regulator.c
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index e1d6c8f6d40b..fe65b5acaf28 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -512,7 +512,6 @@ static const struct da9063_regulator_info da9063_regulator_info[] = {
},
{
DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
- .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
},
{
DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
index e970e9d2f8be..e4bb09bbd3fa 100644
--- a/drivers/regulator/helpers.c
+++ b/drivers/regulator/helpers.c
@@ -486,7 +486,7 @@ int regulator_map_voltage_pickable_linear_range(struct regulator_dev *rdev,
continue;
}
- ret = selector + sel;
+ ret = selector + sel - range->min_sel;
voltage = rdev->desc->ops->list_voltage(rdev, ret);
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 689537927f6f..4c8e8b472287 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -209,6 +209,19 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
};
+static const struct regulator_ops pfuze3000_sw_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+
+};
+
#define PFUZE100_FIXED_REG(_chip, _name, base, voltage) \
[_chip ## _ ## _name] = { \
.desc = { \
@@ -318,23 +331,28 @@ static const struct regulator_ops pfuze100_swb_regulator_ops = {
.stby_mask = 0x20, \
}
-
-#define PFUZE3000_SW2_REG(_chip, _name, base, min, max, step) { \
- .desc = { \
- .name = #_name,\
- .n_voltages = ((max) - (min)) / (step) + 1, \
- .ops = &pfuze100_sw_regulator_ops, \
- .type = REGULATOR_VOLTAGE, \
- .id = _chip ## _ ## _name, \
- .owner = THIS_MODULE, \
- .min_uV = (min), \
- .uV_step = (step), \
- .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
- .vsel_mask = 0x7, \
- }, \
- .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
- .stby_mask = 0x7, \
-}
+/* No linear case for some of the switches of PFUZE3000 */
+#define PFUZE3000_SW_REG(_chip, _name, base, mask, voltages) \
+ [_chip ## _ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(voltages), \
+ .ops = &pfuze3000_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = _chip ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ .volt_table = voltages, \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = (mask), \
+ .enable_reg = (base) + PFUZE100_MODE_OFFSET, \
+ .enable_mask = 0xf, \
+ .enable_val = 0x8, \
+ .enable_time = 500, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = (mask), \
+ .sw_reg = true, \
+ }
#define PFUZE3000_SW3_REG(_chip, _name, base, min, max, step) { \
.desc = { \
@@ -391,9 +409,9 @@ static struct pfuze_regulator pfuze200_regulators[] = {
};
static struct pfuze_regulator pfuze3000_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3000, SW1A, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
PFUZE100_SW_REG(PFUZE3000, SW1B, PFUZE100_SW1CVOL, 700000, 1475000, 25000),
- PFUZE100_SWB_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3000, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3000, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3000, SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
PFUZE100_SWB_REG(PFUZE3000, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
@@ -407,8 +425,8 @@ static struct pfuze_regulator pfuze3000_regulators[] = {
};
static struct pfuze_regulator pfuze3001_regulators[] = {
- PFUZE100_SWB_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
- PFUZE100_SWB_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
+ PFUZE3000_SW_REG(PFUZE3001, SW1, PFUZE100_SW1ABVOL, 0x1f, pfuze3000_sw1a),
+ PFUZE3000_SW_REG(PFUZE3001, SW2, PFUZE100_SW2VOL, 0x7, pfuze3000_sw2lo),
PFUZE3000_SW3_REG(PFUZE3001, SW3, PFUZE100_SW3AVOL, 900000, 1650000, 50000),
PFUZE100_SWB_REG(PFUZE3001, VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
PFUZE100_VGEN_REG(PFUZE3001, VLDO1, PFUZE100_VGEN1VOL, 1800000, 3300000, 100000),
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 53a64d856926..7f5c318c8259 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -821,7 +821,7 @@ static const struct rpm_regulator_data rpm_pm8994_regulators[] = {
static const struct rpm_regulator_data rpm_pmi8994_regulators[] = {
{ "s1", QCOM_SMD_RPM_SMPB, 1, &pmi8994_ftsmps, "vdd_s1" },
{ "s2", QCOM_SMD_RPM_SMPB, 2, &pmi8994_hfsmps, "vdd_s2" },
- { "s2", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
+ { "s3", QCOM_SMD_RPM_SMPB, 3, &pmi8994_hfsmps, "vdd_s3" },
{ "boost-bypass", QCOM_SMD_RPM_BBYB, 1, &pmi8994_bby, "vdd_bst_byp" },
{}
};
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index eb13c479e11d..bb1c8402c67d 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,10 +182,9 @@ enum qdio_irq_poll_states {
};
struct qdio_input_q {
- /* first ACK'ed buffer */
- int ack_start;
- /* how many SBALs are acknowledged */
- int ack_count;
+ /* Batch of SBALs that we processed while polling the queue: */
+ unsigned int batch_start;
+ unsigned int batch_count;
/* last time of noticing incoming data */
u64 timestamp;
};
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 286b044fb027..da95c923d81a 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -110,8 +110,8 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "nr_used: %d ftc: %d\n",
atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
- seq_printf(m, "ack start: %d ack count: %d\n",
- q->u.in.ack_start, q->u.in.ack_count);
+ seq_printf(m, "batch start: %u batch count: %u\n",
+ q->u.in.batch_start, q->u.in.batch_count);
seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
*(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_IRQ_DISABLED,
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 610c05f59589..0c919a11a46e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -254,10 +254,17 @@ static inline int set_buf_states(struct qdio_q *q, int bufnr,
if (is_qebsm(q))
return qdio_do_sqbs(q, state, bufnr, count);
+ /* Ensure that all preceding changes to the SBALs are visible: */
+ mb();
+
for (i = 0; i < count; i++) {
- xchg(&q->slsb.val[bufnr], state);
+ WRITE_ONCE(q->slsb.val[bufnr], state);
bufnr = next_buf(bufnr);
}
+
+ /* Make our SLSB changes visible: */
+ mb();
+
return count;
}
@@ -393,15 +400,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
static inline void qdio_stop_polling(struct qdio_q *q)
{
- if (!q->u.in.ack_count)
+ if (!q->u.in.batch_count)
return;
qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
- q->u.in.ack_count = 0;
+ set_buf_states(q, q->u.in.batch_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.batch_count);
+ q->u.in.batch_count = 0;
}
static inline void account_sbals(struct qdio_q *q, unsigned int count)
@@ -441,42 +448,13 @@ static void process_buffer_error(struct qdio_q *q, unsigned int start,
static inline void inbound_handle_work(struct qdio_q *q, unsigned int start,
int count, bool auto_ack)
{
- int new;
-
- if (auto_ack) {
- if (!q->u.in.ack_count) {
- q->u.in.ack_count = count;
- q->u.in.ack_start = start;
- return;
- }
-
- /* delete the previous ACK's */
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
- q->u.in.ack_count = count;
- q->u.in.ack_start = start;
- return;
- }
-
- /*
- * ACK the newest buffer. The ACK will be removed in qdio_stop_polling
- * or by the next inbound run.
- */
- new = add_buf(start, count - 1);
- set_buf_state(q, new, SLSB_P_INPUT_ACK);
-
- /* delete the previous ACKs */
- if (q->u.in.ack_count)
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
+ /* ACK the newest SBAL: */
+ if (!auto_ack)
+ set_buf_state(q, add_buf(start, count - 1), SLSB_P_INPUT_ACK);
- q->u.in.ack_count = 1;
- q->u.in.ack_start = new;
- count--;
- if (!count)
- return;
- /* need to change ALL buffers to get more interrupts */
- set_buf_states(q, start, SLSB_P_INPUT_NOT_INIT, count);
+ if (!q->u.in.batch_count)
+ q->u.in.batch_start = start;
+ q->u.in.batch_count += count;
}
static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
@@ -525,15 +503,18 @@ static int get_inbound_buffer_frontier(struct qdio_q *q, unsigned int start)
account_sbals_error(q, count);
return count;
case SLSB_CU_INPUT_EMPTY:
- case SLSB_P_INPUT_NOT_INIT:
- case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
q->nr, start);
return 0;
+ case SLSB_P_INPUT_NOT_INIT:
+ case SLSB_P_INPUT_ACK:
+ /* We should never see this state, throw a WARN: */
default:
- WARN_ON_ONCE(1);
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+ "found state %#x at index %u on queue %u\n",
+ state, start, q->nr);
return 0;
}
}
@@ -738,11 +719,14 @@ static int get_outbound_buffer_frontier(struct qdio_q *q, unsigned int start)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d",
q->nr);
return 0;
- case SLSB_P_OUTPUT_NOT_INIT:
case SLSB_P_OUTPUT_HALTED:
return 0;
+ case SLSB_P_OUTPUT_NOT_INIT:
+ /* We should never see this state, throw a WARN: */
default:
- WARN_ON_ONCE(1);
+ dev_WARN_ONCE(&q->irq_ptr->cdev->dev, 1,
+ "found state %#x at index %u on queue %u\n",
+ state, start, q->nr);
return 0;
}
}
@@ -938,10 +922,10 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
-static void qdio_handle_activate_check(struct ccw_device *cdev,
- unsigned long intparm, int cstat, int dstat)
+static void qdio_handle_activate_check(struct qdio_irq *irq_ptr,
+ unsigned long intparm, int cstat,
+ int dstat)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
struct qdio_q *q;
DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no);
@@ -968,11 +952,9 @@ no_handler:
lgr_info_log();
}
-static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat,
+static void qdio_establish_handle_irq(struct qdio_irq *irq_ptr, int cstat,
int dstat)
{
- struct qdio_irq *irq_ptr = cdev->private->qdio_data;
-
DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq");
if (cstat)
@@ -1019,7 +1001,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
switch (irq_ptr->state) {
case QDIO_IRQ_STATE_INACTIVE:
- qdio_establish_handle_irq(cdev, cstat, dstat);
+ qdio_establish_handle_irq(irq_ptr, cstat, dstat);
break;
case QDIO_IRQ_STATE_CLEANUP:
qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
@@ -1031,7 +1013,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
return;
}
if (cstat || dstat)
- qdio_handle_activate_check(cdev, intparm, cstat,
+ qdio_handle_activate_check(irq_ptr, intparm, cstat,
dstat);
break;
case QDIO_IRQ_STATE_STOPPED:
@@ -1446,12 +1428,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
qperf_inc(q, inbound_call);
- /* If any ACKed SBALs are returned to HW, adjust ACK tracking: */
- overlap = min(count - sub_buf(q->u.in.ack_start, bufnr),
- q->u.in.ack_count);
+ /* If any processed SBALs are returned to HW, adjust our tracking: */
+ overlap = min_t(int, count - sub_buf(q->u.in.batch_start, bufnr),
+ q->u.in.batch_count);
if (overlap > 0) {
- q->u.in.ack_start = add_buf(q->u.in.ack_start, overlap);
- q->u.in.ack_count -= overlap;
+ q->u.in.batch_start = add_buf(q->u.in.batch_start, overlap);
+ q->u.in.batch_count -= overlap;
}
count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count);
@@ -1535,12 +1517,11 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, unsigned int bufnr, unsigned int count)
{
- struct qdio_irq *irq_ptr;
+ struct qdio_irq *irq_ptr = cdev->private->qdio_data;
if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
return -EINVAL;
- irq_ptr = cdev->private->qdio_data;
if (!irq_ptr)
return -ENODEV;
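The handle_inbound() hunk above trims the polled batch when the caller returns SBALs to the hardware; the index math is modular over the SBAL ring. A self-contained sketch of that adjustment, with the ring size and helper names as assumptions (the driver's own add_buf()/sub_buf() helpers are not reproduced here):

	#define RING_SIZE	128u	/* assumed SBAL ring size for the sketch */

	/* distance from 'from' forward to 'to' on the ring */
	static unsigned int ring_dist(unsigned int to, unsigned int from)
	{
		return (to - from) % RING_SIZE;
	}

	static void trim_batch(unsigned int *batch_start, unsigned int *batch_count,
			       unsigned int bufnr, unsigned int count)
	{
		/* how many of the returned buffers fall inside the tracked batch */
		int overlap = min_t(int, count - ring_dist(*batch_start, bufnr),
				    *batch_count);

		if (overlap > 0) {
			*batch_start = (*batch_start + overlap) % RING_SIZE;
			*batch_count -= overlap;
		}
	}

	/* e.g. batch_start=10, batch_count=5 and returning bufnr=8, count=4
	 * gives overlap=2, leaving batch_start=12, batch_count=3. */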
diff --git a/drivers/s390/cio/vfio_ccw_chp.c b/drivers/s390/cio/vfio_ccw_chp.c
index a646fc81c872..13b26a1c7988 100644
--- a/drivers/s390/cio/vfio_ccw_chp.c
+++ b/drivers/s390/cio/vfio_ccw_chp.c
@@ -8,6 +8,7 @@
* Eric Farman <farman@linux.ibm.com>
*/
+#include <linux/slab.h>
#include <linux/vfio.h>
#include "vfio_ccw_private.h"
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 004ce022fc78..3c3d403abe92 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -195,11 +195,10 @@ static inline struct ep11_cprb *alloc_cprb(size_t payload_len)
size_t len = sizeof(struct ep11_cprb) + payload_len;
struct ep11_cprb *cprb;
- cprb = kmalloc(len, GFP_KERNEL);
+ cprb = kzalloc(len, GFP_KERNEL);
if (!cprb)
return NULL;
- memset(cprb, 0, len);
cprb->cprb_len = sizeof(struct ep11_cprb);
cprb->cprb_ver_id = 0x04;
memcpy(cprb->func_id, "T4", 2);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index db320dab1fee..79f6e8fb03ca 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -577,7 +577,10 @@ static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
ZFCP_STATUS_ERP_TIMEDOUT)) {
req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
zfcp_dbf_rec_run("erscf_1", act);
- req->erp_action = NULL;
+ /* lock-free concurrent access with
+ * zfcp_erp_timeout_handler()
+ */
+ WRITE_ONCE(req->erp_action, NULL);
}
if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
zfcp_dbf_rec_run("erscf_2", act);
@@ -613,8 +616,14 @@ void zfcp_erp_notify(struct zfcp_erp_action *erp_action, unsigned long set_mask)
void zfcp_erp_timeout_handler(struct timer_list *t)
{
struct zfcp_fsf_req *fsf_req = from_timer(fsf_req, t, timer);
- struct zfcp_erp_action *act = fsf_req->erp_action;
+ struct zfcp_erp_action *act;
+ if (fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
+ return;
+ /* lock-free concurrent access with zfcp_erp_strategy_check_fsfreq() */
+ act = READ_ONCE(fsf_req->erp_action);
+ if (!act)
+ return;
zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);
}
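The zfcp hunks above replace a plain load/store of req->erp_action with a WRITE_ONCE()/READ_ONCE() pair, so the timeout handler samples the pointer exactly once and then only uses its local copy. A minimal sketch of the pattern (assuming the only requirement is a tear-free handoff of one pointer, with no ordering against other fields):

	/* clearing side: publish NULL in one tear-free store */
	WRITE_ONCE(req->erp_action, NULL);

	/* timer side: sample once, then never re-read the shared field */
	struct zfcp_erp_action *act = READ_ONCE(fsf_req->erp_action);

	if (act)
		zfcp_erp_notify(act, ZFCP_STATUS_ERP_TIMEDOUT);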
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 957889a42d2e..5730572b52cd 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1372,27 +1372,6 @@ static struct ccw_device_id virtio_ids[] = {
{},
};
-#ifdef CONFIG_PM_SLEEP
-static int virtio_ccw_freeze(struct ccw_device *cdev)
-{
- struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
-
- return virtio_device_freeze(&vcdev->vdev);
-}
-
-static int virtio_ccw_restore(struct ccw_device *cdev)
-{
- struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
- int ret;
-
- ret = virtio_ccw_set_transport_rev(vcdev);
- if (ret)
- return ret;
-
- return virtio_device_restore(&vcdev->vdev);
-}
-#endif
-
static struct ccw_driver virtio_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -1405,11 +1384,6 @@ static struct ccw_driver virtio_ccw_driver = {
.set_online = virtio_ccw_online,
.notify = virtio_ccw_cio_notify,
.int_class = IRQIO_VIR,
-#ifdef CONFIG_PM_SLEEP
- .freeze = virtio_ccw_freeze,
- .thaw = virtio_ccw_restore,
- .restore = virtio_ccw_restore,
-#endif
};
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index d022407e5645..bef47f38dd0d 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -40,6 +40,7 @@ static struct scsi_host_template aic94xx_sht = {
/* .name is initialized */
.name = "aic94xx",
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = asd_scan_finished,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 2e1718f9ade2..09a7669dad4c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1756,6 +1756,7 @@ static struct scsi_host_template sht_v1_hw = {
.proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index e7e7849a4c14..968d38702353 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3532,6 +3532,7 @@ static struct scsi_host_template sht_v2_hw = {
.proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 3e6b78a1f993..55e2321a65bc 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3075,6 +3075,7 @@ static struct scsi_host_template sht_v3_hw = {
.proc_name = DRV_NAME,
.module = THIS_MODULE,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = hisi_sas_slave_configure,
.scan_finished = hisi_sas_scan_finished,
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 7d77997d26d4..7d86f4ca266c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6731,6 +6731,7 @@ static struct scsi_host_template driver_template = {
.compat_ioctl = ipr_ioctl,
#endif
.queuecommand = ipr_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
.eh_host_reset_handler = ipr_eh_host_reset,
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 974c3b9116d5..085e285f427d 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -153,6 +153,7 @@ static struct scsi_host_template isci_sht = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = isci_host_scan_finished,
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 773c45af9387..278d15ff1c5a 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -133,8 +133,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
lockdep_assert_held(&lport->disc.disc_mutex);
rdata = fc_rport_lookup(lport, port_id);
- if (rdata)
+ if (rdata) {
+ kref_put(&rdata->kref, fc_rport_destroy);
return rdata;
+ }
if (lport->rport_priv_size > 0)
rport_priv_size = lport->rport_priv_size;
@@ -481,10 +483,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
fc_rport_state_enter(rdata, RPORT_ST_DELETE);
- kref_get(&rdata->kref);
- if (rdata->event == RPORT_EV_NONE &&
- !queue_work(rport_event_queue, &rdata->event_work))
- kref_put(&rdata->kref, fc_rport_destroy);
+ if (rdata->event == RPORT_EV_NONE) {
+ kref_get(&rdata->kref);
+ if (!queue_work(rport_event_queue, &rdata->event_work))
+ kref_put(&rdata->kref, fc_rport_destroy);
+ }
rdata->event = event;
}
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e5a64d4f255c..49c8a1818baf 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2629,7 +2629,7 @@ struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
"iscsi_q_%d", shost->host_no);
ihost->workq = alloc_workqueue("%s",
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 2, ihost->workq_name);
+ 1, ihost->workq_name);
if (!ihost->workq)
goto free_host;
}
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 69a5249e007a..6637f84a3d1b 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11878,7 +11878,8 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba)
lpfc_sli4_xri_exchange_busy_wait(phba);
/* per-phba callback de-registration for hotplug event */
- lpfc_cpuhp_remove(phba);
+ if (phba->pport)
+ lpfc_cpuhp_remove(phba);
/* Disable PCI subsystem interrupt */
lpfc_sli4_disable_intr(phba);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 319f241da4b6..fcf03f733e41 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3739,10 +3739,8 @@ static irqreturn_t megasas_isr_fusion(int irq, void *devp)
if (instance->mask_interrupts)
return IRQ_NONE;
-#if defined(ENABLE_IRQ_POLL)
if (irq_context->irq_poll_scheduled)
return IRQ_HANDLED;
-#endif
if (!instance->msix_vectors) {
mfiStatus = instance->instancet->clear_intr(instance);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 62e552838565..983e568ff231 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3145,19 +3145,18 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (!ioc->is_warpdrive) {
ioc_err(ioc, "%s: BRM attribute is only for warpdrive\n",
__func__);
- goto out;
+ return 0;
}
/* pci_access_mutex lock acquired by sysfs show path */
mutex_lock(&ioc->pci_access_mutex);
- if (ioc->pci_error_recovery || ioc->remove_host) {
- mutex_unlock(&ioc->pci_access_mutex);
- return 0;
- }
+ if (ioc->pci_error_recovery || ioc->remove_host)
+ goto out;
/* allocate up to GPIOVal 36 entries */
sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36);
io_unit_pg3 = kzalloc(sz, GFP_KERNEL);
if (!io_unit_pg3) {
+ rc = -ENOMEM;
ioc_err(ioc, "%s: failed allocating memory for iounit_pg3: (%d) bytes\n",
__func__, sz);
goto out;
@@ -3167,6 +3166,7 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
0) {
ioc_err(ioc, "%s: failed reading iounit_pg3\n",
__func__);
+ rc = -EINVAL;
goto out;
}
@@ -3174,12 +3174,14 @@ BRM_status_show(struct device *cdev, struct device_attribute *attr,
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
ioc_err(ioc, "%s: iounit_pg3 failed with ioc_status(0x%04x)\n",
__func__, ioc_status);
+ rc = -EINVAL;
goto out;
}
if (io_unit_pg3->GPIOCount < 25) {
ioc_err(ioc, "%s: iounit_pg3->GPIOCount less than 25 entries, detected (%d) entries\n",
__func__, io_unit_pg3->GPIOCount);
+ rc = -EINVAL;
goto out;
}
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 5973eed94938..b0de3bdb01db 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -33,6 +33,7 @@ static struct scsi_host_template mvs_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = mvs_scan_finished,
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index a8f5344fdfda..9e99262a2b9d 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -87,6 +87,7 @@ static struct scsi_host_template pm8001_sht = {
.module = THIS_MODULE,
.name = DRV_NAME,
.queuecommand = sas_queuecommand,
+ .dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
.slave_configure = sas_slave_configure,
.scan_finished = pm8001_scan_finished,
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 42c3ad27f1cb..df670fba2ab8 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3496,7 +3496,9 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
qla2x00_clear_loop_id(fcport);
fcport->flags |= FCF_FABRIC_DEVICE;
} else if (fcport->d_id.b24 != rp->id.b24 ||
- fcport->scan_needed) {
+ (fcport->scan_needed &&
+ fcport->port_type != FCT_INITIATOR &&
+ fcport->port_type != FCT_NVME_INITIATOR)) {
qlt_schedule_sess_for_deletion(fcport);
}
fcport->d_id.b24 = rp->id.b24;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 4576d3ae9937..2436a17f5cd9 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -5944,7 +5944,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha)
break;
}
- if (NVME_TARGET(vha->hw, fcport)) {
+ if (found && NVME_TARGET(vha->hw, fcport)) {
if (fcport->disc_state == DSC_DELETE_PEND) {
qla2x00_set_fcport_disc_state(fcport, DSC_GNL);
vha->fcport_count--;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index d66d47a0f958..fa695a4007f8 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -139,11 +139,12 @@ static void qla_nvme_release_fcp_cmd_kref(struct kref *kref)
sp->priv = NULL;
if (priv->comp_status == QLA_SUCCESS) {
fd->rcv_rsplen = le16_to_cpu(nvme->u.nvme.rsp_pyld_len);
+ fd->status = NVME_SC_SUCCESS;
} else {
fd->rcv_rsplen = 0;
fd->transferred_length = 0;
+ fd->status = NVME_SC_INTERNAL;
}
- fd->status = 0;
spin_unlock_irqrestore(&priv->cmd_lock, flags);
fd->done(fd);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index eed31021e788..ba84244c1b4f 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -239,6 +239,7 @@ static struct {
{"LSI", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"ENGENIO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"LENOVO", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
+ {"FUJITSU", "Universal Xport", "*", BLIST_NO_ULD_ATTACH},
{"SanDisk", "Cruzer Blade", NULL, BLIST_TRY_VPD_PAGES |
BLIST_INQUIRY_36},
{"SMSC", "USB 2 HS-CF", NULL, BLIST_SPARSELUN | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 42f0550d6b11..6f41e4b5a2b8 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -63,6 +63,7 @@ static const struct scsi_dh_blist scsi_dh_blist[] = {
{"LSI", "INF-01-00", "rdac", },
{"ENGENIO", "INF-01-00", "rdac", },
{"LENOVO", "DE_Series", "rdac", },
+ {"FUJITSU", "ETERNUS_AHB", "rdac", },
{NULL, NULL, NULL },
};
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index f4cc08eb47ba..7ae5024e7824 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -4760,7 +4760,7 @@ static __init int iscsi_transport_init(void)
iscsi_eh_timer_workq = alloc_workqueue("%s",
WQ_SYSFS | __WQ_LEGACY | WQ_MEM_RECLAIM | WQ_UNBOUND,
- 2, "iscsi_eh");
+ 1, "iscsi_eh");
if (!iscsi_eh_timer_workq) {
err = -ENOMEM;
goto release_nls;
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f8661062ef95..f3d5b1bbd5aa 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -339,7 +339,7 @@ store_spi_transport_##field(struct device *dev, \
struct spi_transport_attrs *tp \
= (struct spi_transport_attrs *)&starget->starget_data; \
\
- if (i->f->set_##field) \
+ if (!i->f->set_##field) \
return -EINVAL; \
val = simple_strtoul(buf, NULL, 0); \
if (val > tp->max_##field) \
diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c
index 53dd87628cbe..516a7f573942 100644
--- a/drivers/scsi/ufs/ufs_bsg.c
+++ b/drivers/scsi/ufs/ufs_bsg.c
@@ -106,8 +106,10 @@ static int ufs_bsg_request(struct bsg_job *job)
desc_op = bsg_request->upiu_req.qr.opcode;
ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff,
&desc_len, desc_op);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync(hba->dev);
goto out;
+ }
/* fall through */
case UPIU_TRANSACTION_NOP_OUT:
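The ufs_bsg hunk above adds a pm_runtime_put_sync() on the early-error branch so the runtime-PM reference taken before handling the request is dropped on every path, not only on success. A hedged sketch of that get/put pairing (do_request() is a hypothetical stand-in for the bsg handling itself):

static int my_bsg_request(struct device *dev)
{
	int ret;

	pm_runtime_get_sync(dev);	/* reference taken up front             */
	ret = do_request(dev);		/* hypothetical request handling        */
	pm_runtime_put_sync(dev);	/* dropped on success and failure alike */

	return ret;
}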
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 01fc0d20a70d..6f54bd832c8b 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -66,10 +66,12 @@ static const struct meson_gx_package_id {
{ "A113D", 0x25, 0x22, 0xff },
{ "S905D2", 0x28, 0x10, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
- { "S922X", 0x29, 0x40, 0xf0 },
{ "A311D", 0x29, 0x10, 0xf0 },
- { "S905X3", 0x2b, 0x5, 0xf },
- { "S905D3", 0x2b, 0xb0, 0xf0 },
+ { "S922X", 0x29, 0x40, 0xf0 },
+ { "S905D3", 0x2b, 0x4, 0xf5 },
+ { "S905X3", 0x2b, 0x5, 0xf5 },
+ { "S905X3", 0x2b, 0x10, 0x3f },
+ { "S905D3", 0x2b, 0x30, 0x3f },
{ "A113L", 0x2c, 0x0, 0xf8 },
};
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index fec3d672b606..01bfea1cb64a 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -33,6 +33,9 @@ static int __init imx_soc_device_init(void)
u32 val;
int ret;
+ if (of_machine_is_compatible("fsl,ls1021a"))
+ return 0;
+
soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
if (!soc_dev_attr)
return -ENOMEM;
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 7b0759adb47d..cc57a384d74d 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -22,6 +22,8 @@
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420
+#define IMX8MP_OCOTP_UID_OFFSET 0x10
+
/* Same as ANADIG_DIGPROG_IMX7D */
#define ANADIG_DIGPROG_IMX8MM 0x800
@@ -87,6 +89,8 @@ static void __init imx8mm_soc_uid(void)
{
void __iomem *ocotp_base;
struct device_node *np;
+ u32 offset = of_machine_is_compatible("fsl,imx8mp") ?
+ IMX8MP_OCOTP_UID_OFFSET : 0;
np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-ocotp");
if (!np)
@@ -95,9 +99,9 @@ static void __init imx8mm_soc_uid(void)
ocotp_base = of_iomap(np, 0);
WARN_ON(!ocotp_base);
- soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH);
+ soc_uid = readl_relaxed(ocotp_base + OCOTP_UID_HIGH + offset);
soc_uid <<= 32;
- soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW);
+ soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW + offset);
iounmap(ocotp_base);
of_node_put(np);
@@ -146,7 +150,7 @@ static const struct imx8_soc_data imx8mp_soc_data = {
.soc_revision = imx8mm_soc_revision,
};
-static const struct of_device_id imx8_soc_match[] = {
+static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ .compatible = "fsl,imx8mq", .data = &imx8mq_soc_data, },
{ .compatible = "fsl,imx8mm", .data = &imx8mm_soc_data, },
{ .compatible = "fsl,imx8mn", .data = &imx8mn_soc_data, },
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index 96c6f777519c..c9b3f9ebf0bb 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -256,10 +256,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
goto exit;
/* wait for the status to be set */
- ret = readl_relaxed_poll_timeout(reset->prm->base +
- reset->prm->data->rstst,
- v, v & BIT(st_bit), 1,
- OMAP_RESET_MAX_WAIT);
+ ret = readl_relaxed_poll_timeout_atomic(reset->prm->base +
+ reset->prm->data->rstst,
+ v, v & BIT(st_bit), 1,
+ OMAP_RESET_MAX_WAIT);
if (ret)
pr_err("%s: timedout waiting for %s:%lu\n", __func__,
reset->prm->data->name, id);
diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c
index 4cfdd074e310..c7422740edd4 100644
--- a/drivers/soundwire/intel.c
+++ b/drivers/soundwire/intel.c
@@ -930,8 +930,9 @@ static int intel_create_dai(struct sdw_cdns *cdns,
/* TODO: Read supported rates/formats from hardware */
for (i = off; i < (off + num); i++) {
- dais[i].name = kasprintf(GFP_KERNEL, "SDW%d Pin%d",
- cdns->instance, i);
+ dais[i].name = devm_kasprintf(cdns->dev, GFP_KERNEL,
+ "SDW%d Pin%d",
+ cdns->instance, i);
if (!dais[i].name)
return -ENOMEM;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index a35faced0456..91c6affe139c 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -588,14 +588,14 @@ static void dspi_release_dma(struct fsl_dspi *dspi)
return;
if (dma->chan_tx) {
- dma_unmap_single(dma->chan_tx->device->dev, dma->tx_dma_phys,
- dma_bufsize, DMA_TO_DEVICE);
+ dma_free_coherent(dma->chan_tx->device->dev, dma_bufsize,
+ dma->tx_dma_buf, dma->tx_dma_phys);
dma_release_channel(dma->chan_tx);
}
if (dma->chan_rx) {
- dma_unmap_single(dma->chan_rx->device->dev, dma->rx_dma_phys,
- dma_bufsize, DMA_FROM_DEVICE);
+ dma_free_coherent(dma->chan_rx->device->dev, dma_bufsize,
+ dma->rx_dma_buf, dma->rx_dma_phys);
dma_release_channel(dma->chan_rx);
}
}
@@ -1109,6 +1109,8 @@ static int dspi_suspend(struct device *dev)
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
+ if (dspi->irq)
+ disable_irq(dspi->irq);
spi_controller_suspend(ctlr);
clk_disable_unprepare(dspi->clk);
@@ -1129,6 +1131,8 @@ static int dspi_resume(struct device *dev)
if (ret)
return ret;
spi_controller_resume(ctlr);
+ if (dspi->irq)
+ enable_irq(dspi->irq);
return 0;
}
@@ -1385,22 +1389,22 @@ static int dspi_probe(struct platform_device *pdev)
goto poll_mode;
}
- ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt,
- IRQF_SHARED, pdev->name, dspi);
+ init_completion(&dspi->xfer_done);
+
+ ret = request_threaded_irq(dspi->irq, dspi_interrupt, NULL,
+ IRQF_SHARED, pdev->name, dspi);
if (ret < 0) {
dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
goto out_clk_put;
}
- init_completion(&dspi->xfer_done);
-
poll_mode:
if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) {
ret = dspi_request_dma(dspi, res->start);
if (ret < 0) {
dev_err(&pdev->dev, "can't get dma channels\n");
- goto out_clk_put;
+ goto out_free_irq;
}
}
@@ -1415,11 +1419,14 @@ poll_mode:
ret = spi_register_controller(ctlr);
if (ret != 0) {
dev_err(&pdev->dev, "Problem registering DSPI ctlr\n");
- goto out_clk_put;
+ goto out_free_irq;
}
return ret;
+out_free_irq:
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
out_clk_put:
clk_disable_unprepare(dspi->clk);
out_ctlr_put:
@@ -1434,18 +1441,8 @@ static int dspi_remove(struct platform_device *pdev)
struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
/* Disconnect from the SPI framework */
- dspi_release_dma(dspi);
- clk_disable_unprepare(dspi->clk);
spi_unregister_controller(dspi->ctlr);
- return 0;
-}
-
-static void dspi_shutdown(struct platform_device *pdev)
-{
- struct spi_controller *ctlr = platform_get_drvdata(pdev);
- struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr);
-
/* Disable RX and TX */
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_DIS_TXF | SPI_MCR_DIS_RXF,
@@ -1455,8 +1452,16 @@ static void dspi_shutdown(struct platform_device *pdev)
regmap_update_bits(dspi->regmap, SPI_MCR, SPI_MCR_HALT, SPI_MCR_HALT);
dspi_release_dma(dspi);
+ if (dspi->irq)
+ free_irq(dspi->irq, dspi);
clk_disable_unprepare(dspi->clk);
- spi_unregister_controller(dspi->ctlr);
+
+ return 0;
+}
+
+static void dspi_shutdown(struct platform_device *pdev)
+{
+ dspi_remove(pdev);
}
static struct platform_driver fsl_dspi_driver = {
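After the fsl-dspi rework above, dspi_remove() owns the complete teardown (unregister the controller, quiesce RX/TX, release DMA and the IRQ, ungate the clock) and dspi_shutdown() simply delegates to it, so the two paths cannot drift apart. A hedged sketch of that delegation with hypothetical names:

static int my_remove(struct platform_device *pdev)
{
	struct my_priv *priv = platform_get_drvdata(pdev);

	my_unregister(priv);		/* stop new users first           */
	my_quiesce_hw(priv);		/* then halt RX/TX in hardware    */
	my_release_resources(priv);	/* DMA, IRQ and clocks come last  */
	return 0;
}

static void my_shutdown(struct platform_device *pdev)
{
	my_remove(pdev);		/* one teardown path for both ops */
}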
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 6783e12c40c2..a556795caeef 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -36,7 +36,6 @@
#define SPI_CFG0_SCK_LOW_OFFSET 8
#define SPI_CFG0_CS_HOLD_OFFSET 16
#define SPI_CFG0_CS_SETUP_OFFSET 24
-#define SPI_ADJUST_CFG0_SCK_LOW_OFFSET 16
#define SPI_ADJUST_CFG0_CS_HOLD_OFFSET 0
#define SPI_ADJUST_CFG0_CS_SETUP_OFFSET 16
@@ -48,6 +47,8 @@
#define SPI_CFG1_CS_IDLE_MASK 0xff
#define SPI_CFG1_PACKET_LOOP_MASK 0xff00
#define SPI_CFG1_PACKET_LENGTH_MASK 0x3ff0000
+#define SPI_CFG2_SCK_HIGH_OFFSET 0
+#define SPI_CFG2_SCK_LOW_OFFSET 16
#define SPI_CMD_ACT BIT(0)
#define SPI_CMD_RESUME BIT(1)
@@ -283,7 +284,7 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
static void mtk_spi_prepare_transfer(struct spi_master *master,
struct spi_transfer *xfer)
{
- u32 spi_clk_hz, div, sck_time, cs_time, reg_val = 0;
+ u32 spi_clk_hz, div, sck_time, cs_time, reg_val;
struct mtk_spi *mdata = spi_master_get_devdata(master);
spi_clk_hz = clk_get_rate(mdata->spi_clk);
@@ -296,18 +297,18 @@ static void mtk_spi_prepare_transfer(struct spi_master *master,
cs_time = sck_time * 2;
if (mdata->dev_comp->enhance_timing) {
+ reg_val = (((sck_time - 1) & 0xffff)
+ << SPI_CFG2_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xffff)
- << SPI_CFG0_SCK_HIGH_OFFSET);
- reg_val |= (((sck_time - 1) & 0xffff)
- << SPI_ADJUST_CFG0_SCK_LOW_OFFSET);
+ << SPI_CFG2_SCK_LOW_OFFSET);
writel(reg_val, mdata->base + SPI_CFG2_REG);
- reg_val |= (((cs_time - 1) & 0xffff)
+ reg_val = (((cs_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_HOLD_OFFSET);
reg_val |= (((cs_time - 1) & 0xffff)
<< SPI_ADJUST_CFG0_CS_SETUP_OFFSET);
writel(reg_val, mdata->base + SPI_CFG0_REG);
} else {
- reg_val |= (((sck_time - 1) & 0xff)
+ reg_val = (((sck_time - 1) & 0xff)
<< SPI_CFG0_SCK_HIGH_OFFSET);
reg_val |= (((sck_time - 1) & 0xff) << SPI_CFG0_SCK_LOW_OFFSET);
reg_val |= (((cs_time - 1) & 0xff) << SPI_CFG0_CS_HOLD_OFFSET);
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 6721910e5f2a..0040362b7162 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1485,6 +1485,11 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ PCI_VDEVICE(INTEL, 0x4daa), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x4dab), LPSS_CNL_SSP },
{ PCI_VDEVICE(INTEL, 0x4dfb), LPSS_CNL_SSP },
+ /* TGL-H */
+ { PCI_VDEVICE(INTEL, 0x43aa), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43ab), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43fb), LPSS_CNL_SSP },
+ { PCI_VDEVICE(INTEL, 0x43fd), LPSS_CNL_SSP },
/* APL */
{ PCI_VDEVICE(INTEL, 0x5ac2), LPSS_BXT_SSP },
{ PCI_VDEVICE(INTEL, 0x5ac4), LPSS_BXT_SSP },
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 06192c9ea813..cbc2387d450c 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -179,7 +179,7 @@
struct rspi_data {
void __iomem *addr;
- u32 max_speed_hz;
+ u32 speed_hz;
struct spi_controller *ctlr;
struct platform_device *pdev;
wait_queue_head_t wait;
@@ -258,8 +258,7 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
- 2 * rspi->max_speed_hz) - 1;
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
/* Disable dummy transmission, set 16-bit word access, 1 frame */
@@ -299,14 +298,14 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
clksrc = clk_get_rate(rspi->clk);
while (div < 3) {
- if (rspi->max_speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
+ if (rspi->speed_hz >= clksrc/4) /* 4=(CLK/2)/2 */
break;
div++;
clksrc /= 2;
}
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clksrc, 2 * rspi->max_speed_hz) - 1;
+ spbr = DIV_ROUND_UP(clksrc, 2 * rspi->speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
rspi->spcmd |= div << 2;
@@ -341,7 +340,7 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
- spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->speed_hz);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
/* Disable dummy transmission, set byte access */
@@ -949,9 +948,24 @@ static int rspi_prepare_message(struct spi_controller *ctlr,
{
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
struct spi_device *spi = msg->spi;
+ const struct spi_transfer *xfer;
int ret;
- rspi->max_speed_hz = spi->max_speed_hz;
+ /*
+ * As the Bit Rate Register must not be changed while the device is
+ * active, all transfers in a message must use the same bit rate.
+ * In theory, the sequencer could be enabled, and each Command Register
+ * could divide the base bit rate by a different value.
+ * However, most RSPI variants do not have Transfer Data Length
+ * Multiplier Setting Registers, so each sequence step would be limited
+ * to a single word, making this feature unsuitable for large
+ * transfers, which would gain most from it.
+ */
+ rspi->speed_hz = spi->max_speed_hz;
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (xfer->speed_hz < rspi->speed_hz)
+ rspi->speed_hz = xfer->speed_hz;
+ }
rspi->spcmd = SPCMD_SSLKP;
if (spi->mode & SPI_CPOL)
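The rspi change above picks the slowest xfer->speed_hz in the message, because the Bit Rate Register cannot be rewritten mid-message, and then programs SPBR = DIV_ROUND_UP(clk, 2 * speed) - 1 clamped to 0..255. A standalone sketch of that arithmetic (the 48 MHz source clock and 1 MHz target are assumed example values, not taken from the driver):

#include <stdio.h>

static unsigned int spbr_for(unsigned long clk_hz, unsigned long speed_hz)
{
	/* DIV_ROUND_UP(clk, 2 * speed) - 1, clamped to the 8-bit register */
	unsigned long spbr = (clk_hz + 2 * speed_hz - 1) / (2 * speed_hz) - 1;

	if (spbr > 255)
		spbr = 255;
	return (unsigned int)spbr;
}

int main(void)
{
	/* resulting bit rate: 48 MHz / (2 * (23 + 1)) = 1 MHz */
	printf("SPBR = %u\n", spbr_for(48000000UL, 1000000UL));
	return 0;
}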
diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c
index 88e6543648cb..bd23c4689b46 100644
--- a/drivers/spi/spi-sprd-adi.c
+++ b/drivers/spi/spi-sprd-adi.c
@@ -389,9 +389,9 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_CTRL, val);
/* Load the watchdog timeout value, 50ms is always enough. */
+ sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_LOW,
WDG_LOAD_VAL & WDG_LOAD_MASK);
- sprd_adi_write(sadi, sadi->slave_pbase + REG_WDG_LOAD_HIGH, 0);
/* Start the watchdog to reset system */
sprd_adi_read(sadi, sadi->slave_pbase + REG_WDG_CTRL, &val);
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 3c44bb2fd9b1..a900962b4336 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -553,20 +553,6 @@ static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
.exec_op = stm32_qspi_exec_op,
};
-static void stm32_qspi_release(struct stm32_qspi *qspi)
-{
- pm_runtime_get_sync(qspi->dev);
- /* disable qspi */
- writel_relaxed(0, qspi->io_base + QSPI_CR);
- stm32_qspi_dma_free(qspi);
- mutex_destroy(&qspi->lock);
- pm_runtime_put_noidle(qspi->dev);
- pm_runtime_disable(qspi->dev);
- pm_runtime_set_suspended(qspi->dev);
- pm_runtime_dont_use_autosuspend(qspi->dev);
- clk_disable_unprepare(qspi->clk);
-}
-
static int stm32_qspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -642,7 +628,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
if (IS_ERR(rstc)) {
ret = PTR_ERR(rstc);
if (ret == -EPROBE_DEFER)
- goto err_qspi_release;
+ goto err_clk_disable;
} else {
reset_control_assert(rstc);
udelay(2);
@@ -653,7 +639,7 @@ static int stm32_qspi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qspi);
ret = stm32_qspi_dma_setup(qspi);
if (ret)
- goto err_qspi_release;
+ goto err_dma_free;
mutex_init(&qspi->lock);
@@ -673,15 +659,26 @@ static int stm32_qspi_probe(struct platform_device *pdev)
ret = devm_spi_register_master(dev, ctrl);
if (ret)
- goto err_qspi_release;
+ goto err_pm_runtime_free;
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
return 0;
-err_qspi_release:
- stm32_qspi_release(qspi);
+err_pm_runtime_free:
+ pm_runtime_get_sync(qspi->dev);
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
+err_dma_free:
+ stm32_qspi_dma_free(qspi);
+err_clk_disable:
+ clk_disable_unprepare(qspi->clk);
err_master_put:
spi_master_put(qspi->ctrl);
@@ -692,7 +689,16 @@ static int stm32_qspi_remove(struct platform_device *pdev)
{
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
- stm32_qspi_release(qspi);
+ pm_runtime_get_sync(qspi->dev);
+ /* disable qspi */
+ writel_relaxed(0, qspi->io_base + QSPI_CR);
+ stm32_qspi_dma_free(qspi);
+ mutex_destroy(&qspi->lock);
+ pm_runtime_put_noidle(qspi->dev);
+ pm_runtime_disable(qspi->dev);
+ pm_runtime_set_suspended(qspi->dev);
+ pm_runtime_dont_use_autosuspend(qspi->dev);
+ clk_disable_unprepare(qspi->clk);
return 0;
}
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
index ecea15534c42..fa11cc0e809b 100644
--- a/drivers/spi/spi-sun6i.c
+++ b/drivers/spi/spi-sun6i.c
@@ -198,7 +198,7 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
struct spi_transfer *tfr)
{
struct sun6i_spi *sspi = spi_master_get_devdata(master);
- unsigned int mclk_rate, div, timeout;
+ unsigned int mclk_rate, div, div_cdr1, div_cdr2, timeout;
unsigned int start, end, tx_time;
unsigned int trig_level;
unsigned int tx_len = 0;
@@ -287,14 +287,12 @@ static int sun6i_spi_transfer_one(struct spi_master *master,
* First try CDR2, and if we can't reach the expected
* frequency, fall back to CDR1.
*/
- div = mclk_rate / (2 * tfr->speed_hz);
- if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
- if (div > 0)
- div--;
-
- reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+ div_cdr1 = DIV_ROUND_UP(mclk_rate, tfr->speed_hz);
+ div_cdr2 = DIV_ROUND_UP(div_cdr1, 2);
+ if (div_cdr2 <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ reg = SUN6I_CLK_CTL_CDR2(div_cdr2 - 1) | SUN6I_CLK_CTL_DRS;
} else {
- div = ilog2(mclk_rate) - ilog2(tfr->speed_hz);
+ div = min(SUN6I_CLK_CTL_CDR1_MASK, order_base_2(div_cdr1));
reg = SUN6I_CLK_CTL_CDR1(div);
}
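The sun6i change above first computes div_cdr1 = ceil(mclk / speed) and div_cdr2 = ceil(div_cdr1 / 2); it programs CDR2 with div_cdr2 - 1 when that fits the field, otherwise it falls back to the power-of-two CDR1 divider, so the resulting clock never exceeds the requested rate. A standalone sketch of the selection (the mask widths and the 24 MHz / 7 MHz figures are assumed examples):

#include <stdio.h>

#define CDR2_MASK 0xff	/* assumed width of the CDR2 field */
#define CDR1_MASK 0xf	/* assumed width of the CDR1 field */

static unsigned int div_round_up(unsigned int n, unsigned int d)
{
	return (n + d - 1) / d;
}

static unsigned int order_base_2(unsigned int n)	/* smallest x with 2^x >= n */
{
	unsigned int x = 0;

	while ((1U << x) < n)
		x++;
	return x;
}

int main(void)
{
	unsigned int mclk = 24000000, speed = 7000000;
	unsigned int div_cdr1 = div_round_up(mclk, speed);	/* 4 */
	unsigned int div_cdr2 = div_round_up(div_cdr1, 2);	/* 2 */

	if (div_cdr2 <= CDR2_MASK + 1)
		printf("CDR2 = %u (spi_clk = %u Hz)\n",
		       div_cdr2 - 1, mclk / (2 * div_cdr2));	/* 6 MHz <= 7 MHz */
	else
		printf("CDR1 = %u\n",
		       order_base_2(div_cdr1) > CDR1_MASK ?
		       CDR1_MASK : order_base_2(div_cdr1));
	return 0;
}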
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d753df700e9e..59e07675ef86 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -609,15 +609,20 @@ err_find_dev:
static int spidev_release(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
+ int dofree;
mutex_lock(&device_list_lock);
spidev = filp->private_data;
filp->private_data = NULL;
+ spin_lock_irq(&spidev->spi_lock);
+ /* ... after we unbound from the underlying device? */
+ dofree = (spidev->spi == NULL);
+ spin_unlock_irq(&spidev->spi_lock);
+
/* last close? */
spidev->users--;
if (!spidev->users) {
- int dofree;
kfree(spidev->tx_buffer);
spidev->tx_buffer = NULL;
@@ -625,19 +630,14 @@ static int spidev_release(struct inode *inode, struct file *filp)
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
- spin_lock_irq(&spidev->spi_lock);
- if (spidev->spi)
- spidev->speed_hz = spidev->spi->max_speed_hz;
-
- /* ... after we unbound from the underlying device? */
- dofree = (spidev->spi == NULL);
- spin_unlock_irq(&spidev->spi_lock);
-
if (dofree)
kfree(spidev);
+ else
+ spidev->speed_hz = spidev->spi->max_speed_hz;
}
#ifdef CONFIG_SPI_SLAVE
- spi_slave_abort(spidev->spi);
+ if (!dofree)
+ spi_slave_abort(spidev->spi);
#endif
mutex_unlock(&device_list_lock);
@@ -787,13 +787,13 @@ static int spidev_remove(struct spi_device *spi)
{
struct spidev_data *spidev = spi_get_drvdata(spi);
+ /* prevent new opens */
+ mutex_lock(&device_list_lock);
/* make sure ops on existing fds can abort cleanly */
spin_lock_irq(&spidev->spi_lock);
spidev->spi = NULL;
spin_unlock_irq(&spidev->spi_lock);
- /* prevent new opens */
- mutex_lock(&device_list_lock);
list_del(&spidev->device_entry);
device_destroy(spidev_class, spidev->devt);
clear_bit(MINOR(spidev->devt), minors);
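The spidev_release() rework above samples spidev->spi once under spi_lock, so the final-close cleanup and the SPI-slave abort both act on the same "still bound?" answer, and the abort is skipped once the underlying device is gone (the surrounding device_list_lock keeps that answer valid until release returns). A hedged sketch of the decide-under-the-lock pattern with hypothetical names:

static void my_release(struct my_state *st)
{
	bool unbound;

	spin_lock_irq(&st->lock);
	unbound = (st->dev == NULL);	/* sample the binding exactly once  */
	spin_unlock_irq(&st->lock);

	if (unbound)
		kfree(st);		/* device already went away         */
	else
		my_abort(st->dev);	/* an outer mutex keeps this stable */
}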
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index 45ad4ba92f94..689acd69a1b9 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -456,9 +456,9 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
unsigned int lo_mask = data[5] << shift;
unsigned int chan_mask = hi_mask | lo_mask;
unsigned int old_mask = (1 << shift) - 1;
- unsigned int pm = devpriv->pm[trig] & old_mask;
- unsigned int pt = devpriv->pt[trig] & old_mask;
- unsigned int pp = devpriv->pp[trig] & old_mask;
+ unsigned int pm;
+ unsigned int pt;
+ unsigned int pp;
if (trig > 1) {
dev_dbg(dev->class_dev,
@@ -471,6 +471,10 @@ static int apci1500_di_cfg_trig(struct comedi_device *dev,
return -EINVAL;
}
+ pm = devpriv->pm[trig] & old_mask;
+ pt = devpriv->pt[trig] & old_mask;
+ pp = devpriv->pp[trig] & old_mask;
+
switch (data[2]) {
case COMEDI_DIGITAL_TRIG_DISABLE:
/* clear trigger configuration */
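The apci1500 fix above moves the reads of devpriv->pm/pt/pp[trig] below the "trig > 1" check, so an out-of-range index is rejected before it is ever used to index the arrays. A standalone sketch of that validate-before-index ordering (the table contents and bound are assumed examples):

#include <stdio.h>

static const unsigned int table[2] = { 0x11, 0x22 };

static int lookup(unsigned int trig, unsigned int *out)
{
	if (trig > 1)			/* reject first ...                   */
		return -1;

	*out = table[trig];		/* ... index only when known in range */
	return 0;
}

int main(void)
{
	unsigned int v;

	if (!lookup(1, &v))
		printf("table[1] = 0x%x\n", v);
	if (lookup(5, &v))
		printf("trig=5 rejected before any array access\n");
	return 0;
}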
diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig
index fea06cb0eb48..37577bb72998 100644
--- a/drivers/staging/media/atomisp/Kconfig
+++ b/drivers/staging/media/atomisp/Kconfig
@@ -22,7 +22,7 @@ config VIDEO_ATOMISP
module will be called atomisp
config VIDEO_ATOMISP_ISP2401
- bool "VIDEO_ATOMISP_ISP2401"
+ bool "Use Intel Atom ISP on Cherrytail/Anniedale (ISP2401)"
depends on VIDEO_ATOMISP
help
Enable support for Atom ISP2401-based boards.
diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile
index 9dc8072799e3..205d0f8cc2e1 100644
--- a/drivers/staging/media/atomisp/Makefile
+++ b/drivers/staging/media/atomisp/Makefile
@@ -156,6 +156,7 @@ atomisp-objs += \
pci/hive_isp_css_common/host/timed_ctrl.o \
pci/hive_isp_css_common/host/vmem.o \
pci/hive_isp_css_shared/host/tag.o \
+ pci/system_local.o \
obj-byt = \
pci/css_2400_system/hive/ia_css_isp_configs.o \
@@ -182,7 +183,6 @@ INCLUDES += \
-I$(atomisp)/include/hmm/ \
-I$(atomisp)/include/mmu/ \
-I$(atomisp)/pci/ \
- -I$(atomisp)/pci/hrt/ \
-I$(atomisp)/pci/base/circbuf/interface/ \
-I$(atomisp)/pci/base/refcount/interface/ \
-I$(atomisp)/pci/camera/pipe/interface/ \
@@ -192,7 +192,6 @@ INCLUDES += \
-I$(atomisp)/pci/hive_isp_css_include/ \
-I$(atomisp)/pci/hive_isp_css_include/device_access/ \
-I$(atomisp)/pci/hive_isp_css_include/host/ \
- -I$(atomisp)/pci/hive_isp_css_include/memory_access/ \
-I$(atomisp)/pci/hive_isp_css_shared/ \
-I$(atomisp)/pci/hive_isp_css_shared/host/ \
-I$(atomisp)/pci/isp/kernels/ \
@@ -311,9 +310,7 @@ INCLUDES += \
-I$(atomisp)/pci/runtime/tagger/interface/
INCLUDES_byt += \
- -I$(atomisp)/pci/css_2400_system/ \
-I$(atomisp)/pci/css_2400_system/hive/ \
- -I$(atomisp)/pci/css_2400_system/hrt/ \
INCLUDES_cht += \
-I$(atomisp)/pci/css_2401_system/ \
@@ -321,7 +318,6 @@ INCLUDES_cht += \
-I$(atomisp)/pci/css_2401_system/hive/ \
-I$(atomisp)/pci/css_2401_system/hrt/ \
-# -I$(atomisp)/pci/css_2401_system/hrt/ \
# -I$(atomisp)/pci/css_2401_system/hive_isp_css_2401_system_generated/ \
DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
index 90d125ba080f..c90730513438 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c
@@ -495,11 +495,11 @@ static int ov2680_h_flip(struct v4l2_subdev *sd, s32 value)
ret = ov2680_read_reg(client, 1, OV2680_MIRROR_REG, &val);
if (ret)
return ret;
- if (value) {
+ if (value)
val |= OV2680_FLIP_MIRROR_BIT_ENABLE;
- } else {
+ else
val &= ~OV2680_FLIP_MIRROR_BIT_ENABLE;
- }
+
ret = ov2680_write_reg(client, 1,
OV2680_MIRROR_REG, val);
if (ret)
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 97ab10bc45ca..e698b63d6cb7 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1899,7 +1899,7 @@ static int ov5693_probe(struct i2c_client *client)
{
struct ov5693_device *dev;
int i2c;
- int ret = 0;
+ int ret;
void *pdata;
unsigned int i;
@@ -1929,8 +1929,10 @@ static int ov5693_probe(struct i2c_client *client)
pdata = gmin_camera_platform_data(&dev->sd,
ATOMISP_INPUT_FORMAT_RAW_10,
atomisp_bayer_order_bggr);
- if (!pdata)
+ if (!pdata) {
+ ret = -EINVAL;
goto out_free;
+ }
ret = ov5693_s_config(&dev->sd, client->irq, pdata);
if (ret)
diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
index 873344a02ccf..5a5121d958ed 100644
--- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
+++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h
@@ -250,6 +250,7 @@ const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void);
#define IS_MFLD __IS_SOC(INTEL_FAM6_ATOM_SALTWELL_MID)
#define IS_BYT __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT)
#define IS_CHT __IS_SOC(INTEL_FAM6_ATOM_AIRMONT)
+#define IS_MRFD __IS_SOC(INTEL_FAM6_ATOM_SILVERMONT_MID)
#define IS_MOFD __IS_SOC(INTEL_FAM6_ATOM_AIRMONT_MID)
/* Both CHT and MOFD come with ISP2401 */
diff --git a/drivers/staging/media/atomisp/pci/atomisp-regs.h b/drivers/staging/media/atomisp/pci/atomisp-regs.h
index de34ee28e390..022997f47121 100644
--- a/drivers/staging/media/atomisp/pci/atomisp-regs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp-regs.h
@@ -20,9 +20,6 @@
#define ATOMISP_REGS_H
/* common register definitions */
-#define PUNIT_PORT 0x04
-#define CCK_PORT 0x14
-
#define PCICMDSTS 0x01
#define INTR 0x0f
#define MSI_CAPID 0x24
diff --git a/drivers/staging/media/atomisp/pci/atomisp_acc.c b/drivers/staging/media/atomisp/pci/atomisp_acc.c
index 76861396ba86..f638d0bd09fe 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_acc.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_acc.c
@@ -355,11 +355,11 @@ int atomisp_acc_map(struct atomisp_sub_device *asd, struct atomisp_acc_map *map)
pgnr = DIV_ROUND_UP(map->length, PAGE_SIZE);
if (pgnr < ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
- dev_err(atomisp_dev,
+ dev_err(asd->isp->dev,
"user space memory size is less than the expected size..\n");
return -ENOMEM;
} else if (pgnr > ((PAGE_ALIGN(map->length)) >> PAGE_SHIFT)) {
- dev_err(atomisp_dev,
+ dev_err(asd->isp->dev,
"user space memory size is large than the expected size..\n");
return -ENOMEM;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
index 7b936e5a5f03..8ea65bef35d2 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c
@@ -21,6 +21,7 @@
#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/pm_runtime.h>
@@ -109,7 +110,7 @@ struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev)
static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd)
{
- struct v4l2_subdev_frame_interval fi;
+ struct v4l2_subdev_frame_interval fi = { 0 };
struct atomisp_device *isp = asd->isp;
unsigned short fps = 0;
@@ -206,6 +207,7 @@ int atomisp_freq_scaling(struct atomisp_device *isp,
enum atomisp_dfs_mode mode,
bool force)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
/* FIXME! Only use subdev[0] status yet */
struct atomisp_sub_device *asd = &isp->asd[0];
const struct atomisp_dfs_config *dfs;
@@ -219,7 +221,7 @@ int atomisp_freq_scaling(struct atomisp_device *isp,
return -EINVAL;
}
- if ((isp->pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
+ if ((pdev->device & ATOMISP_PCI_DEVICE_SOC_MASK) ==
ATOMISP_PCI_DEVICE_SOC_CHT && ATOMISP_USE_YUVPP(asd))
isp->dfs = &dfs_config_cht_soc;
@@ -357,39 +359,41 @@ static void clear_isp_irq(enum hrt_isp_css_irq irq)
irq_clear_all(IRQ0_ID);
}
-void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev)
+void atomisp_msi_irq_init(struct atomisp_device *isp)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 msg32;
u16 msg16;
- pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32);
msg32 |= 1 << MSI_ENABLE_BIT;
- pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32);
msg32 = (1 << INTR_IER) | (1 << INTR_IIR);
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32);
- pci_read_config_word(dev, PCI_COMMAND, &msg16);
+ pci_read_config_word(pdev, PCI_COMMAND, &msg16);
msg16 |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
PCI_COMMAND_INTX_DISABLE);
- pci_write_config_word(dev, PCI_COMMAND, msg16);
+ pci_write_config_word(pdev, PCI_COMMAND, msg16);
}
-void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev)
+void atomisp_msi_irq_uninit(struct atomisp_device *isp)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 msg32;
u16 msg16;
- pci_read_config_dword(dev, PCI_MSI_CAPID, &msg32);
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &msg32);
msg32 &= ~(1 << MSI_ENABLE_BIT);
- pci_write_config_dword(dev, PCI_MSI_CAPID, msg32);
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, msg32);
msg32 = 0x0;
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, msg32);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg32);
- pci_read_config_word(dev, PCI_COMMAND, &msg16);
+ pci_read_config_word(pdev, PCI_COMMAND, &msg16);
msg16 &= ~(PCI_COMMAND_MASTER);
- pci_write_config_word(dev, PCI_COMMAND, msg16);
+ pci_write_config_word(pdev, PCI_COMMAND, msg16);
}
static void atomisp_sof_event(struct atomisp_sub_device *asd)
@@ -480,11 +484,12 @@ static void print_csi_rx_errors(enum mipi_port_id port,
/* Clear irq reg */
static void clear_irq_reg(struct atomisp_device *isp)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 msg_ret;
- pci_read_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, &msg_ret);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &msg_ret);
msg_ret |= 1 << INTR_IIR;
- pci_write_config_dword(isp->pdev, PCI_INTERRUPT_CTRL, msg_ret);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, msg_ret);
}
static struct atomisp_sub_device *
@@ -665,11 +670,10 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe)
void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
unsigned int size)
{
- u32 __iomem *io_virt_addr;
unsigned int data = 0;
unsigned int size32 = DIV_ROUND_UP(size, sizeof(u32));
- dev_dbg(isp->dev, "atomisp_io_base:%p\n", atomisp_io_base);
+ dev_dbg(isp->dev, "atomisp mmio base: %p\n", isp->base);
dev_dbg(isp->dev, "%s, addr:0x%x, size: %d, size32: %d\n", __func__,
addr, size, size32);
if (size32 * 4 + addr > 0x4000) {
@@ -678,13 +682,12 @@ void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr,
return;
}
addr += SP_DMEM_BASE;
- io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ addr &= 0x003FFFFF;
do {
- data = *io_virt_addr;
+ data = readl(isp->base + addr);
dev_dbg(isp->dev, "%s, \t [0x%x]:0x%x\n", __func__, addr, data);
- io_virt_addr += sizeof(u32);
- size32 -= 1;
- } while (size32 > 0);
+ addr += sizeof(u32);
+ } while (--size32);
}
static struct videobuf_buffer *atomisp_css_frame_to_vbuf(
@@ -1289,6 +1292,7 @@ void atomisp_delayed_init_work(struct work_struct *work)
static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
enum ia_css_pipe_id css_pipe_id;
bool stream_restart[MAX_STREAM_NUM] = {0};
bool depth_mode = false;
@@ -1372,8 +1376,8 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout)
clear_isp_irq(hrt_isp_css_irq_sp);
/* Set the SRSE to 3 before resetting */
- pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
- MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+ pci_write_config_dword(pdev, PCI_I_CONTROL,
+ isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
/* reset ISP and restore its state */
isp->isp_timeout = true;
@@ -6158,6 +6162,7 @@ out:
/*Turn off ISP dphy */
int atomisp_ospm_dphy_down(struct atomisp_device *isp)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
unsigned long flags;
u32 reg;
@@ -6179,9 +6184,9 @@ done:
* MRFLD HW design need all CSI ports are disabled before
* powering down the IUNIT.
*/
- pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &reg);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &reg);
reg |= MRFLD_ALL_CSI_PORTS_OFF_MASK;
- pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, reg);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, reg);
return 0;
}
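The atomisp_cmd.c changes above (and the compat_css20.c ones further down) drop the global atomisp_io_base pointer in favour of readb/readw/readl/writeX accessors on isp->base with the address masked into the mapped window, and derive the PCI device from isp->dev via to_pci_dev(). A hedged sketch of that accessor style (struct my_isp and the helper names are illustrative, not driver symbols):

#include <linux/io.h>
#include <linux/types.h>

struct my_isp {
	void __iomem *base;		/* BAR mapping set up at probe time */
};

static u32 my_mmio_read32(struct my_isp *isp, unsigned long addr)
{
	/* mask the address into the mapped window, then use readl()
	 * rather than dereferencing the __iomem pointer directly */
	return readl(isp->base + (addr & 0x003FFFFF));
}

static void my_mmio_write32(struct my_isp *isp, unsigned long addr, u32 val)
{
	writel(val, isp->base + (addr & 0x003FFFFF));
}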
diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
index 0bde995f1a8d..1c0d464c2ac1 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h
@@ -68,8 +68,8 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe);
/*
* Interrupt functions
*/
-void atomisp_msi_irq_init(struct atomisp_device *isp, struct pci_dev *dev);
-void atomisp_msi_irq_uninit(struct atomisp_device *isp, struct pci_dev *dev);
+void atomisp_msi_irq_init(struct atomisp_device *isp);
+void atomisp_msi_irq_uninit(struct atomisp_device *isp);
void atomisp_wdt_work(struct work_struct *work);
void atomisp_wdt(struct timer_list *t);
void atomisp_setup_flash(struct atomisp_sub_device *asd);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h
index b2ed83c2f337..6a2a81a3eb23 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h
@@ -29,8 +29,6 @@ struct atomisp_sub_device;
struct video_device;
enum atomisp_input_stream_id;
-extern void __iomem *atomisp_io_base;
-
struct atomisp_metadata_buf {
struct ia_css_metadata *metadata;
void *md_vptr;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
index c1e282a974d0..cccc5bfa1057 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c
@@ -33,13 +33,12 @@
#include "atomisp_ioctl.h"
#include "atomisp_acc.h"
-#include <asm/intel-mid.h>
-
#include "ia_css_debug.h"
#include "ia_css_isp_param.h"
#include "sh_css_hrt.h"
#include "ia_css_isys.h"
+#include <linux/io.h>
#include <linux/pm_runtime.h>
/* Assume max number of ACC stages */
@@ -69,92 +68,94 @@ struct bayer_ds_factor {
static void atomisp_css2_hw_store_8(hrt_address addr, uint8_t data)
{
- s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
spin_lock_irqsave(&mmio_lock, flags);
- *io_virt_addr = data;
+ writeb(data, isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
}
static void atomisp_css2_hw_store_16(hrt_address addr, uint16_t data)
{
- s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
spin_lock_irqsave(&mmio_lock, flags);
- *io_virt_addr = data;
+ writew(data, isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
}
void atomisp_css2_hw_store_32(hrt_address addr, uint32_t data)
{
- s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
spin_lock_irqsave(&mmio_lock, flags);
- *io_virt_addr = data;
+ writel(data, isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
}
static uint8_t atomisp_css2_hw_load_8(hrt_address addr)
{
- s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
u8 ret;
spin_lock_irqsave(&mmio_lock, flags);
- ret = *io_virt_addr;
+ ret = readb(isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
return ret;
}
static uint16_t atomisp_css2_hw_load_16(hrt_address addr)
{
- s16 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
u16 ret;
spin_lock_irqsave(&mmio_lock, flags);
- ret = *io_virt_addr;
+ ret = readw(isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
return ret;
}
static uint32_t atomisp_css2_hw_load_32(hrt_address addr)
{
- s32 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
u32 ret;
spin_lock_irqsave(&mmio_lock, flags);
- ret = *io_virt_addr;
+ ret = readl(isp->base + (addr & 0x003FFFFF));
spin_unlock_irqrestore(&mmio_lock, flags);
return ret;
}
-static void atomisp_css2_hw_store(hrt_address addr,
- const void *from, uint32_t n)
+static void atomisp_css2_hw_store(hrt_address addr, const void *from, uint32_t n)
{
- s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
unsigned int i;
+ addr &= 0x003FFFFF;
spin_lock_irqsave(&mmio_lock, flags);
- for (i = 0; i < n; i++, io_virt_addr++, from++)
- *io_virt_addr = *(s8 *)from;
+ for (i = 0; i < n; i++, from++)
+ writeb(*(s8 *)from, isp->base + addr + i);
+
spin_unlock_irqrestore(&mmio_lock, flags);
}
static void atomisp_css2_hw_load(hrt_address addr, void *to, uint32_t n)
{
- s8 __iomem *io_virt_addr = atomisp_io_base + (addr & 0x003FFFFF);
+ struct atomisp_device *isp = dev_get_drvdata(atomisp_dev);
unsigned long flags;
unsigned int i;
+ addr &= 0x003FFFFF;
spin_lock_irqsave(&mmio_lock, flags);
- for (i = 0; i < n; i++, to++, io_virt_addr++)
- *(s8 *)to = *io_virt_addr;
+ for (i = 0; i < n; i++, to++)
+ *(s8 *)to = readb(isp->base + addr + i);
spin_unlock_irqrestore(&mmio_lock, flags);
}
@@ -181,10 +182,10 @@ void atomisp_load_uint32(hrt_address addr, uint32_t *data)
*data = atomisp_css2_hw_load_32(addr);
}
-static int hmm_get_mmu_base_addr(unsigned int *mmu_base_addr)
+static int hmm_get_mmu_base_addr(struct device *dev, unsigned int *mmu_base_addr)
{
if (!sh_mmu_mrfld.get_pd_base) {
- dev_err(atomisp_dev, "get mmu base address failed.\n");
+ dev_err(dev, "get mmu base address failed.\n");
return -EINVAL;
}
@@ -839,7 +840,7 @@ int atomisp_css_init(struct atomisp_device *isp)
int ret;
int err;
- ret = hmm_get_mmu_base_addr(&mmu_base_addr);
+ ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr);
if (ret)
return ret;
@@ -941,7 +942,7 @@ int atomisp_css_resume(struct atomisp_device *isp)
unsigned int mmu_base_addr;
int ret;
- ret = hmm_get_mmu_base_addr(&mmu_base_addr);
+ ret = hmm_get_mmu_base_addr(isp->dev, &mmu_base_addr);
if (ret) {
dev_err(isp->dev, "get base address error.\n");
return -EINVAL;
@@ -1966,8 +1967,7 @@ void atomisp_css_input_set_mode(struct atomisp_sub_device *asd,
true,
0x13000,
&size_mem_words) != 0) {
- if (intel_mid_identify_cpu() ==
- INTEL_MID_CPU_CHIP_TANGIER)
+ if (IS_MRFD)
size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_2;
else
size_mem_words = CSS_MIPI_FRAME_BUFFER_SIZE_1;
@@ -2414,13 +2414,13 @@ static void __configure_preview_pp_input(struct atomisp_sub_device *asd,
struct ia_css_resolution *effective_res =
&stream_config->input_config.effective_res;
- const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
+ static const struct bayer_ds_factor bds_fct[] = {{2, 1}, {3, 2}, {5, 4} };
/*
* BZ201033: YUV decimation factor of 4 causes couple of rightmost
* columns to be shaded. Remove this factor to work around the CSS bug.
* const unsigned int yuv_dec_fct[] = {4, 2};
*/
- const unsigned int yuv_dec_fct[] = { 2 };
+ static const unsigned int yuv_dec_fct[] = { 2 };
unsigned int i;
if (width == 0 && height == 0)
@@ -2540,7 +2540,7 @@ static void __configure_video_pp_input(struct atomisp_sub_device *asd,
struct ia_css_resolution *effective_res =
&stream_config->input_config.effective_res;
- const struct bayer_ds_factor bds_factors[] = {
+ static const struct bayer_ds_factor bds_factors[] = {
{8, 1}, {6, 1}, {4, 1}, {3, 1}, {2, 1}, {3, 2}
};
unsigned int i;
@@ -4337,7 +4337,7 @@ static const char * const fw_acc_type_name[] = {
[IA_CSS_ACC_STANDALONE] = "Stand-alone acceleration",
};
-int atomisp_css_dump_blob_infor(void)
+int atomisp_css_dump_blob_infor(struct atomisp_device *isp)
{
struct ia_css_blob_descr *bd = sh_css_blob_info;
unsigned int i, nm = sh_css_num_binaries;
@@ -4354,8 +4354,7 @@ int atomisp_css_dump_blob_infor(void)
for (i = 0; i < sh_css_num_binaries - NUM_OF_SPS; i++) {
switch (bd[i].header.type) {
case ia_css_isp_firmware:
- dev_dbg(atomisp_dev,
- "Num%2d type %s (%s), binary id is %2d, name is %s\n",
+ dev_dbg(isp->dev, "Num%2d type %s (%s), binary id is %2d, name is %s\n",
i + NUM_OF_SPS,
fw_type_name[bd[i].header.type],
fw_acc_type_name[bd[i].header.info.isp.type],
@@ -4363,8 +4362,7 @@ int atomisp_css_dump_blob_infor(void)
bd[i].name);
break;
default:
- dev_dbg(atomisp_dev,
- "Num%2d type %s, name is %s\n",
+ dev_dbg(isp->dev, "Num%2d type %s, name is %s\n",
i + NUM_OF_SPS, fw_type_name[bd[i].header.type],
bd[i].name);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
index 8376aec18e3e..e0601534380f 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.h
@@ -153,7 +153,7 @@ int atomisp_css_debug_dump_isp_binary(void);
int atomisp_css_dump_sp_raw_copy_linecount(bool reduced);
-int atomisp_css_dump_blob_infor(void);
+int atomisp_css_dump_blob_infor(struct atomisp_device *isp);
void atomisp_css_set_isp_config_id(struct atomisp_sub_device *asd,
uint32_t isp_config_id);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
index fe0e2bfde27f..f670faf978e6 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.c
@@ -62,9 +62,9 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
if (opt & OPTION_VALID) {
if (opt & OPTION_BIN_LIST) {
- ret = atomisp_css_dump_blob_infor();
+ ret = atomisp_css_dump_blob_infor(isp);
if (ret) {
- dev_err(atomisp_dev, "%s dump blob infor err[ret:%d]\n",
+ dev_err(isp->dev, "%s dump blob infor err[ret:%d]\n",
__func__, ret);
goto opt_err;
}
@@ -76,7 +76,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
atomisp_css_debug_dump_isp_binary();
} else {
ret = -EPERM;
- dev_err(atomisp_dev, "%s dump running bin err[ret:%d]\n",
+ dev_err(isp->dev, "%s dump running bin err[ret:%d]\n",
__func__, ret);
goto opt_err;
}
@@ -86,8 +86,7 @@ static inline int iunit_dump_dbgopt(struct atomisp_device *isp,
hmm_show_mem_stat(__func__, __LINE__);
} else {
ret = -EINVAL;
- dev_err(atomisp_dev, "%s dump nothing[ret=%d]\n", __func__,
- ret);
+ dev_err(isp->dev, "%s dump nothing[ret=%d]\n", __func__, ret);
}
opt_err:
@@ -185,8 +184,9 @@ static void iunit_drvfs_remove_files(struct device_driver *drv)
driver_remove_file(drv, &iunit_drvfs_attrs[i]);
}
-int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp)
+int atomisp_drvfs_init(struct atomisp_device *isp)
{
+ struct device_driver *drv = isp->dev->driver;
int ret;
iunit_debug.isp = isp;
@@ -194,7 +194,7 @@ int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp)
ret = iunit_drvfs_create_files(iunit_debug.drv);
if (ret) {
- dev_err(atomisp_dev, "drvfs_create_files error: %d\n", ret);
+ dev_err(isp->dev, "drvfs_create_files error: %d\n", ret);
iunit_drvfs_remove_files(iunit_debug.drv);
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
index 4911037231fb..8f4cc722b881 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_drvfs.h
@@ -19,7 +19,7 @@
#ifndef __ATOMISP_DRVFS_H__
#define __ATOMISP_DRVFS_H__
-int atomisp_drvfs_init(struct device_driver *drv, struct atomisp_device *isp);
+int atomisp_drvfs_init(struct atomisp_device *isp);
void atomisp_drvfs_exit(void);
#endif /* __ATOMISP_DRVFS_H__ */
diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
index 1af9da8acf4c..0df46a1af5f0 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c
@@ -26,6 +26,9 @@ enum clock_rate {
#define CLK_RATE_19_2MHZ 19200000
#define CLK_RATE_25_0MHZ 25000000
+/* Valid clock number range from 0 to 5 */
+#define MAX_CLK_COUNT 5
+
/* X-Powers AXP288 register set */
#define ALDO1_SEL_REG 0x28
#define ALDO1_CTRL3_REG 0x13
@@ -61,9 +64,7 @@ enum clock_rate {
struct gmin_subdev {
struct v4l2_subdev *subdev;
- int clock_num;
enum clock_rate clock_src;
- bool clock_on;
struct clk *pmc_clk;
struct gpio_desc *gpio0;
struct gpio_desc *gpio1;
@@ -75,11 +76,16 @@ struct gmin_subdev {
unsigned int csi_lanes;
enum atomisp_input_format csi_fmt;
enum atomisp_bayer_order csi_bayer;
+
+ bool clock_on;
bool v1p8_on;
bool v2p8_on;
bool v1p2_on;
bool v2p8_vcm_on;
+ int v1p8_gpio;
+ int v2p8_gpio;
+
u8 pwm_i2c_addr;
/* For PMIC AXP */
@@ -90,9 +96,9 @@ struct gmin_subdev {
static struct gmin_subdev gmin_subdevs[MAX_SUBDEVS];
/* ACPI HIDs for the PMICs that could be used by this driver */
-#define PMIC_ACPI_AXP "INT33F4:00" /* XPower AXP288 PMIC */
-#define PMIC_ACPI_TI "INT33F5:00" /* Dollar Cove TI PMIC */
-#define PMIC_ACPI_CRYSTALCOVE "INT33FD:00" /* Crystal Cove PMIC */
+#define PMIC_ACPI_AXP "INT33F4" /* XPower AXP288 PMIC */
+#define PMIC_ACPI_TI "INT33F5" /* Dollar Cove TI PMIC */
+#define PMIC_ACPI_CRYSTALCOVE "INT33FD" /* Crystal Cove PMIC */
#define PMIC_PLATFORM_TI "intel_soc_pmic_chtdc_ti"
@@ -105,7 +111,7 @@ static enum {
} pmic_id;
static const char *pmic_name[] = {
- [PMIC_UNSET] = "unset",
+ [PMIC_UNSET] = "ACPI device PM",
[PMIC_REGULATOR] = "regulator driver",
[PMIC_AXP] = "XPower AXP288 PMIC",
[PMIC_TI] = "Dollar Cove TI PMIC",
@@ -119,24 +125,6 @@ static const struct atomisp_platform_data pdata = {
.subdevs = pdata_subdevs,
};
-/*
- * Something of a hack. The ECS E7 board drives camera 2.8v from an
- * external regulator instead of the PMIC. There's a gmin_CamV2P8
- * config variable that specifies the GPIO to handle this particular
- * case, but this needs a broader architecture for handling camera
- * power.
- */
-enum { V2P8_GPIO_UNSET = -2, V2P8_GPIO_NONE = -1 };
-static int v2p8_gpio = V2P8_GPIO_UNSET;
-
-/*
- * Something of a hack. The CHT RVP board drives camera 1.8v from an
- * external regulator instead of the PMIC just like ECS E7 board, see the
- * comments above.
- */
-enum { V1P8_GPIO_UNSET = -2, V1P8_GPIO_NONE = -1 };
-static int v1p8_gpio = V1P8_GPIO_UNSET;
-
static LIST_HEAD(vcm_devices);
static DEFINE_MUTEX(vcm_lock);
@@ -199,6 +187,8 @@ int atomisp_register_i2c_module(struct v4l2_subdev *subdev,
* gmin_subdev struct is already initialized for us.
*/
gs = find_gmin_subdev(subdev);
+ if (!gs)
+ return -ENODEV;
pdata.subdevs[i].type = type;
pdata.subdevs[i].port = gs->csi_port;
@@ -294,6 +284,7 @@ static struct gmin_cfg_var mrd7_vars[] = {
{"INT33F8:00_CsiFmt", "13"},
{"INT33F8:00_CsiBayer", "0"},
{"INT33F8:00_CamClk", "0"},
+
{"INT33F9:00_CamType", "1"},
{"INT33F9:00_CsiPort", "0"},
{"INT33F9:00_CsiLanes", "1"},
@@ -309,6 +300,7 @@ static struct gmin_cfg_var ecs7_vars[] = {
{"INT33BE:00_CsiFmt", "13"},
{"INT33BE:00_CsiBayer", "2"},
{"INT33BE:00_CamClk", "0"},
+
{"INT33F0:00_CsiPort", "0"},
{"INT33F0:00_CsiLanes", "1"},
{"INT33F0:00_CsiFmt", "13"},
@@ -322,6 +314,7 @@ static struct gmin_cfg_var i8880_vars[] = {
{"XXOV2680:00_CsiPort", "1"},
{"XXOV2680:00_CsiLanes", "1"},
{"XXOV2680:00_CamClk", "0"},
+
{"XXGC0310:00_CsiPort", "0"},
{"XXGC0310:00_CsiLanes", "1"},
{"XXGC0310:00_CamClk", "1"},
@@ -381,34 +374,27 @@ static const guid_t atomisp_dsm_guid = GUID_INIT(0xdc2f6c4f, 0x045b, 0x4f1d,
#define GMIN_PMC_CLK_NAME 14 /* "pmc_plt_clk_[0..5]" */
static char gmin_pmc_clk_name[GMIN_PMC_CLK_NAME];
-static int gmin_i2c_match_one(struct device *dev, const void *data)
-{
- const char *name = data;
- struct i2c_client *client;
-
- if (dev->type != &i2c_client_type)
- return 0;
-
- client = to_i2c_client(dev);
-
- return (!strcmp(name, client->name));
-}
-
static struct i2c_client *gmin_i2c_dev_exists(struct device *dev, char *name,
struct i2c_client **client)
{
+ struct acpi_device *adev;
struct device *d;
- while ((d = bus_find_device(&i2c_bus_type, NULL, name,
- gmin_i2c_match_one))) {
- *client = to_i2c_client(d);
- dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
- (*client)->name, (*client)->addr,
- (*client)->adapter->nr);
- return *client;
- }
+ adev = acpi_dev_get_first_match_dev(name, NULL, -1);
+ if (!adev)
+ return NULL;
- return NULL;
+ d = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
+ acpi_dev_put(adev);
+ if (!d)
+ return NULL;
+
+ *client = i2c_verify_client(d);
+ put_device(d);
+
+ dev_dbg(dev, "found '%s' at address 0x%02x, adapter %d\n",
+ (*client)->name, (*client)->addr, (*client)->adapter->nr);
+ return *client;
}
static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
@@ -427,94 +413,222 @@ static int gmin_i2c_write(struct device *dev, u16 i2c_addr, u8 reg,
"I2C write, addr: 0x%02x, reg: 0x%02x, value: 0x%02x, mask: 0x%02x\n",
i2c_addr, reg, value, mask);
- ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg,
- value, mask);
-
- if (ret == -EOPNOTSUPP) {
+ ret = intel_soc_pmic_exec_mipi_pmic_seq_element(i2c_addr, reg, value, mask);
+ if (ret == -EOPNOTSUPP)
dev_err(dev,
"ACPI didn't mapped the OpRegion needed to access I2C address 0x%02x.\n"
- "Need to compile the Kernel using CONFIG_*_PMIC_OPREGION settings\n",
+ "Need to compile the kernel using CONFIG_*_PMIC_OPREGION settings\n",
i2c_addr);
- return ret;
- }
return ret;
}
-static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
+static int atomisp_get_acpi_power(struct device *dev)
{
- struct i2c_client *power = NULL, *client = v4l2_get_subdevdata(subdev);
- struct acpi_device *adev;
- acpi_handle handle;
- struct device *dev;
- int i, ret;
+ char name[5];
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ struct acpi_buffer b_name = { sizeof(name), name };
+ union acpi_object *package, *element;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ acpi_handle rhandle;
+ acpi_status status;
+ int clock_num = -1;
+ int i;
- if (!client)
- return NULL;
+ status = acpi_evaluate_object(handle, "_PR0", NULL, &buffer);
+ if (!ACPI_SUCCESS(status))
+ return -1;
- dev = &client->dev;
+ package = buffer.pointer;
- handle = ACPI_HANDLE(dev);
+ if (!buffer.length || !package
+ || package->type != ACPI_TYPE_PACKAGE
+ || !package->package.count)
+ goto fail;
- // FIXME: may need to release resources allocated by acpi_bus_get_device()
- if (!handle || acpi_bus_get_device(handle, &adev)) {
- dev_err(dev, "Error could not get ACPI device\n");
- return NULL;
- }
+ for (i = 0; i < package->package.count; i++) {
+ element = &package->package.elements[i];
- dev_info(&client->dev, "%s: ACPI detected it on bus ID=%s, HID=%s\n",
- __func__, acpi_device_bid(adev), acpi_device_hid(adev));
+ if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
+ continue;
- if (!pmic_id) {
- if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power))
- pmic_id = PMIC_TI;
- else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power))
- pmic_id = PMIC_AXP;
- else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power))
- pmic_id = PMIC_CRYSTALCOVE;
- else
- pmic_id = PMIC_REGULATOR;
+ rhandle = element->reference.handle;
+ if (!rhandle)
+ goto fail;
+
+ acpi_get_name(rhandle, ACPI_SINGLE_NAME, &b_name);
+
+ dev_dbg(dev, "Found PM resource '%s'\n", name);
+ if (strlen(name) == 4 && !strncmp(name, "CLK", 3)) {
+ if (name[3] >= '0' && name[3] <= '4')
+ clock_num = name[3] - '0';
+#if 0
+ /*
+ * We could abort here, but let's parse all resources,
+ * as this is helpful for debugging purposes
+ */
+ if (clock_num >= 0)
+ break;
+#endif
+ }
}
- for (i = 0; i < MAX_SUBDEVS && gmin_subdevs[i].subdev; i++)
- ;
- if (i >= MAX_SUBDEVS)
- return NULL;
+fail:
+ ACPI_FREE(buffer.pointer);
+
+ return clock_num;
+}
+
+static u8 gmin_get_pmic_id_and_addr(struct device *dev)
+{
+ struct i2c_client *power;
+ static u8 pmic_i2c_addr;
+
+ if (pmic_id)
+ return pmic_i2c_addr;
+
+ if (gmin_i2c_dev_exists(dev, PMIC_ACPI_TI, &power))
+ pmic_id = PMIC_TI;
+ else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_AXP, &power))
+ pmic_id = PMIC_AXP;
+ else if (gmin_i2c_dev_exists(dev, PMIC_ACPI_CRYSTALCOVE, &power))
+ pmic_id = PMIC_CRYSTALCOVE;
+ else
+ pmic_id = PMIC_REGULATOR;
+
+ pmic_i2c_addr = power ? power->addr : 0;
+ return pmic_i2c_addr;
+}
+
+static int gmin_detect_pmic(struct v4l2_subdev *subdev)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct device *dev = &client->dev;
+ u8 pmic_i2c_addr;
+
+ pmic_i2c_addr = gmin_get_pmic_id_and_addr(dev);
+ dev_info(dev, "gmin: power management provided via %s (i2c addr 0x%02x)\n",
+ pmic_name[pmic_id], pmic_i2c_addr);
+ return pmic_i2c_addr;
+}
+
+static int gmin_subdev_add(struct gmin_subdev *gs)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(gs->subdev);
+ struct device *dev = &client->dev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ int ret, clock_num = -1;
+
+ dev_info(dev, "%s: ACPI path is %pfw\n", __func__, dev_fwnode(dev));
+
+ /*WA:CHT requires XTAL clock as PLL is not stable.*/
+ gs->clock_src = gmin_get_var_int(dev, false, "ClkSrc",
+ VLV2_CLK_PLL_19P2MHZ);
+
+ gs->csi_port = gmin_get_var_int(dev, false, "CsiPort", 0);
+ gs->csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
+
+ gs->gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
+ if (IS_ERR(gs->gpio0))
+ gs->gpio0 = NULL;
+ else
+ dev_info(dev, "will handle gpio0 via ACPI\n");
+
+ gs->gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
+ if (IS_ERR(gs->gpio1))
+ gs->gpio1 = NULL;
+ else
+ dev_info(dev, "will handle gpio1 via ACPI\n");
+
+ /*
+ * Those are used only when there is an external regulator apart
+ * from the PMIC that would be providing power supply, like on the
+ * two cases below:
+ *
+ * The ECS E7 board drives camera 2.8v from an external regulator
+ * instead of the PMIC. There's a gmin_CamV2P8 config variable
+ * that specifies the GPIO to handle this particular case,
+ * but this needs a broader architecture for handling camera power.
+ *
+	 * The CHT RVP board drives camera 1.8v from an external regulator
+ * instead of the PMIC just like ECS E7 board.
+ */
- if (power) {
- gmin_subdevs[i].pwm_i2c_addr = power->addr;
+ gs->v1p8_gpio = gmin_get_var_int(dev, true, "V1P8GPIO", -1);
+ gs->v2p8_gpio = gmin_get_var_int(dev, true, "V2P8GPIO", -1);
+
+ /*
+ * FIXME:
+ *
+ * The ACPI handling code checks for the _PR? tables in order to
+ * know what is required to switch the device from power state
+ * D0 (_PR0) up to D3COLD (_PR3).
+ *
+ * The adev->flags.power_manageable is set to true if the device
+ * has a _PR0 table, which can be checked by calling
+ * acpi_device_power_manageable(adev).
+ *
+ * However, this only says that the device can be set to power off
+ * mode.
+ *
+	 * At least in the DSDT tables we've seen so far, there's no _PR3
+	 * nor _PS3 (which would have a somewhat similar effect).
+	 * So, using ACPI for power management won't work, unless ACPI
+	 * override logic is added somewhere.
+ *
+ * So, at least for the existing devices we know, the check below
+ * will always be false.
+ */
+ if (acpi_device_can_wakeup(adev) &&
+ acpi_device_can_poweroff(adev)) {
dev_info(dev,
- "gmin: power management provided via %s (i2c addr 0x%02x)\n",
- pmic_name[pmic_id], power->addr);
- } else {
- dev_info(dev, "gmin: power management provided via %s\n",
- pmic_name[pmic_id]);
+ "gmin: power management provided via device PM\n");
+ return 0;
}
- gmin_subdevs[i].subdev = subdev;
- gmin_subdevs[i].clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
- /*WA:CHT requires XTAL clock as PLL is not stable.*/
- gmin_subdevs[i].clock_src = gmin_get_var_int(dev, false, "ClkSrc",
- VLV2_CLK_PLL_19P2MHZ);
- gmin_subdevs[i].csi_port = gmin_get_var_int(dev, false, "CsiPort", 0);
- gmin_subdevs[i].csi_lanes = gmin_get_var_int(dev, false, "CsiLanes", 1);
+	/*
+	 * The code below is kept for backward compatibility with devices
+	 * whose ACPI BIOS may not contain everything needed to set clocks
+	 * and do power management.
+	 */
+
+ /*
+	 * According to:
+ * https://github.com/projectceladon/hardware-intel-kernelflinger/blob/master/doc/fastboot.md
+ *
+ * The "CamClk" EFI var is set via fastboot on some Android devices,
+ * and seems to contain the number of the clock used to feed the
+ * sensor.
+ *
+	 * On systems with a proper ACPI table, this is given via the _PR0
+	 * power resource table. The logic below first checks whether a power
+	 * resource is already present, falling back to the EFI variable
+	 * detection otherwise.
+ */
- /* get PMC clock with clock framework */
- snprintf(gmin_pmc_clk_name,
- sizeof(gmin_pmc_clk_name),
- "%s_%d", "pmc_plt_clk", gmin_subdevs[i].clock_num);
+	/* First, try to use ACPI to get the clock resource */
+ if (acpi_device_power_manageable(adev))
+ clock_num = atomisp_get_acpi_power(dev);
- gmin_subdevs[i].pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
- if (IS_ERR(gmin_subdevs[i].pmc_clk)) {
- ret = PTR_ERR(gmin_subdevs[i].pmc_clk);
+	/* Fall back to EFI and/or DMI match */
+ if (clock_num < 0)
+ clock_num = gmin_get_var_int(dev, false, "CamClk", 0);
- dev_err(dev,
- "Failed to get clk from %s : %d\n",
- gmin_pmc_clk_name,
- ret);
+ if (clock_num < 0 || clock_num > MAX_CLK_COUNT) {
+ dev_err(dev, "Invalid clock number\n");
+ return -EINVAL;
+ }
- return NULL;
+ snprintf(gmin_pmc_clk_name, sizeof(gmin_pmc_clk_name),
+ "%s_%d", "pmc_plt_clk", clock_num);
+
+ gs->pmc_clk = devm_clk_get(dev, gmin_pmc_clk_name);
+ if (IS_ERR(gs->pmc_clk)) {
+ ret = PTR_ERR(gs->pmc_clk);
+ dev_err(dev, "Failed to get clk from %s: %d\n", gmin_pmc_clk_name, ret);
+ return ret;
}
+ dev_info(dev, "Will use CLK%d (%s)\n", clock_num, gmin_pmc_clk_name);
/*
* The firmware might enable the clock at
@@ -526,25 +640,17 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
* to disable a clock that has not been enabled,
* we need to enable the clock first.
*/
- ret = clk_prepare_enable(gmin_subdevs[i].pmc_clk);
+ ret = clk_prepare_enable(gs->pmc_clk);
if (!ret)
- clk_disable_unprepare(gmin_subdevs[i].pmc_clk);
-
- gmin_subdevs[i].gpio0 = gpiod_get_index(dev, NULL, 0, GPIOD_OUT_LOW);
- if (IS_ERR(gmin_subdevs[i].gpio0))
- gmin_subdevs[i].gpio0 = NULL;
-
- gmin_subdevs[i].gpio1 = gpiod_get_index(dev, NULL, 1, GPIOD_OUT_LOW);
- if (IS_ERR(gmin_subdevs[i].gpio1))
- gmin_subdevs[i].gpio1 = NULL;
+ clk_disable_unprepare(gs->pmc_clk);
switch (pmic_id) {
case PMIC_REGULATOR:
- gmin_subdevs[i].v1p8_reg = regulator_get(dev, "V1P8SX");
- gmin_subdevs[i].v2p8_reg = regulator_get(dev, "V2P8SX");
+ gs->v1p8_reg = regulator_get(dev, "V1P8SX");
+ gs->v2p8_reg = regulator_get(dev, "V2P8SX");
- gmin_subdevs[i].v1p2_reg = regulator_get(dev, "V1P2A");
- gmin_subdevs[i].v2p8_vcm_reg = regulator_get(dev, "VPROG4B");
+ gs->v1p2_reg = regulator_get(dev, "V1P2A");
+ gs->v2p8_vcm_reg = regulator_get(dev, "VPROG4B");
/* Note: ideally we would initialize v[12]p8_on to the
* output of regulator_is_enabled(), but sadly that
@@ -556,32 +662,31 @@ static struct gmin_subdev *gmin_subdev_add(struct v4l2_subdev *subdev)
break;
case PMIC_AXP:
- gmin_subdevs[i].eldo1_1p8v = gmin_get_var_int(dev, false,
- "eldo1_1p8v",
- ELDO1_1P8V);
- gmin_subdevs[i].eldo1_sel_reg = gmin_get_var_int(dev, false,
- "eldo1_sel_reg",
- ELDO1_SEL_REG);
- gmin_subdevs[i].eldo1_ctrl_shift = gmin_get_var_int(dev, false,
- "eldo1_ctrl_shift",
- ELDO1_CTRL_SHIFT);
- gmin_subdevs[i].eldo2_1p8v = gmin_get_var_int(dev, false,
- "eldo2_1p8v",
- ELDO2_1P8V);
- gmin_subdevs[i].eldo2_sel_reg = gmin_get_var_int(dev, false,
- "eldo2_sel_reg",
- ELDO2_SEL_REG);
- gmin_subdevs[i].eldo2_ctrl_shift = gmin_get_var_int(dev, false,
- "eldo2_ctrl_shift",
- ELDO2_CTRL_SHIFT);
- gmin_subdevs[i].pwm_i2c_addr = power->addr;
+ gs->eldo1_1p8v = gmin_get_var_int(dev, false,
+ "eldo1_1p8v",
+ ELDO1_1P8V);
+ gs->eldo1_sel_reg = gmin_get_var_int(dev, false,
+ "eldo1_sel_reg",
+ ELDO1_SEL_REG);
+ gs->eldo1_ctrl_shift = gmin_get_var_int(dev, false,
+ "eldo1_ctrl_shift",
+ ELDO1_CTRL_SHIFT);
+ gs->eldo2_1p8v = gmin_get_var_int(dev, false,
+ "eldo2_1p8v",
+ ELDO2_1P8V);
+ gs->eldo2_sel_reg = gmin_get_var_int(dev, false,
+ "eldo2_sel_reg",
+ ELDO2_SEL_REG);
+ gs->eldo2_ctrl_shift = gmin_get_var_int(dev, false,
+ "eldo2_ctrl_shift",
+ ELDO2_CTRL_SHIFT);
break;
default:
break;
}
- return &gmin_subdevs[i];
+ return 0;
}
static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
@@ -591,7 +696,17 @@ static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev)
for (i = 0; i < MAX_SUBDEVS; i++)
if (gmin_subdevs[i].subdev == subdev)
return &gmin_subdevs[i];
- return gmin_subdev_add(subdev);
+ return NULL;
+}
+
+static struct gmin_subdev *find_free_gmin_subdev_slot(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < MAX_SUBDEVS; i++)
+ if (gmin_subdevs[i].subdev == NULL)
+ return &gmin_subdevs[i];
+ return NULL;
}
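With this split, find_gmin_subdev() becomes a pure lookup that returns NULL on a miss, while find_free_gmin_subdev_slot() hands out an unused entry of the fixed gmin_subdevs[] array. A sketch of how a caller can combine the two, with an extra NULL check added purely for illustration (gmin_camera_platform_data() below claims the free slot directly):

	struct gmin_subdev *gs = find_gmin_subdev(subdev);	/* lookup only */

	if (!gs) {
		gs = find_free_gmin_subdev_slot();	/* claim an empty entry */
		if (!gs)
			return NULL;			/* MAX_SUBDEVS already in use */
		gs->subdev = subdev;
	}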
static int axp_regulator_set(struct device *dev, struct gmin_subdev *gs,
@@ -700,32 +815,24 @@ static int gmin_v1p8_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
int ret;
- struct device *dev;
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
int value;
- dev = &client->dev;
-
- if (v1p8_gpio == V1P8_GPIO_UNSET) {
- v1p8_gpio = gmin_get_var_int(dev, true,
- "V1P8GPIO", V1P8_GPIO_NONE);
- if (v1p8_gpio != V1P8_GPIO_NONE) {
- pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
- v1p8_gpio);
- ret = gpio_request(v1p8_gpio, "camera_v1p8_en");
- if (!ret)
- ret = gpio_direction_output(v1p8_gpio, 0);
- if (ret)
- pr_err("V1P8 GPIO initialization failed\n");
- }
+ if (gs->v1p8_gpio >= 0) {
+ pr_info("atomisp_gmin_platform: 1.8v power on GPIO %d\n",
+ gs->v1p8_gpio);
+ ret = gpio_request(gs->v1p8_gpio, "camera_v1p8_en");
+ if (!ret)
+ ret = gpio_direction_output(gs->v1p8_gpio, 0);
+ if (ret)
+ pr_err("V1P8 GPIO initialization failed\n");
}
if (!gs || gs->v1p8_on == on)
return 0;
gs->v1p8_on = on;
- if (v1p8_gpio >= 0)
- gpio_set_value(v1p8_gpio, on);
+ if (gs->v1p8_gpio >= 0)
+ gpio_set_value(gs->v1p8_gpio, on);
if (gs->v1p8_reg) {
regulator_set_voltage(gs->v1p8_reg, 1800000, 1800000);
@@ -762,32 +869,24 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
{
struct gmin_subdev *gs = find_gmin_subdev(subdev);
int ret;
- struct device *dev;
- struct i2c_client *client = v4l2_get_subdevdata(subdev);
int value;
- dev = &client->dev;
-
- if (v2p8_gpio == V2P8_GPIO_UNSET) {
- v2p8_gpio = gmin_get_var_int(dev, true,
- "V2P8GPIO", V2P8_GPIO_NONE);
- if (v2p8_gpio != V2P8_GPIO_NONE) {
- pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
- v2p8_gpio);
- ret = gpio_request(v2p8_gpio, "camera_v2p8");
- if (!ret)
- ret = gpio_direction_output(v2p8_gpio, 0);
- if (ret)
- pr_err("V2P8 GPIO initialization failed\n");
- }
+ if (gs->v2p8_gpio >= 0) {
+ pr_info("atomisp_gmin_platform: 2.8v power on GPIO %d\n",
+ gs->v2p8_gpio);
+ ret = gpio_request(gs->v2p8_gpio, "camera_v2p8");
+ if (!ret)
+ ret = gpio_direction_output(gs->v2p8_gpio, 0);
+ if (ret)
+ pr_err("V2P8 GPIO initialization failed\n");
}
if (!gs || gs->v2p8_on == on)
return 0;
gs->v2p8_on = on;
- if (v2p8_gpio >= 0)
- gpio_set_value(v2p8_gpio, on);
+ if (gs->v2p8_gpio >= 0)
+ gpio_set_value(gs->v2p8_gpio, on);
if (gs->v2p8_reg) {
regulator_set_voltage(gs->v2p8_reg, 2900000, 2900000);
@@ -819,6 +918,37 @@ static int gmin_v2p8_ctrl(struct v4l2_subdev *subdev, int on)
return -EINVAL;
}
+static int gmin_acpi_pm_ctrl(struct v4l2_subdev *subdev, int on)
+{
+ int ret = 0;
+ struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ struct acpi_device *adev = ACPI_COMPANION(&client->dev);
+
+ /* Use the ACPI power management to control it */
+ on = !!on;
+ if (gs->clock_on == on)
+ return 0;
+
+ dev_dbg(subdev->dev, "Setting power state to %s\n",
+ on ? "on" : "off");
+
+ if (on)
+ ret = acpi_device_set_power(adev,
+ ACPI_STATE_D0);
+ else
+ ret = acpi_device_set_power(adev,
+ ACPI_STATE_D3_COLD);
+
+ if (!ret)
+ gs->clock_on = on;
+ else
+ dev_err(subdev->dev, "Couldn't set power state to %s\n",
+ on ? "on" : "off");
+
+ return ret;
+}
+
static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on)
{
int ret = 0;
@@ -884,7 +1014,7 @@ static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev,
return NULL;
}
-static struct camera_sensor_platform_data gmin_plat = {
+static struct camera_sensor_platform_data pmic_gmin_plat = {
.gpio0_ctrl = gmin_gpio0_ctrl,
.gpio1_ctrl = gmin_gpio1_ctrl,
.v1p8_ctrl = gmin_v1p8_ctrl,
@@ -895,17 +1025,36 @@ static struct camera_sensor_platform_data gmin_plat = {
.get_vcm_ctrl = gmin_get_vcm_ctrl,
};
+static struct camera_sensor_platform_data acpi_gmin_plat = {
+ .gpio0_ctrl = gmin_gpio0_ctrl,
+ .gpio1_ctrl = gmin_gpio1_ctrl,
+ .v1p8_ctrl = gmin_acpi_pm_ctrl,
+ .v2p8_ctrl = gmin_acpi_pm_ctrl,
+ .v1p2_ctrl = gmin_acpi_pm_ctrl,
+ .flisclk_ctrl = gmin_acpi_pm_ctrl,
+ .csi_cfg = gmin_csi_cfg,
+ .get_vcm_ctrl = gmin_get_vcm_ctrl,
+};
+
struct camera_sensor_platform_data *gmin_camera_platform_data(
struct v4l2_subdev *subdev,
enum atomisp_input_format csi_format,
enum atomisp_bayer_order csi_bayer)
{
- struct gmin_subdev *gs = find_gmin_subdev(subdev);
+ u8 pmic_i2c_addr = gmin_detect_pmic(subdev);
+ struct gmin_subdev *gs;
+ gs = find_free_gmin_subdev_slot();
+ gs->subdev = subdev;
gs->csi_fmt = csi_format;
gs->csi_bayer = csi_bayer;
+ gs->pwm_i2c_addr = pmic_i2c_addr;
- return &gmin_plat;
+ gmin_subdev_add(gs);
+ if (gs->pmc_clk)
+ return &pmic_gmin_plat;
+ else
+ return &acpi_gmin_plat;
}
EXPORT_SYMBOL_GPL(gmin_camera_platform_data);
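The choice between the two ops tables happens right above: if a PMC clock was acquired, the PMIC/regulator callbacks in pmic_gmin_plat are used; otherwise every rail and clock callback falls through to gmin_acpi_pm_ctrl() via acpi_gmin_plat, so a single D0/D3cold transition covers them all. A hedged sketch of the sensor-driver side (the subdev pointer and the format/Bayer-order constants here are illustrative):

	struct camera_sensor_platform_data *pdata;

	pdata = gmin_camera_platform_data(sd, ATOMISP_INPUT_FORMAT_RAW_10,
					  atomisp_bayer_order_grbg);
	if (pdata) {
		pdata->v1p8_ctrl(sd, 1);	/* rail on, or D0 on the ACPI PM path */
		pdata->flisclk_ctrl(sd, 1);	/* sensor clock on */
	}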
@@ -957,12 +1106,28 @@ static int gmin_get_config_dsm_var(struct device *dev,
union acpi_object *obj, *cur = NULL;
int i;
+	/*
+	 * The data reported by "CamClk" seems to be either 0 or 1 in the
+	 * _DSM table.
+	 *
+	 * In the ACPI tables we have seen so far, this is not related to the
+	 * actual clock source for the sensor, which is given by the
+	 * _PR0 ACPI table. So, ignore it, as otherwise this would be
+	 * set to a wrong value.
+	 */
+ if (!strcmp(var, "CamClk"))
+ return -EINVAL;
+
obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL);
if (!obj) {
dev_info_once(dev, "Didn't find ACPI _DSM table.\n");
return -EINVAL;
}
+ /* Return on unexpected object type */
+ if (obj->type != ACPI_TYPE_PACKAGE)
+ return -EINVAL;
+
#if 0 /* Just for debugging purposes */
for (i = 0; i < obj->package.count; i++) {
union acpi_object *cur = &obj->package.elements[i];
@@ -1155,10 +1320,10 @@ EXPORT_SYMBOL_GPL(camera_sensor_csi);
* trying. The driver itself does direct calls to the PUNIT to manage
* ISP power.
*/
-static void isp_pm_cap_fixup(struct pci_dev *dev)
+static void isp_pm_cap_fixup(struct pci_dev *pdev)
{
- dev_info(&dev->dev, "Disabling PCI power management on camera ISP\n");
- dev->pm_cap = 0;
+ dev_info(&pdev->dev, "Disabling PCI power management on camera ISP\n");
+ pdev->pm_cap = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0f38, isp_pm_cap_fixup);
diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h
index ff3becd41110..c01db10bb735 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_internal.h
+++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h
@@ -216,12 +216,12 @@ struct atomisp_sw_contex {
* ci device struct
*/
struct atomisp_device {
- struct pci_dev *pdev;
struct device *dev;
struct v4l2_device v4l2_dev;
struct media_device media_dev;
struct atomisp_platform_data *pdata;
void *mmu_l1_base;
+ void __iomem *base;
const struct firmware *firmware;
struct pm_qos_request pm_qos;
diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
index 9404a678fa6f..f8d616f08b51 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c
@@ -549,8 +549,7 @@ static int atomisp_querycap(struct file *file, void *fh,
strscpy(cap->driver, DRIVER, sizeof(cap->driver));
strscpy(cap->card, CARD, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
- pci_name(isp->pdev));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", dev_name(isp->dev));
return 0;
}
@@ -1635,6 +1634,7 @@ static int atomisp_streamon(struct file *file, void *fh,
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
enum ia_css_pipe_id css_pipe_id;
unsigned int sensor_start_stream;
unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION;
@@ -1844,9 +1844,8 @@ start_sensor:
/* Enable the CSI interface on ANN B0/K0 */
if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
- pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
- isp->saved_regs.csi_control |
- MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control | MRFLD_PCI_CSI_CONTROL_CSI_READY);
}
/* stream on the sensor */
@@ -1891,6 +1890,7 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type)
{
struct video_device *vdev = video_devdata(file);
struct atomisp_device *isp = video_get_drvdata(vdev);
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev);
struct atomisp_sub_device *asd = pipe->asd;
struct atomisp_video_pipe *capture_pipe = NULL;
@@ -2076,9 +2076,8 @@ stopsensor:
/* Disable the CSI interface on ANN B0/K0 */
if (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)) {
- pci_write_config_word(isp->pdev, MRFLD_PCI_CSI_CONTROL,
- isp->saved_regs.csi_control &
- ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
+ pci_write_config_word(pdev, MRFLD_PCI_CSI_CONTROL,
+ isp->saved_regs.csi_control & ~MRFLD_PCI_CSI_CONTROL_CSI_READY);
}
if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, false))
@@ -2111,8 +2110,8 @@ stopsensor:
}
 	/* disable PUNIT/ISP acknowledge/handshake - SRSE=3 */
- pci_write_config_dword(isp->pdev, PCI_I_CONTROL, isp->saved_regs.i_control |
- MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
+ pci_write_config_dword(pdev, PCI_I_CONTROL,
+ isp->saved_regs.i_control | MRFLD_PCI_I_CONTROL_SRSE_RESET_MASK);
dev_err(isp->dev, "atomisp_reset");
atomisp_reset(isp);
for (i = 0; i < isp->num_of_streams; i++) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
index d36809a0182c..a000a1e316f7 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c
@@ -127,8 +127,6 @@ MODULE_PARM_DESC(pad_h, "extra data for ISP processing");
struct device *atomisp_dev;
-void __iomem *atomisp_io_base;
-
static const struct atomisp_freq_scaling_rule dfs_rules_merr[] = {
{
.width = ISP_FREQ_RULE_ANY,
@@ -512,30 +510,27 @@ void atomisp_acc_unregister(struct atomisp_acc_pipe *video)
static int atomisp_save_iunit_reg(struct atomisp_device *isp)
{
- struct pci_dev *dev = isp->pdev;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
dev_dbg(isp->dev, "%s\n", __func__);
- pci_read_config_word(dev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
+ pci_read_config_word(pdev, PCI_COMMAND, &isp->saved_regs.pcicmdsts);
/* isp->saved_regs.ispmmadr is set from the atomisp_pci_probe() */
- pci_read_config_dword(dev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
- pci_read_config_dword(dev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
- pci_read_config_word(dev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
- pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL,
- &isp->saved_regs.interrupt_control);
-
- pci_read_config_dword(dev, MRFLD_PCI_PMCS,
- &isp->saved_regs.pmcs);
+ pci_read_config_dword(pdev, PCI_MSI_CAPID, &isp->saved_regs.msicap);
+ pci_read_config_dword(pdev, PCI_MSI_ADDR, &isp->saved_regs.msi_addr);
+ pci_read_config_word(pdev, PCI_MSI_DATA, &isp->saved_regs.msi_data);
+ pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &isp->saved_regs.intr);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &isp->saved_regs.interrupt_control);
+
+ pci_read_config_dword(pdev, MRFLD_PCI_PMCS, &isp->saved_regs.pmcs);
/* Ensure read/write combining is enabled. */
- pci_read_config_dword(dev, PCI_I_CONTROL,
- &isp->saved_regs.i_control);
+ pci_read_config_dword(pdev, PCI_I_CONTROL, &isp->saved_regs.i_control);
isp->saved_regs.i_control |=
MRFLD_PCI_I_CONTROL_ENABLE_READ_COMBINING |
MRFLD_PCI_I_CONTROL_ENABLE_WRITE_COMBINING;
- pci_read_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
&isp->saved_regs.csi_access_viol);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
&isp->saved_regs.csi_rcomp_config);
/*
* Hardware bugs require setting CSI_HS_OVR_CLK_GATE_ON_UPDATE.
@@ -545,65 +540,58 @@ static int atomisp_save_iunit_reg(struct atomisp_device *isp)
* is missed, and IUNIT can hang.
* For both issues, setting this bit is a workaround.
*/
- isp->saved_regs.csi_rcomp_config |=
- MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ isp->saved_regs.csi_rcomp_config |= MRFLD_PCI_CSI_HS_OVR_CLK_GATE_ON_UPDATE;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
&isp->saved_regs.csi_afe_dly);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
&isp->saved_regs.csi_control);
if (isp->media_dev.hw_revision >=
(ATOMISP_HW_REVISION_ISP2401 << ATOMISP_HW_REVISION_SHIFT))
- isp->saved_regs.csi_control |=
- MRFLD_PCI_CSI_CONTROL_PARPATHEN;
+ isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_PARPATHEN;
/*
* On CHT CSI_READY bit should be enabled before stream on
*/
if (IS_CHT && (isp->media_dev.hw_revision >= ((ATOMISP_HW_REVISION_ISP2401 <<
ATOMISP_HW_REVISION_SHIFT) | ATOMISP_HW_STEPPING_B0)))
- isp->saved_regs.csi_control |=
- MRFLD_PCI_CSI_CONTROL_CSI_READY;
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ isp->saved_regs.csi_control |= MRFLD_PCI_CSI_CONTROL_CSI_READY;
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
&isp->saved_regs.csi_afe_rcomp_config);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
&isp->saved_regs.csi_afe_hs_control);
- pci_read_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
&isp->saved_regs.csi_deadline_control);
return 0;
}
static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp)
{
- struct pci_dev *dev = isp->pdev;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
dev_dbg(isp->dev, "%s\n", __func__);
- pci_write_config_word(dev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
- pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
- isp->saved_regs.ispmmadr);
- pci_write_config_dword(dev, PCI_MSI_CAPID, isp->saved_regs.msicap);
- pci_write_config_dword(dev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
- pci_write_config_word(dev, PCI_MSI_DATA, isp->saved_regs.msi_data);
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL,
- isp->saved_regs.interrupt_control);
- pci_write_config_dword(dev, PCI_I_CONTROL,
- isp->saved_regs.i_control);
-
- pci_write_config_dword(dev, MRFLD_PCI_PMCS,
- isp->saved_regs.pmcs);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
+ pci_write_config_word(pdev, PCI_COMMAND, isp->saved_regs.pcicmdsts);
+ pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, isp->saved_regs.ispmmadr);
+ pci_write_config_dword(pdev, PCI_MSI_CAPID, isp->saved_regs.msicap);
+ pci_write_config_dword(pdev, PCI_MSI_ADDR, isp->saved_regs.msi_addr);
+ pci_write_config_word(pdev, PCI_MSI_DATA, isp->saved_regs.msi_data);
+ pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, isp->saved_regs.intr);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, isp->saved_regs.interrupt_control);
+ pci_write_config_dword(pdev, PCI_I_CONTROL, isp->saved_regs.i_control);
+
+ pci_write_config_dword(pdev, MRFLD_PCI_PMCS, isp->saved_regs.pmcs);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_ACCESS_CTRL_VIOL,
isp->saved_regs.csi_access_viol);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_RCOMP_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_RCOMP_CONTROL,
isp->saved_regs.csi_rcomp_config);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
isp->saved_regs.csi_afe_dly);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL,
isp->saved_regs.csi_control);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_RCOMP_CONTROL,
isp->saved_regs.csi_afe_rcomp_config);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_HS_CONTROL,
isp->saved_regs.csi_afe_hs_control);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_DEADLINE_CONTROL,
isp->saved_regs.csi_deadline_control);
/*
@@ -619,7 +607,7 @@ static int __maybe_unused atomisp_restore_iunit_reg(struct atomisp_device *isp)
static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
{
- struct pci_dev *dev = isp->pdev;
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
u32 irq;
unsigned long flags;
@@ -635,11 +623,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
* So, here we need to check if there is any pending
* IRQ, if so, waiting for it to be served
*/
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq = irq & 1 << INTR_IIR;
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
if (!(irq & (1 << INTR_IIR)))
goto done;
@@ -652,11 +640,11 @@ static int atomisp_mrfld_pre_power_down(struct atomisp_device *isp)
spin_unlock_irqrestore(&isp->lock, flags);
return -EAGAIN;
} else {
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq = irq & 1 << INTR_IIR;
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
if (!(irq & (1 << INTR_IIR))) {
atomisp_css2_hw_store_32(MRFLD_INTR_ENABLE_REG, 0x0);
goto done;
@@ -675,11 +663,11 @@ done:
* to IIR. It could block subsequent interrupt messages.
* HW sighting:4568410.
*/
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq &= ~(1 << INTR_IER);
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- atomisp_msi_irq_uninit(isp, dev);
+ atomisp_msi_irq_uninit(isp);
atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW, true);
spin_unlock_irqrestore(&isp->lock, flags);
@@ -755,7 +743,7 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
/* Wait until ISPSSPM0 bit[25:24] shows the right value */
iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, MRFLD_ISPSSPM0, &tmp);
- tmp = (tmp & MRFLD_ISPSSPM0_ISPSSC_MASK) >> MRFLD_ISPSSPM0_ISPSSS_OFFSET;
+ tmp = (tmp >> MRFLD_ISPSSPM0_ISPSSS_OFFSET) & MRFLD_ISPSSPM0_ISPSSC_MASK;
if (tmp == val) {
trace_ipu_cstate(enable);
return 0;
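The functional change in this hunk is the ISPSSPM0 read-back: the field is now shifted down first and masked afterwards, which is the correct order when the mask is defined right-aligned (with a right-aligned two-bit mask for bits [25:24], the old mask-then-shift order always evaluates to 0). A generic sketch of the corrected extraction, with offset and mask values assumed here for illustration rather than taken from the headers:

	#define ISPSSPM0_ISPSSS_OFFSET	24	/* assumed: field lives in bits [25:24] */
	#define ISPSSPM0_ISPSSC_MASK	0x3	/* assumed: right-aligned two-bit mask */

	u32 ispssc = (reg >> ISPSSPM0_ISPSSS_OFFSET) & ISPSSPM0_ISPSSC_MASK;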
@@ -778,15 +766,13 @@ static int atomisp_mrfld_power(struct atomisp_device *isp, bool enable)
/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
int atomisp_mrfld_power_down(struct atomisp_device *isp)
{
-// FIXME: at least with ISP2401, enabling this code causes the driver to break
- return 0 && atomisp_mrfld_power(isp, false);
+ return atomisp_mrfld_power(isp, false);
}
/* Workaround for pmu_nc_set_power_state not ready in MRFLD */
int atomisp_mrfld_power_up(struct atomisp_device *isp)
{
-// FIXME: at least with ISP2401, enabling this code causes the driver to break
- return 0 && atomisp_mrfld_power(isp, true);
+ return atomisp_mrfld_power(isp, true);
}
int atomisp_runtime_suspend(struct device *dev)
@@ -902,6 +888,7 @@ static int __maybe_unused atomisp_resume(struct device *dev)
int atomisp_csi_lane_config(struct atomisp_device *isp)
{
+ struct pci_dev *pdev = to_pci_dev(isp->dev);
static const struct {
u8 code;
u8 lanes[MRFLD_PORT_NUM];
@@ -1003,7 +990,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
return -EINVAL;
}
- pci_read_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, &csi_control);
csi_control &= ~port_config_mask;
csi_control |= (portconfigs[i].code << MRFLD_PORT_CONFIGCODE_SHIFT)
| (portconfigs[i].lanes[0] ? 0 : (1 << MRFLD_PORT1_ENABLE_SHIFT))
@@ -1013,7 +1000,7 @@ int atomisp_csi_lane_config(struct atomisp_device *isp)
| (((1 << portconfigs[i].lanes[1]) - 1) << MRFLD_PORT2_LANES_SHIFT)
| (((1 << portconfigs[i].lanes[2]) - 1) << port3_lanes_shift);
- pci_write_config_dword(isp->pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_CONTROL, csi_control);
dev_dbg(isp->dev,
"%s: the portconfig is %d-%d-%d, CSI_CONTROL is 0x%08X\n",
@@ -1440,8 +1427,7 @@ atomisp_load_firmware(struct atomisp_device *isp)
* Check for flags the driver was compiled with against the PCI
* device. Always returns true on other than ISP 2400.
*/
-static bool is_valid_device(struct pci_dev *dev,
- const struct pci_device_id *id)
+static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned int a0_max_id = 0;
const char *name;
@@ -1465,14 +1451,14 @@ static bool is_valid_device(struct pci_dev *dev,
name = "Cherrytrail";
break;
default:
- dev_err(&dev->dev, "%s: unknown device ID %x04:%x04\n",
+		dev_err(&pdev->dev, "%s: unknown device ID %04x:%04x\n",
product, id->vendor, id->device);
return false;
}
- if (dev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
- dev_err(&dev->dev, "%s revision %d is not unsupported\n",
- name, dev->revision);
+ if (pdev->revision <= ATOMISP_PCI_REV_BYT_A0_MAX) {
+		dev_err(&pdev->dev, "%s revision %d is not supported\n",
+ name, pdev->revision);
return false;
}
@@ -1483,22 +1469,20 @@ static bool is_valid_device(struct pci_dev *dev,
#if defined(ISP2400)
if (IS_ISP2401) {
- dev_err(&dev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
+ dev_err(&pdev->dev, "Support for %s (ISP2401) was disabled at compile time\n",
name);
return false;
}
#else
if (!IS_ISP2401) {
- dev_err(&dev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
+ dev_err(&pdev->dev, "Support for %s (ISP2400) was disabled at compile time\n",
name);
return false;
}
#endif
- dev_info(&dev->dev, "Detected %s version %d (ISP240%c) on %s\n",
- name, dev->revision,
- IS_ISP2401 ? '1' : '0',
- product);
+ dev_info(&pdev->dev, "Detected %s version %d (ISP240%c) on %s\n",
+ name, pdev->revision, IS_ISP2401 ? '1' : '0', product);
return true;
}
@@ -1538,66 +1522,60 @@ alloc_fail:
#define ATOM_ISP_PCI_BAR 0
-static int atomisp_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
const struct atomisp_platform_data *pdata;
struct atomisp_device *isp;
unsigned int start;
- void __iomem *base;
int err, val;
u32 irq;
- if (!is_valid_device(dev, id))
+ if (!is_valid_device(pdev, id))
return -ENODEV;
/* Pointer to struct device. */
- atomisp_dev = &dev->dev;
+ atomisp_dev = &pdev->dev;
pdata = atomisp_get_platform_data();
if (!pdata)
- dev_warn(&dev->dev, "no platform data available\n");
+ dev_warn(&pdev->dev, "no platform data available\n");
- err = pcim_enable_device(dev);
+ err = pcim_enable_device(pdev);
if (err) {
- dev_err(&dev->dev, "Failed to enable CI ISP device (%d)\n",
- err);
+ dev_err(&pdev->dev, "Failed to enable CI ISP device (%d)\n", err);
return err;
}
- start = pci_resource_start(dev, ATOM_ISP_PCI_BAR);
- dev_dbg(&dev->dev, "start: 0x%x\n", start);
+ start = pci_resource_start(pdev, ATOM_ISP_PCI_BAR);
+ dev_dbg(&pdev->dev, "start: 0x%x\n", start);
- err = pcim_iomap_regions(dev, 1 << ATOM_ISP_PCI_BAR, pci_name(dev));
+ err = pcim_iomap_regions(pdev, 1 << ATOM_ISP_PCI_BAR, pci_name(pdev));
if (err) {
- dev_err(&dev->dev, "Failed to I/O memory remapping (%d)\n",
- err);
+ dev_err(&pdev->dev, "Failed to I/O memory remapping (%d)\n", err);
goto ioremap_fail;
}
- base = pcim_iomap_table(dev)[ATOM_ISP_PCI_BAR];
- dev_dbg(&dev->dev, "base: %p\n", base);
-
- atomisp_io_base = base;
-
- dev_dbg(&dev->dev, "atomisp_io_base: %p\n", atomisp_io_base);
-
- isp = devm_kzalloc(&dev->dev, sizeof(struct atomisp_device), GFP_KERNEL);
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
if (!isp) {
err = -ENOMEM;
goto atomisp_dev_alloc_fail;
}
- isp->pdev = dev;
- isp->dev = &dev->dev;
+
+ isp->dev = &pdev->dev;
+ isp->base = pcim_iomap_table(pdev)[ATOM_ISP_PCI_BAR];
isp->sw_contex.power_state = ATOM_ISP_POWER_UP;
isp->saved_regs.ispmmadr = start;
+ dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base);
+
rt_mutex_init(&isp->mutex);
mutex_init(&isp->streamoff_mutex);
spin_lock_init(&isp->lock);
/* This is not a true PCI device on SoC, so the delay is not needed. */
- isp->pdev->d3_delay = 0;
+ pdev->d3_delay = 0;
+
+ pci_set_drvdata(pdev, isp);
switch (id->device & ATOMISP_PCI_DEVICE_SOC_MASK) {
case ATOMISP_PCI_DEVICE_SOC_MRFLD:
@@ -1648,15 +1626,14 @@ static int atomisp_pci_probe(struct pci_dev *dev,
* have specs yet for exactly how it varies. Default to
* BYT-CR but let provisioning set it via EFI variable
*/
- isp->hpll_freq = gmin_get_var_int(&dev->dev, false, "HpllFreq",
- HPLL_FREQ_2000MHZ);
+ isp->hpll_freq = gmin_get_var_int(&pdev->dev, false, "HpllFreq", HPLL_FREQ_2000MHZ);
/*
 	 * for BYT/CHT we put the ISP into D3cold to avoid PCI register access
* in power off. Set d3cold_delay to 0 since default 100ms is not
* necessary.
*/
- isp->pdev->d3cold_delay = 0;
+ pdev->d3cold_delay = 0;
break;
case ATOMISP_PCI_DEVICE_SOC_ANN:
isp->media_dev.hw_revision = (
@@ -1666,7 +1643,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
ATOMISP_HW_REVISION_ISP2401_LEGACY
#endif
<< ATOMISP_HW_REVISION_SHIFT);
- isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
+ isp->media_dev.hw_revision |= pdev->revision < 2 ?
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
isp->dfs = &dfs_config_merr;
isp->hpll_freq = HPLL_FREQ_1600MHZ;
@@ -1679,13 +1656,13 @@ static int atomisp_pci_probe(struct pci_dev *dev,
ATOMISP_HW_REVISION_ISP2401_LEGACY
#endif
<< ATOMISP_HW_REVISION_SHIFT);
- isp->media_dev.hw_revision |= isp->pdev->revision < 2 ?
+ isp->media_dev.hw_revision |= pdev->revision < 2 ?
ATOMISP_HW_STEPPING_A0 : ATOMISP_HW_STEPPING_B0;
isp->dfs = &dfs_config_cht;
- isp->pdev->d3cold_delay = 0;
+ pdev->d3cold_delay = 0;
- iosf_mbi_read(CCK_PORT, MBI_REG_READ, CCK_FUSE_REG_0, &val);
+ iosf_mbi_read(BT_MBI_UNIT_CCK, MBI_REG_READ, CCK_FUSE_REG_0, &val);
switch (val & CCK_FUSE_HPLL_FREQ_MASK) {
case 0x00:
isp->hpll_freq = HPLL_FREQ_800MHZ;
@@ -1698,18 +1675,16 @@ static int atomisp_pci_probe(struct pci_dev *dev,
break;
default:
isp->hpll_freq = HPLL_FREQ_1600MHZ;
- dev_warn(isp->dev,
- "read HPLL from cck failed. Default to 1600 MHz.\n");
+ dev_warn(&pdev->dev, "read HPLL from cck failed. Default to 1600 MHz.\n");
}
break;
default:
- dev_err(&dev->dev, "un-supported IUNIT device\n");
+		dev_err(&pdev->dev, "unsupported IUNIT device\n");
err = -ENODEV;
goto atomisp_dev_alloc_fail;
}
- dev_info(&dev->dev, "ISP HPLL frequency base = %d MHz\n",
- isp->hpll_freq);
+ dev_info(&pdev->dev, "ISP HPLL frequency base = %d MHz\n", isp->hpll_freq);
isp->max_isr_latency = ATOMISP_MAX_ISR_LATENCY;
@@ -1718,30 +1693,28 @@ static int atomisp_pci_probe(struct pci_dev *dev,
isp->firmware = atomisp_load_firmware(isp);
if (!isp->firmware) {
err = -ENOENT;
- dev_dbg(&dev->dev, "Firmware load failed\n");
+ dev_dbg(&pdev->dev, "Firmware load failed\n");
goto load_fw_fail;
}
- err = sh_css_check_firmware_version(isp->dev,
- isp->firmware->data);
+ err = sh_css_check_firmware_version(isp->dev, isp->firmware->data);
if (err) {
- dev_dbg(&dev->dev, "Firmware version check failed\n");
+ dev_dbg(&pdev->dev, "Firmware version check failed\n");
goto fw_validation_fail;
}
} else {
- dev_info(&dev->dev, "Firmware load will be deferred\n");
+ dev_info(&pdev->dev, "Firmware load will be deferred\n");
}
- pci_set_master(dev);
- pci_set_drvdata(dev, isp);
+ pci_set_master(pdev);
- err = pci_enable_msi(dev);
+ err = pci_enable_msi(pdev);
if (err) {
- dev_err(&dev->dev, "Failed to enable msi (%d)\n", err);
+ dev_err(&pdev->dev, "Failed to enable msi (%d)\n", err);
goto enable_msi_fail;
}
- atomisp_msi_irq_init(isp, dev);
+ atomisp_msi_irq_init(isp);
cpu_latency_qos_add_request(&isp->pm_qos, PM_QOS_DEFAULT_VALUE);
@@ -1762,8 +1735,7 @@ static int atomisp_pci_probe(struct pci_dev *dev,
* Workaround for imbalance data eye issue which is observed
* on TNG B0.
*/
- pci_read_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
- &csi_afe_trim);
+ pci_read_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, &csi_afe_trim);
csi_afe_trim &= ~((MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
MRFLD_PCI_CSI1_HSRXCLKTRIM_SHIFT) |
(MRFLD_PCI_CSI_HSRXCLKTRIM_MASK <<
@@ -1776,20 +1748,18 @@ static int atomisp_pci_probe(struct pci_dev *dev,
MRFLD_PCI_CSI2_HSRXCLKTRIM_SHIFT) |
(MRFLD_PCI_CSI3_HSRXCLKTRIM <<
MRFLD_PCI_CSI3_HSRXCLKTRIM_SHIFT);
- pci_write_config_dword(dev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL,
- csi_afe_trim);
+ pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim);
}
err = atomisp_initialize_modules(isp);
if (err < 0) {
- dev_err(&dev->dev, "atomisp_initialize_modules (%d)\n", err);
+ dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err);
goto initialize_modules_fail;
}
err = atomisp_register_entities(isp);
if (err < 0) {
- dev_err(&dev->dev, "atomisp_register_entities failed (%d)\n",
- err);
+ dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err);
goto register_entities_fail;
}
err = atomisp_create_pads_links(isp);
@@ -1802,24 +1772,24 @@ static int atomisp_pci_probe(struct pci_dev *dev,
/* save the iunit context only once after all the values are init'ed. */
atomisp_save_iunit_reg(isp);
- pm_runtime_put_noidle(&dev->dev);
- pm_runtime_allow(&dev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
hmm_init_mem_stat(repool_pgnr, dypool_enable, dypool_pgnr);
err = hmm_pool_register(repool_pgnr, HMM_POOL_TYPE_RESERVED);
if (err) {
- dev_err(&dev->dev, "Failed to register reserved memory pool.\n");
+ dev_err(&pdev->dev, "Failed to register reserved memory pool.\n");
goto hmm_pool_fail;
}
/* Init ISP memory management */
hmm_init();
- err = devm_request_threaded_irq(&dev->dev, dev->irq,
+ err = devm_request_threaded_irq(&pdev->dev, pdev->irq,
atomisp_isr, atomisp_isr_thread,
IRQF_SHARED, "isp_irq", isp);
if (err) {
- dev_err(&dev->dev, "Failed to request irq (%d)\n", err);
+ dev_err(&pdev->dev, "Failed to request irq (%d)\n", err);
goto request_irq_fail;
}
@@ -1827,23 +1797,23 @@ static int atomisp_pci_probe(struct pci_dev *dev,
if (!defer_fw_load) {
err = atomisp_css_load_firmware(isp);
if (err) {
- dev_err(&dev->dev, "Failed to init css.\n");
+ dev_err(&pdev->dev, "Failed to init css.\n");
goto css_init_fail;
}
} else {
- dev_dbg(&dev->dev, "Skip css init.\n");
+ dev_dbg(&pdev->dev, "Skip css init.\n");
}
/* Clear FW image from memory */
release_firmware(isp->firmware);
isp->firmware = NULL;
isp->css_env.isp_css_fw.data = NULL;
- atomisp_drvfs_init(&dev->driver->driver, isp);
+ atomisp_drvfs_init(isp);
return 0;
css_init_fail:
- devm_free_irq(&dev->dev, dev->irq, isp);
+ devm_free_irq(&pdev->dev, pdev->irq, isp);
request_irq_fail:
hmm_cleanup();
hmm_pool_unregister(HMM_POOL_TYPE_RESERVED);
@@ -1856,8 +1826,8 @@ register_entities_fail:
atomisp_uninitialize_modules(isp);
initialize_modules_fail:
cpu_latency_qos_remove_request(&isp->pm_qos);
- atomisp_msi_irq_uninit(isp, dev);
- pci_disable_msi(dev);
+ atomisp_msi_irq_uninit(isp);
+ pci_disable_msi(pdev);
enable_msi_fail:
fw_validation_fail:
release_firmware(isp->firmware);
@@ -1869,35 +1839,34 @@ load_fw_fail:
* The following lines have been copied from atomisp suspend path
*/
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq = irq & 1 << INTR_IIR;
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- pci_read_config_dword(dev, PCI_INTERRUPT_CTRL, &irq);
+ pci_read_config_dword(pdev, PCI_INTERRUPT_CTRL, &irq);
irq &= ~(1 << INTR_IER);
- pci_write_config_dword(dev, PCI_INTERRUPT_CTRL, irq);
+ pci_write_config_dword(pdev, PCI_INTERRUPT_CTRL, irq);
- atomisp_msi_irq_uninit(isp, dev);
+ atomisp_msi_irq_uninit(isp);
atomisp_ospm_dphy_down(isp);
/* Address later when we worry about the ...field chips */
if (IS_ENABLED(CONFIG_PM) && atomisp_mrfld_power_down(isp))
- dev_err(&dev->dev, "Failed to switch off ISP\n");
+ dev_err(&pdev->dev, "Failed to switch off ISP\n");
atomisp_dev_alloc_fail:
- pcim_iounmap_regions(dev, 1 << ATOM_ISP_PCI_BAR);
+ pcim_iounmap_regions(pdev, 1 << ATOM_ISP_PCI_BAR);
ioremap_fail:
return err;
}
-static void atomisp_pci_remove(struct pci_dev *dev)
+static void atomisp_pci_remove(struct pci_dev *pdev)
{
- struct atomisp_device *isp = (struct atomisp_device *)
- pci_get_drvdata(dev);
+ struct atomisp_device *isp = pci_get_drvdata(pdev);
- dev_info(&dev->dev, "Removing atomisp driver\n");
+ dev_info(&pdev->dev, "Removing atomisp driver\n");
atomisp_drvfs_exit();
@@ -1906,11 +1875,11 @@ static void atomisp_pci_remove(struct pci_dev *dev)
ia_css_unload_firmware();
hmm_cleanup();
- pm_runtime_forbid(&dev->dev);
- pm_runtime_get_noresume(&dev->dev);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
cpu_latency_qos_remove_request(&isp->pm_qos);
- atomisp_msi_irq_uninit(isp, dev);
+ atomisp_msi_irq_uninit(isp);
atomisp_unregister_entities(isp);
destroy_workqueue(isp->wdt_work_queue);
diff --git a/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
index cf02737cf8d4..a9c881631f4a 100644
--- a/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
+++ b/drivers/staging/media/atomisp/pci/base/refcount/src/refcount.c
@@ -48,7 +48,7 @@ static struct ia_css_refcount_entry *refcount_find_entry(ia_css_ptr ptr,
return NULL;
if (!myrefcount.items) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
- "refcount_find_entry(): Ref count not initialized!\n");
+ "%s(): Ref count not initialized!\n", __func__);
return NULL;
}
@@ -73,12 +73,12 @@ int ia_css_refcount_init(uint32_t size)
if (size == 0) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_init(): Size of 0 for Ref count init!\n");
+ "%s(): Size of 0 for Ref count init!\n", __func__);
return -EINVAL;
}
if (myrefcount.items) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_init(): Ref count is already initialized\n");
+ "%s(): Ref count is already initialized\n", __func__);
return -EINVAL;
}
myrefcount.items =
@@ -99,7 +99,7 @@ void ia_css_refcount_uninit(void)
u32 i;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_uninit() entry\n");
+ "%s() entry\n", __func__);
for (i = 0; i < myrefcount.size; i++) {
/* driver verifier tool has issues with &arr[i]
and prefers arr + i; as these are actually equivalent
@@ -120,7 +120,7 @@ void ia_css_refcount_uninit(void)
myrefcount.items = NULL;
myrefcount.size = 0;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_uninit() leave\n");
+ "%s() leave\n", __func__);
}
ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
@@ -133,7 +133,7 @@ ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
entry = refcount_find_entry(ptr, false);
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_increment(%x) 0x%x\n", id, ptr);
+ "%s(%x) 0x%x\n", __func__, id, ptr);
if (!entry) {
entry = refcount_find_entry(ptr, true);
@@ -145,7 +145,7 @@ ia_css_ptr ia_css_refcount_increment(s32 id, ia_css_ptr ptr)
if (entry->id != id) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
- "ia_css_refcount_increment(): Ref count IDS do not match!\n");
+ "%s(): Ref count IDS do not match!\n", __func__);
return mmgr_NULL;
}
@@ -165,7 +165,7 @@ bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr)
struct ia_css_refcount_entry *entry;
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_decrement(%x) 0x%x\n", id, ptr);
+ "%s(%x) 0x%x\n", __func__, id, ptr);
if (ptr == mmgr_NULL)
return false;
@@ -175,7 +175,7 @@ bool ia_css_refcount_decrement(s32 id, ia_css_ptr ptr)
if (entry) {
if (entry->id != id) {
ia_css_debug_dtrace(IA_CSS_DEBUG_ERROR,
- "ia_css_refcount_decrement(): Ref count IDS do not match!\n");
+ "%s(): Ref count IDS do not match!\n", __func__);
return false;
}
if (entry->count > 0) {
@@ -225,8 +225,8 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
u32 count = 0;
assert(clear_func_ptr);
- ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "ia_css_refcount_clear(%x)\n",
- id);
+ ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE, "%s(%x)\n",
+ __func__, id);
for (i = 0; i < myrefcount.size; i++) {
/* driver verifier tool has issues with &arr[i]
@@ -236,14 +236,14 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
entry = myrefcount.items + i;
if ((entry->data != mmgr_NULL) && (entry->id == id)) {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_clear: %x: 0x%x\n",
+ "%s: %x: 0x%x\n", __func__,
id, entry->data);
if (clear_func_ptr) {
/* clear using provided function */
clear_func_ptr(entry->data);
} else {
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_clear: using hmm_free: no clear_func\n");
+ "%s: using hmm_free: no clear_func\n", __func__);
hmm_free(entry->data);
}
@@ -260,7 +260,7 @@ void ia_css_refcount_clear(s32 id, clear_func clear_func_ptr)
}
}
ia_css_debug_dtrace(IA_CSS_DEBUG_TRACE,
- "ia_css_refcount_clear(%x): cleared %d\n", id,
+ "%s(%x): cleared %d\n", __func__, id,
count);
}
diff --git a/drivers/staging/media/atomisp/pci/hive_types.h b/drivers/staging/media/atomisp/pci/hive_types.h
index addda9b81d7b..4b8a679fb672 100644
--- a/drivers/staging/media/atomisp/pci/hive_types.h
+++ b/drivers/staging/media/atomisp/pci/hive_types.h
@@ -52,32 +52,14 @@ typedef unsigned short hive_uint16;
typedef unsigned int hive_uint32;
typedef unsigned long long hive_uint64;
-/* by default assume 32 bit master port (both data and address) */
-#ifndef HRT_DATA_WIDTH
-#define HRT_DATA_WIDTH 32
-#endif
-#ifndef HRT_ADDRESS_WIDTH
-#define HRT_ADDRESS_WIDTH 32
-#endif
-
+#define HRT_DATA_WIDTH 32
+#define HRT_ADDRESS_WIDTH 64
#define HRT_DATA_BYTES (HRT_DATA_WIDTH / 8)
#define HRT_ADDRESS_BYTES (HRT_ADDRESS_WIDTH / 8)
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
-#if HRT_DATA_WIDTH == 64
-typedef hive_uint64 hrt_data;
-#elif HRT_DATA_WIDTH == 32
typedef hive_uint32 hrt_data;
-#else
-#error data width not supported
-#endif
-
-#if HRT_ADDRESS_WIDTH == 64
typedef hive_uint64 hrt_address;
-#elif HRT_ADDRESS_WIDTH == 32
-typedef hive_uint32 hrt_address;
-#else
-#error adddres width not supported
-#endif
/* use 64 bit addresses in simulation, where possible */
typedef hive_uint64 hive_sim_address;
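With the fallback #ifdef machinery gone, the widths are fixed at 32-bit data and 64-bit addresses, so hrt_data is always hive_uint32 and hrt_address always hive_uint64. A minimal compile-time sketch of the derived sizes (C11 _Static_assert, purely illustrative and not part of the patch):

	_Static_assert(HRT_DATA_BYTES == 4, "32-bit data bus -> 4 bytes");
	_Static_assert(HRT_ADDRESS_BYTES == 8, "64-bit address bus -> 8 bytes");
	_Static_assert(SIZEOF_HRT_REG == 4, "register size follows the data width");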
diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm.c b/drivers/staging/media/atomisp/pci/hmm/hmm.c
index 42fef1779862..2bd39b4939f1 100644
--- a/drivers/staging/media/atomisp/pci/hmm/hmm.c
+++ b/drivers/staging/media/atomisp/pci/hmm/hmm.c
@@ -735,11 +735,11 @@ ia_css_ptr hmm_host_vaddr_to_hrt_vaddr(const void *ptr)
void hmm_show_mem_stat(const char *func, const int line)
{
- trace_printk("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
- hmm_mem_stat.tol_cnt,
- hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
- hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
- hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
+ pr_info("tol_cnt=%d usr_size=%d res_size=%d res_cnt=%d sys_size=%d dyc_thr=%d dyc_size=%d.\n",
+ hmm_mem_stat.tol_cnt,
+ hmm_mem_stat.usr_size, hmm_mem_stat.res_size,
+ hmm_mem_stat.res_cnt, hmm_mem_stat.sys_size,
+ hmm_mem_stat.dyc_thr, hmm_mem_stat.dyc_size);
}
void hmm_init_mem_stat(int res_pgnr, int dyc_en, int dyc_pgnr)
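trace_printk() writes to the ftrace ring buffer and is meant as a temporary debugging aid (the kernel warns loudly when it is left compiled in), so switching these counters to pr_info() is appropriate for code that ships. A sketch of a typical call site, assuming the helper keeps its (func, line) signature:

	hmm_show_mem_stat(__func__, __LINE__);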
diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_global.h b/drivers/staging/media/atomisp/pci/isp2400_system_global.h
index d87ddf1d2fe9..74fff465e8e8 100644
--- a/drivers/staging/media/atomisp/pci/isp2400_system_global.h
+++ b/drivers/staging/media/atomisp/pci/isp2400_system_global.h
@@ -13,306 +13,4 @@
* more details.
*/
-#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
-#define __SYSTEM_GLOBAL_H_INCLUDED__
-
-#include <hive_isp_css_defs.h>
-#include <type_support.h>
-
-/*
- * The longest allowed (uninteruptible) bus transfer, does not
- * take stalling into account
- */
-#define HIVE_ISP_MAX_BURST_LENGTH 1024
-
-/*
- * Maximum allowed burst length in words for the ISP DMA
- */
-#define ISP_DMA_MAX_BURST_LENGTH 128
-
-/*
- * Create a list of HAS and IS properties that defines the system
- *
- * The configuration assumes the following
- * - The system is hetereogeneous; Multiple cells and devices classes
- * - The cell and device instances are homogeneous, each device type
- * belongs to the same class
- * - Device instances supporting a subset of the class capabilities are
- * allowed
- *
- * We could manage different device classes through the enumerated
- * lists (C) or the use of classes (C++), but that is presently not
- * fully supported
- *
- * N.B. the 3 input formatters are of 2 different classess
- */
-
#define USE_INPUT_SYSTEM_VERSION_2
-
-#define HAS_MMU_VERSION_2
-#define HAS_DMA_VERSION_2
-#define HAS_GDC_VERSION_2
-#define HAS_VAMEM_VERSION_2
-#define HAS_HMEM_VERSION_1
-#define HAS_BAMEM_VERSION_2
-#define HAS_IRQ_VERSION_2
-#define HAS_IRQ_MAP_VERSION_2
-#define HAS_INPUT_FORMATTER_VERSION_2
-/* 2401: HAS_INPUT_SYSTEM_VERSION_2401 */
-#define HAS_INPUT_SYSTEM_VERSION_2
-#define HAS_BUFFERED_SENSOR
-#define HAS_FIFO_MONITORS_VERSION_2
-/* #define HAS_GP_REGS_VERSION_2 */
-#define HAS_GP_DEVICE_VERSION_2
-#define HAS_GPIO_VERSION_1
-#define HAS_TIMED_CTRL_VERSION_1
-#define HAS_RX_VERSION_2
-
-#define DMA_DDR_TO_VAMEM_WORKAROUND
-#define DMA_DDR_TO_HMEM_WORKAROUND
-
-/*
- * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
- */
-#define HRT_VADDRESS_WIDTH 32
-//#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property*/
-#define HRT_DATA_WIDTH 32
-
-#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
-#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
-
-/* The main bus connecting all devices */
-#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
-#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
-
-/* per-frame parameter handling support */
-#define SH_CSS_ENABLE_PER_FRAME_PARAMS
-
-typedef u32 hrt_bus_align_t;
-
-/*
- * Enumerate the devices, device access through the API is by ID, through the DLI by address
- * The enumerator terminators are used to size the wiring arrays and as an exception value.
- */
-typedef enum {
- DDR0_ID = 0,
- N_DDR_ID
-} ddr_ID_t;
-
-typedef enum {
- ISP0_ID = 0,
- N_ISP_ID
-} isp_ID_t;
-
-typedef enum {
- SP0_ID = 0,
- N_SP_ID
-} sp_ID_t;
-
-typedef enum {
- MMU0_ID = 0,
- MMU1_ID,
- N_MMU_ID
-} mmu_ID_t;
-
-typedef enum {
- DMA0_ID = 0,
- N_DMA_ID
-} dma_ID_t;
-
-typedef enum {
- GDC0_ID = 0,
- GDC1_ID,
- N_GDC_ID
-} gdc_ID_t;
-
-#define N_GDC_ID_CPP 2 // this extra define is needed because we want to use it also in the preprocessor, and that doesn't work with enums.
-
-typedef enum {
- VAMEM0_ID = 0,
- VAMEM1_ID,
- VAMEM2_ID,
- N_VAMEM_ID
-} vamem_ID_t;
-
-typedef enum {
- BAMEM0_ID = 0,
- N_BAMEM_ID
-} bamem_ID_t;
-
-typedef enum {
- HMEM0_ID = 0,
- N_HMEM_ID
-} hmem_ID_t;
-
-/*
-typedef enum {
- IRQ0_ID = 0,
- N_IRQ_ID
-} irq_ID_t;
-*/
-
-typedef enum {
- IRQ0_ID = 0, // GP IRQ block
- IRQ1_ID, // Input formatter
- IRQ2_ID, // input system
- IRQ3_ID, // input selector
- N_IRQ_ID
-} irq_ID_t;
-
-typedef enum {
- FIFO_MONITOR0_ID = 0,
- N_FIFO_MONITOR_ID
-} fifo_monitor_ID_t;
-
-/*
- * Deprecated: Since all gp_reg instances are different
- * and put in the address maps of other devices we cannot
- * enumerate them as that assumes the instrances are the
- * same.
- *
- * We define a single GP_DEVICE containing all gp_regs
- * w.r.t. a single base address
- *
-typedef enum {
- GP_REGS0_ID = 0,
- N_GP_REGS_ID
-} gp_regs_ID_t;
- */
-typedef enum {
- GP_DEVICE0_ID = 0,
- N_GP_DEVICE_ID
-} gp_device_ID_t;
-
-typedef enum {
- GP_TIMER0_ID = 0,
- GP_TIMER1_ID,
- GP_TIMER2_ID,
- GP_TIMER3_ID,
- GP_TIMER4_ID,
- GP_TIMER5_ID,
- GP_TIMER6_ID,
- GP_TIMER7_ID,
- N_GP_TIMER_ID
-} gp_timer_ID_t;
-
-typedef enum {
- GPIO0_ID = 0,
- N_GPIO_ID
-} gpio_ID_t;
-
-typedef enum {
- TIMED_CTRL0_ID = 0,
- N_TIMED_CTRL_ID
-} timed_ctrl_ID_t;
-
-typedef enum {
- INPUT_FORMATTER0_ID = 0,
- INPUT_FORMATTER1_ID,
- INPUT_FORMATTER2_ID,
- INPUT_FORMATTER3_ID,
- N_INPUT_FORMATTER_ID
-} input_formatter_ID_t;
-
-/* The IF RST is outside the IF */
-#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
-#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
-#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
-#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
-
-#define INPUT_FORMATTER0_SRST_MASK 0x0001
-#define INPUT_FORMATTER1_SRST_MASK 0x0002
-#define INPUT_FORMATTER2_SRST_MASK 0x0004
-#define INPUT_FORMATTER3_SRST_MASK 0x0008
-
-typedef enum {
- INPUT_SYSTEM0_ID = 0,
- N_INPUT_SYSTEM_ID
-} input_system_ID_t;
-
-typedef enum {
- RX0_ID = 0,
- N_RX_ID
-} rx_ID_t;
-
-enum mipi_port_id {
- MIPI_PORT0_ID = 0,
- MIPI_PORT1_ID,
- MIPI_PORT2_ID,
- N_MIPI_PORT_ID
-};
-
-#define N_RX_CHANNEL_ID 4
-
-/* Generic port enumeration with an internal port type ID */
-typedef enum {
- CSI_PORT0_ID = 0,
- CSI_PORT1_ID,
- CSI_PORT2_ID,
- TPG_PORT0_ID,
- PRBS_PORT0_ID,
- FIFO_PORT0_ID,
- MEMORY_PORT0_ID,
- N_INPUT_PORT_ID
-} input_port_ID_t;
-
-typedef enum {
- CAPTURE_UNIT0_ID = 0,
- CAPTURE_UNIT1_ID,
- CAPTURE_UNIT2_ID,
- ACQUISITION_UNIT0_ID,
- DMA_UNIT0_ID,
- CTRL_UNIT0_ID,
- GPREGS_UNIT0_ID,
- FIFO_UNIT0_ID,
- IRQ_UNIT0_ID,
- N_SUB_SYSTEM_ID
-} sub_system_ID_t;
-
-#define N_CAPTURE_UNIT_ID 3
-#define N_ACQUISITION_UNIT_ID 1
-#define N_CTRL_UNIT_ID 1
-
-enum ia_css_isp_memories {
- IA_CSS_ISP_PMEM0 = 0,
- IA_CSS_ISP_DMEM0,
- IA_CSS_ISP_VMEM0,
- IA_CSS_ISP_VAMEM0,
- IA_CSS_ISP_VAMEM1,
- IA_CSS_ISP_VAMEM2,
- IA_CSS_ISP_HMEM0,
- IA_CSS_SP_DMEM0,
- IA_CSS_DDR,
- N_IA_CSS_MEMORIES
-};
-
-#define IA_CSS_NUM_MEMORIES 9
-/* For driver compatibility */
-#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
-#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
-
-#if 0
-typedef enum {
- dev_chn, /* device channels, external resource */
- ext_mem, /* external memories */
- int_mem, /* internal memories */
- int_chn /* internal channels, user defined */
-} resource_type_t;
-
-/* if this enum is extended with other memory resources, pls also extend the function resource_to_memptr() */
-typedef enum {
- vied_nci_dev_chn_dma_ext0,
- int_mem_vmem0,
- int_mem_dmem0
-} resource_id_t;
-
-/* enum listing the different memories within a program group.
- This enum is used in the mem_ptr_t type */
-typedef enum {
- buf_mem_invalid = 0,
- buf_mem_vmem_prog0,
- buf_mem_dmem_prog0
-} buf_mem_t;
-
-#endif
-#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2400_system_local.h b/drivers/staging/media/atomisp/pci/isp2400_system_local.h
deleted file mode 100644
index 675b8e5bdcc1..000000000000
--- a/drivers/staging/media/atomisp/pci/isp2400_system_local.h
+++ /dev/null
@@ -1,321 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2010-2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __SYSTEM_LOCAL_H_INCLUDED__
-#define __SYSTEM_LOCAL_H_INCLUDED__
-
-#ifdef HRT_ISP_CSS_CUSTOM_HOST
-#ifndef HRT_USE_VIR_ADDRS
-#define HRT_USE_VIR_ADDRS
-#endif
-#endif
-
-#include "system_global.h"
-
-/* HRT assumes 32 by default (see Linux/include/hive_types.h), overrule it in case it is different */
-#undef HRT_ADDRESS_WIDTH
-#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
-
-/* This interface is deprecated */
-#include "hive_types.h"
-
-/*
- * Cell specific address maps
- */
-#if HRT_ADDRESS_WIDTH == 64
-
-#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
-
-/* DDR */
-static const hrt_address DDR_BASE[N_DDR_ID] = {
- (hrt_address)0x0000000120000000ULL
-};
-
-/* ISP */
-static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
- (hrt_address)0x0000000000020000ULL
-};
-
-static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
- (hrt_address)0x0000000000200000ULL
-};
-
-static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
- (hrt_address)0x0000000000100000ULL
-};
-
-static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
- (hrt_address)0x00000000001C0000ULL,
- (hrt_address)0x00000000001D0000ULL,
- (hrt_address)0x00000000001E0000ULL
-};
-
-static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
- (hrt_address)0x00000000001F0000ULL
-};
-
-/* SP */
-static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
- (hrt_address)0x0000000000010000ULL
-};
-
-static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
- (hrt_address)0x0000000000300000ULL
-};
-
-static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
- (hrt_address)0x00000000000B0000ULL
-};
-
-/* MMU */
-/*
- * MMU0_ID: The data MMU
- * MMU1_ID: The icache MMU
- */
-static const hrt_address MMU_BASE[N_MMU_ID] = {
- (hrt_address)0x0000000000070000ULL,
- (hrt_address)0x00000000000A0000ULL
-};
-
-/* DMA */
-static const hrt_address DMA_BASE[N_DMA_ID] = {
- (hrt_address)0x0000000000040000ULL
-};
-
-/* IRQ */
-static const hrt_address IRQ_BASE[N_IRQ_ID] = {
- (hrt_address)0x0000000000000500ULL,
- (hrt_address)0x0000000000030A00ULL,
- (hrt_address)0x000000000008C000ULL,
- (hrt_address)0x0000000000090200ULL
-};
-
-/*
- (hrt_address)0x0000000000000500ULL};
- */
-
-/* GDC */
-static const hrt_address GDC_BASE[N_GDC_ID] = {
- (hrt_address)0x0000000000050000ULL,
- (hrt_address)0x0000000000060000ULL
-};
-
-/* FIFO_MONITOR (not a subset of GP_DEVICE) */
-static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
- (hrt_address)0x0000000000000000ULL
-};
-
-/*
-static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
- (hrt_address)0x0000000000000000ULL};
-
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- (hrt_address)0x0000000000090000ULL};
-*/
-
-/* GP_DEVICE (single base for all separate GP_REG instances) */
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- (hrt_address)0x0000000000000000ULL
-};
-
-/*GP TIMER , all timer registers are inter-twined,
- * so, having multiple base addresses for
- * different timers does not help*/
-static const hrt_address GP_TIMER_BASE =
- (hrt_address)0x0000000000000600ULL;
-/* GPIO */
-static const hrt_address GPIO_BASE[N_GPIO_ID] = {
- (hrt_address)0x0000000000000400ULL
-};
-
-/* TIMED_CTRL */
-static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
- (hrt_address)0x0000000000000100ULL
-};
-
-/* INPUT_FORMATTER */
-static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
- (hrt_address)0x0000000000030000ULL,
- (hrt_address)0x0000000000030200ULL,
- (hrt_address)0x0000000000030400ULL,
- (hrt_address)0x0000000000030600ULL
-}; /* memcpy() */
-
-/* INPUT_SYSTEM */
-static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
- (hrt_address)0x0000000000080000ULL
-};
-
-/* (hrt_address)0x0000000000081000ULL, */ /* capture A */
-/* (hrt_address)0x0000000000082000ULL, */ /* capture B */
-/* (hrt_address)0x0000000000083000ULL, */ /* capture C */
-/* (hrt_address)0x0000000000084000ULL, */ /* Acquisition */
-/* (hrt_address)0x0000000000085000ULL, */ /* DMA */
-/* (hrt_address)0x0000000000089000ULL, */ /* ctrl */
-/* (hrt_address)0x000000000008A000ULL, */ /* GP regs */
-/* (hrt_address)0x000000000008B000ULL, */ /* FIFO */
-/* (hrt_address)0x000000000008C000ULL, */ /* IRQ */
-
-/* RX, the MIPI lane control regs start at offset 0 */
-static const hrt_address RX_BASE[N_RX_ID] = {
- (hrt_address)0x0000000000080100ULL
-};
-
-#elif HRT_ADDRESS_WIDTH == 32
-
-#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
-
-/* DDR : Attention, this value not defined in 32-bit */
-static const hrt_address DDR_BASE[N_DDR_ID] = {
- (hrt_address)0x00000000UL
-};
-
-/* ISP */
-static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
- (hrt_address)0x00020000UL
-};
-
-static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
- (hrt_address)0x00200000UL
-};
-
-static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
- (hrt_address)0x100000UL
-};
-
-static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
- (hrt_address)0xffffffffUL,
- (hrt_address)0xffffffffUL,
- (hrt_address)0xffffffffUL
-};
-
-static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
- (hrt_address)0xffffffffUL
-};
-
-/* SP */
-static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
- (hrt_address)0x00010000UL
-};
-
-static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
- (hrt_address)0x00300000UL
-};
-
-static const hrt_address SP_PMEM_BASE[N_SP_ID] = {
- (hrt_address)0x000B0000UL
-};
-
-/* MMU */
-/*
- * MMU0_ID: The data MMU
- * MMU1_ID: The icache MMU
- */
-static const hrt_address MMU_BASE[N_MMU_ID] = {
- (hrt_address)0x00070000UL,
- (hrt_address)0x000A0000UL
-};
-
-/* DMA */
-static const hrt_address DMA_BASE[N_DMA_ID] = {
- (hrt_address)0x00040000UL
-};
-
-/* IRQ */
-static const hrt_address IRQ_BASE[N_IRQ_ID] = {
- (hrt_address)0x00000500UL,
- (hrt_address)0x00030A00UL,
- (hrt_address)0x0008C000UL,
- (hrt_address)0x00090200UL
-};
-
-/*
- (hrt_address)0x00000500UL};
- */
-
-/* GDC */
-static const hrt_address GDC_BASE[N_GDC_ID] = {
- (hrt_address)0x00050000UL,
- (hrt_address)0x00060000UL
-};
-
-/* FIFO_MONITOR (not a subset of GP_DEVICE) */
-static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
- (hrt_address)0x00000000UL
-};
-
-/*
-static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
- (hrt_address)0x00000000UL};
-
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- (hrt_address)0x00090000UL};
-*/
-
-/* GP_DEVICE (single base for all separate GP_REG instances) */
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- (hrt_address)0x00000000UL
-};
-
-/*GP TIMER , all timer registers are inter-twined,
- * so, having multiple base addresses for
- * different timers does not help*/
-static const hrt_address GP_TIMER_BASE =
- (hrt_address)0x00000600UL;
-
-/* GPIO */
-static const hrt_address GPIO_BASE[N_GPIO_ID] = {
- (hrt_address)0x00000400UL
-};
-
-/* TIMED_CTRL */
-static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
- (hrt_address)0x00000100UL
-};
-
-/* INPUT_FORMATTER */
-static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
- (hrt_address)0x00030000UL,
- (hrt_address)0x00030200UL,
- (hrt_address)0x00030400UL
-};
-
-/* (hrt_address)0x00030600UL, */ /* memcpy() */
-
-/* INPUT_SYSTEM */
-static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
- (hrt_address)0x00080000UL
-};
-
-/* (hrt_address)0x00081000UL, */ /* capture A */
-/* (hrt_address)0x00082000UL, */ /* capture B */
-/* (hrt_address)0x00083000UL, */ /* capture C */
-/* (hrt_address)0x00084000UL, */ /* Acquisition */
-/* (hrt_address)0x00085000UL, */ /* DMA */
-/* (hrt_address)0x00089000UL, */ /* ctrl */
-/* (hrt_address)0x0008A000UL, */ /* GP regs */
-/* (hrt_address)0x0008B000UL, */ /* FIFO */
-/* (hrt_address)0x0008C000UL, */ /* IRQ */
-
-/* RX, the MIPI lane control regs start at offset 0 */
-static const hrt_address RX_BASE[N_RX_ID] = {
- (hrt_address)0x00080100UL
-};
-
-#else
-#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
-#endif
-
-#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_global.h b/drivers/staging/media/atomisp/pci/isp2401_system_global.h
index 8bb2a956f983..27cd2535bab8 100644
--- a/drivers/staging/media/atomisp/pci/isp2401_system_global.h
+++ b/drivers/staging/media/atomisp/pci/isp2401_system_global.h
@@ -13,415 +13,7 @@
* more details.
*/
-#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
-#define __SYSTEM_GLOBAL_H_INCLUDED__
-
-#include <hive_isp_css_defs.h>
-#include <type_support.h>
-
-/*
- * The longest allowed (uninteruptible) bus transfer, does not
- * take stalling into account
- */
-#define HIVE_ISP_MAX_BURST_LENGTH 1024
-
-/*
- * Maximum allowed burst length in words for the ISP DMA
- * This value is set to 2 to prevent the ISP DMA from blocking
- * the bus for too long; as the input system can only buffer
- * 2 lines on Moorefield and Cherrytrail, the input system buffers
- * may overflow if blocked for too long (BZ 2726).
- */
-#define ISP_DMA_MAX_BURST_LENGTH 2
-
-/*
- * Create a list of HAS and IS properties that defines the system
- *
- * The configuration assumes the following
- * - The system is hetereogeneous; Multiple cells and devices classes
- * - The cell and device instances are homogeneous, each device type
- * belongs to the same class
- * - Device instances supporting a subset of the class capabilities are
- * allowed
- *
- * We could manage different device classes through the enumerated
- * lists (C) or the use of classes (C++), but that is presently not
- * fully supported
- *
- * N.B. the 3 input formatters are of 2 different classess
- */
-
+#define HAS_NO_INPUT_FORMATTER
#define USE_INPUT_SYSTEM_VERSION_2401
-
-#define HAS_MMU_VERSION_2
-#define HAS_DMA_VERSION_2
-#define HAS_GDC_VERSION_2
-#define HAS_VAMEM_VERSION_2
-#define HAS_HMEM_VERSION_1
-#define HAS_BAMEM_VERSION_2
-#define HAS_IRQ_VERSION_2
-#define HAS_IRQ_MAP_VERSION_2
-#define HAS_INPUT_FORMATTER_VERSION_2
-/* 2401: HAS_INPUT_SYSTEM_VERSION_3 */
-/* 2400: HAS_INPUT_SYSTEM_VERSION_2 */
-#define HAS_INPUT_SYSTEM_VERSION_2
#define HAS_INPUT_SYSTEM_VERSION_2401
-#define HAS_BUFFERED_SENSOR
-#define HAS_FIFO_MONITORS_VERSION_2
-/* #define HAS_GP_REGS_VERSION_2 */
-#define HAS_GP_DEVICE_VERSION_2
-#define HAS_GPIO_VERSION_1
-#define HAS_TIMED_CTRL_VERSION_1
-#define HAS_RX_VERSION_2
-#define HAS_NO_INPUT_FORMATTER
-/*#define HAS_NO_PACKED_RAW_PIXELS*/
-/*#define HAS_NO_DVS_6AXIS_CONFIG_UPDATE*/
-
-#define DMA_DDR_TO_VAMEM_WORKAROUND
-#define DMA_DDR_TO_HMEM_WORKAROUND
-
-/*
- * Semi global. "HRT" is accessible from SP, but
- * the HRT types do not fully apply
- */
-#define HRT_VADDRESS_WIDTH 32
-/* Surprise, this is a local property*/
-/*#define HRT_ADDRESS_WIDTH 64 */
-#define HRT_DATA_WIDTH 32
-
-#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
-#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
-
-/* The main bus connecting all devices */
-#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
-#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
-
#define CSI2P_DISABLE_ISYS2401_ONLINE_MODE
-
-/* per-frame parameter handling support */
-#define SH_CSS_ENABLE_PER_FRAME_PARAMS
-
-typedef u32 hrt_bus_align_t;
-
-/*
- * Enumerate the devices, device access through the API is by ID,
- * through the DLI by address. The enumerator terminators are used
- * to size the wiring arrays and as an exception value.
- */
-typedef enum {
- DDR0_ID = 0,
- N_DDR_ID
-} ddr_ID_t;
-
-typedef enum {
- ISP0_ID = 0,
- N_ISP_ID
-} isp_ID_t;
-
-typedef enum {
- SP0_ID = 0,
- N_SP_ID
-} sp_ID_t;
-
-typedef enum {
- MMU0_ID = 0,
- MMU1_ID,
- N_MMU_ID
-} mmu_ID_t;
-
-typedef enum {
- DMA0_ID = 0,
- N_DMA_ID
-} dma_ID_t;
-
-typedef enum {
- GDC0_ID = 0,
- GDC1_ID,
- N_GDC_ID
-} gdc_ID_t;
-
-/* this extra define is needed because we want to use it also
- in the preprocessor, and that doesn't work with enums.
- */
-#define N_GDC_ID_CPP 2
-
-typedef enum {
- VAMEM0_ID = 0,
- VAMEM1_ID,
- VAMEM2_ID,
- N_VAMEM_ID
-} vamem_ID_t;
-
-typedef enum {
- BAMEM0_ID = 0,
- N_BAMEM_ID
-} bamem_ID_t;
-
-typedef enum {
- HMEM0_ID = 0,
- N_HMEM_ID
-} hmem_ID_t;
-
-typedef enum {
- ISYS_IRQ0_ID = 0, /* port a */
- ISYS_IRQ1_ID, /* port b */
- ISYS_IRQ2_ID, /* port c */
- N_ISYS_IRQ_ID
-} isys_irq_ID_t;
-
-typedef enum {
- IRQ0_ID = 0, /* GP IRQ block */
- IRQ1_ID, /* Input formatter */
- IRQ2_ID, /* input system */
- IRQ3_ID, /* input selector */
- N_IRQ_ID
-} irq_ID_t;
-
-typedef enum {
- FIFO_MONITOR0_ID = 0,
- N_FIFO_MONITOR_ID
-} fifo_monitor_ID_t;
-
-/*
- * Deprecated: Since all gp_reg instances are different
- * and put in the address maps of other devices we cannot
- * enumerate them as that assumes the instrances are the
- * same.
- *
- * We define a single GP_DEVICE containing all gp_regs
- * w.r.t. a single base address
- *
-typedef enum {
- GP_REGS0_ID = 0,
- N_GP_REGS_ID
-} gp_regs_ID_t;
- */
-typedef enum {
- GP_DEVICE0_ID = 0,
- N_GP_DEVICE_ID
-} gp_device_ID_t;
-
-typedef enum {
- GP_TIMER0_ID = 0,
- GP_TIMER1_ID,
- GP_TIMER2_ID,
- GP_TIMER3_ID,
- GP_TIMER4_ID,
- GP_TIMER5_ID,
- GP_TIMER6_ID,
- GP_TIMER7_ID,
- N_GP_TIMER_ID
-} gp_timer_ID_t;
-
-typedef enum {
- GPIO0_ID = 0,
- N_GPIO_ID
-} gpio_ID_t;
-
-typedef enum {
- TIMED_CTRL0_ID = 0,
- N_TIMED_CTRL_ID
-} timed_ctrl_ID_t;
-
-typedef enum {
- INPUT_FORMATTER0_ID = 0,
- INPUT_FORMATTER1_ID,
- INPUT_FORMATTER2_ID,
- INPUT_FORMATTER3_ID,
- N_INPUT_FORMATTER_ID
-} input_formatter_ID_t;
-
-/* The IF RST is outside the IF */
-#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
-#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
-#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
-#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
-
-#define INPUT_FORMATTER0_SRST_MASK 0x0001
-#define INPUT_FORMATTER1_SRST_MASK 0x0002
-#define INPUT_FORMATTER2_SRST_MASK 0x0004
-#define INPUT_FORMATTER3_SRST_MASK 0x0008
-
-typedef enum {
- INPUT_SYSTEM0_ID = 0,
- N_INPUT_SYSTEM_ID
-} input_system_ID_t;
-
-typedef enum {
- RX0_ID = 0,
- N_RX_ID
-} rx_ID_t;
-
-enum mipi_port_id {
- MIPI_PORT0_ID = 0,
- MIPI_PORT1_ID,
- MIPI_PORT2_ID,
- N_MIPI_PORT_ID
-};
-
-#define N_RX_CHANNEL_ID 4
-
-/* Generic port enumeration with an internal port type ID */
-typedef enum {
- CSI_PORT0_ID = 0,
- CSI_PORT1_ID,
- CSI_PORT2_ID,
- TPG_PORT0_ID,
- PRBS_PORT0_ID,
- FIFO_PORT0_ID,
- MEMORY_PORT0_ID,
- N_INPUT_PORT_ID
-} input_port_ID_t;
-
-typedef enum {
- CAPTURE_UNIT0_ID = 0,
- CAPTURE_UNIT1_ID,
- CAPTURE_UNIT2_ID,
- ACQUISITION_UNIT0_ID,
- DMA_UNIT0_ID,
- CTRL_UNIT0_ID,
- GPREGS_UNIT0_ID,
- FIFO_UNIT0_ID,
- IRQ_UNIT0_ID,
- N_SUB_SYSTEM_ID
-} sub_system_ID_t;
-
-#define N_CAPTURE_UNIT_ID 3
-#define N_ACQUISITION_UNIT_ID 1
-#define N_CTRL_UNIT_ID 1
-
-/*
- * Input-buffer Controller.
- */
-typedef enum {
- IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
- IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
- IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */
- N_IBUF_CTRL_ID
-} ibuf_ctrl_ID_t;
-/* end of Input-buffer Controller */
-
-/*
- * Stream2MMIO.
- */
-typedef enum {
- STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
- STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
- STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
- N_STREAM2MMIO_ID
-} stream2mmio_ID_t;
-
-typedef enum {
- /*
- * Stream2MMIO 0 has 8 SIDs that are indexed by
- * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
- *
- * Stream2MMIO 1 has 4 SIDs that are indexed by
- * [STREAM2MMIO_SID0_ID...TREAM2MMIO_SID3_ID].
- *
- * Stream2MMIO 2 has 4 SIDs that are indexed by
- * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
- */
- STREAM2MMIO_SID0_ID = 0,
- STREAM2MMIO_SID1_ID,
- STREAM2MMIO_SID2_ID,
- STREAM2MMIO_SID3_ID,
- STREAM2MMIO_SID4_ID,
- STREAM2MMIO_SID5_ID,
- STREAM2MMIO_SID6_ID,
- STREAM2MMIO_SID7_ID,
- N_STREAM2MMIO_SID_ID
-} stream2mmio_sid_ID_t;
-/* end of Stream2MMIO */
-
-/**
- * Input System 2401: CSI-MIPI recevier.
- */
-typedef enum {
- CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
- CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
- CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
- N_CSI_RX_BACKEND_ID
-} csi_rx_backend_ID_t;
-
-typedef enum {
- CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
- CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
- CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
-#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1)
-} csi_rx_frontend_ID_t;
-
-typedef enum {
- CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
- CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
- CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
- CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
- N_CSI_RX_DLANE_ID
-} csi_rx_fe_dlane_ID_t;
-/* end of CSI-MIPI receiver */
-
-typedef enum {
- ISYS2401_DMA0_ID = 0,
- N_ISYS2401_DMA_ID
-} isys2401_dma_ID_t;
-
-/**
- * Pixel-generator. ("system_global.h")
- */
-typedef enum {
- PIXELGEN0_ID = 0,
- PIXELGEN1_ID,
- PIXELGEN2_ID,
- N_PIXELGEN_ID
-} pixelgen_ID_t;
-/* end of pixel-generator. ("system_global.h") */
-
-typedef enum {
- INPUT_SYSTEM_CSI_PORT0_ID = 0,
- INPUT_SYSTEM_CSI_PORT1_ID,
- INPUT_SYSTEM_CSI_PORT2_ID,
-
- INPUT_SYSTEM_PIXELGEN_PORT0_ID,
- INPUT_SYSTEM_PIXELGEN_PORT1_ID,
- INPUT_SYSTEM_PIXELGEN_PORT2_ID,
-
- N_INPUT_SYSTEM_INPUT_PORT_ID
-} input_system_input_port_ID_t;
-
-#define N_INPUT_SYSTEM_CSI_PORT 3
-
-typedef enum {
- ISYS2401_DMA_CHANNEL_0 = 0,
- ISYS2401_DMA_CHANNEL_1,
- ISYS2401_DMA_CHANNEL_2,
- ISYS2401_DMA_CHANNEL_3,
- ISYS2401_DMA_CHANNEL_4,
- ISYS2401_DMA_CHANNEL_5,
- ISYS2401_DMA_CHANNEL_6,
- ISYS2401_DMA_CHANNEL_7,
- ISYS2401_DMA_CHANNEL_8,
- ISYS2401_DMA_CHANNEL_9,
- ISYS2401_DMA_CHANNEL_10,
- ISYS2401_DMA_CHANNEL_11,
- N_ISYS2401_DMA_CHANNEL
-} isys2401_dma_channel;
-
-enum ia_css_isp_memories {
- IA_CSS_ISP_PMEM0 = 0,
- IA_CSS_ISP_DMEM0,
- IA_CSS_ISP_VMEM0,
- IA_CSS_ISP_VAMEM0,
- IA_CSS_ISP_VAMEM1,
- IA_CSS_ISP_VAMEM2,
- IA_CSS_ISP_HMEM0,
- IA_CSS_SP_DMEM0,
- IA_CSS_DDR,
- N_IA_CSS_MEMORIES
-};
-
-#define IA_CSS_NUM_MEMORIES 9
-/* For driver compatibility */
-#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
-#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
-
-#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/isp2401_system_local.h b/drivers/staging/media/atomisp/pci/isp2401_system_local.h
deleted file mode 100644
index b09f8faadb13..000000000000
--- a/drivers/staging/media/atomisp/pci/isp2401_system_local.h
+++ /dev/null
@@ -1,402 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Support for Intel Camera Imaging ISP subsystem.
- * Copyright (c) 2015, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- */
-
-#ifndef __SYSTEM_LOCAL_H_INCLUDED__
-#define __SYSTEM_LOCAL_H_INCLUDED__
-
-#ifdef HRT_ISP_CSS_CUSTOM_HOST
-#ifndef HRT_USE_VIR_ADDRS
-#define HRT_USE_VIR_ADDRS
-#endif
-#endif
-
-#include "system_global.h"
-
-#define HRT_ADDRESS_WIDTH 64 /* Surprise, this is a local property */
-
-/* This interface is deprecated */
-#include "hive_types.h"
-
-/*
- * Cell specific address maps
- */
-#if HRT_ADDRESS_WIDTH == 64
-
-#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
-
-/* DDR */
-static const hrt_address DDR_BASE[N_DDR_ID] = {
- 0x0000000120000000ULL
-};
-
-/* ISP */
-static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
- 0x0000000000020000ULL
-};
-
-static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
- 0x0000000000200000ULL
-};
-
-static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
- 0x0000000000100000ULL
-};
-
-static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
- 0x00000000001C0000ULL,
- 0x00000000001D0000ULL,
- 0x00000000001E0000ULL
-};
-
-static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
- 0x00000000001F0000ULL
-};
-
-/* SP */
-static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
- 0x0000000000010000ULL
-};
-
-static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
- 0x0000000000300000ULL
-};
-
-/* MMU */
-/*
- * MMU0_ID: The data MMU
- * MMU1_ID: The icache MMU
- */
-static const hrt_address MMU_BASE[N_MMU_ID] = {
- 0x0000000000070000ULL,
- 0x00000000000A0000ULL
-};
-
-/* DMA */
-static const hrt_address DMA_BASE[N_DMA_ID] = {
- 0x0000000000040000ULL
-};
-
-static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
- 0x00000000000CA000ULL
-};
-
-/* IRQ */
-static const hrt_address IRQ_BASE[N_IRQ_ID] = {
- 0x0000000000000500ULL,
- 0x0000000000030A00ULL,
- 0x000000000008C000ULL,
- 0x0000000000090200ULL
-};
-
-/*
- 0x0000000000000500ULL};
- */
-
-/* GDC */
-static const hrt_address GDC_BASE[N_GDC_ID] = {
- 0x0000000000050000ULL,
- 0x0000000000060000ULL
-};
-
-/* FIFO_MONITOR (not a subset of GP_DEVICE) */
-static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
- 0x0000000000000000ULL
-};
-
-/*
-static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
- 0x0000000000000000ULL};
-
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- 0x0000000000090000ULL};
-*/
-
-/* GP_DEVICE (single base for all separate GP_REG instances) */
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- 0x0000000000000000ULL
-};
-
-/*GP TIMER , all timer registers are inter-twined,
- * so, having multiple base addresses for
- * different timers does not help*/
-static const hrt_address GP_TIMER_BASE =
- (hrt_address)0x0000000000000600ULL;
-
-/* GPIO */
-static const hrt_address GPIO_BASE[N_GPIO_ID] = {
- 0x0000000000000400ULL
-};
-
-/* TIMED_CTRL */
-static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
- 0x0000000000000100ULL
-};
-
-/* INPUT_FORMATTER */
-static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
- 0x0000000000030000ULL,
- 0x0000000000030200ULL,
- 0x0000000000030400ULL,
- 0x0000000000030600ULL
-}; /* memcpy() */
-
-/* INPUT_SYSTEM */
-static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
- 0x0000000000080000ULL
-};
-
-/* 0x0000000000081000ULL, */ /* capture A */
-/* 0x0000000000082000ULL, */ /* capture B */
-/* 0x0000000000083000ULL, */ /* capture C */
-/* 0x0000000000084000ULL, */ /* Acquisition */
-/* 0x0000000000085000ULL, */ /* DMA */
-/* 0x0000000000089000ULL, */ /* ctrl */
-/* 0x000000000008A000ULL, */ /* GP regs */
-/* 0x000000000008B000ULL, */ /* FIFO */
-/* 0x000000000008C000ULL, */ /* IRQ */
-
-/* RX, the MIPI lane control regs start at offset 0 */
-static const hrt_address RX_BASE[N_RX_ID] = {
- 0x0000000000080100ULL
-};
-
-/* IBUF_CTRL, part of the Input System 2401 */
-static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
- 0x00000000000C1800ULL, /* ibuf controller A */
- 0x00000000000C3800ULL, /* ibuf controller B */
- 0x00000000000C5800ULL /* ibuf controller C */
-};
-
-/* ISYS IRQ Controllers, part of the Input System 2401 */
-static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
- 0x00000000000C1400ULL, /* port a */
- 0x00000000000C3400ULL, /* port b */
- 0x00000000000C5400ULL /* port c */
-};
-
-/* CSI FE, part of the Input System 2401 */
-static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
- 0x00000000000C0400ULL, /* csi fe controller A */
- 0x00000000000C2400ULL, /* csi fe controller B */
- 0x00000000000C4400ULL /* csi fe controller C */
-};
-
-/* CSI BE, part of the Input System 2401 */
-static const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
- 0x00000000000C0800ULL, /* csi be controller A */
- 0x00000000000C2800ULL, /* csi be controller B */
- 0x00000000000C4800ULL /* csi be controller C */
-};
-
-/* PIXEL Generator, part of the Input System 2401 */
-static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
- 0x00000000000C1000ULL, /* pixel gen controller A */
- 0x00000000000C3000ULL, /* pixel gen controller B */
- 0x00000000000C5000ULL /* pixel gen controller C */
-};
-
-/* Stream2MMIO, part of the Input System 2401 */
-static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
- 0x00000000000C0C00ULL, /* stream2mmio controller A */
- 0x00000000000C2C00ULL, /* stream2mmio controller B */
- 0x00000000000C4C00ULL /* stream2mmio controller C */
-};
-#elif HRT_ADDRESS_WIDTH == 32
-
-#define GP_FIFO_BASE ((hrt_address)0x00090104) /* This is NOT a base address */
-
-/* DDR : Attention, this value not defined in 32-bit */
-static const hrt_address DDR_BASE[N_DDR_ID] = {
- 0x00000000UL
-};
-
-/* ISP */
-static const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
- 0x00020000UL
-};
-
-static const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
- 0xffffffffUL
-};
-
-static const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
- 0xffffffffUL
-};
-
-static const hrt_address ISP_VAMEM_BASE[N_VAMEM_ID] = {
- 0xffffffffUL,
- 0xffffffffUL,
- 0xffffffffUL
-};
-
-static const hrt_address ISP_HMEM_BASE[N_HMEM_ID] = {
- 0xffffffffUL
-};
-
-/* SP */
-static const hrt_address SP_CTRL_BASE[N_SP_ID] = {
- 0x00010000UL
-};
-
-static const hrt_address SP_DMEM_BASE[N_SP_ID] = {
- 0x00300000UL
-};
-
-/* MMU */
-/*
- * MMU0_ID: The data MMU
- * MMU1_ID: The icache MMU
- */
-static const hrt_address MMU_BASE[N_MMU_ID] = {
- 0x00070000UL,
- 0x000A0000UL
-};
-
-/* DMA */
-static const hrt_address DMA_BASE[N_DMA_ID] = {
- 0x00040000UL
-};
-
-static const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
- 0x000CA000UL
-};
-
-/* IRQ */
-static const hrt_address IRQ_BASE[N_IRQ_ID] = {
- 0x00000500UL,
- 0x00030A00UL,
- 0x0008C000UL,
- 0x00090200UL
-};
-
-/*
- 0x00000500UL};
- */
-
-/* GDC */
-static const hrt_address GDC_BASE[N_GDC_ID] = {
- 0x00050000UL,
- 0x00060000UL
-};
-
-/* FIFO_MONITOR (not a subset of GP_DEVICE) */
-static const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
- 0x00000000UL
-};
-
-/*
-static const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
- 0x00000000UL};
-
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- 0x00090000UL};
-*/
-
-/* GP_DEVICE (single base for all separate GP_REG instances) */
-static const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
- 0x00000000UL
-};
-
-/*GP TIMER , all timer registers are inter-twined,
- * so, having multiple base addresses for
- * different timers does not help*/
-static const hrt_address GP_TIMER_BASE =
- (hrt_address)0x00000600UL;
-/* GPIO */
-static const hrt_address GPIO_BASE[N_GPIO_ID] = {
- 0x00000400UL
-};
-
-/* TIMED_CTRL */
-static const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
- 0x00000100UL
-};
-
-/* INPUT_FORMATTER */
-static const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
- 0x00030000UL,
- 0x00030200UL,
- 0x00030400UL
-};
-
-/* 0x00030600UL, */ /* memcpy() */
-
-/* INPUT_SYSTEM */
-static const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
- 0x00080000UL
-};
-
-/* 0x00081000UL, */ /* capture A */
-/* 0x00082000UL, */ /* capture B */
-/* 0x00083000UL, */ /* capture C */
-/* 0x00084000UL, */ /* Acquisition */
-/* 0x00085000UL, */ /* DMA */
-/* 0x00089000UL, */ /* ctrl */
-/* 0x0008A000UL, */ /* GP regs */
-/* 0x0008B000UL, */ /* FIFO */
-/* 0x0008C000UL, */ /* IRQ */
-
-/* RX, the MIPI lane control regs start at offset 0 */
-static const hrt_address RX_BASE[N_RX_ID] = {
- 0x00080100UL
-};
-
-/* IBUF_CTRL, part of the Input System 2401 */
-static const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
- 0x000C1800UL, /* ibuf controller A */
- 0x000C3800UL, /* ibuf controller B */
- 0x000C5800UL /* ibuf controller C */
-};
-
-/* ISYS IRQ Controllers, part of the Input System 2401 */
-static const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
- 0x000C1400ULL, /* port a */
- 0x000C3400ULL, /* port b */
- 0x000C5400ULL /* port c */
-};
-
-/* CSI FE, part of the Input System 2401 */
-static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
- 0x000C0400UL, /* csi fe controller A */
- 0x000C2400UL, /* csi fe controller B */
- 0x000C4400UL /* csi fe controller C */
-};
-
-/* CSI BE, part of the Input System 2401 */
-static const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
- 0x000C0800UL, /* csi be controller A */
- 0x000C2800UL, /* csi be controller B */
- 0x000C4800UL /* csi be controller C */
-};
-
-/* PIXEL Generator, part of the Input System 2401 */
-static const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
- 0x000C1000UL, /* pixel gen controller A */
- 0x000C3000UL, /* pixel gen controller B */
- 0x000C5000UL /* pixel gen controller C */
-};
-
-/* Stream2MMIO, part of the Input System 2401 */
-static const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
- 0x000C0C00UL, /* stream2mmio controller A */
- 0x000C2C00UL, /* stream2mmio controller B */
- 0x000C4C00UL /* stream2mmio controller C */
-};
-
-#else
-#error "system_local.h: HRT_ADDRESS_WIDTH must be one of {32,64}"
-#endif
-
-#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/sh_css.c b/drivers/staging/media/atomisp/pci/sh_css.c
index 6676537f0e97..54434c2dbaf9 100644
--- a/drivers/staging/media/atomisp/pci/sh_css.c
+++ b/drivers/staging/media/atomisp/pci/sh_css.c
@@ -1841,8 +1841,13 @@ ia_css_init(struct device *dev, const struct ia_css_env *env,
#endif
#if !defined(HAS_NO_INPUT_SYSTEM)
- dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
- ISP_DMA_MAX_BURST_LENGTH);
+
+ if (!IS_ISP2401)
+ dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
+ ISP2400_DMA_MAX_BURST_LENGTH);
+ else
+ dma_set_max_burst_size(DMA0_ID, HIVE_DMA_BUS_DDR_CONN,
+ ISP2401_DMA_MAX_BURST_LENGTH);
if (ia_css_isys_init() != INPUT_SYSTEM_ERR_NO_ERROR)
err = -EINVAL;
diff --git a/drivers/staging/media/atomisp/pci/system_global.h b/drivers/staging/media/atomisp/pci/system_global.h
index 16d0a2e9a4dc..90210f6943d2 100644
--- a/drivers/staging/media/atomisp/pci/system_global.h
+++ b/drivers/staging/media/atomisp/pci/system_global.h
@@ -4,8 +4,403 @@
* (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
*/
+#ifndef __SYSTEM_GLOBAL_H_INCLUDED__
+#define __SYSTEM_GLOBAL_H_INCLUDED__
+
+/*
+ * Create a list of HAS and IS properties that defines the system
+ * Those are common for both ISP2400 and ISP2401
+ *
+ * The configuration assumes the following
+ * - The system is heterogeneous; multiple cell and device classes
+ * - The cell and device instances are homogeneous, each device type
+ * belongs to the same class
+ * - Device instances supporting a subset of the class capabilities are
+ * allowed
+ *
+ * We could manage different device classes through the enumerated
+ * lists (C) or the use of classes (C++), but that is presently not
+ * fully supported
+ *
+ * N.B. the 3 input formatters are of 2 different classes
+ */
+
+#define HAS_MMU_VERSION_2
+#define HAS_DMA_VERSION_2
+#define HAS_GDC_VERSION_2
+#define HAS_VAMEM_VERSION_2
+#define HAS_HMEM_VERSION_1
+#define HAS_BAMEM_VERSION_2
+#define HAS_IRQ_VERSION_2
+#define HAS_IRQ_MAP_VERSION_2
+#define HAS_INPUT_FORMATTER_VERSION_2
+#define HAS_INPUT_SYSTEM_VERSION_2
+#define HAS_BUFFERED_SENSOR
+#define HAS_FIFO_MONITORS_VERSION_2
+#define HAS_GP_DEVICE_VERSION_2
+#define HAS_GPIO_VERSION_1
+#define HAS_TIMED_CTRL_VERSION_1
+#define HAS_RX_VERSION_2
+
+/* per-frame parameter handling support */
+#define SH_CSS_ENABLE_PER_FRAME_PARAMS
+
+#define DMA_DDR_TO_VAMEM_WORKAROUND
+#define DMA_DDR_TO_HMEM_WORKAROUND
+
+/*
+ * The longest allowed (uninterruptible) bus transfer, does not
+ * take stalling into account
+ */
+#define HIVE_ISP_MAX_BURST_LENGTH 1024
+
+/*
+ * Maximum allowed burst length in words for the ISP DMA
+ * This value is set to 2 to prevent the ISP DMA from blocking
+ * the bus for too long; as the input system can only buffer
+ * 2 lines on Moorefield and Cherrytrail, the input system buffers
+ * may overflow if blocked for too long (BZ 2726).
+ */
+#define ISP2400_DMA_MAX_BURST_LENGTH 128
+#define ISP2401_DMA_MAX_BURST_LENGTH 2
+
#ifdef ISP2401
# include "isp2401_system_global.h"
#else
# include "isp2400_system_global.h"
#endif
+
+#include <hive_isp_css_defs.h>
+#include <type_support.h>
+
+/* This interface is deprecated */
+#include "hive_types.h"
+
+/*
+ * Semi global. "HRT" is accessible from SP, but the HRT types do not fully apply
+ */
+#define HRT_VADDRESS_WIDTH 32
+
+#define SIZEOF_HRT_REG (HRT_DATA_WIDTH >> 3)
+#define HIVE_ISP_CTRL_DATA_BYTES (HIVE_ISP_CTRL_DATA_WIDTH / 8)
+
+/* The main bus connecting all devices */
+#define HRT_BUS_WIDTH HIVE_ISP_CTRL_DATA_WIDTH
+#define HRT_BUS_BYTES HIVE_ISP_CTRL_DATA_BYTES
+
+typedef u32 hrt_bus_align_t;
+
+/*
+ * Enumerate the devices, device access through the API is by ID,
+ * through the DLI by address. The enumerator terminators are used
+ * to size the wiring arrays and as an exception value.
+ */
+typedef enum {
+ DDR0_ID = 0,
+ N_DDR_ID
+} ddr_ID_t;
+
+typedef enum {
+ ISP0_ID = 0,
+ N_ISP_ID
+} isp_ID_t;
+
+typedef enum {
+ SP0_ID = 0,
+ N_SP_ID
+} sp_ID_t;
+
+typedef enum {
+ MMU0_ID = 0,
+ MMU1_ID,
+ N_MMU_ID
+} mmu_ID_t;
+
+typedef enum {
+ DMA0_ID = 0,
+ N_DMA_ID
+} dma_ID_t;
+
+typedef enum {
+ GDC0_ID = 0,
+ GDC1_ID,
+ N_GDC_ID
+} gdc_ID_t;
+
+/* this extra define is needed because we want to use it also
+ in the preprocessor, and that doesn't work with enums.
+ */
+#define N_GDC_ID_CPP 2
+
+typedef enum {
+ VAMEM0_ID = 0,
+ VAMEM1_ID,
+ VAMEM2_ID,
+ N_VAMEM_ID
+} vamem_ID_t;
+
+typedef enum {
+ BAMEM0_ID = 0,
+ N_BAMEM_ID
+} bamem_ID_t;
+
+typedef enum {
+ HMEM0_ID = 0,
+ N_HMEM_ID
+} hmem_ID_t;
+
+typedef enum {
+ IRQ0_ID = 0, /* GP IRQ block */
+ IRQ1_ID, /* Input formatter */
+ IRQ2_ID, /* input system */
+ IRQ3_ID, /* input selector */
+ N_IRQ_ID
+} irq_ID_t;
+
+typedef enum {
+ FIFO_MONITOR0_ID = 0,
+ N_FIFO_MONITOR_ID
+} fifo_monitor_ID_t;
+
+typedef enum {
+ GP_DEVICE0_ID = 0,
+ N_GP_DEVICE_ID
+} gp_device_ID_t;
+
+typedef enum {
+ GP_TIMER0_ID = 0,
+ GP_TIMER1_ID,
+ GP_TIMER2_ID,
+ GP_TIMER3_ID,
+ GP_TIMER4_ID,
+ GP_TIMER5_ID,
+ GP_TIMER6_ID,
+ GP_TIMER7_ID,
+ N_GP_TIMER_ID
+} gp_timer_ID_t;
+
+typedef enum {
+ GPIO0_ID = 0,
+ N_GPIO_ID
+} gpio_ID_t;
+
+typedef enum {
+ TIMED_CTRL0_ID = 0,
+ N_TIMED_CTRL_ID
+} timed_ctrl_ID_t;
+
+typedef enum {
+ INPUT_FORMATTER0_ID = 0,
+ INPUT_FORMATTER1_ID,
+ INPUT_FORMATTER2_ID,
+ INPUT_FORMATTER3_ID,
+ N_INPUT_FORMATTER_ID
+} input_formatter_ID_t;
+
+/* The IF RST is outside the IF */
+#define INPUT_FORMATTER0_SRST_OFFSET 0x0824
+#define INPUT_FORMATTER1_SRST_OFFSET 0x0624
+#define INPUT_FORMATTER2_SRST_OFFSET 0x0424
+#define INPUT_FORMATTER3_SRST_OFFSET 0x0224
+
+#define INPUT_FORMATTER0_SRST_MASK 0x0001
+#define INPUT_FORMATTER1_SRST_MASK 0x0002
+#define INPUT_FORMATTER2_SRST_MASK 0x0004
+#define INPUT_FORMATTER3_SRST_MASK 0x0008
+
+typedef enum {
+ INPUT_SYSTEM0_ID = 0,
+ N_INPUT_SYSTEM_ID
+} input_system_ID_t;
+
+typedef enum {
+ RX0_ID = 0,
+ N_RX_ID
+} rx_ID_t;
+
+enum mipi_port_id {
+ MIPI_PORT0_ID = 0,
+ MIPI_PORT1_ID,
+ MIPI_PORT2_ID,
+ N_MIPI_PORT_ID
+};
+
+#define N_RX_CHANNEL_ID 4
+
+/* Generic port enumeration with an internal port type ID */
+typedef enum {
+ CSI_PORT0_ID = 0,
+ CSI_PORT1_ID,
+ CSI_PORT2_ID,
+ TPG_PORT0_ID,
+ PRBS_PORT0_ID,
+ FIFO_PORT0_ID,
+ MEMORY_PORT0_ID,
+ N_INPUT_PORT_ID
+} input_port_ID_t;
+
+typedef enum {
+ CAPTURE_UNIT0_ID = 0,
+ CAPTURE_UNIT1_ID,
+ CAPTURE_UNIT2_ID,
+ ACQUISITION_UNIT0_ID,
+ DMA_UNIT0_ID,
+ CTRL_UNIT0_ID,
+ GPREGS_UNIT0_ID,
+ FIFO_UNIT0_ID,
+ IRQ_UNIT0_ID,
+ N_SUB_SYSTEM_ID
+} sub_system_ID_t;
+
+#define N_CAPTURE_UNIT_ID 3
+#define N_ACQUISITION_UNIT_ID 1
+#define N_CTRL_UNIT_ID 1
+
+
+enum ia_css_isp_memories {
+ IA_CSS_ISP_PMEM0 = 0,
+ IA_CSS_ISP_DMEM0,
+ IA_CSS_ISP_VMEM0,
+ IA_CSS_ISP_VAMEM0,
+ IA_CSS_ISP_VAMEM1,
+ IA_CSS_ISP_VAMEM2,
+ IA_CSS_ISP_HMEM0,
+ IA_CSS_SP_DMEM0,
+ IA_CSS_DDR,
+ N_IA_CSS_MEMORIES
+};
+
+#define IA_CSS_NUM_MEMORIES 9
+/* For driver compatibility */
+#define N_IA_CSS_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+#define IA_CSS_NUM_ISP_MEMORIES IA_CSS_NUM_MEMORIES
+
+/*
+ * ISP2401 specific enums
+ */
+
+typedef enum {
+ ISYS_IRQ0_ID = 0, /* port a */
+ ISYS_IRQ1_ID, /* port b */
+ ISYS_IRQ2_ID, /* port c */
+ N_ISYS_IRQ_ID
+} isys_irq_ID_t;
+
+
+/*
+ * Input-buffer Controller.
+ */
+typedef enum {
+ IBUF_CTRL0_ID = 0, /* map to ISYS2401_IBUF_CNTRL_A */
+ IBUF_CTRL1_ID, /* map to ISYS2401_IBUF_CNTRL_B */
+ IBUF_CTRL2_ID, /* map ISYS2401_IBUF_CNTRL_C */
+ N_IBUF_CTRL_ID
+} ibuf_ctrl_ID_t;
+/* end of Input-buffer Controller */
+
+/*
+ * Stream2MMIO.
+ */
+typedef enum {
+ STREAM2MMIO0_ID = 0, /* map to ISYS2401_S2M_A */
+ STREAM2MMIO1_ID, /* map to ISYS2401_S2M_B */
+ STREAM2MMIO2_ID, /* map to ISYS2401_S2M_C */
+ N_STREAM2MMIO_ID
+} stream2mmio_ID_t;
+
+typedef enum {
+ /*
+ * Stream2MMIO 0 has 8 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID7_ID].
+ *
+ * Stream2MMIO 1 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ *
+ * Stream2MMIO 2 has 4 SIDs that are indexed by
+ * [STREAM2MMIO_SID0_ID...STREAM2MMIO_SID3_ID].
+ */
+ STREAM2MMIO_SID0_ID = 0,
+ STREAM2MMIO_SID1_ID,
+ STREAM2MMIO_SID2_ID,
+ STREAM2MMIO_SID3_ID,
+ STREAM2MMIO_SID4_ID,
+ STREAM2MMIO_SID5_ID,
+ STREAM2MMIO_SID6_ID,
+ STREAM2MMIO_SID7_ID,
+ N_STREAM2MMIO_SID_ID
+} stream2mmio_sid_ID_t;
+/* end of Stream2MMIO */
+
+/**
+ * Input System 2401: CSI-MIPI receiver.
+ */
+typedef enum {
+ CSI_RX_BACKEND0_ID = 0, /* map to ISYS2401_MIPI_BE_A */
+ CSI_RX_BACKEND1_ID, /* map to ISYS2401_MIPI_BE_B */
+ CSI_RX_BACKEND2_ID, /* map to ISYS2401_MIPI_BE_C */
+ N_CSI_RX_BACKEND_ID
+} csi_rx_backend_ID_t;
+
+typedef enum {
+ CSI_RX_FRONTEND0_ID = 0, /* map to ISYS2401_CSI_RX_A */
+ CSI_RX_FRONTEND1_ID, /* map to ISYS2401_CSI_RX_B */
+ CSI_RX_FRONTEND2_ID, /* map to ISYS2401_CSI_RX_C */
+#define N_CSI_RX_FRONTEND_ID (CSI_RX_FRONTEND2_ID + 1)
+} csi_rx_frontend_ID_t;
+
+typedef enum {
+ CSI_RX_DLANE0_ID = 0, /* map to DLANE0 in CSI RX */
+ CSI_RX_DLANE1_ID, /* map to DLANE1 in CSI RX */
+ CSI_RX_DLANE2_ID, /* map to DLANE2 in CSI RX */
+ CSI_RX_DLANE3_ID, /* map to DLANE3 in CSI RX */
+ N_CSI_RX_DLANE_ID
+} csi_rx_fe_dlane_ID_t;
+/* end of CSI-MIPI receiver */
+
+typedef enum {
+ ISYS2401_DMA0_ID = 0,
+ N_ISYS2401_DMA_ID
+} isys2401_dma_ID_t;
+
+/**
+ * Pixel-generator. ("system_global.h")
+ */
+typedef enum {
+ PIXELGEN0_ID = 0,
+ PIXELGEN1_ID,
+ PIXELGEN2_ID,
+ N_PIXELGEN_ID
+} pixelgen_ID_t;
+/* end of pixel-generator. ("system_global.h") */
+
+typedef enum {
+ INPUT_SYSTEM_CSI_PORT0_ID = 0,
+ INPUT_SYSTEM_CSI_PORT1_ID,
+ INPUT_SYSTEM_CSI_PORT2_ID,
+
+ INPUT_SYSTEM_PIXELGEN_PORT0_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT1_ID,
+ INPUT_SYSTEM_PIXELGEN_PORT2_ID,
+
+ N_INPUT_SYSTEM_INPUT_PORT_ID
+} input_system_input_port_ID_t;
+
+#define N_INPUT_SYSTEM_CSI_PORT 3
+
+typedef enum {
+ ISYS2401_DMA_CHANNEL_0 = 0,
+ ISYS2401_DMA_CHANNEL_1,
+ ISYS2401_DMA_CHANNEL_2,
+ ISYS2401_DMA_CHANNEL_3,
+ ISYS2401_DMA_CHANNEL_4,
+ ISYS2401_DMA_CHANNEL_5,
+ ISYS2401_DMA_CHANNEL_6,
+ ISYS2401_DMA_CHANNEL_7,
+ ISYS2401_DMA_CHANNEL_8,
+ ISYS2401_DMA_CHANNEL_9,
+ ISYS2401_DMA_CHANNEL_10,
+ ISYS2401_DMA_CHANNEL_11,
+ N_ISYS2401_DMA_CHANNEL
+} isys2401_dma_channel;
+
+#endif /* __SYSTEM_GLOBAL_H_INCLUDED__ */
diff --git a/drivers/staging/media/atomisp/pci/system_local.c b/drivers/staging/media/atomisp/pci/system_local.c
new file mode 100644
index 000000000000..4ca8569d7feb
--- /dev/null
+++ b/drivers/staging/media/atomisp/pci/system_local.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include "system_local.h"
+
+/* ISP */
+const hrt_address ISP_CTRL_BASE[N_ISP_ID] = {
+ 0x0000000000020000ULL
+};
+
+const hrt_address ISP_DMEM_BASE[N_ISP_ID] = {
+ 0x0000000000200000ULL
+};
+
+const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID] = {
+ 0x0000000000100000ULL
+};
+
+/* SP */
+const hrt_address SP_CTRL_BASE[N_SP_ID] = {
+ 0x0000000000010000ULL
+};
+
+const hrt_address SP_DMEM_BASE[N_SP_ID] = {
+ 0x0000000000300000ULL
+};
+
+/* MMU */
+/*
+ * MMU0_ID: The data MMU
+ * MMU1_ID: The icache MMU
+ */
+const hrt_address MMU_BASE[N_MMU_ID] = {
+ 0x0000000000070000ULL,
+ 0x00000000000A0000ULL
+};
+
+/* DMA */
+const hrt_address DMA_BASE[N_DMA_ID] = {
+ 0x0000000000040000ULL
+};
+
+const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID] = {
+ 0x00000000000CA000ULL
+};
+
+/* IRQ */
+const hrt_address IRQ_BASE[N_IRQ_ID] = {
+ 0x0000000000000500ULL,
+ 0x0000000000030A00ULL,
+ 0x000000000008C000ULL,
+ 0x0000000000090200ULL
+};
+
+/*
+ 0x0000000000000500ULL};
+ */
+
+/* GDC */
+const hrt_address GDC_BASE[N_GDC_ID] = {
+ 0x0000000000050000ULL,
+ 0x0000000000060000ULL
+};
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID] = {
+ 0x0000000000000000ULL
+};
+
+/*
+const hrt_address GP_REGS_BASE[N_GP_REGS_ID] = {
+ 0x0000000000000000ULL};
+
+const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000090000ULL};
+*/
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID] = {
+ 0x0000000000000000ULL
+};
+
+/*GP TIMER , all timer registers are inter-twined,
+ * so, having multiple base addresses for
+ * different timers does not help*/
+const hrt_address GP_TIMER_BASE =
+ (hrt_address)0x0000000000000600ULL;
+
+/* GPIO */
+const hrt_address GPIO_BASE[N_GPIO_ID] = {
+ 0x0000000000000400ULL
+};
+
+/* TIMED_CTRL */
+const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID] = {
+ 0x0000000000000100ULL
+};
+
+/* INPUT_FORMATTER */
+const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID] = {
+ 0x0000000000030000ULL,
+ 0x0000000000030200ULL,
+ 0x0000000000030400ULL,
+ 0x0000000000030600ULL
+}; /* memcpy() */
+
+/* INPUT_SYSTEM */
+const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID] = {
+ 0x0000000000080000ULL
+};
+
+/* 0x0000000000081000ULL, */ /* capture A */
+/* 0x0000000000082000ULL, */ /* capture B */
+/* 0x0000000000083000ULL, */ /* capture C */
+/* 0x0000000000084000ULL, */ /* Acquisition */
+/* 0x0000000000085000ULL, */ /* DMA */
+/* 0x0000000000089000ULL, */ /* ctrl */
+/* 0x000000000008A000ULL, */ /* GP regs */
+/* 0x000000000008B000ULL, */ /* FIFO */
+/* 0x000000000008C000ULL, */ /* IRQ */
+
+/* RX, the MIPI lane control regs start at offset 0 */
+const hrt_address RX_BASE[N_RX_ID] = {
+ 0x0000000000080100ULL
+};
+
+/* IBUF_CTRL, part of the Input System 2401 */
+const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID] = {
+ 0x00000000000C1800ULL, /* ibuf controller A */
+ 0x00000000000C3800ULL, /* ibuf controller B */
+ 0x00000000000C5800ULL /* ibuf controller C */
+};
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID] = {
+ 0x00000000000C1400ULL, /* port a */
+ 0x00000000000C3400ULL, /* port b */
+ 0x00000000000C5400ULL /* port c */
+};
+
+/* CSI FE, part of the Input System 2401 */
+const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID] = {
+ 0x00000000000C0400ULL, /* csi fe controller A */
+ 0x00000000000C2400ULL, /* csi fe controller B */
+ 0x00000000000C4400ULL /* csi fe controller C */
+};
+
+/* CSI BE, part of the Input System 2401 */
+const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID] = {
+ 0x00000000000C0800ULL, /* csi be controller A */
+ 0x00000000000C2800ULL, /* csi be controller B */
+ 0x00000000000C4800ULL /* csi be controller C */
+};
+
+/* PIXEL Generator, part of the Input System 2401 */
+const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID] = {
+ 0x00000000000C1000ULL, /* pixel gen controller A */
+ 0x00000000000C3000ULL, /* pixel gen controller B */
+ 0x00000000000C5000ULL /* pixel gen controller C */
+};
+
+/* Stream2MMIO, part of the Input System 2401 */
+const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID] = {
+ 0x00000000000C0C00ULL, /* stream2mmio controller A */
+ 0x00000000000C2C00ULL, /* stream2mmio controller B */
+ 0x00000000000C4C00ULL /* stream2mmio controller C */
+};
diff --git a/drivers/staging/media/atomisp/pci/system_local.h b/drivers/staging/media/atomisp/pci/system_local.h
index dfcc4c2b8f16..a47258c2e8a8 100644
--- a/drivers/staging/media/atomisp/pci/system_local.h
+++ b/drivers/staging/media/atomisp/pci/system_local.h
@@ -1,11 +1,103 @@
/* SPDX-License-Identifier: GPL-2.0 */
-// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * (c) 2020 Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+ * Support for Intel Camera Imaging ISP subsystem.
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
*/
-#ifdef ISP2401
-# include "isp2401_system_local.h"
-#else
-# include "isp2400_system_local.h"
+#ifndef __SYSTEM_LOCAL_H_INCLUDED__
+#define __SYSTEM_LOCAL_H_INCLUDED__
+
+#ifdef HRT_ISP_CSS_CUSTOM_HOST
+#ifndef HRT_USE_VIR_ADDRS
+#define HRT_USE_VIR_ADDRS
+#endif
#endif
+
+#include "system_global.h"
+
+/* This interface is deprecated */
+#include "hive_types.h"
+
+/*
+ * Cell specific address maps
+ */
+
+#define GP_FIFO_BASE ((hrt_address)0x0000000000090104) /* This is NOT a base address */
+
+/* ISP */
+extern const hrt_address ISP_CTRL_BASE[N_ISP_ID];
+extern const hrt_address ISP_DMEM_BASE[N_ISP_ID];
+extern const hrt_address ISP_BAMEM_BASE[N_BAMEM_ID];
+
+/* SP */
+extern const hrt_address SP_CTRL_BASE[N_SP_ID];
+extern const hrt_address SP_DMEM_BASE[N_SP_ID];
+
+/* MMU */
+
+extern const hrt_address MMU_BASE[N_MMU_ID];
+
+/* DMA */
+extern const hrt_address DMA_BASE[N_DMA_ID];
+extern const hrt_address ISYS2401_DMA_BASE[N_ISYS2401_DMA_ID];
+
+/* IRQ */
+extern const hrt_address IRQ_BASE[N_IRQ_ID];
+
+/* GDC */
+extern const hrt_address GDC_BASE[N_GDC_ID];
+
+/* FIFO_MONITOR (not a subset of GP_DEVICE) */
+extern const hrt_address FIFO_MONITOR_BASE[N_FIFO_MONITOR_ID];
+
+/* GP_DEVICE (single base for all separate GP_REG instances) */
+extern const hrt_address GP_DEVICE_BASE[N_GP_DEVICE_ID];
+
+/*GP TIMER , all timer registers are inter-twined,
+ * so, having multiple base addresses for
+ * different timers does not help*/
+extern const hrt_address GP_TIMER_BASE;
+
+/* GPIO */
+extern const hrt_address GPIO_BASE[N_GPIO_ID];
+
+/* TIMED_CTRL */
+extern const hrt_address TIMED_CTRL_BASE[N_TIMED_CTRL_ID];
+
+/* INPUT_FORMATTER */
+extern const hrt_address INPUT_FORMATTER_BASE[N_INPUT_FORMATTER_ID];
+
+/* INPUT_SYSTEM */
+extern const hrt_address INPUT_SYSTEM_BASE[N_INPUT_SYSTEM_ID];
+
+/* RX, the MIPI lane control regs start at offset 0 */
+extern const hrt_address RX_BASE[N_RX_ID];
+
+/* IBUF_CTRL, part of the Input System 2401 */
+extern const hrt_address IBUF_CTRL_BASE[N_IBUF_CTRL_ID];
+
+/* ISYS IRQ Controllers, part of the Input System 2401 */
+extern const hrt_address ISYS_IRQ_BASE[N_ISYS_IRQ_ID];
+
+/* CSI FE, part of the Input System 2401 */
+extern const hrt_address CSI_RX_FE_CTRL_BASE[N_CSI_RX_FRONTEND_ID];
+
+/* CSI BE, part of the Input System 2401 */
+extern const hrt_address CSI_RX_BE_CTRL_BASE[N_CSI_RX_BACKEND_ID];
+
+/* PIXEL Generator, part of the Input System 2401 */
+extern const hrt_address PIXELGEN_CTRL_BASE[N_PIXELGEN_ID];
+
+/* Stream2MMIO, part of the Input System 2401 */
+extern const hrt_address STREAM2MMIO_CTRL_BASE[N_STREAM2MMIO_ID];
+
+#endif /* __SYSTEM_LOCAL_H_INCLUDED__ */
diff --git a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
index 69bcd172b298..a3ea7ce3e12e 100644
--- a/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723bs/core/rtw_wlan_util.c
@@ -1824,12 +1824,14 @@ int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_l
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
if (!pIE)
return _FAIL;
+ if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates))
+ return _FAIL;
memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
supportRateNum = ie_len;
pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
- if (pIE)
+ if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum))
memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
return _SUCCESS;
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index 893b67f2f792..5110f9b93762 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -240,7 +240,7 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
}
int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
- int chan_start_idx, int chan_num)
+ int chan_start_idx, int chan_num, int *timeout)
{
int ret, i;
struct hif_msg *hif;
@@ -289,11 +289,13 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
tmo_chan_fg *= body->num_of_probe_requests;
tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
+ if (timeout)
+ *timeout = usecs_to_jiffies(tmo);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
kfree(hif);
- return ret ? ret : usecs_to_jiffies(tmo);
+ return ret;
}
int hif_stop_scan(struct wfx_vif *wvif)
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index e9eca9330178..e1da28aef706 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -42,7 +42,7 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
- int chan_start, int chan_num);
+ int chan_start, int chan_num, int *timeout);
int hif_stop_scan(struct wfx_vif *wvif);
int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 3248ecefda56..93ea2b72febd 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -246,7 +246,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
sorted_queues[i] = &wdev->tx_queue[i];
for (j = i; j > 0; j--)
- if (atomic_read(&sorted_queues[j]->pending_frames) >
+ if (atomic_read(&sorted_queues[j]->pending_frames) <
atomic_read(&sorted_queues[j - 1]->pending_frames))
swap(sorted_queues[j - 1], sorted_queues[j]);
}
@@ -291,15 +291,12 @@ struct hif_msg *wfx_tx_queues_get(struct wfx_dev *wdev)
if (atomic_read(&wdev->tx_lock))
return NULL;
-
- for (;;) {
- skb = wfx_tx_queues_get_skb(wdev);
- if (!skb)
- return NULL;
- skb_queue_tail(&wdev->tx_pending, skb);
- wake_up(&wdev->tx_dequeue);
- tx_priv = wfx_skb_tx_priv(skb);
- tx_priv->xmit_timestamp = ktime_get();
- return (struct hif_msg *)skb->data;
- }
+ skb = wfx_tx_queues_get_skb(wdev);
+ if (!skb)
+ return NULL;
+ skb_queue_tail(&wdev->tx_pending, skb);
+ wake_up(&wdev->tx_dequeue);
+ tx_priv = wfx_skb_tx_priv(skb);
+ tx_priv->xmit_timestamp = ktime_get();
+ return (struct hif_msg *)skb->data;
}
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index 57ea9997800b..e9de19784865 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -56,10 +56,10 @@ static int send_scan_req(struct wfx_vif *wvif,
wfx_tx_lock_flush(wvif->wdev);
wvif->scan_abort = false;
reinit_completion(&wvif->scan_complete);
- timeout = hif_scan(wvif, req, start_idx, i - start_idx);
- if (timeout < 0) {
+ ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout);
+ if (ret) {
wfx_tx_unlock(wvif->wdev);
- return timeout;
+ return -EIO;
}
ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c
index 9e124020519f..6c0e1b053126 100644
--- a/drivers/thermal/cpufreq_cooling.c
+++ b/drivers/thermal/cpufreq_cooling.c
@@ -123,12 +123,12 @@ static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev,
{
int i;
- for (i = cpufreq_cdev->max_level - 1; i >= 0; i--) {
- if (power > cpufreq_cdev->em->table[i].power)
+ for (i = cpufreq_cdev->max_level; i >= 0; i--) {
+ if (power >= cpufreq_cdev->em->table[i].power)
break;
}
- return cpufreq_cdev->em->table[i + 1].frequency;
+ return cpufreq_cdev->em->table[i].frequency;
}
/**
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index e761c9b42217..1b84ea674edb 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -649,7 +649,7 @@ MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
{
struct device_node *np;
- int ret;
+ int ret = 0;
data->policy = cpufreq_cpu_get(0);
if (!data->policy) {
@@ -664,11 +664,12 @@ static int imx_thermal_register_legacy_cooling(struct imx_thermal_data *data)
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
cpufreq_cpu_put(data->policy);
- return ret;
}
}
- return 0;
+ of_node_put(np);
+
+ return ret;
}
static void imx_thermal_unregister_legacy_cooling(struct imx_thermal_data *data)
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 0b3a62655843..12448ccd27f1 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -216,11 +216,16 @@ static int int3400_thermal_run_osc(acpi_handle handle,
acpi_status status;
int result = 0;
struct acpi_osc_context context = {
- .uuid_str = int3400_thermal_uuids[uuid],
+ .uuid_str = NULL,
.rev = 1,
.cap.length = 8,
};
+ if (uuid < 0 || uuid >= INT3400_THERMAL_MAXIMUM_UUID)
+ return -EINVAL;
+
+ context.uuid_str = int3400_thermal_uuids[uuid];
+
buf[OSC_QUERY_DWORD] = 0;
buf[OSC_SUPPORT_DWORD] = enable;
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index f86cbb125e2f..ec1d58c4ceaa 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -74,7 +74,7 @@ static void int3403_notify(acpi_handle handle,
THERMAL_TRIP_CHANGED);
break;
default:
- dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
+ dev_dbg(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
break;
}
}
diff --git a/drivers/thermal/mtk_thermal.c b/drivers/thermal/mtk_thermal.c
index 76e30603d4d5..42c9cd0e5f77 100644
--- a/drivers/thermal/mtk_thermal.c
+++ b/drivers/thermal/mtk_thermal.c
@@ -211,6 +211,9 @@ enum {
/* The total number of temperature sensors in the MT8183 */
#define MT8183_NUM_SENSORS 6
+/* The number of banks in the MT8183 */
+#define MT8183_NUM_ZONES 1
+
/* The number of sensing points per bank */
#define MT8183_NUM_SENSORS_PER_ZONE 6
@@ -497,7 +500,7 @@ static const struct mtk_thermal_data mt7622_thermal_data = {
*/
static const struct mtk_thermal_data mt8183_thermal_data = {
.auxadc_channel = MT8183_TEMP_AUXADC_CHANNEL,
- .num_banks = MT8183_NUM_SENSORS_PER_ZONE,
+ .num_banks = MT8183_NUM_ZONES,
.num_sensors = MT8183_NUM_SENSORS,
.vts_index = mt8183_vts_index,
.cali_val = MT8183_CALIBRATION,
@@ -591,8 +594,7 @@ static int mtk_thermal_bank_temperature(struct mtk_thermal_bank *bank)
u32 raw;
for (i = 0; i < conf->bank_data[bank->id].num_sensors; i++) {
- raw = readl(mt->thermal_base +
- conf->msr[conf->bank_data[bank->id].sensors[i]]);
+ raw = readl(mt->thermal_base + conf->msr[i]);
temp = raw_to_mcelsius(mt,
conf->bank_data[bank->id].sensors[i],
@@ -733,8 +735,7 @@ static void mtk_thermal_init_bank(struct mtk_thermal *mt, int num,
for (i = 0; i < conf->bank_data[num].num_sensors; i++)
writel(conf->sensor_mux_values[conf->bank_data[num].sensors[i]],
- mt->thermal_base +
- conf->adcpnp[conf->bank_data[num].sensors[i]]);
+ mt->thermal_base + conf->adcpnp[i]);
writel((1 << conf->bank_data[num].num_sensors) - 1,
controller_base + TEMP_MONCTL0);
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index 8d3e94d2a9ed..39c4462e38f6 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -382,7 +382,7 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver)
*
* Return: IRQ_HANDLED
*/
-irqreturn_t tsens_critical_irq_thread(int irq, void *data)
+static irqreturn_t tsens_critical_irq_thread(int irq, void *data)
{
struct tsens_priv *priv = data;
struct tsens_irq_data d;
@@ -452,7 +452,7 @@ irqreturn_t tsens_critical_irq_thread(int irq, void *data)
*
* Return: IRQ_HANDLED
*/
-irqreturn_t tsens_irq_thread(int irq, void *data)
+static irqreturn_t tsens_irq_thread(int irq, void *data)
{
struct tsens_priv *priv = data;
struct tsens_irq_data d;
@@ -520,7 +520,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
-int tsens_set_trips(void *_sensor, int low, int high)
+static int tsens_set_trips(void *_sensor, int low, int high)
{
struct tsens_sensor *s = _sensor;
struct tsens_priv *priv = s->priv;
@@ -557,7 +557,7 @@ int tsens_set_trips(void *_sensor, int low, int high)
return 0;
}
-int tsens_enable_irq(struct tsens_priv *priv)
+static int tsens_enable_irq(struct tsens_priv *priv)
{
int ret;
int val = tsens_version(priv) > VER_1_X ? 7 : 1;
@@ -570,7 +570,7 @@ int tsens_enable_irq(struct tsens_priv *priv)
return ret;
}
-void tsens_disable_irq(struct tsens_priv *priv)
+static void tsens_disable_irq(struct tsens_priv *priv)
{
regmap_field_write(priv->rf[INT_EN], 0);
}
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 58fe7c1ef00b..c48c5e9b8f20 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -167,7 +167,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
{
struct rcar_gen3_thermal_tsc *tsc = devdata;
int mcelsius, val;
- u32 reg;
+ int reg;
/* Read register and convert to mili Celsius */
reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c
index a340374e8c51..4cde70dcf655 100644
--- a/drivers/thermal/sprd_thermal.c
+++ b/drivers/thermal/sprd_thermal.c
@@ -348,8 +348,8 @@ static int sprd_thm_probe(struct platform_device *pdev)
thm->var_data = pdata;
thm->base = devm_platform_ioremap_resource(pdev, 0);
- if (!thm->base)
- return -ENOMEM;
+ if (IS_ERR(thm->base))
+ return PTR_ERR(thm->base);
thm->nr_sensors = of_get_child_count(np);
if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) {
diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c
index dbe90bcf4ad4..c144ca9b032c 100644
--- a/drivers/thunderbolt/tunnel.c
+++ b/drivers/thunderbolt/tunnel.c
@@ -913,21 +913,21 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down)
* case.
*/
path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1,
- &tunnel->dst_port, "USB3 Up");
+ &tunnel->dst_port, "USB3 Down");
if (!path) {
/* Just disable the downstream port */
tb_usb3_port_enable(down, false);
goto err_free;
}
- tunnel->paths[TB_USB3_PATH_UP] = path;
- tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
+ tunnel->paths[TB_USB3_PATH_DOWN] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL,
- "USB3 Down");
+ "USB3 Up");
if (!path)
goto err_deactivate;
- tunnel->paths[TB_USB3_PATH_DOWN] = path;
- tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]);
+ tunnel->paths[TB_USB3_PATH_UP] = path;
+ tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]);
/* Validate that the tunnel is complete */
if (!tb_port_is_usb3_up(tunnel->dst_port)) {
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index a04f74d2e854..4df47d02b34b 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1215,7 +1215,12 @@ static int cpm_uart_init_port(struct device_node *np,
pinfo->gpios[i] = NULL;
- gpiod = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS);
+ gpiod = devm_gpiod_get_index_optional(dev, NULL, i, GPIOD_ASIS);
+
+ if (IS_ERR(gpiod)) {
+ ret = PTR_ERR(gpiod);
+ goto out_irq;
+ }
if (gpiod) {
if (i == GPIO_RTS || i == GPIO_DTR)
@@ -1237,6 +1242,8 @@ static int cpm_uart_init_port(struct device_node *np,
return cpm_uart_request_port(&pinfo->port);
+out_irq:
+ irq_dispose_mapping(pinfo->port.irq);
out_pram:
cpm_uart_unmap_pram(pinfo, pram);
out_mem:
diff --git a/drivers/tty/serial/kgdb_nmi.c b/drivers/tty/serial/kgdb_nmi.c
index 5022447afa23..6004c0c1d173 100644
--- a/drivers/tty/serial/kgdb_nmi.c
+++ b/drivers/tty/serial/kgdb_nmi.c
@@ -50,7 +50,7 @@ static int kgdb_nmi_console_setup(struct console *co, char *options)
* I/O utilities that messages sent to the console will automatically
* be displayed on the dbg_io.
*/
- dbg_io_ops->is_console = true;
+ dbg_io_ops->cons = co;
return 0;
}
diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
index 41396982e9e0..84ffede27f23 100644
--- a/drivers/tty/serial/kgdboc.c
+++ b/drivers/tty/serial/kgdboc.c
@@ -45,7 +45,6 @@ static struct platform_device *kgdboc_pdev;
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static struct kgdb_io kgdboc_earlycon_io_ops;
-static struct console *earlycon;
static int (*earlycon_orig_exit)(struct console *con);
#endif /* IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
@@ -145,7 +144,7 @@ static void kgdboc_unregister_kbd(void)
#if IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE)
static void cleanup_earlycon(void)
{
- if (earlycon)
+ if (kgdboc_earlycon_io_ops.cons)
kgdb_unregister_io_module(&kgdboc_earlycon_io_ops);
}
#else /* !IS_BUILTIN(CONFIG_KGDB_SERIAL_CONSOLE) */
@@ -178,7 +177,7 @@ static int configure_kgdboc(void)
goto noconfig;
}
- kgdboc_io_ops.is_console = 0;
+ kgdboc_io_ops.cons = NULL;
kgdb_tty_driver = NULL;
kgdboc_use_kms = 0;
@@ -198,7 +197,7 @@ static int configure_kgdboc(void)
int idx;
if (cons->device && cons->device(cons, &idx) == p &&
idx == tty_line) {
- kgdboc_io_ops.is_console = 1;
+ kgdboc_io_ops.cons = cons;
break;
}
}
@@ -433,7 +432,8 @@ static int kgdboc_earlycon_get_char(void)
{
char c;
- if (!earlycon->read(earlycon, &c, 1))
+ if (!kgdboc_earlycon_io_ops.cons->read(kgdboc_earlycon_io_ops.cons,
+ &c, 1))
return NO_POLL_CHAR;
return c;
@@ -441,7 +441,8 @@ static int kgdboc_earlycon_get_char(void)
static void kgdboc_earlycon_put_char(u8 chr)
{
- earlycon->write(earlycon, &chr, 1);
+ kgdboc_earlycon_io_ops.cons->write(kgdboc_earlycon_io_ops.cons, &chr,
+ 1);
}
static void kgdboc_earlycon_pre_exp_handler(void)
@@ -461,7 +462,7 @@ static void kgdboc_earlycon_pre_exp_handler(void)
* boot if we detect this case.
*/
for_each_console(con)
- if (con == earlycon)
+ if (con == kgdboc_earlycon_io_ops.cons)
return;
already_warned = true;
@@ -484,25 +485,25 @@ static int kgdboc_earlycon_deferred_exit(struct console *con)
static void kgdboc_earlycon_deinit(void)
{
- if (!earlycon)
+ if (!kgdboc_earlycon_io_ops.cons)
return;
- if (earlycon->exit == kgdboc_earlycon_deferred_exit)
+ if (kgdboc_earlycon_io_ops.cons->exit == kgdboc_earlycon_deferred_exit)
/*
* kgdboc_earlycon is exiting but original boot console exit
* was never called (AKA kgdboc_earlycon_deferred_exit()
* didn't ever run). Undo our trap.
*/
- earlycon->exit = earlycon_orig_exit;
- else if (earlycon->exit)
+ kgdboc_earlycon_io_ops.cons->exit = earlycon_orig_exit;
+ else if (kgdboc_earlycon_io_ops.cons->exit)
/*
* We skipped calling the exit() routine so we could try to
* keep using the boot console even after it went away. We're
* finally done so call the function now.
*/
- earlycon->exit(earlycon);
+ kgdboc_earlycon_io_ops.cons->exit(kgdboc_earlycon_io_ops.cons);
- earlycon = NULL;
+ kgdboc_earlycon_io_ops.cons = NULL;
}
static struct kgdb_io kgdboc_earlycon_io_ops = {
@@ -511,7 +512,6 @@ static struct kgdb_io kgdboc_earlycon_io_ops = {
.write_char = kgdboc_earlycon_put_char,
.pre_exception = kgdboc_earlycon_pre_exp_handler,
.deinit = kgdboc_earlycon_deinit,
- .is_console = true,
};
#define MAX_CONSOLE_NAME_LEN (sizeof((struct console *) 0)->name)
@@ -557,10 +557,10 @@ static int __init kgdboc_earlycon_init(char *opt)
goto unlock;
}
- earlycon = con;
+ kgdboc_earlycon_io_ops.cons = con;
pr_info("Going to register kgdb with earlycon '%s'\n", con->name);
if (kgdb_register_io_module(&kgdboc_earlycon_io_ops) != 0) {
- earlycon = NULL;
+ kgdboc_earlycon_io_ops.cons = NULL;
pr_info("Failed to register kgdb with earlycon\n");
} else {
/* Trap exit so we can keep earlycon longer if needed. */
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index b4f835e7de23..b784323a6a7b 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1698,21 +1698,21 @@ static int mxs_auart_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
- goto out_disable_clks;
+ goto out_iounmap;
}
s->port.irq = irq;
ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0,
dev_name(&pdev->dev), s);
if (ret)
- goto out_disable_clks;
+ goto out_iounmap;
platform_set_drvdata(pdev, s);
ret = mxs_auart_init_gpios(s, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "Failed to initialize GPIOs.\n");
- goto out_disable_clks;
+ goto out_iounmap;
}
/*
@@ -1720,7 +1720,7 @@ static int mxs_auart_probe(struct platform_device *pdev)
*/
ret = mxs_auart_request_gpio_irq(s);
if (ret)
- goto out_disable_clks;
+ goto out_iounmap;
auart_port[s->port.line] = s;
@@ -1746,6 +1746,9 @@ out_free_qpio_irq:
mxs_auart_free_gpio_irq(s);
auart_port[pdev->id] = NULL;
+out_iounmap:
+ iounmap(s->port.membase);
+
out_disable_clks:
if (is_asm9260_auart(s)) {
clk_disable_unprepare(s->clk);
@@ -1761,6 +1764,7 @@ static int mxs_auart_remove(struct platform_device *pdev)
uart_remove_one_port(&auart_driver, &s->port);
auart_port[pdev->id] = NULL;
mxs_auart_free_gpio_irq(s);
+ iounmap(s->port.membase);
if (is_asm9260_auart(s)) {
clk_disable_unprepare(s->clk);
clk_disable_unprepare(s->clk_ahb);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 57840cf90388..5f3daabdc916 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -41,8 +41,6 @@ static struct lock_class_key port_lock_key;
#define HIGH_BITS_OFFSET ((sizeof(long)-sizeof(int))*8)
-#define SYSRQ_TIMEOUT (HZ * 5)
-
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
@@ -1916,6 +1914,12 @@ static inline bool uart_console_enabled(struct uart_port *port)
return uart_console(port) && (port->cons->flags & CON_ENABLED);
}
+static void __uart_port_spin_lock_init(struct uart_port *port)
+{
+ spin_lock_init(&port->lock);
+ lockdep_set_class(&port->lock, &port_lock_key);
+}
+
/*
* Ensure that the serial console lock is initialised early.
* If this port is a console, then the spinlock is already initialised.
@@ -1925,8 +1929,7 @@ static inline void uart_port_spin_lock_init(struct uart_port *port)
if (uart_console(port))
return;
- spin_lock_init(&port->lock);
- lockdep_set_class(&port->lock, &port_lock_key);
+ __uart_port_spin_lock_init(port);
}
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
@@ -2373,6 +2376,13 @@ uart_configure_port(struct uart_driver *drv, struct uart_state *state,
uart_change_pm(state, UART_PM_STATE_ON);
/*
+ * If this driver supports console, and it hasn't been
+ * successfully registered yet, initialise spin lock for it.
+ */
+ if (port->cons && !(port->cons->flags & CON_ENABLED))
+ __uart_port_spin_lock_init(port);
+
+ /*
* Ensure that the modem control lines are de-activated.
* keep the DTR setting that is set in uart_set_options()
* We probably don't need a spinlock around this, but
@@ -3163,7 +3173,7 @@ static DECLARE_WORK(sysrq_enable_work, uart_sysrq_on);
* Returns false if @ch is out of enabling sequence and should be
* handled some other way, true if @ch was consumed.
*/
-static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
+bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
{
int sysrq_toggle_seq_len = strlen(sysrq_toggle_seq);
@@ -3186,99 +3196,9 @@ static bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
port->sysrq = 0;
return true;
}
-#else
-static inline bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch)
-{
- return false;
-}
+EXPORT_SYMBOL_GPL(uart_try_toggle_sysrq);
#endif
-int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
-{
- if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
- return 0;
-
- if (!port->has_sysrq || !port->sysrq)
- return 0;
-
- if (ch && time_before(jiffies, port->sysrq)) {
- if (sysrq_mask()) {
- handle_sysrq(ch);
- port->sysrq = 0;
- return 1;
- }
- if (uart_try_toggle_sysrq(port, ch))
- return 1;
- }
- port->sysrq = 0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(uart_handle_sysrq_char);
-
-int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
-{
- if (!IS_ENABLED(CONFIG_MAGIC_SYSRQ_SERIAL))
- return 0;
-
- if (!port->has_sysrq || !port->sysrq)
- return 0;
-
- if (ch && time_before(jiffies, port->sysrq)) {
- if (sysrq_mask()) {
- port->sysrq_ch = ch;
- port->sysrq = 0;
- return 1;
- }
- if (uart_try_toggle_sysrq(port, ch))
- return 1;
- }
- port->sysrq = 0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(uart_prepare_sysrq_char);
-
-void uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long flags)
-__releases(&port->lock)
-{
- if (port->has_sysrq) {
- int sysrq_ch = port->sysrq_ch;
-
- port->sysrq_ch = 0;
- spin_unlock_irqrestore(&port->lock, flags);
- if (sysrq_ch)
- handle_sysrq(sysrq_ch);
- } else {
- spin_unlock_irqrestore(&port->lock, flags);
- }
-}
-EXPORT_SYMBOL_GPL(uart_unlock_and_check_sysrq);
-
-/*
- * We do the SysRQ and SAK checking like this...
- */
-int uart_handle_break(struct uart_port *port)
-{
- struct uart_state *state = port->state;
-
- if (port->handle_break)
- port->handle_break(port);
-
- if (port->has_sysrq && uart_console(port)) {
- if (!port->sysrq) {
- port->sysrq = jiffies + SYSRQ_TIMEOUT;
- return 1;
- }
- port->sysrq = 0;
- }
-
- if (port->flags & UPF_SAK)
- do_SAK(state->port.tty);
- return 0;
-}
-EXPORT_SYMBOL_GPL(uart_handle_break);
-
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
@@ -3289,8 +3209,7 @@ EXPORT_SYMBOL(uart_remove_one_port);
/**
* uart_get_rs485_mode() - retrieve rs485 properties for given uart
- * @dev: uart device
- * @rs485conf: output parameter
+ * @port: uart device's target port
*
* This function implements the device tree binding described in
* Documentation/devicetree/bindings/serial/rs485.txt.
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index e1179e74a2b8..204bb68ce3ca 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -3301,6 +3301,9 @@ static int sci_probe_single(struct platform_device *dev,
sciport->port.flags |= UPF_HARD_FLOW;
}
+ if (sci_uart_driver.cons->index == sciport->port.line)
+ spin_lock_init(&sciport->port.lock);
+
ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
if (ret) {
sci_cleanup_single(sciport);
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index b9d672af8b65..672cfa075e28 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1465,7 +1465,6 @@ static int cdns_uart_probe(struct platform_device *pdev)
cdns_uart_uart_driver.nr = CDNS_UART_NR_PORTS;
#ifdef CONFIG_SERIAL_XILINX_PS_UART_CONSOLE
cdns_uart_uart_driver.cons = &cdns_uart_console;
- cdns_uart_console.index = id;
#endif
rc = uart_register_driver(&cdns_uart_uart_driver);
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index ae319ef3a832..b60173bc93ce 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -159,9 +159,9 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
priv->pdev = pdev;
if (!uioinfo->irq) {
- ret = platform_get_irq(pdev, 0);
+ ret = platform_get_irq_optional(pdev, 0);
uioinfo->irq = ret;
- if (ret == -ENXIO && pdev->dev.of_node)
+ if (ret == -ENXIO)
uioinfo->irq = UIO_IRQ_NONE;
else if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index 633c52de3bb3..9865750bc31e 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -486,7 +486,7 @@ c67x00_giveback_urb(struct c67x00_hcd *c67x00, struct urb *urb, int status)
c67x00_release_urb(c67x00, urb);
usb_hcd_unlink_urb_from_ep(c67x00_hcd_to_hcd(c67x00), urb);
spin_unlock(&c67x00->lock);
- usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, urbp->status);
+ usb_hcd_giveback_urb(c67x00_hcd_to_hcd(c67x00), urb, status);
spin_lock(&c67x00->lock);
}
diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c
index 82645a2a0f52..5aa69980e7ff 100644
--- a/drivers/usb/cdns3/ep0.c
+++ b/drivers/usb/cdns3/ep0.c
@@ -37,18 +37,18 @@ static void cdns3_ep0_run_transfer(struct cdns3_device *priv_dev,
struct cdns3_usb_regs __iomem *regs = priv_dev->regs;
struct cdns3_endpoint *priv_ep = priv_dev->eps[0];
- priv_ep->trb_pool[0].buffer = TRB_BUFFER(dma_addr);
- priv_ep->trb_pool[0].length = TRB_LEN(length);
+ priv_ep->trb_pool[0].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+ priv_ep->trb_pool[0].length = cpu_to_le32(TRB_LEN(length));
if (zlp) {
- priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_TYPE(TRB_NORMAL);
- priv_ep->trb_pool[1].buffer = TRB_BUFFER(dma_addr);
- priv_ep->trb_pool[1].length = TRB_LEN(0);
- priv_ep->trb_pool[1].control = TRB_CYCLE | TRB_IOC |
- TRB_TYPE(TRB_NORMAL);
+ priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_NORMAL));
+ priv_ep->trb_pool[1].buffer = cpu_to_le32(TRB_BUFFER(dma_addr));
+ priv_ep->trb_pool[1].length = cpu_to_le32(TRB_LEN(0));
+ priv_ep->trb_pool[1].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+ TRB_TYPE(TRB_NORMAL));
} else {
- priv_ep->trb_pool[0].control = TRB_CYCLE | TRB_IOC |
- TRB_TYPE(TRB_NORMAL);
+ priv_ep->trb_pool[0].control = cpu_to_le32(TRB_CYCLE | TRB_IOC |
+ TRB_TYPE(TRB_NORMAL));
priv_ep->trb_pool[1].control = 0;
}
@@ -264,11 +264,11 @@ static int cdns3_req_ep0_get_status(struct cdns3_device *priv_dev,
case USB_RECIP_INTERFACE:
return cdns3_ep0_delegate_req(priv_dev, ctrl);
case USB_RECIP_ENDPOINT:
- index = cdns3_ep_addr_to_index(ctrl->wIndex);
+ index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
priv_ep = priv_dev->eps[index];
/* check if endpoint is stalled or stall is pending */
- cdns3_select_ep(priv_dev, ctrl->wIndex);
+ cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
if (EP_STS_STALL(readl(&priv_dev->regs->ep_sts)) ||
(priv_ep->flags & EP_STALL_PENDING))
usb_status = BIT(USB_ENDPOINT_HALT);
@@ -327,7 +327,8 @@ static int cdns3_ep0_feature_handle_device(struct cdns3_device *priv_dev,
if (!set || (tmode & 0xff) != 0)
return -EINVAL;
- switch (tmode >> 8) {
+ tmode >>= 8;
+ switch (tmode) {
case TEST_J:
case TEST_K:
case TEST_SE0_NAK:
@@ -380,10 +381,10 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
if (!(ctrl->wIndex & ~USB_DIR_IN))
return 0;
- index = cdns3_ep_addr_to_index(ctrl->wIndex);
+ index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
priv_ep = priv_dev->eps[index];
- cdns3_select_ep(priv_dev, ctrl->wIndex);
+ cdns3_select_ep(priv_dev, le16_to_cpu(ctrl->wIndex));
if (set)
__cdns3_gadget_ep_set_halt(priv_ep);
@@ -444,7 +445,7 @@ static int cdns3_req_ep0_set_sel(struct cdns3_device *priv_dev,
if (priv_dev->gadget.state < USB_STATE_ADDRESS)
return -EINVAL;
- if (ctrl_req->wLength != 6) {
+ if (le16_to_cpu(ctrl_req->wLength) != 6) {
dev_err(priv_dev->dev, "Set SEL should be 6 bytes, got %d\n",
ctrl_req->wLength);
return -EINVAL;
@@ -468,7 +469,7 @@ static int cdns3_req_ep0_set_isoch_delay(struct cdns3_device *priv_dev,
if (ctrl_req->wIndex || ctrl_req->wLength)
return -EINVAL;
- priv_dev->isoch_delay = ctrl_req->wValue;
+ priv_dev->isoch_delay = le16_to_cpu(ctrl_req->wValue);
return 0;
}
@@ -704,15 +705,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
int ret = 0;
u8 zlp = 0;
+ spin_lock_irqsave(&priv_dev->lock, flags);
trace_cdns3_ep0_queue(priv_dev, request);
/* cancel the request if controller receive new SETUP packet. */
- if (cdns3_check_new_setup(priv_dev))
+ if (cdns3_check_new_setup(priv_dev)) {
+ spin_unlock_irqrestore(&priv_dev->lock, flags);
return -ECONNRESET;
+ }
/* send STATUS stage. Should be called only for SET_CONFIGURATION */
if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) {
- spin_lock_irqsave(&priv_dev->lock, flags);
cdns3_select_ep(priv_dev, 0x00);
erdy_sent = !priv_dev->hw_configured_flag;
@@ -737,7 +740,6 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep,
return 0;
}
- spin_lock_irqsave(&priv_dev->lock, flags);
if (!list_empty(&priv_ep->pending_req_list)) {
dev_err(priv_dev->dev,
"can't handle multiple requests for ep0\n");
diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h
index 8d121e207fd8..0a2a3269bfac 100644
--- a/drivers/usb/cdns3/trace.h
+++ b/drivers/usb/cdns3/trace.h
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep0_irq,
__dynamic_array(char, str, CDNS3_MSG_MAX)
),
TP_fast_assign(
- __entry->ep_dir = priv_dev->ep0_data_dir;
+ __entry->ep_dir = priv_dev->selected_ep;
__entry->ep_sts = ep_sts;
),
TP_printk("%s", cdns3_decode_ep0_irq(__get_str(str),
@@ -404,9 +404,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb,
TP_fast_assign(
__assign_str(name, priv_ep->name);
__entry->trb = trb;
- __entry->buffer = trb->buffer;
- __entry->length = trb->length;
- __entry->control = trb->control;
+ __entry->buffer = le32_to_cpu(trb->buffer);
+ __entry->length = le32_to_cpu(trb->length);
+ __entry->control = le32_to_cpu(trb->control);
__entry->type = usb_endpoint_type(priv_ep->endpoint.desc);
__entry->last_stream_id = priv_ep->last_stream_id;
),
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 9a7c53d09ab4..bb133245beed 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1243,6 +1243,29 @@ static void ci_controller_suspend(struct ci_hdrc *ci)
enable_irq(ci->irq);
}
+/*
+ * Handle the wakeup interrupt triggered by extcon connector
+ * We need to call ci_irq again for extcon since the first
+ * interrupt (wakeup int) only brings the controller out of
+ * low power mode but does not handle any interrupts.
+ */
+static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
+{
+ struct ci_hdrc_cable *cable_id, *cable_vbus;
+ u32 otgsc = hw_read_otgsc(ci, ~0);
+
+ cable_id = &ci->platdata->id_extcon;
+ cable_vbus = &ci->platdata->vbus_extcon;
+
+ if (!IS_ERR(cable_id->edev) && ci->is_otg &&
+ (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
+ ci_irq(ci->irq, ci);
+
+ if (!IS_ERR(cable_vbus->edev) && ci->is_otg &&
+ (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
+ ci_irq(ci->irq, ci);
+}
+
static int ci_controller_resume(struct device *dev)
{
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -1275,6 +1298,7 @@ static int ci_controller_resume(struct device *dev)
enable_irq(ci->irq);
if (ci_otg_is_fsm_mode(ci))
ci_otg_fsm_wakeup_by_srp(ci);
+ ci_extcon_wakeup_int(ci);
}
return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f67088bb8218..d5187b50fc82 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1689,6 +1689,8 @@ static int acm_pre_reset(struct usb_interface *intf)
static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
+ { USB_DEVICE(0x0424, 0x274e), /* Microchip Technology, Inc. (formerly SMSC) */
+ .driver_info = DISABLE_ECHO, }, /* DISABLE ECHO in termios flag */
{ USB_DEVICE(0x076d, 0x0006), /* Denso Cradle CU-321 */
.driver_info = NO_UNION_NORMAL, },/* has no union descriptor */
{ USB_DEVICE(0x17ef, 0x7000), /* Lenovo USB modem */
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 3e8efe759c3e..e0b77674869c 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -218,11 +218,12 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech HD Webcam C270 */
{ USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920, C920-C, C925e and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x085b), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x085c), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
{ USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 12b98b466287..7faf5f8c056d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4920,12 +4920,6 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
epnum, 0);
}
- ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret) {
- dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
- hsotg->ctrl_req);
- return ret;
- }
dwc2_hsotg_dump(hsotg);
return 0;
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index e571c8ae65ec..cb8ddbd53718 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -342,7 +342,8 @@ static void dwc2_driver_shutdown(struct platform_device *dev)
{
struct dwc2_hsotg *hsotg = platform_get_drvdata(dev);
- disable_irq(hsotg->irq);
+ dwc2_disable_global_interrupts(hsotg);
+ synchronize_irq(hsotg->irq);
}
/**
@@ -575,6 +576,17 @@ static int dwc2_driver_probe(struct platform_device *dev)
if (hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)
dwc2_lowlevel_hw_disable(hsotg);
+#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \
+ IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE)
+ /* Postponed adding a new gadget to the udc class driver list */
+ if (hsotg->gadget_enabled) {
+ retval = usb_add_gadget_udc(hsotg->dev, &hsotg->gadget);
+ if (retval) {
+ dwc2_hsotg_remove(hsotg);
+ goto error_init;
+ }
+ }
+#endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */
return 0;
error_init:
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 48b68b6f0dc8..90bb022737da 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -162,12 +162,6 @@ static const struct dwc3_exynos_driverdata exynos5250_drvdata = {
.suspend_clk_idx = -1,
};
-static const struct dwc3_exynos_driverdata exynos5420_drvdata = {
- .clk_names = { "usbdrd30", "usbdrd30_susp_clk"},
- .num_clks = 2,
- .suspend_clk_idx = 1,
-};
-
static const struct dwc3_exynos_driverdata exynos5433_drvdata = {
.clk_names = { "aclk", "susp_clk", "pipe_pclk", "phyclk" },
.num_clks = 4,
@@ -185,9 +179,6 @@ static const struct of_device_id exynos_dwc3_match[] = {
.compatible = "samsung,exynos5250-dwusb3",
.data = &exynos5250_drvdata,
}, {
- .compatible = "samsung,exynos5420-dwusb3",
- .data = &exynos5420_drvdata,
- }, {
.compatible = "samsung,exynos5433-dwusb3",
.data = &exynos5433_drvdata,
}, {
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index b67372737dc9..139474c3e77b 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -38,6 +38,8 @@
#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_DEVICE_ID_INTEL_EHLLP 0x4b7e
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
+#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
+#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -206,8 +208,10 @@ static void dwc3_pci_resume_work(struct work_struct *work)
int ret;
ret = pm_runtime_get_sync(&dwc3->dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync_autosuspend(&dwc3->dev);
return;
+ }
pm_runtime_mark_last_busy(&dwc3->dev);
pm_runtime_put_sync_autosuspend(&dwc3->dev);
@@ -356,6 +360,12 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
(kernel_ulong_t) &dwc3_pci_intel_properties, },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
+ (kernel_ulong_t) &dwc3_pci_intel_properties, },
+
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
(kernel_ulong_t) &dwc3_pci_amd_properties, },
{ } /* Terminating Entry */
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index ea0d531c63e2..775cf70cfb3e 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -1058,7 +1058,8 @@ static int __init kgdbdbgp_parse_config(char *str)
kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
}
kgdb_register_io_module(&kgdbdbgp_io_ops);
- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
+ if (early_dbgp_console.index != -1)
+ kgdbdbgp_io_ops.cons = &early_dbgp_console;
return 0;
}
diff --git a/drivers/usb/gadget/function/f_uac1_legacy.c b/drivers/usb/gadget/function/f_uac1_legacy.c
index 349deae7cabd..e2d7f69128a0 100644
--- a/drivers/usb/gadget/function/f_uac1_legacy.c
+++ b/drivers/usb/gadget/function/f_uac1_legacy.c
@@ -336,7 +336,9 @@ static int f_audio_out_ep_complete(struct usb_ep *ep, struct usb_request *req)
/* Copy buffer is full, add it to the play_queue */
if (audio_buf_size - copy_buf->actual < req->actual) {
+ spin_lock_irq(&audio->lock);
list_add_tail(&copy_buf->list, &audio->play_queue);
+ spin_unlock_irq(&audio->lock);
schedule_work(&audio->playback_work);
copy_buf = f_audio_buffer_alloc(audio_buf_size);
if (IS_ERR(copy_buf))
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index d69f61ff0181..9342a3d24963 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -676,13 +676,7 @@ static int usba_ep_disable(struct usb_ep *_ep)
if (!ep->ep.desc) {
spin_unlock_irqrestore(&udc->lock, flags);
- /* REVISIT because this driver disables endpoints in
- * reset_all_endpoints() before calling disconnect(),
- * most gadget drivers would trigger this non-error ...
- */
- if (udc->gadget.speed != USB_SPEED_UNKNOWN)
- DBG(DBG_ERR, "ep_disable: %s not enabled\n",
- ep->ep.name);
+ DBG(DBG_ERR, "ep_disable: %s not enabled\n", ep->ep.name);
return -EINVAL;
}
ep->ep.desc = NULL;
@@ -871,7 +865,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
u32 status;
DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n",
- ep->ep.name, req);
+ ep->ep.name, _req);
spin_lock_irqsave(&udc->lock, flags);
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 7164ad9800f1..7419889ebe9a 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -1980,9 +1980,12 @@ static int gr_ep_init(struct gr_udc *dev, int num, int is_in, u32 maxplimit)
if (num == 0) {
_req = gr_alloc_request(&ep->ep, GFP_ATOMIC);
+ if (!_req)
+ return -ENOMEM;
+
buf = devm_kzalloc(dev->dev, PAGE_SIZE, GFP_DMA | GFP_ATOMIC);
- if (!_req || !buf) {
- /* possible _req freed by gr_probe via gr_remove */
+ if (!buf) {
+ gr_free_request(&ep->ep, _req);
return -ENOMEM;
}
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index cafde053788b..80a1b52c656e 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2313,7 +2313,8 @@ static int mv_udc_probe(struct platform_device *pdev)
return 0;
err_create_workqueue:
- destroy_workqueue(udc->qwork);
+ if (udc->qwork)
+ destroy_workqueue(udc->qwork);
err_destroy_dma:
dma_pool_destroy(udc->dtd_pool);
err_free_dma:
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 58a4d3325090..119505fac777 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_get_string);
/**
* usb_validate_langid - validate usb language identifiers
- * @lang: usb language identifier
+ * @langid: usb language identifier
*
* Returns true for valid language identifier, otherwise false.
*/
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index a4e9abcbdc4f..1a9b7572e17f 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -203,9 +203,8 @@ static int exynos_ehci_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
irq = platform_get_irq(pdev, 0);
- if (!irq) {
- dev_err(&pdev->dev, "Failed to get IRQ\n");
- err = -ENODEV;
+ if (irq < 0) {
+ err = irq;
goto fail_io;
}
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 3c3820ad9092..af3c1b9b38b2 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -216,6 +216,13 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
ehci_info(ehci, "applying MosChip frame-index workaround\n");
ehci->frame_index_bug = 1;
break;
+ case PCI_VENDOR_ID_HUAWEI:
+ /* Synopsys HC bug */
+ if (pdev->device == 0xa239) {
+ ehci_info(ehci, "applying Synopsys HC workaround\n");
+ ehci->has_synopsys_hc_bug = 1;
+ }
+ break;
}
/* optional debug port, normally in the first BAR */
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index cff965240327..b91d50da6127 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -191,6 +191,7 @@ static int ohci_hcd_sm501_drv_remove(struct platform_device *pdev)
struct resource *mem;
usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index bfbdb3ceed29..4311d4c9b68d 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -587,6 +587,9 @@ static int xhci_mtk_remove(struct platform_device *dev)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct usb_hcd *shared_hcd = xhci->shared_hcd;
+ pm_runtime_put_noidle(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
usb_remove_hcd(shared_hcd);
xhci->shared_hcd = NULL;
device_init_wakeup(&dev->dev, false);
@@ -597,8 +600,6 @@ static int xhci_mtk_remove(struct platform_device *dev)
xhci_mtk_sch_exit(mtk);
xhci_mtk_clks_disable(mtk);
xhci_mtk_ldos_disable(mtk);
- pm_runtime_put_sync(&dev->dev);
- pm_runtime_disable(&dev->dev);
return 0;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index bee5deccc83d..ed468eed299c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1430,6 +1430,7 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
xhci->devs[slot_id]->out_ctx, ep_index);
ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
+ ep_ctx->ep_info &= cpu_to_le32(~EP_STATE_MASK);/* must clear */
ep_ctx->ep_info2 &= cpu_to_le32(~MAX_PACKET_MASK);
ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet_size));
@@ -4390,6 +4391,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
int hird, exit_latency;
int ret;
+ if (xhci->quirks & XHCI_HW_LPM_DISABLE)
+ return -EPERM;
+
if (hcd->speed >= HCD_USB3 || !xhci->hw_lpm_support ||
!udev->lpm_capable)
return -EPERM;
@@ -4412,7 +4416,7 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
xhci_dbg(xhci, "%s port %d USB2 hardware LPM\n",
enable ? "enable" : "disable", port_num + 1);
- if (enable && !(xhci->quirks & XHCI_HW_LPM_DISABLE)) {
+ if (enable) {
/* Host supports BESL timeout instead of HIRD */
if (udev->usb2_hw_lpm_besl_capable) {
/* if device doesn't have a preferred BESL value use a
@@ -4471,6 +4475,9 @@ static int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
mutex_lock(hcd->bandwidth_mutex);
xhci_change_max_exit_latency(xhci, udev, 0);
mutex_unlock(hcd->bandwidth_mutex);
+ readl_poll_timeout(ports[port_num]->addr, pm_val,
+ (pm_val & PORT_PLS_MASK) == XDEV_U0,
+ 100, 10000);
return 0;
}
}
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 2c6c4f8d1ee1..c295e8a7f5ae 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -716,7 +716,7 @@ struct xhci_ep_ctx {
* 4 - TRB error
* 5-7 - reserved
*/
-#define EP_STATE_MASK (0xf)
+#define EP_STATE_MASK (0x7)
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 98ada1a3425c..bae88893ee8e 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -2873,6 +2873,7 @@ static void usbtest_disconnect(struct usb_interface *intf)
usb_set_intfdata(intf, NULL);
dev_dbg(&intf->dev, "disconnect\n");
+ kfree(dev->buf);
kfree(dev);
}
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index cffe2aced488..03a333797382 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -1199,11 +1199,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tegra_phy);
- err = usb_add_phy_dev(&tegra_phy->u_phy);
- if (err)
- return err;
-
- return 0;
+ return usb_add_phy_dev(&tegra_phy->u_phy);
}
static int tegra_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 01c6a48c41bc..ac9a81ae8216 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -803,7 +803,8 @@ static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
return info->dma_map_ctrl(chan->device->dev, pkt, map);
}
-static void usbhsf_dma_complete(void *arg);
+static void usbhsf_dma_complete(void *arg,
+ const struct dmaengine_result *result);
static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
{
struct usbhs_pipe *pipe = pkt->pipe;
@@ -813,6 +814,7 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
struct dma_chan *chan;
struct device *dev = usbhs_priv_to_dev(priv);
enum dma_transfer_direction dir;
+ dma_cookie_t cookie;
fifo = usbhs_pipe_to_fifo(pipe);
if (!fifo)
@@ -827,11 +829,11 @@ static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
if (!desc)
return;
- desc->callback = usbhsf_dma_complete;
- desc->callback_param = pipe;
+ desc->callback_result = usbhsf_dma_complete;
+ desc->callback_param = pkt;
- pkt->cookie = dmaengine_submit(desc);
- if (pkt->cookie < 0) {
+ cookie = dmaengine_submit(desc);
+ if (cookie < 0) {
dev_err(dev, "Failed to submit dma descriptor\n");
return;
}
@@ -1152,12 +1154,10 @@ static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
struct dma_chan *chan, int dtln)
{
struct usbhs_pipe *pipe = pkt->pipe;
- struct dma_tx_state state;
size_t received_size;
int maxp = usbhs_pipe_get_maxpacket(pipe);
- dmaengine_tx_status(chan, pkt->cookie, &state);
- received_size = pkt->length - state.residue;
+ received_size = pkt->length - pkt->dma_result->residue;
if (dtln) {
received_size -= USBHS_USB_DMAC_XFER_SIZE;
@@ -1363,13 +1363,16 @@ static int usbhsf_irq_ready(struct usbhs_priv *priv,
return 0;
}
-static void usbhsf_dma_complete(void *arg)
+static void usbhsf_dma_complete(void *arg,
+ const struct dmaengine_result *result)
{
- struct usbhs_pipe *pipe = arg;
+ struct usbhs_pkt *pkt = arg;
+ struct usbhs_pipe *pipe = pkt->pipe;
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
int ret;
+ pkt->dma_result = result;
ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
if (ret < 0)
dev_err(dev, "dma_complete run_error %d : %d\n",
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 7d3700bf41d9..039a2b993157 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -50,7 +50,7 @@ struct usbhs_pkt {
struct usbhs_pkt *pkt);
struct work_struct work;
dma_addr_t dma;
- dma_cookie_t cookie;
+ const struct dmaengine_result *dma_result;
void *buf;
int length;
int trans;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 89675ee29645..8fbaef5c9d69 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -77,6 +77,7 @@
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
+ { USB_DEVICE(0x1a86, 0x7522) },
{ USB_DEVICE(0x1a86, 0x7523) },
{ USB_DEVICE(0x1a86, 0x5523) },
{ },
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 216edd5826ca..ecda82198798 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -59,6 +59,7 @@ static const struct usb_device_id id_table_earthmate[] = {
static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ } /* Terminating entry */
@@ -73,6 +74,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
+ { USB_DEVICE(VENDOR_ID_SAI, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) },
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h
index 35e223751c0e..16b7410ad057 100644
--- a/drivers/usb/serial/cypress_m8.h
+++ b/drivers/usb/serial/cypress_m8.h
@@ -25,6 +25,9 @@
#define VENDOR_ID_CYPRESS 0x04b4
#define PRODUCT_ID_CYPHIDCOM 0x5500
+/* Simply Automated HID->COM UPB PIM (using Cypress PID 0x5500) */
+#define VENDOR_ID_SAI 0x17dd
+
/* FRWD Dongle - a GPS sports watch */
#define VENDOR_ID_FRWD 0x6737
#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index d5bff69b1769..b8dfeb4fb2ed 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -697,14 +697,16 @@ static int iuu_uart_write(struct tty_struct *tty, struct usb_serial_port *port,
struct iuu_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- if (count > 256)
- return -ENOMEM;
-
spin_lock_irqsave(&priv->lock, flags);
+ count = min(count, 256 - priv->writelen);
+ if (count == 0)
+ goto out;
+
/* fill the buffer */
memcpy(priv->writebuf + priv->writelen, buf, count);
priv->writelen += count;
+out:
spin_unlock_irqrestore(&priv->lock, flags);
return count;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 254a8bbeea67..9b7cee98ea60 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -245,6 +245,7 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Quectel's vendor ID */
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
+#define QUECTEL_PRODUCT_EG95 0x0195
#define QUECTEL_PRODUCT_BG96 0x0296
#define QUECTEL_PRODUCT_EP06 0x0306
#define QUECTEL_PRODUCT_EM12 0x0512
@@ -1097,6 +1098,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = RSVD(4) },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EG95),
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
@@ -2028,6 +2031,9 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
+ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index 962bc69a6a59..70ddc9d6d49e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -148,7 +148,8 @@ pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state)
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
- msg[1] = PMC_USB_DP_HPD_IRQ;
+ if (data->status & DP_STATUS_IRQ_HPD)
+ msg[1] = PMC_USB_DP_HPD_IRQ;
if (data->status & DP_STATUS_HPD_STATE)
msg[1] |= PMC_USB_DP_HPD_LVL;
@@ -161,6 +162,7 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
{
struct typec_displayport_data *data = state->data;
struct altmode_req req = { };
+ int ret;
if (data->status & DP_STATUS_IRQ_HPD)
return pmc_usb_mux_dp_hpd(port, state);
@@ -181,7 +183,14 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state)
if (data->status & DP_STATUS_HPD_STATE)
req.mode_data |= PMC_USB_ALTMODE_HPD_HIGH;
- return pmc_usb_command(port, (void *)&req, sizeof(req));
+ ret = pmc_usb_command(port, (void *)&req, sizeof(req));
+ if (ret)
+ return ret;
+
+ if (data->status & DP_STATUS_HPD_STATE)
+ return pmc_usb_mux_dp_hpd(port, state);
+
+ return 0;
}
static int
diff --git a/drivers/usb/typec/tcpm/tcpci_rt1711h.c b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
index 017389021b96..b56a0880a044 100644
--- a/drivers/usb/typec/tcpm/tcpci_rt1711h.c
+++ b/drivers/usb/typec/tcpm/tcpci_rt1711h.c
@@ -179,26 +179,6 @@ out:
return tcpci_irq(chip->tcpci);
}
-static int rt1711h_init_alert(struct rt1711h_chip *chip,
- struct i2c_client *client)
-{
- int ret;
-
- /* Disable chip interrupts before requesting irq */
- ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
- if (ret < 0)
- return ret;
-
- ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
- rt1711h_irq,
- IRQF_ONESHOT | IRQF_TRIGGER_LOW,
- dev_name(chip->dev), chip);
- if (ret < 0)
- return ret;
- enable_irq_wake(client->irq);
- return 0;
-}
-
static int rt1711h_sw_reset(struct rt1711h_chip *chip)
{
int ret;
@@ -260,7 +240,8 @@ static int rt1711h_probe(struct i2c_client *client,
if (ret < 0)
return ret;
- ret = rt1711h_init_alert(chip, client);
+ /* Disable chip interrupts before requesting irq */
+ ret = rt1711h_write16(chip, TCPC_ALERT_MASK, 0);
if (ret < 0)
return ret;
@@ -271,6 +252,14 @@ static int rt1711h_probe(struct i2c_client *client,
if (IS_ERR_OR_NULL(chip->tcpci))
return PTR_ERR(chip->tcpci);
+ ret = devm_request_threaded_irq(chip->dev, client->irq, NULL,
+ rt1711h_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_LOW,
+ dev_name(chip->dev), chip);
+ if (ret < 0)
+ return ret;
+ enable_irq_wake(client->irq);
+
return 0;
}
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index ff6562f602e0..de211ef3738c 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -63,7 +63,7 @@ static void vdpa_release_dev(struct device *d)
* @config: the bus operations that is supported by this device
* @size: size of the parent structure that contains private data
*
- * Drvier should use vdap_alloc_device() wrapper macro instead of
+ * Driver should use vdpa_alloc_device() wrapper macro instead of
* using this directly.
*
* Returns an error when parent/config/dma_dev is not set or fail to get
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 7c0779018b1b..de881a6cff35 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -521,10 +521,19 @@ static void vfio_pci_release(void *device_data)
vfio_pci_vf_token_user_add(vdev, -1);
vfio_spapr_pci_eeh_release(vdev->pdev);
vfio_pci_disable(vdev);
- if (vdev->err_trigger)
+ mutex_lock(&vdev->igate);
+ if (vdev->err_trigger) {
eventfd_ctx_put(vdev->err_trigger);
- if (vdev->req_trigger)
+ vdev->err_trigger = NULL;
+ }
+ mutex_unlock(&vdev->igate);
+
+ mutex_lock(&vdev->igate);
+ if (vdev->req_trigger) {
eventfd_ctx_put(vdev->req_trigger);
+ vdev->req_trigger = NULL;
+ }
+ mutex_unlock(&vdev->igate);
}
mutex_unlock(&vdev->reflck->lock);
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index 8746c943247a..d98843feddce 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -398,9 +398,15 @@ static inline void p_setd(struct perm_bits *p, int off, u32 virt, u32 write)
/* Caller should hold memory_lock semaphore */
bool __vfio_pci_memory_enabled(struct vfio_pci_device *vdev)
{
+ struct pci_dev *pdev = vdev->pdev;
u16 cmd = le16_to_cpu(*(__le16 *)&vdev->vconfig[PCI_COMMAND]);
- return cmd & PCI_COMMAND_MEMORY;
+ /*
+ * SR-IOV VF memory enable is handled by the MSE bit in the
+ * PF SR-IOV capability, there's therefore no need to trigger
+ * faults based on the virtual value.
+ */
+ return pdev->is_virtfn || (cmd & PCI_COMMAND_MEMORY);
}
/*
@@ -1728,6 +1734,15 @@ int vfio_config_init(struct vfio_pci_device *vdev)
vconfig[PCI_INTERRUPT_PIN]);
vconfig[PCI_INTERRUPT_PIN] = 0; /* Gratuitous for good VFs */
+
+ /*
+	 * VFs do not implement the memory enable bit of the COMMAND
+	 * register, therefore we'll not have it set in our initial
+ * copy of config space after pci_enable_device(). For
+ * consistency with PFs, set the virtual enable bit here.
+ */
+ *(__le16 *)&vconfig[PCI_COMMAND] |=
+ cpu_to_le16(PCI_COMMAND_MEMORY);
}
if (!IS_ENABLED(CONFIG_VFIO_PCI_INTX) || vdev->nointx)
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 0466921f4772..a09dedc79f68 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -263,9 +263,62 @@ static int vhost_test_set_features(struct vhost_test *n, u64 features)
return 0;
}
+static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
+{
+ static void *backend;
+
+ const bool enable = fd != -1;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_TEST_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = &n->vqs[index];
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ if (!enable) {
+ vhost_poll_stop(&vq->poll);
+ backend = vhost_vq_get_backend(vq);
+ vhost_vq_set_backend(vq, NULL);
+ } else {
+ vhost_vq_set_backend(vq, backend);
+ r = vhost_vq_init_access(vq);
+ if (r == 0)
+ r = vhost_poll_start(&vq->poll, vq->kick);
+ }
+
+ mutex_unlock(&vq->mutex);
+
+ if (enable) {
+ vhost_test_flush_vq(n, index);
+ }
+
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
+ struct vhost_vring_file backend;
struct vhost_test *n = f->private_data;
void __user *argp = (void __user *)arg;
u64 __user *featurep = argp;
@@ -277,6 +330,10 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&test, argp, sizeof test))
return -EFAULT;
return vhost_test_run(n, test);
+ case VHOST_TEST_SET_BACKEND:
+ if (copy_from_user(&backend, argp, sizeof backend))
+ return -EFAULT;
+ return vhost_test_set_backend(n, backend.index, backend.fd);
case VHOST_GET_FEATURES:
features = VHOST_FEATURES;
if (copy_to_user(featurep, &features, sizeof features))
diff --git a/drivers/vhost/test.h b/drivers/vhost/test.h
index 7dd265bfdf81..822bc4bee03a 100644
--- a/drivers/vhost/test.h
+++ b/drivers/vhost/test.h
@@ -4,5 +4,6 @@
/* Start a given test on the virtio null device. 0 stops all tests. */
#define VHOST_TEST_RUN _IOW(VHOST_VIRTIO, 0x31, int)
+#define VHOST_TEST_SET_BACKEND _IOW(VHOST_VIRTIO, 0x32, int)
#endif
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 7580e34f76c1..a54b60d6623f 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -818,7 +818,7 @@ static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
struct vdpa_device *vdpa = v->vdpa;
const struct vdpa_config_ops *ops = vdpa->config;
struct vdpa_notification_area notify;
- int index = vma->vm_pgoff;
+ unsigned long index = vma->vm_pgoff;
if (vma->vm_end - vma->vm_start != PAGE_SIZE)
return -EINVAL;
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index e8ab583e5098..113116d3585c 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -107,7 +107,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
/* TG LCD GVSS */
tosa_tg_send(spi, TG_PINICTL, 0x0);
- if (!data->i2c) {
+ if (IS_ERR_OR_NULL(data->i2c)) {
/*
* after the pannel is powered up the first time,
* we can access the i2c bus so probe for the DAC
@@ -119,7 +119,7 @@ static void tosa_lcd_tg_on(struct tosa_lcd_data *data)
.addr = DAC_BASE,
.platform_data = data->spi,
};
- data->i2c = i2c_new_device(adap, &info);
+ data->i2c = i2c_new_client_device(adap, &info);
}
}
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 9d28a8e3328f..e2a490c5ae08 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -2402,7 +2402,8 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
ops->graphics = 1;
if (!blank) {
- var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
+ var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE |
+ FB_ACTIVATE_KD_TEXT;
fb_set_var(info, &var);
ops->graphics = 0;
ops->var = info->var;
diff --git a/drivers/video/fbdev/hpfb.c b/drivers/video/fbdev/hpfb.c
index f02be0db335e..8d418abdd767 100644
--- a/drivers/video/fbdev/hpfb.c
+++ b/drivers/video/fbdev/hpfb.c
@@ -402,7 +402,7 @@ int __init hpfb_init(void)
if (err)
return err;
- err = probe_kernel_read(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1);
+ err = copy_from_kernel_nofault(&i, (unsigned char *)INTFBVADDR + DIO_IDOFF, 1);
if (!err && (i == DIO_ID_FBUFFER) && topcat_sid_ok(sid = DIO_SECID(INTFBVADDR))) {
if (!request_mem_region(INTFBPADDR, DIO_DEVSIZE, "Internal Topcat"))
diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c
index bee29aadc646..def14ac0ebe1 100644
--- a/drivers/video/fbdev/uvesafb.c
+++ b/drivers/video/fbdev/uvesafb.c
@@ -1836,7 +1836,7 @@ static int uvesafb_setup(char *options)
else if (!strcmp(this_opt, "noedid"))
noedid = true;
else if (!strcmp(this_opt, "noblank"))
- blank = true;
+ blank = false;
else if (!strncmp(this_opt, "vtotal:", 7))
vram_total = simple_strtoul(this_opt + 7, NULL, 0);
else if (!strncmp(this_opt, "vremap:", 7))
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index b690a8a4bf9e..18ebd7a6af98 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1444,7 +1444,7 @@ static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
or_mask = caps->u.in.or_mask;
not_mask = caps->u.in.not_mask;
- if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
+ if ((or_mask | not_mask) & ~VMMDEV_GUEST_CAPABILITIES_MASK)
return -EINVAL;
ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
@@ -1520,7 +1520,8 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
- req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
return vbg_ioctl_vmmrequest(gdev, session, data);
if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
@@ -1558,6 +1559,7 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
+ case VBG_IOCTL_LOG_ALT(0):
return vbg_ioctl_log(data);
}
diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
index 4188c12b839f..77c3a9c8255d 100644
--- a/drivers/virt/vboxguest/vboxguest_core.h
+++ b/drivers/virt/vboxguest/vboxguest_core.h
@@ -15,6 +15,21 @@
#include <linux/vboxguest.h>
#include "vmmdev.h"
+/*
+ * The mainline kernel version (this version) of the vboxguest module
+ * contained a bug where it defined VBGL_IOCTL_VMMDEV_REQUEST_BIG and
+ * VBGL_IOCTL_LOG using _IOC(_IOC_READ | _IOC_WRITE, 'V', ...) instead
+ * of _IO(V, ...) as the out of tree VirtualBox upstream version does.
+ *
+ * These _ALT definitions keep compatibility with the wrong defines the
+ * mainline kernel version used for a while.
+ * Note the VirtualBox userspace bits have always been built against
+ * VirtualBox upstream's headers, so this is likely not necessary. But
+ * we must never break our ABI so we keep these around to be 100% sure.
+ */
+#define VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, 0)
+#define VBG_IOCTL_LOG_ALT(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 9, s)
+
struct vbg_session;
/** VBox guest memory balloon. */
diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
index 6e8c0f1c1056..32c2c52f7e84 100644
--- a/drivers/virt/vboxguest/vboxguest_linux.c
+++ b/drivers/virt/vboxguest/vboxguest_linux.c
@@ -131,7 +131,8 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
* the need for a bounce-buffer and another copy later on.
*/
is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
- req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
+ req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
if (is_vmmdev_req)
buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
diff --git a/drivers/virt/vboxguest/vmmdev.h b/drivers/virt/vboxguest/vmmdev.h
index 6337b8d75d96..21f408120e3f 100644
--- a/drivers/virt/vboxguest/vmmdev.h
+++ b/drivers/virt/vboxguest/vmmdev.h
@@ -206,6 +206,8 @@ VMMDEV_ASSERT_SIZE(vmmdev_mask, 24 + 8);
* not.
*/
#define VMMDEV_GUEST_SUPPORTS_GRAPHICS BIT(2)
+/* The mask of valid capabilities, for sanity checking. */
+#define VMMDEV_GUEST_CAPABILITIES_MASK 0x00000007U
/** struct vmmdev_hypervisorinfo - Hypervisor info structure. */
struct vmmdev_hypervisorinfo {
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 50c689f25045..f26f5f64ae82 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -101,6 +101,11 @@ struct virtio_mem {
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
+ /*
+ * Copy of "System RAM (virtio_mem)" to be used for
+ * add_memory_driver_managed().
+ */
+ const char *resource_name;
/* Summary of all memory block states. */
unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
@@ -414,8 +419,20 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
if (nid == NUMA_NO_NODE)
nid = memory_add_physaddr_to_nid(addr);
+ /*
+	 * When force-unloading the driver while we still have memory added to
+ * Linux, the resource name has to stay.
+ */
+ if (!vm->resource_name) {
+ vm->resource_name = kstrdup_const("System RAM (virtio_mem)",
+ GFP_KERNEL);
+ if (!vm->resource_name)
+ return -ENOMEM;
+ }
+
dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
- return add_memory(nid, addr, memory_block_size_bytes());
+ return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
+ vm->resource_name);
}
/*
@@ -1192,7 +1209,7 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
VIRTIO_MEM_MB_STATE_OFFLINE);
}
- return rc;
+ return 0;
}
/*
@@ -1890,10 +1907,12 @@ static void virtio_mem_remove(struct virtio_device *vdev)
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE])
+ vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_MOVABLE]) {
dev_warn(&vdev->dev, "device still has system memory added\n");
- else
+ } else {
virtio_mem_delete_resource(vm);
+ kfree_const(vm->resource_name);
+ }
/* remove all tracking data - no locking needed */
vfree(vm->mb_state);
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 9d16aaffca9d..627ac0487494 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -641,11 +641,11 @@ static int vm_cmdline_set(const char *device,
&vm_cmdline_id, &consumed);
/*
- * sscanf() must processes at least 2 chunks; also there
+ * sscanf() must process at least 2 chunks; also there
* must be no extra characters after the last chunk, so
* str[consumed] must be '\0'
*/
- if (processed < 2 || str[consumed])
+ if (processed < 2 || str[consumed] || irq == 0)
return -EINVAL;
resources[0].flags = IORESOURCE_MEM;
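The corrected comment states the parsing contract: sscanf() must convert the required chunks, the %n bookkeeping in consumed must land exactly on the terminating NUL so trailing characters are rejected, and the new test also refuses an explicit zero IRQ. A user-space flavoured sketch of that validation pattern; the format string is illustrative, not the exact virtio-mmio command line syntax:

    #include <stdio.h>

    /* Parse "<size>@<addr>:<irq>", rejecting trailing garbage and IRQ 0. */
    static int demo_parse(const char *str)
    {
        long long size, addr;
        unsigned int irq;
        int consumed = 0;
        int processed = sscanf(str, "%lli@%lli:%u%n",
                               &size, &addr, &irq, &consumed);

        /*
         * All three conversions must succeed, %n must point at the
         * terminating NUL, and IRQ 0 is not usable.
         */
        if (processed < 3 || str[consumed] != '\0' || irq == 0)
            return -1;

        return 0;
    }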
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 040d2a43e8e3..786fbb7d8be0 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -69,11 +69,27 @@ struct xenbus_map_node {
unsigned int nr_handles;
};
+struct map_ring_valloc {
+ struct xenbus_map_node *node;
+
+ /* Why do we need two arrays? See comment of __xenbus_map_ring */
+ union {
+ unsigned long addrs[XENBUS_MAX_RING_GRANTS];
+ pte_t *ptes[XENBUS_MAX_RING_GRANTS];
+ };
+ phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
+
+ struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
+ struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
+
+ unsigned int idx; /* HVM only. */
+};
+
static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);
struct xenbus_ring_ops {
- int (*map)(struct xenbus_device *dev,
+ int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info,
grant_ref_t *gnt_refs, unsigned int nr_grefs,
void **vaddr);
int (*unmap)(struct xenbus_device *dev, void *vaddr);
@@ -440,8 +456,7 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
* Map @nr_grefs pages of memory into this domain from another
* domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs
* pages of virtual address space, maps the pages to that address, and
- * sets *vaddr to that address. Returns 0 on success, and GNTST_*
- * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
+ * sets *vaddr to that address. Returns 0 on success, and -errno on
* error. If an error is returned, device will switch to
* XenbusStateClosing and the error message will be saved in XenStore.
*/
@@ -449,12 +464,25 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
unsigned int nr_grefs, void **vaddr)
{
int err;
+ struct map_ring_valloc *info;
+
+ *vaddr = NULL;
+
+ if (nr_grefs > XENBUS_MAX_RING_GRANTS)
+ return -EINVAL;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
- err = ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
- /* Some hypervisors are buggy and can return 1. */
- if (err > 0)
- err = GNTST_general_error;
+ info->node = kzalloc(sizeof(*info->node), GFP_KERNEL);
+ if (!info->node)
+ err = -ENOMEM;
+ else
+ err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr);
+ kfree(info->node);
+ kfree(info);
return err;
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
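With this change the documented contract for xenbus_map_ring_valloc() is simply 0 or -errno: the wrapper allocates the scratch map_ring_valloc state, delegates to the PV or HVM ring_ops->map implementation, and callers never see raw GNTST_* values. A minimal sketch of a frontend-style caller under that contract; the helper name and its surroundings are hypothetical:

    #include <xen/grant_table.h>
    #include <xen/xenbus.h>

    /* Map nr_grefs backend grant references and return the ring address. */
    static int demo_front_map_ring(struct xenbus_device *dev,
                                   grant_ref_t *gnt_refs,
                                   unsigned int nr_grefs, void **ring)
    {
        int err = xenbus_map_ring_valloc(dev, gnt_refs, nr_grefs, ring);

        if (err)
            return err; /* already -EINVAL, -ENOMEM, ..., no GNTST_* */

        /* Torn down later with xenbus_unmap_ring_vfree(dev, *ring). */
        return 0;
    }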
@@ -466,62 +494,57 @@ static int __xenbus_map_ring(struct xenbus_device *dev,
grant_ref_t *gnt_refs,
unsigned int nr_grefs,
grant_handle_t *handles,
- phys_addr_t *addrs,
+ struct map_ring_valloc *info,
unsigned int flags,
bool *leaked)
{
- struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS];
- struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
int i, j;
- int err = GNTST_okay;
if (nr_grefs > XENBUS_MAX_RING_GRANTS)
return -EINVAL;
for (i = 0; i < nr_grefs; i++) {
- memset(&map[i], 0, sizeof(map[i]));
- gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
- dev->otherend_id);
+ gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags,
+ gnt_refs[i], dev->otherend_id);
handles[i] = INVALID_GRANT_HANDLE;
}
- gnttab_batch_map(map, i);
+ gnttab_batch_map(info->map, i);
for (i = 0; i < nr_grefs; i++) {
- if (map[i].status != GNTST_okay) {
- err = map[i].status;
- xenbus_dev_fatal(dev, map[i].status,
+ if (info->map[i].status != GNTST_okay) {
+ xenbus_dev_fatal(dev, info->map[i].status,
"mapping in shared page %d from domain %d",
gnt_refs[i], dev->otherend_id);
goto fail;
} else
- handles[i] = map[i].handle;
+ handles[i] = info->map[i].handle;
}
- return GNTST_okay;
+ return 0;
fail:
for (i = j = 0; i < nr_grefs; i++) {
if (handles[i] != INVALID_GRANT_HANDLE) {
- memset(&unmap[j], 0, sizeof(unmap[j]));
- gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
+ gnttab_set_unmap_op(&info->unmap[j],
+ info->phys_addrs[i],
GNTMAP_host_map, handles[i]);
j++;
}
}
- if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
+ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j))
BUG();
*leaked = false;
for (i = 0; i < j; i++) {
- if (unmap[i].status != GNTST_okay) {
+ if (info->unmap[i].status != GNTST_okay) {
*leaked = true;
break;
}
}
- return err;
+ return -ENOENT;
}
/**
@@ -566,21 +589,12 @@ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles,
return err;
}
-struct map_ring_valloc_hvm
-{
- unsigned int idx;
-
- /* Why do we need two arrays? See comment of __xenbus_map_ring */
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
- unsigned long addrs[XENBUS_MAX_RING_GRANTS];
-};
-
static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
unsigned int goffset,
unsigned int len,
void *data)
{
- struct map_ring_valloc_hvm *info = data;
+ struct map_ring_valloc *info = data;
unsigned long vaddr = (unsigned long)gfn_to_virt(gfn);
info->phys_addrs[info->idx] = vaddr;
@@ -589,39 +603,28 @@ static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn,
info->idx++;
}
-static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
- grant_ref_t *gnt_ref,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_hvm(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_ref,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
int err;
void *addr;
bool leaked = false;
- struct map_ring_valloc_hvm info = {
- .idx = 0,
- };
unsigned int nr_pages = XENBUS_PAGES(nr_grefs);
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- *vaddr = NULL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
-
err = alloc_xenballooned_pages(nr_pages, node->hvm.pages);
if (err)
goto out_err;
gnttab_foreach_grant(node->hvm.pages, nr_grefs,
xenbus_map_ring_setup_grant_hvm,
- &info);
+ info);
err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
- info.phys_addrs, GNTMAP_host_map, &leaked);
+ info, GNTMAP_host_map, &leaked);
node->nr_handles = nr_grefs;
if (err)
@@ -641,11 +644,13 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock);
*vaddr = addr;
+ info->node = NULL;
+
return 0;
out_xenbus_unmap_ring:
if (!leaked)
- xenbus_unmap_ring(dev, node->handles, nr_grefs, info.addrs);
+ xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs);
else
pr_alert("leaking %p size %u page(s)",
addr, nr_pages);
@@ -653,7 +658,6 @@ static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
if (!leaked)
free_xenballooned_pages(nr_pages, node->hvm.pages);
out_err:
- kfree(node);
return err;
}
@@ -676,40 +680,28 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
#ifdef CONFIG_XEN_PV
-static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
- grant_ref_t *gnt_refs,
- unsigned int nr_grefs,
- void **vaddr)
+static int xenbus_map_ring_pv(struct xenbus_device *dev,
+ struct map_ring_valloc *info,
+ grant_ref_t *gnt_refs,
+ unsigned int nr_grefs,
+ void **vaddr)
{
- struct xenbus_map_node *node;
+ struct xenbus_map_node *node = info->node;
struct vm_struct *area;
- pte_t *ptes[XENBUS_MAX_RING_GRANTS];
- phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS];
int err = GNTST_okay;
int i;
bool leaked;
- *vaddr = NULL;
-
- if (nr_grefs > XENBUS_MAX_RING_GRANTS)
- return -EINVAL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
+ area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, info->ptes);
+ if (!area)
return -ENOMEM;
- area = alloc_vm_area(XEN_PAGE_SIZE * nr_grefs, ptes);
- if (!area) {
- kfree(node);
- return -ENOMEM;
- }
-
for (i = 0; i < nr_grefs; i++)
- phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;
+ info->phys_addrs[i] =
+ arbitrary_virt_to_machine(info->ptes[i]).maddr;
err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
- phys_addrs,
- GNTMAP_host_map | GNTMAP_contains_pte,
+ info, GNTMAP_host_map | GNTMAP_contains_pte,
&leaked);
if (err)
goto failed;
@@ -722,6 +714,8 @@ static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
+ info->node = NULL;
+
return 0;
failed:
@@ -730,11 +724,10 @@ failed:
else
pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);
- kfree(node);
return err;
}
-static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr)
{
struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS];
@@ -798,12 +791,12 @@ static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
}
static const struct xenbus_ring_ops ring_ops_pv = {
- .map = xenbus_map_ring_valloc_pv,
- .unmap = xenbus_unmap_ring_vfree_pv,
+ .map = xenbus_map_ring_pv,
+ .unmap = xenbus_unmap_ring_pv,
};
#endif
-struct unmap_ring_vfree_hvm
+struct unmap_ring_hvm
{
unsigned int idx;
unsigned long addrs[XENBUS_MAX_RING_GRANTS];
@@ -814,19 +807,19 @@ static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn,
unsigned int len,
void *data)
{
- struct unmap_ring_vfree_hvm *info = data;
+ struct unmap_ring_hvm *info = data;
info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn);
info->idx++;
}
-static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr)
{
int rv;
struct xenbus_map_node *node;
void *addr;
- struct unmap_ring_vfree_hvm info = {
+ struct unmap_ring_hvm info = {
.idx = 0,
};
unsigned int nr_pages;
@@ -887,8 +880,8 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
static const struct xenbus_ring_ops ring_ops_hvm = {
- .map = xenbus_map_ring_valloc_hvm,
- .unmap = xenbus_unmap_ring_vfree_hvm,
+ .map = xenbus_map_ring_hvm,
+ .unmap = xenbus_unmap_ring_hvm,
};
void __init xenbus_ring_ops_init(void)