Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig6
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/acpi_pnp.c3
-rw-r--r--drivers/acpi/cppc_acpi.c141
-rw-r--r--drivers/acpi/nfit/core.c15
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/acpi/scan.c143
-rw-r--r--drivers/acpi/sleep.c305
-rw-r--r--drivers/acpi/sleep.h16
-rw-r--r--drivers/acpi/x86/s2idle.c448
-rw-r--r--drivers/atm/idt77252.c2
-rw-r--r--drivers/base/core.c9
-rw-r--r--drivers/base/regmap/regmap-debugfs.c9
-rw-r--r--drivers/block/Kconfig1
-rw-r--r--drivers/block/nbd.c9
-rw-r--r--drivers/block/ps3disk.c3
-rw-r--r--drivers/block/ps3vram.c3
-rw-r--r--drivers/block/rbd.c8
-rw-r--r--drivers/block/rnbd/Kconfig1
-rw-r--r--drivers/block/rnbd/README1
-rw-r--r--drivers/block/rnbd/rnbd-clt-sysfs.c5
-rw-r--r--drivers/block/rnbd/rnbd-clt.c100
-rw-r--r--drivers/block/rnbd/rnbd-clt.h12
-rw-r--r--drivers/block/rnbd/rnbd-proto.h9
-rw-r--r--drivers/block/rnbd/rnbd-srv.c16
-rw-r--r--drivers/block/xen-blkfront.c1
-rw-r--r--drivers/bus/ti-sysc.c41
-rw-r--r--drivers/char/agp/Makefile6
-rw-r--r--drivers/char/agp/agp.h5
-rw-r--r--drivers/char/hw_random/Kconfig16
-rw-r--r--drivers/char/ps3flash.c3
-rw-r--r--drivers/clk/Kconfig11
-rw-r--r--drivers/clk/Makefile1
-rw-r--r--drivers/clk/at91/at91rm9200.c21
-rw-r--r--drivers/clk/at91/at91sam9260.c26
-rw-r--r--drivers/clk/at91/at91sam9g45.c32
-rw-r--r--drivers/clk/at91/at91sam9n12.c36
-rw-r--r--drivers/clk/at91/at91sam9rl.c23
-rw-r--r--drivers/clk/at91/at91sam9x5.c28
-rw-r--r--drivers/clk/at91/clk-master.c337
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c145
-rw-r--r--drivers/clk/at91/dt-compat.c15
-rw-r--r--drivers/clk/at91/pmc.h22
-rw-r--r--drivers/clk/at91/sam9x60.c51
-rw-r--r--drivers/clk/at91/sama5d2.c42
-rw-r--r--drivers/clk/at91/sama5d3.c38
-rw-r--r--drivers/clk/at91/sama5d4.c40
-rw-r--r--drivers/clk/at91/sama7g5.c223
-rw-r--r--drivers/clk/bcm/clk-bcm2711-dvp.c4
-rw-r--r--drivers/clk/clk-axi-clkgen.c64
-rw-r--r--drivers/clk/clk-composite.c50
-rw-r--r--drivers/clk/clk-divider.c34
-rw-r--r--drivers/clk/clk-fsl-flexspi.c106
-rw-r--r--drivers/clk/clk-fsl-sai.c14
-rw-r--r--drivers/clk/clk-pwm.c2
-rw-r--r--drivers/clk/clk-qoriq.c13
-rw-r--r--drivers/clk/clk-s2mps11.c1
-rw-r--r--drivers/clk/clk-scpi.c2
-rw-r--r--drivers/clk/clk-si5351.c13
-rw-r--r--drivers/clk/clk-versaclock5.c4
-rw-r--r--drivers/clk/clk.c132
-rw-r--r--drivers/clk/imx/clk-gate2.c68
-rw-r--r--drivers/clk/imx/clk-imx8mm.c2
-rw-r--r--drivers/clk/imx/clk-imx8mn.c2
-rw-r--r--drivers/clk/imx/clk-imx8mp.c4
-rw-r--r--drivers/clk/imx/clk-imx8mq.c2
-rw-r--r--drivers/clk/imx/clk-imx8qxp-lpcg.c139
-rw-r--r--drivers/clk/imx/clk-imx8qxp.c136
-rw-r--r--drivers/clk/imx/clk-lpcg-scu.c53
-rw-r--r--drivers/clk/imx/clk-pll14xx.c2
-rw-r--r--drivers/clk/imx/clk-scu.c227
-rw-r--r--drivers/clk/imx/clk-scu.h56
-rw-r--r--drivers/clk/imx/clk.h27
-rw-r--r--drivers/clk/ingenic/cgu.c14
-rw-r--r--drivers/clk/mediatek/clk-mux.c2
-rw-r--r--drivers/clk/mediatek/clk-mux.h4
-rw-r--r--drivers/clk/meson/Kconfig7
-rw-r--r--drivers/clk/meson/axg-aoclk.c5
-rw-r--r--drivers/clk/meson/axg.c824
-rw-r--r--drivers/clk/meson/axg.h23
-rw-r--r--drivers/clk/meson/g12a-aoclk.c5
-rw-r--r--drivers/clk/meson/g12a.c181
-rw-r--r--drivers/clk/meson/g12a.h3
-rw-r--r--drivers/clk/meson/gxbb-aoclk.c5
-rw-r--r--drivers/clk/meson/gxbb.c5
-rw-r--r--drivers/clk/meson/meson-aoclk.c4
-rw-r--r--drivers/clk/meson/meson-eeclk.c3
-rw-r--r--drivers/clk/mvebu/armada-37xx-xtal.c4
-rw-r--r--drivers/clk/qcom/Kconfig25
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/camcc-sc7180.c1732
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c217
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.h4
-rw-r--r--drivers/clk/qcom/clk-rpmh.c56
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c1
-rw-r--r--drivers/clk/qcom/gcc-sc7180.c5
-rw-r--r--drivers/clk/qcom/gcc-sdx55.c1659
-rw-r--r--drivers/clk/qcom/lpass-gfm-sm8250.c320
-rw-r--r--drivers/clk/qcom/lpasscorecc-sc7180.c135
-rw-r--r--drivers/clk/renesas/clk-sh73a0.c2
-rw-r--r--drivers/clk/renesas/r8a774a1-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a774b1-cpg-mssr.c8
-rw-r--r--drivers/clk/renesas/r8a774c0-cpg-mssr.c9
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c51
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c79
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.h5
-rw-r--r--drivers/clk/renesas/rcar-usb2-clock-sel.c2
-rw-r--r--drivers/clk/renesas/renesas-cpg-mssr.c3
-rw-r--r--drivers/clk/rockchip/Kconfig12
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c35
-rw-r--r--drivers/clk/rockchip/clk.c3
-rw-r--r--drivers/clk/samsung/Kconfig77
-rw-r--r--drivers/clk/samsung/Makefile24
-rw-r--r--drivers/clk/samsung/clk-exynos-clkout.c199
-rw-r--r--drivers/clk/samsung/clk-pll.c147
-rw-r--r--drivers/clk/sifive/Kconfig8
-rw-r--r--drivers/clk/sifive/Makefile2
-rw-r--r--drivers/clk/sifive/fu540-prci.c599
-rw-r--r--drivers/clk/sifive/fu540-prci.h21
-rw-r--r--drivers/clk/sifive/fu740-prci.c123
-rw-r--r--drivers/clk/sifive/fu740-prci.h21
-rw-r--r--drivers/clk/sifive/sifive-prci.c574
-rw-r--r--drivers/clk/sifive/sifive-prci.h299
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c1
-rw-r--r--drivers/clk/tegra/clk-bpmp.c6
-rw-r--r--drivers/clk/tegra/clk-dfll.c4
-rw-r--r--drivers/clk/tegra/clk-id.h1
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c2
-rw-r--r--drivers/clk/tegra/clk-tegra30.c2
-rw-r--r--drivers/clk/ti/clk-33xx.c2
-rw-r--r--drivers/clk/ti/clk-43xx.c8
-rw-r--r--drivers/clk/ti/clk-44xx.c2
-rw-r--r--drivers/clk/ti/clk-54xx.c12
-rw-r--r--drivers/clk/ti/clk-7xx.c7
-rw-r--r--drivers/clk/ti/fapll.c11
-rw-r--r--drivers/connector/cn_queue.c8
-rw-r--r--drivers/connector/connector.c4
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c204
-rw-r--r--drivers/cpufreq/cpufreq.c40
-rw-r--r--drivers/cpufreq/intel_pstate.c96
-rw-r--r--drivers/cpufreq/powernow-k8.c9
-rw-r--r--drivers/cpuidle/cpuidle-big_little.c3
-rw-r--r--drivers/crypto/keembay/Kconfig5
-rw-r--r--drivers/crypto/qat/Kconfig1
-rw-r--r--drivers/dax/bus.c71
-rw-r--r--drivers/dax/pmem/core.c2
-rw-r--r--drivers/dax/super.c1
-rw-r--r--drivers/dma-buf/dma-buf.c40
-rw-r--r--drivers/dma-buf/dma-resv.c2
-rw-r--r--drivers/dma-buf/heaps/Makefile1
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c330
-rw-r--r--drivers/dma-buf/heaps/heap-helpers.c274
-rw-r--r--drivers/dma-buf/heaps/heap-helpers.h53
-rw-r--r--drivers/dma-buf/heaps/system_heap.c414
-rw-r--r--drivers/dma/Kconfig10
-rw-r--r--drivers/dma/at_xdmac.c163
-rw-r--r--drivers/dma/dma-jz4780.c2
-rw-r--r--drivers/dma/dmatest.c13
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c2
-rw-r--r--drivers/dma/dw-edma/dw-edma-core.c4
-rw-r--r--drivers/dma/dw/core.c6
-rw-r--r--drivers/dma/hisi_dma.c5
-rw-r--r--drivers/dma/idma64.c8
-rw-r--r--drivers/dma/idxd/cdev.c50
-rw-r--r--drivers/dma/idxd/device.c143
-rw-r--r--drivers/dma/idxd/dma.c9
-rw-r--r--drivers/dma/idxd/idxd.h58
-rw-r--r--drivers/dma/idxd/init.c123
-rw-r--r--drivers/dma/idxd/irq.c146
-rw-r--r--drivers/dma/idxd/registers.h28
-rw-r--r--drivers/dma/idxd/submit.c37
-rw-r--r--drivers/dma/idxd/sysfs.c211
-rw-r--r--drivers/dma/imx-dma.c33
-rw-r--r--drivers/dma/imx-sdma.c38
-rw-r--r--drivers/dma/ipu/ipu_idmac.c11
-rw-r--r--drivers/dma/k3dma.c9
-rw-r--r--drivers/dma/mediatek/mtk-hsdma.c1
-rw-r--r--drivers/dma/milbeaut-xdmac.c9
-rw-r--r--drivers/dma/moxart-dma.c5
-rw-r--r--drivers/dma/mv_xor.c2
-rw-r--r--drivers/dma/mv_xor_v2.c4
-rw-r--r--drivers/dma/mxs-dma.c37
-rw-r--r--drivers/dma/of-dma.c10
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/ppc4xx/adma.c4
-rw-r--r--drivers/dma/pxa_dma.c5
-rw-r--r--drivers/dma/qcom/Kconfig23
-rw-r--r--drivers/dma/qcom/Makefile2
-rw-r--r--drivers/dma/qcom/bam_dma.c8
-rw-r--r--drivers/dma/qcom/gpi.c2303
-rw-r--r--drivers/dma/qcom/qcom_adm.c905
-rw-r--r--drivers/dma/sf-pdma/sf-pdma.c12
-rw-r--r--drivers/dma/ste_dma40.c5
-rw-r--r--drivers/dma/stm32-dma.c47
-rw-r--r--drivers/dma/stm32-dmamux.c2
-rw-r--r--drivers/dma/stm32-mdma.c68
-rw-r--r--drivers/dma/sun6i-dma.c25
-rw-r--r--drivers/dma/tegra210-adma.c7
-rw-r--r--drivers/dma/ti/Makefile3
-rw-r--r--drivers/dma/ti/dma-crossbar.c6
-rw-r--r--drivers/dma/ti/k3-psil-am64.c158
-rw-r--r--drivers/dma/ti/k3-psil-priv.h1
-rw-r--r--drivers/dma/ti/k3-psil.c1
-rw-r--r--drivers/dma/ti/k3-udma-glue.c383
-rw-r--r--drivers/dma/ti/k3-udma-private.c45
-rw-r--r--drivers/dma/ti/k3-udma.c1934
-rw-r--r--drivers/dma/ti/k3-udma.h28
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c11
-rw-r--r--drivers/firmware/arm_scmi/notify.c10
-rw-r--r--drivers/firmware/arm_scmi/sensors.c720
-rw-r--r--drivers/firmware/efi/Kconfig4
-rw-r--r--drivers/firmware/efi/Makefile5
-rw-r--r--drivers/firmware/efi/capsule.c16
-rw-r--r--drivers/firmware/efi/libstub/efi-stub.c1
-rw-r--r--drivers/firmware/efi/libstub/efistub.h3
-rw-r--r--drivers/firmware/efi/libstub/fdt.c3
-rw-r--r--drivers/firmware/efi/libstub/secureboot.c44
-rw-r--r--drivers/firmware/efi/libstub/x86-stub.c5
-rw-r--r--drivers/firmware/efi/test/efi_test.c16
-rw-r--r--drivers/firmware/efi/test/efi_test.h3
-rw-r--r--drivers/firmware/imx/imx-dsp.c72
-rw-r--r--drivers/firmware/imx/scu-pd.c12
-rw-r--r--drivers/firmware/meson/Kconfig5
-rw-r--r--drivers/firmware/meson/meson_sm.c1
-rw-r--r--drivers/firmware/psci/psci.c126
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c6
-rw-r--r--drivers/firmware/ti_sci.c213
-rw-r--r--drivers/firmware/ti_sci.h72
-rw-r--r--drivers/firmware/xilinx/zynqmp.c46
-rw-r--r--drivers/gpio/Kconfig30
-rw-r--r--drivers/gpio/Makefile2
-rw-r--r--drivers/gpio/TODO85
-rw-r--r--drivers/gpio/gpio-104-idi-48.c6
-rw-r--r--drivers/gpio/gpio-amd8111.c11
-rw-r--r--drivers/gpio/gpio-ath79.c1
-rw-r--r--drivers/gpio/gpio-bt8xx.c8
-rw-r--r--drivers/gpio/gpio-cs5535.c8
-rw-r--r--drivers/gpio/gpio-dwapb.c7
-rw-r--r--drivers/gpio/gpio-exar.c155
-rw-r--r--drivers/gpio/gpio-hisi.c323
-rw-r--r--drivers/gpio/gpio-mockup.c11
-rw-r--r--drivers/gpio/gpio-msc313.c460
-rw-r--r--drivers/gpio/gpio-mvebu.c71
-rw-r--r--drivers/gpio/gpio-mxc.c102
-rw-r--r--drivers/gpio/gpio-mxs.c14
-rw-r--r--drivers/gpio/gpio-omap.c7
-rw-r--r--drivers/gpio/gpio-rcar.c87
-rw-r--r--drivers/gpio/gpio-sifive.c25
-rw-r--r--drivers/gpio/gpio-stmpe.c10
-rw-r--r--drivers/gpio/gpio-tegra.c22
-rw-r--r--drivers/gpio/gpio-tegra186.c11
-rw-r--r--drivers/gpio/gpio-xilinx.c49
-rw-r--r--drivers/gpio/gpio-xra1403.c10
-rw-r--r--drivers/gpio/gpiolib-acpi.c139
-rw-r--r--drivers/gpio/gpiolib-acpi.h2
-rw-r--r--drivers/gpio/gpiolib-cdev.c79
-rw-r--r--drivers/gpio/gpiolib-devres.c27
-rw-r--r--drivers/gpio/gpiolib-of.c14
-rw-r--r--drivers/gpio/gpiolib-sysfs.c2
-rw-r--r--drivers/gpio/gpiolib.c301
-rw-r--r--drivers/gpio/gpiolib.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c42
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c4
-rw-r--r--drivers/gpu/drm/amd/display/Kconfig2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c35
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h21
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c13
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c5
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c44
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/Makefile7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c39
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/Makefile5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h62
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c22
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c8
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c7
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.c35
-rw-r--r--drivers/gpu/drm/amd/display/modules/power/power_helpers.h1
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h1
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h1
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_types.h4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h2
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h6
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h2
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c166
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c10
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c289
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c46
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c1
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_dev.c1
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_kms.c4
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c3
-rw-r--r--drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c4
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c8
-rw-r--r--drivers/gpu/drm/drm_blend.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c1
-rw-r--r--drivers/gpu/drm/drm_client.c4
-rw-r--r--drivers/gpu/drm/drm_crtc.c12
-rw-r--r--drivers/gpu/drm/drm_edid.c4
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c155
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c40
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_lpe_audio.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c3
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c5
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c1
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c27
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h15
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c61
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c2
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c19
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h8
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-dev.h3
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-plane.c24
-rw-r--r--drivers/gpu/drm/imx/dcss/dcss-scaler.c47
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c2
-rw-r--r--drivers/gpu/drm/mcde/Kconfig1
-rw-r--r--drivers/gpu/drm/mcde/Makefile2
-rw-r--r--drivers/gpu/drm/mcde/mcde_clk_div.c192
-rw-r--r--drivers/gpu/drm/mcde/mcde_display.c456
-rw-r--r--drivers/gpu/drm/mcde/mcde_display_regs.h91
-rw-r--r--drivers/gpu/drm/mcde/mcde_drm.h10
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c46
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h34
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c51
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c1
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c4
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c21
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h6
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c7
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c12
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c17
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c31
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c3
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e63m0.c345
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c28
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c53
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c2
-rw-r--r--drivers/gpu/drm/v3d/v3d_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_txp.c4
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_irq.c1
-rw-r--r--drivers/gpu/drm/via/via_verifier.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h8
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c13
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_writeback.c7
-rw-r--r--drivers/hsi/controllers/omap_ssi_core.c2
-rw-r--r--drivers/hsi/hsi_core.c2
-rw-r--r--drivers/hv/vmbus_drv.c2
-rw-r--r--drivers/hwmon/amd_energy.c3
-rw-r--r--drivers/hwmon/k10temp.c98
-rw-r--r--drivers/hwmon/pwm-fan.c12
-rw-r--r--drivers/hwmon/scmi-hwmon.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-mt65xx.c27
-rw-r--r--drivers/i2c/busses/i2c-sprd.c8
-rw-r--r--drivers/i3c/master.c5
-rw-r--r--drivers/i3c/master/Kconfig13
-rw-r--r--drivers/i3c/master/Makefile1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/Makefile6
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd.h67
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c378
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c316
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c798
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat.h32
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dat_v1.c184
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dct.h16
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dct_v1.c36
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c784
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.c308
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ext_caps.h19
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h144
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/ibi.h42
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c1041
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h79
-rw-r--r--drivers/ide/ide-atapi.c1
-rw-r--r--drivers/ide/ide-io.c7
-rw-r--r--drivers/ide/ide-pm.c2
-rw-r--r--drivers/idle/intel_idle.c41
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.h7
-rw-r--r--drivers/interconnect/imx/imx.c3
-rw-r--r--drivers/interconnect/imx/imx8mq.c2
-rw-r--r--drivers/interconnect/qcom/Kconfig23
-rw-r--r--drivers/interconnect/qcom/sdm845.c3
-rw-r--r--drivers/iommu/amd/init.c3
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c2
-rw-r--r--drivers/iommu/dma-iommu.c27
-rw-r--r--drivers/iommu/intel/dmar.c4
-rw-r--r--drivers/iommu/intel/iommu.c148
-rw-r--r--drivers/iommu/intel/irq_remapping.c2
-rw-r--r--drivers/iommu/intel/svm.c23
-rw-r--r--drivers/iommu/iova.c8
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c12
-rw-r--r--drivers/irqchip/irq-gic-v4.c19
-rw-r--r--drivers/isdn/mISDN/Kconfig1
-rw-r--r--drivers/lightnvm/Kconfig1
-rw-r--r--drivers/mailbox/Kconfig9
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/arm_mhu_db.c2
-rw-r--r--drivers/mailbox/arm_mhuv2.c1136
-rw-r--r--drivers/mailbox/stm32-ipcc.c15
-rw-r--r--drivers/md/Kconfig22
-rw-r--r--drivers/md/Makefile20
-rw-r--r--drivers/md/bcache/features.c2
-rw-r--r--drivers/md/bcache/features.h30
-rw-r--r--drivers/md/bcache/super.c55
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/dm-cache-target.c7
-rw-r--r--drivers/md/dm-crypt.c6
-rw-r--r--drivers/md/dm-ebs-target.c2
-rw-r--r--drivers/md/dm-ioctl.c1
-rw-r--r--drivers/md/dm-ps-historical-service-time.c (renamed from drivers/md/dm-historical-service-time.c)0
-rw-r--r--drivers/md/dm-ps-io-affinity.c272
-rw-r--r--drivers/md/dm-ps-queue-length.c (renamed from drivers/md/dm-queue-length.c)0
-rw-r--r--drivers/md/dm-ps-round-robin.c (renamed from drivers/md/dm-round-robin.c)0
-rw-r--r--drivers/md/dm-ps-service-time.c (renamed from drivers/md/dm-service-time.c)0
-rw-r--r--drivers/md/dm-stripe.c2
-rw-r--r--drivers/md/dm-switch.c1
-rw-r--r--drivers/md/dm-unstripe.c1
-rw-r--r--drivers/md/dm-verity-target.c12
-rw-r--r--drivers/md/dm-verity-verify-sig.c9
-rw-r--r--drivers/md/dm-zero.c1
-rw-r--r--drivers/md/dm.c2
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c27
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c17
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c4
-rw-r--r--drivers/memory/Kconfig6
-rw-r--r--drivers/memory/jz4780-nemc.c6
-rw-r--r--drivers/memory/mtk-smi.c19
-rw-r--r--drivers/memory/renesas-rpc-if.c18
-rw-r--r--drivers/memory/tegra/Kconfig10
-rw-r--r--drivers/memory/tegra/mc.c155
-rw-r--r--drivers/memory/tegra/mc.h22
-rw-r--r--drivers/memory/tegra/tegra114.c6
-rw-r--r--drivers/memory/tegra/tegra124-emc.c22
-rw-r--r--drivers/memory/tegra/tegra124.c6
-rw-r--r--drivers/memory/tegra/tegra20-emc.c520
-rw-r--r--drivers/memory/tegra/tegra20.c77
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c39
-rw-r--r--drivers/memory/tegra/tegra210.c60
-rw-r--r--drivers/memory/tegra/tegra30-emc.c411
-rw-r--r--drivers/memory/tegra/tegra30.c245
-rw-r--r--drivers/mfd/ab8500-debugfs.c18
-rw-r--r--drivers/mfd/menelaus.c2
-rw-r--r--drivers/misc/habanalabs/common/command_submission.c77
-rw-r--r--drivers/misc/habanalabs/common/device.c8
-rw-r--r--drivers/misc/habanalabs/common/firmware_if.c60
-rw-r--r--drivers/misc/habanalabs/common/habanalabs.h4
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_drv.c1
-rw-r--r--drivers/misc/habanalabs/common/habanalabs_ioctl.c7
-rw-r--r--drivers/misc/habanalabs/common/hw_queue.c5
-rw-r--r--drivers/misc/habanalabs/common/pci.c28
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi.c191
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudiP.h7
-rw-r--r--drivers/misc/habanalabs/gaudi/gaudi_coresight.c3
-rw-r--r--drivers/misc/habanalabs/goya/goya.c75
-rw-r--r--drivers/misc/habanalabs/include/common/hl_boot_if.h9
-rw-r--r--drivers/misc/lkdtm/Makefile1
-rw-r--r--drivers/misc/lkdtm/core.c3
-rw-r--r--drivers/misc/lkdtm/lkdtm.h3
-rw-r--r--drivers/misc/lkdtm/powerpc.c120
-rw-r--r--drivers/misc/ocxl/context.c4
-rw-r--r--drivers/misc/ocxl/link.c70
-rw-r--r--drivers/misc/ocxl/ocxl_internal.h9
-rw-r--r--drivers/misc/ocxl/trace.h64
-rw-r--r--drivers/misc/pvpanic.c19
-rw-r--r--drivers/mtd/ubi/build.c6
-rw-r--r--drivers/mtd/ubi/io.c9
-rw-r--r--drivers/net/bareudp.c25
-rw-r--r--drivers/net/can/Kconfig1
-rw-r--r--drivers/net/can/m_can/m_can.c2
-rw-r--r--drivers/net/can/m_can/tcan4x5x.c26
-rw-r--r--drivers/net/can/rcar/Kconfig4
-rw-r--r--drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c17
-rw-r--r--drivers/net/dsa/hirschmann/Kconfig1
-rw-r--r--drivers/net/dsa/lantiq_gswip.c34
-rw-r--r--drivers/net/dsa/qca/ar9331.c33
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c7
-rw-r--r--drivers/net/ethernet/aquantia/Kconfig1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c38
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h7
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c4
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c71
-rw-r--r--drivers/net/ethernet/ethoc.c3
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c1
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c3
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h9
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ethtool.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h2
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c19
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c46
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c17
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c59
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c5
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c5
-rw-r--r--drivers/net/ethernet/korina.c2
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c27
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c38
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c14
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c77
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/rdma.c2
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c43
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c8
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c12
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c7
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/main.c6
-rw-r--r--drivers/net/ethernet/ni/Kconfig2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_fp.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c129
-rw-r--r--drivers/net/ethernet/ti/cpts.c2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c3
-rw-r--r--drivers/net/hyperv/netvsc_drv.c11
-rw-r--r--drivers/net/ipa/gsi.c127
-rw-r--r--drivers/net/ipa/ipa_clock.c4
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/cdc_ncm.c11
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/virtio_net.c13
-rw-r--r--drivers/net/wan/Kconfig1
-rw-r--r--drivers/net/wan/hdlc_ppp.c7
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c17
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c3
-rw-r--r--drivers/net/wireless/ath/wil6210/Kconfig1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c8
-rw-r--r--drivers/nfc/s3fwrn5/nci.c25
-rw-r--r--drivers/nfc/s3fwrn5/nci.h22
-rw-r--r--drivers/nfc/s3fwrn5/phy_common.c3
-rw-r--r--drivers/ntb/hw/idt/ntb_hw_idt.c4
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen1.h1
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.c27
-rw-r--r--drivers/ntb/hw/intel/ntb_hw_gen4.h15
-rw-r--r--drivers/ntb/msi.c4
-rw-r--r--drivers/nvdimm/btt.h3
-rw-r--r--drivers/nvdimm/claim.c1
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/label.c13
-rw-r--r--drivers/nvme/host/core.c8
-rw-r--r--drivers/nvme/host/fc.c15
-rw-r--r--drivers/nvme/host/nvme.h9
-rw-r--r--drivers/nvme/host/pci.c10
-rw-r--r--drivers/nvme/host/tcp.c12
-rw-r--r--drivers/nvme/target/fcloop.c7
-rw-r--r--drivers/nvme/target/rdma.c10
-rw-r--r--drivers/opp/core.c9
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c8
-rw-r--r--drivers/pci/controller/dwc/pcie-tegra194.c55
-rw-r--r--drivers/pci/controller/mobiveil/pcie-mobiveil-host.c8
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c8
-rw-r--r--drivers/pcmcia/Kconfig5
-rw-r--r--drivers/pcmcia/Makefile1
-rw-r--r--drivers/pcmcia/at91_cf.c50
-rw-r--r--drivers/pcmcia/db1xxx_ss.c2
-rw-r--r--drivers/pcmcia/electra_cf.c2
-rw-r--r--drivers/pcmcia/omap_cf.c8
-rw-r--r--drivers/pcmcia/vrc4173_cardu.c591
-rw-r--r--drivers/pcmcia/vrc4173_cardu.h247
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c11
-rw-r--r--drivers/platform/chrome/cros_ec_proto.c14
-rw-r--r--drivers/platform/chrome/cros_ec_typec.c336
-rw-r--r--drivers/power/reset/Kconfig7
-rw-r--r--drivers/power/reset/Makefile1
-rw-r--r--drivers/power/reset/ocelot-reset.c30
-rw-r--r--drivers/power/reset/qnap-poweroff.c8
-rw-r--r--drivers/power/reset/regulator-poweroff.c82
-rw-r--r--drivers/power/reset/syscon-poweroff.c8
-rw-r--r--drivers/power/supply/ab8500_btemp.c68
-rw-r--r--drivers/power/supply/ab8500_charger.c99
-rw-r--r--drivers/power/supply/ab8500_fg.c106
-rw-r--r--drivers/power/supply/abx500_chargalg.c19
-rw-r--r--drivers/power/supply/axp20x_usb_power.c10
-rw-r--r--drivers/power/supply/axp288_charger.c28
-rw-r--r--drivers/power/supply/bq24190_charger.c21
-rw-r--r--drivers/power/supply/bq24735-charger.c1
-rw-r--r--drivers/power/supply/bq25890_charger.c2
-rw-r--r--drivers/power/supply/collie_battery.c151
-rw-r--r--drivers/power/supply/generic-adc-battery.c31
-rw-r--r--drivers/power/supply/max17042_battery.c23
-rw-r--r--drivers/power/supply/max8997_charger.c67
-rw-r--r--drivers/power/supply/pm2301_charger.c3
-rw-r--r--drivers/power/supply/power_supply_sysfs.c2
-rw-r--r--drivers/power/supply/s3c_adc_battery.c57
-rw-r--r--drivers/power/supply/wm831x_power.c1
-rw-r--r--drivers/ps3/ps3-lpm.c3
-rw-r--r--drivers/ps3/ps3-vuart.c10
-rw-r--r--drivers/ps3/ps3stor_lib.c2
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/pwm/Kconfig71
-rw-r--r--drivers/pwm/Makefile3
-rw-r--r--drivers/pwm/core.c2
-rw-r--r--drivers/pwm/pwm-ab8500.c4
-rw-r--r--drivers/pwm/pwm-atmel-tcb.c264
-rw-r--r--drivers/pwm/pwm-atmel.c4
-rw-r--r--drivers/pwm/pwm-bcm-iproc.c4
-rw-r--r--drivers/pwm/pwm-bcm-kona.c4
-rw-r--r--drivers/pwm/pwm-bcm2835.c73
-rw-r--r--drivers/pwm/pwm-berlin.c4
-rw-r--r--drivers/pwm/pwm-brcmstb.c4
-rw-r--r--drivers/pwm/pwm-clps711x.c4
-rw-r--r--drivers/pwm/pwm-crc.c2
-rw-r--r--drivers/pwm/pwm-dwc.c319
-rw-r--r--drivers/pwm/pwm-ep93xx.c4
-rw-r--r--drivers/pwm/pwm-fsl-ftm.c4
-rw-r--r--drivers/pwm/pwm-hibvt.c7
-rw-r--r--drivers/pwm/pwm-img.c4
-rw-r--r--drivers/pwm/pwm-imx-tpm.c10
-rw-r--r--drivers/pwm/pwm-imx1.c25
-rw-r--r--drivers/pwm/pwm-imx27.c28
-rw-r--r--drivers/pwm/pwm-intel-lgm.c244
-rw-r--r--drivers/pwm/pwm-iqs620a.c2
-rw-r--r--drivers/pwm/pwm-keembay.c245
-rw-r--r--drivers/pwm/pwm-lp3943.c1
-rw-r--r--drivers/pwm/pwm-lpc18xx-sct.c4
-rw-r--r--drivers/pwm/pwm-lpc32xx.c4
-rw-r--r--drivers/pwm/pwm-lpss-platform.c39
-rw-r--r--drivers/pwm/pwm-lpss.c7
-rw-r--r--drivers/pwm/pwm-mediatek.c23
-rw-r--r--drivers/pwm/pwm-meson.c4
-rw-r--r--drivers/pwm/pwm-mtk-disp.c4
-rw-r--r--drivers/pwm/pwm-pxa.c4
-rw-r--r--drivers/pwm/pwm-rcar.c6
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c4
-rw-r--r--drivers/pwm/pwm-rockchip.c4
-rw-r--r--drivers/pwm/pwm-samsung.c4
-rw-r--r--drivers/pwm/pwm-sifive.c4
-rw-r--r--drivers/pwm/pwm-sl28cpld.c4
-rw-r--r--drivers/pwm/pwm-spear.c4
-rw-r--r--drivers/pwm/pwm-sti.c54
-rw-r--r--drivers/pwm/pwm-sun4i.c10
-rw-r--r--drivers/pwm/pwm-tegra.c4
-rw-r--r--drivers/pwm/pwm-tiecap.c4
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c16
-rw-r--r--drivers/pwm/pwm-vt8500.c4
-rw-r--r--drivers/pwm/pwm-zx.c5
-rw-r--r--drivers/regulator/Kconfig1
-rw-r--r--drivers/regulator/bd718x7-regulator.c57
-rw-r--r--drivers/regulator/pf8x00-regulator.c8
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c2
-rw-r--r--drivers/remoteproc/wkup_m3_rproc.c41
-rw-r--r--drivers/reset/Kconfig3
-rw-r--r--drivers/reset/core.c73
-rw-r--r--drivers/reset/reset-meson.c8
-rw-r--r--drivers/reset/reset-socfpga.c11
-rw-r--r--drivers/reset/reset-ti-syscon.c4
-rw-r--r--drivers/rtc/Kconfig33
-rw-r--r--drivers/rtc/class.c52
-rw-r--r--drivers/rtc/nvmem.c91
-rw-r--r--drivers/rtc/rtc-88pm80x.c2
-rw-r--r--drivers/rtc/rtc-88pm860x.c2
-rw-r--r--drivers/rtc/rtc-ab-b5ze-s3.c2
-rw-r--r--drivers/rtc/rtc-ab-eoz9.c2
-rw-r--r--drivers/rtc/rtc-ab3100.c2
-rw-r--r--drivers/rtc/rtc-ab8500.c2
-rw-r--r--drivers/rtc/rtc-abx80x.c2
-rw-r--r--drivers/rtc/rtc-ac100.c2
-rw-r--r--drivers/rtc/rtc-armada38x.c2
-rw-r--r--drivers/rtc/rtc-aspeed.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c108
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-au1xxx.c2
-rw-r--r--drivers/rtc/rtc-bd70528.c2
-rw-r--r--drivers/rtc/rtc-brcmstb-waketimer.c5
-rw-r--r--drivers/rtc/rtc-cadence.c2
-rw-r--r--drivers/rtc/rtc-cmos.c6
-rw-r--r--drivers/rtc/rtc-coh901331.c2
-rw-r--r--drivers/rtc/rtc-cpcap.c8
-rw-r--r--drivers/rtc/rtc-cros-ec.c2
-rw-r--r--drivers/rtc/rtc-da9052.c2
-rw-r--r--drivers/rtc/rtc-da9063.c4
-rw-r--r--drivers/rtc/rtc-davinci.c2
-rw-r--r--drivers/rtc/rtc-digicolor.c2
-rw-r--r--drivers/rtc/rtc-dm355evm.c2
-rw-r--r--drivers/rtc/rtc-ds1305.c5
-rw-r--r--drivers/rtc/rtc-ds1307.c83
-rw-r--r--drivers/rtc/rtc-ds1343.c5
-rw-r--r--drivers/rtc/rtc-ds1347.c2
-rw-r--r--drivers/rtc/rtc-ds1374.c2
-rw-r--r--drivers/rtc/rtc-ds1511.c6
-rw-r--r--drivers/rtc/rtc-ds1553.c6
-rw-r--r--drivers/rtc/rtc-ds1672.c2
-rw-r--r--drivers/rtc/rtc-ds1685.c5
-rw-r--r--drivers/rtc/rtc-ds1742.c6
-rw-r--r--drivers/rtc/rtc-ds2404.c2
-rw-r--r--drivers/rtc/rtc-ds3232.c2
-rw-r--r--drivers/rtc/rtc-ep93xx.c8
-rw-r--r--drivers/rtc/rtc-fsl-ftm-alarm.c2
-rw-r--r--drivers/rtc/rtc-ftrtc010.c2
-rw-r--r--drivers/rtc/rtc-goldfish.c2
-rw-r--r--drivers/rtc/rtc-hym8563.c7
-rw-r--r--drivers/rtc/rtc-imx-sc.c2
-rw-r--r--drivers/rtc/rtc-imxdi.c2
-rw-r--r--drivers/rtc/rtc-isl12026.c4
-rw-r--r--drivers/rtc/rtc-isl1208.c4
-rw-r--r--drivers/rtc/rtc-jz4740.c2
-rw-r--r--drivers/rtc/rtc-lpc32xx.c2
-rw-r--r--drivers/rtc/rtc-ls1x.c2
-rw-r--r--drivers/rtc/rtc-m41t80.c2
-rw-r--r--drivers/rtc/rtc-m48t59.c5
-rw-r--r--drivers/rtc/rtc-m48t86.c5
-rw-r--r--drivers/rtc/rtc-mc13xxx.c2
-rw-r--r--drivers/rtc/rtc-meson-vrtc.c2
-rw-r--r--drivers/rtc/rtc-meson.c4
-rw-r--r--drivers/rtc/rtc-mpc5121.c2
-rw-r--r--drivers/rtc/rtc-mrst.c2
-rw-r--r--drivers/rtc/rtc-mt2712.c2
-rw-r--r--drivers/rtc/rtc-mt6397.c2
-rw-r--r--drivers/rtc/rtc-mv.c2
-rw-r--r--drivers/rtc/rtc-mxc.c25
-rw-r--r--drivers/rtc/rtc-mxc_v2.c2
-rw-r--r--drivers/rtc/rtc-omap.c13
-rw-r--r--drivers/rtc/rtc-pcap.c2
-rw-r--r--drivers/rtc/rtc-pcf2123.c2
-rw-r--r--drivers/rtc/rtc-pcf2127.c73
-rw-r--r--drivers/rtc/rtc-pcf85063.c4
-rw-r--r--drivers/rtc/rtc-pcf8523.c34
-rw-r--r--drivers/rtc/rtc-pcf85363.c4
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pic32.c2
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c8
-rw-r--r--drivers/rtc/rtc-pm8xxx.c2
-rw-r--r--drivers/rtc/rtc-ps3.c2
-rw-r--r--drivers/rtc/rtc-r9701.c2
-rw-r--r--drivers/rtc/rtc-rc5t619.c2
-rw-r--r--drivers/rtc/rtc-rk808.c2
-rw-r--r--drivers/rtc/rtc-rp5c01.c5
-rw-r--r--drivers/rtc/rtc-rs5c348.c2
-rw-r--r--drivers/rtc/rtc-rv3028.c6
-rw-r--r--drivers/rtc/rtc-rv3029c2.c4
-rw-r--r--drivers/rtc/rtc-rv3032.c8
-rw-r--r--drivers/rtc/rtc-rv8803.c5
-rw-r--r--drivers/rtc/rtc-rx6110.c165
-rw-r--r--drivers/rtc/rtc-rx8010.c2
-rw-r--r--drivers/rtc/rtc-rx8581.c4
-rw-r--r--drivers/rtc/rtc-s35390a.c2
-rw-r--r--drivers/rtc/rtc-s3c.c230
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-sc27xx.c42
-rw-r--r--drivers/rtc/rtc-sd3078.c2
-rw-r--r--drivers/rtc/rtc-sh.c2
-rw-r--r--drivers/rtc/rtc-sirfsoc.c2
-rw-r--r--drivers/rtc/rtc-snvs.c69
-rw-r--r--drivers/rtc/rtc-st-lpc.c2
-rw-r--r--drivers/rtc/rtc-starfire.c2
-rw-r--r--drivers/rtc/rtc-stk17ta8.c5
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c2
-rw-r--r--drivers/rtc/rtc-sun4v.c2
-rw-r--r--drivers/rtc/rtc-sun6i.c10
-rw-r--r--drivers/rtc/rtc-sunxi.c2
-rw-r--r--drivers/rtc/rtc-tegra.c2
-rw-r--r--drivers/rtc/rtc-test.c3
-rw-r--r--drivers/rtc/rtc-tps6586x.c2
-rw-r--r--drivers/rtc/rtc-tps65910.c2
-rw-r--r--drivers/rtc/rtc-tx4939.c5
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/rtc/rtc-vt8500.c2
-rw-r--r--drivers/rtc/rtc-wilco-ec.c2
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/rtc/rtc-xgene.c2
-rw-r--r--drivers/rtc/rtc-zynqmp.c2
-rw-r--r--drivers/rtc/sysfs.c2
-rw-r--r--drivers/s390/block/dasd_alias.c22
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c26
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h4
-rw-r--r--drivers/s390/crypto/zcrypt_cex2a.c2
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c2
-rw-r--r--drivers/s390/net/qeth_core.h3
-rw-r--r--drivers/s390/net/qeth_core_main.c38
-rw-r--r--drivers/s390/net/qeth_l2_main.c2
-rw-r--r--drivers/s390/net/qeth_l3_main.c4
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/Kconfig1
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas.h4
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c11
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v2_hw.c66
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c39
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c29
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c2
-rw-r--r--drivers/scsi/ps3rom.c3
-rw-r--r--drivers/scsi/scsi_lib.c27
-rw-r--r--drivers/scsi/scsi_transport_spi.c27
-rw-r--r--drivers/scsi/ufs/ufs-mediatek-trace.h2
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.c21
-rw-r--r--drivers/scsi/ufs/ufs-mediatek.h1
-rw-r--r--drivers/scsi/ufs/ufs.h2
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c73
-rw-r--r--drivers/scsi/ufs/ufshcd.c45
-rw-r--r--drivers/scsi/ufs/ufshcd.h14
-rw-r--r--drivers/soc/Kconfig1
-rw-r--r--drivers/soc/Makefile3
-rw-r--r--drivers/soc/amlogic/Kconfig12
-rw-r--r--drivers/soc/amlogic/meson-canvas.c4
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c5
-rw-r--r--drivers/soc/amlogic/meson-ee-pwrc.c8
-rw-r--r--drivers/soc/amlogic/meson-gx-pwrc-vpu.c7
-rw-r--r--drivers/soc/amlogic/meson-secure-pwrc.c5
-rw-r--r--drivers/soc/aspeed/Kconfig47
-rw-r--r--drivers/soc/aspeed/Makefile1
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-ctrl.c37
-rw-r--r--drivers/soc/aspeed/aspeed-lpc-snoop.c2
-rw-r--r--drivers/soc/aspeed/aspeed-socinfo.c135
-rw-r--r--drivers/soc/atmel/soc.c6
-rw-r--r--drivers/soc/atmel/soc.h3
-rw-r--r--drivers/soc/bcm/brcmstb/pm/pm-arm.c2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c18
-rw-r--r--drivers/soc/fsl/qbman/qman.c8
-rw-r--r--drivers/soc/fsl/qe/qe_common.c2
-rw-r--r--drivers/soc/fsl/rcpm.c35
-rw-r--r--drivers/soc/litex/Kconfig19
-rw-r--r--drivers/soc/litex/Makefile3
-rw-r--r--drivers/soc/litex/litex_soc_ctrl.c176
-rw-r--r--drivers/soc/mediatek/Kconfig22
-rw-r--r--drivers/soc/mediatek/Makefile2
-rw-r--r--drivers/soc/mediatek/mt8173-pm-domains.h94
-rw-r--r--drivers/soc/mediatek/mt8183-pm-domains.h221
-rw-r--r--drivers/soc/mediatek/mt8192-pm-domains.h292
-rw-r--r--drivers/soc/mediatek/mtk-cmdq-helper.c41
-rw-r--r--drivers/soc/mediatek/mtk-devapc.c308
-rw-r--r--drivers/soc/mediatek/mtk-infracfg.c5
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c11
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.c614
-rw-r--r--drivers/soc/mediatek/mtk-pm-domains.h102
-rw-r--r--drivers/soc/mediatek/mtk-scpsys.c5
-rw-r--r--drivers/soc/qcom/Kconfig5
-rw-r--r--drivers/soc/qcom/cmd-db.c8
-rw-r--r--drivers/soc/qcom/kryo-l2-accessors.c2
-rw-r--r--drivers/soc/qcom/llcc-qcom.c147
-rw-r--r--drivers/soc/qcom/pdr_interface.c8
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c9
-rw-r--r--drivers/soc/qcom/qcom_aoss.c4
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c7
-rw-r--r--drivers/soc/qcom/rpmh.c14
-rw-r--r--drivers/soc/qcom/rpmhpd.c16
-rw-r--r--drivers/soc/qcom/rpmpd.c85
-rw-r--r--drivers/soc/qcom/smem.c3
-rw-r--r--drivers/soc/qcom/smp2p.c6
-rw-r--r--drivers/soc/qcom/smsm.c4
-rw-r--r--drivers/soc/qcom/socinfo.c6
-rw-r--r--drivers/soc/qcom/wcnss_ctrl.c8
-rw-r--r--drivers/soc/renesas/rmobile-sysc.c17
-rw-r--r--drivers/soc/rockchip/io-domain.c4
-rw-r--r--drivers/soc/samsung/exynos-chipid.c11
-rw-r--r--drivers/soc/samsung/exynos-pmu.c11
-rw-r--r--drivers/soc/samsung/exynos5422-asv.c2
-rw-r--r--drivers/soc/samsung/s3c-pm-check.c2
-rw-r--r--drivers/soc/sunxi/Kconfig8
-rw-r--r--drivers/soc/sunxi/Makefile1
-rw-r--r--drivers/soc/sunxi/sunxi_mbus.c132
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra124.c21
-rw-r--r--drivers/soc/tegra/fuse/speedo-tegra210.c8
-rw-r--r--drivers/soc/ti/Kconfig18
-rw-r--r--drivers/soc/ti/k3-ringacc.c423
-rw-r--r--drivers/soc/ti/k3-socinfo.c1
-rw-r--r--drivers/soc/ti/knav_dma.c15
-rw-r--r--drivers/soc/ti/knav_qmss_queue.c66
-rw-r--r--drivers/soc/ti/omap_prm.c358
-rw-r--r--drivers/soc/ti/pm33xx.c21
-rw-r--r--drivers/soc/ti/pruss.c6
-rw-r--r--drivers/soc/ti/ti_sci_inta_msi.c12
-rw-r--r--drivers/soc/ti/wkup_m3_ipc.c8
-rw-r--r--drivers/soc/xilinx/Kconfig1
-rw-r--r--drivers/soc/xilinx/xlnx_vcu.c96
-rw-r--r--drivers/spi/spi-altera.c26
-rw-r--r--drivers/spi/spi-geni-qcom.c84
-rw-r--r--drivers/spi/spi-stm32.c4
-rw-r--r--drivers/spi/spi.c11
-rw-r--r--drivers/staging/android/ashmem.c6
-rw-r--r--drivers/staging/comedi/comedi_fops.c4
-rw-r--r--drivers/staging/hikey9xx/hisi-spmi-controller.c21
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_subdev.c20
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c1
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h3
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c18
-rw-r--r--drivers/staging/mt7621-dma/mtk-hsdma.c4
-rw-r--r--drivers/target/target_core_xcopy.c119
-rw-r--r--drivers/target/target_core_xcopy.h1
-rw-r--r--drivers/tee/optee/device.c2
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3403_thermal.c1
-rw-r--r--drivers/tty/Kconfig14
-rw-r--r--drivers/tty/Makefile3
-rw-r--r--drivers/tty/serial/Kconfig32
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/liteuart.c404
-rw-r--r--drivers/tty/ttynull.c18
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c6
-rw-r--r--drivers/usb/class/cdc-acm.c4
-rw-r--r--drivers/usb/class/cdc-wdm.c16
-rw-r--r--drivers/usb/class/usblp.c21
-rw-r--r--drivers/usb/core/hcd.c8
-rw-r--r--drivers/usb/dwc3/core.h1
-rw-r--r--drivers/usb/dwc3/dwc3-meson-g12a.c2
-rw-r--r--drivers/usb/dwc3/gadget.c16
-rw-r--r--drivers/usb/dwc3/ulpi.c38
-rw-r--r--drivers/usb/gadget/Kconfig2
-rw-r--r--drivers/usb/gadget/composite.c10
-rw-r--r--drivers/usb/gadget/configfs.c19
-rw-r--r--drivers/usb/gadget/function/f_printer.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c69
-rw-r--r--drivers/usb/gadget/function/u_ether.c9
-rw-r--r--drivers/usb/gadget/legacy/acm_ms.c4
-rw-r--r--drivers/usb/gadget/udc/Kconfig2
-rw-r--r--drivers/usb/gadget/udc/Makefile1
-rw-r--r--drivers/usb/gadget/udc/core.c3
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c35
-rw-r--r--drivers/usb/gadget/udc/fsl_mxc_udc.c122
-rw-r--r--drivers/usb/host/ehci-ps3.c4
-rw-r--r--drivers/usb/host/ohci-ps3.c4
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/misc/yurex.c3
-rw-r--r--drivers/usb/serial/iuu_phoenix.c20
-rw-r--r--drivers/usb/serial/option.c3
-rw-r--r--drivers/usb/storage/unusual_uas.h7
-rw-r--r--drivers/usb/typec/altmodes/Kconfig2
-rw-r--r--drivers/usb/typec/class.c2
-rw-r--r--drivers/usb/typec/mux/intel_pmc_mux.c11
-rw-r--r--drivers/usb/usbip/vhci_hcd.c2
-rw-r--r--drivers/vdpa/Kconfig16
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c11
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c5
-rw-r--r--drivers/vdpa/vdpa.c2
-rw-r--r--drivers/vdpa/vdpa_sim/Makefile1
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.c298
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim.h105
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c177
-rw-r--r--drivers/vfio/mdev/mdev_core.c4
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c13
-rw-r--r--drivers/vfio/pci/vfio_pci.c7
-rw-r--r--drivers/vfio/pci/vfio_pci_nvlink2.c7
-rw-r--r--drivers/vfio/vfio.c18
-rw-r--r--drivers/vfio/vfio_iommu_type1.c24
-rw-r--r--drivers/vfio/virqfd.c3
-rw-r--r--drivers/vhost/net.c6
-rw-r--r--drivers/vhost/scsi.c3
-rw-r--r--drivers/vhost/vdpa.c10
-rw-r--r--drivers/vhost/vsock.c68
-rw-r--r--drivers/video/fbdev/geode/lxfb_ops.c1
-rw-r--r--drivers/video/fbdev/pm2fb.c1
-rw-r--r--drivers/video/fbdev/ps3fb.c4
-rw-r--r--drivers/virtio/virtio_mem.c1789
-rw-r--r--drivers/virtio/virtio_ring.c8
-rw-r--r--drivers/watchdog/Kconfig14
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/geodewdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c8
-rw-r--r--drivers/watchdog/iTCO_wdt.c34
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c4
-rw-r--r--drivers/watchdog/pnx833x_wdt.c277
-rw-r--r--drivers/watchdog/qcom-wdt.c20
-rw-r--r--drivers/watchdog/rti_wdt.c4
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c12
-rw-r--r--drivers/watchdog/sp805_wdt.c1
-rw-r--r--drivers/watchdog/sprd_wdt.c43
-rw-r--r--drivers/watchdog/stm32_iwdg.c13
-rw-r--r--drivers/watchdog/watchdog_core.c22
-rw-r--r--drivers/watchdog/wdat_wdt.c6
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/events/events_base.c182
-rw-r--r--drivers/xen/evtchn.c34
-rw-r--r--drivers/xen/manage.c1
1109 files changed, 40124 insertions, 11248 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index edf1558c1105..ebcf534514be 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -395,9 +395,6 @@ config ACPI_CONTAINER
This helps support hotplug of nodes, CPUs, and memory.
- To compile this driver as a module, choose M here:
- the module will be called container.
-
config ACPI_HOTPLUG_MEMORY
bool "Memory Hotplug"
depends on MEMORY_HOTPLUG
@@ -411,9 +408,6 @@ config ACPI_HOTPLUG_MEMORY
removing memory devices at runtime, you need not enable
this driver.
- To compile this driver as a module, choose M here:
- the module will be called acpi_memhotplug.
-
config ACPI_HOTPLUG_IOAPIC
bool
depends on PCI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 44e412506317..076894a3330f 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -54,6 +54,7 @@ acpi-y += property.o
acpi-$(CONFIG_X86) += acpi_cmos_rtc.o
acpi-$(CONFIG_X86) += x86/apple.o
acpi-$(CONFIG_X86) += x86/utils.o
+acpi-$(CONFIG_X86) += x86/s2idle.o
acpi-$(CONFIG_DEBUG_FS) += debugfs.o
acpi-y += acpi_lpat.o
acpi-$(CONFIG_ACPI_LPIT) += acpi_lpit.o
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 4ed755a963aa..8f2dc176bb41 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -319,6 +319,9 @@ static bool matching_id(const char *idstr, const char *list_id)
{
int i;
+ if (strlen(idstr) != strlen(list_id))
+ return false;
+
if (memcmp(idstr, list_id, 3))
return false;
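
The hunk above closes a prefix-matching hole: matching_id() compared only the leading bytes of the two IDs, so strings of different lengths could still be treated as equal. Below is a minimal user-space sketch of just the new length guard plus the 3-byte vendor-prefix compare; the real function goes on to match the remaining digits (with 'X' acting as a wildcard), which is omitted here.

#include <stdbool.h>
#include <string.h>

/* Stand-in for the guarded comparison in matching_id(): the strlen()
 * check added by the patch rejects IDs of different lengths before
 * any byte-wise comparison is attempted. */
static bool ids_match(const char *idstr, const char *list_id)
{
	if (strlen(idstr) != strlen(list_id))
		return false;

	/* Vendor prefix must match exactly, as in the original code. */
	return memcmp(idstr, list_id, 3) == 0;
}
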
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index a852dc4927f7..75aaf94ae0a9 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -414,109 +414,88 @@ end:
return result;
}
+bool acpi_cpc_valid(void)
+{
+ struct cpc_desc *cpc_ptr;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+ if (!cpc_ptr)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(acpi_cpc_valid);
+
/**
- * acpi_get_psd_map - Map the CPUs in a common freq domain.
- * @all_cpu_data: Ptrs to CPU specific CPPC data including PSD info.
+ * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
+ * @cpu: Find all CPUs that share a domain with cpu.
+ * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
*
* Return: 0 for success or negative value for err.
*/
-int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
+int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
- int count_target;
- int retval = 0;
- unsigned int i, j;
- cpumask_var_t covered_cpus;
- struct cppc_cpudata *pr, *match_pr;
- struct acpi_psd_package *pdomain;
- struct acpi_psd_package *match_pdomain;
struct cpc_desc *cpc_ptr, *match_cpc_ptr;
-
- if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
- return -ENOMEM;
+ struct acpi_psd_package *match_pdomain;
+ struct acpi_psd_package *pdomain;
+ int count_target, i;
/*
* Now that we have _PSD data from all CPUs, let's setup P-state
* domain info.
*/
- for_each_possible_cpu(i) {
- if (cpumask_test_cpu(i, covered_cpus))
- continue;
-
- pr = all_cpu_data[i];
- cpc_ptr = per_cpu(cpc_desc_ptr, i);
- if (!cpc_ptr) {
- retval = -EFAULT;
- goto err_ret;
- }
+ cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
+ if (!cpc_ptr)
+ return -EFAULT;
- pdomain = &(cpc_ptr->domain_info);
- cpumask_set_cpu(i, pr->shared_cpu_map);
- cpumask_set_cpu(i, covered_cpus);
- if (pdomain->num_processors <= 1)
- continue;
+ pdomain = &(cpc_ptr->domain_info);
+ cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+ if (pdomain->num_processors <= 1)
+ return 0;
- /* Validate the Domain info */
- count_target = pdomain->num_processors;
- if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
- pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
- else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
- pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;
-
- for_each_possible_cpu(j) {
- if (i == j)
- continue;
-
- match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
- if (!match_cpc_ptr) {
- retval = -EFAULT;
- goto err_ret;
- }
+ /* Validate the Domain info */
+ count_target = pdomain->num_processors;
+ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
+ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;
- match_pdomain = &(match_cpc_ptr->domain_info);
- if (match_pdomain->domain != pdomain->domain)
- continue;
+ for_each_possible_cpu(i) {
+ if (i == cpu)
+ continue;
- /* Here i and j are in the same domain */
- if (match_pdomain->num_processors != count_target) {
- retval = -EFAULT;
- goto err_ret;
- }
+ match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
+ if (!match_cpc_ptr)
+ goto err_fault;
- if (pdomain->coord_type != match_pdomain->coord_type) {
- retval = -EFAULT;
- goto err_ret;
- }
+ match_pdomain = &(match_cpc_ptr->domain_info);
+ if (match_pdomain->domain != pdomain->domain)
+ continue;
- cpumask_set_cpu(j, covered_cpus);
- cpumask_set_cpu(j, pr->shared_cpu_map);
- }
+ /* Here i and cpu are in the same domain */
+ if (match_pdomain->num_processors != count_target)
+ goto err_fault;
- for_each_cpu(j, pr->shared_cpu_map) {
- if (i == j)
- continue;
+ if (pdomain->coord_type != match_pdomain->coord_type)
+ goto err_fault;
- match_pr = all_cpu_data[j];
- match_pr->shared_type = pr->shared_type;
- cpumask_copy(match_pr->shared_cpu_map,
- pr->shared_cpu_map);
- }
+ cpumask_set_cpu(i, cpu_data->shared_cpu_map);
}
- goto out;
-err_ret:
- for_each_possible_cpu(i) {
- pr = all_cpu_data[i];
+ return 0;
- /* Assume no coordination on any error parsing domain info */
- cpumask_clear(pr->shared_cpu_map);
- cpumask_set_cpu(i, pr->shared_cpu_map);
- pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
- }
-out:
- free_cpumask_var(covered_cpus);
- return retval;
+err_fault:
+ /* Assume no coordination on any error parsing domain info */
+ cpumask_clear(cpu_data->shared_cpu_map);
+ cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
+ cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;
+
+ return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
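
With the rework above, callers no longer hand in an array covering every CPU: they validate the CPC data once via acpi_cpc_valid() and then resolve the P-state sharing domain one CPU at a time. A sketch of how a driver such as cppc-cpufreq might drive the new entry points; the helper name and the -ENODEV error code are illustrative, not taken from the patch.

#include <acpi/cppc_acpi.h>

/* Illustrative init path: refuse to proceed unless every possible CPU
 * exposes _CPC data, then fill cpu_data->shared_cpu_map with all CPUs
 * that share cpu's _PSD frequency domain. */
static int cppc_init_cpu(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	int ret;

	if (!acpi_cpc_valid())
		return -ENODEV;		/* some CPU lacks a CPC descriptor */

	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret)			/* -EFAULT: inconsistent _PSD data */
		return ret;

	/* On success, shared_cpu_map holds cpu's whole sharing domain;
	 * on failure it was reset to just cpu with SHARED_TYPE_NONE. */
	return 0;
}
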
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index 442608220b5c..b11b08a60684 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -5,6 +5,7 @@
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
@@ -282,18 +283,19 @@ err:
static union acpi_object *int_to_buf(union acpi_object *integer)
{
- union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
+ union acpi_object *buf = NULL;
void *dst = NULL;
- if (!buf)
- goto err;
-
if (integer->type != ACPI_TYPE_INTEGER) {
WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
integer->type);
goto err;
}
+ buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
+ if (!buf)
+ goto err;
+
dst = buf + 1;
buf->type = ACPI_TYPE_BUFFER;
buf->buffer.length = 4;
@@ -478,8 +480,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
cmd_mask = nd_desc->cmd_mask;
if (cmd == ND_CMD_CALL && call_pkg->nd_family) {
family = call_pkg->nd_family;
- if (!test_bit(family, &nd_desc->bus_family_mask))
+ if (family > NVDIMM_BUS_FAMILY_MAX ||
+ !test_bit(family, &nd_desc->bus_family_mask))
return -EINVAL;
+ family = array_index_nospec(family,
+ NVDIMM_BUS_FAMILY_MAX + 1);
dsm_mask = acpi_desc->family_dsm_mask[family];
guid = to_nfit_bus_uuid(family);
} else {
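
The array_index_nospec() clamp added above bounds "family" before it indexes family_dsm_mask, so a mispredicted branch cannot leak data through a speculative out-of-bounds load. A portable user-space model of the mask trick (the in-kernel helper is arch-specific; this only illustrates the idea, and relies on arithmetic right shift as mainstream compilers provide):

#include <stdio.h>

/* All-ones when index < size, zero otherwise, computed without a
 * conditional branch the CPU could speculate past. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

int main(void)
{
	unsigned long table[4] = { 10, 20, 30, 40 };
	unsigned long idx = 2;

	if (idx >= 4)
		return 1;			/* architectural bounds check */
	idx &= index_mask_nospec(idx, 4);	/* speculative clamp */
	printf("%lu\n", table[idx]);
	return 0;
}
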
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0dcedd652807..32f0f554ccae 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -708,7 +708,7 @@ err_ret:
if (retval) {
cpumask_clear(pr->performance->shared_cpu_map);
cpumask_set_cpu(i, pr->performance->shared_cpu_map);
- pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+ pr->performance->shared_type = CPUFREQ_SHARED_TYPE_NONE;
}
pr->performance = NULL; /* Will be set for real in register */
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index a1b226eb2ce2..80b668c80073 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -752,6 +752,7 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
/* List of HIDs for which we ignore matching ACPI devices, when checking _DEP lists. */
static const char * const acpi_ignore_dep_ids[] = {
"PNP0D80", /* Windows-compatible System Power Management Controller */
+ "INT33BD", /* Intel Baytrail Mailbox Device */
NULL
};
@@ -1635,8 +1636,6 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
device_initialize(&device->dev);
dev_set_uevent_suppress(&device->dev, true);
acpi_init_coherency(device);
- /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
- device->dep_unmet = 1;
}
void acpi_device_add_finalize(struct acpi_device *device)
@@ -1842,32 +1841,36 @@ static void acpi_scan_init_hotplug(struct acpi_device *adev)
}
}
-static void acpi_device_dep_initialize(struct acpi_device *adev)
+static u32 acpi_scan_check_dep(acpi_handle handle)
{
- struct acpi_dep_data *dep;
struct acpi_handle_list dep_devices;
acpi_status status;
+ u32 count;
int i;
- adev->dep_unmet = 0;
-
- if (!acpi_has_method(adev->handle, "_DEP"))
- return;
+ /*
+ * Check for _HID here to avoid deferring the enumeration of:
+ * 1. PCI devices.
+ * 2. ACPI nodes describing USB ports.
+	 * Still, checking for _HID catches more than just these cases ...
+ */
+ if (!acpi_has_method(handle, "_DEP") || !acpi_has_method(handle, "_HID"))
+ return 0;
- status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
- &dep_devices);
+ status = acpi_evaluate_reference(handle, "_DEP", NULL, &dep_devices);
if (ACPI_FAILURE(status)) {
- dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
- return;
+ acpi_handle_debug(handle, "Failed to evaluate _DEP.\n");
+ return 0;
}
- for (i = 0; i < dep_devices.count; i++) {
+ for (count = 0, i = 0; i < dep_devices.count; i++) {
struct acpi_device_info *info;
- int skip;
+ struct acpi_dep_data *dep;
+ bool skip;
status = acpi_get_object_info(dep_devices.handles[i], &info);
if (ACPI_FAILURE(status)) {
- dev_dbg(&adev->dev, "Error reading _DEP device info\n");
+ acpi_handle_debug(handle, "Error reading _DEP device info\n");
continue;
}
@@ -1877,26 +1880,45 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
if (skip)
continue;
- dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL);
+ dep = kzalloc(sizeof(*dep), GFP_KERNEL);
if (!dep)
- return;
+ continue;
+
+ count++;
dep->supplier = dep_devices.handles[i];
- dep->consumer = adev->handle;
- adev->dep_unmet++;
+ dep->consumer = handle;
mutex_lock(&acpi_dep_list_lock);
list_add_tail(&dep->node , &acpi_dep_list);
mutex_unlock(&acpi_dep_list_lock);
}
+
+ return count;
}
-static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
- void *not_used, void **return_value)
+static void acpi_scan_dep_init(struct acpi_device *adev)
+{
+ struct acpi_dep_data *dep;
+
+ mutex_lock(&acpi_dep_list_lock);
+
+ list_for_each_entry(dep, &acpi_dep_list, node) {
+ if (dep->consumer == adev->handle)
+ adev->dep_unmet++;
+ }
+
+ mutex_unlock(&acpi_dep_list_lock);
+}
+
+static bool acpi_bus_scan_second_pass;
+
+static acpi_status acpi_bus_check_add(acpi_handle handle, bool check_dep,
+ struct acpi_device **adev_p)
{
struct acpi_device *device = NULL;
- int type;
unsigned long long sta;
+ int type;
int result;
acpi_bus_get_device(handle, &device);
@@ -1912,20 +1934,42 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
return AE_OK;
}
+ if (type == ACPI_BUS_TYPE_DEVICE && check_dep) {
+ u32 count = acpi_scan_check_dep(handle);
+ /* Bail out if the number of recorded dependencies is not 0. */
+ if (count > 0) {
+ acpi_bus_scan_second_pass = true;
+ return AE_CTRL_DEPTH;
+ }
+ }
+
acpi_add_single_object(&device, handle, type, sta);
if (!device)
return AE_CTRL_DEPTH;
acpi_scan_init_hotplug(device);
- acpi_device_dep_initialize(device);
+ if (!check_dep)
+ acpi_scan_dep_init(device);
- out:
- if (!*return_value)
- *return_value = device;
+out:
+ if (!*adev_p)
+ *adev_p = device;
return AE_OK;
}
+static acpi_status acpi_bus_check_add_1(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
+{
+ return acpi_bus_check_add(handle, true, (struct acpi_device **)ret_p);
+}
+
+static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
+{
+ return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p);
+}
+
static void acpi_default_enumeration(struct acpi_device *device)
{
/*
@@ -1993,12 +2037,16 @@ static int acpi_scan_attach_handler(struct acpi_device *device)
return ret;
}
-static void acpi_bus_attach(struct acpi_device *device)
+static void acpi_bus_attach(struct acpi_device *device, bool first_pass)
{
struct acpi_device *child;
+ bool skip = !first_pass && device->flags.visited;
acpi_handle ejd;
int ret;
+ if (skip)
+ goto ok;
+
if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
register_dock_dependent_device(device, ejd);
@@ -2045,9 +2093,9 @@ static void acpi_bus_attach(struct acpi_device *device)
ok:
list_for_each_entry(child, &device->children, node)
- acpi_bus_attach(child);
+ acpi_bus_attach(child, first_pass);
- if (device->handler && device->handler->hotplug.notify_online)
+ if (!skip && device->handler && device->handler->hotplug.notify_online)
device->handler->hotplug.notify_online(device);
}
@@ -2065,7 +2113,8 @@ void acpi_walk_dep_device_list(acpi_handle handle)
adev->dep_unmet--;
if (!adev->dep_unmet)
- acpi_bus_attach(adev);
+ acpi_bus_attach(adev, true);
+
list_del(&dep->node);
kfree(dep);
}
@@ -2090,17 +2139,37 @@ EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
*/
int acpi_bus_scan(acpi_handle handle)
{
- void *device = NULL;
+ struct acpi_device *device = NULL;
+
+ acpi_bus_scan_second_pass = false;
- if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
+ /* Pass 1: Avoid enumerating devices with missing dependencies. */
+
+ if (ACPI_SUCCESS(acpi_bus_check_add(handle, true, &device)))
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
- acpi_bus_check_add, NULL, NULL, &device);
+ acpi_bus_check_add_1, NULL, NULL,
+ (void **)&device);
+
+ if (!device)
+ return -ENODEV;
- if (device) {
- acpi_bus_attach(device);
+ acpi_bus_attach(device, true);
+
+ if (!acpi_bus_scan_second_pass)
return 0;
- }
- return -ENODEV;
+
+ /* Pass 2: Enumerate all of the remaining devices. */
+
+ device = NULL;
+
+ if (ACPI_SUCCESS(acpi_bus_check_add(handle, false, &device)))
+ acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
+ acpi_bus_check_add_2, NULL, NULL,
+ (void **)&device);
+
+ acpi_bus_attach(device, false);
+
+ return 0;
}
EXPORT_SYMBOL(acpi_bus_scan);
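
The two-pass scan above defers any device node that still has recorded _DEP dependencies (returning AE_CTRL_DEPTH skips its subtree) and walks the namespace a second time only if something was deferred. A toy user-space model of that control flow, with illustrative names:

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	int dep_count;		/* unresolved dependencies */
	bool enumerated;
};

static bool second_pass_needed;

static void check_add(struct node *n, bool check_dep)
{
	if (check_dep && n->dep_count > 0) {
		/* Like AE_CTRL_DEPTH: skip for now, remember to rescan. */
		second_pass_needed = true;
		return;
	}
	if (!n->enumerated) {
		n->enumerated = true;
		printf("enumerated %s\n", n->name);
	}
}

int main(void)
{
	struct node nodes[] = { { "dev0", 0 }, { "dev1", 2 }, { "dev2", 0 } };
	int i;

	for (i = 0; i < 3; i++)		/* pass 1: honor dependencies */
		check_add(&nodes[i], true);

	if (second_pass_needed)
		for (i = 0; i < 3; i++)	/* pass 2: enumerate the rest */
			check_add(&nodes[i], false);
	return 0;
}
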
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index aff13bf4d947..09fd13757b65 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -92,10 +92,6 @@ bool acpi_sleep_state_supported(u8 sleep_state)
}
#ifdef CONFIG_ACPI_SLEEP
-static bool sleep_no_lps0 __read_mostly;
-module_param(sleep_no_lps0, bool, 0644);
-MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
-
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
u32 acpi_target_system_state(void)
@@ -165,7 +161,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
-static bool acpi_sleep_default_s3;
+bool acpi_sleep_default_s3;
static int __init init_default_s3(const struct dmi_system_id *d)
{
@@ -688,268 +684,13 @@ static const struct platform_suspend_ops acpi_suspend_ops_old = {
static bool s2idle_wakeup;
-/*
- * On platforms supporting the Low Power S0 Idle interface there is an ACPI
- * device object with the PNP0D80 compatible device ID (System Power Management
- * Controller) and a specific _DSM method under it. That method, if present,
- * can be used to indicate to the platform that the OS is transitioning into a
- * low-power state in which certain types of activity are not desirable or that
- * it is leaving such a state, which allows the platform to adjust its operation
- * mode accordingly.
- */
-static const struct acpi_device_id lps0_device_ids[] = {
- {"PNP0D80", },
- {"", },
-};
-
-#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
-
-#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
-#define ACPI_LPS0_SCREEN_OFF 3
-#define ACPI_LPS0_SCREEN_ON 4
-#define ACPI_LPS0_ENTRY 5
-#define ACPI_LPS0_EXIT 6
-
-static acpi_handle lps0_device_handle;
-static guid_t lps0_dsm_guid;
-static char lps0_dsm_func_mask;
-
-/* Device constraint entry structure */
-struct lpi_device_info {
- char *name;
- int enabled;
- union acpi_object *package;
-};
-
-/* Constraint package structure */
-struct lpi_device_constraint {
- int uid;
- int min_dstate;
- int function_states;
-};
-
-struct lpi_constraints {
- acpi_handle handle;
- int min_dstate;
-};
-
-static struct lpi_constraints *lpi_constraints_table;
-static int lpi_constraints_table_size;
-
-static void lpi_device_get_constraints(void)
-{
- union acpi_object *out_obj;
- int i;
-
- out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
- 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
- NULL, ACPI_TYPE_PACKAGE);
-
- acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
- out_obj ? "successful" : "failed");
-
- if (!out_obj)
- return;
-
- lpi_constraints_table = kcalloc(out_obj->package.count,
- sizeof(*lpi_constraints_table),
- GFP_KERNEL);
- if (!lpi_constraints_table)
- goto free_acpi_buffer;
-
- acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
-
- for (i = 0; i < out_obj->package.count; i++) {
- struct lpi_constraints *constraint;
- acpi_status status;
- union acpi_object *package = &out_obj->package.elements[i];
- struct lpi_device_info info = { };
- int package_count = 0, j;
-
- if (!package)
- continue;
-
- for (j = 0; j < package->package.count; ++j) {
- union acpi_object *element =
- &(package->package.elements[j]);
-
- switch (element->type) {
- case ACPI_TYPE_INTEGER:
- info.enabled = element->integer.value;
- break;
- case ACPI_TYPE_STRING:
- info.name = element->string.pointer;
- break;
- case ACPI_TYPE_PACKAGE:
- package_count = element->package.count;
- info.package = element->package.elements;
- break;
- }
- }
-
- if (!info.enabled || !info.package || !info.name)
- continue;
-
- constraint = &lpi_constraints_table[lpi_constraints_table_size];
-
- status = acpi_get_handle(NULL, info.name, &constraint->handle);
- if (ACPI_FAILURE(status))
- continue;
-
- acpi_handle_debug(lps0_device_handle,
- "index:%d Name:%s\n", i, info.name);
-
- constraint->min_dstate = -1;
-
- for (j = 0; j < package_count; ++j) {
- union acpi_object *info_obj = &info.package[j];
- union acpi_object *cnstr_pkg;
- union acpi_object *obj;
- struct lpi_device_constraint dev_info;
-
- switch (info_obj->type) {
- case ACPI_TYPE_INTEGER:
- /* version */
- break;
- case ACPI_TYPE_PACKAGE:
- if (info_obj->package.count < 2)
- break;
-
- cnstr_pkg = info_obj->package.elements;
- obj = &cnstr_pkg[0];
- dev_info.uid = obj->integer.value;
- obj = &cnstr_pkg[1];
- dev_info.min_dstate = obj->integer.value;
-
- acpi_handle_debug(lps0_device_handle,
- "uid:%d min_dstate:%s\n",
- dev_info.uid,
- acpi_power_state_string(dev_info.min_dstate));
-
- constraint->min_dstate = dev_info.min_dstate;
- break;
- }
- }
-
- if (constraint->min_dstate < 0) {
- acpi_handle_debug(lps0_device_handle,
- "Incomplete constraint defined\n");
- continue;
- }
-
- lpi_constraints_table_size++;
- }
-
- acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
-
-free_acpi_buffer:
- ACPI_FREE(out_obj);
-}
-
-static void lpi_check_constraints(void)
-{
- int i;
-
- for (i = 0; i < lpi_constraints_table_size; ++i) {
- acpi_handle handle = lpi_constraints_table[i].handle;
- struct acpi_device *adev;
-
- if (!handle || acpi_bus_get_device(handle, &adev))
- continue;
-
- acpi_handle_debug(handle,
- "LPI: required min power state:%s current power state:%s\n",
- acpi_power_state_string(lpi_constraints_table[i].min_dstate),
- acpi_power_state_string(adev->power.state));
-
- if (!adev->flags.power_manageable) {
- acpi_handle_info(handle, "LPI: Device not power manageable\n");
- lpi_constraints_table[i].handle = NULL;
- continue;
- }
-
- if (adev->power.state < lpi_constraints_table[i].min_dstate)
- acpi_handle_info(handle,
- "LPI: Constraint not met; min power state:%s current power state:%s\n",
- acpi_power_state_string(lpi_constraints_table[i].min_dstate),
- acpi_power_state_string(adev->power.state));
- }
-}
-
-static void acpi_sleep_run_lps0_dsm(unsigned int func)
-{
- union acpi_object *out_obj;
-
- if (!(lps0_dsm_func_mask & (1 << func)))
- return;
-
- out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, 1, func, NULL);
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
- func, out_obj ? "successful" : "failed");
-}
-
-static int lps0_device_attach(struct acpi_device *adev,
- const struct acpi_device_id *not_used)
-{
- union acpi_object *out_obj;
-
- if (lps0_device_handle)
- return 0;
-
- if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
- return 0;
-
- guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
- /* Check if the _DSM is present and as expected. */
- out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
- if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
- acpi_handle_debug(adev->handle,
- "_DSM function 0 evaluation failed\n");
- return 0;
- }
-
- lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
-
- ACPI_FREE(out_obj);
-
- acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
- lps0_dsm_func_mask);
-
- lps0_device_handle = adev->handle;
-
- lpi_device_get_constraints();
-
- /*
- * Use suspend-to-idle by default if the default suspend mode was not
- * set from the command line.
- */
- if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
- mem_sleep_current = PM_SUSPEND_TO_IDLE;
-
- /*
- * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
- * EC GPE to be enabled while suspended for certain wakeup devices to
- * work, so mark it as wakeup-capable.
- */
- acpi_ec_mark_gpe_for_wake();
-
- return 0;
-}
-
-static struct acpi_scan_handler lps0_handler = {
- .ids = lps0_device_ids,
- .attach = lps0_device_attach,
-};
-
-static int acpi_s2idle_begin(void)
+int acpi_s2idle_begin(void)
{
acpi_scan_lock_acquire();
return 0;
}
-static int acpi_s2idle_prepare(void)
+int acpi_s2idle_prepare(void)
{
if (acpi_sci_irq_valid()) {
enable_irq_wake(acpi_sci_irq);
@@ -966,21 +707,7 @@ static int acpi_s2idle_prepare(void)
return 0;
}
-static int acpi_s2idle_prepare_late(void)
-{
- if (!lps0_device_handle || sleep_no_lps0)
- return 0;
-
- if (pm_debug_messages_on)
- lpi_check_constraints();
-
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
-
- return 0;
-}
-
-static bool acpi_s2idle_wake(void)
+bool acpi_s2idle_wake(void)
{
if (!acpi_sci_irq_valid())
return pm_wakeup_pending();
@@ -1046,16 +773,7 @@ static bool acpi_s2idle_wake(void)
return false;
}
-static void acpi_s2idle_restore_early(void)
-{
- if (!lps0_device_handle || sleep_no_lps0)
- return;
-
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
- acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
-}
-
-static void acpi_s2idle_restore(void)
+void acpi_s2idle_restore(void)
{
/*
* Drain pending events before restoring the working-state configuration
@@ -1077,7 +795,7 @@ static void acpi_s2idle_restore(void)
}
}
-static void acpi_s2idle_end(void)
+void acpi_s2idle_end(void)
{
acpi_scan_lock_release();
}
@@ -1085,13 +803,16 @@ static void acpi_s2idle_end(void)
static const struct platform_s2idle_ops acpi_s2idle_ops = {
.begin = acpi_s2idle_begin,
.prepare = acpi_s2idle_prepare,
- .prepare_late = acpi_s2idle_prepare_late,
.wake = acpi_s2idle_wake,
- .restore_early = acpi_s2idle_restore_early,
.restore = acpi_s2idle_restore,
.end = acpi_s2idle_end,
};
+void __weak acpi_s2idle_setup(void)
+{
+ s2idle_set_ops(&acpi_s2idle_ops);
+}
+
static void acpi_sleep_suspend_setup(void)
{
int i;
@@ -1103,13 +824,11 @@ static void acpi_sleep_suspend_setup(void)
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
- acpi_scan_add_handler(&lps0_handler);
- s2idle_set_ops(&acpi_s2idle_ops);
+ acpi_s2idle_setup();
}
#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup (false)
-#define lps0_device_handle (NULL)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */
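
acpi_s2idle_setup() above is declared __weak so the generic sleep code registers the plain acpi_s2idle_ops unless an architecture-specific object (here drivers/acpi/x86/s2idle.c) links in a strong definition that registers the LPS0-aware ops instead. A minimal user-space illustration of the link-time override (GCC/Clang attribute; not kernel code):

#include <stdio.h>

/* Weak default; a strong setup() in another object file linked into
 * the same binary silently replaces it. */
void __attribute__((weak)) setup(void)
{
	puts("generic s2idle ops");
}

int main(void)
{
	setup();	/* prints the default unless overridden at link time */
	return 0;
}
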
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 3d90480ce1b1..1856f76ac83f 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -15,3 +15,19 @@ static inline acpi_status acpi_set_waking_vector(u32 wakeup_address)
return acpi_set_firmware_waking_vector(
(acpi_physical_address)wakeup_address, 0);
}
+
+extern int acpi_s2idle_begin(void);
+extern int acpi_s2idle_prepare(void);
+extern int acpi_s2idle_prepare_late(void);
+extern bool acpi_s2idle_wake(void);
+extern void acpi_s2idle_restore_early(void);
+extern void acpi_s2idle_restore(void);
+extern void acpi_s2idle_end(void);
+
+extern void acpi_s2idle_setup(void);
+
+#ifdef CONFIG_ACPI_SLEEP
+extern bool acpi_sleep_default_s3;
+#else
+#define acpi_sleep_default_s3 (1)
+#endif
diff --git a/drivers/acpi/x86/s2idle.c b/drivers/acpi/x86/s2idle.c
new file mode 100644
index 000000000000..2b69536cdccb
--- /dev/null
+++ b/drivers/acpi/x86/s2idle.c
@@ -0,0 +1,448 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Architecture-specific ACPI-based support for suspend-to-idle.
+ *
+ * Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+ * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+ * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
+ *
+ * On platforms supporting the Low Power S0 Idle interface there is an ACPI
+ * device object with the PNP0D80 compatible device ID (System Power Management
+ * Controller) and a specific _DSM method under it. That method, if present,
+ * can be used to indicate to the platform that the OS is transitioning into a
+ * low-power state in which certain types of activity are not desirable or that
+ * it is leaving such a state, which allows the platform to adjust its operation
+ * mode accordingly.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/suspend.h>
+
+#include "../sleep.h"
+
+#ifdef CONFIG_SUSPEND
+
+static bool sleep_no_lps0 __read_mostly;
+module_param(sleep_no_lps0, bool, 0644);
+MODULE_PARM_DESC(sleep_no_lps0, "Do not use the special LPS0 device interface");
+
+static const struct acpi_device_id lps0_device_ids[] = {
+ {"PNP0D80", },
+ {"", },
+};
+
+#define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66"
+
+#define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1
+#define ACPI_LPS0_SCREEN_OFF 3
+#define ACPI_LPS0_SCREEN_ON 4
+#define ACPI_LPS0_ENTRY 5
+#define ACPI_LPS0_EXIT 6
+
+/* AMD */
+#define ACPI_LPS0_DSM_UUID_AMD "e3f32452-febc-43ce-9039-932122d37721"
+#define ACPI_LPS0_SCREEN_OFF_AMD 4
+#define ACPI_LPS0_SCREEN_ON_AMD 5
+
+static acpi_handle lps0_device_handle;
+static guid_t lps0_dsm_guid;
+static char lps0_dsm_func_mask;
+
+/* Device constraint entry structure */
+struct lpi_device_info {
+ char *name;
+ int enabled;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint {
+ int uid;
+ int min_dstate;
+ int function_states;
+};
+
+struct lpi_constraints {
+ acpi_handle handle;
+ int min_dstate;
+};
+
+/* AMD */
+/* Device constraint entry structure */
+struct lpi_device_info_amd {
+ int revision;
+ int count;
+ union acpi_object *package;
+};
+
+/* Constraint package structure */
+struct lpi_device_constraint_amd {
+ char *name;
+ int enabled;
+ int function_states;
+ int min_dstate;
+};
+
+static struct lpi_constraints *lpi_constraints_table;
+static int lpi_constraints_table_size;
+static int rev_id;
+
+static void lpi_device_get_constraints_amd(void)
+{
+ union acpi_object *out_obj;
+ int i, j, k;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+	acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+			  out_obj ? "successful" : "failed");
+
+	if (!out_obj)
+		return;
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ union acpi_object *package = &out_obj->package.elements[i];
+
+ if (package->type == ACPI_TYPE_PACKAGE) {
+ lpi_constraints_table = kcalloc(package->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle,
+ "LPI: constraints list begin:\n");
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *info_obj = &package->package.elements[j];
+ struct lpi_device_constraint_amd dev_info = {};
+ struct lpi_constraints *list;
+ acpi_status status;
+
+ for (k = 0; k < info_obj->package.count; ++k) {
+ union acpi_object *obj = &info_obj->package.elements[k];
+
+ list = &lpi_constraints_table[lpi_constraints_table_size];
+ list->min_dstate = -1;
+
+ switch (k) {
+ case 0:
+ dev_info.enabled = obj->integer.value;
+ break;
+ case 1:
+ dev_info.name = obj->string.pointer;
+ break;
+ case 2:
+ dev_info.function_states = obj->integer.value;
+ break;
+ case 3:
+ dev_info.min_dstate = obj->integer.value;
+ break;
+ }
+
+ if (!dev_info.enabled || !dev_info.name ||
+ !dev_info.min_dstate)
+ continue;
+
+ status = acpi_get_handle(NULL, dev_info.name,
+ &list->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "Name:%s\n", dev_info.name);
+
+ list->min_dstate = dev_info.min_dstate;
+
+ if (list->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+ }
+ lpi_constraints_table_size++;
+ }
+ }
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_device_get_constraints(void)
+{
+ union acpi_object *out_obj;
+ int i;
+
+ out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid,
+ 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS,
+ NULL, ACPI_TYPE_PACKAGE);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n",
+ out_obj ? "successful" : "failed");
+
+ if (!out_obj)
+ return;
+
+ lpi_constraints_table = kcalloc(out_obj->package.count,
+ sizeof(*lpi_constraints_table),
+ GFP_KERNEL);
+ if (!lpi_constraints_table)
+ goto free_acpi_buffer;
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n");
+
+ for (i = 0; i < out_obj->package.count; i++) {
+ struct lpi_constraints *constraint;
+ acpi_status status;
+ union acpi_object *package = &out_obj->package.elements[i];
+ struct lpi_device_info info = { };
+ int package_count = 0, j;
+
+ if (!package)
+ continue;
+
+ for (j = 0; j < package->package.count; ++j) {
+ union acpi_object *element =
+ &(package->package.elements[j]);
+
+ switch (element->type) {
+ case ACPI_TYPE_INTEGER:
+ info.enabled = element->integer.value;
+ break;
+ case ACPI_TYPE_STRING:
+ info.name = element->string.pointer;
+ break;
+ case ACPI_TYPE_PACKAGE:
+ package_count = element->package.count;
+ info.package = element->package.elements;
+ break;
+ }
+ }
+
+ if (!info.enabled || !info.package || !info.name)
+ continue;
+
+ constraint = &lpi_constraints_table[lpi_constraints_table_size];
+
+ status = acpi_get_handle(NULL, info.name, &constraint->handle);
+ if (ACPI_FAILURE(status))
+ continue;
+
+ acpi_handle_debug(lps0_device_handle,
+ "index:%d Name:%s\n", i, info.name);
+
+ constraint->min_dstate = -1;
+
+ for (j = 0; j < package_count; ++j) {
+ union acpi_object *info_obj = &info.package[j];
+ union acpi_object *cnstr_pkg;
+ union acpi_object *obj;
+ struct lpi_device_constraint dev_info;
+
+ switch (info_obj->type) {
+ case ACPI_TYPE_INTEGER:
+ /* version */
+ break;
+ case ACPI_TYPE_PACKAGE:
+ if (info_obj->package.count < 2)
+ break;
+
+ cnstr_pkg = info_obj->package.elements;
+ obj = &cnstr_pkg[0];
+ dev_info.uid = obj->integer.value;
+ obj = &cnstr_pkg[1];
+ dev_info.min_dstate = obj->integer.value;
+
+ acpi_handle_debug(lps0_device_handle,
+ "uid:%d min_dstate:%s\n",
+ dev_info.uid,
+ acpi_power_state_string(dev_info.min_dstate));
+
+ constraint->min_dstate = dev_info.min_dstate;
+ break;
+ }
+ }
+
+ if (constraint->min_dstate < 0) {
+ acpi_handle_debug(lps0_device_handle,
+ "Incomplete constraint defined\n");
+ continue;
+ }
+
+ lpi_constraints_table_size++;
+ }
+
+ acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n");
+
+free_acpi_buffer:
+ ACPI_FREE(out_obj);
+}
+
+static void lpi_check_constraints(void)
+{
+ int i;
+
+ for (i = 0; i < lpi_constraints_table_size; ++i) {
+ acpi_handle handle = lpi_constraints_table[i].handle;
+ struct acpi_device *adev;
+
+ if (!handle || acpi_bus_get_device(handle, &adev))
+ continue;
+
+ acpi_handle_debug(handle,
+ "LPI: required min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+
+ if (!adev->flags.power_manageable) {
+ acpi_handle_info(handle, "LPI: Device not power manageable\n");
+ lpi_constraints_table[i].handle = NULL;
+ continue;
+ }
+
+ if (adev->power.state < lpi_constraints_table[i].min_dstate)
+ acpi_handle_info(handle,
+ "LPI: Constraint not met; min power state:%s current power state:%s\n",
+ acpi_power_state_string(lpi_constraints_table[i].min_dstate),
+ acpi_power_state_string(adev->power.state));
+ }
+}
+
+static void acpi_sleep_run_lps0_dsm(unsigned int func)
+{
+ union acpi_object *out_obj;
+
+ if (!(lps0_dsm_func_mask & (1 << func)))
+ return;
+
+ out_obj = acpi_evaluate_dsm(lps0_device_handle, &lps0_dsm_guid, rev_id, func, NULL);
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(lps0_device_handle, "_DSM function %u evaluation %s\n",
+ func, out_obj ? "successful" : "failed");
+}
+
+static bool acpi_s2idle_vendor_amd(void)
+{
+ return boot_cpu_data.x86_vendor == X86_VENDOR_AMD;
+}
+
+static int lps0_device_attach(struct acpi_device *adev,
+ const struct acpi_device_id *not_used)
+{
+ union acpi_object *out_obj;
+
+ if (lps0_device_handle)
+ return 0;
+
+ if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0))
+ return 0;
+
+ if (acpi_s2idle_vendor_amd()) {
+ guid_parse(ACPI_LPS0_DSM_UUID_AMD, &lps0_dsm_guid);
+ out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 0, 0, NULL);
+ rev_id = 0;
+ } else {
+ guid_parse(ACPI_LPS0_DSM_UUID, &lps0_dsm_guid);
+ out_obj = acpi_evaluate_dsm(adev->handle, &lps0_dsm_guid, 1, 0, NULL);
+ rev_id = 1;
+ }
+
+ /* Check if the _DSM is present and as expected. */
+ if (!out_obj || out_obj->type != ACPI_TYPE_BUFFER) {
+ acpi_handle_debug(adev->handle,
+ "_DSM function 0 evaluation failed\n");
+ return 0;
+ }
+
+ lps0_dsm_func_mask = *(char *)out_obj->buffer.pointer;
+
+ ACPI_FREE(out_obj);
+
+ acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n",
+ lps0_dsm_func_mask);
+
+ lps0_device_handle = adev->handle;
+
+ if (acpi_s2idle_vendor_amd())
+ lpi_device_get_constraints_amd();
+ else
+ lpi_device_get_constraints();
+
+ /*
+ * Use suspend-to-idle by default if the default suspend mode was not
+ * set from the command line.
+ */
+ if (mem_sleep_default > PM_SUSPEND_MEM && !acpi_sleep_default_s3)
+ mem_sleep_current = PM_SUSPEND_TO_IDLE;
+
+ /*
+ * Some LPS0 systems, like ASUS Zenbook UX430UNR/i7-8550U, require the
+ * EC GPE to be enabled while suspended for certain wakeup devices to
+ * work, so mark it as wakeup-capable.
+ */
+ acpi_ec_mark_gpe_for_wake();
+
+ return 0;
+}
+
+static struct acpi_scan_handler lps0_handler = {
+ .ids = lps0_device_ids,
+ .attach = lps0_device_attach,
+};
+
+int acpi_s2idle_prepare_late(void)
+{
+ if (!lps0_device_handle || sleep_no_lps0)
+ return 0;
+
+ if (pm_debug_messages_on)
+ lpi_check_constraints();
+
+ if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF_AMD);
+ } else {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_ENTRY);
+ }
+
+ return 0;
+}
+
+void acpi_s2idle_restore_early(void)
+{
+ if (!lps0_device_handle || sleep_no_lps0)
+ return;
+
+ if (acpi_s2idle_vendor_amd()) {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON_AMD);
+ } else {
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_EXIT);
+ acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_ON);
+ }
+}
+
+static const struct platform_s2idle_ops acpi_s2idle_ops_lps0 = {
+ .begin = acpi_s2idle_begin,
+ .prepare = acpi_s2idle_prepare,
+ .prepare_late = acpi_s2idle_prepare_late,
+ .wake = acpi_s2idle_wake,
+ .restore_early = acpi_s2idle_restore_early,
+ .restore = acpi_s2idle_restore,
+ .end = acpi_s2idle_end,
+};
+
+void acpi_s2idle_setup(void)
+{
+ acpi_scan_add_handler(&lps0_handler);
+ s2idle_set_ops(&acpi_s2idle_ops_lps0);
+}
+
+#endif /* CONFIG_SUSPEND */
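
The new file keeps the existing LPS0 flow but keys the _DSM GUID and revision off the CPU vendor and gates every call on the function bitmask returned by _DSM function 0. A small user-space model of that gating (the mask value here is made up):

#include <stdio.h>

static unsigned char lps0_dsm_func_mask = 0x38;	/* pretend firmware value */

static void run_lps0_dsm(unsigned int func)
{
	if (!(lps0_dsm_func_mask & (1u << func)))
		return;		/* function not advertised by firmware */
	printf("evaluate _DSM function %u\n", func);
}

int main(void)
{
	run_lps0_dsm(3);	/* ACPI_LPS0_SCREEN_OFF */
	run_lps0_dsm(5);	/* ACPI_LPS0_ENTRY */
	return 0;
}
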
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 65a3886f68c9..5f0472c18bcb 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
- return err;
+ goto err_out_disable_pdev;
}
card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
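
The one-line idt77252 fix routes the DMA-mask failure through the existing unwind label so the PCI device gets disabled again. Generic shape of that error-path pattern, as a runnable sketch with stand-in functions:

#include <stdio.h>

static int enable_device(void)   { puts("enable");  return 0; }
static int set_dma_mask(void)    { return -5; }	/* simulate failure */
static void disable_device(void) { puts("disable"); }

static int init_one(void)
{
	int err;

	err = enable_device();
	if (err)
		return err;

	err = set_dma_mask();
	if (err)
		goto err_out_disable;	/* undo enable_device(), as above */

	return 0;

err_out_disable:
	disable_device();
	return err;
}

int main(void)
{
	return init_one() ? 1 : 0;
}
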
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 25e08e5f40bd..14f165816742 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -4414,6 +4414,12 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
*
* Set the device's firmware node pointer to @fwnode, but if a secondary
* firmware node of the device is present, preserve it.
+ *
+ * Valid fwnode cases are:
+ * - primary --> secondary --> -ENODEV
+ * - primary --> NULL
+ * - secondary --> -ENODEV
+ * - NULL
*/
void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
{
@@ -4432,8 +4438,9 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
} else {
if (fwnode_is_primary(fn)) {
dev->fwnode = fn->secondary;
+ /* Set fn->secondary = NULL, so fn remains the primary fwnode */
if (!(parent && fn == parent->fwnode))
- fn->secondary = ERR_PTR(-ENODEV);
+ fn->secondary = NULL;
} else {
dev->fwnode = NULL;
}
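
set_primary_fwnode() now leaves fn->secondary NULL rather than ERR_PTR(-ENODEV) when the primary is cleared, so fn remains a reusable primary node. A simplified user-space model of just the demotion step (the real kernel logic also special-cases the parent's fwnode, omitted here; types are illustrative):

#include <stddef.h>
#include <stdio.h>

struct fwnode {
	struct fwnode *secondary;
	const char *name;
};

struct device {
	struct fwnode *fwnode;
};

/* Clearing the primary: fall back to the secondary and detach it
 * from the old primary, which stays valid for later reuse. */
static void clear_primary(struct device *dev)
{
	struct fwnode *fn = dev->fwnode;

	if (fn && fn->secondary) {
		dev->fwnode = fn->secondary;
		fn->secondary = NULL;
	} else {
		dev->fwnode = NULL;
	}
}

int main(void)
{
	struct fwnode sec = { NULL, "swnode" };
	struct fwnode pri = { &sec, "acpi" };
	struct device dev = { &pri };

	clear_primary(&dev);
	printf("dev uses %s, old primary secondary=%p\n",
	       dev.fwnode->name, (void *)pri.secondary);
	return 0;
}
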
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 8dfac7f3ed7a..ff2ee87987c7 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -582,8 +582,12 @@ void regmap_debugfs_init(struct regmap *map)
devname = dev_name(map->dev);
if (name) {
- map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
+ if (!map->debugfs_name) {
+ map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
devname, name);
+ if (!map->debugfs_name)
+ return;
+ }
name = map->debugfs_name;
} else {
name = devname;
@@ -591,9 +595,10 @@ void regmap_debugfs_init(struct regmap *map)
if (!strcmp(name, "dummy")) {
kfree(map->debugfs_name);
-
map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
dummy_index);
+ if (!map->debugfs_name)
+ return;
name = map->debugfs_name;
dummy_index++;
}
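
The regmap change builds the debugfs name only once and bails out when the allocation fails, instead of leaking the old string or passing NULL on. A rough user-space stand-in using asprintf() (GNU extension) just to show the allocate-once-and-check shape:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static char *debugfs_name;	/* cached across re-registration */

static const char *get_debugfs_name(const char *devname, const char *name)
{
	if (!debugfs_name &&
	    asprintf(&debugfs_name, "%s-%s", devname, name) < 0) {
		debugfs_name = NULL;
		return NULL;	/* allocation failed: skip debugfs setup */
	}
	return debugfs_name;
}

int main(void)
{
	const char *n = get_debugfs_name("regmap0", "codec");

	if (n)
		puts(n);
	n = get_debugfs_name("regmap0", "codec");	/* cached */
	if (n)
		puts(n);
	free(debugfs_name);
	return 0;
}
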
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 262326973ee0..583b671b1d2d 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -445,6 +445,7 @@ config BLK_DEV_RBD
config BLK_DEV_RSXX
tristate "IBM Flash Adapter 900GB Full Height PCIe Device Driver"
depends on PCI
+ select CRC32
help
Device driver for IBM's high speed PCIe SSD
storage device: Flash Adapter 900GB Full Height.
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 92f84ed0ba9e..6727358e147d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -318,7 +318,8 @@ static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
blk_queue_logical_block_size(nbd->disk->queue, blksize);
blk_queue_physical_block_size(nbd->disk->queue, blksize);
- set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
return 0;
@@ -1476,9 +1477,11 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
refcount_set(&nbd->config_refs, 1);
refcount_inc(&nbd->refs);
mutex_unlock(&nbd->config_lock);
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
} else if (nbd_disconnected(nbd->config)) {
- set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
+ if (max_part)
+ set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}
out:
mutex_unlock(&nbd_index_mutex);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index 7b55811c2a81..ba3ece56cbb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -507,7 +507,7 @@ fail:
return error;
}
-static int ps3disk_remove(struct ps3_system_bus_device *_dev)
+static void ps3disk_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct ps3disk_private *priv = ps3_system_bus_get_drvdata(&dev->sbd);
@@ -526,7 +526,6 @@ static int ps3disk_remove(struct ps3_system_bus_device *_dev)
kfree(dev->bounce_buf);
kfree(priv);
ps3_system_bus_set_drvdata(_dev, NULL);
- return 0;
}
static struct ps3_system_bus_driver ps3disk = {
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 1088798c8dd0..b71d28372ef3 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -797,7 +797,7 @@ fail:
return error;
}
-static int ps3vram_remove(struct ps3_system_bus_device *dev)
+static void ps3vram_remove(struct ps3_system_bus_device *dev)
{
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -817,7 +817,6 @@ static int ps3vram_remove(struct ps3_system_bus_device *dev)
free_pages((unsigned long) priv->xdr_buf, get_order(XDR_BUF_SIZE));
kfree(priv);
ps3_system_bus_set_drvdata(dev, NULL);
- return 0;
}
static struct ps3_system_bus_driver ps3vram = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 2ed79b09439a..59cfe71d0b3a 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -3925,8 +3925,12 @@ static int find_watcher(struct rbd_device *rbd_dev,
sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
for (i = 0; i < num_watchers; i++) {
- if (!memcmp(&watchers[i].addr, &locker->info.addr,
- sizeof(locker->info.addr)) &&
+ /*
+ * Ignore addr->type while comparing. This mimics
+ * entity_addr_t::get_legacy_str() + strcmp().
+ */
+ if (ceph_addr_equal_no_type(&watchers[i].addr,
+ &locker->info.addr) &&
watchers[i].cookie == cookie) {
struct rbd_client_id cid = {
.gid = le64_to_cpu(watchers[i].name.num),
diff --git a/drivers/block/rnbd/Kconfig b/drivers/block/rnbd/Kconfig
index 4b6d3d816d1f..2ff05a0d2646 100644
--- a/drivers/block/rnbd/Kconfig
+++ b/drivers/block/rnbd/Kconfig
@@ -7,6 +7,7 @@ config BLK_DEV_RNBD_CLIENT
tristate "RDMA Network Block Device driver client"
depends on INFINIBAND_RTRS_CLIENT
select BLK_DEV_RNBD
+ select SG_POOL
help
RNBD client is a network block device driver using rdma transport.
diff --git a/drivers/block/rnbd/README b/drivers/block/rnbd/README
index 1773c0aa0bd4..080f58a5400a 100644
--- a/drivers/block/rnbd/README
+++ b/drivers/block/rnbd/README
@@ -90,3 +90,4 @@ Kleber Souza <kleber.souza@profitbricks.com>
Lutz Pogrell <lutz.pogrell@cloud.ionos.com>
Milind Dumbare <Milind.dumbare@gmail.com>
Roman Penyaev <roman.penyaev@profitbricks.com>
+Swapnil Ingle <ingleswapnil@gmail.com>
diff --git a/drivers/block/rnbd/rnbd-clt-sysfs.c b/drivers/block/rnbd/rnbd-clt-sysfs.c
index a7caeedeb198..d4aa6bfc9555 100644
--- a/drivers/block/rnbd/rnbd-clt-sysfs.c
+++ b/drivers/block/rnbd/rnbd-clt-sysfs.c
@@ -432,7 +432,7 @@ void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev)
* i.e. rnbd_clt_unmap_dev_store() leading to a sysfs warning because
 	 * the sysfs link was already removed.
*/
- if (strlen(dev->blk_symlink_name) && try_module_get(THIS_MODULE)) {
+ if (dev->blk_symlink_name && try_module_get(THIS_MODULE)) {
sysfs_remove_link(rnbd_devs_kobj, dev->blk_symlink_name);
kfree(dev->blk_symlink_name);
module_put(THIS_MODULE);
@@ -521,7 +521,8 @@ static int rnbd_clt_add_dev_symlink(struct rnbd_clt_dev *dev)
return 0;
out_err:
- dev->blk_symlink_name[0] = '\0';
+ kfree(dev->blk_symlink_name);
+	dev->blk_symlink_name = NULL;
return ret;
}
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index a199b190c73d..45a470076652 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -88,6 +88,8 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard);
dev->rotational = rsp->rotational;
+ dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
+ dev->fua = !!(rsp->cache_policy & RNBD_FUA);
dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
dev->max_segments = BMAX_SEGMENTS;
@@ -347,32 +349,48 @@ static struct rnbd_iu *rnbd_get_iu(struct rnbd_clt_session *sess,
struct rnbd_iu *iu;
struct rtrs_permit *permit;
+ iu = kzalloc(sizeof(*iu), GFP_KERNEL);
+	if (!iu)
+		return NULL;
+
permit = rnbd_get_permit(sess, con_type,
wait ? RTRS_PERMIT_WAIT :
RTRS_PERMIT_NOWAIT);
- if (unlikely(!permit))
+ if (unlikely(!permit)) {
+ kfree(iu);
return NULL;
- iu = rtrs_permit_to_pdu(permit);
+ }
+
iu->permit = permit;
/*
* 1st reference is dropped after finishing sending a "user" message,
* 2nd reference is dropped after confirmation with the response is
* returned.
* 1st and 2nd can happen in any order, so the rnbd_iu should be
- * released (rtrs_permit returned to ibbtrs) only leased after both
+ * released (rtrs_permit returned to rtrs) only after both
* are finished.
*/
atomic_set(&iu->refcount, 2);
init_waitqueue_head(&iu->comp.wait);
iu->comp.errno = INT_MAX;
+ if (sg_alloc_table(&iu->sgt, 1, GFP_KERNEL)) {
+ rnbd_put_permit(sess, permit);
+ kfree(iu);
+ return NULL;
+ }
+
return iu;
}
static void rnbd_put_iu(struct rnbd_clt_session *sess, struct rnbd_iu *iu)
{
- if (atomic_dec_and_test(&iu->refcount))
+ if (atomic_dec_and_test(&iu->refcount)) {
+ sg_free_table(&iu->sgt);
rnbd_put_permit(sess, iu->permit);
+ kfree(iu);
+ }
}
static void rnbd_softirq_done_fn(struct request *rq)
@@ -382,6 +400,7 @@ static void rnbd_softirq_done_fn(struct request *rq)
struct rnbd_iu *iu;
iu = blk_mq_rq_to_pdu(rq);
+ sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
rnbd_put_permit(sess, iu->permit);
blk_mq_end_request(rq, errno_to_blk_status(iu->errno));
}
@@ -475,8 +494,6 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
iu->buf = NULL;
iu->dev = dev;
- sg_mark_end(&iu->sglist[0]);
-
msg.hdr.type = cpu_to_le16(RNBD_MSG_CLOSE);
msg.device_id = cpu_to_le32(device_id);
@@ -562,7 +579,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
iu->buf = rsp;
iu->dev = dev;
- sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_OPEN);
msg.access_mode = dev->access_mode;
@@ -570,7 +587,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
WARN_ON(!rnbd_clt_get_dev(dev));
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_open_conf, &errno, wait);
if (err) {
rnbd_clt_put_dev(dev);
@@ -607,8 +624,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
iu->buf = rsp;
iu->sess = sess;
-
- sg_init_one(iu->sglist, rsp, sizeof(*rsp));
+ sg_init_one(iu->sgt.sgl, rsp, sizeof(*rsp));
msg.hdr.type = cpu_to_le16(RNBD_MSG_SESS_INFO);
msg.ver = RNBD_PROTO_VER_MAJOR;
@@ -624,7 +640,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
goto put_iu;
}
err = send_usr_msg(sess->rtrs, READ, iu,
- &vec, sizeof(*rsp), iu->sglist, 1,
+ &vec, sizeof(*rsp), iu->sgt.sgl, 1,
msg_sess_info_conf, &errno, wait);
if (err) {
rnbd_clt_put_sess(sess);
@@ -634,7 +650,6 @@ put_iu:
} else {
err = errno;
}
-
rnbd_put_iu(sess, iu);
return err;
}
@@ -803,7 +818,7 @@ static struct rnbd_clt_session *alloc_sess(const char *sessname)
rnbd_init_cpu_qlists(sess->cpu_queues);
/*
- * That is simple percpu variable which stores cpu indeces, which are
+ * That is simple percpu variable which stores cpu indices, which are
* incremented on each access. We need that for the sake of fairness
* to wake up queues in a round-robin manner.
*/
@@ -1014,11 +1029,10 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
* See queue limits.
*/
if (req_op(rq) != REQ_OP_DISCARD)
- sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sglist);
+ sg_cnt = blk_rq_map_sg(dev->queue, rq, iu->sgt.sgl);
if (sg_cnt == 0)
- /* Do not forget to mark the end */
- sg_mark_end(&iu->sglist[0]);
+ sg_mark_end(&iu->sgt.sgl[0]);
msg.hdr.type = cpu_to_le16(RNBD_MSG_IO);
msg.device_id = cpu_to_le32(dev->device_id);
@@ -1027,13 +1041,13 @@ static int rnbd_client_xfer_request(struct rnbd_clt_dev *dev,
.iov_base = &msg,
.iov_len = sizeof(msg)
};
- size = rnbd_clt_get_sg_size(iu->sglist, sg_cnt);
+ size = rnbd_clt_get_sg_size(iu->sgt.sgl, sg_cnt);
req_ops = (struct rtrs_clt_req_ops) {
.priv = iu,
.conf_fn = msg_io_conf,
};
err = rtrs_clt_request(rq_data_dir(rq), &req_ops, rtrs, permit,
- &vec, 1, size, iu->sglist, sg_cnt);
+ &vec, 1, size, iu->sgt.sgl, sg_cnt);
if (unlikely(err)) {
rnbd_clt_err_rl(dev, "RTRS failed to transfer IO, err: %d\n",
err);
@@ -1120,6 +1134,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
struct rnbd_clt_dev *dev = rq->rq_disk->private_data;
struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
int err;
+ blk_status_t ret = BLK_STS_IOERR;
if (unlikely(dev->dev_state != DEV_STATE_MAPPED))
return BLK_STS_IOERR;
@@ -1131,32 +1146,35 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_RESOURCE;
}
+ iu->sgt.sgl = iu->first_sgl;
+ err = sg_alloc_table_chained(&iu->sgt,
+			     /* Even if the request has no segments,
+			      * the sglist must have at least one entry. */
+ blk_rq_nr_phys_segments(rq) ? : 1,
+ iu->sgt.sgl,
+ RNBD_INLINE_SG_CNT);
+ if (err) {
+ rnbd_clt_err_rl(dev, "sg_alloc_table_chained ret=%d\n", err);
+ rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
+ rnbd_put_permit(dev->sess, iu->permit);
+ return BLK_STS_RESOURCE;
+ }
+
blk_mq_start_request(rq);
err = rnbd_client_xfer_request(dev, rq, iu);
if (likely(err == 0))
return BLK_STS_OK;
if (unlikely(err == -EAGAIN || err == -ENOMEM)) {
rnbd_clt_dev_kick_mq_queue(dev, hctx, 10/*ms*/);
- rnbd_put_permit(dev->sess, iu->permit);
- return BLK_STS_RESOURCE;
+ ret = BLK_STS_RESOURCE;
}
-
+ sg_free_table_chained(&iu->sgt, RNBD_INLINE_SG_CNT);
rnbd_put_permit(dev->sess, iu->permit);
- return BLK_STS_IOERR;
-}
-
-static int rnbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct rnbd_iu *iu = blk_mq_rq_to_pdu(rq);
-
- sg_init_table(iu->sglist, BMAX_SEGMENTS);
- return 0;
+ return ret;
}
static struct blk_mq_ops rnbd_mq_ops = {
.queue_rq = rnbd_queue_rq,
- .init_request = rnbd_init_request,
.complete = rnbd_softirq_done_fn,
};
@@ -1170,7 +1188,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
tag_set->numa_node = NUMA_NO_NODE;
tag_set->flags = BLK_MQ_F_SHOULD_MERGE |
BLK_MQ_F_TAG_QUEUE_SHARED;
- tag_set->cmd_size = sizeof(struct rnbd_iu);
+ tag_set->cmd_size = sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE;
tag_set->nr_hw_queues = num_online_cpus();
return blk_mq_alloc_tag_set(tag_set);
@@ -1208,7 +1226,7 @@ find_and_get_or_create_sess(const char *sessname,
*/
sess->rtrs = rtrs_clt_open(&rtrs_ops, sessname,
paths, path_cnt, port_nr,
- sizeof(struct rnbd_iu),
+ 0, /* Do not use pdu of rtrs */
RECONNECT_DELAY, BMAX_SEGMENTS,
BLK_MAX_SEGMENT_SIZE,
MAX_RECONNECTS);
@@ -1305,7 +1323,7 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_max_segments(dev->queue, dev->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
- blk_queue_write_cache(dev->queue, true, true);
+ blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
dev->queue->queuedata = dev;
}
@@ -1388,12 +1406,11 @@ static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
goto out_queues;
}
- dev->pathname = kzalloc(strlen(pathname) + 1, GFP_KERNEL);
+ dev->pathname = kstrdup(pathname, GFP_KERNEL);
if (!dev->pathname) {
ret = -ENOMEM;
goto out_queues;
}
- strlcpy(dev->pathname, pathname, strlen(pathname) + 1);
dev->clt_device_id = ret;
dev->sess = sess;
@@ -1529,13 +1546,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
}
rnbd_clt_info(dev,
- "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n",
+ "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size,
dev->max_write_same_sectors, dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments,
- dev->max_hw_sectors, dev->rotational);
+ dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
mutex_unlock(&dev->lock);
@@ -1667,7 +1684,7 @@ static void rnbd_destroy_sessions(void)
/*
 	 * At this point there is no concurrent access to the sessions
* list and devices list:
- * 1. New session or device can'be be created - session sysfs files
+ * 1. New session or device can't be created - session sysfs files
* are removed.
* 2. Device or session can't be removed - module reference is taken
* into account in unmap device sysfs callback.
@@ -1680,7 +1697,8 @@ static void rnbd_destroy_sessions(void)
*/
list_for_each_entry_safe(sess, sn, &sess_list, list) {
- WARN_ON(!rnbd_clt_get_sess(sess));
+ if (!rnbd_clt_get_sess(sess))
+ continue;
close_rtrs(sess);
list_for_each_entry_safe(dev, tn, &sess->devs_list, list) {
/*
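
The rnbd-clt rework replaces the fixed sglist[BMAX_SEGMENTS] array with an sg_table plus a small inline scatterlist, so only requests with more than RNBD_INLINE_SG_CNT segments pay for a chained allocation (handled in the driver by sg_alloc_table_chained()). A toy user-space model of that inline-or-heap scheme, with illustrative types:

#include <stdio.h>
#include <stdlib.h>

#define INLINE_SG_CNT 2

struct sg {
	void *page;
	unsigned int len;
};

struct iu {
	struct sg *sgl;			/* inline array or heap array */
	struct sg first_sgl[INLINE_SG_CNT];
};

static int iu_alloc_sgl(struct iu *iu, unsigned int nents)
{
	if (nents <= INLINE_SG_CNT) {
		iu->sgl = iu->first_sgl;	/* fast path: no allocation */
		return 0;
	}
	iu->sgl = calloc(nents, sizeof(*iu->sgl));
	return iu->sgl ? 0 : -1;
}

static void iu_free_sgl(struct iu *iu)
{
	if (iu->sgl != iu->first_sgl)
		free(iu->sgl);
}

int main(void)
{
	struct iu iu;

	if (iu_alloc_sgl(&iu, 1) == 0)
		puts("1 segment: inline");
	iu_free_sgl(&iu);

	if (iu_alloc_sgl(&iu, 8) == 0)
		puts("8 segments: heap");
	iu_free_sgl(&iu);
	return 0;
}
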
diff --git a/drivers/block/rnbd/rnbd-clt.h b/drivers/block/rnbd/rnbd-clt.h
index b193d5904050..537d499dad3b 100644
--- a/drivers/block/rnbd/rnbd-clt.h
+++ b/drivers/block/rnbd/rnbd-clt.h
@@ -44,6 +44,13 @@ struct rnbd_iu_comp {
int errno;
};
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define RNBD_INLINE_SG_CNT 0
+#else
+#define RNBD_INLINE_SG_CNT 2
+#endif
+#define RNBD_RDMA_SGL_SIZE (sizeof(struct scatterlist) * RNBD_INLINE_SG_CNT)
+
struct rnbd_iu {
union {
struct request *rq; /* for block io */
@@ -56,11 +63,12 @@ struct rnbd_iu {
/* use to send msg associated with a sess */
struct rnbd_clt_session *sess;
};
- struct scatterlist sglist[BMAX_SEGMENTS];
+ struct sg_table sgt;
struct work_struct work;
int errno;
struct rnbd_iu_comp comp;
atomic_t refcount;
+ struct scatterlist first_sgl[]; /* must be the last one */
};
struct rnbd_cpu_qlist {
@@ -112,6 +120,8 @@ struct rnbd_clt_dev {
enum rnbd_access_mode access_mode;
bool read_only;
bool rotational;
+ bool wc;
+ bool fua;
u32 max_hw_sectors;
u32 max_write_same_sectors;
u32 max_discard_sectors;
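
first_sgl[] above is a flexible array member, which is why cmd_size becomes sizeof(struct rnbd_iu) + RNBD_RDMA_SGL_SIZE: blk-mq allocates that many bytes of per-request space, landing the inline scatterlist directly behind the struct. Generic shape of the trick in plain C:

#include <stdio.h>
#include <stdlib.h>

struct elem {
	unsigned int len;
};

struct iu {
	int refcount;
	struct elem first[];	/* flexible array: must be the last member */
};

int main(void)
{
	unsigned int n = 2;	/* like RNBD_INLINE_SG_CNT */
	struct iu *iu = malloc(sizeof(*iu) + n * sizeof(struct elem));

	if (!iu)
		return 1;
	iu->first[1].len = 4096;
	printf("inline elem len=%u\n", iu->first[1].len);
	free(iu);
	return 0;
}
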
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index ca166241452c..c1bc5c0fef71 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -108,6 +108,11 @@ struct rnbd_msg_close {
__le32 device_id;
};
+enum rnbd_cache_policy {
+ RNBD_FUA = 1 << 0,
+ RNBD_WRITEBACK = 1 << 1,
+};
+
/**
* struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
* @hdr: message header
@@ -124,6 +129,7 @@ struct rnbd_msg_close {
* @max_segments: max segments hardware support in one transfer
* @secure_discard: supports secure discard
* @rotation: is a rotational disc?
+ * @cache_policy: support write-back caching or FUA?
*/
struct rnbd_msg_open_rsp {
struct rnbd_msg_hdr hdr;
@@ -139,7 +145,8 @@ struct rnbd_msg_open_rsp {
__le16 max_segments;
__le16 secure_discard;
u8 rotational;
- u8 reserved[11];
+ u8 cache_policy;
+ u8 reserved[10];
};
/**
diff --git a/drivers/block/rnbd/rnbd-srv.c b/drivers/block/rnbd/rnbd-srv.c
index d1ee72ed8384..a6a68d44f517 100644
--- a/drivers/block/rnbd/rnbd-srv.c
+++ b/drivers/block/rnbd/rnbd-srv.c
@@ -338,9 +338,12 @@ static int rnbd_srv_link_ev(struct rtrs_srv *rtrs,
void rnbd_srv_sess_dev_force_close(struct rnbd_srv_sess_dev *sess_dev)
{
- rnbd_srv_destroy_dev_session_sysfs(sess_dev);
- sess_dev->keep_id = true;
+ struct rnbd_srv_session *sess = sess_dev->sess;
+ sess_dev->keep_id = true;
+ mutex_lock(&sess->lock);
+ rnbd_srv_destroy_dev_session_sysfs(sess_dev);
+ mutex_unlock(&sess->lock);
}
static int process_msg_close(struct rtrs_srv *rtrs,
@@ -549,6 +552,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev)
{
struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
+ struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id =
@@ -573,8 +577,12 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
rsp->secure_discard =
cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
- rsp->rotational =
- !blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev));
+ rsp->rotational = !blk_queue_nonrot(q);
+ rsp->cache_policy = 0;
+ if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
+ rsp->cache_policy |= RNBD_WRITEBACK;
+ if (blk_queue_fua(q))
+ rsp->cache_policy |= RNBD_FUA;
}
static struct rnbd_srv_sess_dev *
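
The cache_policy byte is carved out of the reserved area, so old servers that send zeroes are read by new clients as neither write-back nor FUA capable. The encode/decode in isolation (the enum mirrors rnbd-proto.h above; the rest is a toy):

#include <stdio.h>

enum rnbd_cache_policy {
	RNBD_FUA	= 1 << 0,
	RNBD_WRITEBACK	= 1 << 1,
};

int main(void)
{
	unsigned char cache_policy = 0;

	/* server side: advertise what the backing queue supports */
	cache_policy |= RNBD_WRITEBACK;
	cache_policy |= RNBD_FUA;

	/* client side: decode into the per-device wc/fua flags */
	int wc  = !!(cache_policy & RNBD_WRITEBACK);
	int fua = !!(cache_policy & RNBD_FUA);

	printf("wc=%d fua=%d\n", wc, fua);
	return 0;
}
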
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 188e0b47534b..5265975b3fba 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -2462,6 +2462,7 @@ static void blkback_changed(struct xenbus_device *dev,
break;
if (talk_to_blkback(dev, info))
break;
+ break;
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
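
The xen-blkfront hunk only adds a break, stopping the connected-state handling from falling through into the states that are meant to be ignored. A minimal illustration of the bug class:

#include <stdio.h>

enum state { CONNECTED, INITIALISING };

static void backend_changed(enum state s, int talk_fails)
{
	switch (s) {
	case CONNECTED:
		if (talk_fails)
			break;
		puts("handle connected");
		break;		/* previously missing: fell into the cases below */
	case INITIALISING:
	default:
		break;		/* ignore */
	}
}

int main(void)
{
	backend_changed(CONNECTED, 0);
	return 0;
}
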
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 92ecf1a78ec7..a27d751cf219 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -853,8 +853,12 @@ static int sysc_ioremap(struct sysc *ddata)
*/
static int sysc_map_and_check_registers(struct sysc *ddata)
{
+ struct device_node *np = ddata->dev->of_node;
int error;
+ if (!of_get_property(np, "reg", NULL))
+ return 0;
+
error = sysc_parse_and_check_child_range(ddata);
if (error)
return error;
@@ -1222,10 +1226,10 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
ddata->enabled = false;
err_allow_idle:
- reset_control_assert(ddata->rsts);
-
sysc_clkdm_allow_idle(ddata);
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -1379,6 +1383,8 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
SYSC_QUIRK("hdq1w", 0, 0, 0x14, 0x18, 0x00000006, 0xffffffff,
@@ -1814,6 +1820,14 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+#ifdef CONFIG_OMAP_GPMC_DEBUG
+ if (ddata->cfg.quirks & SYSC_QUIRK_GPMC_DEBUG) {
+ ddata->cfg.quirks |= SYSC_QUIRK_NO_RESET_ON_INIT;
+
+ return;
+ }
+#endif
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_I2C) {
ddata->pre_reset_quirk = sysc_pre_reset_quirk_i2c;
ddata->post_reset_quirk = sysc_post_reset_quirk_i2c;
@@ -1945,6 +1959,7 @@ static int sysc_reset(struct sysc *ddata)
*/
static int sysc_init_module(struct sysc *ddata)
{
+ bool rstctrl_deasserted = false;
int error = 0;
error = sysc_clockdomain_init(ddata);
@@ -1969,6 +1984,7 @@ static int sysc_init_module(struct sysc *ddata)
error = reset_control_deassert(ddata->rsts);
if (error)
goto err_main_clocks;
+ rstctrl_deasserted = true;
}
ddata->revision = sysc_read_revision(ddata);
@@ -1978,13 +1994,13 @@ static int sysc_init_module(struct sysc *ddata)
if (ddata->legacy_mode) {
error = sysc_legacy_init(ddata);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
- goto err_reset;
+ goto err_main_clocks;
}
error = sysc_reset(ddata);
@@ -1994,10 +2010,6 @@ static int sysc_init_module(struct sysc *ddata)
if (error && !ddata->legacy_mode)
sysc_disable_module(ddata->dev);
-err_reset:
- if (error && !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
err_main_clocks:
if (error)
sysc_disable_main_clocks(ddata);
@@ -2008,6 +2020,10 @@ err_opt_clocks:
sysc_clkdm_allow_idle(ddata);
}
+ if (error && rstctrl_deasserted &&
+ !(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
return error;
}
@@ -2909,6 +2925,9 @@ static int sysc_probe(struct platform_device *pdev)
if (!ddata)
return -ENOMEM;
+ ddata->offsets[SYSC_REVISION] = -ENODEV;
+ ddata->offsets[SYSC_SYSCONFIG] = -ENODEV;
+ ddata->offsets[SYSC_SYSSTATUS] = -ENODEV;
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
@@ -2975,9 +2994,6 @@ static int sysc_probe(struct platform_device *pdev)
}
/* Balance use counts as PM runtime should have enabled these all */
- if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
- reset_control_assert(ddata->rsts);
-
if (!(ddata->cfg.quirks &
(SYSC_QUIRK_NO_IDLE | SYSC_QUIRK_NO_IDLE_ON_INIT))) {
sysc_disable_main_clocks(ddata);
@@ -2985,6 +3001,9 @@ static int sysc_probe(struct platform_device *pdev)
sysc_clkdm_allow_idle(ddata);
}
+ if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT))
+ reset_control_assert(ddata->rsts);
+
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
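
The ti-sysc changes track whether this function actually deasserted the reset line (rstctrl_deasserted) and re-assert it on the error path only in that case, after the clocks have been dropped. Shape of that conditional unwind as a runnable sketch with stand-in prints:

#include <stdbool.h>
#include <stdio.h>

static int init_module_sketch(bool has_rstctrl, bool fail_late)
{
	bool rstctrl_deasserted = false;
	int error = 0;

	if (has_rstctrl) {
		puts("deassert reset");
		rstctrl_deasserted = true;
	}

	if (fail_late)
		error = -1;	/* simulate a later init failure */

	/* ... clock teardown would happen here on error ... */

	if (error && rstctrl_deasserted)
		puts("assert reset (unwind only what we touched)");

	return error;
}

int main(void)
{
	init_module_sketch(true, true);
	return 0;
}
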
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index cb2497d157f6..90ed8c789e48 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,7 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
-agpgart-y := backend.o frontend.o generic.o isoch.o
+agpgart-y := backend.o generic.o isoch.o
+ifeq ($(CONFIG_DRM_LEGACY),y)
agpgart-$(CONFIG_COMPAT) += compat_ioctl.o
+agpgart-y += frontend.o
+endif
+
obj-$(CONFIG_AGP) += agpgart.o
obj-$(CONFIG_AGP_ALI) += ali-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 4eb1c772ded7..bb09d64cd51e 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -186,8 +186,13 @@ int agp_add_bridge(struct agp_bridge_data *bridge);
void agp_remove_bridge(struct agp_bridge_data *bridge);
/* Frontend routines. */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
int agp_frontend_initialize(void);
void agp_frontend_cleanup(void);
+#else
+static inline int agp_frontend_initialize(void) { return 0; }
+static inline void agp_frontend_cleanup(void) {}
+#endif
/* Generic routines. */
void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
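
The agp.h hunk uses the standard config-stub idiom: when CONFIG_DRM_LEGACY is off, frontend calls compile to empty static inlines and the call sites need no #ifdefs. The idiom in isolation (FEATURE_ON stands in for the IS_ENABLED() test):

#include <stdio.h>

#define FEATURE_ON 0	/* stand-in for IS_ENABLED(CONFIG_DRM_LEGACY) */

#if FEATURE_ON
int frontend_initialize(void);
void frontend_cleanup(void);
#else
static inline int frontend_initialize(void) { return 0; }
static inline void frontend_cleanup(void) {}
#endif

int main(void)
{
	printf("frontend init: %d\n", frontend_initialize());
	frontend_cleanup();	/* compiles to nothing when stubbed */
	return 0;
}
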
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 17c1df8c909a..1fe006f3f12f 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -528,15 +528,15 @@ endif # HW_RANDOM
config UML_RANDOM
depends on UML
- tristate "Hardware random number generator"
+ select HW_RANDOM
+ tristate "UML Random Number Generator support"
help
This option enables UML's "hardware" random number generator. It
attaches itself to the host's /dev/random, supplying as much entropy
as the host has, rather than the small amount the UML gets from its
- own drivers. It registers itself as a standard hardware random number
- generator, major 10, minor 183, and the canonical device name is
- /dev/hwrng.
- The way to make use of this is to install the rng-tools package
- (check your distro, or download from
- http://sourceforge.net/projects/gkernel/). rngd periodically reads
- /dev/hwrng and injects the entropy into /dev/random.
+	  own drivers. It registers itself as a rng-core driver, providing
+	  a device usually called /dev/hwrng. This hardware random number
+	  generator also feeds the kernel's entropy pool.
+
+ If unsure, say Y.
diff --git a/drivers/char/ps3flash.c b/drivers/char/ps3flash.c
index 1a07fee33f66..23871cde41fb 100644
--- a/drivers/char/ps3flash.c
+++ b/drivers/char/ps3flash.c
@@ -403,7 +403,7 @@ fail:
return error;
}
-static int ps3flash_remove(struct ps3_system_bus_device *_dev)
+static void ps3flash_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
@@ -413,7 +413,6 @@ static int ps3flash_remove(struct ps3_system_bus_device *_dev)
kfree(ps3_system_bus_get_drvdata(&dev->sbd));
ps3_system_bus_set_drvdata(&dev->sbd, NULL);
ps3flash_dev = NULL;
- return 0;
}
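
The signature change above converts the remove callback from int to void:
its return value was never acted on, so the driver now simply cleans up and
returns, and the "return 0;" boilerplate goes away. This only compiles if
the ps3_system_bus_driver remove prototype was switched to void in the same
series; the new shape, sketched with a hypothetical driver:

    static void demo_remove(struct ps3_system_bus_device *_dev)
    {
        /* Free driver data and detach; there is no status to report. */
    }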
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index c715d4681a0b..85856cff506c 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -188,6 +188,14 @@ config COMMON_CLK_CS2000_CP
help
If you say yes here you get support for the CS2000 clock multiplier.
+config COMMON_CLK_FSL_FLEXSPI
+ tristate "Clock driver for FlexSPI on Layerscape SoCs"
+ depends on ARCH_LAYERSCAPE || COMPILE_TEST
+ default ARCH_LAYERSCAPE && SPI_NXP_FLEXSPI
+ help
+ On Layerscape SoCs there is a special clock for the FlexSPI
+ interface.
+
config COMMON_CLK_FSL_SAI
bool "Clock driver for BCLK of Freescale SAI cores"
depends on ARCH_LAYERSCAPE || COMPILE_TEST
@@ -246,7 +254,8 @@ config COMMON_CLK_AXI_CLKGEN
config CLK_QORIQ
bool "Clock driver for Freescale QorIQ platforms"
- depends on (PPC_E500MC || ARM || ARM64 || COMPILE_TEST) && OF
+ depends on OF
+ depends on PPC_E500MC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This adds the clock driver support for Freescale QorIQ platforms
using common clock framework.
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index da8fcf147eb1..dbdc590e7de3 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o
obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o
obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o
obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o
+obj-$(CONFIG_COMMON_CLK_FSL_FLEXSPI) += clk-fsl-flexspi.o
obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o
obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o
obj-$(CONFIG_COMMON_CLK_ASPEED) += clk-aspeed.o
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 2c3d8e6ca63c..0fad1009f315 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(rm9200_mck_lock);
+
struct sck {
char *n;
char *p;
@@ -137,9 +139,20 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "pllbck";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- &rm9200_mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ &rm9200_mck_characteristics,
+ &rm9200_mck_lock, CLK_SET_RATE_GATE,
+ INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ &rm9200_mck_characteristics,
+ &rm9200_mck_lock, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -181,7 +194,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91rm9200_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91rm9200_periphck[i].n,
- "masterck",
+ "masterck_div",
at91rm9200_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
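
Every at91 SoC setup file in this series gets the same transformation: the
single "masterck" is split into a prescaler clock ("masterck_pres") feeding
a divider clock ("masterck_div"), both guarded by a shared spinlock, and all
former consumers of "masterck" are reparented to "masterck_div". The
registration chain, sketched with the helper signatures this series adds to
pmc.h:

    pres_hw = at91_clk_register_master_pres(regmap, "masterck_pres",
                                            num_parents, parent_names,
                                            layout, characteristics,
                                            &mck_lock, CLK_SET_RATE_GATE,
                                            INT_MIN);
    div_hw = at91_clk_register_master_div(regmap, "masterck_div",
                                          "masterck_pres", layout,
                                          characteristics, &mck_lock,
                                          CLK_SET_RATE_GATE);
    /* system, peripheral and programmable clocks now take
     * "masterck_div" as their parent */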
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index bb81ff731ad8..ceb5495f723a 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -32,6 +32,8 @@ struct at91sam926x_data {
bool has_slck;
};
+static DEFINE_SPINLOCK(at91sam9260_mck_lock);
+
static const struct clk_master_characteristics sam9260_mck_characteristics = {
.output = { .min = 0, .max = 105000000 },
.divisors = { 1, 2, 4, 0 },
@@ -218,8 +220,8 @@ static const struct sck at91sam9261_systemck[] = {
{ .n = "pck1", .p = "prog1", .id = 9 },
{ .n = "pck2", .p = "prog2", .id = 10 },
{ .n = "pck3", .p = "prog3", .id = 11 },
- { .n = "hclk0", .p = "masterck", .id = 16 },
- { .n = "hclk1", .p = "masterck", .id = 17 },
+ { .n = "hclk0", .p = "masterck_div", .id = 16 },
+ { .n = "hclk1", .p = "masterck_div", .id = 17 },
};
static const struct pck at91sam9261_periphck[] = {
@@ -413,9 +415,21 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "pllbck";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- data->mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ data->mck_characteristics,
+ &at91sam9260_mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ data->mck_characteristics,
+ &at91sam9260_mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -457,7 +471,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
for (i = 0; i < data->num_pck; i++) {
hw = at91_clk_register_peripheral(regmap,
data->pck[i].n,
- "masterck",
+ "masterck_div",
data->pck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index cb4a406ed15d..0214333dedd3 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(at91sam9g45_mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 133333333 },
.divisors = { 1, 2, 4, 3 },
@@ -40,10 +42,10 @@ static const struct {
char *p;
u8 id;
} at91sam9g45_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
};
struct pck {
@@ -148,9 +150,21 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ &mck_characteristics,
+ &at91sam9g45_mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ &mck_characteristics,
+ &at91sam9g45_mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -166,7 +180,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -195,7 +209,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9g45_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91sam9g45_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9g45_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 93f7eb216122..f9db5316a7f1 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(at91sam9n12_mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 133333333 },
.divisors = { 1, 2, 4, 3 },
@@ -54,12 +56,12 @@ static const struct {
char *p;
u8 id;
} at91sam9n12_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
};
static const struct clk_pcr_layout at91sam9n12_pcr_layout = {
@@ -175,9 +177,21 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "pllbck";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics,
+ &at91sam9n12_mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics,
+ &at91sam9n12_mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -191,7 +205,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "pllbck";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -221,7 +235,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9n12_pcr_layout,
at91sam9n12_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9n12_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index a343eb69bb35..66736e03cfef 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(sam9rl_mck_lock);
+
static const struct clk_master_characteristics sam9rl_mck_characteristics = {
.output = { .min = 0, .max = 94000000 },
.divisors = { 1, 2, 4, 0 },
@@ -117,9 +119,20 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91rm9200_master_layout,
- &sam9rl_mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91rm9200_master_layout,
+ &sam9rl_mck_characteristics,
+ &sam9rl_mck_lock, CLK_SET_RATE_GATE,
+ INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91rm9200_master_layout,
+ &sam9rl_mck_characteristics,
+ &sam9rl_mck_lock, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -129,7 +142,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "pllack";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -158,7 +171,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
for (i = 0; i < ARRAY_SIZE(at91sam9rl_periphck); i++) {
hw = at91_clk_register_peripheral(regmap,
at91sam9rl_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9rl_periphck[i].id);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 22b9aad9efb8..79b9d3667228 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 133333333 },
.divisors = { 1, 2, 4, 3 },
@@ -41,7 +43,7 @@ static const struct {
char *p;
u8 id;
} at91sam9x5_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
{ .n = "smdck", .p = "smdclk", .id = 4 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "udpck", .p = "usbck", .id = 7 },
@@ -196,9 +198,19 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -218,7 +230,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 2; i++) {
char name[6];
@@ -245,7 +257,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
}
if (has_lcdck) {
- hw = at91_clk_register_system(regmap, "lcdck", "masterck", 3);
+ hw = at91_clk_register_system(regmap, "lcdck", "masterck_div", 3);
if (IS_ERR(hw))
goto err_free;
@@ -256,7 +268,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9x5_pcr_layout,
at91sam9x5_periphck[i].n,
- "masterck",
+ "masterck_div",
at91sam9x5_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
@@ -269,7 +281,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&at91sam9x5_pcr_layout,
extra_pcks[i].n,
- "masterck",
+ "masterck_div",
extra_pcks[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd0d8a69a2cf..a80427980bf7 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -15,7 +15,7 @@
#define MASTER_PRES_MASK 0x7
#define MASTER_PRES_MAX MASTER_PRES_MASK
#define MASTER_DIV_SHIFT 8
-#define MASTER_DIV_MASK 0x3
+#define MASTER_DIV_MASK 0x7
#define PMC_MCR 0x30
#define PMC_MCR_ID_MSK GENMASK(3, 0)
@@ -58,83 +58,309 @@ static inline bool clk_master_ready(struct clk_master *master)
static int clk_master_prepare(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+
+ spin_lock_irqsave(master->lock, flags);
while (!clk_master_ready(master))
cpu_relax();
+ spin_unlock_irqrestore(master->lock, flags);
+
return 0;
}
static int clk_master_is_prepared(struct clk_hw *hw)
{
struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ bool status;
- return clk_master_ready(master);
+ spin_lock_irqsave(master->lock, flags);
+ status = clk_master_ready(master);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return status;
}
-static unsigned long clk_master_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+static unsigned long clk_master_div_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
{
- u8 pres;
u8 div;
- unsigned long rate = parent_rate;
+ unsigned long flags, rate = parent_rate;
struct clk_master *master = to_clk_master(hw);
const struct clk_master_layout *layout = master->layout;
const struct clk_master_characteristics *characteristics =
master->characteristics;
unsigned int mckr;
+ spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
+
mckr &= layout->mask;
- pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK;
div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
- if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX)
- rate /= 3;
- else
- rate >>= pres;
-
rate /= characteristics->divisors[div];
if (rate < characteristics->output.min)
- pr_warn("master clk is underclocked");
+ pr_warn("master clk div is underclocked");
else if (rate > characteristics->output.max)
- pr_warn("master clk is overclocked");
+ pr_warn("master clk div is overclocked");
return rate;
}
-static u8 clk_master_get_parent(struct clk_hw *hw)
+static const struct clk_ops master_div_ops = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_div_recalc_rate,
+};
+
+static int clk_master_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ unsigned long flags;
+ int div, i;
+
+ div = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (div > ARRAY_SIZE(characteristics->divisors))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
+ if (!characteristics->divisors[i])
+ break;
+
+ if (div == characteristics->divisors[i]) {
+ div = i;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(characteristics->divisors))
+ return -EINVAL;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_update_bits(master->regmap, master->layout->offset,
+ (MASTER_DIV_MASK << MASTER_DIV_SHIFT),
+ (div << MASTER_DIV_SHIFT));
+ while (!clk_master_ready(master))
+ cpu_relax();
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static int clk_master_div_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct clk_master *master = to_clk_master(hw);
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ struct clk_hw *parent;
+ unsigned long parent_rate, tmp_rate, best_rate = 0;
+ int i, best_diff = INT_MIN, tmp_diff;
+
+ parent = clk_hw_get_parent(hw);
+ if (!parent)
+ return -EINVAL;
+
+ parent_rate = clk_hw_get_rate(parent);
+ if (!parent_rate)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
+ if (!characteristics->divisors[i])
+ break;
+
+ tmp_rate = DIV_ROUND_CLOSEST_ULL(parent_rate,
+ characteristics->divisors[i]);
+ tmp_diff = abs(tmp_rate - req->rate);
+
+ if (!best_rate || best_diff > tmp_diff) {
+ best_diff = tmp_diff;
+ best_rate = tmp_rate;
+ }
+
+ if (!best_diff)
+ break;
+ }
+
+ req->best_parent_rate = best_rate;
+ req->best_parent_hw = parent;
+ req->rate = best_rate;
+
+ return 0;
+}
+
+static const struct clk_ops master_div_ops_chg = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_div_recalc_rate,
+ .determine_rate = clk_master_div_determine_rate,
+ .set_rate = clk_master_div_set_rate,
+};
+
+static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
+ struct clk_hw *parent,
+ unsigned long parent_rate,
+ long *best_rate,
+ long *best_diff,
+ u32 div)
+{
+ unsigned long tmp_rate, tmp_diff;
+
+ if (div == MASTER_PRES_MAX)
+ tmp_rate = parent_rate / 3;
+ else
+ tmp_rate = parent_rate >> div;
+
+ tmp_diff = abs(req->rate - tmp_rate);
+
+ if (*best_diff < 0 || *best_diff >= tmp_diff) {
+ *best_rate = tmp_rate;
+ *best_diff = tmp_diff;
+ req->best_parent_rate = parent_rate;
+ req->best_parent_hw = parent;
+ }
+}
+
+static int clk_master_pres_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
struct clk_master *master = to_clk_master(hw);
+ struct clk_rate_request req_parent = *req;
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ struct clk_hw *parent;
+ long best_rate = LONG_MIN, best_diff = LONG_MIN;
+ u32 pres;
+ int i;
+
+ if (master->chg_pid < 0)
+ return -EOPNOTSUPP;
+
+ parent = clk_hw_get_parent_by_index(hw, master->chg_pid);
+ if (!parent)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i <= MASTER_PRES_MAX; i++) {
+ if (characteristics->have_div3_pres && i == MASTER_PRES_MAX)
+ pres = 3;
+ else
+ pres = 1 << i;
+
+ req_parent.rate = req->rate * pres;
+ if (__clk_determine_rate(parent, &req_parent))
+ continue;
+
+ clk_sama7g5_master_best_diff(req, parent, req_parent.rate,
+ &best_rate, &best_diff, i);
+ if (!best_diff)
+ break;
+ }
+
+ return 0;
+}
+
+static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int pres;
+
+ pres = DIV_ROUND_CLOSEST(parent_rate, rate);
+ if (pres > MASTER_PRES_MAX)
+ return -EINVAL;
+
+ if (pres == 3)
+ pres = MASTER_PRES_MAX;
+ else
+ pres = ffs(pres) - 1;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_update_bits(master->regmap, master->layout->offset,
+ (MASTER_PRES_MASK << master->layout->pres_shift),
+ (pres << master->layout->pres_shift));
+
+ while (!clk_master_ready(master))
+ cpu_relax();
+ spin_unlock_irqrestore(master->lock, flags);
+
+ return 0;
+}
+
+static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+ const struct clk_master_characteristics *characteristics =
+ master->characteristics;
+ unsigned long flags;
+ unsigned int val, pres;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &val);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == MASTER_PRES_MAX && characteristics->have_div3_pres)
+ pres = 3;
+ else
+ pres = (1 << pres);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, pres);
+}
+
+static u8 clk_master_pres_get_parent(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
unsigned int mckr;
+ spin_lock_irqsave(master->lock, flags);
regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
return mckr & AT91_PMC_CSS;
}
-static const struct clk_ops master_ops = {
+static const struct clk_ops master_pres_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
- .recalc_rate = clk_master_recalc_rate,
- .get_parent = clk_master_get_parent,
+ .recalc_rate = clk_master_pres_recalc_rate,
+ .get_parent = clk_master_pres_get_parent,
};
-struct clk_hw * __init
-at91_clk_register_master(struct regmap *regmap,
+static const struct clk_ops master_pres_ops_chg = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .determine_rate = clk_master_pres_determine_rate,
+ .recalc_rate = clk_master_pres_recalc_rate,
+ .get_parent = clk_master_pres_get_parent,
+ .set_rate = clk_master_pres_set_rate,
+};
+
+static struct clk_hw * __init
+at91_clk_register_master_internal(struct regmap *regmap,
const char *name, int num_parents,
const char **parent_names,
const struct clk_master_layout *layout,
- const struct clk_master_characteristics *characteristics)
+ const struct clk_master_characteristics *characteristics,
+ const struct clk_ops *ops, spinlock_t *lock, u32 flags,
+ int chg_pid)
{
struct clk_master *master;
struct clk_init_data init;
struct clk_hw *hw;
int ret;
- if (!name || !num_parents || !parent_names)
+ if (!name || !num_parents || !parent_names || !lock)
return ERR_PTR(-EINVAL);
master = kzalloc(sizeof(*master), GFP_KERNEL);
@@ -142,15 +368,17 @@ at91_clk_register_master(struct regmap *regmap,
return ERR_PTR(-ENOMEM);
init.name = name;
- init.ops = &master_ops;
+ init.ops = ops;
init.parent_names = parent_names;
init.num_parents = num_parents;
- init.flags = 0;
+ init.flags = flags;
master->hw.init = &init;
master->layout = layout;
master->characteristics = characteristics;
master->regmap = regmap;
+ master->chg_pid = chg_pid;
+ master->lock = lock;
hw = &master->hw;
ret = clk_hw_register(NULL, &master->hw);
@@ -162,37 +390,54 @@ at91_clk_register_master(struct regmap *regmap,
return hw;
}
-static unsigned long
-clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
+struct clk_hw * __init
+at91_clk_register_master_pres(struct regmap *regmap,
+ const char *name, int num_parents,
+ const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags, int chg_pid)
{
- struct clk_master *master = to_clk_master(hw);
+ const struct clk_ops *ops;
- return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
+ if (flags & CLK_SET_RATE_GATE)
+ ops = &master_pres_ops;
+ else
+ ops = &master_pres_ops_chg;
+
+ return at91_clk_register_master_internal(regmap, name, num_parents,
+ parent_names, layout,
+ characteristics, ops,
+ lock, flags, chg_pid);
}
-static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
- struct clk_hw *parent,
- unsigned long parent_rate,
- long *best_rate,
- long *best_diff,
- u32 div)
+struct clk_hw * __init
+at91_clk_register_master_div(struct regmap *regmap,
+ const char *name, const char *parent_name,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags)
{
- unsigned long tmp_rate, tmp_diff;
+ const struct clk_ops *ops;
- if (div == MASTER_PRES_MAX)
- tmp_rate = parent_rate / 3;
+ if (flags & CLK_SET_RATE_GATE)
+ ops = &master_div_ops;
else
- tmp_rate = parent_rate >> div;
+ ops = &master_div_ops_chg;
- tmp_diff = abs(req->rate - tmp_rate);
+ return at91_clk_register_master_internal(regmap, name, 1,
+ &parent_name, layout,
+ characteristics, ops,
+ lock, flags, -EINVAL);
+}
- if (*best_diff < 0 || *best_diff >= tmp_diff) {
- *best_rate = tmp_rate;
- *best_diff = tmp_diff;
- req->best_parent_rate = parent_rate;
- req->best_parent_hw = parent;
- }
+static unsigned long
+clk_sama7g5_master_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div));
}
static int clk_sama7g5_master_determine_rate(struct clk_hw *hw,
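
Two details of the refactored clk-master.c are worth noting. First, every
MCKR access is now serialized on master->lock, because the pres and div
fields of the same register are owned by two separate clk_hw objects.
Second, clk_master_div_set_rate() maps a requested divisor value back to a
register index by scanning characteristics->divisors[]. That lookup,
rendered as a small standalone C program (illustrative only, using the
five-entry sama7g5 table):

    #include <stdio.h>

    static const unsigned int divisors[] = { 1, 2, 4, 3, 5 };

    /* Return the register index encoding divisor d, or -1 if invalid. */
    static int div_to_index(unsigned int d)
    {
        unsigned int i;

        for (i = 0; i < sizeof(divisors) / sizeof(divisors[0]); i++)
            if (divisors[i] == d)
                return i;
        return -1;
    }

    int main(void)
    {
        printf("/3 -> index %d\n", div_to_index(3)); /* prints 3 */
        return 0;
    }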
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 78f458a7b2ef..34e3ab13741a 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -229,6 +229,57 @@ static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true);
}
+static int sam9x60_frac_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+ struct regmap *regmap = core->regmap;
+ unsigned long irqflags;
+ unsigned int val, cfrac, cmul;
+ long ret;
+
+ ret = sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true);
+ if (ret <= 0)
+ return ret;
+
+ spin_lock_irqsave(core->lock, irqflags);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val);
+ cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift;
+ cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift;
+
+ if (cmul == frac->mul && cfrac == frac->frac)
+ goto unlock;
+
+ regmap_write(regmap, AT91_PMC_PLL_CTRL1,
+ (frac->mul << core->layout->mul_shift) |
+ (frac->frac << core->layout->frac_shift));
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL,
+ AT91_PMC_PLL_CTRL0_ENLOCK |
+ AT91_PMC_PLL_CTRL0_ENPLL);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
+
+unlock:
+ spin_unlock_irqrestore(core->lock, irqflags);
+
+ return ret;
+}
+
static const struct clk_ops sam9x60_frac_pll_ops = {
.prepare = sam9x60_frac_pll_prepare,
.unprepare = sam9x60_frac_pll_unprepare,
@@ -238,6 +289,15 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.set_rate = sam9x60_frac_pll_set_rate,
};
+static const struct clk_ops sam9x60_frac_pll_ops_chg = {
+ .prepare = sam9x60_frac_pll_prepare,
+ .unprepare = sam9x60_frac_pll_unprepare,
+ .is_prepared = sam9x60_frac_pll_is_prepared,
+ .recalc_rate = sam9x60_frac_pll_recalc_rate,
+ .round_rate = sam9x60_frac_pll_round_rate,
+ .set_rate = sam9x60_frac_pll_set_rate_chg,
+};
+
static int sam9x60_div_pll_prepare(struct clk_hw *hw)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
@@ -384,6 +444,44 @@ static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+ struct regmap *regmap = core->regmap;
+ unsigned long irqflags;
+ unsigned int val, cdiv;
+
+ div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1;
+
+ spin_lock_irqsave(core->lock, irqflags);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core->id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+ cdiv = (val & core->layout->div_mask) >> core->layout->div_shift;
+
+ /* Stop if nothing changed. */
+ if (cdiv == div->div)
+ goto unlock;
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ core->layout->div_mask,
+ (div->div << core->layout->div_shift));
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
+
+unlock:
+ spin_unlock_irqrestore(core->lock, irqflags);
+
+ return 0;
+}
+
static const struct clk_ops sam9x60_div_pll_ops = {
.prepare = sam9x60_div_pll_prepare,
.unprepare = sam9x60_div_pll_unprepare,
@@ -393,17 +491,26 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.set_rate = sam9x60_div_pll_set_rate,
};
+static const struct clk_ops sam9x60_div_pll_ops_chg = {
+ .prepare = sam9x60_div_pll_prepare,
+ .unprepare = sam9x60_div_pll_unprepare,
+ .is_prepared = sam9x60_div_pll_is_prepared,
+ .recalc_rate = sam9x60_div_pll_recalc_rate,
+ .round_rate = sam9x60_div_pll_round_rate,
+ .set_rate = sam9x60_div_pll_set_rate_chg,
+};
+
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
struct clk_hw *parent_hw, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical)
+ const struct clk_pll_layout *layout, u32 flags)
{
struct sam9x60_frac *frac;
struct clk_hw *hw;
struct clk_init_data init;
- unsigned long parent_rate, flags;
+ unsigned long parent_rate, irqflags;
unsigned int val;
int ret;
@@ -417,10 +524,12 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.ops = &sam9x60_frac_pll_ops;
- init.flags = CLK_SET_RATE_GATE;
- if (critical)
- init.flags |= CLK_IS_CRITICAL;
+ if (flags & CLK_SET_RATE_GATE)
+ init.ops = &sam9x60_frac_pll_ops;
+ else
+ init.ops = &sam9x60_frac_pll_ops_chg;
+
+ init.flags = flags;
frac->core.id = id;
frac->core.hw.init = &init;
@@ -429,7 +538,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
frac->core.regmap = regmap;
frac->core.lock = lock;
- spin_lock_irqsave(frac->core.lock, flags);
+ spin_lock_irqsave(frac->core.lock, irqflags);
if (sam9x60_pll_ready(regmap, id)) {
regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
AT91_PMC_PLL_UPDT_ID_MSK, id);
@@ -457,7 +566,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
goto free;
}
}
- spin_unlock_irqrestore(frac->core.lock, flags);
+ spin_unlock_irqrestore(frac->core.lock, irqflags);
hw = &frac->core.hw;
ret = clk_hw_register(NULL, hw);
@@ -469,7 +578,7 @@ sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
return hw;
free:
- spin_unlock_irqrestore(frac->core.lock, flags);
+ spin_unlock_irqrestore(frac->core.lock, irqflags);
kfree(frac);
return hw;
}
@@ -478,12 +587,12 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical)
+ const struct clk_pll_layout *layout, u32 flags)
{
struct sam9x60_div *div;
struct clk_hw *hw;
struct clk_init_data init;
- unsigned long flags;
+ unsigned long irqflags;
unsigned int val;
int ret;
@@ -497,11 +606,11 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
init.name = name;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.ops = &sam9x60_div_pll_ops;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT;
- if (critical)
- init.flags |= CLK_IS_CRITICAL;
+ if (flags & CLK_SET_RATE_GATE)
+ init.ops = &sam9x60_div_pll_ops;
+ else
+ init.ops = &sam9x60_div_pll_ops_chg;
+ init.flags = flags;
div->core.id = id;
div->core.hw.init = &init;
@@ -510,14 +619,14 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
div->core.regmap = regmap;
div->core.lock = lock;
- spin_lock_irqsave(div->core.lock, flags);
+ spin_lock_irqsave(div->core.lock, irqflags);
regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
AT91_PMC_PLL_UPDT_ID_MSK, id);
regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
div->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val);
- spin_unlock_irqrestore(div->core.lock, flags);
+ spin_unlock_irqrestore(div->core.lock, irqflags);
hw = &div->core.hw;
ret = clk_hw_register(NULL, hw);
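
The _chg variants above exist for PLLs whose flags allow live rate changes
(no CLK_SET_RATE_GATE): instead of requiring an unprepare/prepare cycle,
they reprogram the PLL in place. Under the PMC spinlock the driver selects
the PLL id via AT91_PMC_PLL_UPDT, compares the currently programmed mul/frac
(or div) against the new values, skips the write when nothing changed, and
otherwise writes the new settings, latches them with
AT91_PMC_PLL_UPDT_UPDATE, and spins until the PLL reports lock. In outline
(select_pll, write_settings and latch are shorthand, not real helpers):

    spin_lock_irqsave(core->lock, flags);
    select_pll(regmap, core->id);          /* AT91_PMC_PLL_UPDT id field */
    if (settings_changed) {
        write_settings(regmap);            /* PLL_CTRL0 / PLL_CTRL1 */
        latch(regmap, core->id);           /* AT91_PMC_PLL_UPDT_UPDATE */
        while (!sam9x60_pll_ready(regmap, core->id))
            cpu_relax();
    }
    spin_unlock_irqrestore(core->lock, flags);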
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a50084de97d4..a97b99c2dc12 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -24,6 +24,8 @@
#define GCK_INDEX_DT_AUDIO_PLL 5
+static DEFINE_SPINLOCK(mck_lock);
+
#ifdef CONFIG_HAVE_AT91_AUDIO_PLL
static void __init of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np)
{
@@ -388,9 +390,16 @@ of_at91_clk_master_setup(struct device_node *np,
if (IS_ERR(regmap))
return;
- hw = at91_clk_register_master(regmap, name, num_parents,
- parent_names, layout,
- characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", num_parents,
+ parent_names, layout,
+ characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto out_free_characteristics;
+
+ hw = at91_clk_register_master_div(regmap, name, "masterck_pres",
+ layout, characteristics,
+ &mck_lock, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto out_free_characteristics;
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index 7b86affc6d7c..a49076c804a9 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -48,7 +48,7 @@ extern const struct clk_master_layout at91sam9x5_master_layout;
struct clk_master_characteristics {
struct clk_range output;
- u32 divisors[4];
+ u32 divisors[5];
u8 have_div3_pres;
};
@@ -155,10 +155,18 @@ at91_clk_register_sam9x5_main(struct regmap *regmap, const char *name,
const char **parent_names, int num_parents);
struct clk_hw * __init
-at91_clk_register_master(struct regmap *regmap, const char *name,
- int num_parents, const char **parent_names,
- const struct clk_master_layout *layout,
- const struct clk_master_characteristics *characteristics);
+at91_clk_register_master_pres(struct regmap *regmap, const char *name,
+ int num_parents, const char **parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags, int chg_pid);
+
+struct clk_hw * __init
+at91_clk_register_master_div(struct regmap *regmap, const char *name,
+ const char *parent_names,
+ const struct clk_master_layout *layout,
+ const struct clk_master_characteristics *characteristics,
+ spinlock_t *lock, u32 flags);
struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
@@ -190,14 +198,14 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical);
+ const struct clk_pll_layout *layout, u32 flags);
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name,
struct clk_hw *parent_hw, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, bool critical);
+ const struct clk_pll_layout *layout, u32 flags);
struct clk_hw * __init
at91_clk_register_programmable(struct regmap *regmap, const char *name,
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 3c4c95603595..5f6fa89571b7 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -8,6 +8,7 @@
#include "pmc.h"
static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(mck_lock);
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 140000000, .max = 200000000 },
@@ -76,11 +77,11 @@ static const struct {
char *p;
u8 id;
} sam9x60_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
{ .n = "uhpck", .p = "usbck", .id = 6 },
{ .n = "pck0", .p = "prog0", .id = 8 },
{ .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "qspick", .p = "masterck", .id = 19 },
+ { .n = "qspick", .p = "masterck_div", .id = 19 },
};
static const struct {
@@ -174,7 +175,6 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
struct regmap *regmap;
struct clk_hw *hw;
int i;
- bool bypass;
i = of_property_match_string(np, "clock-names", "td_slck");
if (i < 0)
@@ -209,10 +209,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
if (IS_ERR(hw))
goto err_free;
- bypass = of_property_read_bool(np, "atmel,osc-bypass");
-
- hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name,
- bypass);
+ hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, 0);
if (IS_ERR(hw))
goto err_free;
main_osc_hw = hw;
@@ -228,13 +225,24 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "pllack_fracck",
"mainck", sam9x60_pmc->chws[PMC_MAIN],
0, &plla_characteristics,
- &pll_frac_layout, true);
+ &pll_frac_layout,
+ /*
+ * This feeds pllack_divck which
+ * feeds the CPU. It should not be
+ * disabled.
+ */
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "pllack_divck",
"pllack_fracck", 0, &plla_characteristics,
- &pll_div_layout, true);
+ &pll_div_layout,
+ /*
+ * This feeds the CPU. It should not
+ * be disabled.
+ */
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -243,13 +251,16 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "upllck_fracck",
"main_osc", main_osc_hw, 1,
&upll_characteristics,
- &pll_frac_layout, false);
+ &pll_frac_layout, CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "upllck_divck",
"upllck_fracck", 1, &upll_characteristics,
- &pll_div_layout, false);
+ &pll_div_layout,
+ CLK_SET_RATE_GATE |
+ CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT);
if (IS_ERR(hw))
goto err_free;
@@ -258,9 +269,17 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = "mainck";
parent_names[2] = "pllack_divck";
- hw = at91_clk_register_master(regmap, "masterck", 3, parent_names,
- &sam9x60_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 3,
+ parent_names, &sam9x60_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres", &sam9x60_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -276,7 +295,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "masterck";
+ parent_names[3] = "masterck_div";
parent_names[4] = "pllack_divck";
parent_names[5] = "upllck_divck";
for (i = 0; i < 2; i++) {
@@ -308,7 +327,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sam9x60_pcr_layout,
sam9x60_periphck[i].n,
- "masterck",
+ "masterck_div",
sam9x60_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 8b220762941a..9a5cbc7cd55a 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 124000000, .max = 166000000 },
.divisors = { 1, 2, 4, 3 },
@@ -40,14 +42,14 @@ static const struct {
char *p;
u8 id;
} sama5d2_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "pck2", .p = "prog2", .id = 10 },
- { .n = "iscck", .p = "masterck", .id = 18 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "iscck", .p = "masterck_div", .id = 18 },
};
static const struct {
@@ -235,15 +237,25 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
sama5d2_pmc->chws[PMC_MCK] = hw;
- hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck");
+ hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck_div");
if (IS_ERR(hw))
goto err_free;
@@ -259,7 +271,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < 3; i++) {
char name[6];
@@ -290,7 +302,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d2_pcr_layout,
sama5d2_periphck[i].n,
- "masterck",
+ "masterck_div",
sama5d2_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
@@ -317,7 +329,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
parent_names[5] = "audiopll_pmcck";
for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) {
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index 7c6e0a5b9dc8..87009ee8effc 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 0, .max = 166000000 },
.divisors = { 1, 2, 4, 3 },
@@ -40,14 +42,14 @@ static const struct {
char *p;
u8 id;
} sama5d3_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "smdck", .p = "smdclk", .id = 4 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
};
static const struct {
@@ -170,9 +172,19 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
@@ -192,7 +204,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 3; i++) {
char name[6];
@@ -222,7 +234,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d3_pcr_layout,
sama5d3_periphck[i].n,
- "masterck",
+ "masterck_div",
sama5d3_periphck[i].id,
&sama5d3_periphck[i].r,
INT_MIN);
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 92d8d4141b43..57fff790188b 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -7,6 +7,8 @@
#include "pmc.h"
+static DEFINE_SPINLOCK(mck_lock);
+
static const struct clk_master_characteristics mck_characteristics = {
.output = { .min = 125000000, .max = 200000000 },
.divisors = { 1, 2, 4, 3 },
@@ -39,14 +41,14 @@ static const struct {
char *p;
u8 id;
} sama5d4_systemck[] = {
- { .n = "ddrck", .p = "masterck", .id = 2 },
- { .n = "lcdck", .p = "masterck", .id = 3 },
- { .n = "smdck", .p = "smdclk", .id = 4 },
- { .n = "uhpck", .p = "usbck", .id = 6 },
- { .n = "udpck", .p = "usbck", .id = 7 },
- { .n = "pck0", .p = "prog0", .id = 8 },
- { .n = "pck1", .p = "prog1", .id = 9 },
- { .n = "pck2", .p = "prog2", .id = 10 },
+ { .n = "ddrck", .p = "masterck_div", .id = 2 },
+ { .n = "lcdck", .p = "masterck_div", .id = 3 },
+ { .n = "smdck", .p = "smdclk", .id = 4 },
+ { .n = "uhpck", .p = "usbck", .id = 6 },
+ { .n = "udpck", .p = "usbck", .id = 7 },
+ { .n = "pck0", .p = "prog0", .id = 8 },
+ { .n = "pck1", .p = "prog1", .id = 9 },
+ { .n = "pck2", .p = "prog2", .id = 10 },
};
static const struct {
@@ -185,15 +187,25 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- hw = at91_clk_register_master(regmap, "masterck", 4, parent_names,
- &at91sam9x5_master_layout,
- &mck_characteristics);
+ hw = at91_clk_register_master_pres(regmap, "masterck_pres", 4,
+ parent_names,
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE, INT_MIN);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ hw = at91_clk_register_master_div(regmap, "masterck_div",
+ "masterck_pres",
+ &at91sam9x5_master_layout,
+ &mck_characteristics, &mck_lock,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
goto err_free;
sama5d4_pmc->chws[PMC_MCK] = hw;
- hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck");
+ hw = at91_clk_register_h32mx(regmap, "h32mxck", "masterck_div");
if (IS_ERR(hw))
goto err_free;
@@ -215,7 +227,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
parent_names[1] = "mainck";
parent_names[2] = "plladivck";
parent_names[3] = "utmick";
- parent_names[4] = "masterck";
+ parent_names[4] = "masterck_div";
for (i = 0; i < 3; i++) {
char name[6];
@@ -245,7 +257,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock,
&sama5d4_pcr_layout,
sama5d4_periphck[i].n,
- "masterck",
+ "masterck_div",
sama5d4_periphck[i].id,
&range, INT_MIN);
if (IS_ERR(hw))
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index 0db2ab3eca14..a6e20b35960e 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -32,6 +32,7 @@
} while (0)
static DEFINE_SPINLOCK(pmc_pll_lock);
+static DEFINE_SPINLOCK(pmc_mck0_lock);
static DEFINE_SPINLOCK(pmc_mckX_lock);
/**
@@ -89,118 +90,198 @@ static const struct clk_pll_layout pll_layout_divio = {
.endiv_shift = 30,
};
+/*
+ * CPU PLL output range.
+ * Note: the upper limit is set to 1000000002 because the hardware
+ * block cannot output exactly 1 GHz.
+ */
+static const struct clk_range cpu_pll_outputs[] = {
+ { .min = 2343750, .max = 1000000002 },
+};
+
+/* PLL output range. */
+static const struct clk_range pll_outputs[] = {
+ { .min = 2343750, .max = 1200000000 },
+};
+
+/* CPU PLL characteristics. */
+static const struct clk_pll_characteristics cpu_pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(cpu_pll_outputs),
+ .output = cpu_pll_outputs,
+};
+
+/* PLL characteristics. */
+static const struct clk_pll_characteristics pll_characteristics = {
+ .input = { .min = 12000000, .max = 50000000 },
+ .num_output = ARRAY_SIZE(pll_outputs),
+ .output = pll_outputs,
+};
+
/**
* PLL clocks description
* @n: clock name
* @p: clock parent
* @l: clock layout
+ * @c: clock characteristics
* @t: clock type
- * @f: true if clock is critical and cannot be disabled
+ * @f: clock flags
* @eid: export index in sama7g5->chws[] array
*/
static const struct {
const char *n;
const char *p;
const struct clk_pll_layout *l;
+ const struct clk_pll_characteristics *c;
+ unsigned long f;
u8 t;
- u8 c;
u8 eid;
} sama7g5_plls[][PLL_ID_MAX] = {
[PLL_ID_CPU] = {
{ .n = "cpupll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
+ .c = &cpu_pll_characteristics,
.t = PLL_TYPE_FRAC,
- .c = 1, },
+ /*
+ * This feeds cpupll_divpmcck which feeds the CPU. It
+ * should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL, },
{ .n = "cpupll_divpmcck",
.p = "cpupll_fracck",
.l = &pll_layout_divpmc,
+ .c = &cpu_pll_characteristics,
.t = PLL_TYPE_DIV,
- .c = 1, },
+ /* This feeds the CPU. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .eid = PMC_CPUPLL, },
},
[PLL_ID_SYS] = {
{ .n = "syspll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
+ .c = &pll_characteristics,
.t = PLL_TYPE_FRAC,
- .c = 1, },
+ /*
+ * This feeds syspll_divpmcck which may feed critical parts
+ * of the system, such as timers. Therefore it should not be
+ * disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
{ .n = "syspll_divpmcck",
.p = "syspll_fracck",
.l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .c = 1, },
+ /*
+ * This may feed critical parts of the system, such as timers.
+ * Therefore it should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE,
+ .eid = PMC_SYSPLL, },
},
[PLL_ID_DDR] = {
{ .n = "ddrpll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
+ .c = &pll_characteristics,
.t = PLL_TYPE_FRAC,
- .c = 1, },
+ /*
+ * This feeds ddrpll_divpmcck which feeds the DDR controller.
+ * It should not be disabled.
+ */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
{ .n = "ddrpll_divpmcck",
.p = "ddrpll_fracck",
.l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .c = 1, },
+ /* This feeds the DDR controller. It should not be disabled. */
+ .f = CLK_IS_CRITICAL | CLK_SET_RATE_GATE, },
},
[PLL_ID_IMG] = {
{ .n = "imgpll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "imgpll_divpmcck",
.p = "imgpll_fracck",
.l = &pll_layout_divpmc,
- .t = PLL_TYPE_DIV, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT, },
},
[PLL_ID_BAUD] = {
{ .n = "baudpll_fracck",
.p = "mainck",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "baudpll_divpmcck",
.p = "baudpll_fracck",
.l = &pll_layout_divpmc,
- .t = PLL_TYPE_DIV, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT, },
},
[PLL_ID_AUDIO] = {
{ .n = "audiopll_fracck",
.p = "main_xtal",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "audiopll_divpmcck",
.p = "audiopll_fracck",
.l = &pll_layout_divpmc,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .eid = PMC_I2S0_MUX, },
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOPMCPLL, },
{ .n = "audiopll_diviock",
.p = "audiopll_fracck",
.l = &pll_layout_divio,
+ .c = &pll_characteristics,
.t = PLL_TYPE_DIV,
- .eid = PMC_I2S1_MUX, },
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT,
+ .eid = PMC_AUDIOIOPLL, },
},
[PLL_ID_ETH] = {
{ .n = "ethpll_fracck",
.p = "main_xtal",
.l = &pll_layout_frac,
- .t = PLL_TYPE_FRAC, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_FRAC,
+ .f = CLK_SET_RATE_GATE, },
{ .n = "ethpll_divpmcck",
.p = "ethpll_fracck",
.l = &pll_layout_divpmc,
- .t = PLL_TYPE_DIV, },
+ .c = &pll_characteristics,
+ .t = PLL_TYPE_DIV,
+ .f = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
+ CLK_SET_RATE_PARENT, },
},
};
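
With the characteristics pointer now stored per table row, the CPU PLL can
advertise the narrower cpu_pll_outputs window while the other PLLs keep
pll_outputs. The constraint itself is just a min/max clamp on struct
clk_range; a trivial sketch (hypothetical helper, not the driver's code):

    static bool rate_in_range(const struct clk_range *r, unsigned long rate)
    {
        return rate >= r->min && rate <= r->max;
    }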
@@ -245,7 +326,7 @@ static const struct {
.ep = { "syspll_divpmcck", "ddrpll_divpmcck", "imgpll_divpmcck", },
.ep_mux_table = { 5, 6, 7, },
.ep_count = 3,
- .ep_chg_id = 6, },
+ .ep_chg_id = 5, },
{ .n = "mck4",
.id = 4,
@@ -278,7 +359,7 @@ static const struct {
};
/* Mux table for programmable clocks. */
-static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, };
+static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 5, 6, 7, 8, 9, 10, };
/**
* Peripheral clock description
@@ -401,7 +482,7 @@ static const struct {
.pp = { "audiopll_divpmcck", },
.pp_mux_table = { 9, },
.pp_count = 1,
- .pp_chg_id = 4, },
+ .pp_chg_id = 3, },
{ .n = "csi_gclk",
.id = 33,
@@ -513,7 +594,7 @@ static const struct {
.pp = { "ethpll_divpmcck", },
.pp_mux_table = { 10, },
.pp_count = 1,
- .pp_chg_id = 4, },
+ .pp_chg_id = 3, },
{ .n = "gmac1_gclk",
.id = 52,
@@ -545,7 +626,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "i2smcc1_gclk",
.id = 58,
@@ -553,7 +634,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "mcan0_gclk",
.id = 61,
@@ -695,7 +776,7 @@ static const struct {
.pp = { "syspll_divpmcck", "baudpll_divpmcck", },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "sdmmc1_gclk",
.id = 81,
@@ -703,7 +784,7 @@ static const struct {
.pp = { "syspll_divpmcck", "baudpll_divpmcck", },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "sdmmc2_gclk",
.id = 82,
@@ -711,7 +792,7 @@ static const struct {
.pp = { "syspll_divpmcck", "baudpll_divpmcck", },
.pp_mux_table = { 5, 8, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "spdifrx_gclk",
.id = 84,
@@ -719,7 +800,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "spdiftx_gclk",
.id = 85,
@@ -727,7 +808,7 @@ static const struct {
.pp = { "syspll_divpmcck", "audiopll_divpmcck", },
.pp_mux_table = { 5, 9, },
.pp_count = 2,
- .pp_chg_id = 5, },
+ .pp_chg_id = 4, },
{ .n = "tcb0_ch0_gclk",
.id = 88,
@@ -758,28 +839,16 @@ static const struct {
.pp_chg_id = INT_MIN, },
};
-/* PLL output range. */
-static const struct clk_range pll_outputs[] = {
- { .min = 2343750, .max = 1200000000 },
-};
-
-/* PLL characteristics. */
-static const struct clk_pll_characteristics pll_characteristics = {
- .input = { .min = 12000000, .max = 50000000 },
- .num_output = ARRAY_SIZE(pll_outputs),
- .output = pll_outputs,
-};
-
/* MCK0 characteristics. */
static const struct clk_master_characteristics mck0_characteristics = {
- .output = { .min = 140000000, .max = 200000000 },
- .divisors = { 1, 2, 4, 3 },
+ .output = { .min = 50000000, .max = 200000000 },
+ .divisors = { 1, 2, 4, 3, 5 },
.have_div3_pres = 1,
};
/* MCK0 layout. */
static const struct clk_master_layout mck0_layout = {
- .mask = 0x373,
+ .mask = 0x773,
.pres_shift = 4,
.offset = 0x28,
};
@@ -835,10 +904,10 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (IS_ERR(regmap))
return;
- sama7g5_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1,
+ sama7g5_pmc = pmc_data_allocate(PMC_CPU + 1,
nck(sama7g5_systemck),
nck(sama7g5_periphck),
- nck(sama7g5_gck));
+ nck(sama7g5_gck), 8);
if (!sama7g5_pmc)
return;
@@ -886,18 +955,18 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
hw = sam9x60_clk_register_frac_pll(regmap,
&pmc_pll_lock, sama7g5_plls[i][j].n,
sama7g5_plls[i][j].p, parent_hw, i,
- &pll_characteristics,
+ sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
- sama7g5_plls[i][j].c);
+ sama7g5_plls[i][j].f);
break;
case PLL_TYPE_DIV:
hw = sam9x60_clk_register_div_pll(regmap,
&pmc_pll_lock, sama7g5_plls[i][j].n,
sama7g5_plls[i][j].p, i,
- &pll_characteristics,
+ sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
- sama7g5_plls[i][j].c);
+ sama7g5_plls[i][j].f);
break;
default:
@@ -912,12 +981,19 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
}
}
- parent_names[0] = md_slck_name;
- parent_names[1] = "mainck";
- parent_names[2] = "cpupll_divpmcck";
- parent_names[3] = "syspll_divpmcck";
- hw = at91_clk_register_master(regmap, "mck0", 4, parent_names,
- &mck0_layout, &mck0_characteristics);
+ parent_names[0] = "cpupll_divpmcck";
+ hw = at91_clk_register_master_pres(regmap, "cpuck", 1, parent_names,
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock,
+ CLK_SET_RATE_PARENT, 0);
+ if (IS_ERR(hw))
+ goto err_free;
+
+ sama7g5_pmc->chws[PMC_CPU] = hw;
+
+ hw = at91_clk_register_master_div(regmap, "mck0", "cpuck",
+ &mck0_layout, &mck0_characteristics,
+ &pmc_mck0_lock, 0);
if (IS_ERR(hw))
goto err_free;
@@ -926,9 +1002,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "mck0";
for (i = 0; i < ARRAY_SIZE(sama7g5_mckx); i++) {
- u8 num_parents = 4 + sama7g5_mckx[i].ep_count;
+ u8 num_parents = 3 + sama7g5_mckx[i].ep_count;
u32 *mux_table;
mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
@@ -936,10 +1011,10 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 4);
- SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_mckx[i].ep_mux_table,
+ SAMA7G5_INIT_TABLE(mux_table, 3);
+ SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_mckx[i].ep_mux_table,
sama7g5_mckx[i].ep_count);
- SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_mckx[i].ep,
+ SAMA7G5_FILL_TABLE(&parent_names[3], sama7g5_mckx[i].ep,
sama7g5_mckx[i].ep_count);
hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n,
@@ -962,24 +1037,25 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "mck0";
- parent_names[4] = "syspll_divpmcck";
- parent_names[5] = "ddrpll_divpmcck";
- parent_names[6] = "imgpll_divpmcck";
- parent_names[7] = "baudpll_divpmcck";
- parent_names[8] = "audiopll_divpmcck";
- parent_names[9] = "ethpll_divpmcck";
+ parent_names[3] = "syspll_divpmcck";
+ parent_names[4] = "ddrpll_divpmcck";
+ parent_names[5] = "imgpll_divpmcck";
+ parent_names[6] = "baudpll_divpmcck";
+ parent_names[7] = "audiopll_divpmcck";
+ parent_names[8] = "ethpll_divpmcck";
for (i = 0; i < 8; i++) {
char name[6];
snprintf(name, sizeof(name), "prog%d", i);
hw = at91_clk_register_programmable(regmap, name, parent_names,
- 10, i,
+ 9, i,
&programmable_layout,
sama7g5_prog_mux_table);
if (IS_ERR(hw))
goto err_free;
+
+ sama7g5_pmc->pchws[i] = hw;
}
for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) {
@@ -1010,9 +1086,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
parent_names[0] = md_slck_name;
parent_names[1] = td_slck_name;
parent_names[2] = "mainck";
- parent_names[3] = "mck0";
for (i = 0; i < ARRAY_SIZE(sama7g5_gck); i++) {
- u8 num_parents = 4 + sama7g5_gck[i].pp_count;
+ u8 num_parents = 3 + sama7g5_gck[i].pp_count;
u32 *mux_table;
mux_table = kmalloc_array(num_parents, sizeof(*mux_table),
@@ -1020,10 +1095,10 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
if (!mux_table)
goto err_free;
- SAMA7G5_INIT_TABLE(mux_table, 4);
- SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_gck[i].pp_mux_table,
+ SAMA7G5_INIT_TABLE(mux_table, 3);
+ SAMA7G5_FILL_TABLE(&mux_table[3], sama7g5_gck[i].pp_mux_table,
sama7g5_gck[i].pp_count);
- SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_gck[i].pp,
+ SAMA7G5_FILL_TABLE(&parent_names[3], sama7g5_gck[i].pp,
sama7g5_gck[i].pp_count);
hw = at91_clk_register_generated(regmap, &pmc_pcr_lock,
@@ -1052,7 +1127,7 @@ err_free:
kfree(alloc_mem);
}
- pmc_data_free(sama7g5_pmc);
+ kfree(sama7g5_pmc);
}
/* Some clks are used for a clocksource */
diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c
index 8333e20dc9d2..e63a42618ac2 100644
--- a/drivers/clk/bcm/clk-bcm2711-dvp.c
+++ b/drivers/clk/bcm/clk-bcm2711-dvp.c
@@ -25,7 +25,6 @@ static const struct clk_parent_data clk_dvp_parent = {
static int clk_dvp_probe(struct platform_device *pdev)
{
struct clk_hw_onecell_data *data;
- struct resource *res;
struct clk_dvp *dvp;
void __iomem *base;
int ret;
@@ -42,7 +41,7 @@ static int clk_dvp_probe(struct platform_device *pdev)
return -ENOMEM;
data = dvp->data;
- base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -108,6 +107,7 @@ static const struct of_device_id clk_dvp_dt_ids[] = {
{ .compatible = "brcm,brcm2711-dvp", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, clk_dvp_dt_ids);
static struct platform_driver clk_dvp_driver = {
.probe = clk_dvp_probe,
diff --git a/drivers/clk/clk-axi-clkgen.c b/drivers/clk/clk-axi-clkgen.c
index 14d803e6af62..ad86e031ba3e 100644
--- a/drivers/clk/clk-axi-clkgen.c
+++ b/drivers/clk/clk-axi-clkgen.c
@@ -46,9 +46,17 @@
#define MMCM_CLK_DIV_DIVIDE BIT(11)
#define MMCM_CLK_DIV_NOCOUNT BIT(12)
+struct axi_clkgen_limits {
+ unsigned int fpfd_min;
+ unsigned int fpfd_max;
+ unsigned int fvco_min;
+ unsigned int fvco_max;
+};
+
struct axi_clkgen {
void __iomem *base;
struct clk_hw clk_hw;
+ struct axi_clkgen_limits limits;
};
static uint32_t axi_clkgen_lookup_filter(unsigned int m)
@@ -100,12 +108,15 @@ static uint32_t axi_clkgen_lookup_lock(unsigned int m)
return 0x1f1f00fa;
}
-static const unsigned int fpfd_min = 10000;
-static const unsigned int fpfd_max = 300000;
-static const unsigned int fvco_min = 600000;
-static const unsigned int fvco_max = 1200000;
+static const struct axi_clkgen_limits axi_clkgen_zynq_default_limits = {
+ .fpfd_min = 10000,
+ .fpfd_max = 300000,
+ .fvco_min = 600000,
+ .fvco_max = 1200000,
+};
-static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
+static void axi_clkgen_calc_params(const struct axi_clkgen_limits *limits,
+ unsigned long fin, unsigned long fout,
unsigned int *best_d, unsigned int *best_m, unsigned int *best_dout)
{
unsigned long d, d_min, d_max, _d_min, _d_max;
@@ -122,12 +133,12 @@ static void axi_clkgen_calc_params(unsigned long fin, unsigned long fout,
*best_m = 0;
*best_dout = 0;
- d_min = max_t(unsigned long, DIV_ROUND_UP(fin, fpfd_max), 1);
- d_max = min_t(unsigned long, fin / fpfd_min, 80);
+ d_min = max_t(unsigned long, DIV_ROUND_UP(fin, limits->fpfd_max), 1);
+ d_max = min_t(unsigned long, fin / limits->fpfd_min, 80);
again:
- fvco_min_fract = fvco_min << fract_shift;
- fvco_max_fract = fvco_max << fract_shift;
+ fvco_min_fract = limits->fvco_min << fract_shift;
+ fvco_max_fract = limits->fvco_max << fract_shift;
m_min = max_t(unsigned long, DIV_ROUND_UP(fvco_min_fract, fin) * d_min, 1);
m_max = min_t(unsigned long, fvco_max_fract * d_max / fin, 64 << fract_shift);
@@ -319,6 +330,7 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
unsigned long rate, unsigned long parent_rate)
{
struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(clk_hw);
+ const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
struct axi_clkgen_div_params params;
uint32_t power = 0;
@@ -328,7 +340,7 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
if (parent_rate == 0 || rate == 0)
return -EINVAL;
- axi_clkgen_calc_params(parent_rate, rate, &d, &m, &dout);
+ axi_clkgen_calc_params(limits, parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
@@ -368,10 +380,12 @@ static int axi_clkgen_set_rate(struct clk_hw *clk_hw,
static long axi_clkgen_round_rate(struct clk_hw *hw, unsigned long rate,
unsigned long *parent_rate)
{
+ struct axi_clkgen *axi_clkgen = clk_hw_to_axi_clkgen(hw);
+ const struct axi_clkgen_limits *limits = &axi_clkgen->limits;
unsigned int d, m, dout;
unsigned long long tmp;
- axi_clkgen_calc_params(*parent_rate, rate, &d, &m, &dout);
+ axi_clkgen_calc_params(limits, *parent_rate, rate, &d, &m, &dout);
if (d == 0 || dout == 0 || m == 0)
return -EINVAL;
@@ -482,17 +496,9 @@ static const struct clk_ops axi_clkgen_ops = {
.get_parent = axi_clkgen_get_parent,
};
-static const struct of_device_id axi_clkgen_ids[] = {
- {
- .compatible = "adi,axi-clkgen-2.00.a",
- },
- { },
-};
-MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
-
static int axi_clkgen_probe(struct platform_device *pdev)
{
- const struct of_device_id *id;
+ const struct axi_clkgen_limits *dflt_limits;
struct axi_clkgen *axi_clkgen;
struct clk_init_data init;
const char *parent_names[2];
@@ -501,11 +507,8 @@ static int axi_clkgen_probe(struct platform_device *pdev)
unsigned int i;
int ret;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- id = of_match_node(axi_clkgen_ids, pdev->dev.of_node);
- if (!id)
+ dflt_limits = device_get_match_data(&pdev->dev);
+ if (!dflt_limits)
return -ENODEV;
axi_clkgen = devm_kzalloc(&pdev->dev, sizeof(*axi_clkgen), GFP_KERNEL);
@@ -527,6 +530,8 @@ static int axi_clkgen_probe(struct platform_device *pdev)
return -EINVAL;
}
+ memcpy(&axi_clkgen->limits, dflt_limits, sizeof(axi_clkgen->limits));
+
clk_name = pdev->dev.of_node->name;
of_property_read_string(pdev->dev.of_node, "clock-output-names",
&clk_name);
@@ -554,6 +559,15 @@ static int axi_clkgen_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id axi_clkgen_ids[] = {
+ {
+ .compatible = "adi,axi-clkgen-2.00.a",
+ .data = &axi_clkgen_zynq_default_limits,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, axi_clkgen_ids);
+
static struct platform_driver axi_clkgen_driver = {
.driver = {
.name = "adi-axi-clkgen",
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 2ddb54f7d3ab..0506046a5f4b 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
@@ -405,3 +406,52 @@ void clk_hw_unregister_composite(struct clk_hw *hw)
kfree(composite);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_composite);
+
+static void devm_clk_hw_release_composite(struct device *dev, void *res)
+{
+ clk_hw_unregister_composite(*(struct clk_hw **)res);
+}
+
+static struct clk_hw *__devm_clk_hw_register_composite(struct device *dev,
+ const char *name, const char * const *parent_names,
+ const struct clk_parent_data *pdata, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ struct clk_hw **ptr, *hw;
+
+ ptr = devres_alloc(devm_clk_hw_release_composite, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hw = __clk_hw_register_composite(dev, name, parent_names, pdata,
+ num_parents, mux_hw, mux_ops, rate_hw,
+ rate_ops, gate_hw, gate_ops, flags);
+
+ if (!IS_ERR(hw)) {
+ *ptr = hw;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hw;
+}
+
+struct clk_hw *devm_clk_hw_register_composite_pdata(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data,
+ int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags)
+{
+ return __devm_clk_hw_register_composite(dev, name, NULL, parent_data,
+ num_parents, mux_hw, mux_ops,
+ rate_hw, rate_ops, gate_hw,
+ gate_ops, flags);
+}
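
A hedged usage sketch of the devres wrapper added above: only
devm_clk_hw_register_composite_pdata() and devm_of_clk_add_hw_provider() are real
APIs from this series; the driver name and the pdata/div_hw/gate_hw objects are
assumed driver-private placeholders. The clk-fsl-sai hunk further below is exactly
this conversion.

    static int foo_clk_probe(struct platform_device *pdev)
    {
    	struct clk_hw *hw;

    	/* pdata, div_hw and gate_hw stand for driver-private objects. */
    	hw = devm_clk_hw_register_composite_pdata(&pdev->dev, "foo_clk",
    						  &pdata, 1,
    						  NULL, NULL,	/* no mux */
    						  &div_hw, &clk_divider_ops,
    						  &gate_hw, &clk_gate_ops,
    						  CLK_SET_RATE_GATE);
    	if (IS_ERR(hw))
    		return PTR_ERR(hw);

    	/* No unregister needed in .remove: devres handles it on detach. */
    	return devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, hw);
    }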
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8de12cb0c43d..c499799693cc 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -578,3 +579,36 @@ void clk_hw_unregister_divider(struct clk_hw *hw)
kfree(div);
}
EXPORT_SYMBOL_GPL(clk_hw_unregister_divider);
+
+static void devm_clk_hw_release_divider(struct device *dev, void *res)
+{
+ clk_hw_unregister_divider(*(struct clk_hw **)res);
+}
+
+struct clk_hw *__devm_clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock)
+{
+ struct clk_hw **ptr, *hw;
+
+ ptr = devres_alloc(devm_clk_hw_release_divider, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ hw = __clk_hw_register_divider(dev, np, name, parent_name, parent_hw,
+ parent_data, flags, reg, shift, width,
+ clk_divider_flags, table, lock);
+
+ if (!IS_ERR(hw)) {
+ *ptr = hw;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return hw;
+}
+EXPORT_SYMBOL_GPL(__devm_clk_hw_register_divider);
diff --git a/drivers/clk/clk-fsl-flexspi.c b/drivers/clk/clk-fsl-flexspi.c
new file mode 100644
index 000000000000..8432d681e2d9
--- /dev/null
+++ b/drivers/clk/clk-fsl-flexspi.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Layerscape FlexSPI clock driver
+ *
+ * Copyright 2020 Michael Walle <michael@walle.cc>
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+static const struct clk_div_table ls1028a_flexspi_divs[] = {
+ { .val = 0, .div = 1, },
+ { .val = 1, .div = 2, },
+ { .val = 2, .div = 3, },
+ { .val = 3, .div = 4, },
+ { .val = 4, .div = 5, },
+ { .val = 5, .div = 6, },
+ { .val = 6, .div = 7, },
+ { .val = 7, .div = 8, },
+ { .val = 11, .div = 12, },
+ { .val = 15, .div = 16, },
+ { .val = 16, .div = 20, },
+ { .val = 17, .div = 24, },
+ { .val = 18, .div = 28, },
+ { .val = 19, .div = 32, },
+ { .val = 20, .div = 80, },
+ {}
+};
+
+static const struct clk_div_table lx2160a_flexspi_divs[] = {
+ { .val = 1, .div = 2, },
+ { .val = 3, .div = 4, },
+ { .val = 5, .div = 6, },
+ { .val = 7, .div = 8, },
+ { .val = 11, .div = 12, },
+ { .val = 15, .div = 16, },
+ { .val = 16, .div = 20, },
+ { .val = 17, .div = 24, },
+ { .val = 18, .div = 28, },
+ { .val = 19, .div = 32, },
+ { .val = 20, .div = 80, },
+ {}
+};
+
+static int fsl_flexspi_clk_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const char *clk_name = np->name;
+ const char *clk_parent;
+ struct resource *res;
+ void __iomem *reg;
+ struct clk_hw *hw;
+ const struct clk_div_table *divs;
+
+ divs = device_get_match_data(dev);
+ if (!divs)
+ return -ENOENT;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENOENT;
+
+ /*
+ * Can't use devm_ioremap_resource() or devm_of_iomap() because the
+ * resource might already be taken by the parent device.
+ */
+ reg = devm_ioremap(dev, res->start, resource_size(res));
+ if (!reg)
+ return -ENOMEM;
+
+ clk_parent = of_clk_get_parent_name(np, 0);
+ if (!clk_parent)
+ return -EINVAL;
+
+ of_property_read_string(np, "clock-output-names", &clk_name);
+
+ hw = devm_clk_hw_register_divider_table(dev, clk_name, clk_parent, 0,
+ reg, 0, 5, 0, divs, NULL);
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, hw);
+}
+
+static const struct of_device_id fsl_flexspi_clk_dt_ids[] = {
+ { .compatible = "fsl,ls1028a-flexspi-clk", .data = &ls1028a_flexspi_divs },
+ { .compatible = "fsl,lx2160a-flexspi-clk", .data = &lx2160a_flexspi_divs },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_flexspi_clk_dt_ids);
+
+static struct platform_driver fsl_flexspi_clk_driver = {
+ .driver = {
+ .name = "fsl-flexspi-clk",
+ .of_match_table = fsl_flexspi_clk_dt_ids,
+ },
+ .probe = fsl_flexspi_clk_probe,
+};
+module_platform_driver(fsl_flexspi_clk_driver);
+
+MODULE_DESCRIPTION("FlexSPI clock driver for Layerscape SoCs");
+MODULE_AUTHOR("Michael Walle <michael@walle.cc>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c
index 0221180a4dd7..6238fcea0467 100644
--- a/drivers/clk/clk-fsl-sai.c
+++ b/drivers/clk/clk-fsl-sai.c
@@ -58,13 +58,13 @@ static int fsl_sai_clk_probe(struct platform_device *pdev)
/* set clock direction, we are the BCLK master */
writel(CR2_BCD, base + I2S_CR2);
- hw = clk_hw_register_composite_pdata(dev, dev->of_node->name,
- &pdata, 1, NULL, NULL,
- &sai_clk->div.hw,
- &clk_divider_ops,
- &sai_clk->gate.hw,
- &clk_gate_ops,
- CLK_SET_RATE_GATE);
+ hw = devm_clk_hw_register_composite_pdata(dev, dev->of_node->name,
+ &pdata, 1, NULL, NULL,
+ &sai_clk->div.hw,
+ &clk_divider_ops,
+ &sai_clk->gate.hw,
+ &clk_gate_ops,
+ CLK_SET_RATE_GATE);
if (IS_ERR(hw))
return PTR_ERR(hw);
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 86f2e2d3fc02..da2c8eddfd9f 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -147,7 +147,7 @@ static struct platform_driver clk_pwm_driver = {
.remove = clk_pwm_remove,
.driver = {
.name = "pwm-clock",
- .of_match_table = of_match_ptr(clk_pwm_dt_ids),
+ .of_match_table = clk_pwm_dt_ids,
},
};
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
index 46101c6a20f2..70aa521e7e7f 100644
--- a/drivers/clk/clk-qoriq.c
+++ b/drivers/clk/clk-qoriq.c
@@ -7,6 +7,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <dt-bindings/clock/fsl,qoriq-clockgen.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
@@ -1368,33 +1369,33 @@ static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data)
idx = clkspec->args[1];
switch (type) {
- case 0:
+ case QORIQ_CLK_SYSCLK:
if (idx != 0)
goto bad_args;
clk = cg->sysclk;
break;
- case 1:
+ case QORIQ_CLK_CMUX:
if (idx >= ARRAY_SIZE(cg->cmux))
goto bad_args;
clk = cg->cmux[idx];
break;
- case 2:
+ case QORIQ_CLK_HWACCEL:
if (idx >= ARRAY_SIZE(cg->hwaccel))
goto bad_args;
clk = cg->hwaccel[idx];
break;
- case 3:
+ case QORIQ_CLK_FMAN:
if (idx >= ARRAY_SIZE(cg->fman))
goto bad_args;
clk = cg->fman[idx];
break;
- case 4:
+ case QORIQ_CLK_PLATFORM_PLL:
pll = &cg->pll[PLATFORM_PLL];
if (idx >= ARRAY_SIZE(pll->div))
goto bad_args;
clk = pll->div[idx].clk;
break;
- case 5:
+ case QORIQ_CLK_CORECLK:
if (idx != 0)
goto bad_args;
clk = cg->coreclk;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index aa21371f9104..a3e883a9f406 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -195,6 +195,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
return ret;
err_reg:
+ of_node_put(s2mps11_clks[0].clk_np);
while (--i >= 0)
clkdev_drop(s2mps11_clks[i].lookup);
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index 5a9b140dd8c8..a39af7616b13 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -129,7 +129,7 @@ static const struct clk_ops scpi_dvfs_ops = {
.set_rate = scpi_dvfs_set_rate,
};
-static const struct of_device_id scpi_clk_match[] = {
+static const struct of_device_id scpi_clk_match[] __maybe_unused = {
{ .compatible = "arm,scpi-dvfs-clocks", .data = &scpi_dvfs_ops, },
{ .compatible = "arm,scpi-variable-clocks", .data = &scpi_clk_ops, },
{}
diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c
index 1e1702e609cb..57e4597cdf4c 100644
--- a/drivers/clk/clk-si5351.c
+++ b/drivers/clk/clk-si5351.c
@@ -902,6 +902,10 @@ static int _si5351_clkout_set_disable_state(
static void _si5351_clkout_reset_pll(struct si5351_driver_data *drvdata, int num)
{
u8 val = si5351_reg_read(drvdata, SI5351_CLK0_CTRL + num);
+ u8 mask = val & SI5351_CLK_PLL_SELECT ? SI5351_PLL_RESET_B :
+ SI5351_PLL_RESET_A;
+ unsigned int v;
+ int err;
switch (val & SI5351_CLK_INPUT_MASK) {
case SI5351_CLK_INPUT_XTAL:
@@ -909,9 +913,12 @@ static void _si5351_clkout_reset_pll(struct si5351_driver_data *drvdata, int num
return; /* pll not used, no need to reset */
}
- si5351_reg_write(drvdata, SI5351_PLL_RESET,
- val & SI5351_CLK_PLL_SELECT ? SI5351_PLL_RESET_B :
- SI5351_PLL_RESET_A);
+ si5351_reg_write(drvdata, SI5351_PLL_RESET, mask);
+
+ err = regmap_read_poll_timeout(drvdata->regmap, SI5351_PLL_RESET, v,
+ !(v & mask), 0, 20000);
+ if (err < 0)
+ dev_err(&drvdata->client->dev, "Reset bit didn't clear\n");
dev_dbg(&drvdata->client->dev, "%s - %s: pll = %d\n",
__func__, clk_hw_get_name(&drvdata->clkout[num].hw),
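
regmap_read_poll_timeout() (from <linux/regmap.h>) re-reads a register until a
condition holds or a timeout expires; the hunk above busy-polls (sleep of 0) for up
to 20 ms until the PLL reset bit self-clears. A generic sketch of the same pattern,
with hypothetical register and bit names:

    unsigned int v;
    int err;

    /* Re-read STATUS_REG every 100 us until BUSY_BIT clears, up to 10 ms. */
    err = regmap_read_poll_timeout(map, STATUS_REG, v, !(v & BUSY_BIT),
    			       100, 10000);
    if (err == -ETIMEDOUT)
    	dev_err(dev, "device stayed busy\n");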
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index c90460e7ef21..43db67337bc0 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -739,8 +739,8 @@ static int vc5_update_power(struct device_node *np_output,
{
u32 value;
- if (!of_property_read_u32(np_output,
- "idt,voltage-microvolts", &value)) {
+ if (!of_property_read_u32(np_output, "idt,voltage-microvolt",
+ &value)) {
clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_PWR_MASK;
switch (value) {
case 1800000:
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f83dac54ed85..8c1d04db990d 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -420,7 +420,7 @@ static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
{
struct clk_parent_map *entry = &core->parents[index];
- struct clk_core *parent = ERR_PTR(-ENOENT);
+ struct clk_core *parent;
if (entry->hw) {
parent = entry->hw->core;
@@ -2314,6 +2314,8 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
if (!clk)
return 0;
+ trace_clk_set_rate_range(clk->core, min, max);
+
if (min > max) {
pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
__func__, clk->core->name, clk->dev_id, clk->con_id,
@@ -2381,6 +2383,8 @@ int clk_set_min_rate(struct clk *clk, unsigned long rate)
if (!clk)
return 0;
+ trace_clk_set_min_rate(clk->core, rate);
+
return clk_set_rate_range(clk, rate, clk->max_rate);
}
EXPORT_SYMBOL_GPL(clk_set_min_rate);
@@ -2397,6 +2401,8 @@ int clk_set_max_rate(struct clk *clk, unsigned long rate)
if (!clk)
return 0;
+ trace_clk_set_max_rate(clk->core, rate);
+
return clk_set_rate_range(clk, clk->min_rate, rate);
}
EXPORT_SYMBOL_GPL(clk_set_max_rate);
@@ -2931,7 +2937,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
else
seq_puts(s, "-----");
- seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000));
+ seq_printf(s, " %6d", clk_core_get_scaled_duty_cycle(c, 100000));
+
+ if (c->ops->is_enabled)
+ seq_printf(s, " %9c\n", clk_core_is_enabled(c) ? 'Y' : 'N');
+ else if (!c->ops->enable)
+ seq_printf(s, " %9c\n", 'Y');
+ else
+ seq_printf(s, " %9c\n", '?');
}
static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2950,9 +2963,9 @@ static int clk_summary_show(struct seq_file *s, void *data)
struct clk_core *c;
struct hlist_head **lists = (struct hlist_head **)s->private;
- seq_puts(s, " enable prepare protect duty\n");
- seq_puts(s, " clock count count count rate accuracy phase cycle\n");
- seq_puts(s, "---------------------------------------------------------------------------------------------\n");
+ seq_puts(s, " enable prepare protect duty hardware\n");
+ seq_puts(s, " clock count count count rate accuracy phase cycle enable\n");
+ seq_puts(s, "-------------------------------------------------------------------------------------------------------\n");
clk_prepare_lock();
@@ -3667,6 +3680,24 @@ struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
return clk;
}
+/**
+ * clk_hw_get_clk - get clk consumer given a clk_hw
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Returns: new clk consumer
+ * This is the function to be used by providers which need
+ * to get a consumer clk and act on the clock element.
+ * Calls to this function must be balanced with calls to clk_put().
+ */
+struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
+{
+ struct device *dev = hw->core->dev;
+
+ return clk_hw_create_clk(dev, hw, dev_name(dev), con_id);
+}
+EXPORT_SYMBOL(clk_hw_get_clk);
+
static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
{
const char *dst;
@@ -4068,12 +4099,12 @@ void clk_hw_unregister(struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_unregister);
-static void devm_clk_release(struct device *dev, void *res)
+static void devm_clk_unregister_cb(struct device *dev, void *res)
{
clk_unregister(*(struct clk **)res);
}
-static void devm_clk_hw_release(struct device *dev, void *res)
+static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
{
clk_hw_unregister(*(struct clk_hw **)res);
}
@@ -4093,7 +4124,7 @@ struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
struct clk *clk;
struct clk **clkp;
- clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
if (!clkp)
return ERR_PTR(-ENOMEM);
@@ -4123,7 +4154,7 @@ int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
struct clk_hw **hwp;
int ret;
- hwp = devres_alloc(devm_clk_hw_release, sizeof(*hwp), GFP_KERNEL);
+ hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
if (!hwp)
return -ENOMEM;
@@ -4167,7 +4198,7 @@ static int devm_clk_hw_match(struct device *dev, void *res, void *data)
*/
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
- WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
+ WARN_ON(devres_release(dev, devm_clk_unregister_cb, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);
@@ -4182,11 +4213,54 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
*/
void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw)
{
- WARN_ON(devres_release(dev, devm_clk_hw_release, devm_clk_hw_match,
+ WARN_ON(devres_release(dev, devm_clk_hw_unregister_cb, devm_clk_hw_match,
hw));
}
EXPORT_SYMBOL_GPL(devm_clk_hw_unregister);
+static void devm_clk_release(struct device *dev, void *res)
+{
+ clk_put(*(struct clk **)res);
+}
+
+/**
+ * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
+ * @dev: device that is registering this clock
+ * @hw: clk_hw associated with the clk being consumed
+ * @con_id: connection ID string on device
+ *
+ * Managed clk_hw_get_clk(). Clocks obtained with this function are
+ * automatically clk_put() on driver detach. See clk_put()
+ * for more information.
+ */
+struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
+ const char *con_id)
+{
+ struct clk *clk;
+ struct clk **clkp;
+
+ /*
+ * This should not happen because it would mean we have drivers
+ * passing around clk_hw pointers instead of having the caller use
+ * proper clk_get() style APIs.
+ */
+ WARN_ON_ONCE(dev != hw->core->dev);
+
+ clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
+ if (!clkp)
+ return ERR_PTR(-ENOMEM);
+
+ clk = clk_hw_get_clk(hw, con_id);
+ if (!IS_ERR(clk)) {
+ *clkp = clk;
+ devres_add(dev, clkp);
+ } else {
+ devres_free(clkp);
+ }
+
+ return clk;
+}
+EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
+
/*
* clkdev helpers
*/
@@ -4334,6 +4408,42 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);
+struct clk_notifier_devres {
+ struct clk *clk;
+ struct notifier_block *nb;
+};
+
+static void devm_clk_notifier_release(struct device *dev, void *res)
+{
+ struct clk_notifier_devres *devres = res;
+
+ clk_notifier_unregister(devres->clk, devres->nb);
+}
+
+int devm_clk_notifier_register(struct device *dev, struct clk *clk,
+ struct notifier_block *nb)
+{
+ struct clk_notifier_devres *devres;
+ int ret;
+
+ devres = devres_alloc(devm_clk_notifier_release,
+ sizeof(*devres), GFP_KERNEL);
+
+ if (!devres)
+ return -ENOMEM;
+
+ ret = clk_notifier_register(clk, nb);
+ if (!ret) {
+ devres->clk = clk;
+ devres->nb = nb;
+ } else {
+ devres_free(devres);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
+
#ifdef CONFIG_OF
static void clk_core_reparent_orphans(void)
{
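
A hedged usage sketch of the new devm_clk_notifier_register(): only the notifier
API itself comes from the patch; the callback, driver names and foo_nb are
illustrative.

    static int foo_clk_notify(struct notifier_block *nb, unsigned long event,
    			  void *data)
    {
    	struct clk_notifier_data *ndata = data;

    	if (event == PRE_RATE_CHANGE)
    		pr_debug("rate changing from %lu to %lu\n",
    			 ndata->old_rate, ndata->new_rate);
    	return NOTIFY_OK;
    }

    static struct notifier_block foo_nb = { .notifier_call = foo_clk_notify };

    /* In probe: unregistered automatically when the driver detaches. */
    ret = devm_clk_notifier_register(dev, clk, &foo_nb);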
diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c
index 7eed7083f46e..f16c4019f402 100644
--- a/drivers/clk/imx/clk-gate2.c
+++ b/drivers/clk/imx/clk-gate2.c
@@ -30,6 +30,7 @@ struct clk_gate2 {
void __iomem *reg;
u8 bit_idx;
u8 cgr_val;
+ u8 cgr_mask;
u8 flags;
spinlock_t *lock;
unsigned int *share_count;
@@ -37,37 +38,38 @@ struct clk_gate2 {
#define to_clk_gate2(_hw) container_of(_hw, struct clk_gate2, hw)
-static int clk_gate2_enable(struct clk_hw *hw)
+static void clk_gate2_do_shared_clks(struct clk_hw *hw, bool enable)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
u32 reg;
+
+ reg = readl(gate->reg);
+ reg &= ~(gate->cgr_mask << gate->bit_idx);
+ if (enable)
+ reg |= (gate->cgr_val & gate->cgr_mask) << gate->bit_idx;
+ writel(reg, gate->reg);
+}
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+ struct clk_gate2 *gate = to_clk_gate2(hw);
unsigned long flags;
- int ret = 0;
spin_lock_irqsave(gate->lock, flags);
if (gate->share_count && (*gate->share_count)++ > 0)
goto out;
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
- ret = clk_gate_ops.enable(hw);
- } else {
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- reg |= gate->cgr_val << gate->bit_idx;
- writel(reg, gate->reg);
- }
-
+ clk_gate2_do_shared_clks(hw, true);
out:
spin_unlock_irqrestore(gate->lock, flags);
- return ret;
+ return 0;
}
static void clk_gate2_disable(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
- u32 reg;
unsigned long flags;
spin_lock_irqsave(gate->lock, flags);
@@ -79,23 +81,17 @@ static void clk_gate2_disable(struct clk_hw *hw)
goto out;
}
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT) {
- clk_gate_ops.disable(hw);
- } else {
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- writel(reg, gate->reg);
- }
-
+ clk_gate2_do_shared_clks(hw, false);
out:
spin_unlock_irqrestore(gate->lock, flags);
}
-static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
+static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx,
+ u8 cgr_val, u8 cgr_mask)
{
u32 val = readl(reg);
- if (((val >> bit_idx) & 1) == 1)
+ if (((val >> bit_idx) & cgr_mask) == cgr_val)
return 1;
return 0;
@@ -104,29 +100,28 @@ static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
static int clk_gate2_is_enabled(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(gate->lock, flags);
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
- return clk_gate_ops.is_enabled(hw);
+ ret = clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx,
+ gate->cgr_val, gate->cgr_mask);
- return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return ret;
}
static void clk_gate2_disable_unused(struct clk_hw *hw)
{
struct clk_gate2 *gate = to_clk_gate2(hw);
unsigned long flags;
- u32 reg;
-
- if (gate->flags & IMX_CLK_GATE2_SINGLE_BIT)
- return;
spin_lock_irqsave(gate->lock, flags);
- if (!gate->share_count || *gate->share_count == 0) {
- reg = readl(gate->reg);
- reg &= ~(3 << gate->bit_idx);
- writel(reg, gate->reg);
- }
+ if (!gate->share_count || *gate->share_count == 0)
+ clk_gate2_do_shared_clks(hw, false);
spin_unlock_irqrestore(gate->lock, flags);
}
@@ -140,7 +135,7 @@ static const struct clk_ops clk_gate2_ops = {
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 bit_idx, u8 cgr_val,
+ void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 cgr_mask,
u8 clk_gate2_flags, spinlock_t *lock,
unsigned int *share_count)
{
@@ -157,6 +152,7 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
gate->reg = reg;
gate->bit_idx = bit_idx;
gate->cgr_val = cgr_val;
+ gate->cgr_mask = cgr_mask;
gate->flags = clk_gate2_flags;
gate->lock = lock;
gate->share_count = share_count;
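
A worked example of the cgr_val/cgr_mask pair introduced above, using the common
2-bit i.MX CCGR field as the assumed layout:

    /*
     * bit_idx = 4, cgr_mask = 0x3, cgr_val = 0x3, initial reg = 0x75:
     *
     *   disable:    reg &= ~(0x3 << 4)            -> 0x45 (field cleared)
     *   enable:     reg |= (0x3 & 0x3) << 4       -> 0x75 (field = cgr_val)
     *   is_enabled: ((0x75 >> 4) & 0x3) == 0x3    -> true
     */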
diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c
index f358ad907299..7c905861af5d 100644
--- a/drivers/clk/imx/clk-imx8mm.c
+++ b/drivers/clk/imx/clk-imx8mm.c
@@ -653,7 +653,7 @@ static struct platform_driver imx8mm_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mm_clk_of_match),
+ .of_match_table = imx8mm_clk_of_match,
},
};
module_platform_driver(imx8mm_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c
index f3c5e6cf55dd..3c21db942d5b 100644
--- a/drivers/clk/imx/clk-imx8mn.c
+++ b/drivers/clk/imx/clk-imx8mn.c
@@ -604,7 +604,7 @@ static struct platform_driver imx8mn_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mn_clk_of_match),
+ .of_match_table = imx8mn_clk_of_match,
},
};
module_platform_driver(imx8mn_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c
index 48e212477f52..2f4e1d674e1c 100644
--- a/drivers/clk/imx/clk-imx8mp.c
+++ b/drivers/clk/imx/clk-imx8mp.c
@@ -425,7 +425,7 @@ static struct clk **uart_clks[ARRAY_SIZE(uart_clk_ids) + 1];
static int imx8mp_clocks_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
+ struct device_node *np;
void __iomem *anatop_base, *ccm_base;
int i;
@@ -763,7 +763,7 @@ static struct platform_driver imx8mp_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mp_clk_of_match),
+ .of_match_table = imx8mp_clk_of_match,
},
};
module_platform_driver(imx8mp_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c
index 06292d4a98ff..779ea69e639c 100644
--- a/drivers/clk/imx/clk-imx8mq.c
+++ b/drivers/clk/imx/clk-imx8mq.c
@@ -639,7 +639,7 @@ static struct platform_driver imx8mq_clk_driver = {
* reloading the driver will crash or break devices.
*/
.suppress_bind_attrs = true,
- .of_match_table = of_match_ptr(imx8mq_clk_of_match),
+ .of_match_table = imx8mq_clk_of_match,
},
};
module_platform_driver(imx8mq_clk_driver);
diff --git a/drivers/clk/imx/clk-imx8qxp-lpcg.c b/drivers/clk/imx/clk-imx8qxp-lpcg.c
index e947a70054ac..d3e905cf867d 100644
--- a/drivers/clk/imx/clk-imx8qxp-lpcg.c
+++ b/drivers/clk/imx/clk-imx8qxp-lpcg.c
@@ -9,8 +9,10 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-scu.h"
@@ -157,6 +159,135 @@ static const struct imx8qxp_ss_lpcg imx8qxp_ss_lsio = {
.num_max = IMX_LSIO_LPCG_CLK_END,
};
+#define IMX_LPCG_MAX_CLKS 8
+
+static struct clk_hw *imx_lpcg_of_clk_src_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ struct clk_hw_onecell_data *hw_data = data;
+ unsigned int idx = clkspec->args[0] / 4;
+
+ if (idx >= hw_data->num) {
+ pr_err("%s: invalid index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return hw_data->hws[idx];
+}
+
+static int imx_lpcg_parse_clks_from_dt(struct platform_device *pdev,
+ struct device_node *np)
+{
+ const char *output_names[IMX_LPCG_MAX_CLKS];
+ const char *parent_names[IMX_LPCG_MAX_CLKS];
+ unsigned int bit_offset[IMX_LPCG_MAX_CLKS];
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clk_hws;
+ struct resource *res;
+ void __iomem *base;
+ int count;
+ int idx;
+ int ret;
+ int i;
+
+ if (!of_device_is_compatible(np, "fsl,imx8qxp-lpcg"))
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ count = of_property_count_u32_elems(np, "clock-indices");
+ if (count < 0) {
+ dev_err(&pdev->dev, "failed to count clocks\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The trick here is that we set the number of clks to the MAX
+ * instead of the count from clock-indices, because one LPCG
+ * supports up to 8 clock outputs, each occupying a fixed 4 bits.
+ * The clock can then be found by clock-indices (bit-offset) / 4,
+ * at the cost of only a few unused pointers.
+ */
+
+ clk_data = devm_kzalloc(&pdev->dev, struct_size(clk_data, hws,
+ IMX_LPCG_MAX_CLKS), GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX_LPCG_MAX_CLKS;
+ clk_hws = clk_data->hws;
+
+ ret = of_property_read_u32_array(np, "clock-indices", bit_offset,
+ count);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read clock-indices\n");
+ return -EINVAL;
+ }
+
+ ret = of_clk_parent_fill(np, parent_names, count);
+ if (ret != count) {
+ dev_err(&pdev->dev, "failed to get clock parent names\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_string_array(np, "clock-output-names",
+ output_names, count);
+ if (ret != count) {
+ dev_err(&pdev->dev, "failed to read clock-output-names\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get_noresume(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ for (i = 0; i < count; i++) {
+ idx = bit_offset[i] / 4;
+ if (idx >= IMX_LPCG_MAX_CLKS) {
+ dev_warn(&pdev->dev, "invalid bit offset of clock %d\n",
+ i);
+ ret = -EINVAL;
+ goto unreg;
+ }
+
+ clk_hws[idx] = imx_clk_lpcg_scu_dev(&pdev->dev, output_names[i],
+ parent_names[i], 0, base,
+ bit_offset[i], false);
+ if (IS_ERR(clk_hws[idx])) {
+ dev_warn(&pdev->dev, "failed to register clock %d\n",
+ idx);
+ ret = PTR_ERR(clk_hws[idx]);
+ goto unreg;
+ }
+ }
+
+ ret = devm_of_clk_add_hw_provider(&pdev->dev, imx_lpcg_of_clk_src_get,
+ clk_data);
+ if (ret)
+ goto unreg;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return 0;
+
+unreg:
+ while (--i >= 0) {
+ idx = bit_offset[i] / 4;
+ if (clk_hws[idx])
+ imx_clk_lpcg_scu_unregister(clk_hws[idx]);
+ }
+
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -167,8 +298,14 @@ static int imx8qxp_lpcg_clk_probe(struct platform_device *pdev)
struct resource *res;
struct clk_hw **clks;
void __iomem *base;
+ int ret;
int i;
+ /* try new binding to parse clocks from device tree first */
+ ret = imx_lpcg_parse_clks_from_dt(pdev, np);
+ if (!ret)
+ return 0;
+
ss_lpcg = of_device_get_match_data(dev);
if (!ss_lpcg)
return -ENODEV;
@@ -219,6 +356,7 @@ static const struct of_device_id imx8qxp_lpcg_match[] = {
{ .compatible = "fsl,imx8qxp-lpcg-adma", &imx8qxp_ss_adma, },
{ .compatible = "fsl,imx8qxp-lpcg-conn", &imx8qxp_ss_conn, },
{ .compatible = "fsl,imx8qxp-lpcg-lsio", &imx8qxp_ss_lsio, },
+ { .compatible = "fsl,imx8qxp-lpcg", NULL },
{ /* sentinel */ }
};
@@ -226,6 +364,7 @@ static struct platform_driver imx8qxp_lpcg_clk_driver = {
.driver = {
.name = "imx8qxp-lpcg-clk",
.of_match_table = imx8qxp_lpcg_match,
+ .pm = &imx_clk_lpcg_scu_pm_ops,
.suppress_bind_attrs = true,
},
.probe = imx8qxp_lpcg_clk_probe,
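
The new binding indexes LPCG gates by their register bit offset; dividing by four
(each output owns a 4-bit field) gives the hws[] slot. A small illustration with
hypothetical clock-indices values:

    /*
     * clock-indices = <0 4 16 20>:
     *
     *   bit_offset / 4 -> slot:  0 -> 0, 4 -> 1, 16 -> 4, 20 -> 5
     *
     * A consumer clkspec carries the same bit offset in args[0], so
     * imx_lpcg_of_clk_src_get() recovers the slot with args[0] / 4.
     */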
diff --git a/drivers/clk/imx/clk-imx8qxp.c b/drivers/clk/imx/clk-imx8qxp.c
index d650ca33cdc8..5b3d4ede7c7c 100644
--- a/drivers/clk/imx/clk-imx8qxp.c
+++ b/drivers/clk/imx/clk-imx8qxp.c
@@ -22,9 +22,10 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
struct device_node *ccm_node = pdev->dev.of_node;
struct clk_hw_onecell_data *clk_data;
struct clk_hw **clks;
+ u32 clk_cells;
int ret, i;
- ret = imx_clk_scu_init();
+ ret = imx_clk_scu_init(ccm_node);
if (ret)
return ret;
@@ -33,6 +34,9 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
if (!clk_data)
return -ENOMEM;
+ if (of_property_read_u32(ccm_node, "#clock-cells", &clk_cells))
+ return -EINVAL;
+
clk_data->num = IMX_SCU_CLK_END;
clks = clk_data->hws;
@@ -55,78 +59,78 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
clks[IMX_LSIO_BUS_CLK] = clk_hw_register_fixed_rate(NULL, "lsio_bus_clk_root", NULL, 0, 100000000);
/* ARM core */
- clks[IMX_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU);
+ clks[IMX_A35_CLK] = imx_clk_scu("a35_clk", IMX_SC_R_A35, IMX_SC_PM_CLK_CPU, clk_cells);
/* LSIO SS */
- clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER);
- clks[IMX_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER);
+ clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM1_CLK] = imx_clk_scu("pwm1_clk", IMX_SC_R_PWM_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM2_CLK] = imx_clk_scu("pwm2_clk", IMX_SC_R_PWM_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM3_CLK] = imx_clk_scu("pwm3_clk", IMX_SC_R_PWM_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM4_CLK] = imx_clk_scu("pwm4_clk", IMX_SC_R_PWM_4, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM5_CLK] = imx_clk_scu("pwm5_clk", IMX_SC_R_PWM_5, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM6_CLK] = imx_clk_scu("pwm6_clk", IMX_SC_R_PWM_6, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_PWM7_CLK] = imx_clk_scu("pwm7_clk", IMX_SC_R_PWM_7, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT0_CLK] = imx_clk_scu("gpt0_clk", IMX_SC_R_GPT_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT1_CLK] = imx_clk_scu("gpt1_clk", IMX_SC_R_GPT_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT2_CLK] = imx_clk_scu("gpt2_clk", IMX_SC_R_GPT_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT3_CLK] = imx_clk_scu("gpt3_clk", IMX_SC_R_GPT_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_GPT4_CLK] = imx_clk_scu("gpt4_clk", IMX_SC_R_GPT_4, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_FSPI0_CLK] = imx_clk_scu("fspi0_clk", IMX_SC_R_FSPI_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_LSIO_FSPI1_CLK] = imx_clk_scu("fspi1_clk", IMX_SC_R_FSPI_1, IMX_SC_PM_CLK_PER, clk_cells);
/* ADMA SS */
- clks[IMX_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER);
- clks[IMX_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER);
+ clks[IMX_ADMA_UART0_CLK] = imx_clk_scu("uart0_clk", IMX_SC_R_UART_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_UART1_CLK] = imx_clk_scu("uart1_clk", IMX_SC_R_UART_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_UART2_CLK] = imx_clk_scu("uart2_clk", IMX_SC_R_UART_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_UART3_CLK] = imx_clk_scu("uart3_clk", IMX_SC_R_UART_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI0_CLK] = imx_clk_scu("spi0_clk", IMX_SC_R_SPI_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI1_CLK] = imx_clk_scu("spi1_clk", IMX_SC_R_SPI_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI2_CLK] = imx_clk_scu("spi2_clk", IMX_SC_R_SPI_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_SPI3_CLK] = imx_clk_scu("spi3_clk", IMX_SC_R_SPI_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_CAN0_CLK] = imx_clk_scu("can0_clk", IMX_SC_R_CAN_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C0_CLK] = imx_clk_scu("i2c0_clk", IMX_SC_R_I2C_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C1_CLK] = imx_clk_scu("i2c1_clk", IMX_SC_R_I2C_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C2_CLK] = imx_clk_scu("i2c2_clk", IMX_SC_R_I2C_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_I2C3_CLK] = imx_clk_scu("i2c3_clk", IMX_SC_R_I2C_3, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_FTM0_CLK] = imx_clk_scu("ftm0_clk", IMX_SC_R_FTM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_FTM1_CLK] = imx_clk_scu("ftm1_clk", IMX_SC_R_FTM_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_ADC0_CLK] = imx_clk_scu("adc0_clk", IMX_SC_R_ADC_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_PWM_CLK] = imx_clk_scu("pwm_clk", IMX_SC_R_LCD_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_ADMA_LCD_CLK] = imx_clk_scu("lcd_clk", IMX_SC_R_LCD_0, IMX_SC_PM_CLK_PER, clk_cells);
/* Connectivity */
- clks[IMX_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS);
- clks[IMX_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0);
- clks[IMX_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS);
- clks[IMX_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0);
- clks[IMX_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS);
- clks[IMX_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER);
- clks[IMX_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS);
- clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC);
+ clks[IMX_CONN_SDHC0_CLK] = imx_clk_scu("sdhc0_clk", IMX_SC_R_SDHC_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_SDHC1_CLK] = imx_clk_scu("sdhc1_clk", IMX_SC_R_SDHC_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_SDHC2_CLK] = imx_clk_scu("sdhc2_clk", IMX_SC_R_SDHC_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_ENET0_ROOT_CLK] = imx_clk_scu("enet0_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_ENET0_BYPASS_CLK] = imx_clk_scu("enet0_bypass_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_CONN_ENET0_RGMII_CLK] = imx_clk_scu("enet0_rgmii_clk", IMX_SC_R_ENET_0, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_CONN_ENET1_ROOT_CLK] = imx_clk_scu("enet1_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_ENET1_BYPASS_CLK] = imx_clk_scu("enet1_bypass_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_BYPASS, clk_cells);
+ clks[IMX_CONN_ENET1_RGMII_CLK] = imx_clk_scu("enet1_rgmii_clk", IMX_SC_R_ENET_1, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_CONN_GPMI_BCH_IO_CLK] = imx_clk_scu("gpmi_io_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_MST_BUS, clk_cells);
+ clks[IMX_CONN_GPMI_BCH_CLK] = imx_clk_scu("gpmi_bch_clk", IMX_SC_R_NAND, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_USB2_ACLK] = imx_clk_scu("usb3_aclk_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CONN_USB2_BUS_CLK] = imx_clk_scu("usb3_bus_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MST_BUS, clk_cells);
+ clks[IMX_CONN_USB2_LPM_CLK] = imx_clk_scu("usb3_lpm_div", IMX_SC_R_USB_2, IMX_SC_PM_CLK_MISC, clk_cells);
/* Display controller SS */
- clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0);
- clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1);
+ clks[IMX_DC0_DISP0_CLK] = imx_clk_scu("dc0_disp0_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC0, clk_cells);
+ clks[IMX_DC0_DISP1_CLK] = imx_clk_scu("dc0_disp1_clk", IMX_SC_R_DC_0, IMX_SC_PM_CLK_MISC1, clk_cells);
/* MIPI-LVDS SS */
- clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2);
- clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2);
+ clks[IMX_MIPI0_I2C0_CLK] = imx_clk_scu("mipi0_i2c0_clk", IMX_SC_R_MIPI_0_I2C_0, IMX_SC_PM_CLK_MISC2, clk_cells);
+ clks[IMX_MIPI0_I2C1_CLK] = imx_clk_scu("mipi0_i2c1_clk", IMX_SC_R_MIPI_0_I2C_1, IMX_SC_PM_CLK_MISC2, clk_cells);
/* MIPI CSI SS */
- clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC);
- clks[IMX_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER);
- clks[IMX_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER);
+ clks[IMX_CSI0_CORE_CLK] = imx_clk_scu("mipi_csi0_core_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CSI0_ESC_CLK] = imx_clk_scu("mipi_csi0_esc_clk", IMX_SC_R_CSI_0, IMX_SC_PM_CLK_MISC, clk_cells);
+ clks[IMX_CSI0_I2C0_CLK] = imx_clk_scu("mipi_csi0_i2c0_clk", IMX_SC_R_CSI_0_I2C_0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_CSI0_PWM0_CLK] = imx_clk_scu("mipi_csi0_pwm0_clk", IMX_SC_R_CSI_0_PWM_0, IMX_SC_PM_CLK_PER, clk_cells);
/* GPU SS */
- clks[IMX_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER);
- clks[IMX_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC);
+ clks[IMX_GPU0_CORE_CLK] = imx_clk_scu("gpu_core0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_PER, clk_cells);
+ clks[IMX_GPU0_SHADER_CLK] = imx_clk_scu("gpu_shader0_clk", IMX_SC_R_GPU_0_PID0, IMX_SC_PM_CLK_MISC, clk_cells);
for (i = 0; i < clk_data->num; i++) {
if (IS_ERR(clks[i]))
@@ -134,7 +138,19 @@ static int imx8qxp_clk_probe(struct platform_device *pdev)
i, PTR_ERR(clks[i]));
}
- return of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+ if (clk_cells == 2) {
+ ret = of_clk_add_hw_provider(ccm_node, imx_scu_of_clk_src_get, imx_scu_clks);
+ if (ret)
+ imx_clk_scu_unregister();
+ } else {
+ /*
+ * The legacy binding code path doesn't unregister on failure
+ * because it will be removed later.
+ */
+ ret = of_clk_add_hw_provider(ccm_node, of_clk_hw_onecell_get, clk_data);
+ }
+
+ return ret;
}
static const struct of_device_id imx8qxp_match[] = {
diff --git a/drivers/clk/imx/clk-lpcg-scu.c b/drivers/clk/imx/clk-lpcg-scu.c
index 1f0e44f921ae..77be7632866d 100644
--- a/drivers/clk/imx/clk-lpcg-scu.c
+++ b/drivers/clk/imx/clk-lpcg-scu.c
@@ -34,6 +34,9 @@ struct clk_lpcg_scu {
void __iomem *reg;
u8 bit_idx;
bool hw_gate;
+
+ /* for state save&restore */
+ u32 state;
};
#define to_clk_lpcg_scu(_hw) container_of(_hw, struct clk_lpcg_scu, hw)
@@ -81,9 +84,9 @@ static const struct clk_ops clk_lpcg_scu_ops = {
.disable = clk_lpcg_scu_disable,
};
-struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg,
- u8 bit_idx, bool hw_gate)
+struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, bool hw_gate)
{
struct clk_lpcg_scu *clk;
struct clk_init_data init;
@@ -107,11 +110,53 @@ struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
clk->hw.init = &init;
hw = &clk->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
kfree(clk);
hw = ERR_PTR(ret);
}
+ if (dev)
+ dev_set_drvdata(dev, clk);
+
return hw;
}
+
+void imx_clk_lpcg_scu_unregister(struct clk_hw *hw)
+{
+ struct clk_lpcg_scu *clk = to_clk_lpcg_scu(hw);
+
+ clk_hw_unregister(&clk->hw);
+ kfree(clk);
+}
+
+static int __maybe_unused imx_clk_lpcg_scu_suspend(struct device *dev)
+{
+ struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+
+ clk->state = readl_relaxed(clk->reg);
+ dev_dbg(dev, "save lpcg state 0x%x\n", clk->state);
+
+ return 0;
+}
+
+static int __maybe_unused imx_clk_lpcg_scu_resume(struct device *dev)
+{
+ struct clk_lpcg_scu *clk = dev_get_drvdata(dev);
+
+ /*
+ * FIXME: Sometimes writes don't work unless the CPU issues
+ * them twice
+ */
+
+ writel(clk->state, clk->reg);
+ writel(clk->state, clk->reg);
+ dev_dbg(dev, "restore lpcg state 0x%x\n", clk->state);
+
+ return 0;
+}
+
+const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_lpcg_scu_suspend,
+ imx_clk_lpcg_scu_resume)
+};
diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c
index aba36e4217d2..2b5ed86b9dbb 100644
--- a/drivers/clk/imx/clk-pll14xx.c
+++ b/drivers/clk/imx/clk-pll14xx.c
@@ -416,7 +416,7 @@ struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
__func__, name);
kfree(pll);
return ERR_PTR(-EINVAL);
- };
+ }
pll->base = base;
pll->hw.init = &init;
diff --git a/drivers/clk/imx/clk-scu.c b/drivers/clk/imx/clk-scu.c
index b8b2072742a5..1f5518b7ab39 100644
--- a/drivers/clk/imx/clk-scu.c
+++ b/drivers/clk/imx/clk-scu.c
@@ -8,6 +8,10 @@
#include <linux/arm-smccc.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include "clk-scu.h"
@@ -16,6 +20,21 @@
#define IMX_SIP_SET_CPUFREQ 0x00
static struct imx_sc_ipc *ccm_ipc_handle;
+static struct device_node *pd_np;
+static struct platform_driver imx_clk_scu_driver;
+
+struct imx_scu_clk_node {
+ const char *name;
+ u32 rsrc;
+ u8 clk_type;
+ const char * const *parents;
+ int num_parents;
+
+ struct clk_hw *hw;
+ struct list_head node;
+};
+
+struct list_head imx_scu_clks[IMX_SC_R_LAST];
/*
* struct clk_scu - Description of one SCU clock
@@ -27,6 +46,10 @@ struct clk_scu {
struct clk_hw hw;
u16 rsrc_id;
u8 clk_type;
+
+	/* for state save and restore */
+ bool is_enabled;
+ u32 rate;
};
/*
@@ -128,9 +151,28 @@ static inline struct clk_scu *to_clk_scu(struct clk_hw *hw)
return container_of(hw, struct clk_scu, hw);
}
-int imx_clk_scu_init(void)
+int imx_clk_scu_init(struct device_node *np)
{
- return imx_scu_get_handle(&ccm_ipc_handle);
+	u32 clk_cells = 0;
+ int ret, i;
+
+ ret = imx_scu_get_handle(&ccm_ipc_handle);
+ if (ret)
+ return ret;
+
+ of_property_read_u32(np, "#clock-cells", &clk_cells);
+
+ if (clk_cells == 2) {
+ for (i = 0; i < IMX_SC_R_LAST; i++)
+ INIT_LIST_HEAD(&imx_scu_clks[i]);
+
+ /* pd_np will be used to attach power domains later */
+ pd_np = of_find_compatible_node(NULL, NULL, "fsl,scu-pd");
+ if (!pd_np)
+ return -EINVAL;
+ }
+
+ return platform_driver_register(&imx_clk_scu_driver);
}
/*
@@ -344,8 +386,9 @@ static const struct clk_ops clk_scu_cpu_ops = {
.unprepare = clk_scu_unprepare,
};
-struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
- int num_parents, u32 rsrc_id, u8 clk_type)
+struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
+ const char * const *parents, int num_parents,
+ u32 rsrc_id, u8 clk_type)
{
struct clk_init_data init;
struct clk_scu *clk;
@@ -379,11 +422,185 @@ struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
clk->hw.init = &init;
hw = &clk->hw;
- ret = clk_hw_register(NULL, hw);
+ ret = clk_hw_register(dev, hw);
if (ret) {
kfree(clk);
hw = ERR_PTR(ret);
}
+ if (dev)
+ dev_set_drvdata(dev, clk);
+
return hw;
}
+
+struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+ void *data)
+{
+ unsigned int rsrc = clkspec->args[0];
+ unsigned int idx = clkspec->args[1];
+ struct list_head *scu_clks = data;
+ struct imx_scu_clk_node *clk;
+
+ list_for_each_entry(clk, &scu_clks[rsrc], node) {
+ if (clk->clk_type == idx)
+ return clk->hw;
+ }
+
+ return ERR_PTR(-ENODEV);
+}
+
+static int imx_clk_scu_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_scu_clk_node *clk = dev_get_platdata(dev);
+ struct clk_hw *hw;
+ int ret;
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(dev);
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret) {
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ hw = __imx_clk_scu(dev, clk->name, clk->parents, clk->num_parents,
+ clk->rsrc, clk->clk_type);
+ if (IS_ERR(hw)) {
+ pm_runtime_disable(dev);
+ return PTR_ERR(hw);
+ }
+
+ clk->hw = hw;
+ list_add_tail(&clk->node, &imx_scu_clks[clk->rsrc]);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ dev_dbg(dev, "register SCU clock rsrc:%d type:%d\n", clk->rsrc,
+ clk->clk_type);
+
+ return 0;
+}
+
+static int __maybe_unused imx_clk_scu_suspend(struct device *dev)
+{
+ struct clk_scu *clk = dev_get_drvdata(dev);
+
+ clk->rate = clk_hw_get_rate(&clk->hw);
+ clk->is_enabled = clk_hw_is_enabled(&clk->hw);
+
+ if (clk->rate)
+		dev_dbg(dev, "save rate %u\n", clk->rate);
+
+ if (clk->is_enabled)
+ dev_dbg(dev, "save enabled state\n");
+
+ return 0;
+}
+
+static int __maybe_unused imx_clk_scu_resume(struct device *dev)
+{
+ struct clk_scu *clk = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (clk->rate) {
+ ret = clk_scu_set_rate(&clk->hw, clk->rate, 0);
+		dev_dbg(dev, "restore rate %u %s\n", clk->rate,
+			!ret ? "success" : "failed");
+ }
+
+ if (clk->is_enabled) {
+ ret = clk_scu_prepare(&clk->hw);
+ dev_dbg(dev, "restore enabled state %s\n",
+ !ret ? "success" : "failed");
+ }
+
+ return ret;
+}
+
+static const struct dev_pm_ops imx_clk_scu_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(imx_clk_scu_suspend,
+ imx_clk_scu_resume)
+};
+
+static struct platform_driver imx_clk_scu_driver = {
+ .driver = {
+ .name = "imx-scu-clk",
+ .suppress_bind_attrs = true,
+ .pm = &imx_clk_scu_pm_ops,
+ },
+ .probe = imx_clk_scu_probe,
+};
+
+static int imx_clk_scu_attach_pd(struct device *dev, u32 rsrc_id)
+{
+ struct of_phandle_args genpdspec = {
+ .np = pd_np,
+ .args_count = 1,
+ .args[0] = rsrc_id,
+ };
+
+ if (rsrc_id == IMX_SC_R_A35 || rsrc_id == IMX_SC_R_A53 ||
+ rsrc_id == IMX_SC_R_A72)
+ return 0;
+
+ return of_genpd_add_device(&genpdspec, dev);
+}
+
+struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+ const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type)
+{
+ struct imx_scu_clk_node clk = {
+ .name = name,
+ .rsrc = rsrc_id,
+ .clk_type = clk_type,
+ .parents = parents,
+ .num_parents = num_parents,
+ };
+ struct platform_device *pdev;
+ int ret;
+
+ pdev = platform_device_alloc(name, PLATFORM_DEVID_NONE);
+ if (!pdev) {
+ pr_err("%s: failed to allocate scu clk dev rsrc %d type %d\n",
+ name, rsrc_id, clk_type);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = platform_device_add_data(pdev, &clk, sizeof(clk));
+ if (ret) {
+ platform_device_put(pdev);
+ return ERR_PTR(ret);
+ }
+
+ pdev->driver_override = "imx-scu-clk";
+
+ ret = imx_clk_scu_attach_pd(&pdev->dev, rsrc_id);
+ if (ret)
+		pr_warn("%s: failed to attach the power domain: %d\n",
+			name, ret);
+
+	ret = platform_device_add(pdev);
+	if (ret) {
+		platform_device_put(pdev);
+		return ERR_PTR(ret);
+	}
+
+	/* For API backwards compatibility, simply return NULL for success */
+ return NULL;
+}
+
+void imx_clk_scu_unregister(void)
+{
+ struct imx_scu_clk_node *clk;
+ int i;
+
+ for (i = 0; i < IMX_SC_R_LAST; i++) {
+ list_for_each_entry(clk, &imx_scu_clks[i], node) {
+ clk_hw_unregister(clk->hw);
+ kfree(clk);
+ }
+ }
+}
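imx_clk_scu_alloc_dev() gives every SCU clock its own platform device bound to the "imx-scu-clk" driver via driver_override, which is what makes the per-clock runtime PM and suspend/resume hooks above possible. On the lookup side, a two-cell consumer such as clocks = <&clk IMX_SC_R_UART_0 IMX_SC_PM_CLK_PER> resolves through imx_scu_of_clk_src_get(). A sketch of the SoC glue that ties the two together (assumed here for illustration, not part of this hunk):

static int imx8qxp_clk_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = imx_clk_scu_init(np);	/* IPC handle, list heads, driver */
	if (ret)
		return ret;

	/* ... imx_clk_scu()/imx_clk_scu2() calls populate imx_scu_clks ... */

	return of_clk_add_hw_provider(np, imx_scu_of_clk_src_get,
				      imx_scu_clks);
}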
diff --git a/drivers/clk/imx/clk-scu.h b/drivers/clk/imx/clk-scu.h
index 2bcfaf06a458..e8352164923e 100644
--- a/drivers/clk/imx/clk-scu.h
+++ b/drivers/clk/imx/clk-scu.h
@@ -8,25 +8,61 @@
#define __IMX_CLK_SCU_H
#include <linux/firmware/imx/sci.h>
+#include <linux/of.h>
-int imx_clk_scu_init(void);
+extern struct list_head imx_scu_clks[];
+extern const struct dev_pm_ops imx_clk_lpcg_scu_pm_ops;
-struct clk_hw *__imx_clk_scu(const char *name, const char * const *parents,
- int num_parents, u32 rsrc_id, u8 clk_type);
+int imx_clk_scu_init(struct device_node *np);
+struct clk_hw *imx_scu_of_clk_src_get(struct of_phandle_args *clkspec,
+ void *data);
+struct clk_hw *imx_clk_scu_alloc_dev(const char *name,
+ const char * const *parents,
+ int num_parents, u32 rsrc_id, u8 clk_type);
+
+struct clk_hw *__imx_clk_scu(struct device *dev, const char *name,
+ const char * const *parents, int num_parents,
+ u32 rsrc_id, u8 clk_type);
+
+void imx_clk_scu_unregister(void);
+
+struct clk_hw *__imx_clk_lpcg_scu(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, bool hw_gate);
+void imx_clk_lpcg_scu_unregister(struct clk_hw *hw);
static inline struct clk_hw *imx_clk_scu(const char *name, u32 rsrc_id,
- u8 clk_type)
+ u8 clk_type, u8 clk_cells)
{
- return __imx_clk_scu(name, NULL, 0, rsrc_id, clk_type);
+ if (clk_cells == 2)
+ return imx_clk_scu_alloc_dev(name, NULL, 0, rsrc_id, clk_type);
+ else
+ return __imx_clk_scu(NULL, name, NULL, 0, rsrc_id, clk_type);
}
static inline struct clk_hw *imx_clk_scu2(const char *name, const char * const *parents,
- int num_parents, u32 rsrc_id, u8 clk_type)
+ int num_parents, u32 rsrc_id, u8 clk_type,
+ u8 clk_cells)
+{
+ if (clk_cells == 2)
+ return imx_clk_scu_alloc_dev(name, parents, num_parents, rsrc_id, clk_type);
+ else
+ return __imx_clk_scu(NULL, name, parents, num_parents, rsrc_id, clk_type);
+}
+
+static inline struct clk_hw *imx_clk_lpcg_scu_dev(struct device *dev, const char *name,
+ const char *parent_name, unsigned long flags,
+ void __iomem *reg, u8 bit_idx, bool hw_gate)
{
- return __imx_clk_scu(name, parents, num_parents, rsrc_id, clk_type);
+ return __imx_clk_lpcg_scu(dev, name, parent_name, flags, reg,
+ bit_idx, hw_gate);
}
-struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
- unsigned long flags, void __iomem *reg,
- u8 bit_idx, bool hw_gate);
+static inline struct clk_hw *imx_clk_lpcg_scu(const char *name, const char *parent_name,
+ unsigned long flags, void __iomem *reg,
+ u8 bit_idx, bool hw_gate)
+{
+ return __imx_clk_lpcg_scu(NULL, name, parent_name, flags, reg,
+ bit_idx, hw_gate);
+}
#endif
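The clk_cells parameter lets a single clock table serve both the legacy one-cell binding and the new two-cell binding: with two cells the helper goes through the platform-device path, otherwise it registers the clk_hw directly. A hypothetical table entry showing the call (the resource and clock-type constants come from the SCU firmware headers):

clks[IMX_LSIO_PWM0_CLK] = imx_clk_scu("pwm0_clk", IMX_SC_R_PWM_0,
				      IMX_SC_PM_CLK_PER, clk_cells);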
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index 1d7be0c86538..4f04c8287286 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -6,8 +6,6 @@
#include <linux/spinlock.h>
#include <linux/clk-provider.h>
-#define IMX_CLK_GATE2_SINGLE_BIT 1
-
extern spinlock_t imx_ccm_lock;
void imx_check_clocks(struct clk *clks[], unsigned int count);
@@ -68,9 +66,9 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
to_clk(imx_clk_hw_cpu(name, parent_name, div, mux, pll, step))
#define clk_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
- cgr_val, clk_gate_flags, lock, share_count) \
+ cgr_val, cgr_mask, clk_gate_flags, lock, share_count) \
to_clk(clk_hw_register_gate2(dev, name, parent_name, flags, reg, bit_idx, \
- cgr_val, clk_gate_flags, lock, share_count))
+ cgr_val, cgr_mask, clk_gate_flags, lock, share_count))
#define imx_clk_pllv3(type, name, parent_name, base, div_mask) \
to_clk(imx_clk_hw_pllv3(type, name, parent_name, base, div_mask))
@@ -198,7 +196,7 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 bit_idx, u8 cgr_val,
+ void __iomem *reg, u8 bit_idx, u8 cgr_val, u8 cgr_mask,
u8 clk_gate_flags, spinlock_t *lock,
unsigned int *share_count);
@@ -351,14 +349,14 @@ static inline struct clk_hw *imx_clk_hw_gate2(const char *name, const char *pare
void __iomem *reg, u8 shift)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, NULL);
+ shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate2_flags(const char *name, const char *parent,
void __iomem *reg, u8 shift, unsigned long flags)
{
return clk_hw_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, NULL);
+ shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
@@ -366,7 +364,7 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0, &imx_ccm_lock, share_count);
+ shift, 0x3, 0x3, 0, &imx_ccm_lock, share_count);
}
static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
@@ -374,7 +372,7 @@ static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0,
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0x3, 0,
&imx_ccm_lock, share_count);
}
@@ -384,16 +382,15 @@ static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x3,
- IMX_CLK_GATE2_SINGLE_BIT,
- &imx_ccm_lock, share_count);
+ CLK_OPS_PARENT_ENABLE, reg, shift, 0x1,
+ 0x1, 0, &imx_ccm_lock, share_count);
}
static inline struct clk *imx_clk_gate2_cgr(const char *name,
const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
{
return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, cgr_val, 0, &imx_ccm_lock, NULL);
+ shift, cgr_val, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *parent,
@@ -421,7 +418,7 @@ static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *pare
{
return clk_hw_register_gate2(NULL, name, parent,
CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+ reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
@@ -430,7 +427,7 @@ static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
{
return clk_hw_register_gate2(NULL, name, parent,
flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0, &imx_ccm_lock, NULL);
+ reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
}
#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
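The new cgr_mask argument replaces the IMX_CLK_GATE2_SINGLE_BIT flag by stating the width of the gate's control field directly: 0x3/0x3 for a two-bit CGR cell, 0x1/0x1 for a single-bit gate. Assuming clk_gate2 applies the pair as a masked read-modify-write, the enable path reduces to something like the following sketch (illustrative only; the real implementation lives in clk-gate2.c):

static void gate2_field_write(void __iomem *reg, u8 bit_idx,
			      u8 cgr_val, u8 cgr_mask)
{
	u32 v = readl(reg);

	v &= ~((u32)cgr_mask << bit_idx);	/* clear the control field */
	v |= ((u32)(cgr_val & cgr_mask)) << bit_idx;
	writel(v, reg);
}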
diff --git a/drivers/clk/ingenic/cgu.c b/drivers/clk/ingenic/cgu.c
index dac6edc670cc..c8e9cb6c8e39 100644
--- a/drivers/clk/ingenic/cgu.c
+++ b/drivers/clk/ingenic/cgu.c
@@ -392,15 +392,21 @@ static unsigned int
ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info *clk_info,
unsigned int div)
{
- unsigned int i;
+ unsigned int i, best_i = 0, best = (unsigned int)-1;
for (i = 0; i < (1 << clk_info->div.bits)
&& clk_info->div.div_table[i]; i++) {
- if (clk_info->div.div_table[i] >= div)
- return i;
+ if (clk_info->div.div_table[i] >= div &&
+ clk_info->div.div_table[i] < best) {
+ best = clk_info->div.div_table[i];
+ best_i = i;
+
+ if (div == best)
+ break;
+ }
}
- return i - 1;
+ return best_i;
}
static unsigned
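The old loop returned the first table entry that satisfied the request, which is only correct when the divider table is sorted, and it fell back to i - 1 when no entry qualified at all. The rewritten scan keeps the smallest qualifying divider regardless of table order. A worked example with a hypothetical table:

/*
 * div_table = { 4, 12, 6, 8 }, requested div = 5:
 *   old code: returns index 1 (divider 12, the first entry >= 5)
 *   new code: keeps scanning and settles on index 2 (divider 6),
 *             the smallest divider that still covers the request.
 */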
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 14e127e9a740..dcc1352bf13c 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -155,7 +155,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
-struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
+static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
struct regmap *regmap,
spinlock_t *lock)
{
diff --git a/drivers/clk/mediatek/clk-mux.h b/drivers/clk/mediatek/clk-mux.h
index f5625f4d9e6c..8e2f927dd2ff 100644
--- a/drivers/clk/mediatek/clk-mux.h
+++ b/drivers/clk/mediatek/clk-mux.h
@@ -77,10 +77,6 @@ extern const struct clk_ops mtk_mux_gate_clr_set_upd_ops;
_width, _gate, _upd_ofs, _upd, \
CLK_SET_RATE_PARENT)
-struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
- struct regmap *regmap,
- spinlock_t *lock);
-
int mtk_clk_register_muxes(const struct mtk_mux *muxes,
int num, struct device_node *node,
spinlock_t *lock,
diff --git a/drivers/clk/meson/Kconfig b/drivers/clk/meson/Kconfig
index 034da203e8e0..fc002c155bc3 100644
--- a/drivers/clk/meson/Kconfig
+++ b/drivers/clk/meson/Kconfig
@@ -58,7 +58,7 @@ config COMMON_CLK_MESON8B
want peripherals and CPU frequency scaling to work.
config COMMON_CLK_GXBB
- bool "GXBB and GXL SoC clock controllers support"
+ tristate "GXBB and GXL SoC clock controllers support"
depends on ARM64
default y
select COMMON_CLK_MESON_REGMAP
@@ -74,7 +74,7 @@ config COMMON_CLK_GXBB
Say Y if you want peripherals and CPU frequency scaling to work.
config COMMON_CLK_AXG
- bool "AXG SoC clock controllers support"
+ tristate "AXG SoC clock controllers support"
depends on ARM64
default y
select COMMON_CLK_MESON_REGMAP
@@ -100,7 +100,7 @@ config COMMON_CLK_AXG_AUDIO
aka axg, Say Y if you want audio subsystem to work.
config COMMON_CLK_G12A
- bool "G12 and SM1 SoC clock controllers support"
+ tristate "G12 and SM1 SoC clock controllers support"
depends on ARM64
default y
select COMMON_CLK_MESON_REGMAP
@@ -110,6 +110,7 @@ config COMMON_CLK_G12A
select COMMON_CLK_MESON_AO_CLKC
select COMMON_CLK_MESON_EE_CLKC
select COMMON_CLK_MESON_CPU_DYNDIV
+ select COMMON_CLK_MESON_VID_PLL_DIV
select MFD_SYSCON
help
Support for the clock controller on Amlogic S905D2, S905X2 and S905Y2
diff --git a/drivers/clk/meson/axg-aoclk.c b/drivers/clk/meson/axg-aoclk.c
index b488b40c9d0e..af6db437bcd8 100644
--- a/drivers/clk/meson/axg-aoclk.c
+++ b/drivers/clk/meson/axg-aoclk.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "meson-aoclk.h"
#include "axg-aoclk.h"
@@ -326,6 +327,7 @@ static const struct of_device_id axg_aoclkc_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, axg_aoclkc_match_table);
static struct platform_driver axg_aoclkc_driver = {
.probe = meson_aoclkc_probe,
@@ -335,4 +337,5 @@ static struct platform_driver axg_aoclkc_driver = {
},
};
-builtin_platform_driver(axg_aoclkc_driver);
+module_platform_driver(axg_aoclkc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg.c b/drivers/clk/meson/axg.c
index 13fc0006f63d..0e44695b8772 100644
--- a/drivers/clk/meson/axg.c
+++ b/drivers/clk/meson/axg.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "clk-regmap.h"
#include "clk-pll.h"
@@ -1026,6 +1027,743 @@ static struct clk_regmap axg_sd_emmc_c_clk0 = {
},
};
+/* VPU Clock */
+
+static const struct clk_hw *axg_vpu_parent_hws[] = {
+ &axg_fclk_div4.hw,
+ &axg_fclk_div3.hw,
+ &axg_fclk_div5.hw,
+ &axg_fclk_div7.hw,
+};
+
+static struct clk_regmap axg_vpu_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+		/* We need a specific parent for the VPU clock source; let it be set in DT */
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_0_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_0_div.hw },
+ .num_parents = 1,
+		/*
+		 * We want to prevent CCF from disabling the VPU clock if
+		 * the display has been set up by the bootloader
+		 */
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vpu_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+		/* We need a specific parent for the VPU clock source; let it be set in DT */
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_1_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vpu_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vpu_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vpu_1_div.hw },
+ .num_parents = 1,
+		/*
+		 * We want to prevent CCF from disabling the VPU clock if
+		 * the display has been set up by the bootloader
+		 */
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vpu = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VPU_CLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vpu",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vpu_0.hw,
+ &axg_vpu_1.hw
+ },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+/* VAPB Clock */
+
+static struct clk_regmap axg_vapb_0_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 9,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_0_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_0_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_0_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_0 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_0",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_0_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vapb_1_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 0x3,
+ .shift = 25,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vpu_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vpu_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_1_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .shift = 16,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_1_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_1_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb_1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 24,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb_1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_1_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vapb_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .mask = 1,
+ .shift = 31,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vapb_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vapb_0.hw,
+ &axg_vapb_1.hw
+ },
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap axg_vapb = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VAPBCLK_CNTL,
+ .bit_idx = 30,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vapb",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vapb_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* Video Clocks */
+
+static const struct clk_hw *axg_vclk_parent_hws[] = {
+ &axg_gp0_pll.hw,
+ &axg_fclk_div4.hw,
+ &axg_fclk_div3.hw,
+ &axg_fclk_div5.hw,
+ &axg_fclk_div2.hw,
+ &axg_fclk_div7.hw,
+ &axg_mpll1.hw,
+};
+
+static struct clk_regmap axg_vclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk2_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 16,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_vclk_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_vclk_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_input = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .bit_idx = 16,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_input",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_input.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .shift = 0,
+ .width = 8,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_input.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_vclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk_div.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 19,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2_div.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div1 = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 0,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div1",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div2_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 1,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div2_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div4_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 2,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div4_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div6_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div6_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_regmap axg_vclk2_div12_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VIID_CLK_CNTL,
+ .bit_idx = 4,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vclk2_div12_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) { &axg_vclk2.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div2_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div4_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div6_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk_div12_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div2 = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div2",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div2_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div4 = {
+ .mult = 1,
+ .div = 4,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div4",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div4_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div6 = {
+ .mult = 1,
+ .div = 6,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div6",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div6_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static struct clk_fixed_factor axg_vclk2_div12 = {
+ .mult = 1,
+ .div = 12,
+ .hw.init = &(struct clk_init_data){
+ .name = "vclk2_div12",
+ .ops = &clk_fixed_factor_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vclk2_div12_en.hw
+ },
+ .num_parents = 1,
+ },
+};
+
+static u32 mux_table_cts_sel[] = { 0, 1, 2, 3, 4, 8, 9, 10, 11, 12 };
+static const struct clk_hw *axg_cts_parent_hws[] = {
+ &axg_vclk_div1.hw,
+ &axg_vclk_div2.hw,
+ &axg_vclk_div4.hw,
+ &axg_vclk_div6.hw,
+ &axg_vclk_div12.hw,
+ &axg_vclk2_div1.hw,
+ &axg_vclk2_div2.hw,
+ &axg_vclk2_div4.hw,
+ &axg_vclk2_div6.hw,
+ &axg_vclk2_div12.hw,
+};
+
+static struct clk_regmap axg_cts_encl_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VIID_CLK_DIV,
+ .mask = 0xf,
+ .shift = 12,
+ .table = mux_table_cts_sel,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "cts_encl_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = axg_cts_parent_hws,
+ .num_parents = ARRAY_SIZE(axg_cts_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT | CLK_GET_RATE_NOCACHE,
+ },
+};
+
+static struct clk_regmap axg_cts_encl = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_CLK_CNTL2,
+ .bit_idx = 3,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "cts_encl",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_cts_encl_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+ },
+};
+
+/* MIPI DSI Host Clock */
+
+static u32 mux_table_axg_vdin_meas[] = { 0, 1, 2, 3, 6, 7 };
+static const struct clk_parent_data axg_vdin_meas_parent_data[] = {
+ { .fw_name = "xtal", },
+ { .hw = &axg_fclk_div4.hw },
+ { .hw = &axg_fclk_div3.hw },
+ { .hw = &axg_fclk_div5.hw },
+ { .hw = &axg_fclk_div2.hw },
+ { .hw = &axg_fclk_div7.hw },
+};
+
+static struct clk_regmap axg_vdin_meas_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_VDIN_MEAS_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 21,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ .table = mux_table_axg_vdin_meas,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdin_meas_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_data = axg_vdin_meas_parent_data,
+ .num_parents = ARRAY_SIZE(axg_vdin_meas_parent_data),
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vdin_meas_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_VDIN_MEAS_CLK_CNTL,
+ .shift = 12,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vdin_meas_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vdin_meas_sel.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap axg_vdin_meas = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VDIN_MEAS_CLK_CNTL,
+ .bit_idx = 20,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "vdin_meas",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &axg_vdin_meas_div.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static u32 mux_table_gen_clk[] = { 0, 4, 5, 6, 7, 8,
9, 10, 11, 13, 14, };
static const struct clk_parent_data gen_clk_parent_data[] = {
@@ -1246,6 +1984,52 @@ static struct clk_hw_onecell_data axg_hw_onecell_data = {
[CLKID_HIFI_PLL_DCO] = &axg_hifi_pll_dco.hw,
[CLKID_PCIE_PLL_DCO] = &axg_pcie_pll_dco.hw,
[CLKID_PCIE_PLL_OD] = &axg_pcie_pll_od.hw,
+ [CLKID_VPU_0_DIV] = &axg_vpu_0_div.hw,
+ [CLKID_VPU_0_SEL] = &axg_vpu_0_sel.hw,
+ [CLKID_VPU_0] = &axg_vpu_0.hw,
+ [CLKID_VPU_1_DIV] = &axg_vpu_1_div.hw,
+ [CLKID_VPU_1_SEL] = &axg_vpu_1_sel.hw,
+ [CLKID_VPU_1] = &axg_vpu_1.hw,
+ [CLKID_VPU] = &axg_vpu.hw,
+ [CLKID_VAPB_0_DIV] = &axg_vapb_0_div.hw,
+ [CLKID_VAPB_0_SEL] = &axg_vapb_0_sel.hw,
+ [CLKID_VAPB_0] = &axg_vapb_0.hw,
+ [CLKID_VAPB_1_DIV] = &axg_vapb_1_div.hw,
+ [CLKID_VAPB_1_SEL] = &axg_vapb_1_sel.hw,
+ [CLKID_VAPB_1] = &axg_vapb_1.hw,
+ [CLKID_VAPB_SEL] = &axg_vapb_sel.hw,
+ [CLKID_VAPB] = &axg_vapb.hw,
+ [CLKID_VCLK] = &axg_vclk.hw,
+ [CLKID_VCLK2] = &axg_vclk2.hw,
+ [CLKID_VCLK_SEL] = &axg_vclk_sel.hw,
+ [CLKID_VCLK2_SEL] = &axg_vclk2_sel.hw,
+ [CLKID_VCLK_INPUT] = &axg_vclk_input.hw,
+ [CLKID_VCLK2_INPUT] = &axg_vclk2_input.hw,
+ [CLKID_VCLK_DIV] = &axg_vclk_div.hw,
+ [CLKID_VCLK2_DIV] = &axg_vclk2_div.hw,
+ [CLKID_VCLK_DIV2_EN] = &axg_vclk_div2_en.hw,
+ [CLKID_VCLK_DIV4_EN] = &axg_vclk_div4_en.hw,
+ [CLKID_VCLK_DIV6_EN] = &axg_vclk_div6_en.hw,
+ [CLKID_VCLK_DIV12_EN] = &axg_vclk_div12_en.hw,
+ [CLKID_VCLK2_DIV2_EN] = &axg_vclk2_div2_en.hw,
+ [CLKID_VCLK2_DIV4_EN] = &axg_vclk2_div4_en.hw,
+ [CLKID_VCLK2_DIV6_EN] = &axg_vclk2_div6_en.hw,
+ [CLKID_VCLK2_DIV12_EN] = &axg_vclk2_div12_en.hw,
+ [CLKID_VCLK_DIV1] = &axg_vclk_div1.hw,
+ [CLKID_VCLK_DIV2] = &axg_vclk_div2.hw,
+ [CLKID_VCLK_DIV4] = &axg_vclk_div4.hw,
+ [CLKID_VCLK_DIV6] = &axg_vclk_div6.hw,
+ [CLKID_VCLK_DIV12] = &axg_vclk_div12.hw,
+ [CLKID_VCLK2_DIV1] = &axg_vclk2_div1.hw,
+ [CLKID_VCLK2_DIV2] = &axg_vclk2_div2.hw,
+ [CLKID_VCLK2_DIV4] = &axg_vclk2_div4.hw,
+ [CLKID_VCLK2_DIV6] = &axg_vclk2_div6.hw,
+ [CLKID_VCLK2_DIV12] = &axg_vclk2_div12.hw,
+ [CLKID_CTS_ENCL_SEL] = &axg_cts_encl_sel.hw,
+ [CLKID_CTS_ENCL] = &axg_cts_encl.hw,
+ [CLKID_VDIN_MEAS_SEL] = &axg_vdin_meas_sel.hw,
+ [CLKID_VDIN_MEAS_DIV] = &axg_vdin_meas_div.hw,
+ [CLKID_VDIN_MEAS] = &axg_vdin_meas.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -1341,6 +2125,42 @@ static struct clk_regmap *const axg_clk_regmaps[] = {
&axg_hifi_pll_dco,
&axg_pcie_pll_dco,
&axg_pcie_pll_od,
+ &axg_vpu_0_div,
+ &axg_vpu_0_sel,
+ &axg_vpu_0,
+ &axg_vpu_1_div,
+ &axg_vpu_1_sel,
+ &axg_vpu_1,
+ &axg_vpu,
+ &axg_vapb_0_div,
+ &axg_vapb_0_sel,
+ &axg_vapb_0,
+ &axg_vapb_1_div,
+ &axg_vapb_1_sel,
+ &axg_vapb_1,
+ &axg_vapb_sel,
+ &axg_vapb,
+ &axg_vclk,
+ &axg_vclk2,
+ &axg_vclk_sel,
+ &axg_vclk2_sel,
+ &axg_vclk_input,
+ &axg_vclk2_input,
+ &axg_vclk_div,
+ &axg_vclk2_div,
+ &axg_vclk_div2_en,
+ &axg_vclk_div4_en,
+ &axg_vclk_div6_en,
+ &axg_vclk_div12_en,
+ &axg_vclk2_div2_en,
+ &axg_vclk2_div4_en,
+ &axg_vclk2_div6_en,
+ &axg_vclk2_div12_en,
+ &axg_cts_encl_sel,
+ &axg_cts_encl,
+ &axg_vdin_meas_sel,
+ &axg_vdin_meas_div,
+ &axg_vdin_meas,
};
static const struct meson_eeclkc_data axg_clkc_data = {
@@ -1354,6 +2174,7 @@ static const struct of_device_id clkc_match_table[] = {
{ .compatible = "amlogic,axg-clkc", .data = &axg_clkc_data },
{}
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver axg_driver = {
.probe = meson_eeclkc_probe,
@@ -1363,4 +2184,5 @@ static struct platform_driver axg_driver = {
},
};
-builtin_platform_driver(axg_driver);
+module_platform_driver(axg_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/axg.h b/drivers/clk/meson/axg.h
index 0431dabac629..481b307ea3cb 100644
--- a/drivers/clk/meson/axg.h
+++ b/drivers/clk/meson/axg.h
@@ -139,8 +139,29 @@
#define CLKID_HIFI_PLL_DCO 88
#define CLKID_PCIE_PLL_DCO 89
#define CLKID_PCIE_PLL_OD 90
+#define CLKID_VPU_0_DIV 91
+#define CLKID_VPU_1_DIV 94
+#define CLKID_VAPB_0_DIV 98
+#define CLKID_VAPB_1_DIV 101
+#define CLKID_VCLK_SEL 108
+#define CLKID_VCLK2_SEL 109
+#define CLKID_VCLK_INPUT 110
+#define CLKID_VCLK2_INPUT 111
+#define CLKID_VCLK_DIV 112
+#define CLKID_VCLK2_DIV 113
+#define CLKID_VCLK_DIV2_EN 114
+#define CLKID_VCLK_DIV4_EN 115
+#define CLKID_VCLK_DIV6_EN 116
+#define CLKID_VCLK_DIV12_EN 117
+#define CLKID_VCLK2_DIV2_EN 118
+#define CLKID_VCLK2_DIV4_EN 119
+#define CLKID_VCLK2_DIV6_EN 120
+#define CLKID_VCLK2_DIV12_EN 121
+#define CLKID_CTS_ENCL_SEL 132
+#define CLKID_VDIN_MEAS_SEL 134
+#define CLKID_VDIN_MEAS_DIV 135
-#define NR_CLKS 91
+#define NR_CLKS 137
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/axg-clkc.h>
diff --git a/drivers/clk/meson/g12a-aoclk.c b/drivers/clk/meson/g12a-aoclk.c
index 62499563e4f5..b52990e574d2 100644
--- a/drivers/clk/meson/g12a-aoclk.c
+++ b/drivers/clk/meson/g12a-aoclk.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "meson-aoclk.h"
#include "g12a-aoclk.h"
@@ -461,6 +462,7 @@ static const struct of_device_id g12a_aoclkc_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, g12a_aoclkc_match_table);
static struct platform_driver g12a_aoclkc_driver = {
.probe = meson_aoclkc_probe,
@@ -470,4 +472,5 @@ static struct platform_driver g12a_aoclkc_driver = {
},
};
-builtin_platform_driver(g12a_aoclkc_driver);
+module_platform_driver(g12a_aoclkc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c
index b814d44917a5..b080359b4645 100644
--- a/drivers/clk/meson/g12a.c
+++ b/drivers/clk/meson/g12a.c
@@ -15,6 +15,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include "clk-mpll.h"
#include "clk-pll.h"
@@ -3657,6 +3658,68 @@ static struct clk_regmap g12a_hdmi_tx = {
},
};
+/* MIPI DSI Host Clocks */
+
+static const struct clk_hw *g12a_mipi_dsi_pxclk_parent_hws[] = {
+ &g12a_vid_pll.hw,
+ &g12a_gp0_pll.hw,
+ &g12a_hifi_pll.hw,
+ &g12a_mpll1.hw,
+ &g12a_fclk_div2.hw,
+ &g12a_fclk_div2p5.hw,
+ &g12a_fclk_div3.hw,
+ &g12a_fclk_div7.hw,
+};
+
+static struct clk_regmap g12a_mipi_dsi_pxclk_sel = {
+ .data = &(struct clk_regmap_mux_data){
+ .offset = HHI_MIPIDSI_PHY_CLK_CNTL,
+ .mask = 0x7,
+ .shift = 12,
+ .flags = CLK_MUX_ROUND_CLOSEST,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mipi_dsi_pxclk_sel",
+ .ops = &clk_regmap_mux_ops,
+ .parent_hws = g12a_mipi_dsi_pxclk_parent_hws,
+ .num_parents = ARRAY_SIZE(g12a_mipi_dsi_pxclk_parent_hws),
+ .flags = CLK_SET_RATE_NO_REPARENT,
+ },
+};
+
+static struct clk_regmap g12a_mipi_dsi_pxclk_div = {
+ .data = &(struct clk_regmap_div_data){
+ .offset = HHI_MIPIDSI_PHY_CLK_CNTL,
+ .shift = 0,
+ .width = 7,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "mipi_dsi_pxclk_div",
+ .ops = &clk_regmap_divider_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mipi_dsi_pxclk_sel.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
+static struct clk_regmap g12a_mipi_dsi_pxclk = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_MIPIDSI_PHY_CLK_CNTL,
+ .bit_idx = 8,
+ },
+ .hw.init = &(struct clk_init_data) {
+ .name = "mipi_dsi_pxclk",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &g12a_mipi_dsi_pxclk_div.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
/* HDMI Clocks */
static const struct clk_parent_data g12a_hdmi_parent_data[] = {
@@ -4402,6 +4465,9 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = {
[CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
[CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
[CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
+ [CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
+ [CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
+ [CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4657,6 +4723,9 @@ static struct clk_hw_onecell_data g12b_hw_onecell_data = {
[CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw,
[CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw,
[CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw,
+ [CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
+ [CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
+ [CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -4903,6 +4972,9 @@ static struct clk_hw_onecell_data sm1_hw_onecell_data = {
[CLKID_NNA_CORE_CLK_SEL] = &sm1_nna_core_clk_sel.hw,
[CLKID_NNA_CORE_CLK_DIV] = &sm1_nna_core_clk_div.hw,
[CLKID_NNA_CORE_CLK] = &sm1_nna_core_clk.hw,
+ [CLKID_MIPI_DSI_PXCLK_SEL] = &g12a_mipi_dsi_pxclk_sel.hw,
+ [CLKID_MIPI_DSI_PXCLK_DIV] = &g12a_mipi_dsi_pxclk_div.hw,
+ [CLKID_MIPI_DSI_PXCLK] = &g12a_mipi_dsi_pxclk.hw,
[NR_CLKS] = NULL,
},
.num = NR_CLKS,
@@ -5150,16 +5222,20 @@ static struct clk_regmap *const g12a_clk_regmaps[] = {
&sm1_nna_core_clk_sel,
&sm1_nna_core_clk_div,
&sm1_nna_core_clk,
+ &g12a_mipi_dsi_pxclk_sel,
+ &g12a_mipi_dsi_pxclk_div,
+ &g12a_mipi_dsi_pxclk,
};
static const struct reg_sequence g12a_init_regs[] = {
{ .reg = HHI_MPLL_CNTL0, .def = 0x00000543 },
};
-static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
+#define DVFS_CON_ID "dvfs"
+
+static int meson_g12a_dvfs_setup_common(struct device *dev,
struct clk_hw **hws)
{
- const char *notifier_clk_name;
struct clk *notifier_clk;
struct clk_hw *xtal;
int ret;
@@ -5168,21 +5244,22 @@ static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
/* Setup clock notifier for cpu_clk_postmux0 */
g12a_cpu_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_postmux0.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12a_cpu_clk_postmux0_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_postmux0.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_postmux0_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_postmux0 notifier\n");
return ret;
}
/* Setup clock notifier for cpu_clk_dyn mux */
- notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk_dyn.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk_dyn.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk_dyn notifier\n");
+ dev_err(dev, "failed to register the cpu_clk_dyn notifier\n");
return ret;
}
@@ -5192,33 +5269,34 @@ static int meson_g12a_dvfs_setup_common(struct platform_device *pdev,
static int meson_g12b_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12b_hw_onecell_data.hws;
- const char *notifier_clk_name;
+ struct device *dev = &pdev->dev;
struct clk *notifier_clk;
struct clk_hw *xtal;
int ret;
- ret = meson_g12a_dvfs_setup_common(pdev, hws);
+ ret = meson_g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
xtal = clk_hw_get_parent_by_index(hws[CLKID_CPU_CLK_DYN1_SEL], 0);
/* Setup clock notifier for cpu_clk mux */
- notifier_clk_name = clk_hw_get_name(&g12b_cpu_clk.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpu_clk.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk notifier\n");
+ dev_err(dev, "failed to register the cpu_clk notifier\n");
return ret;
}
/* Setup clock notifier for sys1_pll */
- notifier_clk_name = clk_hw_get_name(&g12b_sys1_pll.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12b_cpu_clk_sys1_pll_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_sys1_pll.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12b_cpu_clk_sys1_pll_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the sys1_pll notifier\n");
+ dev_err(dev, "failed to register the sys1_pll notifier\n");
return ret;
}
@@ -5226,40 +5304,39 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
/* Setup clock notifier for cpub_clk_postmux0 */
g12b_cpub_clk_postmux0_nb_data.xtal = xtal;
- notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk_postmux0.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12b_cpub_clk_postmux0_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_postmux0.hw,
+ DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12b_cpub_clk_postmux0_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpub_clk_postmux0 notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_postmux0 notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk_dyn mux */
- notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk_dyn.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+	notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk_dyn.hw,
+					   DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpub_clk_dyn notifier\n");
+ dev_err(dev, "failed to register the cpub_clk_dyn notifier\n");
return ret;
}
/* Setup clock notifier for cpub_clk mux */
- notifier_clk_name = clk_hw_get_name(&g12b_cpub_clk.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12b_cpub_clk.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpub_clk notifier\n");
+ dev_err(dev, "failed to register the cpub_clk notifier\n");
return ret;
}
/* Setup clock notifier for sys_pll */
- notifier_clk_name = clk_hw_get_name(&g12a_sys_pll.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk,
- &g12b_cpub_clk_sys_pll_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_sys_pll.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12b_cpub_clk_sys_pll_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the sys_pll notifier\n");
+ dev_err(dev, "failed to register the sys_pll notifier\n");
return ret;
}
@@ -5269,29 +5346,29 @@ static int meson_g12b_dvfs_setup(struct platform_device *pdev)
static int meson_g12a_dvfs_setup(struct platform_device *pdev)
{
struct clk_hw **hws = g12a_hw_onecell_data.hws;
- const char *notifier_clk_name;
+ struct device *dev = &pdev->dev;
struct clk *notifier_clk;
int ret;
- ret = meson_g12a_dvfs_setup_common(pdev, hws);
+ ret = meson_g12a_dvfs_setup_common(dev, hws);
if (ret)
return ret;
/* Setup clock notifier for cpu_clk mux */
- notifier_clk_name = clk_hw_get_name(&g12a_cpu_clk.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_cpu_clk_mux_nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_cpu_clk.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_cpu_clk_mux_nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the cpu_clk notifier\n");
+ dev_err(dev, "failed to register the cpu_clk notifier\n");
return ret;
}
/* Setup clock notifier for sys_pll */
- notifier_clk_name = clk_hw_get_name(&g12a_sys_pll.hw);
- notifier_clk = __clk_lookup(notifier_clk_name);
- ret = clk_notifier_register(notifier_clk, &g12a_sys_pll_nb_data.nb);
+ notifier_clk = devm_clk_hw_get_clk(dev, &g12a_sys_pll.hw, DVFS_CON_ID);
+ ret = devm_clk_notifier_register(dev, notifier_clk,
+ &g12a_sys_pll_nb_data.nb);
if (ret) {
- dev_err(&pdev->dev, "failed to register the sys_pll notifier\n");
+ dev_err(dev, "failed to register the sys_pll notifier\n");
return ret;
}
@@ -5370,6 +5447,7 @@ static const struct of_device_id clkc_match_table[] = {
},
{}
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver g12a_driver = {
.probe = meson_g12a_probe,
@@ -5379,4 +5457,5 @@ static struct platform_driver g12a_driver = {
},
};
-builtin_platform_driver(g12a_driver);
+module_platform_driver(g12a_driver);
+MODULE_LICENSE("GPL v2");
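The conversion drops the fragile __clk_lookup()-by-name pattern: devm_clk_hw_get_clk() takes a struct clk reference tied to the provider device, and devm_clk_notifier_register() unregisters automatically on unbind, so the error paths need no manual cleanup. The repeated steps condense to a helper like this (not in the patch, shown only to summarize the pattern):

static int setup_dvfs_notifier(struct device *dev, struct clk_hw *hw,
			       struct notifier_block *nb)
{
	struct clk *clk = devm_clk_hw_get_clk(dev, hw, DVFS_CON_ID);

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	return devm_clk_notifier_register(dev, clk, nb);
}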
diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h
index 69b6a69549c7..a97613df38b3 100644
--- a/drivers/clk/meson/g12a.h
+++ b/drivers/clk/meson/g12a.h
@@ -264,8 +264,9 @@
#define CLKID_NNA_AXI_CLK_DIV 263
#define CLKID_NNA_CORE_CLK_SEL 265
#define CLKID_NNA_CORE_CLK_DIV 266
+#define CLKID_MIPI_DSI_PXCLK_DIV 268
-#define NR_CLKS 268
+#define NR_CLKS 271
/* include the CLKIDs that have been made part of the DT binding */
#include <dt-bindings/clock/g12a-clkc.h>
diff --git a/drivers/clk/meson/gxbb-aoclk.c b/drivers/clk/meson/gxbb-aoclk.c
index e940861a396b..fce95cf89836 100644
--- a/drivers/clk/meson/gxbb-aoclk.c
+++ b/drivers/clk/meson/gxbb-aoclk.c
@@ -5,6 +5,7 @@
*/
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "meson-aoclk.h"
#include "gxbb-aoclk.h"
@@ -287,6 +288,7 @@ static const struct of_device_id gxbb_aoclkc_match_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(of, gxbb_aoclkc_match_table);
static struct platform_driver gxbb_aoclkc_driver = {
.probe = meson_aoclkc_probe,
@@ -295,4 +297,5 @@ static struct platform_driver gxbb_aoclkc_driver = {
.of_match_table = gxbb_aoclkc_match_table,
},
};
-builtin_platform_driver(gxbb_aoclkc_driver);
+module_platform_driver(gxbb_aoclkc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 0a68af6eec3d..d6eed760327d 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -8,6 +8,7 @@
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/module.h>
#include "gxbb.h"
#include "clk-regmap.h"
@@ -3519,6 +3520,7 @@ static const struct of_device_id clkc_match_table[] = {
{ .compatible = "amlogic,gxl-clkc", .data = &gxl_clkc_data },
{},
};
+MODULE_DEVICE_TABLE(of, clkc_match_table);
static struct platform_driver gxbb_driver = {
.probe = meson_eeclkc_probe,
@@ -3528,4 +3530,5 @@ static struct platform_driver gxbb_driver = {
},
};
-builtin_platform_driver(gxbb_driver);
+module_platform_driver(gxbb_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
index 3a6d84cd6601..27cd2c1f3f61 100644
--- a/drivers/clk/meson/meson-aoclk.c
+++ b/drivers/clk/meson/meson-aoclk.c
@@ -14,6 +14,8 @@
#include <linux/reset-controller.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
+#include <linux/module.h>
+
#include <linux/slab.h>
#include "meson-aoclk.h"
@@ -84,3 +86,5 @@ int meson_aoclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
(void *) data->hw_data);
}
+EXPORT_SYMBOL_GPL(meson_aoclkc_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/meson/meson-eeclk.c b/drivers/clk/meson/meson-eeclk.c
index a7cb1e7aedc4..8d5a5dab955a 100644
--- a/drivers/clk/meson/meson-eeclk.c
+++ b/drivers/clk/meson/meson-eeclk.c
@@ -9,6 +9,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/module.h>
#include "clk-regmap.h"
#include "meson-eeclk.h"
@@ -54,3 +55,5 @@ int meson_eeclkc_probe(struct platform_device *pdev)
return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
data->hw_onecell_data);
}
+EXPORT_SYMBOL_GPL(meson_eeclkc_probe);
+MODULE_LICENSE("GPL v2");
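Making these Meson controllers tristate needs the three pieces visible across the hunks above: the shared probe helpers exported with EXPORT_SYMBOL_GPL(), an OF match table published with MODULE_DEVICE_TABLE() so udev can autoload the module, and module_platform_driver() in place of builtin_platform_driver(). A minimal loadable skeleton under assumed names:

static const struct of_device_id foo_clkc_match_table[] = {
	{ .compatible = "vendor,foo-clkc" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_clkc_match_table);	/* autoload hint */

static struct platform_driver foo_clkc_driver = {
	.probe = meson_eeclkc_probe,	/* exported shared probe */
	.driver = {
		.name = "foo-clkc",
		.of_match_table = foo_clkc_match_table,
	},
};
module_platform_driver(foo_clkc_driver);
MODULE_LICENSE("GPL v2");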
diff --git a/drivers/clk/mvebu/armada-37xx-xtal.c b/drivers/clk/mvebu/armada-37xx-xtal.c
index e9e306d4e9af..41271351cf1f 100644
--- a/drivers/clk/mvebu/armada-37xx-xtal.c
+++ b/drivers/clk/mvebu/armada-37xx-xtal.c
@@ -13,8 +13,8 @@
#include <linux/platform_device.h>
#include <linux/regmap.h>
-#define NB_GPIO1_LATCH 0xC
-#define XTAL_MODE BIT(31)
+#define NB_GPIO1_LATCH 0x8
+#define XTAL_MODE BIT(9)
static int armada_3700_xtal_clock_probe(struct platform_device *pdev)
{
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 3a965bd326d5..d32bb12cd8d0 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -44,7 +44,7 @@ config QCOM_CLK_APCC_MSM8996
help
Support for the CPU clock controller on msm8996 devices.
Say Y if you want to support CPU clock scaling using CPUfreq
- drivers for dyanmic power management.
+ drivers for dynamic power management.
config QCOM_CLK_RPM
tristate "RPM based Clock Controller"
@@ -290,6 +290,15 @@ config QCS_GCC_404
Say Y if you want to use multimedia devices or peripheral
devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc.
+config SC_CAMCC_7180
+ tristate "SC7180 Camera Clock Controller"
+ select SC_GCC_7180
+ help
+	  Support for the camera clock controller on Qualcomm Technologies, Inc.
+	  SC7180 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
select SC_GCC_7180
@@ -413,6 +422,14 @@ config SDM_LPASSCC_845
Say Y if you want to use the LPASS branch clocks of the LPASS clock
controller to reset the LPASS subsystem.
+config SDX_GCC_55
+ tristate "SDX55 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on SDX55 devices.
+ Say Y if you want to use peripheral devices such as UART,
+ SPI, I2C, USB, SD/UFS, PCIe etc.
+
config SM_DISPCC_8250
tristate "SM8150 and SM8250 Display Clock Controller"
depends on SM_GCC_8150 || SM_GCC_8250
@@ -502,4 +519,10 @@ config KRAITCC
Support for the Krait CPU clocks on Qualcomm devices.
Say Y if you want to support CPU frequency scaling.
+config CLK_GFM_LPASS_SM8250
+ tristate "SM8250 GFM LPASS Clocks"
+ help
+	  Support for the Glitch Free Mux (GFM) low power audio
+	  subsystem (LPASS) clocks found on SM8250 SoCs.
+
endif
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 11ae86febe87..9e5e0e3cb7b4 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -19,6 +19,7 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
# Keep alphabetically sorted by config
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_CLK_GFM_LPASS_SM8250) += lpass-gfm-sm8250.o
obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o
obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o
obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o
@@ -51,6 +52,7 @@ obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
+obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
@@ -64,6 +66,7 @@ obj-$(CONFIG_SDM_GCC_845) += gcc-sdm845.o
obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o
obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
+obj-$(CONFIG_SDX_GCC_55) += gcc-sdx55.o
obj-$(CONFIG_SM_DISPCC_8250) += dispcc-sm8250.o
obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o
obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o
diff --git a/drivers/clk/qcom/camcc-sc7180.c b/drivers/clk/qcom/camcc-sc7180.c
new file mode 100644
index 000000000000..dbac5651ab85
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc7180.c
@@ -0,0 +1,1732 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-sc7180.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_AUX,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_MAIN,
+ P_CORE_BI_PLL_TEST_SE,
+};
+
+static const struct pll_vco agera_vco[] = {
+ { 600000000, 3300000000UL, 0 },
+};
+
+static const struct pll_vco fabia_vco[] = {
+ { 249600000, 2000000000UL, 0 },
+};
+
+/* 600MHz configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x1f,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .user_ctl_hi_val = 0x00004805,
+ .user_ctl_val = 0x00000001,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* 860MHz configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x2a,
+ .alpha = 0x1555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+/* 1920MHz configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x64,
+ .config_ctl_val = 0x20000800,
+ .config_ctl_hi_val = 0x400003D2,
+ .test_ctl_val = 0x04000400,
+ .test_ctl_hi_val = 0x00004000,
+ .user_ctl_val = 0x0000030F,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = agera_vco,
+ .num_vco = ARRAY_SIZE(agera_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_agera_ops,
+ },
+ },
+};
+
+static struct clk_fixed_factor cam_cc_pll2_out_early = {
+ .mult = 1,
+ .div = 2,
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_early",
+ .parent_names = (const char *[]){ "cam_cc_pll2" },
+ .num_parents = 1,
+ .ops = &clk_fixed_factor_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_AGERA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_aux",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 1080MHz configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x38,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002067,
+ .test_ctl_val = 0x40000000,
+ .user_ctl_hi_val = 0x00004805,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = fabia_vco,
+ .num_vco = ARRAY_SIZE(fabia_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fabia_ops,
+ },
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll2_out_aux.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll2_out_early.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 4 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll2_out_early.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL3_OUT_MAIN, 5 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1.clkr.hw },
+ { .hw = &cam_cc_pll3.clkr.hw },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+};
+
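+/*
+ * Frequency table entries are F(rate, parent, pre_div, m, n); the qcom
+ * F() macro stores pre_div as (2 * div - 1), which is what makes the
+ * half-integer dividers such as 2.5 below representable.
+ */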
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x6010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xb0d8,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xb14c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(270000000, P_CAM_CC_PLL3_OUT_MAIN, 4, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x9064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x5004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5028,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x504c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0x5070,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x603c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xb088,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0x9010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(270000000, P_CAM_CC_PLL3_OUT_MAIN, 4, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL2_OUT_EARLY, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0x903c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xa034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_clk_src = {
+ .cmd_rcgr = 0xb004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_csid_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_csid_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(240000000, P_CAM_CC_PLL0_OUT_EVEN, 2.5, 0, 0),
+ F(360000000, P_CAM_CC_PLL3_OUT_MAIN, 3, 0, 0),
+ F(432000000, P_CAM_CC_PLL3_OUT_MAIN, 2.5, 0, 0),
+ F(540000000, P_CAM_CC_PLL3_OUT_MAIN, 2, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_jpeg_clk_src[] = {
+ F(66666667, P_CAM_CC_PLL0_OUT_EVEN, 9, 0, 0),
+ F(133333333, P_CAM_CC_PLL0_OUT_EVEN, 4.5, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(320000000, P_CAM_CC_PLL2_OUT_EARLY, 3, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xb04c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_jpeg_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(216000000, P_CAM_CC_PLL3_OUT_MAIN, 5, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(404000000, P_CAM_CC_PLL1_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xb0f8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
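+/* Rates derive from the 480 MHz PLL2 aux output: 480/10 * 1/2 = 24 MHz, 480/7.5 = 64 MHz */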
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_CAM_CC_PLL2_OUT_AUX, 10, 1, 2),
+ F(64000000, P_CAM_CC_PLL2_OUT_AUX, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0x4004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0x4024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0x4044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0x4064,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0x4084,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x6058,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x6070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x6054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x6038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x6028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xb124,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb124,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xb0f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xb164,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb164,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
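+/* BRANCH_HALT_DELAY: no halt bit to poll here; a fixed delay is used instead */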
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xb144,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xb144,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xb11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0x501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0x5040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0x5064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0x5088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0x5020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0x5044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0x5068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0x508c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x508c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xb0a0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0x9080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0x9028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0x907c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x907c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0x9054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0x9038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xa058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xa04c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa04c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xa030,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa030,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_cphy_rx_clk = {
+ .halt_reg = 0xb044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_csid_clk = {
+ .halt_reg = 0xb03c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb03c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x7040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x703c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x703c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xb110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0x401c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x401c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0x403c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x403c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0x405c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x405c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0x407c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x407c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0x409c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x409c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_soc_ahb_clk = {
+ .halt_reg = 0xb140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_soc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sys_tmr_clk = {
+ .halt_reg = 0xb0a8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sys_tmr_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
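+/*
+ * GDSCs flagged HW_CTRL are handed over to hardware control once enabled,
+ * letting the camera blocks power-collapse autonomously.
+ */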
+static struct gdsc bps_gdsc = {
+ .gdscr = 0x6004,
+ .pd = {
+ .name = "bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc ife_0_gdsc = {
+ .gdscr = 0x9004,
+ .pd = {
+ .name = "ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ife_1_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc ipe_0_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
+};
+
+static struct gdsc titan_top_gdsc = {
+ .gdscr = 0xb134,
+ .pd = {
+ .name = "titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
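+/* The fixed-factor PLL2 tap has no regmap, so it is exposed through clk_hws */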
+static struct clk_hw *cam_cc_sc7180_hws[] = {
+ [CAM_CC_PLL2_OUT_EARLY] = &cam_cc_pll2_out_early.hw,
+};
+
+static struct clk_regmap *cam_cc_sc7180_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK] = &cam_cc_ife_lite_clk.clkr,
+ [CAM_CC_IFE_LITE_CLK_SRC] = &cam_cc_ife_lite_clk_src.clkr,
+ [CAM_CC_IFE_LITE_CPHY_RX_CLK] = &cam_cc_ife_lite_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK] = &cam_cc_ife_lite_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_CSID_CLK_SRC] = &cam_cc_ife_lite_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_SOC_AHB_CLK] = &cam_cc_soc_ahb_clk.clkr,
+ [CAM_CC_SYS_TMR_CLK] = &cam_cc_sys_tmr_clk.clkr,
+};
+
+static struct gdsc *cam_cc_sc7180_gdscs[] = {
+ [BPS_GDSC] = &bps_gdsc,
+ [IFE_0_GDSC] = &ife_0_gdsc,
+ [IFE_1_GDSC] = &ife_1_gdsc,
+ [IPE_0_GDSC] = &ipe_0_gdsc,
+ [TITAN_TOP_GDSC] = &titan_top_gdsc,
+};
+
+static const struct regmap_config cam_cc_sc7180_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xd028,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc7180_desc = {
+ .config = &cam_cc_sc7180_regmap_config,
+ .clk_hws = cam_cc_sc7180_hws,
+ .num_clk_hws = ARRAY_SIZE(cam_cc_sc7180_hws),
+ .clks = cam_cc_sc7180_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc7180_clocks),
+ .gdscs = cam_cc_sc7180_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc7180_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc7180_match_table[] = {
+ { .compatible = "qcom,sc7180-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc7180_match_table);
+
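+/*
+ * The "xo" and "iface" clocks are managed through pm_clk, so the runtime
+ * PM callbacks (pm_clk_suspend/pm_clk_resume below) gate them while the
+ * controller is runtime suspended.
+ */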
+static int cam_cc_sc7180_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret < 0)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "xo");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to acquire XO clock\n");
+ goto disable_pm_runtime;
+ }
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to acquire iface clock\n");
+ goto disable_pm_runtime;
+ }
+
+ ret = pm_runtime_get(&pdev->dev);
+ if (ret)
+ goto destroy_pm_clk;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc7180_desc);
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ pm_runtime_put(&pdev->dev);
+ goto destroy_pm_clk;
+ }
+
+ clk_fabia_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_fabia_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_agera_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_fabia_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+
+ ret = qcom_cc_really_probe(pdev, &cam_cc_sc7180_desc, regmap);
+ pm_runtime_put(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register CAM CC clocks\n");
+ goto destroy_pm_clk;
+ }
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static const struct dev_pm_ops cam_cc_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver cam_cc_sc7180_driver = {
+ .probe = cam_cc_sc7180_probe,
+ .driver = {
+ .name = "cam_cc-sc7180",
+ .of_match_table = cam_cc_sc7180_match_table,
+ .pm = &cam_cc_pm_ops,
+ },
+};
+
+static int __init cam_cc_sc7180_init(void)
+{
+ return platform_driver_register(&cam_cc_sc7180_driver);
+}
+subsys_initcall(cam_cc_sc7180_init);
+
+static void __exit cam_cc_sc7180_exit(void)
+{
+ platform_driver_unregister(&cam_cc_sc7180_driver);
+}
+module_exit(cam_cc_sc7180_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SC7180 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index 564431130a76..21c357c26ec4 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -116,6 +116,16 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = {
[PLL_OFF_OPMODE] = 0x38,
[PLL_OFF_ALPHA_VAL] = 0x40,
},
+ [CLK_ALPHA_PLL_TYPE_AGERA] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_USER_CTL] = 0x0c,
+ [PLL_OFF_CONFIG_CTL] = 0x10,
+ [PLL_OFF_CONFIG_CTL_U] = 0x14,
+ [PLL_OFF_TEST_CTL] = 0x18,
+ [PLL_OFF_TEST_CTL_U] = 0x1c,
+ [PLL_OFF_STATUS] = 0x2c,
+ },
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
@@ -207,6 +217,13 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
#define wait_for_pll_update_ack_clear(pll) \
wait_for_pll(pll, ALPHA_PLL_ACK_LATCH, 1, "update_ack_clear")
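+/* Write only non-zero fields so unprogrammed registers keep their reset defaults */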
+static void clk_alpha_pll_write_config(struct regmap *regmap, unsigned int reg,
+ unsigned int val)
+{
+ if (val)
+ regmap_write(regmap, reg, val);
+}
+
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
@@ -1004,33 +1021,19 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
{
u32 val, mask;
- if (config->l)
- regmap_write(regmap, PLL_L_VAL(pll), config->l);
-
- if (config->alpha)
- regmap_write(regmap, PLL_FRAC(pll), config->alpha);
-
- if (config->config_ctl_val)
- regmap_write(regmap, PLL_CONFIG_CTL(pll),
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_FRAC(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
config->config_ctl_val);
-
- if (config->config_ctl_hi_val)
- regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
config->config_ctl_hi_val);
-
- if (config->user_ctl_val)
- regmap_write(regmap, PLL_USER_CTL(pll), config->user_ctl_val);
-
- if (config->user_ctl_hi_val)
- regmap_write(regmap, PLL_USER_CTL_U(pll),
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
config->user_ctl_hi_val);
-
- if (config->test_ctl_val)
- regmap_write(regmap, PLL_TEST_CTL(pll),
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
config->test_ctl_val);
-
- if (config->test_ctl_hi_val)
- regmap_write(regmap, PLL_TEST_CTL_U(pll),
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
config->test_ctl_hi_val);
if (config->post_div_mask) {
@@ -1145,25 +1148,38 @@ static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw,
return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width);
}
+/*
+ * Due to the limited number of bits for fractional rate programming, the
+ * rounded up rate could be marginally higher than the requested rate.
+ */
+static int alpha_pll_check_rate_margin(struct clk_hw *hw,
+ unsigned long rrate, unsigned long rate)
+{
+ unsigned long rate_margin = rate + PLL_RATE_MARGIN;
+
+ if (rrate > rate_margin || rrate < rate) {
+ pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
+ clk_hw_get_name(hw), rrate, rate, rate_margin);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long prate)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
u32 l, alpha_width = pll_alpha_width(pll);
+ unsigned long rrate;
+ int ret;
u64 a;
- unsigned long rrate, max = rate + PLL_RATE_MARGIN;
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
- /*
- * Due to limited number of bits for fractional rate programming, the
- * rounded up rate could be marginally higher than the requested rate.
- */
- if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
- pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n",
- clk_hw_get_name(hw), rrate, rate, max);
- return -EINVAL;
- }
+ ret = alpha_pll_check_rate_margin(hw, rrate, rate);
+ if (ret < 0)
+ return ret;
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
regmap_write(pll->clkr.regmap, PLL_FRAC(pll), a);
@@ -1206,12 +1222,10 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw)
rrate = alpha_pll_round_rate(cal_freq, clk_hw_get_rate(parent_hw),
&cal_l, &a, alpha_width);
- /*
- * Due to a limited number of bits for fractional rate programming, the
- * rounded up rate could be marginally higher than the requested rate.
- */
- if (rrate > (cal_freq + PLL_RATE_MARGIN) || rrate < cal_freq)
- return -EINVAL;
+
+ ret = alpha_pll_check_rate_margin(hw, rrate, cal_freq);
+ if (ret < 0)
+ return ret;
/* Setup PLL for calibration frequency */
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), cal_l);
@@ -1388,49 +1402,27 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops);
void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config)
{
- if (config->l)
- regmap_write(regmap, PLL_L_VAL(pll), config->l);
-
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL);
-
- if (config->alpha)
- regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha);
-
- if (config->config_ctl_val)
- regmap_write(regmap, PLL_CONFIG_CTL(pll),
- config->config_ctl_val);
-
- if (config->config_ctl_hi_val)
- regmap_write(regmap, PLL_CONFIG_CTL_U(pll),
- config->config_ctl_hi_val);
-
- if (config->config_ctl_hi1_val)
- regmap_write(regmap, PLL_CONFIG_CTL_U1(pll),
- config->config_ctl_hi1_val);
-
- if (config->user_ctl_val)
- regmap_write(regmap, PLL_USER_CTL(pll),
- config->user_ctl_val);
-
- if (config->user_ctl_hi_val)
- regmap_write(regmap, PLL_USER_CTL_U(pll),
- config->user_ctl_hi_val);
-
- if (config->user_ctl_hi1_val)
- regmap_write(regmap, PLL_USER_CTL_U1(pll),
- config->user_ctl_hi1_val);
-
- if (config->test_ctl_val)
- regmap_write(regmap, PLL_TEST_CTL(pll),
- config->test_ctl_val);
-
- if (config->test_ctl_hi_val)
- regmap_write(regmap, PLL_TEST_CTL_U(pll),
- config->test_ctl_hi_val);
-
- if (config->test_ctl_hi1_val)
- regmap_write(regmap, PLL_TEST_CTL_U1(pll),
- config->test_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U1(pll),
+ config->config_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U(pll),
+ config->user_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL_U1(pll),
+ config->user_ctl_hi1_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U1(pll),
+ config->test_ctl_hi1_val);
regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS,
PLL_UPDATE_BYPASS);
@@ -1490,14 +1482,9 @@ static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate,
rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
- /*
- * Due to a limited number of bits for fractional rate programming, the
- * rounded up rate could be marginally higher than the requested rate.
- */
- if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) {
- pr_err("Call set rate on the PLL with rounded rates!\n");
- return -EINVAL;
- }
+ ret = alpha_pll_check_rate_margin(hw, rrate, rate);
+ if (ret < 0)
+ return ret;
regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
@@ -1561,3 +1548,55 @@ const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = {
.set_rate = clk_alpha_pll_postdiv_fabia_set_rate,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops);
+
+void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config)
+{
+ clk_alpha_pll_write_config(regmap, PLL_L_VAL(pll), config->l);
+ clk_alpha_pll_write_config(regmap, PLL_ALPHA_VAL(pll), config->alpha);
+ clk_alpha_pll_write_config(regmap, PLL_USER_CTL(pll),
+ config->user_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL(pll),
+ config->config_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_CONFIG_CTL_U(pll),
+ config->config_ctl_hi_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL(pll),
+ config->test_ctl_val);
+ clk_alpha_pll_write_config(regmap, PLL_TEST_CTL_U(pll),
+ config->test_ctl_hi_val);
+}
+EXPORT_SYMBOL_GPL(clk_agera_pll_configure);
+
+static int clk_alpha_pll_agera_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+ u32 l, alpha_width = pll_alpha_width(pll);
+ int ret;
+ unsigned long rrate;
+ u64 a;
+
+ rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width);
+ ret = alpha_pll_check_rate_margin(hw, rrate, rate);
+ if (ret < 0)
+ return ret;
+
+ /* change L_VAL without having to go through the power on sequence */
+ regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l);
+ regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a);
+
+ if (clk_hw_is_enabled(hw))
+ return wait_for_pll_enable_lock(pll);
+
+ return 0;
+}
+
+const struct clk_ops clk_alpha_pll_agera_ops = {
+ .enable = clk_alpha_pll_enable,
+ .disable = clk_alpha_pll_disable,
+ .is_enabled = clk_alpha_pll_is_enabled,
+ .recalc_rate = alpha_pll_fabia_recalc_rate,
+ .round_rate = clk_alpha_pll_round_rate,
+ .set_rate = clk_alpha_pll_agera_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_agera_ops);
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index d3201b87c0cd..0ea30d2f3da1 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -15,6 +15,7 @@ enum {
CLK_ALPHA_PLL_TYPE_FABIA,
CLK_ALPHA_PLL_TYPE_TRION,
CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION,
+ CLK_ALPHA_PLL_TYPE_AGERA,
CLK_ALPHA_PLL_TYPE_MAX,
};
@@ -141,6 +142,7 @@ extern const struct clk_ops clk_alpha_pll_postdiv_trion_ops;
extern const struct clk_ops clk_alpha_pll_lucid_ops;
#define clk_alpha_pll_fixed_lucid_ops clk_alpha_pll_fixed_trion_ops
extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops;
+extern const struct clk_ops clk_alpha_pll_agera_ops;
void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
@@ -148,6 +150,8 @@ void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
const struct alpha_pll_config *config);
+void clk_agera_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+ const struct alpha_pll_config *config);
#define clk_lucid_pll_configure(pll, regmap, config) \
clk_trion_pll_configure(pll, regmap, config)
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index e2c669b08aff..6a2a13c5058e 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -349,6 +349,7 @@ DEFINE_CLK_RPMH_VRM(sdm845, rf_clk2, rf_clk2_ao, "rfclka2", 1);
DEFINE_CLK_RPMH_VRM(sdm845, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_VRM(sm8150, rf_clk3, rf_clk3_ao, "rfclka3", 1);
DEFINE_CLK_RPMH_BCM(sdm845, ipa, "IP0");
+DEFINE_CLK_RPMH_BCM(sdm845, ce, "CE0");
static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
@@ -364,6 +365,7 @@ static struct clk_hw *sdm845_rpmh_clocks[] = {
[RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
[RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
[RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_CE_CLK] = &sdm845_ce.hw,
};
static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
@@ -371,6 +373,25 @@ static const struct clk_rpmh_desc clk_rpmh_sdm845 = {
.num_clks = ARRAY_SIZE(sdm845_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sdx55, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(sdx55, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_BCM(sdx55, qpic_clk, "QP0");
+
+static struct clk_hw *sdx55_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_RF_CLK1] = &sdx55_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdx55_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &sdx55_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &sdx55_rf_clk2_ao.hw,
+ [RPMH_QPIC_CLK] = &sdx55_qpic_clk.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sdx55 = {
+ .clks = sdx55_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sdx55_rpmh_clocks),
+};
+
static struct clk_hw *sm8150_rpmh_clocks[] = {
[RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
[RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
@@ -432,6 +453,39 @@ static const struct clk_rpmh_desc clk_rpmh_sm8250 = {
.num_clks = ARRAY_SIZE(sm8250_rpmh_clocks),
};
+DEFINE_CLK_RPMH_VRM(sm8350, div_clk1, div_clk1_ao, "divclka1", 2);
+DEFINE_CLK_RPMH_VRM(sm8350, rf_clk4, rf_clk4_ao, "rfclka4", 1);
+DEFINE_CLK_RPMH_VRM(sm8350, rf_clk5, rf_clk5_ao, "rfclka5", 1);
+DEFINE_CLK_RPMH_BCM(sm8350, pka, "PKA0");
+DEFINE_CLK_RPMH_BCM(sm8350, hwkm, "HK0");
+
+static struct clk_hw *sm8350_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw,
+ [RPMH_DIV_CLK1] = &sm8350_div_clk1.hw,
+ [RPMH_DIV_CLK1_A] = &sm8350_div_clk1_ao.hw,
+ [RPMH_LN_BB_CLK1] = &sm8250_ln_bb_clk1.hw,
+ [RPMH_LN_BB_CLK1_A] = &sm8250_ln_bb_clk1_ao.hw,
+ [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw,
+ [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw,
+ [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw,
+ [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &sm8350_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &sm8350_rf_clk4_ao.hw,
+ [RPMH_RF_CLK5] = &sm8350_rf_clk5.hw,
+ [RPMH_RF_CLK5_A] = &sm8350_rf_clk5_ao.hw,
+ [RPMH_IPA_CLK] = &sdm845_ipa.hw,
+ [RPMH_PKA_CLK] = &sm8350_pka.hw,
+ [RPMH_HWKM_CLK] = &sm8350_hwkm.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_sm8350 = {
+ .clks = sm8350_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(sm8350_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -517,8 +571,10 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180},
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
+ { .compatible = "qcom,sdx55-rpmh-clk", .data = &clk_rpmh_sdx55},
{ .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150},
{ .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250},
+ { .compatible = "qcom,sm8350-rpmh-clk", .data = &clk_rpmh_sm8350},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index 07a98d3f882d..588575e1169d 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -963,6 +963,7 @@ static struct gdsc mdss_gdsc = {
},
.pwrsts = PWRSTS_OFF_ON,
.flags = HW_CTRL,
+ .supply = "mmcx",
};
static struct clk_regmap *disp_cc_sm8250_clocks[] = {
diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c
index 68d8f7aaf64e..d82d725ac231 100644
--- a/drivers/clk/qcom/gcc-sc7180.c
+++ b/drivers/clk/qcom/gcc-sc7180.c
@@ -642,7 +642,7 @@ static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
.name = "gcc_sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_0,
.num_parents = 4,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
@@ -651,6 +651,7 @@ static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
F(9600000, P_BI_TCXO, 2, 0, 0),
F(19200000, P_BI_TCXO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
{ }
@@ -666,7 +667,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
.name = "gcc_sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_5,
.num_parents = 5,
- .ops = &clk_rcg2_ops,
+ .ops = &clk_rcg2_floor_ops,
},
};
diff --git a/drivers/clk/qcom/gcc-sdx55.c b/drivers/clk/qcom/gcc-sdx55.c
new file mode 100644
index 000000000000..e3b9030b2bae
--- /dev/null
+++ b/drivers/clk/qcom/gcc-sdx55.c
@@ -0,0 +1,1659 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Ltd.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-sdx55.h>
+
+#include "common.h"
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_EVEN,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_EVEN,
+ P_GPLL5_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_lucid_even[] = {
+ { 0x0, 1 },
+ { 0x1, 2 },
+ { 0x3, 4 },
+ { 0x7, 8 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_even = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_lucid_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_lucid_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_even = {
+ .offset = 0x76000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_lucid_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_lucid_even),
+ .width = 4,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &gpll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x74000,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .clkr = {
+ .enable_reg = 0x6d000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_fixed_lucid_ops,
+ },
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct clk_parent_data gcc_parents_0_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_EVEN, 2 },
+ { P_GPLL5_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4_out_even.clkr.hw },
+ { .hw = &gpll5.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk", .name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_EVEN, 2 },
+ { P_GPLL0_OUT_EVEN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4_out_even.clkr.hw },
+ { .hw = &gpll0_out_even.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_i2c_apps_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
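+
+/*
+ * Each F() entry is (frequency, parent source, pre-divider, M, N).
+ * Half-integer pre-dividers such as 12.5 are legal because the RCG
+ * register field encodes 2*div - 1, and M/N program the MND counter
+ * (0/0 leaves it bypassed). This is a reading of the F() macro in
+ * clk-rcg.h, not chip documentation.
+ */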
+
+static struct clk_rcg2 gcc_blsp1_qup1_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x11024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_qup1_spi_apps_clk_src[] = {
+ F(960000, P_BI_TCXO, 10, 1, 2),
+ F(4800000, P_BI_TCXO, 4, 0, 0),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(15000000, P_GPLL0_OUT_EVEN, 5, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL0_OUT_MAIN, 12.5, 1, 2),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_qup1_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1100c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x13024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup2_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1300c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x15024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup3_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1500c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_i2c_apps_clk_src = {
+ .cmd_rcgr = 0x17024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_i2c_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_qup4_spi_apps_clk_src = {
+ .cmd_rcgr = 0x1700c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_uart1_apps_clk_src[] = {
+ F(3686400, P_GPLL0_OUT_EVEN, 1, 192, 15625),
+ F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625),
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625),
+ F(16000000, P_GPLL0_OUT_EVEN, 1, 4, 75),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(19354839, P_GPLL0_OUT_MAIN, 15.5, 1, 2),
+ F(20000000, P_GPLL0_OUT_MAIN, 15, 1, 2),
+ F(20689655, P_GPLL0_OUT_MAIN, 14.5, 1, 2),
+ F(21428571, P_GPLL0_OUT_MAIN, 14, 1, 2),
+ F(22222222, P_GPLL0_OUT_MAIN, 13.5, 1, 2),
+ F(23076923, P_GPLL0_OUT_MAIN, 13, 1, 2),
+ F(24000000, P_GPLL0_OUT_MAIN, 5, 1, 5),
+ F(25000000, P_GPLL0_OUT_MAIN, 12, 1, 2),
+ F(26086957, P_GPLL0_OUT_MAIN, 11.5, 1, 2),
+ F(27272727, P_GPLL0_OUT_MAIN, 11, 1, 2),
+ F(28571429, P_GPLL0_OUT_MAIN, 10.5, 1, 2),
+ F(32000000, P_GPLL0_OUT_MAIN, 1, 4, 75),
+ F(40000000, P_GPLL0_OUT_MAIN, 15, 0, 0),
+ F(46400000, P_GPLL0_OUT_MAIN, 1, 29, 375),
+ F(48000000, P_GPLL0_OUT_MAIN, 12.5, 0, 0),
+ F(51200000, P_GPLL0_OUT_MAIN, 1, 32, 375),
+ F(56000000, P_GPLL0_OUT_MAIN, 1, 7, 75),
+ F(58982400, P_GPLL0_OUT_MAIN, 1, 1536, 15625),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ F(63157895, P_GPLL0_OUT_MAIN, 9.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_blsp1_uart1_apps_clk_src = {
+ .cmd_rcgr = 0x1200c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart2_apps_clk_src = {
+ .cmd_rcgr = 0x1400c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart3_apps_clk_src = {
+ .cmd_rcgr = 0x1600c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_blsp1_uart4_apps_clk_src = {
+ .cmd_rcgr = 0x1800c,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_blsp1_uart1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
+ .cmd_rcgr = 0x24010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk_src",
+ .parent_data = gcc_parents_0_ao,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_cpuss_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
+ .cmd_rcgr = 0x2402c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk_src",
+ .parent_data = gcc_parents_0_ao,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_clk_src[] = {
+ F(2500000, P_BI_TCXO, 1, 25, 192),
+ F(5000000, P_BI_TCXO, 1, 25, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(250000000, P_GPLL4_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_clk_src = {
+ .cmd_rcgr = 0x47020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_emac_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_emac_ptp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(230400000, P_GPLL5_OUT_MAIN, 3.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_emac_ptp_clk_src = {
+ .cmd_rcgr = 0x47038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_emac_ptp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_emac_ptp_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x2b004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x2c004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x2d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_pcie_aux_phy_clk_src = {
+ .cmd_rcgr = 0x37034,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_phy_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_rchng_phy_clk_src[] = {
+ F(100000000, P_GPLL0_OUT_EVEN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pcie_rchng_phy_clk_src = {
+ .cmd_rcgr = 0x37050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_pcie_rchng_phy_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x19010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0xf00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_master_clk_src = {
+ .cmd_rcgr = 0xb024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_mock_utmi_clk_src = {
+ .cmd_rcgr = 0xb03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb3_phy_aux_clk_src[] = {
+ F(1000000, P_BI_TCXO, 1, 5, 96),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb3_phy_aux_clk_src = {
+ .cmd_rcgr = 0xb064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_usb3_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ahb_pcie_link_clk = {
+ .halt_reg = 0x22004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+ .halt_reg = 0x10004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+ .halt_reg = 0x11008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup1_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+ .halt_reg = 0x11004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x11004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup1_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+ .halt_reg = 0x13008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup2_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+ .halt_reg = 0x13004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x13004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup2_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+ .halt_reg = 0x15008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup3_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+ .halt_reg = 0x15004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x15004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup3_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_i2c_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup4_i2c_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_qup4_spi_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_qup4_spi_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+ .halt_reg = 0x12004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x12004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+ .halt_reg = 0x14004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x14004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart2_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart2_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+ .halt_reg = 0x16004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x16004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart3_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart3_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+ .halt_reg = 0x18004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x18004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_blsp1_uart4_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_blsp1_uart4_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x1c004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1c004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+ .halt_reg = 0x2100c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2100c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+ .halt_reg = 0x21008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d008,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ce1_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_rbcpr_clk = {
+ .halt_reg = 0x24008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x24008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_rbcpr_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_cpuss_rbcpr_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_axi_clk = {
+ .halt_reg = 0x4701c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_ptp_clk = {
+ .halt_reg = 0x47018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_ptp_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_ptp_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_rgmii_clk = {
+ .halt_reg = 0x47010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_rgmii_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_emac_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_eth_slave_ahb_clk = {
+ .halt_reg = 0x47014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x47014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_eth_slave_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x2c000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2c000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x2d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_gp3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_0_clkref_clk = {
+ .halt_reg = 0x88004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_0_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_aux_clk = {
+ .halt_reg = 0x37024,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_aux_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_cfg_ahb_clk = {
+ .halt_reg = 0x3701c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_mstr_axi_clk = {
+ .halt_reg = 0x37018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_pipe_clk = {
+ .halt_reg = 0x3702c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_rchng_phy_clk = {
+ .halt_reg = 0x37020,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_rchng_phy_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_rchng_phy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_sleep_clk = {
+ .halt_reg = 0x37028,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_sleep_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pcie_aux_phy_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_axi_clk = {
+ .halt_reg = 0x37014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x37014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pcie_slv_q2a_axi_clk = {
+ .halt_reg = 0x37010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x6d010,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pcie_slv_q2a_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x1900c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_pdm2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x19004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x19004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x19004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x19008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x19008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0xf008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0xf004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xf004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_sdcc1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+ .halt_reg = 0xb010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_master_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+ .halt_reg = 0xb020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb30_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_mstr_axi_clk = {
+ .halt_reg = 0xb014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_mstr_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+ .halt_reg = 0xb01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_slv_ahb_clk = {
+ .halt_reg = 0xb018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_slv_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_aux_clk = {
+ .halt_reg = 0xb058,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_aux_clk",
+ .parent_hws = (const struct clk_hw *[]){
+ &gcc_usb3_phy_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0xb05c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xb05c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x88000,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x88000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
+ .halt_reg = 0xe004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0xe004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0xe004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb_phy_cfg_ahb2phy_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_xo_pcie_link_clk = {
+ .halt_reg = 0x22008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x22008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_xo_pcie_link_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc usb30_gdsc = {
+ .gdscr = 0x0b004,
+ .pd = {
+ .name = "usb30_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc pcie_gdsc = {
+ .gdscr = 0x37004,
+ .pd = {
+ .name = "pcie_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc emac_gdsc = {
+ .gdscr = 0x47004,
+ .pd = {
+ .name = "emac_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
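+
+/*
+ * The GDSCs above are the power domains for the USB, PCIe and EMAC
+ * blocks; PWRSTS_OFF_ON means each domain is switched fully off or
+ * fully on, with no retention states.
+ */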
+
+static struct clk_regmap *gcc_sdx55_clocks[] = {
+ [GCC_AHB_PCIE_LINK_CLK] = &gcc_ahb_pcie_link_clk.clkr,
+ [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup1_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP1_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup1_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup2_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP2_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup2_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup3_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP3_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup3_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_I2C_APPS_CLK_SRC] =
+ &gcc_blsp1_qup4_i2c_apps_clk_src.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+ [GCC_BLSP1_QUP4_SPI_APPS_CLK_SRC] =
+ &gcc_blsp1_qup4_spi_apps_clk_src.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+ [GCC_BLSP1_UART1_APPS_CLK_SRC] = &gcc_blsp1_uart1_apps_clk_src.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+ [GCC_BLSP1_UART2_APPS_CLK_SRC] = &gcc_blsp1_uart2_apps_clk_src.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+ [GCC_BLSP1_UART3_APPS_CLK_SRC] = &gcc_blsp1_uart3_apps_clk_src.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+ [GCC_BLSP1_UART4_APPS_CLK_SRC] = &gcc_blsp1_uart4_apps_clk_src.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+ [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+ [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+ [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
+ [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr,
+ [GCC_CPUSS_RBCPR_CLK_SRC] = &gcc_cpuss_rbcpr_clk_src.clkr,
+ [GCC_EMAC_CLK_SRC] = &gcc_emac_clk_src.clkr,
+ [GCC_EMAC_PTP_CLK_SRC] = &gcc_emac_ptp_clk_src.clkr,
+ [GCC_ETH_AXI_CLK] = &gcc_eth_axi_clk.clkr,
+ [GCC_ETH_PTP_CLK] = &gcc_eth_ptp_clk.clkr,
+ [GCC_ETH_RGMII_CLK] = &gcc_eth_rgmii_clk.clkr,
+ [GCC_ETH_SLAVE_AHB_CLK] = &gcc_eth_slave_ahb_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_PCIE_0_CLKREF_CLK] = &gcc_pcie_0_clkref_clk.clkr,
+ [GCC_PCIE_AUX_CLK] = &gcc_pcie_aux_clk.clkr,
+ [GCC_PCIE_AUX_PHY_CLK_SRC] = &gcc_pcie_aux_phy_clk_src.clkr,
+ [GCC_PCIE_CFG_AHB_CLK] = &gcc_pcie_cfg_ahb_clk.clkr,
+ [GCC_PCIE_MSTR_AXI_CLK] = &gcc_pcie_mstr_axi_clk.clkr,
+ [GCC_PCIE_PIPE_CLK] = &gcc_pcie_pipe_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK] = &gcc_pcie_rchng_phy_clk.clkr,
+ [GCC_PCIE_RCHNG_PHY_CLK_SRC] = &gcc_pcie_rchng_phy_clk_src.clkr,
+ [GCC_PCIE_SLEEP_CLK] = &gcc_pcie_sleep_clk.clkr,
+ [GCC_PCIE_SLV_AXI_CLK] = &gcc_pcie_slv_axi_clk.clkr,
+ [GCC_PCIE_SLV_Q2A_AXI_CLK] = &gcc_pcie_slv_q2a_axi_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+ [GCC_USB30_MASTER_CLK_SRC] = &gcc_usb30_master_clk_src.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+ [GCC_USB30_MOCK_UTMI_CLK_SRC] = &gcc_usb30_mock_utmi_clk_src.clkr,
+ [GCC_USB30_MSTR_AXI_CLK] = &gcc_usb30_mstr_axi_clk.clkr,
+ [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+ [GCC_USB30_SLV_AHB_CLK] = &gcc_usb30_slv_ahb_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_AUX_CLK_SRC] = &gcc_usb3_phy_aux_clk_src.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GCC_XO_PCIE_LINK_CLK] = &gcc_xo_pcie_link_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_EVEN] = &gpll4_out_even.clkr,
+ [GPLL5] = &gpll5.clkr,
+};
+
+static const struct qcom_reset_map gcc_sdx55_resets[] = {
+ [GCC_EMAC_BCR] = { 0x47000 },
+ [GCC_PCIE_BCR] = { 0x37000 },
+ [GCC_PCIE_LINK_DOWN_BCR] = { 0x77000 },
+ [GCC_PCIE_PHY_BCR] = { 0x39000 },
+ [GCC_PCIE_PHY_COM_BCR] = { 0x78004 },
+ [GCC_QUSB2PHY_BCR] = { 0xd000 },
+ [GCC_USB30_BCR] = { 0xb000 },
+ [GCC_USB3_PHY_BCR] = { 0xc000 },
+ [GCC_USB3PHY_PHY_BCR] = { 0xc004 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0xe000 },
+};
+
+static struct gdsc *gcc_sdx55_gdscs[] = {
+ [USB30_GDSC] = &usb30_gdsc,
+ [PCIE_GDSC] = &pcie_gdsc,
+ [EMAC_GDSC] = &emac_gdsc,
+};
+
+static const struct regmap_config gcc_sdx55_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x9b040,
+ .fast_io = true,
+};
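+
+/*
+ * fast_io selects spinlock-based locking in regmap, so these MMIO
+ * registers can be accessed from atomic context.
+ */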
+
+static const struct qcom_cc_desc gcc_sdx55_desc = {
+ .config = &gcc_sdx55_regmap_config,
+ .clks = gcc_sdx55_clocks,
+ .num_clks = ARRAY_SIZE(gcc_sdx55_clocks),
+ .resets = gcc_sdx55_resets,
+ .num_resets = ARRAY_SIZE(gcc_sdx55_resets),
+ .gdscs = gcc_sdx55_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_sdx55_gdscs),
+};
+
+static const struct of_device_id gcc_sdx55_match_table[] = {
+ { .compatible = "qcom,gcc-sdx55" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_sdx55_match_table);
+
+static int gcc_sdx55_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &gcc_sdx55_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ /*
+ * Keep the clocks always-ON as they are critical to the functioning
+ * of the system:
+ * GCC_SYS_NOC_CPUSS_AHB_CLK, GCC_CPUSS_AHB_CLK, GCC_CPUSS_GNOC_CLK
+ */
+ regmap_update_bits(regmap, 0x6d008, BIT(0), BIT(0));
+ regmap_update_bits(regmap, 0x6d008, BIT(21), BIT(21));
+ regmap_update_bits(regmap, 0x6d008, BIT(22), BIT(22));
+
+ return qcom_cc_really_probe(pdev, &gcc_sdx55_desc, regmap);
+}
+
+static struct platform_driver gcc_sdx55_driver = {
+ .probe = gcc_sdx55_probe,
+ .driver = {
+ .name = "gcc-sdx55",
+ .of_match_table = gcc_sdx55_match_table,
+ },
+};
+
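+/*
+ * Register at subsys_initcall time rather than module_init so the GCC
+ * clocks exist before consumer drivers begin probing, the usual pattern
+ * for Qualcomm clock controllers.
+ */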
+static int __init gcc_sdx55_init(void)
+{
+ return platform_driver_register(&gcc_sdx55_driver);
+}
+subsys_initcall(gcc_sdx55_init);
+
+static void __exit gcc_sdx55_exit(void)
+{
+ platform_driver_unregister(&gcc_sdx55_driver);
+}
+module_exit(gcc_sdx55_exit);
+
+MODULE_DESCRIPTION("QTI GCC SDX55 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpass-gfm-sm8250.c b/drivers/clk/qcom/lpass-gfm-sm8250.c
new file mode 100644
index 000000000000..d366c7c2abc7
--- /dev/null
+++ b/drivers/clk/qcom/lpass-gfm-sm8250.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LPASS Audio CC and Always ON CC Glitch Free Mux clock driver
+ *
+ * Copyright (c) 2020 Linaro Ltd.
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <dt-bindings/clock/qcom,sm8250-lpass-audiocc.h>
+#include <dt-bindings/clock/qcom,sm8250-lpass-aoncc.h>
+
+struct lpass_gfm {
+ struct device *dev;
+ void __iomem *base;
+};
+
+struct clk_gfm {
+ unsigned int mux_reg;
+ unsigned int mux_mask;
+ struct clk_hw hw;
+ struct lpass_gfm *priv;
+ void __iomem *gfm_mux;
+};
+
+#define GFM_MASK BIT(1)
+#define to_clk_gfm(_hw) container_of(_hw, struct clk_gfm, hw)
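+
+/*
+ * Parent selection is a single register bit: GFM_MASK (bit 1) picks
+ * between the two parents listed in each clock's parent_data. Note that
+ * the per-clock mux_mask field is populated but never read; only the
+ * shared GFM_MASK is tested (an observation from this code, not from
+ * documentation).
+ */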
+
+static u8 clk_gfm_get_parent(struct clk_hw *hw)
+{
+ struct clk_gfm *clk = to_clk_gfm(hw);
+
+ return readl(clk->gfm_mux) & GFM_MASK;
+}
+
+static int clk_gfm_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct clk_gfm *clk = to_clk_gfm(hw);
+ unsigned int val;
+
+ val = readl(clk->gfm_mux);
+
+ if (index)
+ val |= GFM_MASK;
+ else
+ val &= ~GFM_MASK;
+
+ writel(val, clk->gfm_mux);
+
+ return 0;
+}
+
+static const struct clk_ops clk_gfm_ops = {
+ .get_parent = clk_gfm_get_parent,
+ .set_parent = clk_gfm_set_parent,
+ .determine_rate = __clk_mux_determine_rate,
+};
+
+static struct clk_gfm lpass_gfm_va_mclk = {
+ .mux_reg = 0x20000,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "VA_MCLK",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .num_parents = 2,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_VA_CORE_MCLK",
+ },
+ },
+ },
+};
+
+static struct clk_gfm lpass_gfm_tx_npl = {
+ .mux_reg = 0x20000,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "TX_NPL",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_NPL_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_VA_CORE_2X_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_wsa_mclk = {
+ .mux_reg = 0x220d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "WSA_MCLK",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_WSA_CORE_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_wsa_npl = {
+ .mux_reg = 0x220d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "WSA_NPL",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_NPL_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_WSA_CORE_NPL_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_rx_mclk_mclk2 = {
+ .mux_reg = 0x240d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "RX_MCLK_MCLK2",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_RX_CORE_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm lpass_gfm_rx_npl = {
+ .mux_reg = 0x240d8,
+ .mux_mask = BIT(0),
+ .hw.init = &(struct clk_init_data) {
+ .name = "RX_NPL",
+ .ops = &clk_gfm_ops,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .parent_data = (const struct clk_parent_data[]){
+ {
+ .index = 0,
+ .fw_name = "LPASS_CLK_ID_TX_CORE_NPL_MCLK",
+ }, {
+ .index = 1,
+ .fw_name = "LPASS_CLK_ID_RX_CORE_NPL_MCLK",
+ },
+ },
+ .num_parents = 2,
+ },
+};
+
+static struct clk_gfm *aoncc_gfm_clks[] = {
+ [LPASS_CDC_VA_MCLK] = &lpass_gfm_va_mclk,
+ [LPASS_CDC_TX_NPL] = &lpass_gfm_tx_npl,
+};
+
+static struct clk_hw_onecell_data aoncc_hw_onecell_data = {
+ .hws = {
+ [LPASS_CDC_VA_MCLK] = &lpass_gfm_va_mclk.hw,
+ [LPASS_CDC_TX_NPL] = &lpass_gfm_tx_npl.hw,
+ },
+ .num = ARRAY_SIZE(aoncc_gfm_clks),
+};
+
+static struct clk_gfm *audiocc_gfm_clks[] = {
+ [LPASS_CDC_WSA_NPL] = &lpass_gfm_wsa_npl,
+ [LPASS_CDC_WSA_MCLK] = &lpass_gfm_wsa_mclk,
+ [LPASS_CDC_RX_NPL] = &lpass_gfm_rx_npl,
+ [LPASS_CDC_RX_MCLK_MCLK2] = &lpass_gfm_rx_mclk_mclk2,
+};
+
+static struct clk_hw_onecell_data audiocc_hw_onecell_data = {
+ .hws = {
+ [LPASS_CDC_WSA_NPL] = &lpass_gfm_wsa_npl.hw,
+ [LPASS_CDC_WSA_MCLK] = &lpass_gfm_wsa_mclk.hw,
+ [LPASS_CDC_RX_NPL] = &lpass_gfm_rx_npl.hw,
+ [LPASS_CDC_RX_MCLK_MCLK2] = &lpass_gfm_rx_mclk_mclk2.hw,
+ },
+ .num = ARRAY_SIZE(audiocc_gfm_clks),
+};
+
+struct lpass_gfm_data {
+ struct clk_hw_onecell_data *onecell_data;
+ struct clk_gfm **gfm_clks;
+};
+
+static struct lpass_gfm_data audiocc_data = {
+ .onecell_data = &audiocc_hw_onecell_data,
+ .gfm_clks = audiocc_gfm_clks,
+};
+
+static struct lpass_gfm_data aoncc_data = {
+ .onecell_data = &aoncc_hw_onecell_data,
+ .gfm_clks = aoncc_gfm_clks,
+};
+
+static int lpass_gfm_clk_driver_probe(struct platform_device *pdev)
+{
+ const struct lpass_gfm_data *data;
+ struct device *dev = &pdev->dev;
+ struct clk_gfm *gfm;
+ struct lpass_gfm *cc;
+ int err, i;
+
+ data = of_device_get_match_data(dev);
+ if (!data)
+ return -EINVAL;
+
+ cc = devm_kzalloc(dev, sizeof(*cc), GFP_KERNEL);
+ if (!cc)
+ return -ENOMEM;
+
+ cc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(cc->base))
+ return PTR_ERR(cc->base);
+
+ pm_runtime_enable(dev);
+ err = pm_clk_create(dev);
+ if (err)
+ goto pm_clk_err;
+
+ err = of_pm_clk_add_clks(dev);
+ if (err < 0) {
+ dev_dbg(dev, "Failed to get lpass core voting clocks\n");
+ goto clk_reg_err;
+ }
+
+ for (i = 0; i < data->onecell_data->num; i++) {
+ if (!data->gfm_clks[i])
+ continue;
+
+ gfm = data->gfm_clks[i];
+ gfm->priv = cc;
+ gfm->gfm_mux = cc->base + gfm->mux_reg;
+
+ err = devm_clk_hw_register(dev, &data->gfm_clks[i]->hw);
+ if (err)
+ goto clk_reg_err;
+ }
+
+ err = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ data->onecell_data);
+ if (err)
+ goto clk_reg_err;
+
+ return 0;
+
+clk_reg_err:
+ pm_clk_destroy(dev);
+pm_clk_err:
+ pm_runtime_disable(dev);
+ return err;
+}
+
+static const struct of_device_id lpass_gfm_clk_match_table[] = {
+ {
+ .compatible = "qcom,sm8250-lpass-aoncc",
+ .data = &aoncc_data,
+ },
+ {
+ .compatible = "qcom,sm8250-lpass-audiocc",
+ .data = &audiocc_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_gfm_clk_match_table);
+
+static const struct dev_pm_ops lpass_gfm_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_gfm_clk_driver = {
+ .probe = lpass_gfm_clk_driver_probe,
+ .driver = {
+ .name = "lpass-gfm-clk",
+ .of_match_table = lpass_gfm_clk_match_table,
+ .pm = &lpass_gfm_pm_ops,
+ },
+};
+module_platform_driver(lpass_gfm_clk_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c
index 228d08f5d26f..2e0ecc38efdd 100644
--- a/drivers/clk/qcom/lpasscorecc-sc7180.c
+++ b/drivers/clk/qcom/lpasscorecc-sc7180.c
@@ -356,12 +356,52 @@ static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = {
.num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs),
};
+static void lpass_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static void lpass_pm_clk_destroy(void *data)
+{
+ pm_clk_destroy(data);
+}
+
+static int lpass_create_pm_clks(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, lpass_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(&pdev->dev, lpass_pm_clk_destroy, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0)
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+
+ return ret;
+}
+
static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
{
const struct qcom_cc_desc *desc;
struct regmap *regmap;
int ret;
+ ret = lpass_create_pm_clks(pdev);
+ if (ret)
+ return ret;
+
lpass_core_cc_sc7180_regmap_config.name = "lpass_audio_cc";
desc = &lpass_audio_hm_sc7180_desc;
ret = qcom_cc_probe_by_index(pdev, 1, desc);
@@ -386,12 +426,22 @@ static int lpass_core_cc_sc7180_probe(struct platform_device *pdev)
clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap,
&lpass_lpaaudio_dig_pll_config);
- return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap);
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ return ret;
}
static int lpass_hm_core_probe(struct platform_device *pdev)
{
const struct qcom_cc_desc *desc;
+ int ret;
+
+ ret = lpass_create_pm_clks(pdev);
+ if (ret)
+ return ret;
lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core";
desc = &lpass_core_hm_sc7180_desc;
@@ -399,61 +449,28 @@ static int lpass_hm_core_probe(struct platform_device *pdev)
return qcom_cc_probe_by_index(pdev, 0, desc);
}
-static const struct of_device_id lpass_core_cc_sc7180_match_table[] = {
+static const struct of_device_id lpass_hm_sc7180_match_table[] = {
{
.compatible = "qcom,sc7180-lpasshm",
- .data = lpass_hm_core_probe,
},
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_hm_sc7180_match_table);
+
+static const struct of_device_id lpass_core_cc_sc7180_match_table[] = {
{
.compatible = "qcom,sc7180-lpasscorecc",
- .data = lpass_core_cc_sc7180_probe,
},
{ }
};
MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7180_match_table);
-static int lpass_core_sc7180_probe(struct platform_device *pdev)
-{
- int (*clk_probe)(struct platform_device *p);
- int ret;
-
- pm_runtime_enable(&pdev->dev);
- ret = pm_clk_create(&pdev->dev);
- if (ret)
- goto disable_pm_runtime;
-
- ret = pm_clk_add(&pdev->dev, "iface");
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to acquire iface clock\n");
- goto destroy_pm_clk;
- }
-
- ret = -EINVAL;
- clk_probe = of_device_get_match_data(&pdev->dev);
- if (!clk_probe)
- goto destroy_pm_clk;
-
- ret = clk_probe(pdev);
- if (ret)
- goto destroy_pm_clk;
-
- return 0;
-
-destroy_pm_clk:
- pm_clk_destroy(&pdev->dev);
-
-disable_pm_runtime:
- pm_runtime_disable(&pdev->dev);
-
- return ret;
-}
-
static const struct dev_pm_ops lpass_core_cc_pm_ops = {
SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
};
static struct platform_driver lpass_core_cc_sc7180_driver = {
- .probe = lpass_core_sc7180_probe,
+ .probe = lpass_core_cc_sc7180_probe,
.driver = {
.name = "lpass_core_cc-sc7180",
.of_match_table = lpass_core_cc_sc7180_match_table,
@@ -461,17 +478,43 @@ static struct platform_driver lpass_core_cc_sc7180_driver = {
},
};
-static int __init lpass_core_cc_sc7180_init(void)
+static const struct dev_pm_ops lpass_hm_pm_ops = {
+ SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL)
+};
+
+static struct platform_driver lpass_hm_sc7180_driver = {
+ .probe = lpass_hm_core_probe,
+ .driver = {
+ .name = "lpass_hm-sc7180",
+ .of_match_table = lpass_hm_sc7180_match_table,
+ .pm = &lpass_hm_pm_ops,
+ },
+};
+
+static int __init lpass_sc7180_init(void)
{
- return platform_driver_register(&lpass_core_cc_sc7180_driver);
+ int ret;
+
+ ret = platform_driver_register(&lpass_core_cc_sc7180_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&lpass_hm_sc7180_driver);
+ if (ret) {
+ platform_driver_unregister(&lpass_core_cc_sc7180_driver);
+ return ret;
+ }
+
+ return 0;
}
-subsys_initcall(lpass_core_cc_sc7180_init);
+subsys_initcall(lpass_sc7180_init);
-static void __exit lpass_core_cc_sc7180_exit(void)
+static void __exit lpass_sc7180_exit(void)
{
+ platform_driver_unregister(&lpass_hm_sc7180_driver);
platform_driver_unregister(&lpass_core_cc_sc7180_driver);
}
-module_exit(lpass_core_cc_sc7180_exit);
+module_exit(lpass_sc7180_exit);
MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7180 Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
index 5f25a70bc61c..4146c1d717b9 100644
--- a/drivers/clk/renesas/clk-sh73a0.c
+++ b/drivers/clk/renesas/clk-sh73a0.c
@@ -121,7 +121,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
(phy_no ? CPG_DSI1PHYCR : CPG_DSI0PHYCR);
parent_name = phy_no ? "dsi1pck" : "dsi0pck";
- mult = __raw_readl(dsi_reg);
+ mult = readl(dsi_reg);
if (!(mult & 0x8000))
mult = 1;
else
diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
index fd54b9f625da..4a43ebec7d5e 100644
--- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c
@@ -41,6 +41,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -67,6 +68,12 @@ static const struct cpg_core_clk r8a774a1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A774A1_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774A1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A774A1_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -200,6 +207,7 @@ static const struct mssr_mod_clk r8a774a1_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A774A1_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774A1_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774A1_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A774A1_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774A1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774A1_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A774A1_CLK_CP),
diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
index f436691271ec..6f04c40fe237 100644
--- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c
@@ -40,6 +40,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
/* Module Clocks */
@@ -65,6 +66,12 @@ static const struct cpg_core_clk r8a774b1_core_clks[] __initconst = {
DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A774B1_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774B1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A774B1_CLK_RPC),
DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32),
@@ -196,6 +203,7 @@ static const struct mssr_mod_clk r8a774b1_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A774B1_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774B1_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774B1_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A774B1_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774B1_CLK_S0D6),
DEF_MOD("i2c5", 919, R8A774B1_CLK_S0D6),
DEF_MOD("i2c-dvfs", 926, R8A774B1_CLK_CP),
diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
index 9fc9fa9e531a..ed3a2cf0e0bb 100644
--- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c
@@ -44,6 +44,7 @@ enum clk_ids {
CLK_S2,
CLK_S3,
CLK_SDSRC,
+ CLK_RPCSRC,
CLK_RINT,
CLK_OCO,
@@ -74,6 +75,13 @@ static const struct cpg_core_clk r8a774c0_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1, 6, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1, 2, 1),
+ DEF_FIXED_RPCSRC_E3(".rpcsrc", CLK_RPCSRC, CLK_PLL0, CLK_PLL1),
+
+ DEF_BASE("rpc", R8A774C0_CLK_RPC, CLK_TYPE_GEN3_RPC,
+ CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A774C0_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
+ R8A774C0_CLK_RPC),
+
DEF_DIV6_RO(".r", CLK_RINT, CLK_EXTAL, CPG_RCKCR, 32),
DEF_RATE(".oco", CLK_OCO, 8 * 1000 * 1000),
@@ -199,6 +207,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = {
DEF_MOD("can-fd", 914, R8A774C0_CLK_S3D2),
DEF_MOD("can-if1", 915, R8A774C0_CLK_S3D4),
DEF_MOD("can-if0", 916, R8A774C0_CLK_S3D4),
+ DEF_MOD("rpc-if", 917, R8A774C0_CLK_RPCD2),
DEF_MOD("i2c6", 918, R8A774C0_CLK_S3D2),
DEF_MOD("i2c5", 919, R8A774C0_CLK_S3D2),
DEF_MOD("i2c-dvfs", 926, R8A774C0_CLK_CP),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index 17ebbac7ddfb..aa5389b04d74 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -26,7 +26,6 @@
#include <dt-bindings/clock/r8a779a0-cpg-mssr.h>
#include "renesas-cpg-mssr.h"
-#include "rcar-gen3-cpg.h"
enum rcar_r8a779a0_clk_types {
CLK_TYPE_R8A779A0_MAIN = CLK_TYPE_CUSTOM,
@@ -84,6 +83,14 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define DEF_MDSEL(_name, _id, _md, _parent0, _div0, _parent1, _div1) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_MDSEL, \
+ (_parent0) << 16 | (_parent1), \
+ .div = (_div0) << 16 | (_div1), .offset = _md)
+
+#define DEF_OSC(_name, _id, _parent, _div) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_OSC, _parent, .div = _div)
+
static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -136,15 +143,51 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_DIV6P1("canfd", R8A779A0_CLK_CANFD, CLK_PLL5_DIV4, 0x878),
DEF_DIV6P1("csi0", R8A779A0_CLK_CSI0, CLK_PLL5_DIV4, 0x880),
- DEF_GEN3_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
- DEF_GEN3_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
+ DEF_OSC("osc", R8A779A0_CLK_OSC, CLK_EXTAL, 8),
+ DEF_MDSEL("r", R8A779A0_CLK_R, 29, CLK_EXTALR, 1, CLK_OCO, 1),
};
static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
+ DEF_MOD("csi40", 331, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi41", 400, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi42", 401, R8A779A0_CLK_CSI0),
+ DEF_MOD("csi43", 402, R8A779A0_CLK_CSI0),
DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
DEF_MOD("scif4", 705, R8A779A0_CLK_S1D8),
+ DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin03", 801, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin04", 802, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin05", 803, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin06", 804, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin07", 805, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin10", 806, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin11", 807, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin12", 808, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin13", 809, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin14", 810, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin15", 811, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin16", 812, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin17", 813, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin20", 814, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin21", 815, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin22", 816, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin23", 817, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin24", 818, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin25", 819, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin26", 820, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin27", 821, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin30", 822, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin31", 823, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin32", 824, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin33", 825, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin34", 826, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin35", 827, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin36", 828, R8A779A0_CLK_S1D1),
+ DEF_MOD("vin37", 829, R8A779A0_CLK_S1D1),
};
static spinlock_t cpg_lock;
@@ -153,7 +196,7 @@ static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
-struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
+static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base,
struct raw_notifier_head *notifiers)
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 488f8b3980c5..063b61151488 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -224,10 +224,9 @@ static struct clk * __init cpg_z_clk_register(const char *name,
#define CPG_SD_STP_MASK (CPG_SD_STP_HCK | CPG_SD_STP_CK)
#define CPG_SD_FC_MASK (0x7 << 2 | 0x3 << 0)
-#define CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) \
+#define CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) \
{ \
.val = ((stp_hck) ? CPG_SD_STP_HCK : 0) | \
- ((stp_ck) ? CPG_SD_STP_CK : 0) | \
((sd_srcfc) << 2) | \
((sd_fc) << 0), \
.div = (sd_div), \
@@ -247,36 +246,36 @@ struct sd_clock {
};
/* SDn divider
- * sd_srcfc sd_fc div
- * stp_hck stp_ck (div) (div) = sd_srcfc x sd_fc
- *-------------------------------------------------------------------
- * 0 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
- * 0 0 1 (2) 1 (4) 8 : SDR50
- * 1 0 2 (4) 1 (4) 16 : HS / SDR25
- * 1 0 3 (8) 1 (4) 32 : NS / SDR12
- * 1 0 4 (16) 1 (4) 64
- * 0 0 0 (1) 0 (2) 2
- * 0 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
- * 1 0 2 (4) 0 (2) 8
- * 1 0 3 (8) 0 (2) 16
- * 1 0 4 (16) 0 (2) 32
+ * sd_srcfc sd_fc div
+ * stp_hck (div) (div) = sd_srcfc x sd_fc
+ *---------------------------------------------------------
+ * 0 0 (1) 1 (4) 4 : SDR104 / HS200 / HS400 (8 TAP)
+ * 0 1 (2) 1 (4) 8 : SDR50
+ * 1 2 (4) 1 (4) 16 : HS / SDR25
+ * 1 3 (8) 1 (4) 32 : NS / SDR12
+ * 1 4 (16) 1 (4) 64
+ * 0 0 (1) 0 (2) 2
+ * 0 1 (2) 0 (2) 4 : SDR104 / HS200 / HS400 (4 TAP)
+ * 1 2 (4) 0 (2) 8
+ * 1 3 (8) 0 (2) 16
+ * 1 4 (16) 0 (2) 32
*
* NOTE: There is a quirk option to ignore the first row of the dividers
* table when searching for suitable settings. This is because HS400 on
* early ES versions of H3 and M3-W requires a specific setting to work.
*/
static const struct sd_div_table cpg_sd_div_table[] = {
-/* CPG_SD_DIV_TABLE_DATA(stp_hck, stp_ck, sd_srcfc, sd_fc, sd_div) */
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 1, 4),
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 1, 8),
- CPG_SD_DIV_TABLE_DATA(1, 0, 2, 1, 16),
- CPG_SD_DIV_TABLE_DATA(1, 0, 3, 1, 32),
- CPG_SD_DIV_TABLE_DATA(1, 0, 4, 1, 64),
- CPG_SD_DIV_TABLE_DATA(0, 0, 0, 0, 2),
- CPG_SD_DIV_TABLE_DATA(0, 0, 1, 0, 4),
- CPG_SD_DIV_TABLE_DATA(1, 0, 2, 0, 8),
- CPG_SD_DIV_TABLE_DATA(1, 0, 3, 0, 16),
- CPG_SD_DIV_TABLE_DATA(1, 0, 4, 0, 32),
+/* CPG_SD_DIV_TABLE_DATA(stp_hck, sd_srcfc, sd_fc, sd_div) */
+ CPG_SD_DIV_TABLE_DATA(0, 0, 1, 4),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 1, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 1, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 1, 32),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 1, 64),
+ CPG_SD_DIV_TABLE_DATA(0, 0, 0, 2),
+ CPG_SD_DIV_TABLE_DATA(0, 1, 0, 4),
+ CPG_SD_DIV_TABLE_DATA(1, 2, 0, 8),
+ CPG_SD_DIV_TABLE_DATA(1, 3, 0, 16),
+ CPG_SD_DIV_TABLE_DATA(1, 4, 0, 32),
};
#define to_sd_clock(_hw) container_of(_hw, struct sd_clock, hw)
@@ -696,6 +695,34 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
cpg_rpcsrc_div_table,
&cpg_lock);
+ case CLK_TYPE_GEN3_E3_RPCSRC:
+ /*
+ * Register RPCSRC as a fixed-factor clock based on the
+ * MD[4:1] pins and the CPG_RPCCKCR[4:3] register value,
+ * which have been set prior to booting the kernel.
+ */
+ value = (readl(base + CPG_RPCCKCR) & GENMASK(4, 3)) >> 3;
+
+ switch (value) {
+ case 0:
+ div = 5;
+ break;
+ case 1:
+ div = 3;
+ break;
+ case 2:
+ parent = clks[core->parent >> 16];
+ if (IS_ERR(parent))
+ return ERR_CAST(parent);
+ div = core->div;
+ break;
+ case 3:
+ default:
+ div = 2;
+ break;
+ }
+ break;
+
case CLK_TYPE_GEN3_RPC:
return cpg_rpc_clk_register(core->name, base,
__clk_get_name(parent), notifiers);
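
The switch above maps the two CPG_RPCCKCR[4:3] bits to a divider (5, 3, core->div, 2), with value 2 additionally reparenting RPCSRC to the second parent encoded by DEF_FIXED_RPCSRC_E3. A sketch of the same mapping in table form, assuming the encodings used above (core->div is 8 per the macro):

	/* CPG_RPCCKCR[4:3] -> RPCSRC divider; index 2 uses core->div (8)
	 * and also selects the alternate parent from the upper half of
	 * core->parent.
	 */
	static const u8 e3_rpcsrc_div[4] = { 5, 3, 8, 2 };

	value = (readl(base + CPG_RPCCKCR) & GENMASK(4, 3)) >> 3;
	div = e3_rpcsrc_div[value];
	if (value == 2)
		parent = clks[core->parent >> 16];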
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.h b/drivers/clk/renesas/rcar-gen3-cpg.h
index c4ac80cac6a0..3d949c4a3244 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.h
+++ b/drivers/clk/renesas/rcar-gen3-cpg.h
@@ -24,6 +24,7 @@ enum rcar_gen3_clk_types {
CLK_TYPE_GEN3_OSC, /* OSC EXTAL predivider and fixed divider */
CLK_TYPE_GEN3_RCKSEL, /* Select parent/divider using RCKCR.CKSEL */
CLK_TYPE_GEN3_RPCSRC,
+ CLK_TYPE_GEN3_E3_RPCSRC,
CLK_TYPE_GEN3_RPC,
CLK_TYPE_GEN3_RPCD2,
@@ -54,6 +55,10 @@ enum rcar_gen3_clk_types {
#define DEF_GEN3_Z(_name, _id, _type, _parent, _div, _offset) \
DEF_BASE(_name, _id, _type, _parent, .div = _div, .offset = _offset)
+#define DEF_FIXED_RPCSRC_E3(_name, _id, _parent0, _parent1) \
+ DEF_BASE(_name, _id, CLK_TYPE_GEN3_E3_RPCSRC, \
+ (_parent0) << 16 | (_parent1), .div = 8)
+
struct rcar_gen3_cpg_pll_config {
u8 extal_div;
u8 pll1_mult;
diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c
index d4c02986c34e..3abafd78f7c8 100644
--- a/drivers/clk/renesas/rcar-usb2-clock-sel.c
+++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c
@@ -160,7 +160,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- priv->rsts = devm_reset_control_array_get(dev, true, false);
+ priv->rsts = devm_reset_control_array_get_shared(dev);
if (IS_ERR(priv->rsts))
return PTR_ERR(priv->rsts);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index 94db88370337..1c3215dc4877 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -119,7 +119,8 @@ static const u16 srstclr_for_v3u[] = {
};
/**
- * Clock Pulse Generator / Module Standby and Software Reset Private Data
+ * struct cpg_mssr_priv - Clock Pulse Generator / Module Standby
+ * and Software Reset Private Data
*
* @rcdev: Optional reset controller entity
* @dev: CPG/MSSR device
diff --git a/drivers/clk/rockchip/Kconfig b/drivers/clk/rockchip/Kconfig
index 47cd6c5de837..effd05032e85 100644
--- a/drivers/clk/rockchip/Kconfig
+++ b/drivers/clk/rockchip/Kconfig
@@ -11,67 +11,77 @@ config COMMON_CLK_ROCKCHIP
if COMMON_CLK_ROCKCHIP
config CLK_PX30
bool "Rockchip PX30 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for PX30 Clock Driver.
config CLK_RV110X
bool "Rockchip RV110x clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RV110x Clock Driver.
config CLK_RK3036
bool "Rockchip RK3036 clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK3036 Clock Driver.
config CLK_RK312X
bool "Rockchip RK312x clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK312x Clock Driver.
config CLK_RK3188
bool "Rockchip RK3188 clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK3188 Clock Driver.
config CLK_RK322X
bool "Rockchip RK322x clock controller support"
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK322x Clock Driver.
config CLK_RK3288
bool "Rockchip RK3288 clock controller support"
- depends on ARM
+ depends on (ARM || COMPILE_TEST)
default y
help
Build the driver for RK3288 Clock Driver.
config CLK_RK3308
bool "Rockchip RK3308 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3308 Clock Driver.
config CLK_RK3328
bool "Rockchip RK3328 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3328 Clock Driver.
config CLK_RK3368
bool "Rockchip RK3368 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3368 Clock Driver.
config CLK_RK3399
tristate "Rockchip RK3399 clock controller support"
+ depends on (ARM64 || COMPILE_TEST)
default y
help
Build the driver for RK3399 Clock Driver.
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index 730020fcc7fe..0b76ad34de00 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -255,19 +255,19 @@ static struct rockchip_clk_branch common_spdif_fracmux __initdata =
RK2928_CLKSEL_CON(5), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart0_fracmux __initdata =
- MUX(SCLK_UART0, "sclk_uart0", mux_sclk_uart0_p, 0,
+ MUX(SCLK_UART0, "sclk_uart0", mux_sclk_uart0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(13), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart1_fracmux __initdata =
- MUX(SCLK_UART1, "sclk_uart1", mux_sclk_uart1_p, 0,
+ MUX(SCLK_UART1, "sclk_uart1", mux_sclk_uart1_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(14), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart2_fracmux __initdata =
- MUX(SCLK_UART2, "sclk_uart2", mux_sclk_uart2_p, 0,
+ MUX(SCLK_UART2, "sclk_uart2", mux_sclk_uart2_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(15), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_uart3_fracmux __initdata =
- MUX(SCLK_UART3, "sclk_uart3", mux_sclk_uart3_p, 0,
+ MUX(SCLK_UART3, "sclk_uart3", mux_sclk_uart3_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(16), 8, 2, MFLAGS);
static struct rockchip_clk_branch common_clk_branches[] __initdata = {
@@ -408,28 +408,28 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "uart0_pre", "uart_src", 0,
RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 8, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(17), 0,
RK2928_CLKGATE_CON(1), 9, GFLAGS,
&common_uart0_fracmux),
COMPOSITE_NOMUX(0, "uart1_pre", "uart_src", 0,
RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 10, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart1_frac", "uart1_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(18), 0,
RK2928_CLKGATE_CON(1), 11, GFLAGS,
&common_uart1_fracmux),
COMPOSITE_NOMUX(0, "uart2_pre", "uart_src", 0,
RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 12, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart2_frac", "uart2_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(19), 0,
RK2928_CLKGATE_CON(1), 13, GFLAGS,
&common_uart2_fracmux),
COMPOSITE_NOMUX(0, "uart3_pre", "uart_src", 0,
RK2928_CLKSEL_CON(16), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 14, GFLAGS),
- COMPOSITE_FRACMUX(0, "uart3_frac", "uart3_pre", 0,
+ COMPOSITE_FRACMUX(0, "uart3_frac", "uart3_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(20), 0,
RK2928_CLKGATE_CON(1), 15, GFLAGS,
&common_uart3_fracmux),
@@ -449,7 +449,6 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* hclk_cpu gates */
GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
- GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 1, GFLAGS),
GATE(0, "hclk_cpubus", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 8, GFLAGS),
/* hclk_ahb2apb is part of a clk branch */
@@ -543,15 +542,15 @@ static struct clk_div_table div_aclk_cpu_t[] = {
};
static struct rockchip_clk_branch rk3066a_i2s0_fracmux __initdata =
- MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+ MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(2), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3066a_i2s1_fracmux __initdata =
- MUX(SCLK_I2S1, "sclk_i2s1", mux_sclk_i2s1_p, 0,
+ MUX(SCLK_I2S1, "sclk_i2s1", mux_sclk_i2s1_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(3), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3066a_i2s2_fracmux __initdata =
- MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
+ MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(4), 8, 2, MFLAGS);
static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
@@ -615,27 +614,28 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(2), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 7, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s0_frac", "i2s0_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(6), 0,
RK2928_CLKGATE_CON(0), 8, GFLAGS,
&rk3066a_i2s0_fracmux),
COMPOSITE_NOMUX(0, "i2s1_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 9, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s1_frac", "i2s1_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(7), 0,
RK2928_CLKGATE_CON(0), 10, GFLAGS,
&rk3066a_i2s1_fracmux),
COMPOSITE_NOMUX(0, "i2s2_pre", "i2s_src", 0,
RK2928_CLKSEL_CON(4), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(0), 11, GFLAGS),
- COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_pre", 0,
+ COMPOSITE_FRACMUX(0, "i2s2_frac", "i2s2_pre", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(8), 0,
RK2928_CLKGATE_CON(0), 12, GFLAGS,
&rk3066a_i2s2_fracmux),
- GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
- GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+ GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+ GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_CIF1, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(HCLK_HDMI, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
@@ -728,6 +728,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
RK2928_CLKGATE_CON(0), 10, GFLAGS,
&rk3188_i2s0_fracmux),
+ GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index b443169dd408..336481bc6cc7 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -603,8 +603,7 @@ void rockchip_clk_protect_critical(const char *const clocks[],
for (i = 0; i < nclocks; i++) {
struct clk *clk = __clk_lookup(clocks[i]);
- if (clk)
- clk_prepare_enable(clk);
+ clk_prepare_enable(clk);
}
}
EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);
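
Dropping the NULL check above is safe because the clk consumer API treats a NULL clock as a valid no-op: clk_prepare_enable(NULL) returns 0. A short sketch of why the simplified loop still handles missing clocks:

	struct clk *clk = __clk_lookup("possibly-missing");

	/* __clk_lookup() returns NULL if the clock does not exist, and
	 * clk_prepare_enable() is defined to return 0 for a NULL clk,
	 * so no check is needed before the call.
	 */
	clk_prepare_enable(clk);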
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 57d4b3f20417..0441c4f73ac9 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -2,10 +2,73 @@
# Recent Exynos platforms should just select COMMON_CLK_SAMSUNG:
config COMMON_CLK_SAMSUNG
bool "Samsung Exynos clock controller support" if COMPILE_TEST
- # Clocks on ARM64 SoCs (e.g. Exynos5433, Exynos7) are chosen by
- # EXYNOS_ARM64_COMMON_CLK to avoid building them on ARMv7:
+ select S3C64XX_COMMON_CLK if ARM && ARCH_S3C64XX
+ select S5PV210_COMMON_CLK if ARM && ARCH_S5PV210
+ select EXYNOS_3250_COMMON_CLK if ARM && SOC_EXYNOS3250
+ select EXYNOS_4_COMMON_CLK if ARM && ARCH_EXYNOS4
+ select EXYNOS_5250_COMMON_CLK if ARM && SOC_EXYNOS5250
+ select EXYNOS_5260_COMMON_CLK if ARM && SOC_EXYNOS5260
+ select EXYNOS_5410_COMMON_CLK if ARM && SOC_EXYNOS5410
+ select EXYNOS_5420_COMMON_CLK if ARM && SOC_EXYNOS5420
select EXYNOS_ARM64_COMMON_CLK if ARM64 && ARCH_EXYNOS
+config S3C64XX_COMMON_CLK
+ bool "Samsung S3C64xx clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung S3C64xx SoCs.
+ Choose Y here only if you build for this SoC.
+
+config S5PV210_COMMON_CLK
+ bool "Samsung S5Pv210 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung S5Pv210 SoCs.
+ Choose Y here only if you build for this SoC.
+
+config EXYNOS_3250_COMMON_CLK
+ bool "Samsung Exynos3250 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos3250 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_4_COMMON_CLK
+ bool "Samsung Exynos4 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos4212 and Exynos4412 SoCs. Choose Y here only if you build for
+ this SoC.
+
+config EXYNOS_5250_COMMON_CLK
+ bool "Samsung Exynos5250 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5250 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_5260_COMMON_CLK
+ bool "Samsung Exynos5260 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5260 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_5410_COMMON_CLK
+ bool "Samsung Exynos5410 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5410 SoCs. Choose Y here only if you build for this SoC.
+
+config EXYNOS_5420_COMMON_CLK
+ bool "Samsung Exynos5420 clock controller support" if COMPILE_TEST
+ depends on COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ Exynos5420 SoCs. Choose Y here only if you build for this SoC.
+
config EXYNOS_ARM64_COMMON_CLK
bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
depends on COMMON_CLK_SAMSUNG
@@ -19,6 +82,16 @@ config EXYNOS_AUDSS_CLK_CON
on some Exynos SoC variants. Choose M or Y here if you want to
use audio devices such as I2S, PCM, etc.
+config EXYNOS_CLKOUT
+ tristate "Samsung Exynos clock output driver"
+ depends on COMMON_CLK_SAMSUNG
+ default y if ARCH_EXYNOS
+ help
+ Support for the clock output (XCLKOUT) present on some Exynos SoC
+ variants. Usually the XCLKOUT is used to monitor the status of
+ certain clocks from the SoC, but it can also be tied to other
+ devices as an input clock.
+
# For S3C24XX platforms, select following symbols:
config S3C2410_COMMON_CLK
bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 1a4e6b787978..028b2e27a37e 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -4,22 +4,22 @@
#
obj-$(CONFIG_COMMON_CLK) += clk.o clk-pll.o clk-cpu.o
-obj-$(CONFIG_SOC_EXYNOS3250) += clk-exynos3250.o
-obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4.o
-obj-$(CONFIG_ARCH_EXYNOS4) += clk-exynos4412-isp.o
-obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
-obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5-subcmu.o
-obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
-obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
-obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
-obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_3250_COMMON_CLK) += clk-exynos3250.o
+obj-$(CONFIG_EXYNOS_4_COMMON_CLK) += clk-exynos4.o
+obj-$(CONFIG_EXYNOS_4_COMMON_CLK) += clk-exynos4412-isp.o
+obj-$(CONFIG_EXYNOS_5250_COMMON_CLK) += clk-exynos5250.o
+obj-$(CONFIG_EXYNOS_5250_COMMON_CLK) += clk-exynos5-subcmu.o
+obj-$(CONFIG_EXYNOS_5260_COMMON_CLK) += clk-exynos5260.o
+obj-$(CONFIG_EXYNOS_5410_COMMON_CLK) += clk-exynos5410.o
+obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5420.o
+obj-$(CONFIG_EXYNOS_5420_COMMON_CLK) += clk-exynos5-subcmu.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
-obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o
+obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
obj-$(CONFIG_S3C2443_COMMON_CLK)+= clk-s3c2443.o
-obj-$(CONFIG_ARCH_S3C64XX) += clk-s3c64xx.o
-obj-$(CONFIG_ARCH_S5PV210) += clk-s5pv210.o clk-s5pv210-audss.o
+obj-$(CONFIG_S3C64XX_COMMON_CLK) += clk-s3c64xx.o
+obj-$(CONFIG_S5PV210_COMMON_CLK) += clk-s5pv210.o clk-s5pv210-audss.o
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
index 34ccb1d23bc3..e6d6cbf8c4e6 100644
--- a/drivers/clk/samsung/clk-exynos-clkout.c
+++ b/drivers/clk/samsung/clk-exynos-clkout.c
@@ -9,10 +9,13 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/module.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/syscore_ops.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
#define EXYNOS_CLKOUT_NR_CLKS 1
#define EXYNOS_CLKOUT_PARENTS 32
@@ -28,41 +31,103 @@ struct exynos_clkout {
struct clk_mux mux;
spinlock_t slock;
void __iomem *reg;
+ struct device_node *np;
u32 pmu_debug_save;
struct clk_hw_onecell_data data;
};
-static struct exynos_clkout *clkout;
+struct exynos_clkout_variant {
+ u32 mux_mask;
+};
-static int exynos_clkout_suspend(void)
-{
- clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+static const struct exynos_clkout_variant exynos_clkout_exynos4 = {
+ .mux_mask = EXYNOS4_CLKOUT_MUX_MASK,
+};
- return 0;
-}
+static const struct exynos_clkout_variant exynos_clkout_exynos5 = {
+ .mux_mask = EXYNOS5_CLKOUT_MUX_MASK,
+};
-static void exynos_clkout_resume(void)
+static const struct of_device_id exynos_clkout_ids[] = {
+ {
+ .compatible = "samsung,exynos3250-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos4210-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos4412-pmu",
+ .data = &exynos_clkout_exynos4,
+ }, {
+ .compatible = "samsung,exynos5250-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5410-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5420-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, {
+ .compatible = "samsung,exynos5433-pmu",
+ .data = &exynos_clkout_exynos5,
+ }, { }
+};
+MODULE_DEVICE_TABLE(of, exynos_clkout_ids);
+
+/*
+ * The device is instantiated as a child of the PMU device, without its
+ * own device node. Therefore, match compatibles against the parent.
+ */
+static int exynos_clkout_match_parent_dev(struct device *dev, u32 *mux_mask)
{
- writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
-}
+ const struct exynos_clkout_variant *variant;
+ const struct of_device_id *match;
-static struct syscore_ops exynos_clkout_syscore_ops = {
- .suspend = exynos_clkout_suspend,
- .resume = exynos_clkout_resume,
-};
+ if (!dev->parent) {
+ dev_err(dev, "not instantiated from MFD\n");
+ return -EINVAL;
+ }
+
+ match = of_match_device(exynos_clkout_ids, dev->parent);
+ if (!match) {
+ dev_err(dev, "cannot match parent device\n");
+ return -EINVAL;
+ }
+ variant = match->data;
-static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
+ *mux_mask = variant->mux_mask;
+
+ return 0;
+}
+
+static int exynos_clkout_probe(struct platform_device *pdev)
{
const char *parent_names[EXYNOS_CLKOUT_PARENTS];
struct clk *parents[EXYNOS_CLKOUT_PARENTS];
- int parent_count;
- int ret;
- int i;
+ struct exynos_clkout *clkout;
+ int parent_count, ret, i;
+ u32 mux_mask;
- clkout = kzalloc(struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
- GFP_KERNEL);
+ clkout = devm_kzalloc(&pdev->dev,
+ struct_size(clkout, data.hws, EXYNOS_CLKOUT_NR_CLKS),
+ GFP_KERNEL);
if (!clkout)
- return;
+ return -ENOMEM;
+
+ ret = exynos_clkout_match_parent_dev(&pdev->dev, &mux_mask);
+ if (ret)
+ return ret;
+
+ clkout->np = pdev->dev.of_node;
+ if (!clkout->np) {
+ /*
+ * pdev->dev.parent was checked by exynos_clkout_match_parent_dev()
+ * so it is not NULL.
+ */
+ clkout->np = pdev->dev.parent->of_node;
+ }
+
+ platform_set_drvdata(pdev, clkout);
spin_lock_init(&clkout->slock);
@@ -71,7 +136,7 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
char name[] = "clkoutXX";
snprintf(name, sizeof(name), "clkout%d", i);
- parents[i] = of_clk_get_by_name(node, name);
+ parents[i] = of_clk_get_by_name(clkout->np, name);
if (IS_ERR(parents[i])) {
parent_names[i] = "none";
continue;
@@ -82,11 +147,13 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
}
if (!parent_count)
- goto free_clkout;
+ return -EINVAL;
- clkout->reg = of_iomap(node, 0);
- if (!clkout->reg)
+ clkout->reg = of_iomap(clkout->np, 0);
+ if (!clkout->reg) {
+ ret = -ENODEV;
goto clks_put;
+ }
clkout->gate.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
clkout->gate.bit_idx = EXYNOS_CLKOUT_DISABLE_SHIFT;
@@ -103,17 +170,17 @@ static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
&clk_mux_ops, NULL, NULL, &clkout->gate.hw,
&clk_gate_ops, CLK_SET_RATE_PARENT
| CLK_SET_RATE_NO_REPARENT);
- if (IS_ERR(clkout->data.hws[0]))
+ if (IS_ERR(clkout->data.hws[0])) {
+ ret = PTR_ERR(clkout->data.hws[0]);
goto err_unmap;
+ }
clkout->data.num = EXYNOS_CLKOUT_NR_CLKS;
- ret = of_clk_add_hw_provider(node, of_clk_hw_onecell_get, &clkout->data);
+ ret = of_clk_add_hw_provider(clkout->np, of_clk_hw_onecell_get, &clkout->data);
if (ret)
goto err_clk_unreg;
- register_syscore_ops(&exynos_clkout_syscore_ops);
-
- return;
+ return 0;
err_clk_unreg:
clk_hw_unregister(clkout->data.hws[0]);
@@ -123,38 +190,56 @@ clks_put:
for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i)
if (!IS_ERR(parents[i]))
clk_put(parents[i]);
-free_clkout:
- kfree(clkout);
- pr_err("%s: failed to register clkout clock\n", __func__);
+ dev_err(&pdev->dev, "failed to register clkout clock\n");
+
+ return ret;
}
-/*
- * We use CLK_OF_DECLARE_DRIVER initialization method to avoid setting
- * the OF_POPULATED flag on the pmu device tree node, so later the
- * Exynos PMU platform device can be properly probed with PMU driver.
- */
+static int exynos_clkout_remove(struct platform_device *pdev)
+{
+ struct exynos_clkout *clkout = platform_get_drvdata(pdev);
+
+ of_clk_del_provider(clkout->np);
+ clk_hw_unregister(clkout->data.hws[0]);
+ iounmap(clkout->reg);
+
+ return 0;
+}
-static void __init exynos4_clkout_init(struct device_node *node)
+static int __maybe_unused exynos_clkout_suspend(struct device *dev)
{
- exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
+ struct exynos_clkout *clkout = dev_get_drvdata(dev);
+
+ clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+ return 0;
}
-CLK_OF_DECLARE_DRIVER(exynos4210_clkout, "samsung,exynos4210-pmu",
- exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos4412_clkout, "samsung,exynos4412-pmu",
- exynos4_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos3250_clkout, "samsung,exynos3250-pmu",
- exynos4_clkout_init);
-
-static void __init exynos5_clkout_init(struct device_node *node)
+
+static int __maybe_unused exynos_clkout_resume(struct device *dev)
{
- exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
+ struct exynos_clkout *clkout = dev_get_drvdata(dev);
+
+ writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+ return 0;
}
-CLK_OF_DECLARE_DRIVER(exynos5250_clkout, "samsung,exynos5250-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5410_clkout, "samsung,exynos5410-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5420_clkout, "samsung,exynos5420-pmu",
- exynos5_clkout_init);
-CLK_OF_DECLARE_DRIVER(exynos5433_clkout, "samsung,exynos5433-pmu",
- exynos5_clkout_init);
+
+static SIMPLE_DEV_PM_OPS(exynos_clkout_pm_ops, exynos_clkout_suspend,
+ exynos_clkout_resume);
+
+static struct platform_driver exynos_clkout_driver = {
+ .driver = {
+ .name = "exynos-clkout",
+ .of_match_table = exynos_clkout_ids,
+ .pm = &exynos_clkout_pm_ops,
+ },
+ .probe = exynos_clkout_probe,
+ .remove = exynos_clkout_remove,
+};
+module_platform_driver(exynos_clkout_driver);
+
+MODULE_AUTHOR("Krzysztof Kozlowski <krzk@kernel.org>");
+MODULE_AUTHOR("Tomasz Figa <tomasz.figa@gmail.com>");
+MODULE_DESCRIPTION("Samsung Exynos clock output driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index ac70ad785d8e..5873a9354b50 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -8,14 +8,17 @@
#include <linux/errno.h>
#include <linux/hrtimer.h>
+#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/timekeeping.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include "clk.h"
#include "clk-pll.h"
-#define PLL_TIMEOUT_MS 10
+#define PLL_TIMEOUT_US 20000U
+#define PLL_TIMEOUT_LOOPS 1000000U
struct samsung_clk_pll {
struct clk_hw hw;
@@ -63,6 +66,53 @@ static long samsung_pll_round_rate(struct clk_hw *hw,
return rate_table[i - 1].rate;
}
+static bool pll_early_timeout = true;
+
+static int __init samsung_pll_disable_early_timeout(void)
+{
+ pll_early_timeout = false;
+ return 0;
+}
+arch_initcall(samsung_pll_disable_early_timeout);
+
+/* Wait until the PLL is locked */
+static int samsung_pll_lock_wait(struct samsung_clk_pll *pll,
+ unsigned int reg_mask)
+{
+ int i, ret;
+ u32 val;
+
+ /*
+ * This function might be called when the timekeeping API can't be used
+ * to detect timeouts. One situation is when the clocksource is not yet
+ * initialized; another is when timekeeping is suspended. udelay() also
+ * cannot be used when the clocksource is not running on arm64, since
+ * the current timer is used as the cycle counter. So a simple busy loop
+ * is used here in those special cases. The iteration limit has been
+ * derived from experimental measurements of various PLLs on multiple
+ * Exynos SoC variants: a single register read usually took 0.4...1.5 us
+ * and never less than 0.4 us, so the loop bound gives a timeout of at
+ * least roughly 400 ms.
+ */
+ if (pll_early_timeout || timekeeping_suspended) {
+ i = PLL_TIMEOUT_LOOPS;
+ while (i-- > 0) {
+ if (readl_relaxed(pll->con_reg) & reg_mask)
+ return 0;
+
+ cpu_relax();
+ }
+ ret = -ETIMEDOUT;
+ } else {
+ ret = readl_relaxed_poll_timeout_atomic(pll->con_reg, val,
+ val & reg_mask, 0, PLL_TIMEOUT_US);
+ }
+
+ if (ret < 0)
+ pr_err("Could not lock PLL %s\n", clk_hw_get_name(&pll->hw));
+
+ return ret;
+}
+
static int samsung_pll3xxx_enable(struct clk_hw *hw)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
@@ -72,13 +122,7 @@ static int samsung_pll3xxx_enable(struct clk_hw *hw)
tmp |= BIT(pll->enable_offs);
writel_relaxed(tmp, pll->con_reg);
- /* wait lock time */
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(pll->lock_offs)));
-
- return 0;
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
}
static void samsung_pll3xxx_disable(struct clk_hw *hw)
@@ -240,13 +284,10 @@ static int samsung_pll35xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL35XX_SDIV_SHIFT);
writel_relaxed(tmp, pll->con_reg);
- /* Wait until the PLL is locked if it is enabled. */
- if (tmp & BIT(pll->enable_offs)) {
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(pll->lock_offs)));
- }
+ /* Wait for PLL lock if the PLL is enabled */
+ if (tmp & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+
return 0;
}
@@ -318,7 +359,7 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long parent_rate)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 tmp, pll_con0, pll_con1;
+ u32 pll_con0, pll_con1;
const struct samsung_pll_rate_table *rate;
rate = samsung_get_pll_settings(pll, drate);
@@ -356,13 +397,8 @@ static int samsung_pll36xx_set_rate(struct clk_hw *hw, unsigned long drate,
pll_con1 |= rate->kdiv << PLL36XX_KDIV_SHIFT;
writel_relaxed(pll_con1, pll->con_reg + 4);
- /* wait_lock_time */
- if (pll_con0 & BIT(pll->enable_offs)) {
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & BIT(pll->lock_offs)));
- }
+ if (pll_con0 & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
return 0;
}
@@ -437,7 +473,6 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate;
u32 con0, con1;
- ktime_t start;
/* Get required rate settings from table */
rate = samsung_get_pll_settings(pll, drate);
@@ -488,21 +523,8 @@ static int samsung_pll45xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(con1, pll->con_reg + 0x4);
writel_relaxed(con0, pll->con_reg);
- /* Wait for locking. */
- start = ktime_get();
- while (!(readl_relaxed(pll->con_reg) & PLL45XX_LOCKED)) {
- ktime_t delta = ktime_sub(ktime_get(), start);
-
- if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
- pr_err("%s: could not lock PLL %s\n",
- __func__, clk_hw_get_name(hw));
- return -EFAULT;
- }
-
- cpu_relax();
- }
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll, PLL45XX_LOCKED);
}
static const struct clk_ops samsung_pll45xx_clk_ops = {
@@ -588,7 +610,6 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
struct samsung_clk_pll *pll = to_clk_pll(hw);
const struct samsung_pll_rate_table *rate;
u32 con0, con1, lock;
- ktime_t start;
/* Get required rate settings from table */
rate = samsung_get_pll_settings(pll, drate);
@@ -647,21 +668,8 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(con0, pll->con_reg);
writel_relaxed(con1, pll->con_reg + 0x4);
- /* Wait for locking. */
- start = ktime_get();
- while (!(readl_relaxed(pll->con_reg) & PLL46XX_LOCKED)) {
- ktime_t delta = ktime_sub(ktime_get(), start);
-
- if (ktime_to_ms(delta) > PLL_TIMEOUT_MS) {
- pr_err("%s: could not lock PLL %s\n",
- __func__, clk_hw_get_name(hw));
- return -EFAULT;
- }
-
- cpu_relax();
- }
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll, PLL46XX_LOCKED);
}
static const struct clk_ops samsung_pll46xx_clk_ops = {
@@ -1035,14 +1043,9 @@ static int samsung_pll2550xx_set_rate(struct clk_hw *hw, unsigned long drate,
(rate->sdiv << PLL2550XX_S_SHIFT);
writel_relaxed(tmp, pll->con_reg);
- /* wait_lock_time */
- do {
- cpu_relax();
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & (PLL2550XX_LOCK_STAT_MASK
- << PLL2550XX_LOCK_STAT_SHIFT)));
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll,
+ PLL2550XX_LOCK_STAT_MASK << PLL2550XX_LOCK_STAT_SHIFT);
}
static const struct clk_ops samsung_pll2550xx_clk_ops = {
@@ -1132,13 +1135,9 @@ static int samsung_pll2650x_set_rate(struct clk_hw *hw, unsigned long drate,
con1 |= ((rate->kdiv & PLL2650X_K_MASK) << PLL2650X_K_SHIFT);
writel_relaxed(con1, pll->con_reg + 4);
- do {
- cpu_relax();
- con0 = readl_relaxed(pll->con_reg);
- } while (!(con0 & (PLL2650X_LOCK_STAT_MASK
- << PLL2650X_LOCK_STAT_SHIFT)));
-
- return 0;
+ /* Wait for PLL lock */
+ return samsung_pll_lock_wait(pll,
+ PLL2650X_LOCK_STAT_MASK << PLL2650X_LOCK_STAT_SHIFT);
}
static const struct clk_ops samsung_pll2650x_clk_ops = {
@@ -1196,7 +1195,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
unsigned long parent_rate)
{
struct samsung_clk_pll *pll = to_clk_pll(hw);
- u32 tmp, pll_con0, pll_con2;
+ u32 pll_con0, pll_con2;
const struct samsung_pll_rate_table *rate;
rate = samsung_get_pll_settings(pll, drate);
@@ -1229,11 +1228,7 @@ static int samsung_pll2650xx_set_rate(struct clk_hw *hw, unsigned long drate,
writel_relaxed(pll_con0, pll->con_reg);
writel_relaxed(pll_con2, pll->con_reg + 8);
- do {
- tmp = readl_relaxed(pll->con_reg);
- } while (!(tmp & (0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT)));
-
- return 0;
+ return samsung_pll_lock_wait(pll, 0x1 << PLL2650XX_PLL_LOCKTIME_SHIFT);
}
static const struct clk_ops samsung_pll2650xx_clk_ops = {
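
samsung_pll_lock_wait(), adopted throughout the hunks above, uses readl_relaxed_poll_timeout_atomic() from <linux/iopoll.h> whenever timekeeping is available. A minimal usage sketch of that helper (polling a hypothetical status register until a mask is set, without sleeping, so it is safe in atomic context):

	u32 val;
	int ret;

	/* Poll *reg until (val & mask) is true, re-reading with no delay
	 * between reads, giving up after PLL_TIMEOUT_US microseconds.
	 */
	ret = readl_relaxed_poll_timeout_atomic(reg, val, val & mask,
						0, PLL_TIMEOUT_US);
	if (ret)
		pr_err("timed out: %d\n", ret);	/* ret == -ETIMEDOUT */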
diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig
index f3b4eb9cb0f5..1c14eb20c066 100644
--- a/drivers/clk/sifive/Kconfig
+++ b/drivers/clk/sifive/Kconfig
@@ -8,12 +8,12 @@ menuconfig CLK_SIFIVE
if CLK_SIFIVE
-config CLK_SIFIVE_FU540_PRCI
- bool "PRCI driver for SiFive FU540 SoCs"
+config CLK_SIFIVE_PRCI
+ bool "PRCI driver for SiFive SoCs"
select CLK_ANALOGBITS_WRPLL_CLN28HPC
help
Supports the Power Reset Clock interface (PRCI) IP block found in
- FU540 SoCs. If this kernel is meant to run on a SiFive FU540 SoC,
- enable this driver.
+ FU540/FU740 SoCs. If this kernel is meant to run on a SiFive FU540
+ or FU740 SoC, enable this driver.
endif
diff --git a/drivers/clk/sifive/Makefile b/drivers/clk/sifive/Makefile
index 0797f14fef6b..7b06fc04e6b3 100644
--- a/drivers/clk/sifive/Makefile
+++ b/drivers/clk/sifive/Makefile
@@ -1,2 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_CLK_SIFIVE_FU540_PRCI) += fu540-prci.o
+obj-$(CONFIG_CLK_SIFIVE_PRCI) += sifive-prci.o fu540-prci.o fu740-prci.o
diff --git a/drivers/clk/sifive/fu540-prci.c b/drivers/clk/sifive/fu540-prci.c
index a8901f90a61a..29bab915003c 100644
--- a/drivers/clk/sifive/fu540-prci.c
+++ b/drivers/clk/sifive/fu540-prci.c
@@ -1,17 +1,9 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2019 SiFive, Inc.
- * Wesley Terpstra
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+ * Copyright (C) 2018-2019 Wesley Terpstra
+ * Copyright (C) 2018-2019 Paul Walmsley
+ * Copyright (C) 2020 Zong Li
*
* The FU540 PRCI implements clock and reset control for the SiFive
* FU540-C000 chip. This driver assumes that it has sole control
@@ -24,464 +16,53 @@
* - SiFive FU540-C000 manual v1p0, Chapter 7 "Clocking and Reset"
*/
-#include <dt-bindings/clock/sifive-fu540-prci.h>
-#include <linux/clkdev.h>
-#include <linux/clk-provider.h>
-#include <linux/clk/analogbits-wrpll-cln28hpc.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/io.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_clk.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-/*
- * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
- * hfclk and rtcclk
- */
-#define EXPECTED_CLK_PARENT_COUNT 2
-
-/*
- * Register offsets and bitmasks
- */
-
-/* COREPLLCFG0 */
-#define PRCI_COREPLLCFG0_OFFSET 0x4
-# define PRCI_COREPLLCFG0_DIVR_SHIFT 0
-# define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
-# define PRCI_COREPLLCFG0_DIVF_SHIFT 6
-# define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
-# define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
-# define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
-# define PRCI_COREPLLCFG0_RANGE_SHIFT 18
-# define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
-# define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
-# define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
-# define PRCI_COREPLLCFG0_FSE_SHIFT 25
-# define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
-# define PRCI_COREPLLCFG0_LOCK_SHIFT 31
-# define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
-/* DDRPLLCFG0 */
-#define PRCI_DDRPLLCFG0_OFFSET 0xc
-# define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
-# define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
-# define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
-# define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
-# define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
-# define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
-# define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
-# define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
-# define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
-# define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
-# define PRCI_DDRPLLCFG0_FSE_SHIFT 25
-# define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
-# define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
-# define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
-
-/* DDRPLLCFG1 */
-#define PRCI_DDRPLLCFG1_OFFSET 0x10
-# define PRCI_DDRPLLCFG1_CKE_SHIFT 24
-# define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
-
-/* GEMGXLPLLCFG0 */
-#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
-# define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
-# define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
-# define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
-# define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
-# define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
-# define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
-# define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
-# define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
-# define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
-
-/* GEMGXLPLLCFG1 */
-#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
-# define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 24
-# define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
-
-/* CORECLKSEL */
-#define PRCI_CORECLKSEL_OFFSET 0x24
-# define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
-# define PRCI_CORECLKSEL_CORECLKSEL_MASK (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
-
-/* DEVICESRESETREG */
-#define PRCI_DEVICESRESETREG_OFFSET 0x28
-# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
-# define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
-# define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
-# define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
-# define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
-# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
-# define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
+#include <dt-bindings/clock/sifive-fu540-prci.h>
-/* CLKMUXSTATUSREG */
-#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
-# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
-# define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
+#include "fu540-prci.h"
+#include "sifive-prci.h"
-/*
- * Private structures
- */
+/* PRCI integration data for each WRPLL instance */
-/**
- * struct __prci_data - per-device-instance data
- * @va: base virtual address of the PRCI IP block
- * @hw_clks: encapsulates struct clk_hw records
- *
- * PRCI per-device instance data
- */
-struct __prci_data {
- void __iomem *va;
- struct clk_hw_onecell_data hw_clks;
+static struct __prci_wrpll_data __prci_corepll_data = {
+ .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_coreclksel_use_hfclk,
+ .disable_bypass = sifive_prci_coreclksel_use_corepll,
};
-/**
- * struct __prci_wrpll_data - WRPLL configuration and integration data
- * @c: WRPLL current configuration record
- * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
- * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
- * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
- *
- * @enable_bypass and @disable_bypass are used for WRPLL instances
- * that contain a separate external glitchless clock mux downstream
- * from the PLL. The WRPLL internal bypass mux is not glitchless.
- */
-struct __prci_wrpll_data {
- struct wrpll_cfg c;
- void (*enable_bypass)(struct __prci_data *pd);
- void (*disable_bypass)(struct __prci_data *pd);
- u8 cfg0_offs;
+static struct __prci_wrpll_data __prci_ddrpll_data = {
+ .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
};
-/**
- * struct __prci_clock - describes a clock device managed by PRCI
- * @name: user-readable clock name string - should match the manual
- * @parent_name: parent name for this clock
- * @ops: struct clk_ops for the Linux clock framework to use for control
- * @hw: Linux-private clock data
- * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
- * @pd: PRCI-specific data associated with this clock (if not NULL)
- *
- * PRCI clock data. Used by the PRCI driver to register PRCI-provided
- * clocks to the Linux clock infrastructure.
- */
-struct __prci_clock {
- const char *name;
- const char *parent_name;
- const struct clk_ops *ops;
- struct clk_hw hw;
- struct __prci_wrpll_data *pwd;
- struct __prci_data *pd;
+static struct __prci_wrpll_data __prci_gemgxlpll_data = {
+ .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
};
-#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
-
-/*
- * Private functions
- */
-
-/**
- * __prci_readl() - read from a PRCI register
- * @pd: PRCI context
- * @offs: register offset to read from (in bytes, from PRCI base address)
- *
- * Read the register located at offset @offs from the base virtual
- * address of the PRCI register target described by @pd, and return
- * the value to the caller.
- *
- * Context: Any context.
- *
- * Return: the contents of the register described by @pd and @offs.
- */
-static u32 __prci_readl(struct __prci_data *pd, u32 offs)
-{
- return readl_relaxed(pd->va + offs);
-}
-
-static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
-{
- writel_relaxed(v, pd->va + offs);
-}
-
-/* WRPLL-related private functions */
-
-/**
- * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
- * @c: ptr to a struct wrpll_cfg record to write config into
- * @r: value read from the PRCI PLL configuration register
- *
- * Given a value @r read from an FU540 PRCI PLL configuration register,
- * split it into fields and populate it into the WRPLL configuration record
- * pointed to by @c.
- *
- * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
- * have the same register layout.
- *
- * Context: Any context.
- */
-static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
-{
- u32 v;
-
- v = r & PRCI_COREPLLCFG0_DIVR_MASK;
- v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
- c->divr = v;
-
- v = r & PRCI_COREPLLCFG0_DIVF_MASK;
- v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
- c->divf = v;
-
- v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
- v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
- c->divq = v;
-
- v = r & PRCI_COREPLLCFG0_RANGE_MASK;
- v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
- c->range = v;
-
- c->flags &= (WRPLL_FLAGS_INT_FEEDBACK_MASK |
- WRPLL_FLAGS_EXT_FEEDBACK_MASK);
-
- /* external feedback mode not supported */
- c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
-}
-
-/**
- * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
- * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
- *
- * Using a set of WRPLL configuration values pointed to by @c,
- * assemble a PRCI PLL configuration register value, and return it to
- * the caller.
- *
- * Context: Any context. Caller must ensure that the contents of the
- * record pointed to by @c do not change during the execution
- * of this function.
- *
- * Returns: a value suitable for writing into a PRCI PLL configuration
- * register
- */
-static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
-{
- u32 r = 0;
-
- r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
- r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
- r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
- r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
-
- /* external feedback mode not supported */
- r |= PRCI_COREPLLCFG0_FSE_MASK;
-
- return r;
-}
-
-/**
- * __prci_wrpll_read_cfg() - read the WRPLL configuration from the PRCI
- * @pd: PRCI context
- * @pwd: PRCI WRPLL metadata
- *
- * Read the current configuration of the PLL identified by @pwd from
- * the PRCI identified by @pd, and store it into the local configuration
- * cache in @pwd.
- *
- * Context: Any context. Caller must prevent the records pointed to by
- * @pd and @pwd from changing during execution.
- */
-static void __prci_wrpll_read_cfg(struct __prci_data *pd,
- struct __prci_wrpll_data *pwd)
-{
- __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
-}
-
-/**
- * __prci_wrpll_write_cfg() - write WRPLL configuration into the PRCI
- * @pd: PRCI context
- * @pwd: PRCI WRPLL metadata
- * @c: WRPLL configuration record to write
- *
- * Write the WRPLL configuration described by @c into the WRPLL
- * configuration register identified by @pwd in the PRCI instance
- * described by @c. Make a cached copy of the WRPLL's current
- * configuration so it can be used by other code.
- *
- * Context: Any context. Caller must prevent the records pointed to by
- * @pd and @pwd from changing during execution.
- */
-static void __prci_wrpll_write_cfg(struct __prci_data *pd,
- struct __prci_wrpll_data *pwd,
- struct wrpll_cfg *c)
-{
- __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
-
- memcpy(&pwd->c, c, sizeof(*c));
-}
-
-/* Core clock mux control */
-
-/**
- * __prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
- * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
- *
- * Switch the CORECLK mux to the HFCLK input source; return once complete.
- *
- * Context: Any context. Caller must prevent concurrent changes to the
- * PRCI_CORECLKSEL_OFFSET register.
- */
-static void __prci_coreclksel_use_hfclk(struct __prci_data *pd)
-{
- u32 r;
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
- r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
- __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
-}
-
-/**
- * __prci_coreclksel_use_corepll() - switch the CORECLK mux to output COREPLL
- * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
- *
- * Switch the CORECLK mux to the PLL output clock; return once complete.
- *
- * Context: Any context. Caller must prevent concurrent changes to the
- * PRCI_CORECLKSEL_OFFSET register.
- */
-static void __prci_coreclksel_use_corepll(struct __prci_data *pd)
-{
- u32 r;
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
- r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
- __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
-
- r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
-}
-
-/*
- * Linux clock framework integration
- *
- * See the Linux clock framework documentation for more information on
- * these functions.
- */
-
-static unsigned long sifive_fu540_prci_wrpll_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_wrpll_data *pwd = pc->pwd;
-
- return wrpll_calc_output_rate(&pwd->c, parent_rate);
-}
-
-static long sifive_fu540_prci_wrpll_round_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long *parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_wrpll_data *pwd = pc->pwd;
- struct wrpll_cfg c;
-
- memcpy(&c, &pwd->c, sizeof(c));
-
- wrpll_configure_for_rate(&c, rate, *parent_rate);
-
- return wrpll_calc_output_rate(&c, *parent_rate);
-}
-
-static int sifive_fu540_prci_wrpll_set_rate(struct clk_hw *hw,
- unsigned long rate,
- unsigned long parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_wrpll_data *pwd = pc->pwd;
- struct __prci_data *pd = pc->pd;
- int r;
-
- r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
- if (r)
- return r;
-
- if (pwd->enable_bypass)
- pwd->enable_bypass(pd);
-
- __prci_wrpll_write_cfg(pd, pwd, &pwd->c);
-
- udelay(wrpll_calc_max_lock_us(&pwd->c));
-
- if (pwd->disable_bypass)
- pwd->disable_bypass(pd);
-
- return 0;
-}
+/* Linux clock framework integration */
static const struct clk_ops sifive_fu540_prci_wrpll_clk_ops = {
- .set_rate = sifive_fu540_prci_wrpll_set_rate,
- .round_rate = sifive_fu540_prci_wrpll_round_rate,
- .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+ .set_rate = sifive_prci_wrpll_set_rate,
+ .round_rate = sifive_prci_wrpll_round_rate,
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
+ .enable = sifive_prci_clock_enable,
+ .disable = sifive_prci_clock_disable,
+ .is_enabled = sifive_clk_is_enabled,
};
static const struct clk_ops sifive_fu540_prci_wrpll_ro_clk_ops = {
- .recalc_rate = sifive_fu540_prci_wrpll_recalc_rate,
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
};
-/* TLCLKSEL clock integration */
-
-static unsigned long sifive_fu540_prci_tlclksel_recalc_rate(struct clk_hw *hw,
- unsigned long parent_rate)
-{
- struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
- struct __prci_data *pd = pc->pd;
- u32 v;
- u8 div;
-
- v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
- v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
- div = v ? 1 : 2;
-
- return div_u64(parent_rate, div);
-}
-
static const struct clk_ops sifive_fu540_prci_tlclksel_clk_ops = {
- .recalc_rate = sifive_fu540_prci_tlclksel_recalc_rate,
-};
-
-/*
- * PRCI integration data for each WRPLL instance
- */
-
-static struct __prci_wrpll_data __prci_corepll_data = {
- .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
- .enable_bypass = __prci_coreclksel_use_hfclk,
- .disable_bypass = __prci_coreclksel_use_corepll,
-};
-
-static struct __prci_wrpll_data __prci_ddrpll_data = {
- .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+ .recalc_rate = sifive_prci_tlclksel_recalc_rate,
};
-static struct __prci_wrpll_data __prci_gemgxlpll_data = {
- .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
-};
-
-/*
- * List of clock controls provided by the PRCI
- */
-
-static struct __prci_clock __prci_init_clocks[] = {
+/* List of clock controls provided by the PRCI */
+struct __prci_clock __prci_init_clocks_fu540[] = {
[PRCI_CLK_COREPLL] = {
.name = "corepll",
.parent_name = "hfclk",
@@ -506,125 +87,3 @@ static struct __prci_clock __prci_init_clocks[] = {
.ops = &sifive_fu540_prci_tlclksel_clk_ops,
},
};
-
-/**
- * __prci_register_clocks() - register clock controls in the PRCI with Linux
- * @dev: Linux struct device *
- *
- * Register the list of clock controls described in __prci_init_plls[] with
- * the Linux clock framework.
- *
- * Return: 0 upon success or a negative error code upon failure.
- */
-static int __prci_register_clocks(struct device *dev, struct __prci_data *pd)
-{
- struct clk_init_data init = { };
- struct __prci_clock *pic;
- int parent_count, i, r;
-
- parent_count = of_clk_get_parent_count(dev->of_node);
- if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
- dev_err(dev, "expected only two parent clocks, found %d\n",
- parent_count);
- return -EINVAL;
- }
-
- /* Register PLLs */
- for (i = 0; i < ARRAY_SIZE(__prci_init_clocks); ++i) {
- pic = &__prci_init_clocks[i];
-
- init.name = pic->name;
- init.parent_names = &pic->parent_name;
- init.num_parents = 1;
- init.ops = pic->ops;
- pic->hw.init = &init;
-
- pic->pd = pd;
-
- if (pic->pwd)
- __prci_wrpll_read_cfg(pd, pic->pwd);
-
- r = devm_clk_hw_register(dev, &pic->hw);
- if (r) {
- dev_warn(dev, "Failed to register clock %s: %d\n",
- init.name, r);
- return r;
- }
-
- r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
- if (r) {
- dev_warn(dev, "Failed to register clkdev for %s: %d\n",
- init.name, r);
- return r;
- }
-
- pd->hw_clks.hws[i] = &pic->hw;
- }
-
- pd->hw_clks.num = i;
-
- r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
- &pd->hw_clks);
- if (r) {
- dev_err(dev, "could not add hw_provider: %d\n", r);
- return r;
- }
-
- return 0;
-}
-
-/*
- * Linux device model integration
- *
- * See the Linux device model documentation for more information about
- * these functions.
- */
-static int sifive_fu540_prci_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct resource *res;
- struct __prci_data *pd;
- int r;
-
- pd = devm_kzalloc(dev,
- struct_size(pd, hw_clks.hws,
- ARRAY_SIZE(__prci_init_clocks)),
- GFP_KERNEL);
- if (!pd)
- return -ENOMEM;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pd->va = devm_ioremap_resource(dev, res);
- if (IS_ERR(pd->va))
- return PTR_ERR(pd->va);
-
- r = __prci_register_clocks(dev, pd);
- if (r) {
- dev_err(dev, "could not register clocks: %d\n", r);
- return r;
- }
-
- dev_dbg(dev, "SiFive FU540 PRCI probed\n");
-
- return 0;
-}
-
-static const struct of_device_id sifive_fu540_prci_of_match[] = {
- { .compatible = "sifive,fu540-c000-prci", },
- {}
-};
-MODULE_DEVICE_TABLE(of, sifive_fu540_prci_of_match);
-
-static struct platform_driver sifive_fu540_prci_driver = {
- .driver = {
- .name = "sifive-fu540-prci",
- .of_match_table = sifive_fu540_prci_of_match,
- },
- .probe = sifive_fu540_prci_probe,
-};
-
-static int __init sifive_fu540_prci_init(void)
-{
- return platform_driver_register(&sifive_fu540_prci_driver);
-}
-core_initcall(sifive_fu540_prci_init);
diff --git a/drivers/clk/sifive/fu540-prci.h b/drivers/clk/sifive/fu540-prci.h
new file mode 100644
index 000000000000..c8271efa7bdc
--- /dev/null
+++ b/drivers/clk/sifive/fu540-prci.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Zong Li
+ */
+
+#ifndef __SIFIVE_CLK_FU540_PRCI_H
+#define __SIFIVE_CLK_FU540_PRCI_H
+
+#include "sifive-prci.h"
+
+#define NUM_CLOCK_FU540 4
+
+extern struct __prci_clock __prci_init_clocks_fu540[NUM_CLOCK_FU540];
+
+static const struct prci_clk_desc prci_clk_fu540 = {
+ .clks = __prci_init_clocks_fu540,
+ .num_clks = ARRAY_SIZE(__prci_init_clocks_fu540),
+};
+
+#endif /* __SIFIVE_CLK_FU540_PRCI_H */
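The dt-bindings indices (PRCI_CLK_COREPLL through PRCI_CLK_TLCLK) double as designated-initializer positions in __prci_init_clocks_fu540[], so NUM_CLOCK_FU540 must cover the highest index. A build-time guard could look like the following sketch; the static_assert line is illustrative only, not part of the patch:

#include <linux/build_bug.h>

/* PRCI_CLK_TLCLK (3) is the highest FU540 clock index, so the array
 * sized by NUM_CLOCK_FU540 (4) must reach at least index 3. */
static_assert(PRCI_CLK_TLCLK < NUM_CLOCK_FU540);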
diff --git a/drivers/clk/sifive/fu740-prci.c b/drivers/clk/sifive/fu740-prci.c
new file mode 100644
index 000000000000..764d1097aa51
--- /dev/null
+++ b/drivers/clk/sifive/fu740-prci.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Copyright (C) 2020 Zong Li
+ */
+
+#include <linux/module.h>
+
+#include <dt-bindings/clock/sifive-fu740-prci.h>
+
+#include "fu540-prci.h"
+#include "sifive-prci.h"
+
+/* PRCI integration data for each WRPLL instance */
+
+static struct __prci_wrpll_data __prci_corepll_data = {
+ .cfg0_offs = PRCI_COREPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_COREPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_coreclksel_use_hfclk,
+ .disable_bypass = sifive_prci_coreclksel_use_final_corepll,
+};
+
+static struct __prci_wrpll_data __prci_ddrpll_data = {
+ .cfg0_offs = PRCI_DDRPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_DDRPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data __prci_gemgxlpll_data = {
+ .cfg0_offs = PRCI_GEMGXLPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_GEMGXLPLLCFG1_OFFSET,
+};
+
+static struct __prci_wrpll_data __prci_dvfscorepll_data = {
+ .cfg0_offs = PRCI_DVFSCOREPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_DVFSCOREPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_corepllsel_use_corepll,
+ .disable_bypass = sifive_prci_corepllsel_use_dvfscorepll,
+};
+
+static struct __prci_wrpll_data __prci_hfpclkpll_data = {
+ .cfg0_offs = PRCI_HFPCLKPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_HFPCLKPLLCFG1_OFFSET,
+ .enable_bypass = sifive_prci_hfpclkpllsel_use_hfclk,
+ .disable_bypass = sifive_prci_hfpclkpllsel_use_hfpclkpll,
+};
+
+static struct __prci_wrpll_data __prci_cltxpll_data = {
+ .cfg0_offs = PRCI_CLTXPLLCFG0_OFFSET,
+ .cfg1_offs = PRCI_CLTXPLLCFG1_OFFSET,
+};
+
+/* Linux clock framework integration */
+
+static const struct clk_ops sifive_fu740_prci_wrpll_clk_ops = {
+ .set_rate = sifive_prci_wrpll_set_rate,
+ .round_rate = sifive_prci_wrpll_round_rate,
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
+ .enable = sifive_prci_clock_enable,
+ .disable = sifive_prci_clock_disable,
+ .is_enabled = sifive_clk_is_enabled,
+};
+
+static const struct clk_ops sifive_fu740_prci_wrpll_ro_clk_ops = {
+ .recalc_rate = sifive_prci_wrpll_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_tlclksel_clk_ops = {
+ .recalc_rate = sifive_prci_tlclksel_recalc_rate,
+};
+
+static const struct clk_ops sifive_fu740_prci_hfpclkplldiv_clk_ops = {
+ .recalc_rate = sifive_prci_hfpclkplldiv_recalc_rate,
+};
+
+/* List of clock controls provided by the PRCI */
+struct __prci_clock __prci_init_clocks_fu740[] = {
+ [PRCI_CLK_COREPLL] = {
+ .name = "corepll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_corepll_data,
+ },
+ [PRCI_CLK_DDRPLL] = {
+ .name = "ddrpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_ro_clk_ops,
+ .pwd = &__prci_ddrpll_data,
+ },
+ [PRCI_CLK_GEMGXLPLL] = {
+ .name = "gemgxlpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_gemgxlpll_data,
+ },
+ [PRCI_CLK_DVFSCOREPLL] = {
+ .name = "dvfscorepll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_dvfscorepll_data,
+ },
+ [PRCI_CLK_HFPCLKPLL] = {
+ .name = "hfpclkpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_hfpclkpll_data,
+ },
+ [PRCI_CLK_CLTXPLL] = {
+ .name = "cltxpll",
+ .parent_name = "hfclk",
+ .ops = &sifive_fu740_prci_wrpll_clk_ops,
+ .pwd = &__prci_cltxpll_data,
+ },
+ [PRCI_CLK_TLCLK] = {
+ .name = "tlclk",
+ .parent_name = "corepll",
+ .ops = &sifive_fu740_prci_tlclksel_clk_ops,
+ },
+ [PRCI_CLK_PCLK] = {
+ .name = "pclk",
+ .parent_name = "hfpclkpll",
+ .ops = &sifive_fu740_prci_hfpclkplldiv_clk_ops,
+ },
+};
diff --git a/drivers/clk/sifive/fu740-prci.h b/drivers/clk/sifive/fu740-prci.h
new file mode 100644
index 000000000000..13ef971f7764
--- /dev/null
+++ b/drivers/clk/sifive/fu740-prci.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Zong Li
+ */
+
+#ifndef __SIFIVE_CLK_FU740_PRCI_H
+#define __SIFIVE_CLK_FU740_PRCI_H
+
+#include "sifive-prci.h"
+
+#define NUM_CLOCK_FU740 8
+
+extern struct __prci_clock __prci_init_clocks_fu740[NUM_CLOCK_FU740];
+
+static const struct prci_clk_desc prci_clk_fu740 = {
+ .clks = __prci_init_clocks_fu740,
+ .num_clks = ARRAY_SIZE(__prci_init_clocks_fu740),
+};
+
+#endif /* __SIFIVE_CLK_FU740_PRCI_H */
diff --git a/drivers/clk/sifive/sifive-prci.c b/drivers/clk/sifive/sifive-prci.c
new file mode 100644
index 000000000000..c78b042750e2
--- /dev/null
+++ b/drivers/clk/sifive/sifive-prci.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 SiFive, Inc.
+ * Copyright (C) 2020 Zong Li
+ */
+
+#include <linux/clkdev.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include "sifive-prci.h"
+#include "fu540-prci.h"
+#include "fu740-prci.h"
+
+/*
+ * Private functions
+ */
+
+/**
+ * __prci_readl() - read from a PRCI register
+ * @pd: PRCI context
+ * @offs: register offset to read from (in bytes, from PRCI base address)
+ *
+ * Read the register located at offset @offs from the base virtual
+ * address of the PRCI register target described by @pd, and return
+ * the value to the caller.
+ *
+ * Context: Any context.
+ *
+ * Return: the contents of the register described by @pd and @offs.
+ */
+static u32 __prci_readl(struct __prci_data *pd, u32 offs)
+{
+ return readl_relaxed(pd->va + offs);
+}
+
+static void __prci_writel(u32 v, u32 offs, struct __prci_data *pd)
+{
+ writel_relaxed(v, pd->va + offs);
+}
+
+/* WRPLL-related private functions */
+
+/**
+ * __prci_wrpll_unpack() - unpack WRPLL configuration registers into parameters
+ * @c: ptr to a struct wrpll_cfg record to write config into
+ * @r: value read from the PRCI PLL configuration register
+ *
+ * Given a value @r read from a SiFive PRCI PLL configuration register,
+ * split it into fields and populate it into the WRPLL configuration record
+ * pointed to by @c.
+ *
+ * The COREPLLCFG0 macros are used below, but the other *PLLCFG0 macros
+ * have the same register layout.
+ *
+ * Context: Any context.
+ */
+static void __prci_wrpll_unpack(struct wrpll_cfg *c, u32 r)
+{
+ u32 v;
+
+ v = r & PRCI_COREPLLCFG0_DIVR_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVR_SHIFT;
+ c->divr = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVF_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVF_SHIFT;
+ c->divf = v;
+
+ v = r & PRCI_COREPLLCFG0_DIVQ_MASK;
+ v >>= PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ c->divq = v;
+
+ v = r & PRCI_COREPLLCFG0_RANGE_MASK;
+ v >>= PRCI_COREPLLCFG0_RANGE_SHIFT;
+ c->range = v;
+
+ c->flags &=
+ (WRPLL_FLAGS_INT_FEEDBACK_MASK | WRPLL_FLAGS_EXT_FEEDBACK_MASK);
+
+ /* external feedback mode not supported */
+ c->flags |= WRPLL_FLAGS_INT_FEEDBACK_MASK;
+}
+
+/**
+ * __prci_wrpll_pack() - pack PLL configuration parameters into a register value
+ * @c: pointer to a struct wrpll_cfg record containing the PLL's cfg
+ *
+ * Using a set of WRPLL configuration values pointed to by @c,
+ * assemble a PRCI PLL configuration register value, and return it to
+ * the caller.
+ *
+ * Context: Any context. Caller must ensure that the contents of the
+ * record pointed to by @c do not change during the execution
+ * of this function.
+ *
+ * Returns: a value suitable for writing into a PRCI PLL configuration
+ * register
+ */
+static u32 __prci_wrpll_pack(const struct wrpll_cfg *c)
+{
+ u32 r = 0;
+
+ r |= c->divr << PRCI_COREPLLCFG0_DIVR_SHIFT;
+ r |= c->divf << PRCI_COREPLLCFG0_DIVF_SHIFT;
+ r |= c->divq << PRCI_COREPLLCFG0_DIVQ_SHIFT;
+ r |= c->range << PRCI_COREPLLCFG0_RANGE_SHIFT;
+
+ /* external feedback mode not supported */
+ r |= PRCI_COREPLLCFG0_FSE_MASK;
+
+ return r;
+}
+
+/**
+ * __prci_wrpll_read_cfg0() - read the WRPLL configuration from the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ *
+ * Read the current configuration of the PLL identified by @pwd from
+ * the PRCI identified by @pd, and store it into the local configuration
+ * cache in @pwd.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_read_cfg0(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd)
+{
+ __prci_wrpll_unpack(&pwd->c, __prci_readl(pd, pwd->cfg0_offs));
+}
+
+/**
+ * __prci_wrpll_write_cfg0() - write WRPLL configuration into the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ * @c: WRPLL configuration record to write
+ *
+ * Write the WRPLL configuration described by @c into the WRPLL
+ * configuration register identified by @pwd in the PRCI instance
+ * described by @pd. Make a cached copy of the WRPLL's current
+ * configuration so it can be used by other code.
+ *
+ * Context: Any context. Caller must prevent the records pointed to by
+ * @pd and @pwd from changing during execution.
+ */
+static void __prci_wrpll_write_cfg0(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd,
+ struct wrpll_cfg *c)
+{
+ __prci_writel(__prci_wrpll_pack(c), pwd->cfg0_offs, pd);
+
+ memcpy(&pwd->c, c, sizeof(*c));
+}
+
+/**
+ * __prci_wrpll_write_cfg1() - write the clock enable bit into the PRCI
+ * @pd: PRCI context
+ * @pwd: PRCI WRPLL metadata
+ * @enable: clock enable (CKE) value to write
+ */
+static void __prci_wrpll_write_cfg1(struct __prci_data *pd,
+ struct __prci_wrpll_data *pwd,
+ u32 enable)
+{
+ __prci_writel(enable, pwd->cfg1_offs, pd);
+}
+
+/*
+ * Linux clock framework integration
+ *
+ * See the Linux clock framework documentation for more information on
+ * these functions.
+ */
+
+unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+
+ return wrpll_calc_output_rate(&pwd->c, parent_rate);
+}
+
+long sifive_prci_wrpll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct wrpll_cfg c;
+
+ memcpy(&c, &pwd->c, sizeof(c));
+
+ wrpll_configure_for_rate(&c, rate, *parent_rate);
+
+ return wrpll_calc_output_rate(&c, *parent_rate);
+}
+
+int sifive_prci_wrpll_set_rate(struct clk_hw *hw,
+ unsigned long rate, unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ int r;
+
+ r = wrpll_configure_for_rate(&pwd->c, rate, parent_rate);
+ if (r)
+ return r;
+
+ if (pwd->enable_bypass)
+ pwd->enable_bypass(pd);
+
+ __prci_wrpll_write_cfg0(pd, pwd, &pwd->c);
+
+ udelay(wrpll_calc_max_lock_us(&pwd->c));
+
+ return 0;
+}
+
+int sifive_clk_is_enabled(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ u32 r;
+
+ r = __prci_readl(pd, pwd->cfg1_offs);
+
+ if (r & PRCI_COREPLLCFG1_CKE_MASK)
+ return 1;
+ else
+ return 0;
+}
+
+int sifive_prci_clock_enable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+
+ if (sifive_clk_is_enabled(hw))
+ return 0;
+
+ __prci_wrpll_write_cfg1(pd, pwd, PRCI_COREPLLCFG1_CKE_MASK);
+
+ if (pwd->disable_bypass)
+ pwd->disable_bypass(pd);
+
+ return 0;
+}
+
+void sifive_prci_clock_disable(struct clk_hw *hw)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_wrpll_data *pwd = pc->pwd;
+ struct __prci_data *pd = pc->pd;
+ u32 r;
+
+ if (pwd->enable_bypass)
+ pwd->enable_bypass(pd);
+
+ r = __prci_readl(pd, pwd->cfg1_offs);
+ r &= ~PRCI_COREPLLCFG1_CKE_MASK;
+
+ __prci_wrpll_write_cfg1(pd, pwd, r);
+}
+
+/* TLCLKSEL clock integration */
+
+unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 v;
+ u8 div;
+
+ v = __prci_readl(pd, PRCI_CLKMUXSTATUSREG_OFFSET);
+ v &= PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK;
+ div = v ? 1 : 2;
+
+ return div_u64(parent_rate, div);
+}
+
+/* HFPCLK clock integration */
+
+unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct __prci_clock *pc = clk_hw_to_prci_clock(hw);
+ struct __prci_data *pd = pc->pd;
+ u32 div = __prci_readl(pd, PRCI_HFPCLKPLLDIV_OFFSET);
+
+ return div_u64(parent_rate, div + 2);
+}
+
+/*
+ * Core clock mux control
+ */
+
+/**
+ * sifive_prci_coreclksel_use_hfclk() - switch the CORECLK mux to output HFCLK
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the HFCLK input source; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r |= PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_coreclksel_use_corepll() - switch the CORECLK mux to output
+ * COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the COREPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_coreclksel_use_final_corepll() - switch the CORECLK mux to output
+ * FINAL_COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the CORECLK mux reg
+ *
+ * Switch the CORECLK mux to the final COREPLL output clock; return once
+ * complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_CORECLKSEL_OFFSET register.
+ */
+void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET);
+ r &= ~PRCI_CORECLKSEL_CORECLKSEL_MASK;
+ __prci_writel(r, PRCI_CORECLKSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_CORECLKSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_corepllsel_use_dvfscorepll() - switch the COREPLL mux to
+ * output DVFS_COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
+ *
+ * Switch the COREPLL mux to the DVFSCOREPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_COREPLLSEL_OFFSET register.
+ */
+void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
+ r |= PRCI_COREPLLSEL_COREPLLSEL_MASK;
+ __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_corepllsel_use_corepll() - switch the COREPLL mux to
+ * output COREPLL
+ * @pd: struct __prci_data * for the PRCI containing the COREPLL mux reg
+ *
+ * Switch the COREPLL mux to the COREPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_COREPLLSEL_OFFSET register.
+ */
+void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET);
+ r &= ~PRCI_COREPLLSEL_COREPLLSEL_MASK;
+ __prci_writel(r, PRCI_COREPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_COREPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_hfpclkpllsel_use_hfclk() - switch the HFPCLKPLL mux to
+ * output HFCLK
+ * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
+ *
+ * Switch the HFPCLKPLL mux to the HFCLK input source; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_HFPCLKPLLSEL_OFFSET register.
+ */
+void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
+ r |= PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
+ __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * sifive_prci_hfpclkpllsel_use_hfpclkpll() - switch the HFPCLKPLL mux to
+ * output HFPCLKPLL
+ * @pd: struct __prci_data * for the PRCI containing the HFPCLKPLL mux reg
+ *
+ * Switch the HFPCLKPLL mux to the HFPCLKPLL output clock; return once complete.
+ *
+ * Context: Any context. Caller must prevent concurrent changes to the
+ * PRCI_HFPCLKPLLSEL_OFFSET register.
+ */
+void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd)
+{
+ u32 r;
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET);
+ r &= ~PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK;
+ __prci_writel(r, PRCI_HFPCLKPLLSEL_OFFSET, pd);
+
+ r = __prci_readl(pd, PRCI_HFPCLKPLLSEL_OFFSET); /* barrier */
+}
+
+/**
+ * __prci_register_clocks() - register clock controls in the PRCI
+ * @dev: Linux struct device
+ * @pd: pointer to the PRCI per-device instance data
+ * @desc: pointer to the clock description data for this SoC
+ *
+ * Register the list of clock controls described in @desc->clks[] with
+ * the Linux clock framework.
+ *
+ * Return: 0 upon success or a negative error code upon failure.
+ */
+static int __prci_register_clocks(struct device *dev, struct __prci_data *pd,
+ const struct prci_clk_desc *desc)
+{
+ struct clk_init_data init = { };
+ struct __prci_clock *pic;
+ int parent_count, i, r;
+
+ parent_count = of_clk_get_parent_count(dev->of_node);
+ if (parent_count != EXPECTED_CLK_PARENT_COUNT) {
+ dev_err(dev, "expected only two parent clocks, found %d\n",
+ parent_count);
+ return -EINVAL;
+ }
+
+ /* Register PLLs */
+ for (i = 0; i < desc->num_clks; ++i) {
+ pic = &(desc->clks[i]);
+
+ init.name = pic->name;
+ init.parent_names = &pic->parent_name;
+ init.num_parents = 1;
+ init.ops = pic->ops;
+ pic->hw.init = &init;
+
+ pic->pd = pd;
+
+ if (pic->pwd)
+ __prci_wrpll_read_cfg0(pd, pic->pwd);
+
+ r = devm_clk_hw_register(dev, &pic->hw);
+ if (r) {
+ dev_warn(dev, "Failed to register clock %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ r = clk_hw_register_clkdev(&pic->hw, pic->name, dev_name(dev));
+ if (r) {
+ dev_warn(dev, "Failed to register clkdev for %s: %d\n",
+ init.name, r);
+ return r;
+ }
+
+ pd->hw_clks.hws[i] = &pic->hw;
+ }
+
+ pd->hw_clks.num = i;
+
+ r = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
+ &pd->hw_clks);
+ if (r) {
+ dev_err(dev, "could not add hw_provider: %d\n", r);
+ return r;
+ }
+
+ return 0;
+}
+
+/**
+ * sifive_prci_probe() - initialize PRCI per-device data and register clocks
+ * @pdev: platform device pointer for the prci
+ *
+ * Return: 0 upon success or a negative error code upon failure.
+ */
+static int sifive_prci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ struct __prci_data *pd;
+ const struct prci_clk_desc *desc;
+ int r;
+
+ desc = of_device_get_match_data(&pdev->dev);
+
+ pd = devm_kzalloc(dev, struct_size(pd, hw_clks.hws, desc->num_clks), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pd->va = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pd->va))
+ return PTR_ERR(pd->va);
+
+ r = __prci_register_clocks(dev, pd, desc);
+ if (r) {
+ dev_err(dev, "could not register clocks: %d\n", r);
+ return r;
+ }
+
+ dev_dbg(dev, "SiFive PRCI probed\n");
+
+ return 0;
+}
+
+static const struct of_device_id sifive_prci_of_match[] = {
+ {.compatible = "sifive,fu540-c000-prci", .data = &prci_clk_fu540},
+ {.compatible = "sifive,fu740-c000-prci", .data = &prci_clk_fu740},
+ {}
+};
+
+static struct platform_driver sifive_prci_driver = {
+ .driver = {
+ .name = "sifive-clk-prci",
+ .of_match_table = sifive_prci_of_match,
+ },
+ .probe = sifive_prci_probe,
+};
+
+static int __init sifive_prci_init(void)
+{
+ return platform_driver_register(&sifive_prci_driver);
+}
+core_initcall(sifive_prci_init);
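The probe path is SoC-agnostic: of_device_get_match_data() returns the per-SoC struct prci_clk_desc, and everything downstream keys off desc->clks and desc->num_clks. As a sketch of what a further SoC would need, with every "newsoc" identifier hypothetical:

/* Hypothetical clock list for a third SoC; contents elided. */
static struct __prci_clock __prci_init_clocks_newsoc[2];

static const struct prci_clk_desc prci_clk_newsoc = {
	.clks = __prci_init_clocks_newsoc,
	.num_clks = ARRAY_SIZE(__prci_init_clocks_newsoc),
};

/*
 * ...plus one additional row in sifive_prci_of_match[]:
 * {.compatible = "sifive,newsoc-prci", .data = &prci_clk_newsoc},
 */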
diff --git a/drivers/clk/sifive/sifive-prci.h b/drivers/clk/sifive/sifive-prci.h
new file mode 100644
index 000000000000..dbdbd1722688
--- /dev/null
+++ b/drivers/clk/sifive/sifive-prci.h
@@ -0,0 +1,299 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018-2019 SiFive, Inc.
+ * Wesley Terpstra
+ * Paul Walmsley
+ * Zong Li
+ */
+
+#ifndef __SIFIVE_CLK_SIFIVE_PRCI_H
+#define __SIFIVE_CLK_SIFIVE_PRCI_H
+
+#include <linux/clk/analogbits-wrpll-cln28hpc.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+/*
+ * EXPECTED_CLK_PARENT_COUNT: how many parent clocks this driver expects:
+ * hfclk and rtcclk
+ */
+#define EXPECTED_CLK_PARENT_COUNT 2
+
+/*
+ * Register offsets and bitmasks
+ */
+
+/* COREPLLCFG0 */
+#define PRCI_COREPLLCFG0_OFFSET 0x4
+#define PRCI_COREPLLCFG0_DIVR_SHIFT 0
+#define PRCI_COREPLLCFG0_DIVR_MASK (0x3f << PRCI_COREPLLCFG0_DIVR_SHIFT)
+#define PRCI_COREPLLCFG0_DIVF_SHIFT 6
+#define PRCI_COREPLLCFG0_DIVF_MASK (0x1ff << PRCI_COREPLLCFG0_DIVF_SHIFT)
+#define PRCI_COREPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_COREPLLCFG0_DIVQ_MASK (0x7 << PRCI_COREPLLCFG0_DIVQ_SHIFT)
+#define PRCI_COREPLLCFG0_RANGE_SHIFT 18
+#define PRCI_COREPLLCFG0_RANGE_MASK (0x7 << PRCI_COREPLLCFG0_RANGE_SHIFT)
+#define PRCI_COREPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_COREPLLCFG0_BYPASS_MASK (0x1 << PRCI_COREPLLCFG0_BYPASS_SHIFT)
+#define PRCI_COREPLLCFG0_FSE_SHIFT 25
+#define PRCI_COREPLLCFG0_FSE_MASK (0x1 << PRCI_COREPLLCFG0_FSE_SHIFT)
+#define PRCI_COREPLLCFG0_LOCK_SHIFT 31
+#define PRCI_COREPLLCFG0_LOCK_MASK (0x1 << PRCI_COREPLLCFG0_LOCK_SHIFT)
+
+/* COREPLLCFG1 */
+#define PRCI_COREPLLCFG1_OFFSET 0x8
+#define PRCI_COREPLLCFG1_CKE_SHIFT 31
+#define PRCI_COREPLLCFG1_CKE_MASK (0x1 << PRCI_COREPLLCFG1_CKE_SHIFT)
+
+/* DDRPLLCFG0 */
+#define PRCI_DDRPLLCFG0_OFFSET 0xc
+#define PRCI_DDRPLLCFG0_DIVR_SHIFT 0
+#define PRCI_DDRPLLCFG0_DIVR_MASK (0x3f << PRCI_DDRPLLCFG0_DIVR_SHIFT)
+#define PRCI_DDRPLLCFG0_DIVF_SHIFT 6
+#define PRCI_DDRPLLCFG0_DIVF_MASK (0x1ff << PRCI_DDRPLLCFG0_DIVF_SHIFT)
+#define PRCI_DDRPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_DDRPLLCFG0_DIVQ_MASK (0x7 << PRCI_DDRPLLCFG0_DIVQ_SHIFT)
+#define PRCI_DDRPLLCFG0_RANGE_SHIFT 18
+#define PRCI_DDRPLLCFG0_RANGE_MASK (0x7 << PRCI_DDRPLLCFG0_RANGE_SHIFT)
+#define PRCI_DDRPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_DDRPLLCFG0_BYPASS_MASK (0x1 << PRCI_DDRPLLCFG0_BYPASS_SHIFT)
+#define PRCI_DDRPLLCFG0_FSE_SHIFT 25
+#define PRCI_DDRPLLCFG0_FSE_MASK (0x1 << PRCI_DDRPLLCFG0_FSE_SHIFT)
+#define PRCI_DDRPLLCFG0_LOCK_SHIFT 31
+#define PRCI_DDRPLLCFG0_LOCK_MASK (0x1 << PRCI_DDRPLLCFG0_LOCK_SHIFT)
+
+/* DDRPLLCFG1 */
+#define PRCI_DDRPLLCFG1_OFFSET 0x10
+#define PRCI_DDRPLLCFG1_CKE_SHIFT 31
+#define PRCI_DDRPLLCFG1_CKE_MASK (0x1 << PRCI_DDRPLLCFG1_CKE_SHIFT)
+
+/* GEMGXLPLLCFG0 */
+#define PRCI_GEMGXLPLLCFG0_OFFSET 0x1c
+#define PRCI_GEMGXLPLLCFG0_DIVR_SHIFT 0
+#define PRCI_GEMGXLPLLCFG0_DIVR_MASK (0x3f << PRCI_GEMGXLPLLCFG0_DIVR_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_DIVF_SHIFT 6
+#define PRCI_GEMGXLPLLCFG0_DIVF_MASK (0x1ff << PRCI_GEMGXLPLLCFG0_DIVF_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_GEMGXLPLLCFG0_DIVQ_MASK (0x7 << PRCI_GEMGXLPLLCFG0_DIVQ_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_RANGE_SHIFT 18
+#define PRCI_GEMGXLPLLCFG0_RANGE_MASK (0x7 << PRCI_GEMGXLPLLCFG0_RANGE_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_GEMGXLPLLCFG0_BYPASS_MASK (0x1 << PRCI_GEMGXLPLLCFG0_BYPASS_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_FSE_SHIFT 25
+#define PRCI_GEMGXLPLLCFG0_FSE_MASK (0x1 << PRCI_GEMGXLPLLCFG0_FSE_SHIFT)
+#define PRCI_GEMGXLPLLCFG0_LOCK_SHIFT 31
+#define PRCI_GEMGXLPLLCFG0_LOCK_MASK (0x1 << PRCI_GEMGXLPLLCFG0_LOCK_SHIFT)
+
+/* GEMGXLPLLCFG1 */
+#define PRCI_GEMGXLPLLCFG1_OFFSET 0x20
+#define PRCI_GEMGXLPLLCFG1_CKE_SHIFT 31
+#define PRCI_GEMGXLPLLCFG1_CKE_MASK (0x1 << PRCI_GEMGXLPLLCFG1_CKE_SHIFT)
+
+/* CORECLKSEL */
+#define PRCI_CORECLKSEL_OFFSET 0x24
+#define PRCI_CORECLKSEL_CORECLKSEL_SHIFT 0
+#define PRCI_CORECLKSEL_CORECLKSEL_MASK \
+ (0x1 << PRCI_CORECLKSEL_CORECLKSEL_SHIFT)
+
+/* DEVICESRESETREG */
+#define PRCI_DEVICESRESETREG_OFFSET 0x28
+#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT 0
+#define PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_CTRL_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT 1
+#define PRCI_DEVICESRESETREG_DDR_AXI_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_AXI_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT 2
+#define PRCI_DEVICESRESETREG_DDR_AHB_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_AHB_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT 3
+#define PRCI_DEVICESRESETREG_DDR_PHY_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_DDR_PHY_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT 5
+#define PRCI_DEVICESRESETREG_GEMGXL_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_GEMGXL_RST_N_SHIFT)
+#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT 6
+#define PRCI_DEVICESRESETREG_CHIPLINK_RST_N_MASK \
+ (0x1 << PRCI_DEVICESRESETREG_CHIPLINK_RST_N_SHIFT)
+
+/* CLKMUXSTATUSREG */
+#define PRCI_CLKMUXSTATUSREG_OFFSET 0x2c
+#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT 1
+#define PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_MASK \
+ (0x1 << PRCI_CLKMUXSTATUSREG_TLCLKSEL_STATUS_SHIFT)
+
+/* CLTXPLLCFG0 */
+#define PRCI_CLTXPLLCFG0_OFFSET 0x30
+#define PRCI_CLTXPLLCFG0_DIVR_SHIFT 0
+#define PRCI_CLTXPLLCFG0_DIVR_MASK (0x3f << PRCI_CLTXPLLCFG0_DIVR_SHIFT)
+#define PRCI_CLTXPLLCFG0_DIVF_SHIFT 6
+#define PRCI_CLTXPLLCFG0_DIVF_MASK (0x1ff << PRCI_CLTXPLLCFG0_DIVF_SHIFT)
+#define PRCI_CLTXPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_CLTXPLLCFG0_DIVQ_MASK (0x7 << PRCI_CLTXPLLCFG0_DIVQ_SHIFT)
+#define PRCI_CLTXPLLCFG0_RANGE_SHIFT 18
+#define PRCI_CLTXPLLCFG0_RANGE_MASK (0x7 << PRCI_CLTXPLLCFG0_RANGE_SHIFT)
+#define PRCI_CLTXPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_CLTXPLLCFG0_BYPASS_MASK (0x1 << PRCI_CLTXPLLCFG0_BYPASS_SHIFT)
+#define PRCI_CLTXPLLCFG0_FSE_SHIFT 25
+#define PRCI_CLTXPLLCFG0_FSE_MASK (0x1 << PRCI_CLTXPLLCFG0_FSE_SHIFT)
+#define PRCI_CLTXPLLCFG0_LOCK_SHIFT 31
+#define PRCI_CLTXPLLCFG0_LOCK_MASK (0x1 << PRCI_CLTXPLLCFG0_LOCK_SHIFT)
+
+/* CLTXPLLCFG1 */
+#define PRCI_CLTXPLLCFG1_OFFSET 0x34
+#define PRCI_CLTXPLLCFG1_CKE_SHIFT 31
+#define PRCI_CLTXPLLCFG1_CKE_MASK (0x1 << PRCI_CLTXPLLCFG1_CKE_SHIFT)
+
+/* DVFSCOREPLLCFG0 */
+#define PRCI_DVFSCOREPLLCFG0_OFFSET 0x38
+
+/* DVFSCOREPLLCFG1 */
+#define PRCI_DVFSCOREPLLCFG1_OFFSET 0x3c
+#define PRCI_DVFSCOREPLLCFG1_CKE_SHIFT 31
+#define PRCI_DVFSCOREPLLCFG1_CKE_MASK (0x1 << PRCI_DVFSCOREPLLCFG1_CKE_SHIFT)
+
+/* COREPLLSEL */
+#define PRCI_COREPLLSEL_OFFSET 0x40
+#define PRCI_COREPLLSEL_COREPLLSEL_SHIFT 0
+#define PRCI_COREPLLSEL_COREPLLSEL_MASK \
+ (0x1 << PRCI_COREPLLSEL_COREPLLSEL_SHIFT)
+
+/* HFPCLKPLLCFG0 */
+#define PRCI_HFPCLKPLLCFG0_OFFSET 0x50
+#define PRCI_HFPCLKPLLCFG0_DIVR_SHIFT 0
+#define PRCI_HFPCLKPLLCFG0_DIVR_MASK \
+ (0x3f << PRCI_HFPCLKPLLCFG0_DIVR_SHIFT)
+#define PRCI_HFPCLKPLLCFG0_DIVF_SHIFT 6
+#define PRCI_HFPCLKPLLCFG0_DIVF_MASK \
+ (0x1ff << PRCI_HFPCLKPLLCFG0_DIVF_SHIFT)
+#define PRCI_HFPCLKPLLCFG0_DIVQ_SHIFT 15
+#define PRCI_HFPCLKPLLCFG0_DIVQ_MASK \
+ (0x7 << PRCI_HFPCLKPLLCFG0_DIVQ_SHIFT)
+#define PRCI_HFPCLKPLLCFG0_RANGE_SHIFT 18
+#define PRCI_HFPCLKPLLCFG0_RANGE_MASK \
+ (0x7 << PRCI_HFPCLKPLLCFG0_RANGE_SHIFT)
+#define PRCI_HFPCLKPLLCFG0_BYPASS_SHIFT 24
+#define PRCI_HFPCLKPLLCFG0_BYPASS_MASK \
+ (0x1 << PRCI_HFPCLKPLLCFG0_BYPASS_SHIFT)
+#define PRCI_HFPCLKPLLCFG0_FSE_SHIFT 25
+#define PRCI_HFPCLKPLLCFG0_FSE_MASK \
+ (0x1 << PRCI_HFPCLKPLLCFG0_FSE_SHIFT)
+#define PRCI_HFPCLKPLLCFG0_LOCK_SHIFT 31
+#define PRCI_HFPCLKPLLCFG0_LOCK_MASK \
+ (0x1 << PRCI_HFPCLKPLLCFG0_LOCK_SHIFT)
+
+/* HFPCLKPLLCFG1 */
+#define PRCI_HFPCLKPLLCFG1_OFFSET 0x54
+#define PRCI_HFPCLKPLLCFG1_CKE_SHIFT 31
+#define PRCI_HFPCLKPLLCFG1_CKE_MASK \
+ (0x1 << PRCI_HFPCLKPLLCFG1_CKE_SHIFT)
+
+/* HFPCLKPLLSEL */
+#define PRCI_HFPCLKPLLSEL_OFFSET 0x58
+#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT 0
+#define PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_MASK \
+ (0x1 << PRCI_HFPCLKPLLSEL_HFPCLKPLLSEL_SHIFT)
+
+/* HFPCLKPLLDIV */
+#define PRCI_HFPCLKPLLDIV_OFFSET 0x5c
+
+/* PRCIPLL */
+#define PRCI_PRCIPLL_OFFSET 0xe0
+
+/* PROCMONCFG */
+#define PRCI_PROCMONCFG_OFFSET 0xf0
+
+/*
+ * Private structures
+ */
+
+/**
+ * struct __prci_data - per-device-instance data
+ * @va: base virtual address of the PRCI IP block
+ * @hw_clks: encapsulates struct clk_hw records
+ *
+ * PRCI per-device instance data
+ */
+struct __prci_data {
+ void __iomem *va;
+ struct clk_hw_onecell_data hw_clks;
+};
+
+/**
+ * struct __prci_wrpll_data - WRPLL configuration and integration data
+ * @c: WRPLL current configuration record
+ * @enable_bypass: fn ptr to code to bypass the WRPLL (if applicable; else NULL)
+ * @disable_bypass: fn ptr to code to not bypass the WRPLL (or NULL)
+ * @cfg0_offs: WRPLL CFG0 register offset (in bytes) from the PRCI base address
+ * @cfg1_offs: WRPLL CFG1 register offset (in bytes) from the PRCI base address
+ *
+ * @enable_bypass and @disable_bypass are used for WRPLL instances
+ * that contain a separate external glitchless clock mux downstream
+ * from the PLL. The WRPLL internal bypass mux is not glitchless.
+ */
+struct __prci_wrpll_data {
+ struct wrpll_cfg c;
+ void (*enable_bypass)(struct __prci_data *pd);
+ void (*disable_bypass)(struct __prci_data *pd);
+ u8 cfg0_offs;
+ u8 cfg1_offs;
+};
+
+/**
+ * struct __prci_clock - describes a clock device managed by PRCI
+ * @name: user-readable clock name string - should match the manual
+ * @parent_name: parent name for this clock
+ * @ops: struct clk_ops for the Linux clock framework to use for control
+ * @hw: Linux-private clock data
+ * @pwd: WRPLL-specific data, associated with this clock (if not NULL)
+ * @pd: PRCI-specific data associated with this clock (if not NULL)
+ *
+ * PRCI clock data. Used by the PRCI driver to register PRCI-provided
+ * clocks to the Linux clock infrastructure.
+ */
+struct __prci_clock {
+ const char *name;
+ const char *parent_name;
+ const struct clk_ops *ops;
+ struct clk_hw hw;
+ struct __prci_wrpll_data *pwd;
+ struct __prci_data *pd;
+};
+
+#define clk_hw_to_prci_clock(pwd) container_of(pwd, struct __prci_clock, hw)
+
+/*
+ * struct prci_clk_desc - describes the clocks provided by one PRCI instance
+ * @clks: pointer to an array of struct __prci_clock
+ * @num_clks: number of elements in @clks
+ */
+struct prci_clk_desc {
+ struct __prci_clock *clks;
+ size_t num_clks;
+};
+
+/* Core clock mux control */
+void sifive_prci_coreclksel_use_hfclk(struct __prci_data *pd);
+void sifive_prci_coreclksel_use_corepll(struct __prci_data *pd);
+void sifive_prci_coreclksel_use_final_corepll(struct __prci_data *pd);
+void sifive_prci_corepllsel_use_dvfscorepll(struct __prci_data *pd);
+void sifive_prci_corepllsel_use_corepll(struct __prci_data *pd);
+void sifive_prci_hfpclkpllsel_use_hfclk(struct __prci_data *pd);
+void sifive_prci_hfpclkpllsel_use_hfpclkpll(struct __prci_data *pd);
+
+/* Linux clock framework integration */
+long sifive_prci_wrpll_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate);
+int sifive_prci_wrpll_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+int sifive_clk_is_enabled(struct clk_hw *hw);
+int sifive_prci_clock_enable(struct clk_hw *hw);
+void sifive_prci_clock_disable(struct clk_hw *hw);
+unsigned long sifive_prci_wrpll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+unsigned long sifive_prci_tlclksel_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+unsigned long sifive_prci_hfpclkplldiv_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate);
+
+#endif /* __SIFIVE_CLK_SIFIVE_PRCI_H */
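All of the *PLLCFG0 registers share one field layout, which is why sifive-prci.c unpacks every PLL using the COREPLLCFG0 shift/mask pairs. A minimal sketch of the mask-then-shift idiom, assuming only the macros defined above:

#include <linux/types.h>

/* Extract the feedback divider (DIVF) from a raw CFG0 register value;
 * the same pattern applies to DIVR, DIVQ and RANGE. */
static inline u32 prci_cfg0_divf(u32 r)
{
	return (r & PRCI_COREPLLCFG0_DIVF_MASK) >> PRCI_COREPLLCFG0_DIVF_SHIFT;
}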
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 5f66bf879772..149cfde817cb 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -389,6 +389,7 @@ static struct clk_div_table ths_div_table[] = {
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
+ { /* Sentinel */ },
};
static const char * const ths_parents[] = { "osc24M" };
static struct ccu_div ths_clk = {
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 6b636362379e..7e629a4493af 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -322,6 +322,7 @@ static struct clk_div_table ths_div_table[] = {
{ .val = 1, .div = 2 },
{ .val = 2, .div = 4 },
{ .val = 3, .div = 6 },
+ { /* Sentinel */ },
};
static SUNXI_CCU_DIV_TABLE_WITH_GATE(ths_clk, "ths", "osc24M",
0x074, 0, 2, ths_div_table, BIT(31), 0);
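The added sentinel entries matter because the common divider code walks a clk_div_table until it finds an entry whose .div is zero; without a terminator the walk runs off the end of the array. An illustrative lookup in the style of the clk-divider helpers:

#include <linux/clk-provider.h>

/* Sketch: map a register value back to a divisor, stopping at the
 * zeroed sentinel entry that terminates the table. */
static unsigned int div_from_val(const struct clk_div_table *table, u32 val)
{
	const struct clk_div_table *clkt;

	for (clkt = table; clkt->div; clkt++)
		if (clkt->val == val)
			return clkt->div;
	return 0;	/* value not described by the table */
}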
diff --git a/drivers/clk/tegra/clk-bpmp.c b/drivers/clk/tegra/clk-bpmp.c
index a66263b6490d..6ecf18f71c32 100644
--- a/drivers/clk/tegra/clk-bpmp.c
+++ b/drivers/clk/tegra/clk-bpmp.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2016 NVIDIA Corporation
+ * Copyright (C) 2016-2020 NVIDIA Corporation
*/
#include <linux/clk-provider.h>
@@ -174,7 +174,7 @@ static long tegra_bpmp_clk_round_rate(struct clk_hw *hw, unsigned long rate,
int err;
memset(&request, 0, sizeof(request));
- request.rate = rate;
+ request.rate = min_t(u64, rate, S64_MAX);
memset(&msg, 0, sizeof(msg));
msg.cmd = CMD_CLK_ROUND_RATE;
@@ -256,7 +256,7 @@ static int tegra_bpmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
struct tegra_bpmp_clk_message msg;
memset(&request, 0, sizeof(request));
- request.rate = rate;
+ request.rate = min_t(u64, rate, S64_MAX);
memset(&msg, 0, sizeof(msg));
msg.cmd = CMD_CLK_SET_RATE;
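Both clk-bpmp.c hunks guard the same conversion: the clk framework passes the rate as an unsigned long, while the BPMP message carries it in a signed 64-bit field, so a very large request could appear negative to the firmware. A standalone sketch of the clamp, assuming nothing beyond min_t() and S64_MAX from <linux/kernel.h>:

#include <linux/kernel.h>

/* Clamp a framework-supplied rate so it stays non-negative when
 * stored into the signed 64-bit rate field of the BPMP request. */
static s64 bpmp_wire_rate(unsigned long rate)
{
	return min_t(u64, rate, S64_MAX);
}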
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index cfbaa90c7adb..a5f526bb0483 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -1856,13 +1856,13 @@ static int dfll_fetch_pwm_params(struct tegra_dfll *td)
&td->reg_init_uV);
if (!ret) {
dev_err(td->dev, "couldn't get initialized voltage\n");
- return ret;
+ return -EINVAL;
}
ret = read_dt_param(td, "nvidia,pwm-period-nanoseconds", &pwm_period);
if (!ret) {
dev_err(td->dev, "couldn't get PWM period\n");
- return ret;
+ return -EINVAL;
}
td->pwm_rate = (NSEC_PER_SEC / pwm_period) * (MAX_DFLL_VOLTAGES - 1);
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index ff7da2d3e94d..24413812ec5b 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -227,6 +227,7 @@ enum clk_id {
tegra_clk_sdmmc4,
tegra_clk_sdmmc4_8,
tegra_clk_se,
+ tegra_clk_se_10,
tegra_clk_soc_therm,
tegra_clk_soc_therm_8,
tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 2b2a3b81c16b..60cc34f90cb9 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -630,7 +630,7 @@ static struct tegra_periph_init_data periph_clks[] = {
INT8("host1x", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_8),
INT8("host1x", mux_pllc4_out1_pllc_pllc4_out2_pllp_clkm_plla_pllc4_out0, CLK_SOURCE_HOST1X, 28, 0, tegra_clk_host1x_9),
INT8("se", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
- INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se),
+ INT8("se", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_SE, 127, TEGRA_PERIPH_ON_APB, tegra_clk_se_10),
INT8("2d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_2D, 21, 0, tegra_clk_gr2d_8),
INT8("3d", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_3D, 24, 0, tegra_clk_gr3d_8),
INT8("vic03", mux_pllm_pllc_pllp_plla_pllc2_c3_clkm, CLK_SOURCE_VIC03, 178, 0, tegra_clk_vic03),
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index 37244a7e68c2..9cf249c344d9 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+ { TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+ { TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
/* must be the last entry */
{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
};
diff --git a/drivers/clk/ti/clk-33xx.c b/drivers/clk/ti/clk-33xx.c
index 7dc30dd6c8d5..f2c22120aaa7 100644
--- a/drivers/clk/ti/clk-33xx.c
+++ b/drivers/clk/ti/clk-33xx.c
@@ -266,6 +266,8 @@ static const char *enable_init_clks[] = {
"dpll_ddr_m2_ck",
"dpll_mpu_m2_ck",
"l3_gclk",
+ /* AM3_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:00bc:0",
"l4hs_gclk",
"l4fw_gclk",
"l4ls_gclk",
diff --git a/drivers/clk/ti/clk-43xx.c b/drivers/clk/ti/clk-43xx.c
index e5538d577ce5..46c0add99570 100644
--- a/drivers/clk/ti/clk-43xx.c
+++ b/drivers/clk/ti/clk-43xx.c
@@ -272,6 +272,11 @@ static struct ti_dt_clk am43xx_clks[] = {
{ .node_name = NULL },
};
+static const char *enable_init_clks[] = {
+ /* AM4_L3_L3_MAIN_CLKCTRL, needed during suspend */
+ "l3-clkctrl:0000:0",
+};
+
int __init am43xx_dt_clk_init(void)
{
struct clk *clk1, *clk2;
@@ -283,6 +288,9 @@ int __init am43xx_dt_clk_init(void)
omap2_clk_disable_autoidle_all();
+ omap2_clk_enable_init_clocks(enable_init_clks,
+ ARRAY_SIZE(enable_init_clks));
+
ti_clk_add_aliases();
/*
diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c
index a38c92153979..d078e5d73ed9 100644
--- a/drivers/clk/ti/clk-44xx.c
+++ b/drivers/clk/ti/clk-44xx.c
@@ -255,7 +255,7 @@ static const struct omap_clkctrl_reg_data omap4_l3_instr_clkctrl_regs[] __initco
};
static const struct omap_clkctrl_reg_data omap4_ivahd_clkctrl_regs[] __initconst = {
- { OMAP4_IVA_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
+ { OMAP4_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_m5x2_ck" },
{ OMAP4_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_m5x2_ck" },
{ 0 },
};
diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c
index 8694bc9f5fc7..f0542391ca4b 100644
--- a/drivers/clk/ti/clk-54xx.c
+++ b/drivers/clk/ti/clk-54xx.c
@@ -605,7 +605,7 @@ static struct ti_dt_clk omap54xx_clks[] = {
int __init omap5xxx_dt_clk_init(void)
{
int rc;
- struct clk *abe_dpll_ref, *abe_dpll, *sys_32k_ck, *usb_dpll;
+ struct clk *abe_dpll_ref, *abe_dpll, *abe_dpll_byp, *sys_32k_ck, *usb_dpll;
ti_dt_clocks_register(omap54xx_clks);
@@ -616,6 +616,16 @@ int __init omap5xxx_dt_clk_init(void)
abe_dpll_ref = clk_get_sys(NULL, "abe_dpll_clk_mux");
sys_32k_ck = clk_get_sys(NULL, "sys_32k_ck");
rc = clk_set_parent(abe_dpll_ref, sys_32k_ck);
+
+ /*
+ * This must also be set to sys_32k_ck to match or
+ * the ABE DPLL will not lock on a warm reboot when
+ * ABE timers are used.
+ */
+ abe_dpll_byp = clk_get_sys(NULL, "abe_dpll_bypass_clk_mux");
+ if (!rc)
+ rc = clk_set_parent(abe_dpll_byp, sys_32k_ck);
+
abe_dpll = clk_get_sys(NULL, "dpll_abe_ck");
if (!rc)
rc = clk_set_rate(abe_dpll, OMAP5_DPLL_ABE_DEFFREQ);
diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c
index 4e27f88062e7..8b9118ccd4cd 100644
--- a/drivers/clk/ti/clk-7xx.c
+++ b/drivers/clk/ti/clk-7xx.c
@@ -252,6 +252,12 @@ static const struct omap_clkctrl_reg_data dra7_l3instr_clkctrl_regs[] __initcons
{ 0 },
};
+static const struct omap_clkctrl_reg_data dra7_iva_clkctrl_regs[] __initconst = {
+ { DRA7_IVA_CLKCTRL, NULL, CLKF_HW_SUP | CLKF_NO_IDLEST, "dpll_iva_h12x2_ck" },
+ { DRA7_SL2IF_CLKCTRL, NULL, CLKF_HW_SUP, "dpll_iva_h12x2_ck" },
+ { 0 },
+};
+
static const char * const dra7_dss_dss_clk_parents[] __initconst = {
"dpll_per_h12x2_ck",
NULL,
@@ -827,6 +833,7 @@ const struct omap_clkctrl_data dra7_clkctrl_data[] __initconst = {
{ 0x4a008c00, dra7_atl_clkctrl_regs },
{ 0x4a008d20, dra7_l4cfg_clkctrl_regs },
{ 0x4a008e20, dra7_l3instr_clkctrl_regs },
+ { 0x4a008f20, dra7_iva_clkctrl_regs },
{ 0x4a009020, dra7_cam_clkctrl_regs },
{ 0x4a009120, dra7_dss_clkctrl_regs },
{ 0x4a009220, dra7_gpu_clkctrl_regs },
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
index 95e36ba64acc..8024c6d2b9e9 100644
--- a/drivers/clk/ti/fapll.c
+++ b/drivers/clk/ti/fapll.c
@@ -498,6 +498,7 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
{
struct clk_init_data *init;
struct fapll_synth *synth;
+ struct clk *clk = ERR_PTR(-ENOMEM);
init = kzalloc(sizeof(*init), GFP_KERNEL);
if (!init)
@@ -520,13 +521,19 @@ static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
synth->hw.init = init;
synth->clk_pll = pll_clk;
- return clk_register(NULL, &synth->hw);
+ clk = clk_register(NULL, &synth->hw);
+ if (IS_ERR(clk)) {
+ pr_err("failed to register clock\n");
+ goto free;
+ }
+
+ return clk;
free:
kfree(synth);
kfree(init);
- return ERR_PTR(-ENOMEM);
+ return clk;
}
static void __init ti_fapll_setup(struct device_node *node)
diff --git a/drivers/connector/cn_queue.c b/drivers/connector/cn_queue.c
index 49295052ba8b..996f025eb63c 100644
--- a/drivers/connector/cn_queue.c
+++ b/drivers/connector/cn_queue.c
@@ -19,7 +19,7 @@
static struct cn_callback_entry *
cn_queue_alloc_callback_entry(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
@@ -51,13 +51,13 @@ void cn_queue_release_callback(struct cn_callback_entry *cbq)
kfree(cbq);
}
-int cn_cb_equal(struct cb_id *i1, struct cb_id *i2)
+int cn_cb_equal(const struct cb_id *i1, const struct cb_id *i2)
{
return ((i1->idx == i2->idx) && (i1->val == i2->val));
}
int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
- struct cb_id *id,
+ const struct cb_id *id,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
@@ -90,7 +90,7 @@ int cn_queue_add_callback(struct cn_queue_dev *dev, const char *name,
return 0;
}
-void cn_queue_del_callback(struct cn_queue_dev *dev, struct cb_id *id)
+void cn_queue_del_callback(struct cn_queue_dev *dev, const struct cb_id *id)
{
struct cn_callback_entry *cbq, *n;
int found = 0;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index 7d59d18c6f26..48ec7ce6ecac 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -193,7 +193,7 @@ static void cn_rx_skb(struct sk_buff *skb)
*
* May sleep.
*/
-int cn_add_callback(struct cb_id *id, const char *name,
+int cn_add_callback(const struct cb_id *id, const char *name,
void (*callback)(struct cn_msg *,
struct netlink_skb_parms *))
{
@@ -214,7 +214,7 @@ EXPORT_SYMBOL_GPL(cn_add_callback);
*
* May sleep while waiting for reference counter to become zero.
*/
-void cn_del_callback(struct cb_id *id)
+void cn_del_callback(const struct cb_id *id)
{
struct cn_dev *dev = &cdev;
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index 7cc9bd8568de..8a482c434ea6 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -30,13 +30,13 @@
#define DMI_PROCESSOR_MAX_SPEED 0x14
/*
- * These structs contain information parsed from per CPU
- * ACPI _CPC structures.
- * e.g. For each CPU the highest, lowest supported
- * performance capabilities, desired performance level
- * requested etc.
+ * This list contains information parsed from per CPU ACPI _CPC and _PSD
+ * structures: e.g. the highest and lowest supported performance capabilities,
+ * the desired performance level requested etc. Depending on the share_type,
+ * not all CPUs will have an entry in the list.
*/
-static struct cppc_cpudata **all_cpu_data;
+static LIST_HEAD(cpu_data_list);
+
static bool boost_supported;
struct cppc_workaround_oem_info {
@@ -148,8 +148,10 @@ static unsigned int cppc_cpufreq_khz_to_perf(struct cppc_cpudata *cpu_data,
static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
unsigned int target_freq,
unsigned int relation)
+
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+ unsigned int cpu = policy->cpu;
struct cpufreq_freqs freqs;
u32 desired_perf;
int ret = 0;
@@ -164,12 +166,12 @@ static int cppc_cpufreq_set_target(struct cpufreq_policy *policy,
freqs.new = target_freq;
cpufreq_freq_transition_begin(policy, &freqs);
- ret = cppc_set_perf(cpu_data->cpu, &cpu_data->perf_ctrls);
+ ret = cppc_set_perf(cpu, &cpu_data->perf_ctrls);
cpufreq_freq_transition_end(policy, &freqs, ret != 0);
if (ret)
pr_debug("Failed to set target on CPU:%d. ret:%d\n",
- cpu_data->cpu, ret);
+ cpu, ret);
return ret;
}
@@ -182,7 +184,7 @@ static int cppc_verify_policy(struct cpufreq_policy_data *policy)
static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
unsigned int cpu = policy->cpu;
int ret;
@@ -193,6 +195,12 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
if (ret)
pr_debug("Err setting perf value:%d on CPU:%d. ret:%d\n",
caps->lowest_perf, cpu, ret);
+
+ /* Remove CPU node from list and free driver data for policy */
+ free_cpumask_var(cpu_data->shared_cpu_map);
+ list_del(&cpu_data->node);
+ kfree(policy->driver_data);
+ policy->driver_data = NULL;
}
/*
@@ -238,25 +246,61 @@ static unsigned int cppc_cpufreq_get_transition_delay_us(unsigned int cpu)
}
#endif
-static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+
+static struct cppc_cpudata *cppc_cpufreq_get_cpu_data(unsigned int cpu)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
- struct cppc_perf_caps *caps = &cpu_data->perf_caps;
- unsigned int cpu = policy->cpu;
- int ret = 0;
+ struct cppc_cpudata *cpu_data;
+ int ret;
- cpu_data->cpu = cpu;
- ret = cppc_get_perf_caps(cpu, caps);
+ cpu_data = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
+ if (!cpu_data)
+ goto out;
+ if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
+ goto free_cpu;
+
+ ret = acpi_get_psd_map(cpu, cpu_data);
if (ret) {
- pr_debug("Err reading CPU%d perf capabilities. ret:%d\n",
- cpu, ret);
- return ret;
+ pr_debug("Err parsing CPU%d PSD data: ret:%d\n", cpu, ret);
+ goto free_mask;
+ }
+
+ ret = cppc_get_perf_caps(cpu, &cpu_data->perf_caps);
+ if (ret) {
+ pr_debug("Err reading CPU%d perf caps: ret:%d\n", cpu, ret);
+ goto free_mask;
}
/* Convert the lowest and nominal freq from MHz to KHz */
- caps->lowest_freq *= 1000;
- caps->nominal_freq *= 1000;
+ cpu_data->perf_caps.lowest_freq *= 1000;
+ cpu_data->perf_caps.nominal_freq *= 1000;
+
+ list_add(&cpu_data->node, &cpu_data_list);
+
+ return cpu_data;
+
+free_mask:
+ free_cpumask_var(cpu_data->shared_cpu_map);
+free_cpu:
+ kfree(cpu_data);
+out:
+ return NULL;
+}
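+
+/*
+ * Entries added to cpu_data_list above are removed either per-policy in
+ * cppc_cpufreq_stop_cpu() or in bulk by free_cpu_data() at module exit.
+ */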
+
+static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ unsigned int cpu = policy->cpu;
+ struct cppc_cpudata *cpu_data;
+ struct cppc_perf_caps *caps;
+ int ret;
+
+ cpu_data = cppc_cpufreq_get_cpu_data(cpu);
+ if (!cpu_data) {
+ pr_err("Error in acquiring _CPC/_PSD data for CPU%d.\n", cpu);
+ return -ENODEV;
+ }
+ caps = &cpu_data->perf_caps;
+ policy->driver_data = cpu_data;
/*
* Set min to lowest nonlinear perf to avoid any efficiency penalty (see
@@ -280,26 +324,25 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu);
policy->shared_type = cpu_data->shared_type;
- if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
- int i;
-
+ switch (policy->shared_type) {
+ case CPUFREQ_SHARED_TYPE_HW:
+ case CPUFREQ_SHARED_TYPE_NONE:
+ /* Nothing to be done - we'll have a policy for each CPU */
+ break;
+ case CPUFREQ_SHARED_TYPE_ANY:
+ /*
+ * All CPUs in the domain will share a policy and all cpufreq
+ * operations will use a single cppc_cpudata structure stored
+ * in policy->driver_data.
+ */
cpumask_copy(policy->cpus, cpu_data->shared_cpu_map);
-
- for_each_cpu(i, policy->cpus) {
- if (unlikely(i == cpu))
- continue;
-
- memcpy(&all_cpu_data[i]->perf_caps, caps,
- sizeof(cpu_data->perf_caps));
- }
- } else if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL) {
- /* Support only SW_ANY for now. */
- pr_debug("Unsupported CPU co-ord type\n");
+ break;
+ default:
+ pr_debug("Unsupported CPU co-ord type: %d\n",
+ policy->shared_type);
return -EFAULT;
}
- cpu_data->cur_policy = policy;
-
/*
* If 'highest_perf' is greater than 'nominal_perf', we assume CPU Boost
* is supported.
@@ -354,9 +397,12 @@ static int cppc_get_rate_from_fbctrs(struct cppc_cpudata *cpu_data,
static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
{
struct cppc_perf_fb_ctrs fb_ctrs_t0 = {0}, fb_ctrs_t1 = {0};
- struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cppc_cpudata *cpu_data = policy->driver_data;
int ret;
+ cpufreq_cpu_put(policy);
+
ret = cppc_get_perf_ctrs(cpu, &fb_ctrs_t0);
if (ret)
return ret;
@@ -372,7 +418,7 @@ static unsigned int cppc_cpufreq_get_rate(unsigned int cpu)
static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[policy->cpu];
+ struct cppc_cpudata *cpu_data = policy->driver_data;
struct cppc_perf_caps *caps = &cpu_data->perf_caps;
int ret;
@@ -396,6 +442,19 @@ static int cppc_cpufreq_set_boost(struct cpufreq_policy *policy, int state)
return 0;
}
+static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
+{
+ struct cppc_cpudata *cpu_data = policy->driver_data;
+
+ return cpufreq_show_cpus(cpu_data->shared_cpu_map, buf);
+}
+cpufreq_freq_attr_ro(freqdomain_cpus);
+
+static struct freq_attr *cppc_cpufreq_attr[] = {
+ &freqdomain_cpus,
+ NULL,
+};
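+
+/* Surfaced as /sys/devices/system/cpu/cpuN/cpufreq/freqdomain_cpus */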
+
static struct cpufreq_driver cppc_cpufreq_driver = {
.flags = CPUFREQ_CONST_LOOPS,
.verify = cppc_verify_policy,
@@ -404,6 +463,7 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
.init = cppc_cpufreq_cpu_init,
.stop_cpu = cppc_cpufreq_stop_cpu,
.set_boost = cppc_cpufreq_set_boost,
+ .attr = cppc_cpufreq_attr,
.name = "cppc_cpufreq",
};
@@ -415,10 +475,13 @@ static struct cpufreq_driver cppc_cpufreq_driver = {
*/
static unsigned int hisi_cppc_cpufreq_get_rate(unsigned int cpu)
{
- struct cppc_cpudata *cpu_data = all_cpu_data[cpu];
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cppc_cpudata *cpu_data = policy->driver_data;
u64 desired_perf;
int ret;
+ cpufreq_cpu_put(policy);
+
ret = cppc_get_desired_perf(cpu, &desired_perf);
if (ret < 0)
return -EIO;
@@ -451,68 +514,33 @@ static void cppc_check_hisi_workaround(void)
static int __init cppc_cpufreq_init(void)
{
- struct cppc_cpudata *cpu_data;
- int i, ret = 0;
-
- if (acpi_disabled)
+ if (acpi_disabled || !acpi_cpc_valid())
return -ENODEV;
- all_cpu_data = kcalloc(num_possible_cpus(), sizeof(void *),
- GFP_KERNEL);
- if (!all_cpu_data)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- all_cpu_data[i] = kzalloc(sizeof(struct cppc_cpudata), GFP_KERNEL);
- if (!all_cpu_data[i])
- goto out;
-
- cpu_data = all_cpu_data[i];
- if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
- goto out;
- }
-
- ret = acpi_get_psd_map(all_cpu_data);
- if (ret) {
- pr_debug("Error parsing PSD data. Aborting cpufreq registration.\n");
- goto out;
- }
+ INIT_LIST_HEAD(&cpu_data_list);
cppc_check_hisi_workaround();
- ret = cpufreq_register_driver(&cppc_cpufreq_driver);
- if (ret)
- goto out;
+ return cpufreq_register_driver(&cppc_cpufreq_driver);
+}
- return ret;
+static inline void free_cpu_data(void)
+{
+ struct cppc_cpudata *iter, *tmp;
-out:
- for_each_possible_cpu(i) {
- cpu_data = all_cpu_data[i];
- if (!cpu_data)
- break;
- free_cpumask_var(cpu_data->shared_cpu_map);
- kfree(cpu_data);
+ list_for_each_entry_safe(iter, tmp, &cpu_data_list, node) {
+ free_cpumask_var(iter->shared_cpu_map);
+ list_del(&iter->node);
+ kfree(iter);
}
- kfree(all_cpu_data);
- return -ENODEV;
}
static void __exit cppc_cpufreq_exit(void)
{
- struct cppc_cpudata *cpu_data;
- int i;
-
cpufreq_unregister_driver(&cppc_cpufreq_driver);
- for_each_possible_cpu(i) {
- cpu_data = all_cpu_data[i];
- free_cpumask_var(cpu_data->shared_cpu_map);
- kfree(cpu_data);
- }
-
- kfree(all_cpu_data);
+ free_cpu_data();
}
module_exit(cppc_cpufreq_exit);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c17aa2973c44..d0a3525ce27f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2097,6 +2097,46 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
+/**
+ * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
+ * @cpu: Target CPU.
+ * @min_perf: Minimum (required) performance level (units of @capacity).
+ * @target_perf: Target (desired) performance level (units of @capacity).
+ * @capacity: Capacity of the target CPU.
+ *
+ * Carry out a fast performance level switch of @cpu without sleeping.
+ *
+ * The driver's ->adjust_perf() callback invoked by this function must be
+ * suitable for being called from within RCU-sched read-side critical sections
+ * and it is expected to select a suitable performance level equal to or above
+ * @min_perf and preferably equal to or below @target_perf.
+ *
+ * This function must not be called if policy->fast_switch_enabled is unset.
+ *
+ * Governors calling this function must guarantee that it will never be invoked
+ * twice in parallel for the same CPU and that it will never be called in
+ * parallel with either ->target() or ->target_index() or ->fast_switch() for
+ * the same CPU.
+ */
+void cpufreq_driver_adjust_perf(unsigned int cpu,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
+}
+
+/**
+ * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
+ *
+ * Return 'true' if the ->adjust_perf callback is present for the
+ * current driver or 'false' otherwise.
+ */
+bool cpufreq_driver_has_adjust_perf(void)
+{
+ return !!cpufreq_driver->adjust_perf;
+}
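+
+/*
+ * Hypothetical governor-side usage sketch (schedutil-style; the exact call
+ * site is an assumption, not part of this change):
+ *
+ *	if (cpufreq_driver_has_adjust_perf())
+ *		cpufreq_driver_adjust_perf(cpu, min_perf, target_perf, capacity);
+ *	else
+ *		cpufreq_driver_fast_switch(policy, next_freq);
+ */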
+
/* Must set freqs->new to intermediate frequency */
static int __target_intermediate(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, int index)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2a4db856222f..be05e038d956 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -76,11 +76,6 @@ static inline int ceiling_fp(int32_t x)
return ret;
}
-static inline int32_t percent_fp(int percent)
-{
- return div_fp(percent, 100);
-}
-
static inline u64 mul_ext_fp(u64 x, u64 y)
{
return (x * y) >> EXT_FRAC_BITS;
@@ -91,11 +86,6 @@ static inline u64 div_ext_fp(u64 x, u64 y)
return div64_u64(x << EXT_FRAC_BITS, y);
}
-static inline int32_t percent_ext_fp(int percent)
-{
- return div_ext_fp(percent, 100);
-}
-
/**
* struct sample - Store performance sample
* @core_avg_perf: Ratio of APERF/MPERF which is the actual average
@@ -2207,9 +2197,9 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
unsigned int policy_min,
unsigned int policy_max)
{
- int max_freq = intel_pstate_get_max_freq(cpu);
int32_t max_policy_perf, min_policy_perf;
int max_state, turbo_max;
+ int max_freq;
/*
* HWP needs some special consideration, because on BDX the
@@ -2223,6 +2213,7 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
turbo_max = cpu->pstate.turbo_pstate;
}
+ max_freq = max_state * cpu->pstate.scaling;
max_policy_perf = max_state * policy_max / max_freq;
if (policy_max == policy_min) {
@@ -2325,9 +2316,18 @@ static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
struct cpufreq_policy_data *policy)
{
+ int max_freq;
+
update_turbo_state();
- cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- intel_pstate_get_max_freq(cpu));
+ if (hwp_active) {
+ int max_state, turbo_max;
+
+ intel_pstate_get_hwp_max(cpu->cpu, &turbo_max, &max_state);
+ max_freq = max_state * cpu->pstate.scaling;
+ } else {
+ max_freq = intel_pstate_get_max_freq(cpu);
+ }
+ cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
intel_pstate_adjust_policy_max(cpu, policy);
}
@@ -2526,20 +2526,19 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
fp_toint(cpu->iowait_boost * 100));
}
-static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
- bool strict, bool fast_switch)
+static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 min, u32 max,
+ u32 desired, bool fast_switch)
{
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
value &= ~HWP_MIN_PERF(~0L);
- value |= HWP_MIN_PERF(target_pstate);
+ value |= HWP_MIN_PERF(min);
- /*
- * The entire MSR needs to be updated in order to update the HWP min
- * field in it, so opportunistically update the max too if needed.
- */
value &= ~HWP_MAX_PERF(~0L);
- value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
+ value |= HWP_MAX_PERF(max);
+
+ value &= ~HWP_DESIRED_PERF(~0L);
+ value |= HWP_DESIRED_PERF(desired);
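+
+ /*
+ * min, max and desired all live in the same cached HWP request MSR
+ * image, so one value update covers all three fields.
+ */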
if (value == prev)
return;
@@ -2569,11 +2568,15 @@ static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
int old_pstate = cpu->pstate.current_pstate;
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
- if (hwp_active)
- intel_cpufreq_adjust_hwp(cpu, target_pstate,
- policy->strict_target, fast_switch);
- else if (target_pstate != old_pstate)
+ if (hwp_active) {
+ int max_pstate = policy->strict_target ?
+ target_pstate : cpu->max_perf_ratio;
+
+ intel_cpufreq_adjust_hwp(cpu, target_pstate, max_pstate, 0,
+ fast_switch);
+ } else if (target_pstate != old_pstate) {
intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
+ }
cpu->pstate.current_pstate = target_pstate;
@@ -2634,6 +2637,48 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
return target_pstate * cpu->pstate.scaling;
}
+static void intel_cpufreq_adjust_perf(unsigned int cpunum,
+ unsigned long min_perf,
+ unsigned long target_perf,
+ unsigned long capacity)
+{
+ struct cpudata *cpu = all_cpu_data[cpunum];
+ u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
+ int old_pstate = cpu->pstate.current_pstate;
+ int cap_pstate, min_pstate, max_pstate, target_pstate;
+
+ update_turbo_state();
+ cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
+ HWP_HIGHEST_PERF(hwp_cap);
+
+ /* Optimization: Avoid unnecessary divisions. */
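+ /*
+ * perf arguments are in units of @capacity, so each pstate computed
+ * below scales cap_pstate by perf / capacity (rounded up); when
+ * perf >= capacity the division is skipped and cap_pstate is used
+ * directly.
+ */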
+
+ target_pstate = cap_pstate;
+ if (target_perf < capacity)
+ target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
+
+ min_pstate = cap_pstate;
+ if (min_perf < capacity)
+ min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
+
+ if (min_pstate < cpu->pstate.min_pstate)
+ min_pstate = cpu->pstate.min_pstate;
+
+ if (min_pstate < cpu->min_perf_ratio)
+ min_pstate = cpu->min_perf_ratio;
+
+ max_pstate = min(cap_pstate, cpu->max_perf_ratio);
+ if (max_pstate < min_pstate)
+ max_pstate = min_pstate;
+
+ target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
+
+ intel_cpufreq_adjust_hwp(cpu, min_pstate, max_pstate, target_pstate, true);
+
+ cpu->pstate.current_pstate = target_pstate;
+ intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
+}
+
static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
int max_state, turbo_max, min_freq, max_freq, ret;
@@ -3032,6 +3077,7 @@ static int __init intel_pstate_init(void)
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
+ intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
if (!default_driver)
default_driver = &intel_pstate;
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 0acc9e241cd7..b9ccb6a3dad9 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -878,9 +878,9 @@ static int get_transition_latency(struct powernow_k8_data *data)
/* Take a frequency, and issue the fid/vid transition command */
static int transition_frequency_fidvid(struct powernow_k8_data *data,
- unsigned int index)
+ unsigned int index,
+ struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
u32 fid = 0;
u32 vid = 0;
int res;
@@ -912,9 +912,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
freqs.old = find_khz_freq_from_fid(data->currfid);
freqs.new = find_khz_freq_from_fid(fid);
- policy = cpufreq_cpu_get(smp_processor_id());
- cpufreq_cpu_put(policy);
-
cpufreq_freq_transition_begin(policy, &freqs);
res = transition_fid_vid(data, fid, vid);
cpufreq_freq_transition_end(policy, &freqs, res);
@@ -969,7 +966,7 @@ static long powernowk8_target_fn(void *arg)
powernow_k8_acpi_pst_values(data, newstate);
- ret = transition_frequency_fidvid(data, newstate);
+ ret = transition_frequency_fidvid(data, newstate, pol);
if (ret) {
pr_err("transition frequency failed\n");
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index 7f8ddc04342d..abe51185f243 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -155,8 +155,7 @@ static int __init bl_idle_driver_init(struct cpuidle_driver *drv, int part_id)
static const struct of_device_id compatible_machine_match[] = {
{ .compatible = "arm,vexpress,v2p-ca15_a7" },
- { .compatible = "samsung,exynos5420" },
- { .compatible = "samsung,exynos5800" },
+ { .compatible = "google,peach" },
{},
};
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 3c16797b25b9..f2e17b0c4fa0 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -1,12 +1,13 @@
config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4
tristate "Support for Intel Keem Bay OCS AES/SM4 HW acceleration"
- depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on ARCH_KEEMBAY || COMPILE_TEST
select CRYPTO_SKCIPHER
select CRYPTO_AEAD
select CRYPTO_ENGINE
help
Support for Intel Keem Bay Offload and Crypto Subsystem (OCS) AES and
- SM4 cihper hardware acceleration for use with Crypto API.
+ SM4 cipher hardware acceleration for use with Crypto API.
Provides HW acceleration for the following transformations:
cbc(aes), ctr(aes), ccm(aes), gcm(aes), cbc(sm4), ctr(sm4), ccm(sm4)
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
index beb379b23dc3..846a3d90b41a 100644
--- a/drivers/crypto/qat/Kconfig
+++ b/drivers/crypto/qat/Kconfig
@@ -11,6 +11,7 @@ config CRYPTO_DEV_QAT
select CRYPTO_SHA1
select CRYPTO_SHA256
select CRYPTO_SHA512
+ select CRYPTO_AES
select FW_LOADER
config CRYPTO_DEV_QAT_DH895xCC
diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c
index 27513d311242..737b207c9e30 100644
--- a/drivers/dax/bus.c
+++ b/drivers/dax/bus.c
@@ -367,19 +367,28 @@ void kill_dev_dax(struct dev_dax *dev_dax)
}
EXPORT_SYMBOL_GPL(kill_dev_dax);
-static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+static void trim_dev_dax_range(struct dev_dax *dev_dax)
{
+ int i = dev_dax->nr_range - 1;
+ struct range *range = &dev_dax->ranges[i].range;
struct dax_region *dax_region = dev_dax->region;
- int i;
device_lock_assert(dax_region->dev);
- for (i = 0; i < dev_dax->nr_range; i++) {
- struct range *range = &dev_dax->ranges[i].range;
-
- __release_region(&dax_region->res, range->start,
- range_len(range));
+ dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i,
+ (unsigned long long)range->start,
+ (unsigned long long)range->end);
+
+ __release_region(&dax_region->res, range->start, range_len(range));
+ if (--dev_dax->nr_range == 0) {
+ kfree(dev_dax->ranges);
+ dev_dax->ranges = NULL;
}
- dev_dax->nr_range = 0;
+}
+
+static void free_dev_dax_ranges(struct dev_dax *dev_dax)
+{
+ while (dev_dax->nr_range)
+ trim_dev_dax_range(dev_dax);
}
static void unregister_dev_dax(void *dev)
@@ -763,22 +772,14 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
return 0;
}
- ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
- * (dev_dax->nr_range + 1), GFP_KERNEL);
- if (!ranges)
+ alloc = __request_region(res, start, size, dev_name(dev), 0);
+ if (!alloc)
return -ENOMEM;
- alloc = __request_region(res, start, size, dev_name(dev), 0);
- if (!alloc) {
- /*
- * If this was an empty set of ranges nothing else
- * will release @ranges, so do it now.
- */
- if (!dev_dax->nr_range) {
- kfree(ranges);
- ranges = NULL;
- }
- dev_dax->ranges = ranges;
+ ranges = krealloc(dev_dax->ranges, sizeof(*ranges)
+ * (dev_dax->nr_range + 1), GFP_KERNEL);
+ if (!ranges) {
+ __release_region(res, alloc->start, resource_size(alloc));
return -ENOMEM;
}
@@ -804,15 +805,10 @@ static int alloc_dev_dax_range(struct dev_dax *dev_dax, u64 start,
return 0;
rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1);
- if (rc) {
- dev_dbg(dev, "delete range[%d]: %pa:%pa\n", dev_dax->nr_range - 1,
- &alloc->start, &alloc->end);
- dev_dax->nr_range--;
- __release_region(res, alloc->start, resource_size(alloc));
- return rc;
- }
+ if (rc)
+ trim_dev_dax_range(dev_dax);
- return 0;
+ return rc;
}
static int adjust_dev_dax_range(struct dev_dax *dev_dax, struct resource *res, resource_size_t size)
@@ -885,12 +881,7 @@ static int dev_dax_shrink(struct dev_dax *dev_dax, resource_size_t size)
if (shrink >= range_len(range)) {
devm_release_action(dax_region->dev,
unregister_dax_mapping, &mapping->dev);
- __release_region(&dax_region->res, range->start,
- range_len(range));
- dev_dax->nr_range--;
- dev_dbg(dev, "delete range[%d]: %#llx:%#llx\n", i,
- (unsigned long long) range->start,
- (unsigned long long) range->end);
+ trim_dev_dax_range(dev_dax);
to_shrink -= shrink;
if (!to_shrink)
break;
@@ -1114,16 +1105,9 @@ static ssize_t align_show(struct device *dev,
static ssize_t dev_dax_validate_align(struct dev_dax *dev_dax)
{
- resource_size_t dev_size = dev_dax_size(dev_dax);
struct device *dev = &dev_dax->dev;
int i;
- if (dev_size > 0 && !alloc_is_aligned(dev_dax, dev_size)) {
- dev_dbg(dev, "%s: align %u invalid for size %pa\n",
- __func__, dev_dax->align, &dev_size);
- return -EINVAL;
- }
-
for (i = 0; i < dev_dax->nr_range; i++) {
size_t len = range_len(&dev_dax->ranges[i].range);
@@ -1274,7 +1258,6 @@ static void dev_dax_release(struct device *dev)
put_dax(dax_dev);
free_dev_dax_id(dev_dax);
dax_region_put(dax_region);
- kfree(dev_dax->ranges);
kfree(dev_dax->pgmap);
kfree(dev_dax);
}
diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 62b26bfceab1..062e8bc14223 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -52,7 +52,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
/* adjust the dax_region range to the start of data */
range = pgmap.range;
- range.start += offset,
+ range.start += offset;
dax_region = alloc_dax_region(dev, region_id, &range,
nd_region->target_node, le32_to_cpu(pfn_sb->align),
IORESOURCE_DAX_STATIC);
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index edc279be3e59..cadbd0a1a1ef 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -752,6 +752,7 @@ err_chrdev:
static void __exit dax_core_exit(void)
{
+ dax_bus_exit();
unregister_chrdev_region(dax_devt, MINORMASK+1);
ida_destroy(&dax_minor_ida);
dax_fs_exit();
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 0eb80c1ecdab..9ad6397aaa97 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -76,10 +76,6 @@ static void dma_buf_release(struct dentry *dentry)
dmabuf->ops->release(dmabuf);
- mutex_lock(&db_list.lock);
- list_del(&dmabuf->list_node);
- mutex_unlock(&db_list.lock);
-
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
@@ -88,6 +84,22 @@ static void dma_buf_release(struct dentry *dentry)
kfree(dmabuf);
}
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+
+ return 0;
+}
+
static const struct dentry_operations dma_buf_dentry_ops = {
.d_dname = dmabuffs_dname,
.d_release = dma_buf_release,
@@ -413,6 +425,7 @@ static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
}
static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
.mmap = dma_buf_mmap_internal,
.llseek = dma_buf_llseek,
.poll = dma_buf_poll,
@@ -1166,9 +1179,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
- struct file *oldfile;
- int ret;
-
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@@ -1186,22 +1196,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
return -EINVAL;
/* readjust the vma */
- get_file(dmabuf->file);
- oldfile = vma->vm_file;
- vma->vm_file = dmabuf->file;
+ vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
- ret = dmabuf->ops->mmap(dmabuf, vma);
- if (ret) {
- /* restore old parameters on failure */
- vma->vm_file = oldfile;
- fput(dmabuf->file);
- } else {
- if (oldfile)
- fput(oldfile);
- }
- return ret;
-
+ return dmabuf->ops->mmap(dmabuf, vma);
}
EXPORT_SYMBOL_GPL(dma_buf_mmap);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index bb5a42b10c29..6ddbeb5dfbf6 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -200,7 +200,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
max = max(old->shared_count + num_fences,
old->shared_max * 2);
} else {
- max = 4;
+ max = max(4ul, roundup_pow_of_two(num_fences));
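+ /* e.g. num_fences == 5 rounds up to max == 8; 4 stays the floor */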
}
new = dma_resv_list_alloc(max);
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cdec3da0..974467791032 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,4 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
-obj-y += heap-helpers.o
obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index e55384dc115b..3c4e34301172 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -2,76 +2,306 @@
/*
* DMABUF CMA heap exporter
*
- * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
* Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * Also utilizing parts of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*/
-
#include <linux/cma.h>
-#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/dma-map-ops.h>
#include <linux/err.h>
-#include <linux/errno.h>
#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/scatterlist.h>
-#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
-#include "heap-helpers.h"
struct cma_heap {
struct dma_heap *heap;
struct cma *cma;
};
-static void cma_heap_free(struct heap_helper_buffer *buffer)
+struct cma_heap_buffer {
+ struct cma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct page *cma_pages;
+ struct page **pages;
+ pgoff_t pagecount;
+ int vmap_cnt;
+ void *vaddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table table;
+ struct list_head list;
+ bool mapped;
+};
+
+static int cma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
{
- struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
- unsigned long nr_pages = buffer->pagecount;
- struct page *cma_pages = buffer->priv_virt;
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ int ret;
- /* free page list */
- kfree(buffer->pages);
- /* release memory */
- cma_release(cma_heap->cma, cma_pages, nr_pages);
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+ buffer->pagecount, 0,
+ buffer->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+ a->mapped = false;
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void cma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = &a->table;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+ a->mapped = true;
+ return table;
+}
+
+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+
+ a->mapped = false;
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+ if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+ .fault = cma_heap_vm_fault,
+};
+
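+/*
+ * Userspace mappings are populated lazily through the fault handler above;
+ * each fault takes its own page reference on behalf of the VMA.
+ */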
+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &dma_heap_vm_ops;
+ vma->vm_private_data = buffer;
+
+ return 0;
+}
+
+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
+{
+ void *vaddr;
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+ goto out;
+ }
+
+ vaddr = cma_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ dma_buf_map_clear(map);
+}
+
+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct cma_heap *cma_heap = buffer->heap;
+
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+
+ cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
kfree(buffer);
}
-/* dmabuf heap CMA operations functions */
+static const struct dma_buf_ops cma_heap_buf_ops = {
+ .attach = cma_heap_attach,
+ .detach = cma_heap_detach,
+ .map_dma_buf = cma_heap_map_dma_buf,
+ .unmap_dma_buf = cma_heap_unmap_dma_buf,
+ .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
+ .mmap = cma_heap_mmap,
+ .vmap = cma_heap_vmap,
+ .vunmap = cma_heap_vunmap,
+ .release = cma_heap_dma_buf_release,
+};
+
static int cma_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- unsigned long fd_flags,
- unsigned long heap_flags)
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
{
struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
- struct heap_helper_buffer *helper_buffer;
- struct page *cma_pages;
+ struct cma_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
size_t size = PAGE_ALIGN(len);
- unsigned long nr_pages = size >> PAGE_SHIFT;
+ pgoff_t pagecount = size >> PAGE_SHIFT;
unsigned long align = get_order(size);
+ struct page *cma_pages;
struct dma_buf *dmabuf;
int ret = -ENOMEM;
pgoff_t pg;
- if (align > CONFIG_CMA_ALIGNMENT)
- align = CONFIG_CMA_ALIGNMENT;
-
- helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
- if (!helper_buffer)
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return -ENOMEM;
- init_heap_helper_buffer(helper_buffer, cma_heap_free);
- helper_buffer->heap = heap;
- helper_buffer->size = len;
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->len = size;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
- cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+ cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
if (!cma_pages)
- goto free_buf;
+ goto free_buffer;
+ /* Clear the CMA pages */
if (PageHighMem(cma_pages)) {
- unsigned long nr_clear_pages = nr_pages;
+ unsigned long nr_clear_pages = pagecount;
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
@@ -85,7 +315,6 @@ static int cma_heap_allocate(struct dma_heap *heap,
*/
if (fatal_signal_pending(current))
goto free_cma;
-
page++;
nr_clear_pages--;
}
@@ -93,28 +322,30 @@ static int cma_heap_allocate(struct dma_heap *heap,
memset(page_address(cma_pages), 0, size);
}
- helper_buffer->pagecount = nr_pages;
- helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
- sizeof(*helper_buffer->pages),
- GFP_KERNEL);
- if (!helper_buffer->pages) {
+ buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
+ if (!buffer->pages) {
ret = -ENOMEM;
goto free_cma;
}
- for (pg = 0; pg < helper_buffer->pagecount; pg++)
- helper_buffer->pages[pg] = &cma_pages[pg];
+ for (pg = 0; pg < pagecount; pg++)
+ buffer->pages[pg] = &cma_pages[pg];
+
+ buffer->cma_pages = cma_pages;
+ buffer->heap = cma_heap;
+ buffer->pagecount = pagecount;
/* create the dmabuf */
- dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ exp_info.ops = &cma_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
goto free_pages;
}
- helper_buffer->dmabuf = dmabuf;
- helper_buffer->priv_virt = cma_pages;
-
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
@@ -125,11 +356,12 @@ static int cma_heap_allocate(struct dma_heap *heap,
return ret;
free_pages:
- kfree(helper_buffer->pages);
+ kfree(buffer->pages);
free_cma:
- cma_release(cma_heap->cma, cma_pages, nr_pages);
-free_buf:
- kfree(helper_buffer);
+ cma_release(cma_heap->cma, cma_pages, pagecount);
+free_buffer:
+ kfree(buffer);
+
return ret;
}
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
deleted file mode 100644
index fcf4ce3e2cbb..000000000000
--- a/drivers/dma-buf/heaps/heap-helpers.c
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/err.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <uapi/linux/dma-heap.h>
-
-#include "heap-helpers.h"
-
-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
- void (*free)(struct heap_helper_buffer *))
-{
- buffer->priv_virt = NULL;
- mutex_init(&buffer->lock);
- buffer->vmap_cnt = 0;
- buffer->vaddr = NULL;
- buffer->pagecount = 0;
- buffer->pages = NULL;
- INIT_LIST_HEAD(&buffer->attachments);
- buffer->free = free;
-}
-
-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
- int fd_flags)
-{
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-
- exp_info.ops = &heap_helper_ops;
- exp_info.size = buffer->size;
- exp_info.flags = fd_flags;
- exp_info.priv = buffer;
-
- return dma_buf_export(&exp_info);
-}
-
-static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
-{
- void *vaddr;
-
- vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
- if (!vaddr)
- return ERR_PTR(-ENOMEM);
-
- return vaddr;
-}
-
-static void dma_heap_buffer_destroy(struct heap_helper_buffer *buffer)
-{
- if (buffer->vmap_cnt > 0) {
- WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
- vunmap(buffer->vaddr);
- }
-
- buffer->free(buffer);
-}
-
-static void *dma_heap_buffer_vmap_get(struct heap_helper_buffer *buffer)
-{
- void *vaddr;
-
- if (buffer->vmap_cnt) {
- buffer->vmap_cnt++;
- return buffer->vaddr;
- }
- vaddr = dma_heap_map_kernel(buffer);
- if (IS_ERR(vaddr))
- return vaddr;
- buffer->vaddr = vaddr;
- buffer->vmap_cnt++;
- return vaddr;
-}
-
-static void dma_heap_buffer_vmap_put(struct heap_helper_buffer *buffer)
-{
- if (!--buffer->vmap_cnt) {
- vunmap(buffer->vaddr);
- buffer->vaddr = NULL;
- }
-}
-
-struct dma_heaps_attachment {
- struct device *dev;
- struct sg_table table;
- struct list_head list;
-};
-
-static int dma_heap_attach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
-{
- struct dma_heaps_attachment *a;
- struct heap_helper_buffer *buffer = dmabuf->priv;
- int ret;
-
- a = kzalloc(sizeof(*a), GFP_KERNEL);
- if (!a)
- return -ENOMEM;
-
- ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
- buffer->pagecount, 0,
- buffer->pagecount << PAGE_SHIFT,
- GFP_KERNEL);
- if (ret) {
- kfree(a);
- return ret;
- }
-
- a->dev = attachment->dev;
- INIT_LIST_HEAD(&a->list);
-
- attachment->priv = a;
-
- mutex_lock(&buffer->lock);
- list_add(&a->list, &buffer->attachments);
- mutex_unlock(&buffer->lock);
-
- return 0;
-}
-
-static void dma_heap_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
-{
- struct dma_heaps_attachment *a = attachment->priv;
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- list_del(&a->list);
- mutex_unlock(&buffer->lock);
-
- sg_free_table(&a->table);
- kfree(a);
-}
-
-static
-struct sg_table *dma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- struct dma_heaps_attachment *a = attachment->priv;
- struct sg_table *table = &a->table;
- int ret;
-
- ret = dma_map_sgtable(attachment->dev, table, direction, 0);
- if (ret)
- table = ERR_PTR(ret);
- return table;
-}
-
-static void dma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
- dma_unmap_sgtable(attachment->dev, table, direction, 0);
-}
-
-static vm_fault_t dma_heap_vm_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct heap_helper_buffer *buffer = vma->vm_private_data;
-
- if (vmf->pgoff > buffer->pagecount)
- return VM_FAULT_SIGBUS;
-
- vmf->page = buffer->pages[vmf->pgoff];
- get_page(vmf->page);
-
- return 0;
-}
-
-static const struct vm_operations_struct dma_heap_vm_ops = {
- .fault = dma_heap_vm_fault,
-};
-
-static int dma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
- return -EINVAL;
-
- vma->vm_ops = &dma_heap_vm_ops;
- vma->vm_private_data = buffer;
-
- return 0;
-}
-
-static void dma_heap_dma_buf_release(struct dma_buf *dmabuf)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- dma_heap_buffer_destroy(buffer);
-}
-
-static int dma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
- struct dma_heaps_attachment *a;
- int ret = 0;
-
- mutex_lock(&buffer->lock);
-
- if (buffer->vmap_cnt)
- invalidate_kernel_vmap_range(buffer->vaddr, buffer->size);
-
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_cpu(a->dev, a->table.sgl, a->table.nents,
- direction);
- }
- mutex_unlock(&buffer->lock);
-
- return ret;
-}
-
-static int dma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
- struct dma_heaps_attachment *a;
-
- mutex_lock(&buffer->lock);
-
- if (buffer->vmap_cnt)
- flush_kernel_vmap_range(buffer->vaddr, buffer->size);
-
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_device(a->dev, a->table.sgl, a->table.nents,
- direction);
- }
- mutex_unlock(&buffer->lock);
-
- return 0;
-}
-
-static int dma_heap_dma_buf_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
- void *vaddr;
-
- mutex_lock(&buffer->lock);
- vaddr = dma_heap_buffer_vmap_get(buffer);
- mutex_unlock(&buffer->lock);
-
- if (!vaddr)
- return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
-
- return 0;
-}
-
-static void dma_heap_dma_buf_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
-{
- struct heap_helper_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- dma_heap_buffer_vmap_put(buffer);
- mutex_unlock(&buffer->lock);
-}
-
-const struct dma_buf_ops heap_helper_ops = {
- .map_dma_buf = dma_heap_map_dma_buf,
- .unmap_dma_buf = dma_heap_unmap_dma_buf,
- .mmap = dma_heap_mmap,
- .release = dma_heap_dma_buf_release,
- .attach = dma_heap_attach,
- .detach = dma_heap_detach,
- .begin_cpu_access = dma_heap_dma_buf_begin_cpu_access,
- .end_cpu_access = dma_heap_dma_buf_end_cpu_access,
- .vmap = dma_heap_dma_buf_vmap,
- .vunmap = dma_heap_dma_buf_vunmap,
-};
diff --git a/drivers/dma-buf/heaps/heap-helpers.h b/drivers/dma-buf/heaps/heap-helpers.h
deleted file mode 100644
index 805d2df88024..000000000000
--- a/drivers/dma-buf/heaps/heap-helpers.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * DMABUF Heaps helper code
- *
- * Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
- */
-
-#ifndef _HEAP_HELPERS_H
-#define _HEAP_HELPERS_H
-
-#include <linux/dma-heap.h>
-#include <linux/list.h>
-
-/**
- * struct heap_helper_buffer - helper buffer metadata
- * @heap: back pointer to the heap the buffer came from
- * @dmabuf: backing dma-buf for this buffer
- * @size: size of the buffer
- * @priv_virt pointer to heap specific private value
- * @lock mutext to protect the data in this structure
- * @vmap_cnt count of vmap references on the buffer
- * @vaddr vmap'ed virtual address
- * @pagecount number of pages in the buffer
- * @pages list of page pointers
- * @attachments list of device attachments
- *
- * @free heap callback to free the buffer
- */
-struct heap_helper_buffer {
- struct dma_heap *heap;
- struct dma_buf *dmabuf;
- size_t size;
-
- void *priv_virt;
- struct mutex lock;
- int vmap_cnt;
- void *vaddr;
- pgoff_t pagecount;
- struct page **pages;
- struct list_head attachments;
-
- void (*free)(struct heap_helper_buffer *buffer);
-};
-
-void init_heap_helper_buffer(struct heap_helper_buffer *buffer,
- void (*free)(struct heap_helper_buffer *));
-
-struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
- int fd_flags);
-
-extern const struct dma_buf_ops heap_helper_ops;
-#endif /* _HEAP_HELPERS_H */
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 0bf688e3c023..17e0e9a68baf 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -3,7 +3,11 @@
* DMABUF System heap exporter
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (C) 2019 Linaro Ltd.
+ * Copyright (C) 2019, 2020 Linaro Ltd.
+ *
+ * Portions based on Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
*/
#include <linux/dma-buf.h>
@@ -15,87 +19,404 @@
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
-#include <linux/sched/signal.h>
-#include <asm/page.h>
+#include <linux/vmalloc.h>
+
+static struct dma_heap *sys_heap;
+
+struct system_heap_buffer {
+ struct dma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct sg_table sg_table;
+ int vmap_cnt;
+ void *vaddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+ bool mapped;
+};
+
+#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
+ | __GFP_NORETRY) & ~__GFP_RECLAIM) \
+ | __GFP_COMP)
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
+/*
+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
+ * to match the sizes often found in IOMMUs. Using order 4 pages instead
+ * of order 0 pages can significantly improve the performance of many IOMMUs
+ * by reducing TLB pressure and time spent updating page tables.
+ */
+static const unsigned int orders[] = {8, 4, 0};
+#define NUM_ORDERS ARRAY_SIZE(orders)
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ new_sg = new_table->sgl;
+ for_each_sgtable_sg(table, sg, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
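+
+/*
+ * Each attachment gets its own duplicate of the buffer's sg_table, so DMA
+ * addresses set up for one device never clobber another attachment's
+ * entries.
+ */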
+
+static int system_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ struct sg_table *table;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = dup_sg_table(&buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+ a->mapped = false;
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void system_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = a->table;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ a->mapped = true;
+ return table;
+}
+
+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+
+ a->mapped = false;
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
-#include "heap-helpers.h"
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table = &buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ struct sg_page_iter piter;
+ int ret;
+
+ for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+ struct page *page = sg_page_iter_page(&piter);
+
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += PAGE_SIZE;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
+{
+ struct sg_table *table = &buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+ struct sg_page_iter piter;
+ void *vaddr;
+
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_sgtable_page(table, &piter, 0) {
+ WARN_ON(tmp - pages >= npages);
+ *tmp++ = sg_page_iter_page(&piter);
+ }
+
+ vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ vfree(pages);
+
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static int system_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+ goto out;
+ }
+
+ vaddr = system_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
-struct dma_heap *sys_heap;
+static void system_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
-static void system_heap_free(struct heap_helper_buffer *buffer)
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ dma_buf_map_clear(map);
+}
+
+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
{
- pgoff_t pg;
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i;
+
+ table = &buffer->sg_table;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ struct page *page = sg_page(sg);
- for (pg = 0; pg < buffer->pagecount; pg++)
- __free_page(buffer->pages[pg]);
- kfree(buffer->pages);
+ __free_pages(page, compound_order(page));
+ }
+ sg_free_table(table);
kfree(buffer);
}
+static const struct dma_buf_ops system_heap_buf_ops = {
+ .attach = system_heap_attach,
+ .detach = system_heap_detach,
+ .map_dma_buf = system_heap_map_dma_buf,
+ .unmap_dma_buf = system_heap_unmap_dma_buf,
+ .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = system_heap_dma_buf_end_cpu_access,
+ .mmap = system_heap_mmap,
+ .vmap = system_heap_vmap,
+ .vunmap = system_heap_vunmap,
+ .release = system_heap_dma_buf_release,
+};
+
+static struct page *alloc_largest_available(unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ if (size < (PAGE_SIZE << orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_pages(order_flags[i], orders[i]);
+ if (!page)
+ continue;
+ return page;
+ }
+ return NULL;
+}
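+
+/*
+ * Worked example (4K pages): a 1MB + 64K + 4K request yields one order-8
+ * page, one order-4 page and one order-0 page, with max_order only ever
+ * decreasing across successive calls from the allocation loop.
+ */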
+
static int system_heap_allocate(struct dma_heap *heap,
unsigned long len,
unsigned long fd_flags,
unsigned long heap_flags)
{
- struct heap_helper_buffer *helper_buffer;
+ struct system_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ unsigned long size_remaining = len;
+ unsigned int max_order = orders[0];
struct dma_buf *dmabuf;
- int ret = -ENOMEM;
- pgoff_t pg;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ struct list_head pages;
+ struct page *page, *tmp_page;
+ int i, ret = -ENOMEM;
- helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
- if (!helper_buffer)
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
return -ENOMEM;
- init_heap_helper_buffer(helper_buffer, system_heap_free);
- helper_buffer->heap = heap;
- helper_buffer->size = len;
-
- helper_buffer->pagecount = len / PAGE_SIZE;
- helper_buffer->pages = kmalloc_array(helper_buffer->pagecount,
- sizeof(*helper_buffer->pages),
- GFP_KERNEL);
- if (!helper_buffer->pages) {
- ret = -ENOMEM;
- goto err0;
- }
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->heap = heap;
+ buffer->len = len;
- for (pg = 0; pg < helper_buffer->pagecount; pg++) {
+ INIT_LIST_HEAD(&pages);
+ i = 0;
+ while (size_remaining > 0) {
/*
* Avoid trying to allocate memory if the process
- * has been killed by by SIGKILL
+ * has been killed by SIGKILL
*/
if (fatal_signal_pending(current))
- goto err1;
+ goto free_buffer;
+
+ page = alloc_largest_available(size_remaining, max_order);
+ if (!page)
+ goto free_buffer;
+
+ list_add_tail(&page->lru, &pages);
+ size_remaining -= page_size(page);
+ max_order = compound_order(page);
+ i++;
+ }
+
+ table = &buffer->sg_table;
+ if (sg_alloc_table(table, i, GFP_KERNEL))
+ goto free_buffer;
- helper_buffer->pages[pg] = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!helper_buffer->pages[pg])
- goto err1;
+ sg = table->sgl;
+ list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+ sg_set_page(sg, page, page_size(page), 0);
+ sg = sg_next(sg);
+ list_del(&page->lru);
}
/* create the dmabuf */
- dmabuf = heap_helper_export_dmabuf(helper_buffer, fd_flags);
+ exp_info.ops = &system_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
if (IS_ERR(dmabuf)) {
ret = PTR_ERR(dmabuf);
- goto err1;
+ goto free_pages;
}
- helper_buffer->dmabuf = dmabuf;
-
ret = dma_buf_fd(dmabuf, fd_flags);
if (ret < 0) {
dma_buf_put(dmabuf);
/* just return, as put will call release and that will free */
return ret;
}
-
return ret;
-err1:
- while (pg > 0)
- __free_page(helper_buffer->pages[--pg]);
- kfree(helper_buffer->pages);
-err0:
- kfree(helper_buffer);
+free_pages:
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *p = sg_page(sg);
+
+ __free_pages(p, compound_order(p));
+ }
+ sg_free_table(table);
+free_buffer:
+ list_for_each_entry_safe(page, tmp_page, &pages, lru)
+ __free_pages(page, compound_order(page));
+ kfree(buffer);
return ret;
}
@@ -107,7 +428,6 @@ static const struct dma_heap_ops system_heap_ops = {
static int system_heap_create(void)
{
struct dma_heap_export_info exp_info;
- int ret = 0;
exp_info.name = "system";
exp_info.ops = &system_heap_ops;
@@ -115,9 +435,9 @@ static int system_heap_create(void)
sys_heap = dma_heap_add(&exp_info);
if (IS_ERR(sys_heap))
- ret = PTR_ERR(sys_heap);
+ return PTR_ERR(sys_heap);
- return ret;
+ return 0;
}
module_init(system_heap_create);
MODULE_LICENSE("GPL v2");
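
For reference, a minimal userspace consumer of this heap through the dma-heap chardev; a sketch, assuming a kernel that exposes /dev/dma_heap/system and the uapi <linux/dma-heap.h> header:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/dma-heap.h>

int main(void)
{
	struct dma_heap_allocation_data data;
	int heap_fd;
	void *p;

	heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
	if (heap_fd < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	data.len = 1 << 20;			/* 1 MiB */
	data.fd_flags = O_RDWR | O_CLOEXEC;	/* flags for the dmabuf fd */

	if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0)
		return 1;

	/* data.fd now holds a dma-buf backed by the allocation above. */
	p = mmap(NULL, data.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		 data.fd, 0);
	if (p != MAP_FAILED)
		munmap(p, data.len);

	close(data.fd);
	close(heap_fd);
	return 0;
}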
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 90284ffda58a..d242c7632621 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -296,6 +296,16 @@ config INTEL_IDXD
If unsure, say N.
+# Config symbol that collects all the dependencies that are necessary to
+# support shared virtual memory for the devices supported by idxd.
+config INTEL_IDXD_SVM
+ bool "Accelerator Shared Virtual Memory Support"
+ depends on INTEL_IDXD
+ depends on INTEL_IOMMU_SVM
+ depends on PCI_PRI
+ depends on PCI_PASID
+ depends on PCI_IOV
+
config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86_64
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 3b53115db268..fe45ad5d06c4 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -30,7 +30,24 @@
#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
+#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
+#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
+#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
+#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
+#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
+#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
+#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
+#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
+#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
+ AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
+#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
+#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
+#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
+#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
+#define AT_XDMAC_GWAC_M2M 0
+#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
+
#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
@@ -38,13 +55,6 @@
#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
-#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
-#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
-#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
-#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
-#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
-#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */
-#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
/* Channel relative registers offsets */
@@ -150,8 +160,6 @@
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
-#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
-
/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
@@ -179,6 +187,29 @@ enum atc_status {
AT_XDMAC_CHAN_IS_PAUSED,
};
+struct at_xdmac_layout {
+ /* Global Channel Read Suspend Register */
+ u8 grs;
+ /* Global Write Suspend Register */
+ u8 gws;
+ /* Global Channel Read Write Suspend Register */
+ u8 grws;
+ /* Global Channel Read Write Resume Register */
+ u8 grwr;
+ /* Global Channel Software Request Register */
+ u8 gswr;
+ /* Global channel Software Request Status Register */
+ u8 gsws;
+ /* Global Channel Software Flush Request Register */
+ u8 gswf;
+ /* Channel reg base */
+ u8 chan_cc_reg_base;
+	/* Whether the Source/Destination Interface must be specified */
+ bool sdif;
+ /* AXI queue priority configuration supported */
+ bool axi_config;
+};
+
/* ----- Channels ----- */
struct at_xdmac_chan {
struct dma_chan chan;
@@ -212,6 +243,7 @@ struct at_xdmac {
struct clk *clk;
u32 save_gim;
struct dma_pool *at_xdmac_desc_pool;
+ const struct at_xdmac_layout *layout;
struct at_xdmac_chan chan[];
};
@@ -244,9 +276,35 @@ struct at_xdmac_desc {
struct list_head xfer_node;
} __aligned(sizeof(u64));
+static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
+ .grs = 0x28,
+ .gws = 0x2C,
+ .grws = 0x30,
+ .grwr = 0x34,
+ .gswr = 0x38,
+ .gsws = 0x3C,
+ .gswf = 0x40,
+ .chan_cc_reg_base = 0x50,
+ .sdif = true,
+ .axi_config = false,
+};
+
+static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
+ .grs = 0x30,
+ .gws = 0x38,
+ .grws = 0x40,
+ .grwr = 0x44,
+ .gswr = 0x48,
+ .gsws = 0x4C,
+ .gswf = 0x50,
+ .chan_cc_reg_base = 0x60,
+ .sdif = false,
+ .axi_config = true,
+};
+
static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
- return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
+ return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}
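
With the layout table in place, the only per-variant input to the channel base computation is chan_cc_reg_base. A quick standalone check of the arithmetic, with values taken from the two layout structs above:

#include <stdio.h>

/* Mirrors at_xdmac_chan_reg_base() with the layout field passed in. */
static unsigned int chan_base(unsigned int chan_cc_reg_base,
			      unsigned int chan_nb)
{
	return chan_cc_reg_base + chan_nb * 0x40;
}

int main(void)
{
	printf("sama5d4 chan 2: %#x\n", chan_base(0x50, 2)); /* 0xd0 */
	printf("sama7g5 chan 2: %#x\n", chan_base(0x60, 2)); /* 0xe0 */
	return 0;
}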
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
@@ -345,8 +403,10 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
first->active_xfer = true;
/* Tell xdmac where to get the first descriptor. */
- reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
- | AT_XDMAC_CNDA_NDAIF(atchan->memif);
+ reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
+ if (atxdmac->layout->sdif)
+ reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
+
at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
/*
@@ -541,6 +601,7 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
enum dma_transfer_direction direction)
{
struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
+ struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
int csize, dwidth;
if (direction == DMA_DEV_TO_MEM) {
@@ -548,12 +609,14 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
AT91_XDMAC_DT_PERID(atchan->perid)
| AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_FIXED_AM
- | AT_XDMAC_CC_DIF(atchan->memif)
- | AT_XDMAC_CC_SIF(atchan->perif)
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
| AT_XDMAC_CC_DSYNC_PER2MEM
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_PER_TRAN;
+ if (atxdmac->layout->sdif)
+ atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
+ AT_XDMAC_CC_SIF(atchan->perif);
+
csize = ffs(atchan->sconfig.src_maxburst) - 1;
if (csize < 0) {
dev_err(chan2dev(chan), "invalid src maxburst value\n");
@@ -571,12 +634,14 @@ static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
AT91_XDMAC_DT_PERID(atchan->perid)
| AT_XDMAC_CC_DAM_FIXED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(atchan->perif)
- | AT_XDMAC_CC_SIF(atchan->memif)
| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
| AT_XDMAC_CC_DSYNC_MEM2PER
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_PER_TRAN;
+ if (atxdmac->layout->sdif)
+ atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
+ AT_XDMAC_CC_SIF(atchan->memif);
+
csize = ffs(atchan->sconfig.dst_maxburst) - 1;
if (csize < 0) {
dev_err(chan2dev(chan), "invalid src maxburst value\n");
@@ -866,10 +931,12 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
* ERRATA: Even if useless for memory transfers, the PERID has to not
* match the one of another channel. If not, it could lead to spurious
* flag status.
+	 * For the SAMA7G5, the SIF and DIF fields are no longer used, so
+	 * there is no need to set the SIF/DIF interfaces here.
+	 * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured
+	 * as zero.
*/
- u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
- | AT_XDMAC_CC_DIF(0)
- | AT_XDMAC_CC_SIF(0)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1048,12 +1115,14 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
* ERRATA: Even if useless for memory transfers, the PERID has to not
* match the one of another channel. If not, it could lead to spurious
* flag status.
+	 * For the SAMA7G5, the SIF and DIF fields are no longer used, so
+	 * there is no need to set the SIF/DIF interfaces here.
+	 * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured
+	 * as zero.
*/
- u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_DAM_INCREMENTED_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(0)
- | AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_TYPE_MEM_TRAN;
unsigned long irqflags;
@@ -1154,12 +1223,14 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
* ERRATA: Even if useless for memory transfers, the PERID has to not
* match the one of another channel. If not, it could lead to spurious
* flag status.
+	 * For the SAMA7G5, the SIF and DIF fields are no longer used, so
+	 * there is no need to set the SIF/DIF interfaces here.
+	 * For SAMA5D4x and SAMA5D2x, the SIF and DIF are already configured
+	 * as zero.
*/
- u32 chan_cc = AT_XDMAC_CC_PERID(0x3f)
+ u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
| AT_XDMAC_CC_DAM_UBS_AM
| AT_XDMAC_CC_SAM_INCREMENTED_AM
- | AT_XDMAC_CC_DIF(0)
- | AT_XDMAC_CC_SIF(0)
| AT_XDMAC_CC_MBSIZE_SIXTEEN
| AT_XDMAC_CC_MEMSET_HW_MODE
| AT_XDMAC_CC_TYPE_MEM_TRAN;
@@ -1438,7 +1509,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
if ((desc->lld.mbr_cfg & mask) == value) {
- at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
cpu_relax();
}
@@ -1496,7 +1567,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
* FIFO flush ensures that data are really written.
*/
if ((desc->lld.mbr_cfg & mask) == value) {
- at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
cpu_relax();
}
@@ -1761,7 +1832,7 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
return 0;
spin_lock_irqsave(&atchan->lock, flags);
- at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
& (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
cpu_relax();
@@ -1784,7 +1855,7 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
return 0;
}
- at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
+ at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
spin_unlock_irqrestore(&atchan->lock, flags);
@@ -1947,6 +2018,30 @@ static int atmel_xdmac_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
+static void at_xdmac_axi_config(struct platform_device *pdev)
+{
+ struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
+ bool dev_m2m = false;
+ u32 dma_requests;
+
+ if (!atxdmac->layout->axi_config)
+ return; /* Not supported */
+
+ if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
+ &dma_requests)) {
+ dev_info(&pdev->dev, "controller in mem2mem mode.\n");
+ dev_m2m = true;
+ }
+
+ if (dev_m2m) {
+ at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
+ at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
+ } else {
+ at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
+ at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
+ }
+}
+
static int at_xdmac_probe(struct platform_device *pdev)
{
struct at_xdmac *atxdmac;
@@ -1986,6 +2081,10 @@ static int at_xdmac_probe(struct platform_device *pdev)
atxdmac->regs = base;
atxdmac->irq = irq;
+ atxdmac->layout = of_device_get_match_data(&pdev->dev);
+ if (!atxdmac->layout)
+ return -ENODEV;
+
atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
if (IS_ERR(atxdmac->clk)) {
dev_err(&pdev->dev, "can't get dma_clk\n");
@@ -2087,6 +2186,8 @@ static int at_xdmac_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
nr_channels, atxdmac->regs);
+ at_xdmac_axi_config(pdev);
+
return 0;
err_dma_unregister:
@@ -2128,6 +2229,10 @@ static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
static const struct of_device_id atmel_xdmac_dt_ids[] = {
{
.compatible = "atmel,sama5d4-dma",
+ .data = &at_xdmac_sama5d4_layout,
+ }, {
+ .compatible = "microchip,sama7g5-dma",
+ .data = &at_xdmac_sama7g5_layout,
}, {
/* sentinel */
}
diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c
index a608efaa435f..612d353648cf 100644
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -1044,7 +1044,7 @@ static struct platform_driver jz4780_dma_driver = {
.remove = jz4780_dma_remove,
.driver = {
.name = "jz4780-dma",
- .of_match_table = of_match_ptr(jz4780_dma_dt_match),
+ .of_match_table = jz4780_dma_dt_match,
},
};
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index a3a172173e34..f696246f57fd 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -573,6 +573,7 @@ static int dmatest_func(void *data)
struct dmatest_params *params;
struct dma_chan *chan;
struct dma_device *dev;
+ struct device *dma_dev;
unsigned int error_count;
unsigned int failed_tests = 0;
unsigned int total_tests = 0;
@@ -606,6 +607,8 @@ static int dmatest_func(void *data)
params = &info->params;
chan = thread->chan;
dev = chan->device;
+ dma_dev = dmaengine_get_dma_device(chan);
+
src = &thread->src;
dst = &thread->dst;
if (thread->type == DMA_MEMCPY) {
@@ -730,7 +733,7 @@ static int dmatest_func(void *data)
filltime = ktime_add(filltime, diff);
}
- um = dmaengine_get_unmap_data(dev->dev, src->cnt + dst->cnt,
+ um = dmaengine_get_unmap_data(dma_dev, src->cnt + dst->cnt,
GFP_KERNEL);
if (!um) {
failed_tests++;
@@ -745,10 +748,10 @@ static int dmatest_func(void *data)
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
- um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
+ um->addr[i] = dma_map_page(dma_dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
srcs[i] = um->addr[i] + src->off;
- ret = dma_mapping_error(dev->dev, um->addr[i]);
+ ret = dma_mapping_error(dma_dev, um->addr[i]);
if (ret) {
result("src mapping error", total_tests,
src->off, dst->off, len, ret);
@@ -763,9 +766,9 @@ static int dmatest_func(void *data)
struct page *pg = virt_to_page(buf);
unsigned long pg_off = offset_in_page(buf);
- dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
+ dsts[i] = dma_map_page(dma_dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev->dev, dsts[i]);
+ ret = dma_mapping_error(dma_dev, dsts[i]);
if (ret) {
result("dst mapping error", total_tests,
src->off, dst->off, len, ret);
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 14c1ac26f866..e164f3295f5d 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -992,7 +992,7 @@ static struct platform_driver dw_driver = {
.remove = dw_remove,
.driver = {
.name = KBUILD_MODNAME,
- .of_match_table = of_match_ptr(dw_dma_of_id_table),
+ .of_match_table = dw_dma_of_id_table,
.pm = &dw_axi_dma_pm_ops,
},
};
diff --git a/drivers/dma/dw-edma/dw-edma-core.c b/drivers/dma/dw-edma/dw-edma-core.c
index b971505b8715..08d71dafa001 100644
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
if (desc->chunk) {
/* Create and add new element into the linked list */
- desc->chunks_alloc++;
- list_add_tail(&chunk->list, &desc->chunk->list);
if (!dw_edma_alloc_burst(chunk)) {
kfree(chunk);
return NULL;
}
+ desc->chunks_alloc++;
+ list_add_tail(&chunk->list, &desc->chunk->list);
} else {
/* List head */
chunk->burst = NULL;
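
The reordering above is the general "initialize fully, then publish" rule: the old code linked the chunk into the descriptor's list before the fallible burst allocation, so the error path freed a chunk that was still reachable from that list. A generic sketch of the corrected shape:

struct node {
	struct node *next;
	void *payload;
};

/* Fallible setup first; publish to the shared list only on success. */
static int add_node(struct node **head, struct node *n,
		    void *(*alloc_payload)(void))
{
	n->payload = alloc_payload();
	if (!n->payload)
		return -1;	/* nothing was published, nothing to unwind */
	n->next = *head;
	*head = n;
	return 0;
}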
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 7ab83fe601ed..19a23767533a 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -982,8 +982,11 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dev_vdbg(chan2dev(chan), "%s\n", __func__);
+ pm_runtime_get_sync(dw->dma.dev);
+
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
+ pm_runtime_put_sync_suspend(dw->dma.dev);
dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
@@ -1000,6 +1003,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* We need controller-specific data to set up slave transfers.
*/
if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ pm_runtime_put_sync_suspend(dw->dma.dev);
dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
return -EINVAL;
}
@@ -1043,6 +1047,8 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
if (!dw->in_use)
do_dw_dma_off(dw);
+ pm_runtime_put_sync_suspend(dw->dma.dev);
+
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c
index e1a958ae7925..a259ee010e9b 100644
--- a/drivers/dma/hisi_dma.c
+++ b/drivers/dma/hisi_dma.c
@@ -431,9 +431,8 @@ static irqreturn_t hisi_dma_irq(int irq, void *data)
struct hisi_dma_dev *hdma_dev = chan->hdma_dev;
struct hisi_dma_desc *desc;
struct hisi_dma_cqe *cqe;
- unsigned long flags;
- spin_lock_irqsave(&chan->vc.lock, flags);
+ spin_lock(&chan->vc.lock);
desc = chan->desc;
cqe = chan->cq + chan->cq_head;
@@ -452,7 +451,7 @@ static irqreturn_t hisi_dma_irq(int irq, void *data)
chan->desc = NULL;
}
- spin_unlock_irqrestore(&chan->vc.lock, flags);
+ spin_unlock(&chan->vc.lock);
return IRQ_HANDLED;
}
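
The irqsave/irqrestore drop is safe because hisi_dma_irq() runs in hard-IRQ context, where local interrupts are already disabled; the costlier variants are only needed where the interrupt state is unknown. An illustrative kernel-style sketch of the convention, not tied to this driver:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Hard-IRQ context: interrupts are already off, a plain lock suffices. */
static void touch_from_hardirq(void)
{
	spin_lock(&demo_lock);
	/* ... access data shared with process context ... */
	spin_unlock(&demo_lock);
}

/* Process context: interrupts must be masked while the lock is held. */
static void touch_from_process(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	/* ... same shared data ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}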
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index f5a84c846394..f4c07ad3be15 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -667,9 +667,7 @@ static int idma64_platform_remove(struct platform_device *pdev)
return idma64_remove(chip);
}
-#ifdef CONFIG_PM_SLEEP
-
-static int idma64_pm_suspend(struct device *dev)
+static int __maybe_unused idma64_pm_suspend(struct device *dev)
{
struct idma64_chip *chip = dev_get_drvdata(dev);
@@ -677,7 +675,7 @@ static int idma64_pm_suspend(struct device *dev)
return 0;
}
-static int idma64_pm_resume(struct device *dev)
+static int __maybe_unused idma64_pm_resume(struct device *dev)
{
struct idma64_chip *chip = dev_get_drvdata(dev);
@@ -685,8 +683,6 @@ static int idma64_pm_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM_SLEEP */
-
static const struct dev_pm_ops idma64_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(idma64_pm_suspend, idma64_pm_resume)
};
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index c3976156db2f..0db9b82ed8cf 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -11,6 +11,7 @@
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include "registers.h"
#include "idxd.h"
@@ -27,12 +28,15 @@ struct idxd_cdev_context {
*/
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
{ .name = "dsa" },
+ { .name = "iax" }
};
struct idxd_user_context {
struct idxd_wq *wq;
struct task_struct *task;
+ unsigned int pasid;
unsigned int flags;
+ struct iommu_sva *sva;
};
enum idxd_cdev_cleanup {
@@ -75,6 +79,8 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
struct idxd_wq *wq;
struct device *dev;
int rc = 0;
+ struct iommu_sva *sva;
+ unsigned int pasid;
wq = inode_wq(inode);
idxd = wq->idxd;
@@ -95,6 +101,34 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
ctx->wq = wq;
filp->private_data = ctx;
+
+ if (device_pasid_enabled(idxd)) {
+ sva = iommu_sva_bind_device(dev, current->mm, NULL);
+ if (IS_ERR(sva)) {
+ rc = PTR_ERR(sva);
+ dev_err(dev, "pasid allocation failed: %d\n", rc);
+ goto failed;
+ }
+
+ pasid = iommu_sva_get_pasid(sva);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(sva);
+ goto failed;
+ }
+
+ ctx->sva = sva;
+ ctx->pasid = pasid;
+
+ if (wq_dedicated(wq)) {
+ rc = idxd_wq_set_pasid(wq, pasid);
+ if (rc < 0) {
+ iommu_sva_unbind_device(sva);
+ dev_err(dev, "wq set pasid failed: %d\n", rc);
+ goto failed;
+ }
+ }
+ }
+
idxd_wq_get(wq);
mutex_unlock(&wq->wq_lock);
return 0;
@@ -111,13 +145,27 @@ static int idxd_cdev_release(struct inode *node, struct file *filep)
struct idxd_wq *wq = ctx->wq;
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
+ int rc;
dev_dbg(dev, "%s called\n", __func__);
filep->private_data = NULL;
/* Wait for in-flight operations to complete. */
- idxd_wq_drain(wq);
+ if (wq_shared(wq)) {
+ idxd_device_drain_pasid(idxd, ctx->pasid);
+ } else {
+ if (device_pasid_enabled(idxd)) {
+ /* The wq disable in the disable pasid function will drain the wq */
+ rc = idxd_wq_disable_pasid(wq);
+ if (rc < 0)
+ dev_err(dev, "wq disable pasid failed.\n");
+ } else {
+ idxd_wq_drain(wq);
+ }
+ }
+ if (ctx->sva)
+ iommu_sva_unbind_device(ctx->sva);
kfree(ctx);
mutex_lock(&wq->wq_lock);
idxd_wq_put(wq);
diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index 663344987e3f..95f94a3ed6be 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -131,6 +131,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
struct idxd_device *idxd = wq->idxd;
struct device *dev = &idxd->pdev->dev;
int rc, num_descs, i;
+ int align;
+ u64 tmp;
if (wq->type != IDXD_WQT_KERNEL)
return 0;
@@ -142,14 +144,27 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
if (rc < 0)
return rc;
- wq->compls_size = num_descs * sizeof(struct dsa_completion_record);
- wq->compls = dma_alloc_coherent(dev, wq->compls_size,
- &wq->compls_addr, GFP_KERNEL);
- if (!wq->compls) {
+ if (idxd->type == IDXD_TYPE_DSA)
+ align = 32;
+ else if (idxd->type == IDXD_TYPE_IAX)
+ align = 64;
+ else
+ return -ENODEV;
+
+ wq->compls_size = num_descs * idxd->compl_size + align;
+ wq->compls_raw = dma_alloc_coherent(dev, wq->compls_size,
+ &wq->compls_addr_raw, GFP_KERNEL);
+ if (!wq->compls_raw) {
rc = -ENOMEM;
goto fail_alloc_compls;
}
+ /* Adjust alignment */
+ wq->compls_addr = (wq->compls_addr_raw + (align - 1)) & ~(align - 1);
+ tmp = (u64)wq->compls_raw;
+ tmp = (tmp + (align - 1)) & ~(align - 1);
+ wq->compls = (struct dsa_completion_record *)tmp;
+
rc = alloc_descs(wq, num_descs);
if (rc < 0)
goto fail_alloc_descs;
@@ -163,9 +178,11 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
struct idxd_desc *desc = wq->descs[i];
desc->hw = wq->hw_descs[i];
- desc->completion = &wq->compls[i];
- desc->compl_dma = wq->compls_addr +
- sizeof(struct dsa_completion_record) * i;
+ if (idxd->type == IDXD_TYPE_DSA)
+ desc->completion = &wq->compls[i];
+ else if (idxd->type == IDXD_TYPE_IAX)
+ desc->iax_completion = &wq->iax_compls[i];
+ desc->compl_dma = wq->compls_addr + idxd->compl_size * i;
desc->id = i;
desc->wq = wq;
desc->cpu = -1;
@@ -178,7 +195,8 @@ int idxd_wq_alloc_resources(struct idxd_wq *wq)
fail_sbitmap_init:
free_descs(wq);
fail_alloc_descs:
- dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
+ wq->compls_addr_raw);
fail_alloc_compls:
free_hw_descs(wq);
return rc;
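
The over-allocation by align bytes plus the mask trick above is the standard power-of-two align-up; compls_addr and the compls pointer are rounded up in lockstep so the DMA address and the CPU pointer refer to the same record. A quick check of the arithmetic:

#include <assert.h>
#include <stdint.h>

/* Power-of-two align-up, as used for compls_addr above. */
static uint64_t align_up(uint64_t addr, uint64_t align)
{
	return (addr + (align - 1)) & ~(align - 1);
}

int main(void)
{
	assert(align_up(0x1000, 64) == 0x1000);	/* already aligned */
	assert(align_up(0x1001, 64) == 0x1040);	/* rounds up */
	assert(align_up(0x103f, 32) == 0x1040);
	return 0;
}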
@@ -193,7 +211,8 @@ void idxd_wq_free_resources(struct idxd_wq *wq)
free_hw_descs(wq);
free_descs(wq);
- dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr);
+ dma_free_coherent(dev, wq->compls_size, wq->compls_raw,
+ wq->compls_addr_raw);
sbitmap_queue_free(&wq->sbq);
}
@@ -273,10 +292,9 @@ int idxd_wq_map_portal(struct idxd_wq *wq)
start = pci_resource_start(pdev, IDXD_WQ_BAR);
start += idxd_get_wq_portal_full_offset(wq->id, IDXD_PORTAL_LIMITED);
- wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
- if (!wq->dportal)
+ wq->portal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE);
+ if (!wq->portal)
return -ENOMEM;
- dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal);
return 0;
}
@@ -285,7 +303,61 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
{
struct device *dev = &wq->idxd->pdev->dev;
- devm_iounmap(dev, wq->dportal);
+ devm_iounmap(dev, wq->portal);
+}
+
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+ union wqcfg wqcfg;
+ unsigned int offset;
+ unsigned long flags;
+
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ return rc;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
+ wqcfg.pasid_en = 1;
+ wqcfg.pasid = pasid;
+ iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0)
+ return rc;
+
+ return 0;
+}
+
+int idxd_wq_disable_pasid(struct idxd_wq *wq)
+{
+ struct idxd_device *idxd = wq->idxd;
+ int rc;
+ union wqcfg wqcfg;
+ unsigned int offset;
+ unsigned long flags;
+
+ rc = idxd_wq_disable(wq);
+ if (rc < 0)
+ return rc;
+
+ offset = WQCFG_OFFSET(idxd, wq->id, WQCFG_PASID_IDX);
+ spin_lock_irqsave(&idxd->dev_lock, flags);
+ wqcfg.bits[WQCFG_PASID_IDX] = ioread32(idxd->reg_base + offset);
+ wqcfg.pasid_en = 0;
+ wqcfg.pasid = 0;
+ iowrite32(wqcfg.bits[WQCFG_PASID_IDX], idxd->reg_base + offset);
+ spin_unlock_irqrestore(&idxd->dev_lock, flags);
+
+ rc = idxd_wq_enable(wq);
+ if (rc < 0)
+ return rc;
+
+ return 0;
}
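
Both helpers above follow the same disable -> read-modify-write -> re-enable sequence on the single 32-bit WQCFG word (index WQCFG_PASID_IDX, i.e. bytes 8-11) that carries pasid and pasid_en. A standalone sketch of that word's packing, following the wqcfg layout in registers.h later in this patch; the trailing priv/reserved bits are assumed from the upstream header, and C bitfield order is compiler-defined, so this is illustrative only:

#include <stdint.h>
#include <stdio.h>

union wqcfg_word2 {		/* bytes 8-11 of WQCFG */
	struct {
		uint32_t mode:1;
		uint32_t bof:1;
		uint32_t wq_ats_disable:1;
		uint32_t rsvd2:1;
		uint32_t priority:4;
		uint32_t pasid:20;
		uint32_t pasid_en:1;
		uint32_t priv:1;
		uint32_t rsvd3:2;
	};
	uint32_t bits;
};

int main(void)
{
	union wqcfg_word2 w = { .bits = 0 };

	w.pasid = 42;
	w.pasid_en = 1;
	printf("WQCFG word 2: %#x\n", w.bits);	/* pasid sits in bits 8-27 */
	return 0;
}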
void idxd_wq_disable_cleanup(struct idxd_wq *wq)
@@ -301,6 +373,7 @@ void idxd_wq_disable_cleanup(struct idxd_wq *wq)
wq->group = NULL;
wq->threshold = 0;
wq->priority = 0;
+ wq->ats_dis = 0;
clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
memset(wq->name, 0, WQ_NAME_SIZE);
@@ -468,6 +541,17 @@ void idxd_device_reset(struct idxd_device *idxd)
spin_unlock_irqrestore(&idxd->dev_lock, flags);
}
+void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
+{
+ struct device *dev = &idxd->pdev->dev;
+ u32 operand;
+
+ operand = pasid;
+ dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_PASID, operand);
+ idxd_cmd_exec(idxd, IDXD_CMD_DRAIN_PASID, operand, NULL);
+ dev_dbg(dev, "pasid %d drained\n", pasid);
+}
+
/* Device configuration bits */
static void idxd_group_config_write(struct idxd_group *group)
{
@@ -479,24 +563,22 @@ static void idxd_group_config_write(struct idxd_group *group)
dev_dbg(dev, "Writing group %d cfg registers\n", group->id);
/* setup GRPWQCFG */
- for (i = 0; i < 4; i++) {
- grpcfg_offset = idxd->grpcfg_offset +
- group->id * 64 + i * sizeof(u64);
- iowrite64(group->grpcfg.wqs[i],
- idxd->reg_base + grpcfg_offset);
+ for (i = 0; i < GRPWQCFG_STRIDES; i++) {
+ grpcfg_offset = GRPWQCFG_OFFSET(idxd, group->id, i);
+ iowrite64(group->grpcfg.wqs[i], idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n",
group->id, i, grpcfg_offset,
ioread64(idxd->reg_base + grpcfg_offset));
}
/* setup GRPENGCFG */
- grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32;
+ grpcfg_offset = GRPENGCFG_OFFSET(idxd, group->id);
iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id,
grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset));
/* setup GRPFLAGS */
- grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40;
+ grpcfg_offset = GRPFLGCFG_OFFSET(idxd, group->id);
iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset);
dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n",
group->id, grpcfg_offset,
@@ -554,9 +636,24 @@ static int idxd_wq_config_write(struct idxd_wq *wq)
/* byte 8-11 */
wq->wqcfg->priv = !!(wq->type == IDXD_WQT_KERNEL);
- wq->wqcfg->mode = 1;
+ if (wq_dedicated(wq))
+ wq->wqcfg->mode = 1;
+
+ if (device_pasid_enabled(idxd)) {
+ wq->wqcfg->pasid_en = 1;
+ if (wq->type == IDXD_WQT_KERNEL && wq_dedicated(wq))
+ wq->wqcfg->pasid = idxd->pasid;
+ }
+
wq->wqcfg->priority = wq->priority;
+ if (idxd->hw.gen_cap.block_on_fault &&
+ test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags))
+ wq->wqcfg->bof = 1;
+
+ if (idxd->hw.wq_cap.wq_ats_support)
+ wq->wqcfg->wq_ats_disable = wq->ats_dis;
+
/* bytes 12-15 */
wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes);
wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size);
@@ -664,8 +761,8 @@ static int idxd_wqs_setup(struct idxd_device *idxd)
if (!wq->size)
continue;
- if (!wq_dedicated(wq)) {
- dev_warn(dev, "No shared workqueue support.\n");
+ if (wq_shared(wq) && !device_swq_supported(idxd)) {
+ dev_warn(dev, "No shared wq support but configured.\n");
return -EINVAL;
}
diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c
index 0c892cbd72e0..8ed2773d8285 100644
--- a/drivers/dma/idxd/dma.c
+++ b/drivers/dma/idxd/dma.c
@@ -61,8 +61,6 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
u64 addr_f1, u64 addr_f2, u64 len,
u64 compl, u32 flags)
{
- struct idxd_device *idxd = wq->idxd;
-
hw->flags = flags;
hw->opcode = opcode;
hw->src_addr = addr_f1;
@@ -70,13 +68,6 @@ static inline void idxd_prep_desc_common(struct idxd_wq *wq,
hw->xfer_size = len;
hw->priv = !!(wq->type == IDXD_WQT_KERNEL);
hw->completion_addr = compl;
-
- /*
- * Descriptor completion vectors are 1-8 for MSIX. We will round
- * robin through the 8 vectors.
- */
- wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
- hw->int_handle = wq->vec_ptr;
}
static struct dma_async_tx_descriptor *
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index d48f193daacc..5a50e91c71bf 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -20,7 +20,8 @@ extern struct kmem_cache *idxd_desc_pool;
enum idxd_type {
IDXD_TYPE_UNKNOWN = -1,
IDXD_TYPE_DSA = 0,
- IDXD_TYPE_MAX
+ IDXD_TYPE_IAX,
+ IDXD_TYPE_MAX,
};
#define IDXD_NAME_SIZE 128
@@ -34,6 +35,11 @@ struct idxd_irq_entry {
int id;
struct llist_head pending_llist;
struct list_head work_list;
+ /*
+	 * Lock to protect access between the irq thread processing
+	 * descriptors and the irq thread handling a faulted descriptor.
+ */
+ spinlock_t list_lock;
};
struct idxd_group {
@@ -59,6 +65,7 @@ enum idxd_wq_state {
enum idxd_wq_flag {
WQ_FLAG_DEDICATED = 0,
+ WQ_FLAG_BLOCK_ON_FAULT,
};
enum idxd_wq_type {
@@ -86,10 +93,11 @@ enum idxd_op_type {
enum idxd_complete_type {
IDXD_COMPLETE_NORMAL = 0,
IDXD_COMPLETE_ABORT,
+ IDXD_COMPLETE_DEV_FAIL,
};
struct idxd_wq {
- void __iomem *dportal;
+ void __iomem *portal;
struct device conf_dev;
struct idxd_cdev idxd_cdev;
struct idxd_device *idxd;
@@ -107,8 +115,13 @@ struct idxd_wq {
u32 vec_ptr; /* interrupt steering */
struct dsa_hw_desc **hw_descs;
int num_descs;
- struct dsa_completion_record *compls;
+ union {
+ struct dsa_completion_record *compls;
+ struct iax_completion_record *iax_compls;
+ };
+ void *compls_raw;
dma_addr_t compls_addr;
+ dma_addr_t compls_addr_raw;
int compls_size;
struct idxd_desc **descs;
struct sbitmap_queue sbq;
@@ -116,6 +129,7 @@ struct idxd_wq {
char name[WQ_NAME_SIZE + 1];
u64 max_xfer_bytes;
u32 max_batch_size;
+ bool ats_dis;
};
struct idxd_engine {
@@ -145,6 +159,7 @@ enum idxd_device_state {
enum idxd_device_flag {
IDXD_FLAG_CONFIGURABLE = 0,
IDXD_FLAG_CMD_RUNNING,
+ IDXD_FLAG_PASID_ENABLED,
};
struct idxd_device {
@@ -167,6 +182,9 @@ struct idxd_device {
struct idxd_wq *wqs;
struct idxd_engine *engines;
+ struct iommu_sva *sva;
+ unsigned int pasid;
+
int num_groups;
u32 msix_perm_offset;
@@ -184,6 +202,7 @@ struct idxd_device {
int token_limit;
int nr_tokens; /* non-reserved tokens */
unsigned int wqcfg_size;
+ int compl_size;
union sw_err_reg sw_err;
wait_queue_head_t cmd_waitq;
@@ -198,9 +217,15 @@ struct idxd_device {
/* IDXD software descriptor */
struct idxd_desc {
- struct dsa_hw_desc *hw;
+ union {
+ struct dsa_hw_desc *hw;
+ struct iax_hw_desc *iax_hw;
+ };
dma_addr_t desc_dma;
- struct dsa_completion_record *completion;
+ union {
+ struct dsa_completion_record *completion;
+ struct iax_completion_record *iax_completion;
+ };
dma_addr_t compl_dma;
struct dma_async_tx_descriptor txd;
struct llist_node llnode;
@@ -214,12 +239,30 @@ struct idxd_desc {
#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
extern struct bus_type dsa_bus_type;
+extern struct bus_type iax_bus_type;
+
+extern bool support_enqcmd;
static inline bool wq_dedicated(struct idxd_wq *wq)
{
return test_bit(WQ_FLAG_DEDICATED, &wq->flags);
}
+static inline bool wq_shared(struct idxd_wq *wq)
+{
+ return !test_bit(WQ_FLAG_DEDICATED, &wq->flags);
+}
+
+static inline bool device_pasid_enabled(struct idxd_device *idxd)
+{
+ return test_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+}
+
+static inline bool device_swq_supported(struct idxd_device *idxd)
+{
+ return (support_enqcmd && device_pasid_enabled(idxd));
+}
+
enum idxd_portal_prot {
IDXD_PORTAL_UNLIMITED = 0,
IDXD_PORTAL_LIMITED,
@@ -242,6 +285,8 @@ static inline void idxd_set_type(struct idxd_device *idxd)
if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0)
idxd->type = IDXD_TYPE_DSA;
+ else if (pdev->device == PCI_DEVICE_ID_INTEL_IAX_SPR0)
+ idxd->type = IDXD_TYPE_IAX;
else
idxd->type = IDXD_TYPE_UNKNOWN;
}
@@ -288,6 +333,7 @@ void idxd_device_reset(struct idxd_device *idxd);
void idxd_device_cleanup(struct idxd_device *idxd);
int idxd_device_config(struct idxd_device *idxd);
void idxd_device_wqs_clear_state(struct idxd_device *idxd);
+void idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
/* work queue control */
int idxd_wq_alloc_resources(struct idxd_wq *wq);
@@ -298,6 +344,8 @@ void idxd_wq_drain(struct idxd_wq *wq);
int idxd_wq_map_portal(struct idxd_wq *wq);
void idxd_wq_unmap_portal(struct idxd_wq *wq);
void idxd_wq_disable_cleanup(struct idxd_wq *wq);
+int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
+int idxd_wq_disable_pasid(struct idxd_wq *wq);
/* submission */
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc);
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 0a4432b063b5..2c051e07c34c 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -14,6 +14,8 @@
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/idr.h>
+#include <linux/intel-svm.h>
+#include <linux/iommu.h>
#include <uapi/linux/idxd.h>
#include <linux/dmaengine.h>
#include "../dmaengine.h"
@@ -26,18 +28,24 @@ MODULE_AUTHOR("Intel Corporation");
#define DRV_NAME "idxd"
+bool support_enqcmd;
+
static struct idr idxd_idrs[IDXD_TYPE_MAX];
static struct mutex idxd_idr_lock;
static struct pci_device_id idxd_pci_tbl[] = {
/* DSA ver 1.0 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+
+ /* IAX ver 1.0 platforms */
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
static char *idxd_name[] = {
"dsa",
+ "iax"
};
const char *idxd_get_dev_name(struct idxd_device *idxd)
@@ -53,6 +61,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
struct idxd_irq_entry *irq_entry;
int i, msixcnt;
int rc = 0;
+ union msix_perm mperm;
msixcnt = pci_msix_vec_count(pdev);
if (msixcnt < 0) {
@@ -92,6 +101,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
for (i = 0; i < msixcnt; i++) {
idxd->irq_entries[i].id = i;
idxd->irq_entries[i].idxd = idxd;
+ spin_lock_init(&idxd->irq_entries[i].list_lock);
}
msix = &idxd->msix_entries[0];
@@ -131,6 +141,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
idxd_unmask_error_interrupts(idxd);
+ /* Setup MSIX permission table */
+ mperm.bits = 0;
+ mperm.pasid = idxd->pasid;
+ mperm.pasid_en = device_pasid_enabled(idxd);
+ for (i = 1; i < msixcnt; i++)
+ iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+
return 0;
err_no_irq:
@@ -201,17 +218,14 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
struct device *dev = &idxd->pdev->dev;
offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
- offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
- + sizeof(u64));
- idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+ offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
+ idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
- idxd->wqcfg_offset = offsets.wqcfg * 0x100;
- dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
- idxd->wqcfg_offset);
- idxd->msix_perm_offset = offsets.msix_perm * 0x100;
- dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
- idxd->msix_perm_offset);
- idxd->perfmon_offset = offsets.perfmon * 0x100;
+ idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
+ dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
+ idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
+ dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
+ idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
}
@@ -265,8 +279,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
}
}
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
- void __iomem * const *iomap)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
@@ -276,12 +289,45 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
return NULL;
idxd->pdev = pdev;
- idxd->reg_base = iomap[IDXD_MMIO_BAR];
spin_lock_init(&idxd->dev_lock);
return idxd;
}
+static int idxd_enable_system_pasid(struct idxd_device *idxd)
+{
+ int flags;
+ unsigned int pasid;
+ struct iommu_sva *sva;
+
+ flags = SVM_FLAG_SUPERVISOR_MODE;
+
+ sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
+ if (IS_ERR(sva)) {
+ dev_warn(&idxd->pdev->dev,
+ "iommu sva bind failed: %ld\n", PTR_ERR(sva));
+ return PTR_ERR(sva);
+ }
+
+ pasid = iommu_sva_get_pasid(sva);
+ if (pasid == IOMMU_PASID_INVALID) {
+ iommu_sva_unbind_device(sva);
+ return -ENODEV;
+ }
+
+ idxd->sva = sva;
+ idxd->pasid = pasid;
+ dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
+ return 0;
+}
+
+static void idxd_disable_system_pasid(struct idxd_device *idxd)
+{
+ iommu_sva_unbind_device(idxd->sva);
+ idxd->sva = NULL;
+}
+
static int idxd_probe(struct idxd_device *idxd)
{
struct pci_dev *pdev = idxd->pdev;
@@ -292,6 +338,14 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_device_init_reset(idxd);
dev_dbg(dev, "IDXD reset complete\n");
+ if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
+ rc = idxd_enable_system_pasid(idxd);
+ if (rc < 0)
+ dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
+ else
+ set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+ }
+
idxd_read_caps(idxd);
idxd_read_table_offsets(idxd);
@@ -322,29 +376,37 @@ static int idxd_probe(struct idxd_device *idxd)
idxd_mask_error_interrupts(idxd);
idxd_mask_msix_vectors(idxd);
err_setup:
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
return rc;
}
+static void idxd_type_init(struct idxd_device *idxd)
+{
+ if (idxd->type == IDXD_TYPE_DSA)
+ idxd->compl_size = sizeof(struct dsa_completion_record);
+ else if (idxd->type == IDXD_TYPE_IAX)
+ idxd->compl_size = sizeof(struct iax_completion_record);
+}
+
static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- void __iomem * const *iomap;
struct device *dev = &pdev->dev;
struct idxd_device *idxd;
int rc;
- unsigned int mask;
rc = pcim_enable_device(pdev);
if (rc)
return rc;
- dev_dbg(dev, "Mapping BARs\n");
- mask = (1 << IDXD_MMIO_BAR);
- rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
- if (rc)
- return rc;
+ dev_dbg(dev, "Alloc IDXD context\n");
+ idxd = idxd_alloc(pdev);
+ if (!idxd)
+ return -ENOMEM;
- iomap = pcim_iomap_table(pdev);
- if (!iomap)
+ dev_dbg(dev, "Mapping BARs\n");
+ idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
+ if (!idxd->reg_base)
return -ENOMEM;
dev_dbg(dev, "Set DMA masks\n");
@@ -360,13 +422,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (rc)
return rc;
- dev_dbg(dev, "Alloc IDXD context\n");
- idxd = idxd_alloc(pdev, iomap);
- if (!idxd)
- return -ENOMEM;
-
idxd_set_type(idxd);
+ idxd_type_init(idxd);
+
dev_dbg(dev, "Set PCI master\n");
pci_set_master(pdev);
pci_set_drvdata(pdev, idxd);
@@ -452,6 +511,8 @@ static void idxd_remove(struct pci_dev *pdev)
dev_dbg(&pdev->dev, "%s called\n", __func__);
idxd_cleanup_sysfs(idxd);
idxd_shutdown(pdev);
+ if (device_pasid_enabled(idxd))
+ idxd_disable_system_pasid(idxd);
mutex_lock(&idxd_idr_lock);
idr_remove(&idxd_idrs[idxd->type], idxd->id);
mutex_unlock(&idxd_idr_lock);
@@ -470,7 +531,7 @@ static int __init idxd_init_module(void)
int err, i;
/*
- * If the CPU does not support write512, there's no point in
+ * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
* enumerating the device. We can not utilize it.
*/
if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
@@ -478,8 +539,10 @@ static int __init idxd_init_module(void)
return -ENODEV;
}
- pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
- DRV_NAME, IDXD_DRIVER_VERSION);
+ if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+ pr_warn("Platform does not have ENQCMD(S) support.\n");
+ else
+ support_enqcmd = true;
mutex_init(&idxd_idr_lock);
for (i = 0; i < IDXD_TYPE_MAX; i++)
diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c
index 17a65a13fb64..593a2f6ed16c 100644
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -11,6 +11,24 @@
#include "idxd.h"
#include "registers.h"
+enum irq_work_type {
+ IRQ_WORK_NORMAL = 0,
+ IRQ_WORK_PROCESS_FAULT,
+};
+
+struct idxd_fault {
+ struct work_struct work;
+ u64 addr;
+ struct idxd_device *idxd;
+};
+
+static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
+ enum irq_work_type wtype,
+ int *processed, u64 data);
+static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
+ enum irq_work_type wtype,
+ int *processed, u64 data);
+
static void idxd_device_reinit(struct work_struct *work)
{
struct idxd_device *idxd = container_of(work, struct idxd_device, work);
@@ -44,6 +62,46 @@ static void idxd_device_reinit(struct work_struct *work)
idxd_device_wqs_clear_state(idxd);
}
+static void idxd_device_fault_work(struct work_struct *work)
+{
+ struct idxd_fault *fault = container_of(work, struct idxd_fault, work);
+ struct idxd_irq_entry *ie;
+ int i;
+ int processed;
+ int irqcnt = fault->idxd->num_wq_irqs + 1;
+
+ for (i = 1; i < irqcnt; i++) {
+ ie = &fault->idxd->irq_entries[i];
+ irq_process_work_list(ie, IRQ_WORK_PROCESS_FAULT,
+ &processed, fault->addr);
+ if (processed)
+ break;
+
+ irq_process_pending_llist(ie, IRQ_WORK_PROCESS_FAULT,
+ &processed, fault->addr);
+ if (processed)
+ break;
+ }
+
+ kfree(fault);
+}
+
+static int idxd_device_schedule_fault_process(struct idxd_device *idxd,
+ u64 fault_addr)
+{
+ struct idxd_fault *fault;
+
+ fault = kmalloc(sizeof(*fault), GFP_ATOMIC);
+ if (!fault)
+ return -ENOMEM;
+
+ fault->addr = fault_addr;
+ fault->idxd = idxd;
+ INIT_WORK(&fault->work, idxd_device_fault_work);
+ queue_work(idxd->wq, &fault->work);
+ return 0;
+}
+
irqreturn_t idxd_irq_handler(int vec, void *data)
{
struct idxd_irq_entry *irq_entry = data;
@@ -125,6 +183,15 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
if (!err)
goto out;
+ /*
+	 * This case should rarely happen and is typically due to a software
+	 * programming error in the driver.
+ */
+ if (idxd->sw_err.valid &&
+ idxd->sw_err.desc_valid &&
+ idxd->sw_err.fault_addr)
+ idxd_device_schedule_fault_process(idxd, idxd->sw_err.fault_addr);
+
gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
if (gensts.state == IDXD_DEVICE_STATE_HALT) {
idxd->state = IDXD_DEV_HALTED;
@@ -152,57 +219,110 @@ irqreturn_t idxd_misc_thread(int vec, void *data)
return IRQ_HANDLED;
}
+static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+{
+ /*
+ * Completion address can be bad as well. Check fault address match for descriptor
+ * and completion address.
+ */
+ if ((u64)desc->hw == fault_addr ||
+ (u64)desc->completion == fault_addr) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+ return true;
+ }
+
+ return false;
+}
+
+static bool complete_desc(struct idxd_desc *desc)
+{
+ if (desc->completion->status) {
+ idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ return true;
+ }
+
+ return false;
+}
+
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
- int *processed)
+ enum irq_work_type wtype,
+ int *processed, u64 data)
{
struct idxd_desc *desc, *t;
struct llist_node *head;
int queued = 0;
+ bool completed = false;
+ unsigned long flags;
*processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
- return 0;
+ goto out;
llist_for_each_entry_safe(desc, t, head, llnode) {
- if (desc->completion->status) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ if (wtype == IRQ_WORK_NORMAL)
+ completed = complete_desc(desc);
+ else if (wtype == IRQ_WORK_PROCESS_FAULT)
+ completed = process_fault(desc, data);
+
+ if (completed) {
idxd_free_desc(desc->wq, desc);
(*processed)++;
+ if (wtype == IRQ_WORK_PROCESS_FAULT)
+ break;
} else {
- list_add_tail(&desc->list, &irq_entry->work_list);
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
+ list_add_tail(&desc->list,
+ &irq_entry->work_list);
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
queued++;
}
}
+ out:
return queued;
}
static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
- int *processed)
+ enum irq_work_type wtype,
+ int *processed, u64 data)
{
struct list_head *node, *next;
int queued = 0;
+ bool completed = false;
+ unsigned long flags;
*processed = 0;
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
if (list_empty(&irq_entry->work_list))
- return 0;
+ goto out;
list_for_each_safe(node, next, &irq_entry->work_list) {
struct idxd_desc *desc =
container_of(node, struct idxd_desc, list);
- if (desc->completion->status) {
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+ if (wtype == IRQ_WORK_NORMAL)
+ completed = complete_desc(desc);
+ else if (wtype == IRQ_WORK_PROCESS_FAULT)
+ completed = process_fault(desc, data);
+
+ if (completed) {
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
list_del(&desc->list);
- /* process and callback */
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
idxd_free_desc(desc->wq, desc);
(*processed)++;
+ if (wtype == IRQ_WORK_PROCESS_FAULT)
+ return queued;
} else {
queued++;
}
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
}
+ out:
+ spin_unlock_irqrestore(&irq_entry->list_lock, flags);
return queued;
}
@@ -230,12 +350,14 @@ static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
* 5. Repeat until no more descriptors.
*/
do {
- rc = irq_process_work_list(irq_entry, &processed);
+ rc = irq_process_work_list(irq_entry, IRQ_WORK_NORMAL,
+ &processed, 0);
total += processed;
if (rc != 0)
continue;
- rc = irq_process_pending_llist(irq_entry, &processed);
+ rc = irq_process_pending_llist(irq_entry, IRQ_WORK_NORMAL,
+ &processed, 0);
total += processed;
} while (rc != 0);
diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h
index 54390334c243..751ecb4f9f81 100644
--- a/drivers/dma/idxd/registers.h
+++ b/drivers/dma/idxd/registers.h
@@ -5,6 +5,7 @@
/* PCI Config */
#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25
+#define PCI_DEVICE_ID_INTEL_IAX_SPR0 0x0cfe
#define IDXD_MMIO_BAR 0
#define IDXD_WQ_BAR 2
@@ -47,7 +48,7 @@ union wq_cap_reg {
u64 rsvd:20;
u64 shared_mode:1;
u64 dedicated_mode:1;
- u64 rsvd2:1;
+ u64 wq_ats_support:1;
u64 priority:1;
u64 occupancy:1;
u64 occupancy_int:1;
@@ -102,6 +103,8 @@ union offsets_reg {
u64 bits[2];
} __packed;
+#define IDXD_TABLE_MULT 0x100
+
#define IDXD_GENCFG_OFFSET 0x80
union gencfg_reg {
struct {
@@ -301,7 +304,8 @@ union wqcfg {
/* bytes 8-11 */
u32 mode:1; /* shared or dedicated */
u32 bof:1; /* block on fault */
- u32 rsvd2:2;
+ u32 wq_ats_disable:1;
+ u32 rsvd2:1;
u32 priority:4;
u32 pasid:20;
u32 pasid_en:1;
@@ -336,6 +340,8 @@ union wqcfg {
u32 bits[8];
} __packed;
+#define WQCFG_PASID_IDX 2
+
/*
* This macro calculates the offset into the WQCFG register
* idxd - struct idxd *
@@ -354,4 +360,22 @@ union wqcfg {
#define WQCFG_STRIDES(_idxd_dev) ((_idxd_dev)->wqcfg_size / sizeof(u32))
+#define GRPCFG_SIZE 64
+#define GRPWQCFG_STRIDES 4
+
+/*
+ * This macro calculates the offset into the GRPCFG register
+ * idxd - struct idxd *
+ * n - group id
+ * ofs - the index of the 64b qword for the GRPWQCFG register
+ *
+ * The GRPCFG register block is divided into per-group blocks. The n index
+ * moves us to the block for that particular group. Each GRPWQCFG register
+ * is 64 bits; ofs selects which qword of the group's WQ bitmap to access.
+ */
+#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
+ (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
+#define GRPENGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 32)
+#define GRPFLGCFG_OFFSET(idxd_dev, n) ((idxd_dev)->grpcfg_offset + (n) * GRPCFG_SIZE + 40)
+
#endif
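
A worked example of the group-config offsets above, with a hypothetical grpcfg_offset of 0x400 (the real value comes from the table-offsets register scaled by IDXD_TABLE_MULT): group 2's four-qword WQ bitmap starts at 0x400 + 2*64 = 0x480, its engine mask at 0x480 + 32 = 0x4a0, and its flags at 0x480 + 40 = 0x4a8:

#include <stdio.h>

#define GRPCFG_SIZE 64

int main(void)
{
	unsigned int base = 0x400;	/* hypothetical grpcfg_offset */
	int n = 2, ofs;

	for (ofs = 0; ofs < 4; ofs++)	/* GRPWQCFG_STRIDES qwords */
		printf("GRPWQCFG(%d,%d): %#x\n", n, ofs,
		       base + n * GRPCFG_SIZE + 8 * ofs);
	printf("GRPENGCFG(%d):  %#x\n", n, base + n * GRPCFG_SIZE + 32);
	printf("GRPFLGCFG(%d):  %#x\n", n, base + n * GRPCFG_SIZE + 40);
	return 0;
}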
diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c
index 417048e3c42a..a7a61bcc17d5 100644
--- a/drivers/dma/idxd/submit.c
+++ b/drivers/dma/idxd/submit.c
@@ -11,11 +11,22 @@
static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
{
struct idxd_desc *desc;
+ struct idxd_device *idxd = wq->idxd;
desc = wq->descs[idx];
memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
- memset(desc->completion, 0, sizeof(struct dsa_completion_record));
+ memset(desc->completion, 0, idxd->compl_size);
desc->cpu = cpu;
+
+ if (device_pasid_enabled(idxd))
+ desc->hw->pasid = idxd->pasid;
+
+ /*
+	 * Descriptor completion vectors are 1-8 for MSIX. We will
+	 * round-robin through the 8 vectors.
+ */
+ wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+ desc->hw->int_handle = wq->vec_ptr;
return desc;
}
@@ -70,18 +81,32 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
struct idxd_device *idxd = wq->idxd;
int vec = desc->hw->int_handle;
void __iomem *portal;
+ int rc;
if (idxd->state != IDXD_DEV_ENABLED)
return -EIO;
- portal = wq->dportal;
+ portal = wq->portal;
+
/*
- * The wmb() flushes writes to coherent DMA data before possibly
- * triggering a DMA read. The wmb() is necessary even on UP because
- * the recipient is a device.
+ * The wmb() flushes writes to coherent DMA data before
+ * possibly triggering a DMA read. The wmb() is necessary
+ * even on UP because the recipient is a device.
*/
wmb();
- iosubmit_cmds512(portal, desc->hw, 1);
+ if (wq_dedicated(wq)) {
+ iosubmit_cmds512(portal, desc->hw, 1);
+ } else {
+ /*
+		 * A queue-full rejection is unlikely, since descriptor
+		 * allocation is gated at the wq size. If we receive -EAGAIN,
+		 * something went wrong, such as the device not accepting
+		 * descriptors at all.
+ */
+ rc = enqcmds(portal, desc->hw);
+ if (rc < 0)
+ return rc;
+ }
/*
* Pending the descriptor to the lockless list for the irq_entry
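
The split above is the key behavioral difference between WQ modes: a dedicated WQ is written with iosubmit_cmds512() (MOVDIR64B, a posted write that cannot report backpressure, which is why the allocator must bound outstanding descriptors), while a shared WQ uses enqcmds() (ENQCMDS), a non-posted write that reports whether the device accepted the PASID-tagged descriptor and can return -EAGAIN. A hypothetical retry wrapper, which this driver deliberately does not use since allocation is already gated at the wq size:

#include <errno.h>

/* Retry an ENQCMDS-style submit a bounded number of times (sketch only). */
static int submit_with_retry(int (*submit)(void *portal, void *desc),
			     void *portal, void *desc, int retries)
{
	int rc;

	do {
		rc = submit(portal, desc);
	} while (rc == -EAGAIN && retries-- > 0);

	return rc;
}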
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 07a5db06a29a..4dbb03c545e4 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -41,14 +41,24 @@ static struct device_type dsa_device_type = {
.release = idxd_conf_device_release,
};
+static struct device_type iax_device_type = {
+ .name = "iax",
+ .release = idxd_conf_device_release,
+};
+
static inline bool is_dsa_dev(struct device *dev)
{
return dev ? dev->type == &dsa_device_type : false;
}
+static inline bool is_iax_dev(struct device *dev)
+{
+ return dev ? dev->type == &iax_device_type : false;
+}
+
static inline bool is_idxd_dev(struct device *dev)
{
- return is_dsa_dev(dev);
+ return is_dsa_dev(dev) || is_iax_dev(dev);
}
static inline bool is_idxd_wq_dev(struct device *dev)
@@ -175,6 +185,30 @@ static int idxd_config_bus_probe(struct device *dev)
return -EINVAL;
}
+ /* Shared WQ checks */
+ if (wq_shared(wq)) {
+ if (!device_swq_supported(idxd)) {
+ dev_warn(dev,
+ "PASID not enabled and shared WQ.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -ENXIO;
+ }
+ /*
+ * Shared wq with the threshold set to 0 means the user
+ * did not set the threshold or transitioned from a
+ * dedicated wq but did not set threshold. A value
+ * of 0 would effectively disable the shared wq. The
+ * driver does not allow a value of 0 to be set for
+ * threshold via sysfs.
+ */
+ if (wq->threshold == 0) {
+ dev_warn(dev,
+ "Shared WQ and threshold 0.\n");
+ mutex_unlock(&wq->wq_lock);
+ return -EINVAL;
+ }
+ }
+
rc = idxd_wq_alloc_resources(wq);
if (rc < 0) {
mutex_unlock(&wq->wq_lock);
@@ -335,8 +369,17 @@ struct bus_type dsa_bus_type = {
.shutdown = idxd_config_bus_shutdown,
};
+struct bus_type iax_bus_type = {
+ .name = "iax",
+ .match = idxd_config_bus_match,
+ .probe = idxd_config_bus_probe,
+ .remove = idxd_config_bus_remove,
+ .shutdown = idxd_config_bus_shutdown,
+};
+
static struct bus_type *idxd_bus_types[] = {
- &dsa_bus_type
+ &dsa_bus_type,
+ &iax_bus_type
};
static struct idxd_device_driver dsa_drv = {
@@ -348,8 +391,18 @@ static struct idxd_device_driver dsa_drv = {
},
};
+static struct idxd_device_driver iax_drv = {
+ .drv = {
+ .name = "iax",
+ .bus = &iax_bus_type,
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+};
+
static struct idxd_device_driver *idxd_drvs[] = {
- &dsa_drv
+ &dsa_drv,
+ &iax_drv
};
struct bus_type *idxd_get_bus_type(struct idxd_device *idxd)
@@ -361,6 +414,8 @@ static struct device_type *idxd_get_device_type(struct idxd_device *idxd)
{
if (idxd->type == IDXD_TYPE_DSA)
return &dsa_device_type;
+ else if (idxd->type == IDXD_TYPE_IAX)
+ return &iax_device_type;
else
return NULL;
}
@@ -379,7 +434,7 @@ int idxd_register_driver(void)
return 0;
drv_fail:
- for (; i > 0; i--)
+ while (--i >= 0)
driver_unregister(&idxd_drvs[i]->drv);
return rc;
}
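
The loop rewrite fixes a classic unwind off-by-one: with registration failing at index i, "for (; i > 0; i--)" would unregister the entry that never registered (index i) and skip index 0, while "while (--i >= 0)" visits exactly the entries 0 through i-1 that did register. In miniature:

#include <stdio.h>

int main(void)
{
	int i = 2;	/* entry 2 just failed to register */

	while (--i >= 0)
		printf("unregister %d\n", i);	/* prints 1, then 0 */
	return 0;
}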
@@ -501,6 +556,9 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (rc < 0)
return -EINVAL;
+ if (idxd->type == IDXD_TYPE_IAX)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -546,6 +604,9 @@ static ssize_t group_tokens_allowed_store(struct device *dev,
if (rc < 0)
return -EINVAL;
+ if (idxd->type == IDXD_TYPE_IAX)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -588,6 +649,9 @@ static ssize_t group_use_token_limit_store(struct device *dev,
if (rc < 0)
return -EINVAL;
+ if (idxd->type == IDXD_TYPE_IAX)
+ return -EOPNOTSUPP;
+
if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
return -EPERM;
@@ -875,6 +939,8 @@ static ssize_t wq_mode_store(struct device *dev,
if (sysfs_streq(buf, "dedicated")) {
set_bit(WQ_FLAG_DEDICATED, &wq->flags);
wq->threshold = 0;
+ } else if (sysfs_streq(buf, "shared") && device_swq_supported(idxd)) {
+ clear_bit(WQ_FLAG_DEDICATED, &wq->flags);
} else {
return -EINVAL;
}
@@ -973,6 +1039,87 @@ static ssize_t wq_priority_store(struct device *dev,
static struct device_attribute dev_attr_wq_priority =
__ATTR(priority, 0644, wq_priority_show, wq_priority_store);
+static ssize_t wq_block_on_fault_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n",
+ test_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags));
+}
+
+static ssize_t wq_block_on_fault_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ bool bof;
+ int rc;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -ENXIO;
+
+ rc = kstrtobool(buf, &bof);
+ if (rc < 0)
+ return rc;
+
+ if (bof)
+ set_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+ else
+ clear_bit(WQ_FLAG_BLOCK_ON_FAULT, &wq->flags);
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_block_on_fault =
+ __ATTR(block_on_fault, 0644, wq_block_on_fault_show,
+ wq_block_on_fault_store);
+
+static ssize_t wq_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->threshold);
+}
+
+static ssize_t wq_threshold_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ unsigned int val;
+ int rc;
+
+ rc = kstrtouint(buf, 0, &val);
+ if (rc < 0)
+ return -EINVAL;
+
+ if (val > wq->size || val <= 0)
+ return -EINVAL;
+
+ if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
+ return -EPERM;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -ENXIO;
+
+ if (test_bit(WQ_FLAG_DEDICATED, &wq->flags))
+ return -EINVAL;
+
+ wq->threshold = val;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_threshold =
+ __ATTR(threshold, 0644, wq_threshold_show, wq_threshold_store);
+
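/*
 * [Editorial sketch] How the new knobs compose from userspace: a shared WQ
 * is selected with mode=shared and, since it is not dedicated, needs a
 * nonzero threshold (at most the WQ size) while the WQ is still disabled.
 * The sysfs paths below (dsa0/wq0.0) are illustrative; error handling is
 * trimmed.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void wr(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	wr("/sys/bus/dsa/devices/dsa0/wq0.0/mode", "shared");
	wr("/sys/bus/dsa/devices/dsa0/wq0.0/threshold", "8");
	return 0;
}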
static ssize_t wq_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1044,6 +1191,13 @@ static ssize_t wq_name_store(struct device *dev,
if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0)
return -EINVAL;
+ /*
+ * This is temporarily placed here until we have SVM support for
+ * dmaengine.
+ */
+ if (wq->type == IDXD_WQT_KERNEL && device_pasid_enabled(wq->idxd))
+ return -EOPNOTSUPP;
+
memset(wq->name, 0, WQ_NAME_SIZE + 1);
strncpy(wq->name, buf, WQ_NAME_SIZE);
strreplace(wq->name, '\n', '\0');
@@ -1147,6 +1301,39 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu
static struct device_attribute dev_attr_wq_max_batch_size =
__ATTR(max_batch_size, 0644, wq_max_batch_size_show, wq_max_batch_size_store);
+static ssize_t wq_ats_disable_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+
+ return sprintf(buf, "%u\n", wq->ats_dis);
+}
+
+static ssize_t wq_ats_disable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev);
+ struct idxd_device *idxd = wq->idxd;
+ bool ats_dis;
+ int rc;
+
+ if (wq->state != IDXD_WQ_DISABLED)
+ return -EPERM;
+
+ if (!idxd->hw.wq_cap.wq_ats_support)
+ return -EOPNOTSUPP;
+
+ rc = kstrtobool(buf, &ats_dis);
+ if (rc < 0)
+ return rc;
+
+ wq->ats_dis = ats_dis;
+
+ return count;
+}
+
+static struct device_attribute dev_attr_wq_ats_disable =
+ __ATTR(ats_disable, 0644, wq_ats_disable_show, wq_ats_disable_store);
+
static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_clients.attr,
&dev_attr_wq_state.attr,
@@ -1154,11 +1341,14 @@ static struct attribute *idxd_wq_attributes[] = {
&dev_attr_wq_mode.attr,
&dev_attr_wq_size.attr,
&dev_attr_wq_priority.attr,
+ &dev_attr_wq_block_on_fault.attr,
+ &dev_attr_wq_threshold.attr,
&dev_attr_wq_type.attr,
&dev_attr_wq_name.attr,
&dev_attr_wq_cdev_minor.attr,
&dev_attr_wq_max_transfer_size.attr,
&dev_attr_wq_max_batch_size.attr,
+ &dev_attr_wq_ats_disable.attr,
NULL,
};
@@ -1305,6 +1495,16 @@ static ssize_t clients_show(struct device *dev,
}
static DEVICE_ATTR_RO(clients);
+static ssize_t pasid_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct idxd_device *idxd =
+ container_of(dev, struct idxd_device, conf_dev);
+
+ return sprintf(buf, "%u\n", device_pasid_enabled(idxd));
+}
+static DEVICE_ATTR_RO(pasid_enabled);
+
static ssize_t state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1424,6 +1624,7 @@ static struct attribute *idxd_device_attributes[] = {
&dev_attr_gen_cap.attr,
&dev_attr_configurable.attr,
&dev_attr_clients.attr,
+ &dev_attr_pasid_enabled.attr,
&dev_attr_state.attr,
&dev_attr_errors.attr,
&dev_attr_max_tokens.attr,
@@ -1639,7 +1840,7 @@ int idxd_register_bus_type(void)
return 0;
bus_err:
- for (; i > 0; i--)
+ while (--i >= 0)
bus_unregister(idxd_bus_types[i]);
return rc;
}
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 670db04b0757..7f116bbcfad2 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -191,32 +191,13 @@ struct imxdma_filter_data {
int request;
};
-static const struct platform_device_id imx_dma_devtype[] = {
- {
- .name = "imx1-dma",
- .driver_data = IMX1_DMA,
- }, {
- .name = "imx21-dma",
- .driver_data = IMX21_DMA,
- }, {
- .name = "imx27-dma",
- .driver_data = IMX27_DMA,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, imx_dma_devtype);
-
static const struct of_device_id imx_dma_of_dev_id[] = {
{
- .compatible = "fsl,imx1-dma",
- .data = &imx_dma_devtype[IMX1_DMA],
+ .compatible = "fsl,imx1-dma", .data = (const void *)IMX1_DMA,
}, {
- .compatible = "fsl,imx21-dma",
- .data = &imx_dma_devtype[IMX21_DMA],
+ .compatible = "fsl,imx21-dma", .data = (const void *)IMX21_DMA,
}, {
- .compatible = "fsl,imx27-dma",
- .data = &imx_dma_devtype[IMX27_DMA],
+ .compatible = "fsl,imx27-dma", .data = (const void *)IMX27_DMA,
}, {
/* sentinel */
}
@@ -1056,20 +1037,15 @@ static int __init imxdma_probe(struct platform_device *pdev)
{
struct imxdma_engine *imxdma;
struct resource *res;
- const struct of_device_id *of_id;
int ret, i;
int irq, irq_err;
- of_id = of_match_device(imx_dma_of_dev_id, &pdev->dev);
- if (of_id)
- pdev->id_entry = of_id->data;
-
imxdma = devm_kzalloc(&pdev->dev, sizeof(*imxdma), GFP_KERNEL);
if (!imxdma)
return -ENOMEM;
imxdma->dev = &pdev->dev;
- imxdma->devtype = pdev->id_entry->driver_data;
+ imxdma->devtype = (enum imx_dma_type)of_device_get_match_data(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
imxdma->base = devm_ioremap_resource(&pdev->dev, res);
@@ -1263,7 +1239,6 @@ static struct platform_driver imxdma_driver = {
.name = "imx-dma",
.of_match_table = imx_dma_of_dev_id,
},
- .id_table = imx_dma_devtype,
.remove = imxdma_remove,
};
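/*
 * [Editorial sketch] The conversion idiom in this hunk: the devtype is kept
 * directly in the OF match table's .data and fetched in probe with
 * of_device_get_match_data(), so the platform_device_id table and the
 * pdev->id_entry indirection can be dropped. Names are illustrative:
 */
#include <linux/of_device.h>
#include <linux/platform_device.h>

enum demo_type { DEMO_V1 = 1 };

static const struct of_device_id demo_of_ids[] = {
	{ .compatible = "vendor,demo-v1", .data = (const void *)DEMO_V1 },
	{ /* sentinel */ }
};

static int demo_probe(struct platform_device *pdev)
{
	enum demo_type type =
		(enum demo_type)of_device_get_match_data(&pdev->dev);

	dev_info(&pdev->dev, "devtype %d\n", type);
	return 0;
}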
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 16b908c77db3..41ba21eea7c8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -566,37 +566,6 @@ static struct sdma_driver_data sdma_imx8mq = {
.check_ratio = 1,
};
-static const struct platform_device_id sdma_devtypes[] = {
- {
- .name = "imx25-sdma",
- .driver_data = (unsigned long)&sdma_imx25,
- }, {
- .name = "imx31-sdma",
- .driver_data = (unsigned long)&sdma_imx31,
- }, {
- .name = "imx35-sdma",
- .driver_data = (unsigned long)&sdma_imx35,
- }, {
- .name = "imx51-sdma",
- .driver_data = (unsigned long)&sdma_imx51,
- }, {
- .name = "imx53-sdma",
- .driver_data = (unsigned long)&sdma_imx53,
- }, {
- .name = "imx6q-sdma",
- .driver_data = (unsigned long)&sdma_imx6q,
- }, {
- .name = "imx7d-sdma",
- .driver_data = (unsigned long)&sdma_imx7d,
- }, {
- .name = "imx8mq-sdma",
- .driver_data = (unsigned long)&sdma_imx8mq,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, sdma_devtypes);
-
static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
@@ -1998,11 +1967,7 @@ static int sdma_probe(struct platform_device *pdev)
s32 *saddr_arr;
const struct sdma_driver_data *drvdata = NULL;
- if (of_id)
- drvdata = of_id->data;
- else if (pdev->id_entry)
- drvdata = (void *)pdev->id_entry->driver_data;
-
+ drvdata = of_id->data;
if (!drvdata) {
dev_err(&pdev->dev, "unable to find driver data\n");
return -EINVAL;
@@ -2211,7 +2176,6 @@ static struct platform_driver sdma_driver = {
.name = "imx-sdma",
.of_match_table = sdma_dt_ids,
},
- .id_table = sdma_devtypes,
.remove = sdma_remove,
.probe = sdma_probe,
};
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 38036db284cb..104ad420abbe 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1160,14 +1160,13 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
struct idmac_tx_desc *desc, *descnew;
bool done = false;
u32 ready0, ready1, curbuf, err;
- unsigned long flags;
struct dmaengine_desc_callback cb;
/* IDMAC has cleared the respective BUFx_RDY bit; we manage the buffer */
dev_dbg(dev, "IDMAC irq %d, buf %d\n", irq, ichan->active_buffer);
- spin_lock_irqsave(&ipu_data.lock, flags);
+ spin_lock(&ipu_data.lock);
ready0 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF0_RDY);
ready1 = idmac_read_ipureg(&ipu_data, IPU_CHA_BUF1_RDY);
@@ -1176,7 +1175,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (err & (1 << chan_id)) {
idmac_write_ipureg(&ipu_data, 1 << chan_id, IPU_INT_STAT_4);
- spin_unlock_irqrestore(&ipu_data.lock, flags);
+ spin_unlock(&ipu_data.lock);
/*
* Doing this
* ichan->sg[0] = ichan->sg[1] = NULL;
@@ -1188,7 +1187,7 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
chan_id, ready0, ready1, curbuf);
return IRQ_HANDLED;
}
- spin_unlock_irqrestore(&ipu_data.lock, flags);
+ spin_unlock(&ipu_data.lock);
/* Other interrupts do not interfere with this channel */
spin_lock(&ichan->lock);
@@ -1251,9 +1250,9 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
if (unlikely(sgnew)) {
ipu_submit_buffer(ichan, descnew, sgnew, !ichan->active_buffer);
} else {
- spin_lock_irqsave(&ipu_data.lock, flags);
+ spin_lock(&ipu_data.lock);
ipu_ic_disable_task(&ipu_data, chan_id);
- spin_unlock_irqrestore(&ipu_data.lock, flags);
+ spin_unlock(&ipu_data.lock);
ichan->status = IPU_CHANNEL_READY;
/* Continue to check for complete descriptor */
}
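/*
 * [Editorial note] Why spin_lock_irqsave() can become plain spin_lock() in
 * this and the following handlers: they run in hardirq context, where local
 * interrupts are already disabled, so saving and restoring the IRQ flags is
 * pure overhead. Process-context users of the same lock must still disable
 * IRQs when taking it. A minimal sketch (names are illustrative):
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	int pending;
};

static irqreturn_t demo_irq(int irq, void *data)
{
	struct demo_dev *d = data;

	spin_lock(&d->lock);	/* IRQs are already off in hardirq context */
	d->pending++;
	spin_unlock(&d->lock);
	return IRQ_HANDLED;
}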
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index f609a84c493c..d0b2e601e3e5 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -223,24 +223,23 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
i = __ffs(stat);
stat &= ~BIT(i);
if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
- unsigned long flags;
p = &d->phy[i];
c = p->vchan;
if (c && (tc1 & BIT(i))) {
- spin_lock_irqsave(&c->vc.lock, flags);
+ spin_lock(&c->vc.lock);
if (p->ds_run != NULL) {
vchan_cookie_complete(&p->ds_run->vd);
p->ds_done = p->ds_run;
p->ds_run = NULL;
}
- spin_unlock_irqrestore(&c->vc.lock, flags);
+ spin_unlock(&c->vc.lock);
}
if (c && (tc2 & BIT(i))) {
- spin_lock_irqsave(&c->vc.lock, flags);
+ spin_lock(&c->vc.lock);
if (p->ds_run != NULL)
vchan_cyclic_callback(&p->ds_run->vd);
- spin_unlock_irqrestore(&c->vc.lock, flags);
+ spin_unlock(&c->vc.lock);
}
irq_chan |= BIT(i);
}
diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
index f133ae8dece1..6ad8afbb95f2 100644
--- a/drivers/dma/mediatek/mtk-hsdma.c
+++ b/drivers/dma/mediatek/mtk-hsdma.c
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
return 0;
err_free:
+ mtk_hsdma_hw_deinit(hsdma);
of_dma_controller_free(pdev->dev.of_node);
err_unregister:
dma_async_device_unregister(dd);
diff --git a/drivers/dma/milbeaut-xdmac.c b/drivers/dma/milbeaut-xdmac.c
index 85a597228fb0..d29d01e730aa 100644
--- a/drivers/dma/milbeaut-xdmac.c
+++ b/drivers/dma/milbeaut-xdmac.c
@@ -160,10 +160,9 @@ static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
{
struct milbeaut_xdmac_chan *mc = dev_id;
struct milbeaut_xdmac_desc *md;
- unsigned long flags;
u32 val;
- spin_lock_irqsave(&mc->vc.lock, flags);
+ spin_lock(&mc->vc.lock);
/* Ack and Stop */
val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
@@ -177,7 +176,7 @@ static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
milbeaut_xdmac_start(mc);
out:
- spin_unlock_irqrestore(&mc->vc.lock, flags);
+ spin_unlock(&mc->vc.lock);
return IRQ_HANDLED;
}
@@ -351,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
ret = dma_async_device_register(ddev);
if (ret)
- return ret;
+ goto disable_xdmac;
ret = of_dma_controller_register(dev->of_node,
of_dma_simple_xlate, mdev);
@@ -364,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
unregister_dmac:
dma_async_device_unregister(ddev);
+disable_xdmac:
+ disable_xdmac(mdev);
return ret;
}
diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c
index 347146a6e1d0..74755093e14b 100644
--- a/drivers/dma/moxart-dma.c
+++ b/drivers/dma/moxart-dma.c
@@ -524,7 +524,6 @@ static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
struct moxart_dmadev *mc = devid;
struct moxart_chan *ch = &mc->slave_chans[0];
unsigned int i;
- unsigned long flags;
u32 ctrl;
dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__);
@@ -541,14 +540,14 @@ static irqreturn_t moxart_dma_interrupt(int irq, void *devid)
if (ctrl & APB_DMA_FIN_INT_STS) {
ctrl &= ~APB_DMA_FIN_INT_STS;
if (ch->desc) {
- spin_lock_irqsave(&ch->vc.lock, flags);
+ spin_lock(&ch->vc.lock);
if (++ch->sgidx < ch->desc->sglen) {
moxart_dma_start_sg(ch, ch->sgidx);
} else {
vchan_cookie_complete(&ch->desc->vd);
moxart_dma_start_desc(&ch->vc.chan);
}
- spin_unlock_irqrestore(&ch->vc.lock, flags);
+ spin_unlock(&ch->vc.lock);
}
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 00cd1335eeba..23b232b57518 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1455,7 +1455,7 @@ static struct platform_driver mv_xor_driver = {
.resume = mv_xor_resume,
.driver = {
.name = MV_XOR_NAME,
- .of_match_table = of_match_ptr(mv_xor_dt_ids),
+ .of_match_table = mv_xor_dt_ids,
},
};
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c
index 2753a6b916f6..9b0d463f89bb 100644
--- a/drivers/dma/mv_xor_v2.c
+++ b/drivers/dma/mv_xor_v2.c
@@ -771,8 +771,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev)
goto disable_clk;
msi_desc = first_msi_entry(&pdev->dev);
- if (!msi_desc)
+ if (!msi_desc) {
+ ret = -ENODEV;
goto free_msi_irqs;
+ }
xor_dev->msi_desc = msi_desc;
ret = devm_request_irq(&pdev->dev, msi_desc->irq,
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 65f816b40c32..994fc4d2aca4 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -167,29 +167,11 @@ static struct mxs_dma_type mxs_dma_types[] = {
}
};
-static const struct platform_device_id mxs_dma_ids[] = {
- {
- .name = "imx23-dma-apbh",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
- }, {
- .name = "imx23-dma-apbx",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
- }, {
- .name = "imx28-dma-apbh",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
- }, {
- .name = "imx28-dma-apbx",
- .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
- }, {
- /* end of list */
- }
-};
-
static const struct of_device_id mxs_dma_dt_ids[] = {
- { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
- { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
- { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
- { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
+ { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_types[0], },
+ { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_types[1], },
+ { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_types[2], },
+ { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_types[3], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
@@ -762,8 +744,6 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
static int __init mxs_dma_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- const struct platform_device_id *id_entry;
- const struct of_device_id *of_id;
const struct mxs_dma_type *dma_type;
struct mxs_dma_engine *mxs_dma;
struct resource *iores;
@@ -779,13 +759,7 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
return ret;
}
- of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
- if (of_id)
- id_entry = of_id->data;
- else
- id_entry = platform_get_device_id(pdev);
-
- dma_type = (struct mxs_dma_type *)id_entry->driver_data;
+ dma_type = (struct mxs_dma_type *)of_device_get_match_data(&pdev->dev);
mxs_dma->type = dma_type->type;
mxs_dma->dev_id = dma_type->id;
@@ -865,7 +839,6 @@ static struct platform_driver mxs_dma_driver = {
.name = "mxs-dma",
.of_match_table = mxs_dma_dt_ids,
},
- .id_table = mxs_dma_ids,
};
static int __init mxs_dma_module_init(void)
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 8a4f608904b9..ec00b20ae8e4 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -75,8 +75,18 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
ofdma->dma_router->route_free(ofdma->dma_router->dev,
route_data);
} else {
+ int ret = 0;
+
chan->router = ofdma->dma_router;
chan->route_data = route_data;
+
+ if (chan->device->device_router_config)
+ ret = chan->device->device_router_config(chan);
+
+ if (ret) {
+ dma_release_channel(chan);
+ chan = ERR_PTR(ret);
+ }
}
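/*
 * [Editorial sketch] The hook exercised above: a dmaengine provider sitting
 * behind a DMA router may implement the optional device_router_config()
 * callback; of_dma_router_xlate() now calls it once the router is attached
 * and releases the channel if it fails. Illustrative provider side:
 */
#include <linux/dmaengine.h>

struct demo_chan {
	struct dma_chan chan;
	u32 event;
};

static int demo_router_config(struct dma_chan *chan)
{
	struct demo_chan *dc = container_of(chan, struct demo_chan, chan);

	/* chan->route_data was produced by the router's route_allocate() */
	dc->event = *(u32 *)chan->route_data;
	return 0;
}

/* in probe: dma_dev->device_router_config = demo_router_config; */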
/*
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 0f5c19370f6d..bc0f66af0f11 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1527,8 +1527,6 @@ static int pl330_submit_req(struct pl330_thread *thrd,
/* First dry run to check if req is acceptable */
ret = _setup_req(pl330, 1, thrd, idx, &xs);
- if (ret < 0)
- goto xfer_exit;
if (ret > pl330->mcbufsz / 2) {
dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 71cdaaa8134c..df7704053d91 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -69,7 +69,7 @@ struct ppc_dma_chan_ref {
};
/* The list of channels exported by ppc440spe ADMA */
-struct list_head
+static struct list_head
ppc440spe_adma_chan_list = LIST_HEAD_INIT(ppc440spe_adma_chan_list);
/* This flag is set when want to refetch the xor chain in the interrupt
@@ -559,7 +559,6 @@ static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
int sg_index, unsigned char mult_value)
{
struct dma_cdb *dma_hw_desc;
- struct xor_cb *xor_hw_desc;
u32 *psgu;
switch (chan->device->id) {
@@ -590,7 +589,6 @@ static void ppc440spe_desc_set_src_mult(struct ppc440spe_adma_desc_slot *desc,
*psgu |= cpu_to_le32(mult_value << mult_index);
break;
case PPC440SPE_XOR_ID:
- xor_hw_desc = desc->hw_desc;
break;
default:
BUG();
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 349fb312c872..4a2a796e348c 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -606,7 +606,6 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
struct pxad_chan *chan = phy->vchan;
struct virt_dma_desc *vd, *tmp;
unsigned int dcsr;
- unsigned long flags;
bool vd_completed;
dma_cookie_t last_started = 0;
@@ -616,7 +615,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
if (dcsr & PXA_DCSR_RUN)
return IRQ_NONE;
- spin_lock_irqsave(&chan->vc.lock, flags);
+ spin_lock(&chan->vc.lock);
list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
vd_completed = is_desc_completed(vd);
dev_dbg(&chan->vc.chan.dev->device,
@@ -658,7 +657,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
pxad_launch_chan(chan, to_pxad_sw_desc(vd));
}
}
- spin_unlock_irqrestore(&chan->vc.lock, flags);
+ spin_unlock(&chan->vc.lock);
wake_up(&chan->wq_state);
return IRQ_HANDLED;
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
index 3bcb689162c6..365f94eb3b08 100644
--- a/drivers/dma/qcom/Kconfig
+++ b/drivers/dma/qcom/Kconfig
@@ -1,4 +1,15 @@
# SPDX-License-Identifier: GPL-2.0-only
+config QCOM_ADM
+ tristate "Qualcomm ADM support"
+ depends on (ARCH_QCOM || COMPILE_TEST) && !PHYS_ADDR_T_64BIT
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the Qualcomm Application Data Mover (ADM) DMA
+ controller, as present on MSM8x60, APQ8064, and IPQ8064 devices.
+ This controller provides DMA capabilities for both general purpose
+ and on-chip peripheral devices.
+
config QCOM_BAM_DMA
tristate "QCOM BAM DMA support"
depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
@@ -8,6 +19,18 @@ config QCOM_BAM_DMA
Enable support for the QCOM BAM DMA controller. This controller
provides DMA capabilities for a variety of on-chip devices.
+config QCOM_GPI_DMA
+ tristate "Qualcomm Technologies GPI DMA support"
+ depends on ARCH_QCOM
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Enable support for the QCOM GPI DMA controller. This controller
+ provides DMA capabilities for a variety of peripheral buses such
+ as I2C, UART, and SPI. Using the GPI dmaengine driver, bus drivers
+ can use a standardized, protocol-independent interface to transfer
+ data between DDR and the peripheral.
+
config QCOM_HIDMA_MGMT
tristate "Qualcomm Technologies HIDMA Management support"
select DMA_ENGINE
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
index 1ae92da88b0c..50f1e7014693 100644
--- a/drivers/dma/qcom/Makefile
+++ b/drivers/dma/qcom/Makefile
@@ -1,5 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_QCOM_ADM) += qcom_adm.o
obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
+obj-$(CONFIG_QCOM_GPI_DMA) += gpi.o
obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
obj-$(CONFIG_QCOM_HIDMA) += hdma.o
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 4eeb8bb27279..88579857ca1d 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
GFP_NOWAIT);
if (!async_desc)
- goto err_out;
+ return NULL;
if (flags & DMA_PREP_FENCE)
async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
}
return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
- kfree(async_desc);
- return NULL;
}
/**
@@ -875,7 +871,7 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
ret = bam_pm_runtime_get_sync(bdev->dev);
if (ret < 0)
- return ret;
+ return IRQ_NONE;
if (srcs & BAM_IRQ) {
clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
new file mode 100644
index 000000000000..1a0bf6b0567a
--- /dev/null
+++ b/drivers/dma/qcom/gpi.c
@@ -0,0 +1,2303 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2020, Linaro Limited
+ */
+
+#include <dt-bindings/dma/qcom-gpi.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/dma/qcom-gpi-dma.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
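+/*
+ * [Editorial sketch, not part of the patch] A GPI client is an ordinary
+ * dmaengine slave: it requests a channel by name and hands the
+ * protocol-specific setup (e.g. struct gpi_spi_config from
+ * <linux/dma/qcom-gpi-dma.h>) to the driver through dma_slave_config's
+ * peripheral_config/peripheral_size members, introduced alongside this
+ * driver:
+ *
+ *	struct gpi_spi_config spi_cfg = { 0 };	(fields per qcom-gpi-dma.h)
+ *	struct dma_slave_config cfg = {
+ *		.direction = DMA_MEM_TO_DEV,
+ *		.peripheral_config = &spi_cfg,
+ *		.peripheral_size = sizeof(spi_cfg),
+ *	};
+ *	struct dma_chan *tx = dma_request_chan(dev, "tx");
+ *
+ *	if (!IS_ERR(tx))
+ *		dmaengine_slave_config(tx, &cfg);
+ */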
+#define TRE_TYPE_DMA 0x10
+#define TRE_TYPE_GO 0x20
+#define TRE_TYPE_CONFIG0 0x22
+
+/* TRE flags */
+#define TRE_FLAGS_CHAIN BIT(0)
+#define TRE_FLAGS_IEOB BIT(8)
+#define TRE_FLAGS_IEOT BIT(9)
+#define TRE_FLAGS_BEI BIT(10)
+#define TRE_FLAGS_LINK BIT(11)
+#define TRE_FLAGS_TYPE GENMASK(23, 16)
+
+/* SPI CONFIG0 WD0 */
+#define TRE_SPI_C0_WORD_SZ GENMASK(4, 0)
+#define TRE_SPI_C0_LOOPBACK BIT(8)
+#define TRE_SPI_C0_CS BIT(11)
+#define TRE_SPI_C0_CPHA BIT(12)
+#define TRE_SPI_C0_CPOL BIT(13)
+#define TRE_SPI_C0_TX_PACK BIT(24)
+#define TRE_SPI_C0_RX_PACK BIT(25)
+
+/* CONFIG0 WD2 */
+#define TRE_C0_CLK_DIV GENMASK(11, 0)
+#define TRE_C0_CLK_SRC GENMASK(19, 16)
+
+/* SPI GO WD0 */
+#define TRE_SPI_GO_CMD GENMASK(4, 0)
+#define TRE_SPI_GO_CS GENMASK(10, 8)
+#define TRE_SPI_GO_FRAG BIT(26)
+
+/* GO WD2 */
+#define TRE_RX_LEN GENMASK(23, 0)
+
+/* I2C Config0 WD0 */
+#define TRE_I2C_C0_TLOW GENMASK(7, 0)
+#define TRE_I2C_C0_THIGH GENMASK(15, 8)
+#define TRE_I2C_C0_TCYL GENMASK(23, 16)
+#define TRE_I2C_C0_TX_PACK BIT(24)
+#define TRE_I2C_C0_RX_PACK BIT(25)
+
+/* I2C GO WD0 */
+#define TRE_I2C_GO_CMD GENMASK(4, 0)
+#define TRE_I2C_GO_ADDR GENMASK(14, 8)
+#define TRE_I2C_GO_STRETCH BIT(26)
+
+/* DMA TRE */
+#define TRE_DMA_LEN GENMASK(23, 0)
+
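+/*
+ * [Editorial sketch] How these field macros compose a TRE: a plain DMA TRE
+ * carries the buffer address in dwords 0/1, the length in dword 2 and the
+ * type/flags in dword 3 (struct gpi_tre is defined further down), e.g.
+ *
+ *	tre->dword[0] = lower_32_bits(addr);
+ *	tre->dword[1] = upper_32_bits(addr);
+ *	tre->dword[2] = FIELD_PREP(TRE_DMA_LEN, len);
+ *	tre->dword[3] = FIELD_PREP(TRE_FLAGS_TYPE, TRE_TYPE_DMA) | TRE_FLAGS_IEOT;
+ */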
+/* Register offsets from gpi-top */
+#define GPII_n_CH_k_CNTXT_0_OFFS(n, k) (0x20000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_CNTXT_0_EL_SIZE GENMASK(31, 24)
+#define GPII_n_CH_k_CNTXT_0_CHSTATE GENMASK(23, 20)
+#define GPII_n_CH_k_CNTXT_0_ERIDX GENMASK(18, 14)
+#define GPII_n_CH_k_CNTXT_0_DIR BIT(3)
+#define GPII_n_CH_k_CNTXT_0_PROTO GENMASK(2, 0)
+
+#define GPII_n_CH_k_CNTXT_0(el_size, erindex, dir, chtype_proto) \
+ (FIELD_PREP(GPII_n_CH_k_CNTXT_0_EL_SIZE, el_size) | \
+ FIELD_PREP(GPII_n_CH_k_CNTXT_0_ERIDX, erindex) | \
+ FIELD_PREP(GPII_n_CH_k_CNTXT_0_DIR, dir) | \
+ FIELD_PREP(GPII_n_CH_k_CNTXT_0_PROTO, chtype_proto))
+
+#define GPI_CHTYPE_DIR_IN (0)
+#define GPI_CHTYPE_DIR_OUT (1)
+
+#define GPI_CHTYPE_PROTO_GPI (0x2)
+
+#define GPII_n_CH_k_DOORBELL_0_OFFS(n, k) (0x22000 + (0x4000 * (n)) + (0x8 * (k)))
+#define GPII_n_CH_CMD_OFFS(n) (0x23008 + (0x4000 * (n)))
+#define GPII_n_CH_CMD_OPCODE GENMASK(31, 24)
+#define GPII_n_CH_CMD_CHID GENMASK(7, 0)
+#define GPII_n_CH_CMD(opcode, chid) \
+ (FIELD_PREP(GPII_n_CH_CMD_OPCODE, opcode) | \
+ FIELD_PREP(GPII_n_CH_CMD_CHID, chid))
+
+#define GPII_n_CH_CMD_ALLOCATE (0)
+#define GPII_n_CH_CMD_START (1)
+#define GPII_n_CH_CMD_STOP (2)
+#define GPII_n_CH_CMD_RESET (9)
+#define GPII_n_CH_CMD_DE_ALLOC (10)
+#define GPII_n_CH_CMD_UART_SW_STALE (32)
+#define GPII_n_CH_CMD_UART_RFR_READY (33)
+#define GPII_n_CH_CMD_UART_RFR_NOT_READY (34)
+
+/* EV Context Array */
+#define GPII_n_EV_CH_k_CNTXT_0_OFFS(n, k) (0x21000 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_EV_k_CNTXT_0_EL_SIZE GENMASK(31, 24)
+#define GPII_n_EV_k_CNTXT_0_CHSTATE GENMASK(23, 20)
+#define GPII_n_EV_k_CNTXT_0_INTYPE BIT(16)
+#define GPII_n_EV_k_CNTXT_0_CHTYPE GENMASK(3, 0)
+
+#define GPII_n_EV_k_CNTXT_0(el_size, inttype, chtype) \
+ (FIELD_PREP(GPII_n_EV_k_CNTXT_0_EL_SIZE, el_size) | \
+ FIELD_PREP(GPII_n_EV_k_CNTXT_0_INTYPE, inttype) | \
+ FIELD_PREP(GPII_n_EV_k_CNTXT_0_CHTYPE, chtype))
+
+#define GPI_INTTYPE_IRQ (1)
+#define GPI_CHTYPE_GPI_EV (0x2)
+
+enum CNTXT_OFFS {
+ CNTXT_0_CONFIG = 0x0,
+ CNTXT_1_R_LENGTH = 0x4,
+ CNTXT_2_RING_BASE_LSB = 0x8,
+ CNTXT_3_RING_BASE_MSB = 0xC,
+ CNTXT_4_RING_RP_LSB = 0x10,
+ CNTXT_5_RING_RP_MSB = 0x14,
+ CNTXT_6_RING_WP_LSB = 0x18,
+ CNTXT_7_RING_WP_MSB = 0x1C,
+ CNTXT_8_RING_INT_MOD = 0x20,
+ CNTXT_9_RING_INTVEC = 0x24,
+ CNTXT_10_RING_MSI_LSB = 0x28,
+ CNTXT_11_RING_MSI_MSB = 0x2C,
+ CNTXT_12_RING_RP_UPDATE_LSB = 0x30,
+ CNTXT_13_RING_RP_UPDATE_MSB = 0x34,
+};
+
+#define GPII_n_EV_CH_k_DOORBELL_0_OFFS(n, k) (0x22100 + (0x4000 * (n)) + (0x8 * (k)))
+#define GPII_n_EV_CH_CMD_OFFS(n) (0x23010 + (0x4000 * (n)))
+#define GPII_n_EV_CMD_OPCODE GENMASK(31, 24)
+#define GPII_n_EV_CMD_CHID GENMASK(7, 0)
+#define GPII_n_EV_CMD(opcode, chid) \
+ (FIELD_PREP(GPII_n_EV_CMD_OPCODE, opcode) | \
+ FIELD_PREP(GPII_n_EV_CMD_CHID, chid))
+
+#define GPII_n_EV_CH_CMD_ALLOCATE (0x00)
+#define GPII_n_EV_CH_CMD_RESET (0x09)
+#define GPII_n_EV_CH_CMD_DE_ALLOC (0x0A)
+
+#define GPII_n_CNTXT_TYPE_IRQ_OFFS(n) (0x23080 + (0x4000 * (n)))
+
+/* mask type register */
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) (0x23088 + (0x4000 * (n)))
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK GENMASK(6, 0)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL BIT(6)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB BIT(3)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB BIT(2)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL BIT(1)
+#define GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL BIT(0)
+
+#define GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(n) (0x23090 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) (0x23094 + (0x4000 * (n)))
+
+/* Mask channel control interrupt register */
+#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(n) (0x23098 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK GENMASK(1, 0)
+
+/* Mask event control interrupt register */
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) (0x2309C + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK BIT(0)
+
+#define GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(n) (0x230A0 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) (0x230A4 + (0x4000 * (n)))
+
+/* Mask event interrupt register */
+#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) (0x230B8 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK BIT(0)
+
+#define GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) (0x230C0 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) (0x23100 + (0x4000 * (n)))
+#define GPI_GLOB_IRQ_ERROR_INT_MSK BIT(0)
+
+/* GPII specific Global - Enable bit register */
+#define GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(n) (0x23108 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) (0x23110 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(n) (0x23118 + (0x4000 * (n)))
+
+/* GPII general interrupt - Enable bit register */
+#define GPII_n_CNTXT_GPII_IRQ_EN_OFFS(n) (0x23120 + (0x4000 * (n)))
+#define GPII_n_CNTXT_GPII_IRQ_EN_BMSK GENMASK(3, 0)
+
+#define GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(n) (0x23128 + (0x4000 * (n)))
+
+/* GPII Interrupt Type register */
+#define GPII_n_CNTXT_INTSET_OFFS(n) (0x23180 + (0x4000 * (n)))
+#define GPII_n_CNTXT_INTSET_BMSK BIT(0)
+
+#define GPII_n_CNTXT_MSI_BASE_LSB_OFFS(n) (0x23188 + (0x4000 * (n)))
+#define GPII_n_CNTXT_MSI_BASE_MSB_OFFS(n) (0x2318C + (0x4000 * (n)))
+#define GPII_n_CNTXT_SCRATCH_0_OFFS(n) (0x23400 + (0x4000 * (n)))
+#define GPII_n_CNTXT_SCRATCH_1_OFFS(n) (0x23404 + (0x4000 * (n)))
+
+#define GPII_n_ERROR_LOG_OFFS(n) (0x23200 + (0x4000 * (n)))
+
+/* QOS Registers */
+#define GPII_n_CH_k_QOS_OFFS(n, k) (0x2005C + (0x4000 * (n)) + (0x80 * (k)))
+
+/* Scratch registers */
+#define GPII_n_CH_k_SCRATCH_0_OFFS(n, k) (0x20060 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_SCRATCH_0_SEID GENMASK(2, 0)
+#define GPII_n_CH_k_SCRATCH_0_PROTO GENMASK(7, 4)
+#define GPII_n_CH_k_SCRATCH_0_PAIR GENMASK(20, 16)
+#define GPII_n_CH_k_SCRATCH_0(pair, proto, seid) \
+ (FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PAIR, pair) | \
+ FIELD_PREP(GPII_n_CH_k_SCRATCH_0_PROTO, proto) | \
+ FIELD_PREP(GPII_n_CH_k_SCRATCH_0_SEID, seid))
+#define GPII_n_CH_k_SCRATCH_1_OFFS(n, k) (0x20064 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_SCRATCH_2_OFFS(n, k) (0x20068 + (0x4000 * (n)) + (0x80 * (k)))
+#define GPII_n_CH_k_SCRATCH_3_OFFS(n, k) (0x2006C + (0x4000 * (n)) + (0x80 * (k)))
+
+struct __packed gpi_tre {
+ u32 dword[4];
+};
+
+enum msm_gpi_tce_code {
+ MSM_GPI_TCE_SUCCESS = 1,
+ MSM_GPI_TCE_EOT = 2,
+ MSM_GPI_TCE_EOB = 4,
+ MSM_GPI_TCE_UNEXP_ERR = 16,
+};
+
+#define CMD_TIMEOUT_MS (250)
+
+#define MAX_CHANNELS_PER_GPII (2)
+#define GPI_TX_CHAN (0)
+#define GPI_RX_CHAN (1)
+#define STATE_IGNORE (U32_MAX)
+#define EV_FACTOR (2)
+#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
+#define CHAN_TRES 64
+
+struct __packed xfer_compl_event {
+ u64 ptr;
+ u32 length:24;
+ u8 code;
+ u16 status;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed immediate_data_event {
+ u8 data_bytes[8];
+ u8 length:4;
+ u8 resvd:4;
+ u16 tre_index;
+ u8 code;
+ u16 status;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed qup_notif_event {
+ u32 status;
+ u32 time;
+ u32 count:24;
+ u8 resvd;
+ u16 resvd1;
+ u8 type;
+ u8 chid;
+};
+
+struct __packed gpi_ere {
+ u32 dword[4];
+};
+
+enum GPI_EV_TYPE {
+ XFER_COMPLETE_EV_TYPE = 0x22,
+ IMMEDIATE_DATA_EV_TYPE = 0x30,
+ QUP_NOTIF_EV_TYPE = 0x31,
+ STALE_EV_TYPE = 0xFF,
+};
+
+union __packed gpi_event {
+ struct __packed xfer_compl_event xfer_compl_event;
+ struct __packed immediate_data_event immediate_data_event;
+ struct __packed qup_notif_event qup_notif_event;
+ struct __packed gpi_ere gpi_ere;
+};
+
+enum gpii_irq_settings {
+ DEFAULT_IRQ_SETTINGS,
+ MASK_IEOB_SETTINGS,
+};
+
+enum gpi_ev_state {
+ DEFAULT_EV_CH_STATE = 0,
+ EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
+ EV_STATE_ALLOCATED,
+ MAX_EV_STATES
+};
+
+static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
+ [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
+ [EV_STATE_ALLOCATED] = "ALLOCATED",
+};
+
+#define TO_GPI_EV_STATE_STR(_state) (((_state) >= MAX_EV_STATES) ? \
+ "INVALID" : gpi_ev_state_str[(_state)])
+
+enum gpi_ch_state {
+ DEFAULT_CH_STATE = 0x0,
+ CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
+ CH_STATE_ALLOCATED = 0x1,
+ CH_STATE_STARTED = 0x2,
+ CH_STATE_STOPPED = 0x3,
+ CH_STATE_STOP_IN_PROC = 0x4,
+ CH_STATE_ERROR = 0xf,
+ MAX_CH_STATES
+};
+
+enum gpi_cmd {
+ GPI_CH_CMD_BEGIN,
+ GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
+ GPI_CH_CMD_START,
+ GPI_CH_CMD_STOP,
+ GPI_CH_CMD_RESET,
+ GPI_CH_CMD_DE_ALLOC,
+ GPI_CH_CMD_UART_SW_STALE,
+ GPI_CH_CMD_UART_RFR_READY,
+ GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPI_EV_CMD_BEGIN,
+ GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
+ GPI_EV_CMD_RESET,
+ GPI_EV_CMD_DEALLOC,
+ GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
+ GPI_MAX_CMD,
+};
+
+#define IS_CHAN_CMD(_cmd) ((_cmd) <= GPI_CH_CMD_END)
+
+static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
+ [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
+ [GPI_CH_CMD_START] = "CH START",
+ [GPI_CH_CMD_STOP] = "CH STOP",
+ [GPI_CH_CMD_RESET] = "CH_RESET",
+ [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
+ [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
+ [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
+ [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
+ [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
+ [GPI_EV_CMD_RESET] = "EV RESET",
+ [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
+};
+
+#define TO_GPI_CMD_STR(_cmd) (((_cmd) >= GPI_MAX_CMD) ? "INVALID" : \
+ gpi_cmd_str[(_cmd)])
+
+/*
+ * @DISABLE_STATE: no register access allowed
+ * @CONFIG_STATE: client has configured the channel
+ * @PREPARE_HARDWARE: register access is allowed,
+ * but events are not processed yet
+ * @ACTIVE_STATE: channels are fully operational
+ * @PREPARE_TERMINATE: graceful termination of channels;
+ * register access is allowed
+ * @PAUSE_STATE: channels are active, but not processing any events
+ */
+enum gpi_pm_state {
+ DISABLE_STATE,
+ CONFIG_STATE,
+ PREPARE_HARDWARE,
+ ACTIVE_STATE,
+ PREPARE_TERMINATE,
+ PAUSE_STATE,
+ MAX_PM_STATE
+};
+
+#define REG_ACCESS_VALID(_pm_state) ((_pm_state) >= PREPARE_HARDWARE)
+
+static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
+ [DISABLE_STATE] = "DISABLE",
+ [CONFIG_STATE] = "CONFIG",
+ [PREPARE_HARDWARE] = "PREPARE HARDWARE",
+ [ACTIVE_STATE] = "ACTIVE",
+ [PREPARE_TERMINATE] = "PREPARE TERMINATE",
+ [PAUSE_STATE] = "PAUSE",
+};
+
+#define TO_GPI_PM_STR(_state) (((_state) >= MAX_PM_STATE) ? \
+ "INVALID" : gpi_pm_state_str[(_state)])
+
+static const struct {
+ enum gpi_cmd gpi_cmd;
+ u32 opcode;
+ u32 state;
+} gpi_cmd_info[GPI_MAX_CMD] = {
+ {
+ GPI_CH_CMD_ALLOCATE,
+ GPII_n_CH_CMD_ALLOCATE,
+ CH_STATE_ALLOCATED,
+ },
+ {
+ GPI_CH_CMD_START,
+ GPII_n_CH_CMD_START,
+ CH_STATE_STARTED,
+ },
+ {
+ GPI_CH_CMD_STOP,
+ GPII_n_CH_CMD_STOP,
+ CH_STATE_STOPPED,
+ },
+ {
+ GPI_CH_CMD_RESET,
+ GPII_n_CH_CMD_RESET,
+ CH_STATE_ALLOCATED,
+ },
+ {
+ GPI_CH_CMD_DE_ALLOC,
+ GPII_n_CH_CMD_DE_ALLOC,
+ CH_STATE_NOT_ALLOCATED,
+ },
+ {
+ GPI_CH_CMD_UART_SW_STALE,
+ GPII_n_CH_CMD_UART_SW_STALE,
+ STATE_IGNORE,
+ },
+ {
+ GPI_CH_CMD_UART_RFR_READY,
+ GPII_n_CH_CMD_UART_RFR_READY,
+ STATE_IGNORE,
+ },
+ {
+ GPI_CH_CMD_UART_RFR_NOT_READY,
+ GPII_n_CH_CMD_UART_RFR_NOT_READY,
+ STATE_IGNORE,
+ },
+ {
+ GPI_EV_CMD_ALLOCATE,
+ GPII_n_EV_CH_CMD_ALLOCATE,
+ EV_STATE_ALLOCATED,
+ },
+ {
+ GPI_EV_CMD_RESET,
+ GPII_n_EV_CH_CMD_RESET,
+ EV_STATE_ALLOCATED,
+ },
+ {
+ GPI_EV_CMD_DEALLOC,
+ GPII_n_EV_CH_CMD_DE_ALLOC,
+ EV_STATE_NOT_ALLOCATED,
+ },
+};
+
+struct gpi_ring {
+ void *pre_aligned;
+ size_t alloc_size;
+ phys_addr_t phys_addr;
+ dma_addr_t dma_handle;
+ void *base;
+ void *wp;
+ void *rp;
+ u32 len;
+ u32 el_size;
+ u32 elements;
+ bool configured;
+};
+
+struct gpi_dev {
+ struct dma_device dma_device;
+ struct device *dev;
+ struct resource *res;
+ void __iomem *regs;
+ void __iomem *ee_base; /* EE register base address */
+ u32 max_gpii; /* maximum # of gpii instances available per gpi block */
+ u32 gpii_mask; /* gpii instances available for apps */
+ u32 ev_factor; /* ev ring length factor */
+ struct gpii *gpiis;
+};
+
+struct reg_info {
+ char *name;
+ u32 offset;
+ u32 val;
+};
+
+struct gchan {
+ struct virt_dma_chan vc;
+ u32 chid;
+ u32 seid;
+ u32 protocol;
+ struct gpii *gpii;
+ enum gpi_ch_state ch_state;
+ enum gpi_pm_state pm_state;
+ void __iomem *ch_cntxt_base_reg;
+ void __iomem *ch_cntxt_db_reg;
+ void __iomem *ch_cmd_reg;
+ u32 dir;
+ struct gpi_ring ch_ring;
+ void *config;
+};
+
+struct gpii {
+ u32 gpii_id;
+ struct gchan gchan[MAX_CHANNELS_PER_GPII];
+ struct gpi_dev *gpi_dev;
+ int irq;
+ void __iomem *regs; /* points to gpi top */
+ void __iomem *ev_cntxt_base_reg;
+ void __iomem *ev_cntxt_db_reg;
+ void __iomem *ev_ring_rp_lsb_reg;
+ void __iomem *ev_cmd_reg;
+ void __iomem *ieob_clr_reg;
+ struct mutex ctrl_lock;
+ enum gpi_ev_state ev_state;
+ bool configured_irq;
+ enum gpi_pm_state pm_state;
+ rwlock_t pm_lock;
+ struct gpi_ring ev_ring;
+ struct tasklet_struct ev_task; /* event processing tasklet */
+ struct completion cmd_completion;
+ enum gpi_cmd gpi_cmd;
+ u32 cntxt_type_irq_msk;
+ bool ieob_set;
+};
+
+#define MAX_TRE 3
+
+struct gpi_desc {
+ struct virt_dma_desc vd;
+ size_t len;
+ void *db; /* DB register to program */
+ struct gchan *gchan;
+ struct gpi_tre tre[MAX_TRE];
+ u32 num_tre;
+};
+
+static const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
+ GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
+};
+
+static irqreturn_t gpi_handle_irq(int irq, void *data);
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
+static void gpi_process_events(struct gpii *gpii);
+
+static inline struct gchan *to_gchan(struct dma_chan *dma_chan)
+{
+ return container_of(dma_chan, struct gchan, vc.chan);
+}
+
+static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
+{
+ return container_of(vd, struct gpi_desc, vd);
+}
+
+static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
+ void *addr)
+{
+ return ring->phys_addr + (addr - ring->base);
+}
+
+static inline void *to_virtual(const struct gpi_ring *const ring, phys_addr_t addr)
+{
+ return ring->base + (addr - ring->phys_addr);
+}
+
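+/*
+ * [Editorial note] The translation helpers above are O(1) because each ring
+ * is a single physically contiguous allocation: an offset into the ring is
+ * identical in both address spaces. For example, the event-ring read pointer
+ * fetched from hardware becomes a CPU pointer with
+ *
+ *	rp = to_virtual(ev_ring, cntxt_rp);
+ */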
+static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
+{
+ return readl_relaxed(addr);
+}
+
+static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
+{
+ writel_relaxed(val, addr);
+}
+
+/* gpi_write_reg_field - write to specific bit field */
+static inline void gpi_write_reg_field(struct gpii *gpii, void __iomem *addr,
+ u32 mask, u32 shift, u32 val)
+{
+ u32 tmp = gpi_read_reg(gpii, addr);
+
+ tmp &= ~mask;
+ val = tmp | ((val << shift) & mask);
+ gpi_write_reg(gpii, addr, val);
+}
+
+static inline void
+gpi_update_reg(struct gpii *gpii, u32 offset, u32 mask, u32 val)
+{
+ void __iomem *addr = gpii->regs + offset;
+ u32 tmp = gpi_read_reg(gpii, addr);
+
+ tmp &= ~mask;
+ tmp |= u32_encode_bits(val, mask);
+
+ gpi_write_reg(gpii, addr, tmp);
+}
+
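+/*
+ * [Editorial note] gpi_update_reg() is a masked read-modify-write:
+ * u32_encode_bits() shifts val into the field described by mask, so e.g.
+ *
+ *	gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(id),
+ *		       GPII_n_CNTXT_INTSET_BMSK, 1);
+ *
+ * sets only the INTSET bit and leaves the rest of the register intact.
+ */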
+static void gpi_disable_interrupts(struct gpii *gpii)
+{
+ gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_INTSET_BMSK, 0);
+
+ gpii->cntxt_type_irq_msk = 0;
+ devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
+ gpii->configured_irq = false;
+}
+
+/* configure and enable interrupts */
+static int gpi_config_interrupts(struct gpii *gpii, enum gpii_irq_settings settings, bool mask)
+{
+ const u32 enable = (GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
+ GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+ int ret;
+
+ if (!gpii->configured_irq) {
+ ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
+ gpi_handle_irq, IRQF_TRIGGER_HIGH,
+ "gpi-dma", gpii);
+ if (ret < 0) {
+ dev_err(gpii->gpi_dev->dev, "error request irq:%d ret:%d\n",
+ gpii->irq, ret);
+ return ret;
+ }
+ }
+
+ if (settings == MASK_IEOB_SETTINGS) {
+ /*
+ * each gpii uses only one EV ring, so the IEOB interrupt can be
+ * enabled/disabled globally
+ */
+ if (mask)
+ gpii->cntxt_type_irq_msk |= GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+ else
+ gpii->cntxt_type_irq_msk &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
+ gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, gpii->cntxt_type_irq_msk);
+ } else {
+ gpi_update_reg(gpii, GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK, enable);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
+ GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
+ GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
+ GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GLOB_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_GPII_IRQ_EN_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_GPII_IRQ_EN_BMSK, GPII_n_CNTXT_GPII_IRQ_EN_BMSK);
+ gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_LSB_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_MSI_BASE_MSB_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_0_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_SCRATCH_1_OFFS(gpii->gpii_id), U32_MAX, 0);
+ gpi_update_reg(gpii, GPII_n_CNTXT_INTSET_OFFS(gpii->gpii_id),
+ GPII_n_CNTXT_INTSET_BMSK, 1);
+ gpi_update_reg(gpii, GPII_n_ERROR_LOG_OFFS(gpii->gpii_id), U32_MAX, 0);
+
+ gpii->cntxt_type_irq_msk = enable;
+ }
+
+ gpii->configured_irq = true;
+ return 0;
+}
+
+/* Sends gpii event or channel command */
+static int gpi_send_cmd(struct gpii *gpii, struct gchan *gchan,
+ enum gpi_cmd gpi_cmd)
+{
+ u32 chid = MAX_CHANNELS_PER_GPII;
+ unsigned long timeout;
+ void __iomem *cmd_reg;
+ u32 cmd;
+
+ if (gpi_cmd >= GPI_MAX_CMD)
+ return -EINVAL;
+ if (IS_CHAN_CMD(gpi_cmd))
+ chid = gchan->chid;
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "sending cmd: %s:%u\n", TO_GPI_CMD_STR(gpi_cmd), chid);
+
+ /* send opcode and wait for completion */
+ reinit_completion(&gpii->cmd_completion);
+ gpii->gpi_cmd = gpi_cmd;
+
+ cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gchan->ch_cmd_reg : gpii->ev_cmd_reg;
+ cmd = IS_CHAN_CMD(gpi_cmd) ? GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
+ GPII_n_EV_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
+ gpi_write_reg(gpii, cmd_reg, cmd);
+ timeout = wait_for_completion_timeout(&gpii->cmd_completion,
+ msecs_to_jiffies(CMD_TIMEOUT_MS));
+ if (!timeout) {
+ dev_err(gpii->gpi_dev->dev, "cmd: %s completion timeout:%u\n",
+ TO_GPI_CMD_STR(gpi_cmd), chid);
+ return -EIO;
+ }
+
+ /* if the command changes state, confirm the new channel state is correct */
+ if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
+ return 0;
+
+ if (IS_CHAN_CMD(gpi_cmd) && gchan->ch_state == gpi_cmd_info[gpi_cmd].state)
+ return 0;
+
+ if (!IS_CHAN_CMD(gpi_cmd) && gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
+ return 0;
+
+ return -EIO;
+}
+
+/* program transfer ring DB register */
+static inline void gpi_write_ch_db(struct gchan *gchan,
+ struct gpi_ring *ring, void *wp)
+{
+ struct gpii *gpii = gchan->gpii;
+ phys_addr_t p_wp;
+
+ p_wp = to_physical(ring, wp);
+ gpi_write_reg(gpii, gchan->ch_cntxt_db_reg, p_wp);
+}
+
+/* program event ring DB register */
+static inline void gpi_write_ev_db(struct gpii *gpii,
+ struct gpi_ring *ring, void *wp)
+{
+ phys_addr_t p_wp;
+
+ p_wp = ring->phys_addr + (wp - ring->base);
+ gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, p_wp);
+}
+
+/* process transfer completion interrupt */
+static void gpi_process_ieob(struct gpii *gpii)
+{
+ gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
+
+ gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
+ tasklet_hi_schedule(&gpii->ev_task);
+}
+
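+/*
+ * [Editorial note] Bottom-half handoff: on IEOB the hardirq path above acks
+ * the event, masks further IEOB interrupts (MASK_IEOB_SETTINGS) and kicks
+ * the tasklet; gpi_ev_tasklet() drains the event ring and then unmasks IEOB
+ * again, a NAPI-style mask/poll/unmask pattern.
+ */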
+/* process channel control interrupt */
+static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
+ u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+ struct gchan *gchan;
+ u32 chid, state;
+
+ /* clear the status */
+ offset = GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
+
+ for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
+ if (!(BIT(chid) & ch_irq))
+ continue;
+
+ gchan = &gpii->gchan[chid];
+ state = gpi_read_reg(gpii, gchan->ch_cntxt_base_reg +
+ CNTXT_0_CONFIG);
+ state = FIELD_GET(GPII_n_CH_k_CNTXT_0_CHSTATE, state);
+
+ /*
+ * The CH_CMD_DE_ALLOC command always succeeds. However, it does
+ * not change the hardware state, so overwrite the software state
+ * with the default.
+ */
+ if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
+ state = DEFAULT_CH_STATE;
+ gchan->ch_state = state;
+
+ /*
+ * Trigger complete_all() unless ch_state is stop-in-process.
+ * Stop-in-process is a transitional state; wait for the stop
+ * interrupt before notifying.
+ */
+ if (gchan->ch_state != CH_STATE_STOP_IN_PROC)
+ complete_all(&gpii->cmd_completion);
+ }
+}
+
+/* processing gpi general error interrupts */
+static void gpi_process_gen_err_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPII_n_CNTXT_GPII_IRQ_STTS_OFFS(gpii_id);
+ u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+
+ /* log the status */
+ dev_dbg(gpii->gpi_dev->dev, "irq_stts:0x%x\n", irq_stts);
+
+ /* Clear the register */
+ offset = GPII_n_CNTXT_GPII_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+}
+
+/* processing gpi level error interrupts */
+static void gpi_process_glob_err_irq(struct gpii *gpii)
+{
+ u32 gpii_id = gpii->gpii_id;
+ u32 offset = GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
+ u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
+
+ offset = GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
+
+ /* only error interrupt should be set */
+ if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
+ dev_err(gpii->gpi_dev->dev, "invalid error status:0x%x\n", irq_stts);
+ return;
+ }
+
+ offset = GPII_n_ERROR_LOG_OFFS(gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, 0);
+}
+
+/* gpii interrupt handler */
+static irqreturn_t gpi_handle_irq(int irq, void *data)
+{
+ struct gpii *gpii = data;
+ u32 gpii_id = gpii->gpii_id;
+ u32 type, offset;
+ unsigned long flags;
+
+ read_lock_irqsave(&gpii->pm_lock, flags);
+
+ /*
+ * States are out of sync: an interrupt arrived while the
+ * software state is DISABLE, so bail out.
+ */
+ if (!REG_ACCESS_VALID(gpii->pm_state)) {
+ dev_err(gpii->gpi_dev->dev, "receive interrupt while in %s state\n",
+ TO_GPI_PM_STR(gpii->pm_state));
+ goto exit_irq;
+ }
+
+ offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+ type = gpi_read_reg(gpii, gpii->regs + offset);
+
+ do {
+ /* global gpii error */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
+ gpi_process_glob_err_irq(gpii);
+ type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
+ }
+
+ /* transfer complete interrupt */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
+ gpi_process_ieob(gpii);
+ type &= ~GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
+ }
+
+ /* event control irq */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
+ u32 ev_state;
+ u32 ev_ch_irq;
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "processing EV CTRL interrupt\n");
+ offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
+ ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
+
+ offset = GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
+ (gpii_id);
+ gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
+ ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
+ CNTXT_0_CONFIG);
+ ev_state = FIELD_GET(GPII_n_EV_k_CNTXT_0_CHSTATE, ev_state);
+
+ /*
+ * The EV_CMD_DEALLOC command always succeeds. However, it
+ * does not change the hardware state, so overwrite the
+ * software state with the default.
+ */
+ if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
+ ev_state = DEFAULT_EV_CH_STATE;
+
+ gpii->ev_state = ev_state;
+ dev_dbg(gpii->gpi_dev->dev, "setting EV state to %s\n",
+ TO_GPI_EV_STATE_STR(gpii->ev_state));
+ complete_all(&gpii->cmd_completion);
+ type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
+ }
+
+ /* channel control irq */
+ if (type & GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
+ dev_dbg(gpii->gpi_dev->dev, "process CH CTRL interrupts\n");
+ gpi_process_ch_ctrl_irq(gpii);
+ type &= ~(GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
+ }
+
+ if (type) {
+ dev_err(gpii->gpi_dev->dev, "Unhandled interrupt status:0x%x\n", type);
+ gpi_process_gen_err_irq(gpii);
+ goto exit_irq;
+ }
+
+ offset = GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
+ type = gpi_read_reg(gpii, gpii->regs + offset);
+ } while (type);
+
+exit_irq:
+ read_unlock_irqrestore(&gpii->pm_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+/* process DMA Immediate completion data events */
+static void gpi_process_imed_data_event(struct gchan *gchan,
+ struct immediate_data_event *imed_event)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ void *tre = ch_ring->base + (ch_ring->el_size * imed_event->tre_index);
+ struct dmaengine_result result;
+ struct gpi_desc *gpi_desc;
+ struct virt_dma_desc *vd;
+ unsigned long flags;
+ u32 chid;
+
+ /* if the channel is not active, don't process the event */
+ if (gchan->pm_state != ACTIVE_STATE) {
+ dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
+ TO_GPI_PM_STR(gchan->pm_state));
+ return;
+ }
+
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ vd = vchan_next_desc(&gchan->vc);
+ if (!vd) {
+ struct gpi_ere *gpi_ere;
+ struct gpi_tre *gpi_tre;
+
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ dev_dbg(gpii->gpi_dev->dev, "event without a pending descriptor!\n");
+ gpi_ere = (struct gpi_ere *)imed_event;
+ dev_dbg(gpii->gpi_dev->dev,
+ "Event: %08x %08x %08x %08x\n",
+ gpi_ere->dword[0], gpi_ere->dword[1],
+ gpi_ere->dword[2], gpi_ere->dword[3]);
+ gpi_tre = tre;
+ dev_dbg(gpii->gpi_dev->dev,
+ "Pending TRE: %08x %08x %08x %08x\n",
+ gpi_tre->dword[0], gpi_tre->dword[1],
+ gpi_tre->dword[2], gpi_tre->dword[3]);
+ return;
+ }
+ gpi_desc = to_gpi_desc(vd);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+
+ /*
+ * The RP reported by the event points to the last TRE processed;
+ * advance the ring rp to tre + 1.
+ */
+ tre += ch_ring->el_size;
+ if (tre >= (ch_ring->base + ch_ring->len))
+ tre = ch_ring->base;
+ ch_ring->rp = tre;
+
+ /* make sure rp updates are immediately visible to all cores */
+ smp_wmb();
+
+ chid = imed_event->chid;
+ if (imed_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
+ if (chid == GPI_RX_CHAN)
+ goto gpi_free_desc;
+ else
+ return;
+ }
+
+ if (imed_event->code == MSM_GPI_TCE_UNEXP_ERR)
+ result.result = DMA_TRANS_ABORTED;
+ else
+ result.result = DMA_TRANS_NOERROR;
+ result.residue = gpi_desc->len - imed_event->length;
+
+ dma_cookie_complete(&vd->tx);
+ dmaengine_desc_get_callback_invoke(&vd->tx, &result);
+
+gpi_free_desc:
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ kfree(gpi_desc);
+ gpi_desc = NULL;
+}
+
+/* processing transfer completion events */
+static void gpi_process_xfer_compl_event(struct gchan *gchan,
+ struct xfer_compl_event *compl_event)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
+ struct virt_dma_desc *vd;
+ struct gpi_desc *gpi_desc;
+ struct dmaengine_result result;
+ unsigned long flags;
+ u32 chid;
+
+ /* only process events on active channel */
+ if (unlikely(gchan->pm_state != ACTIVE_STATE)) {
+ dev_err(gpii->gpi_dev->dev, "skipping processing event because ch @ %s state\n",
+ TO_GPI_PM_STR(gchan->pm_state));
+ return;
+ }
+
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ vd = vchan_next_desc(&gchan->vc);
+ if (!vd) {
+ struct gpi_ere *gpi_ere;
+
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ dev_err(gpii->gpi_dev->dev, "Event without a pending descriptor!\n");
+ gpi_ere = (struct gpi_ere *)compl_event;
+ dev_err(gpii->gpi_dev->dev,
+ "Event: %08x %08x %08x %08x\n",
+ gpi_ere->dword[0], gpi_ere->dword[1],
+ gpi_ere->dword[2], gpi_ere->dword[3]);
+ return;
+ }
+
+ gpi_desc = to_gpi_desc(vd);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+
+ /*
+ * The RP reported by the event points to the last TRE processed;
+ * advance the ring rp to ev_rp + 1.
+ */
+ ev_rp += ch_ring->el_size;
+ if (ev_rp >= (ch_ring->base + ch_ring->len))
+ ev_rp = ch_ring->base;
+ ch_ring->rp = ev_rp;
+
+ /* update must be visible to other cores */
+ smp_wmb();
+
+ chid = compl_event->chid;
+ if (compl_event->code == MSM_GPI_TCE_EOT && gpii->ieob_set) {
+ if (chid == GPI_RX_CHAN)
+ goto gpi_free_desc;
+ else
+ return;
+ }
+
+ if (compl_event->code == MSM_GPI_TCE_UNEXP_ERR) {
+ dev_err(gpii->gpi_dev->dev, "Error in Transaction\n");
+ result.result = DMA_TRANS_ABORTED;
+ } else {
+ dev_dbg(gpii->gpi_dev->dev, "Transaction Success\n");
+ result.result = DMA_TRANS_NOERROR;
+ }
+ result.residue = gpi_desc->len - compl_event->length;
+ dev_dbg(gpii->gpi_dev->dev, "Residue %d\n", result.residue);
+
+ dma_cookie_complete(&vd->tx);
+ dmaengine_desc_get_callback_invoke(&vd->tx, &result);
+
+gpi_free_desc:
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ list_del(&vd->node);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ kfree(gpi_desc);
+ gpi_desc = NULL;
+}
+
+/* process all events */
+static void gpi_process_events(struct gpii *gpii)
+{
+ struct gpi_ring *ev_ring = &gpii->ev_ring;
+ phys_addr_t cntxt_rp;
+ void *rp;
+ union gpi_event *gpi_event;
+ struct gchan *gchan;
+ u32 chid, type;
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ rp = to_virtual(ev_ring, cntxt_rp);
+
+ do {
+ while (rp != ev_ring->rp) {
+ gpi_event = ev_ring->rp;
+ chid = gpi_event->xfer_compl_event.chid;
+ type = gpi_event->xfer_compl_event.type;
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "Event: CHID:%u, type:%x %08x %08x %08x %08x\n",
+ chid, type, gpi_event->gpi_ere.dword[0],
+ gpi_event->gpi_ere.dword[1], gpi_event->gpi_ere.dword[2],
+ gpi_event->gpi_ere.dword[3]);
+
+ switch (type) {
+ case XFER_COMPLETE_EV_TYPE:
+ gchan = &gpii->gchan[chid];
+ gpi_process_xfer_compl_event(gchan,
+ &gpi_event->xfer_compl_event);
+ break;
+ case STALE_EV_TYPE:
+ dev_dbg(gpii->gpi_dev->dev, "stale event, not processing\n");
+ break;
+ case IMMEDIATE_DATA_EV_TYPE:
+ gchan = &gpii->gchan[chid];
+ gpi_process_imed_data_event(gchan,
+ &gpi_event->immediate_data_event);
+ break;
+ case QUP_NOTIF_EV_TYPE:
+ dev_dbg(gpii->gpi_dev->dev, "QUP_NOTIF_EV_TYPE\n");
+ break;
+ default:
+ dev_dbg(gpii->gpi_dev->dev,
+ "not supported event type:0x%x\n", type);
+ }
+ gpi_ring_recycle_ev_element(ev_ring);
+ }
+ gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
+
+ /* clear pending IEOB events */
+ gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ rp = to_virtual(ev_ring, cntxt_rp);
+
+ } while (rp != ev_ring->rp);
+}
+
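+/*
+ * [Editorial note] gpi_process_events() re-reads the hardware read pointer
+ * after ringing the doorbell and clearing IEOB, looping until it matches the
+ * local rp: events that arrive while the ring is being drained are handled
+ * in the same pass rather than waiting for the next interrupt.
+ */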
+/* processing events using tasklet */
+static void gpi_ev_tasklet(unsigned long data)
+{
+ struct gpii *gpii = (struct gpii *)data;
+
+ read_lock_bh(&gpii->pm_lock);
+ if (!REG_ACCESS_VALID(gpii->pm_state)) {
+ read_unlock_bh(&gpii->pm_lock);
+ dev_err(gpii->gpi_dev->dev, "not processing any events, pm_state:%s\n",
+ TO_GPI_PM_STR(gpii->pm_state));
+ return;
+ }
+
+ /* process the events */
+ gpi_process_events(gpii);
+
+ /* enable IEOB, switching back to interrupts */
+ gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
+ read_unlock_bh(&gpii->pm_lock);
+}
+
+/* marks all pending events for the channel as stale */
+static void gpi_mark_stale_events(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ev_ring = &gpii->ev_ring;
+ u32 cntxt_rp, local_rp;
+ void *ev_rp;
+
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+
+ ev_rp = ev_ring->rp;
+ local_rp = (u32)to_physical(ev_ring, ev_rp);
+ while (local_rp != cntxt_rp) {
+ union gpi_event *gpi_event = ev_rp;
+ u32 chid = gpi_event->xfer_compl_event.chid;
+
+ if (chid == gchan->chid)
+ gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
+ ev_rp += ev_ring->el_size;
+ if (ev_rp >= (ev_ring->base + ev_ring->len))
+ ev_rp = ev_ring->base;
+ cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
+ local_rp = (u32)to_physical(ev_ring, ev_rp);
+ }
+}
+
+/* reset sw state and issue channel reset or de-alloc */
+static int gpi_reset_chan(struct gchan *gchan, enum gpi_cmd gpi_cmd)
+{
+ struct gpii *gpii = gchan->gpii;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ unsigned long flags;
+ LIST_HEAD(list);
+ int ret;
+
+ ret = gpi_send_cmd(gpii, gchan, gpi_cmd);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(gpi_cmd), ret);
+ return ret;
+ }
+
+ /* initialize the local ring ptrs */
+ ch_ring->rp = ch_ring->base;
+ ch_ring->wp = ch_ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+
+ /* check event ring for any stale events */
+ write_lock_irq(&gpii->pm_lock);
+ gpi_mark_stale_events(gchan);
+
+ /* remove all async descriptors */
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ vchan_get_all_descriptors(&gchan->vc, &list);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+ write_unlock_irq(&gpii->pm_lock);
+ vchan_dma_desc_free_list(&gchan->vc, &list);
+
+ return 0;
+}
+
+static int gpi_start_chan(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ int ret;
+
+ ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_START);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
+ return ret;
+ }
+
+ /* gpii CH is active now */
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ return 0;
+}
+
+static int gpi_stop_chan(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ int ret;
+
+ ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_STOP);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_STOP), ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/* allocate and configure the transfer channel */
+static int gpi_alloc_chan(struct gchan *chan, bool send_alloc_cmd)
+{
+ struct gpii *gpii = chan->gpii;
+ struct gpi_ring *ring = &chan->ch_ring;
+ int ret;
+ u32 id = gpii->gpii_id;
+ u32 chid = chan->chid;
+ u32 pair_chid = !chid;
+
+ if (send_alloc_cmd) {
+ ret = gpi_send_cmd(gpii, chan, GPI_CH_CMD_ALLOCATE);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
+ return ret;
+ }
+ }
+
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_0_CONFIG,
+ GPII_n_CH_k_CNTXT_0(ring->el_size, 0, chan->dir, GPI_CHTYPE_PROTO_GPI));
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_1_R_LENGTH, ring->len);
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_2_RING_BASE_LSB, ring->phys_addr);
+ gpi_write_reg(gpii, chan->ch_cntxt_base_reg + CNTXT_3_RING_BASE_MSB,
+ upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, chan->ch_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+ upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_0_OFFS(id, chid),
+ GPII_n_CH_k_SCRATCH_0(pair_chid, chan->protocol, chan->seid));
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_1_OFFS(id, chid), 0);
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_2_OFFS(id, chid), 0);
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_SCRATCH_3_OFFS(id, chid), 0);
+ gpi_write_reg(gpii, gpii->regs + GPII_n_CH_k_QOS_OFFS(id, chid), 1);
+
+ /* flush all the writes */
+ wmb();
+ return 0;
+}
+
+/* allocate and configure event ring */
+static int gpi_alloc_ev_chan(struct gpii *gpii)
+{
+ struct gpi_ring *ring = &gpii->ev_ring;
+ void __iomem *base = gpii->ev_cntxt_base_reg;
+ int ret;
+
+ ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "error with cmd:%s ret:%d\n",
+ TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
+ return ret;
+ }
+
+ /* program event context */
+ gpi_write_reg(gpii, base + CNTXT_0_CONFIG,
+ GPII_n_EV_k_CNTXT_0(ring->el_size, GPI_INTTYPE_IRQ, GPI_CHTYPE_GPI_EV));
+ gpi_write_reg(gpii, base + CNTXT_1_R_LENGTH, ring->len);
+ gpi_write_reg(gpii, base + CNTXT_2_RING_BASE_LSB, lower_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, base + CNTXT_3_RING_BASE_MSB, upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, gpii->ev_cntxt_db_reg + CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
+ upper_32_bits(ring->phys_addr));
+ gpi_write_reg(gpii, base + CNTXT_8_RING_INT_MOD, 0);
+ gpi_write_reg(gpii, base + CNTXT_10_RING_MSI_LSB, 0);
+ gpi_write_reg(gpii, base + CNTXT_11_RING_MSI_MSB, 0);
+ gpi_write_reg(gpii, base + CNTXT_12_RING_RP_UPDATE_LSB, 0);
+ gpi_write_reg(gpii, base + CNTXT_13_RING_RP_UPDATE_MSB, 0);
+
+ /* add events to ring */
+ ring->wp = (ring->base + ring->len - ring->el_size);
+
+ /* flush all the writes */
+ wmb();
+
+ /* gpii is active now */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ gpi_write_ev_db(gpii, ring, ring->wp);
+
+ return 0;
+}
+
+/* calculate # of ERE/TRE available to queue */
+static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
+{
+ int elements = 0;
+
+ if (ring->wp < ring->rp) {
+ elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
+ } else {
+ elements = (ring->rp - ring->base) / ring->el_size;
+ elements += ((ring->base + ring->len - ring->wp) / ring->el_size) - 1;
+ }
+
+ return elements;
+}
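
One slot is always left unused so that wp == rp can only mean "empty"; that is where the trailing -1 comes from. A stand-alone model of the arithmetic, with hypothetical offsets (not driver code):

#include <assert.h>
#include <stddef.h>

/* mirror of gpi_ring_num_elements_avail(): base..base+len is the
 * ring, el divides len, one guard slot stays unused */
static int avail(size_t base, size_t len, size_t el, size_t rp, size_t wp)
{
	if (wp < rp)
		return (int)((rp - wp) / el) - 1;
	return (int)((rp - base) / el + (base + len - wp) / el) - 1;
}

int main(void)
{
	/* 8-element ring (len 128, el 16): empty ring has 7 usable slots */
	assert(avail(0, 128, 16, 0, 0) == 7);
	/* wp wrapped behind rp: the gap holds 4 slots, minus the guard */
	assert(avail(0, 128, 16, 96, 32) == 3);
	return 0;
}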
+
+static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
+{
+ if (gpi_ring_num_elements_avail(ring) <= 0)
+ return -ENOMEM;
+
+ *wp = ring->wp;
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+
+ return 0;
+}
+
+static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
+{
+ /* Update the WP */
+ ring->wp += ring->el_size;
+ if (ring->wp >= (ring->base + ring->len))
+ ring->wp = ring->base;
+
+ /* Update the RP */
+ ring->rp += ring->el_size;
+ if (ring->rp >= (ring->base + ring->len))
+ ring->rp = ring->base;
+
+ /* visible to other cores */
+ smp_wmb();
+}
+
+static void gpi_free_ring(struct gpi_ring *ring,
+ struct gpii *gpii)
+{
+ dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
+ ring->pre_aligned, ring->dma_handle);
+ memset(ring, 0, sizeof(*ring));
+}
+
+/* allocate memory for transfer and event rings */
+static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
+ u32 el_size, struct gpii *gpii)
+{
+ u64 len = elements * el_size;
+ int bit;
+
+ /* ring len must be power of 2 */
+ bit = find_last_bit((unsigned long *)&len, 32);
+ if (((1 << bit) - 1) & len)
+ bit++;
+ len = 1 << bit;
+ ring->alloc_size = (len + (len - 1));
+ dev_dbg(gpii->gpi_dev->dev,
+ "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
+ elements, el_size, (elements * el_size), len,
+ ring->alloc_size);
+
+ ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
+ ring->alloc_size,
+ &ring->dma_handle, GFP_KERNEL);
+ if (!ring->pre_aligned) {
+ dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
+ ring->alloc_size);
+ return -ENOMEM;
+ }
+
+ /* align the physical mem */
+ ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
+ ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
+ ring->rp = ring->base;
+ ring->wp = ring->base;
+ ring->len = len;
+ ring->el_size = el_size;
+ ring->elements = ring->len / ring->el_size;
+ memset(ring->base, 0, ring->len);
+ ring->configured = true;
+
+ /* update to other cores */
+ smp_wmb();
+
+ dev_dbg(gpii->gpi_dev->dev,
+ "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+ &ring->dma_handle, &ring->phys_addr, ring->len,
+ ring->el_size, ring->elements);
+
+ return 0;
+}
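
The allocation deserves a worked example: len is rounded up to a power of two, the buffer is over-allocated by len - 1 bytes so a len-aligned window must exist inside it, and phys_addr/base select that window. A stand-alone check with a hypothetical bus address (not driver code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t len = 1024;                   /* 64 elements of 16 bytes */
	uint64_t alloc_size = len + (len - 1); /* 2047 */
	uint64_t dma_handle = 0x10000300;      /* hypothetical bus address
	                                          from dma_alloc_coherent() */
	uint64_t phys = (dma_handle + (len - 1)) & ~(len - 1);

	assert(alloc_size == 2047);
	assert(phys == 0x10000400);                    /* next 1 KiB boundary */
	assert(phys - dma_handle + len <= alloc_size); /* window fits inside  */
	return 0;
}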
+
+/* copy tre into transfer ring */
+static void gpi_queue_xfer(struct gpii *gpii, struct gchan *gchan,
+ struct gpi_tre *gpi_tre, void **wp)
+{
+ struct gpi_tre *ch_tre;
+ int ret;
+
+ /* get next tre location we can copy */
+ ret = gpi_ring_add_element(&gchan->ch_ring, (void **)&ch_tre);
+ if (unlikely(ret)) {
+ dev_err(gpii->gpi_dev->dev, "Error adding ring element to xfer ring\n");
+ return;
+ }
+
+ /* copy the tre info */
+ memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
+ *wp = ch_tre;
+}
+
+/* reset and restart transfer channel */
+static int gpi_terminate_all(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int schid, echid, i;
+ int ret = 0;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /*
+ * treat both channels as a group unless the protocol is UART:
+ * STOP, RESET, and START must be issued in lockstep
+ */
+ schid = (gchan->protocol == QCOM_GPI_UART) ? gchan->chid : 0;
+ echid = (gchan->protocol == QCOM_GPI_UART) ? schid + 1 : MAX_CHANNELS_PER_GPII;
+
+ /* stop the channel */
+ for (i = schid; i < echid; i++) {
+ gchan = &gpii->gchan[i];
+
+ /* disable ch state so no more TRE processing */
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* send command to Stop the channel */
+ ret = gpi_stop_chan(gchan);
+ }
+
+ /* reset the channels (clears any pending tre) */
+ for (i = schid; i < echid; i++) {
+ gchan = &gpii->gchan[i];
+
+ ret = gpi_reset_chan(gchan, GPI_CH_CMD_RESET);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error resetting channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+
+ /* reprogram channel CNTXT */
+ ret = gpi_alloc_chan(gchan, false);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error alloc_channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+ }
+
+ /* restart the channels */
+ for (i = schid; i < echid; i++) {
+ gchan = &gpii->gchan[i];
+
+ ret = gpi_start_chan(gchan);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error Starting Channel ret:%d\n", ret);
+ goto terminate_exit;
+ }
+ }
+
+terminate_exit:
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+}
+
+/* pause dma transfer for all channels */
+static int gpi_pause(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int i, ret;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /*
+ * pause/resume are per gpii not per channel, so
+ * client needs to call pause only once
+ */
+ if (gpii->pm_state == PAUSE_STATE) {
+ dev_dbg(gpii->gpi_dev->dev, "channel is already paused\n");
+ mutex_unlock(&gpii->ctrl_lock);
+ return 0;
+ }
+
+ /* send stop command to stop the channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_stop_chan(&gpii->gchan[i]);
+ if (ret) {
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+ }
+ }
+
+ disable_irq(gpii->irq);
+
+ /* wait for the event tasklet to finish */
+ tasklet_kill(&gpii->ev_task);
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PAUSE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+}
+
+/* resume dma transfer */
+static int gpi_resume(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int i, ret;
+
+ mutex_lock(&gpii->ctrl_lock);
+ if (gpii->pm_state == ACTIVE_STATE) {
+ dev_dbg(gpii->gpi_dev->dev, "channel is already active\n");
+ mutex_unlock(&gpii->ctrl_lock);
+ return 0;
+ }
+
+ enable_irq(gpii->irq);
+
+ /* send start command to start the channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_START);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error starting chan, ret:%d\n", ret);
+ mutex_unlock(&gpii->ctrl_lock);
+ return ret;
+ }
+ }
+
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = ACTIVE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return 0;
+}
+
+static void gpi_desc_free(struct virt_dma_desc *vd)
+{
+ struct gpi_desc *gpi_desc = to_gpi_desc(vd);
+
+ kfree(gpi_desc);
+}
+
+static int
+gpi_peripheral_config(struct dma_chan *chan, struct dma_slave_config *config)
+{
+ struct gchan *gchan = to_gchan(chan);
+
+ if (!config->peripheral_config)
+ return -EINVAL;
+
+ gchan->config = krealloc(gchan->config, config->peripheral_size, GFP_NOWAIT);
+ if (!gchan->config)
+ return -ENOMEM;
+
+ memcpy(gchan->config, config->peripheral_config, config->peripheral_size);
+
+ return 0;
+}
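
For context, a sketch of how a client reaches gpi_peripheral_config(): the peripheral block travels through dma_slave_config's peripheral_config pointer. The field values below are illustrative only, struct gpi_i2c_config is assumed to be the one added alongside this driver, and tx_chan stands for a channel previously obtained with dma_request_chan():

	struct gpi_i2c_config i2c_cfg = {
		.set_config = 1,
		.op = I2C_WRITE,
		.addr = 0x50, /* hypothetical 7-bit target */
		.clk_div = 6, /* illustrative, not a real timing value */
	};
	struct dma_slave_config slave_cfg = {
		.peripheral_config = &i2c_cfg,
		.peripheral_size = sizeof(i2c_cfg),
	};
	int ret = dmaengine_slave_config(tx_chan, &slave_cfg);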
+
+static int gpi_create_i2c_tre(struct gchan *chan, struct gpi_desc *desc,
+ struct scatterlist *sgl, enum dma_transfer_direction direction)
+{
+ struct gpi_i2c_config *i2c = chan->config;
+ struct device *dev = chan->gpii->gpi_dev->dev;
+ unsigned int tre_idx = 0;
+ dma_addr_t address;
+ struct gpi_tre *tre;
+ unsigned int i;
+
+ /* first create config tre if applicable */
+ if (i2c->set_config) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ tre->dword[0] = u32_encode_bits(i2c->low_count, TRE_I2C_C0_TLOW);
+ tre->dword[0] |= u32_encode_bits(i2c->high_count, TRE_I2C_C0_THIGH);
+ tre->dword[0] |= u32_encode_bits(i2c->cycle_count, TRE_I2C_C0_TCYL);
+ tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_TX_PACK);
+ tre->dword[0] |= u32_encode_bits(i2c->pack_enable, TRE_I2C_C0_RX_PACK);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(i2c->clk_div, TRE_C0_CLK_DIV);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ /* create the GO tre for Tx */
+ if (i2c->op == I2C_WRITE) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ if (i2c->multi_msg)
+ tre->dword[0] = u32_encode_bits(I2C_READ, TRE_I2C_GO_CMD);
+ else
+ tre->dword[0] = u32_encode_bits(i2c->op, TRE_I2C_GO_CMD);
+
+ tre->dword[0] |= u32_encode_bits(i2c->addr, TRE_I2C_GO_ADDR);
+ tre->dword[0] |= u32_encode_bits(i2c->stretch, TRE_I2C_GO_STRETCH);
+
+ tre->dword[1] = 0;
+ tre->dword[2] = u32_encode_bits(i2c->rx_len, TRE_RX_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+
+ if (i2c->multi_msg)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
+ else
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ if (i2c->op == I2C_READ || i2c->multi_msg == false) {
+ /* create the DMA TRE */
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ address = sg_dma_address(sgl);
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+ }
+
+ for (i = 0; i < tre_idx; i++)
+ dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
+ desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]);
+
+ return tre_idx;
+}
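
Putting the pieces together: a single I2C write with set_config set emits three chained TREs. A summary of the layout the function above builds (annotation derived from the code):

/*
 * TRE0 CONFIG0: clock counts, pack mode     flags: TYPE=CONFIG0, CHAIN
 * TRE1 GO:      op, target address, stretch flags: TYPE=GO,      CHAIN
 * TRE2 DMA:     buffer address and length   flags: TYPE=DMA,     IEOT
 *
 * This matches nr_tre in gpi_prep_slave_sg(): 3 with a config TRE,
 * 2 when set_config is clear, 1 for the DMA_DEV_TO_MEM (rx) side.
 */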
+
+static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
+ struct scatterlist *sgl, enum dma_transfer_direction direction)
+{
+ struct gpi_spi_config *spi = chan->config;
+ struct device *dev = chan->gpii->gpi_dev->dev;
+ unsigned int tre_idx = 0;
+ dma_addr_t address;
+ struct gpi_tre *tre;
+ unsigned int i;
+
+ /* first create config tre if applicable */
+ if (direction == DMA_MEM_TO_DEV && spi->set_config) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ tre->dword[0] = u32_encode_bits(spi->word_len, TRE_SPI_C0_WORD_SZ);
+ tre->dword[0] |= u32_encode_bits(spi->loopback_en, TRE_SPI_C0_LOOPBACK);
+ tre->dword[0] |= u32_encode_bits(spi->clock_pol_high, TRE_SPI_C0_CPOL);
+ tre->dword[0] |= u32_encode_bits(spi->data_pol_high, TRE_SPI_C0_CPHA);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_TX_PACK);
+ tre->dword[0] |= u32_encode_bits(spi->pack_en, TRE_SPI_C0_RX_PACK);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(spi->clk_div, TRE_C0_CLK_DIV);
+ tre->dword[2] |= u32_encode_bits(spi->clk_src, TRE_C0_CLK_SRC);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_CONFIG0, TRE_FLAGS_TYPE);
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ /* create the GO tre for Tx */
+ if (direction == DMA_MEM_TO_DEV) {
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ tre->dword[0] = u32_encode_bits(spi->fragmentation, TRE_SPI_GO_FRAG);
+ tre->dword[0] |= u32_encode_bits(spi->cs, TRE_SPI_GO_CS);
+ tre->dword[0] |= u32_encode_bits(spi->cmd, TRE_SPI_GO_CMD);
+
+ tre->dword[1] = 0;
+
+ tre->dword[2] = u32_encode_bits(spi->rx_len, TRE_RX_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
+ if (spi->cmd == SPI_RX)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+ else
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
+ }
+
+ /* create the dma tre */
+ tre = &desc->tre[tre_idx];
+ tre_idx++;
+
+ address = sg_dma_address(sgl);
+ tre->dword[0] = lower_32_bits(address);
+ tre->dword[1] = upper_32_bits(address);
+
+ tre->dword[2] = u32_encode_bits(sg_dma_len(sgl), TRE_DMA_LEN);
+
+ tre->dword[3] = u32_encode_bits(TRE_TYPE_DMA, TRE_FLAGS_TYPE);
+ if (direction == DMA_MEM_TO_DEV)
+ tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOT);
+
+ for (i = 0; i < tre_idx; i++)
+ dev_dbg(dev, "TRE:%d %x:%x:%x:%x\n", i, desc->tre[i].dword[0],
+ desc->tre[i].dword[1], desc->tre[i].dword[2], desc->tre[i].dword[3]);
+
+ return tre_idx;
+}
+
+/* copy tre into transfer ring */
+static struct dma_async_tx_descriptor *
+gpi_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+ unsigned int sg_len, enum dma_transfer_direction direction,
+ unsigned long flags, void *context)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ struct device *dev = gpii->gpi_dev->dev;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ struct gpi_desc *gpi_desc;
+ u32 nr, nr_tre = 0;
+ u8 set_config;
+ int i;
+
+ gpii->ieob_set = false;
+ if (!is_slave_direction(direction)) {
+ dev_err(gpii->gpi_dev->dev, "invalid dma direction: %d\n", direction);
+ return NULL;
+ }
+
+ if (sg_len > 1) {
+ dev_err(dev, "multiple sg entries not supported, got %d\n", sg_len);
+ return NULL;
+ }
+
+ nr_tre = 3;
+ set_config = *(u32 *)gchan->config;
+ if (!set_config)
+ nr_tre = 2;
+ if (direction == DMA_DEV_TO_MEM) /* rx */
+ nr_tre = 1;
+
+ /* calculate # of elements required & available */
+ nr = gpi_ring_num_elements_avail(ch_ring);
+ if (nr < nr_tre) {
+ dev_err(dev, "not enough space in ring, avail:%u required:%u\n", nr, nr_tre);
+ return NULL;
+ }
+
+ gpi_desc = kzalloc(sizeof(*gpi_desc), GFP_NOWAIT);
+ if (!gpi_desc)
+ return NULL;
+
+ /* create TREs for xfer */
+ if (gchan->protocol == QCOM_GPI_SPI) {
+ i = gpi_create_spi_tre(gchan, gpi_desc, sgl, direction);
+ } else if (gchan->protocol == QCOM_GPI_I2C) {
+ i = gpi_create_i2c_tre(gchan, gpi_desc, sgl, direction);
+ } else {
+ dev_err(dev, "invalid peripheral: %d\n", gchan->protocol);
+ kfree(gpi_desc);
+ return NULL;
+ }
+
+ /* set up the descriptor */
+ gpi_desc->gchan = gchan;
+ gpi_desc->len = sg_dma_len(sgl);
+ gpi_desc->num_tre = i;
+
+ return vchan_tx_prep(&gchan->vc, &gpi_desc->vd, flags);
+}
+
+/* rings the transfer ring db to begin the transfer */
+static void gpi_issue_pending(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ unsigned long flags, pm_lock_flags;
+ struct virt_dma_desc *vd = NULL;
+ struct gpi_desc *gpi_desc;
+ struct gpi_ring *ch_ring = &gchan->ch_ring;
+ void *tre, *wp = NULL;
+ int i;
+
+ read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
+
+ /* move all submitted descriptors to the issued list */
+ spin_lock_irqsave(&gchan->vc.lock, flags);
+ if (vchan_issue_pending(&gchan->vc))
+ vd = list_last_entry(&gchan->vc.desc_issued,
+ struct virt_dma_desc, node);
+ spin_unlock_irqrestore(&gchan->vc.lock, flags);
+
+ /* nothing to do list is empty */
+ if (!vd) {
+ read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
+ return;
+ }
+
+ gpi_desc = to_gpi_desc(vd);
+ for (i = 0; i < gpi_desc->num_tre; i++) {
+ tre = &gpi_desc->tre[i];
+ gpi_queue_xfer(gpii, gchan, tre, &wp);
+ }
+
+ gpi_desc->db = ch_ring->wp;
+ gpi_write_ch_db(gchan, &gchan->ch_ring, gpi_desc->db);
+ read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
+}
+
+static int gpi_ch_init(struct gchan *gchan)
+{
+ struct gpii *gpii = gchan->gpii;
+ const int ev_factor = gpii->gpi_dev->ev_factor;
+ u32 elements;
+ int i = 0, ret = 0;
+
+ gchan->pm_state = CONFIG_STATE;
+
+ /* check if both channels are configured before continue */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+ if (gpii->gchan[i].pm_state != CONFIG_STATE)
+ goto exit_gpi_init;
+
+ /* protocol must be same for both channels */
+ if (gpii->gchan[0].protocol != gpii->gchan[1].protocol) {
+ dev_err(gpii->gpi_dev->dev, "protocols did not match: %u != %u\n",
+ gpii->gchan[0].protocol, gpii->gchan[1].protocol);
+ ret = -EINVAL;
+ goto exit_gpi_init;
+ }
+
+ /* allocate memory for event ring */
+ elements = CHAN_TRES << ev_factor;
+ ret = gpi_alloc_ring(&gpii->ev_ring, elements,
+ sizeof(union gpi_event), gpii);
+ if (ret)
+ goto exit_gpi_init;
+
+ /* configure interrupts */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PREPARE_HARDWARE;
+ write_unlock_irq(&gpii->pm_lock);
+ ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "error configuring interrupts, ret:%d\n", ret);
+ goto error_config_int;
+ }
+
+ /* allocate event rings */
+ ret = gpi_alloc_ev_chan(gpii);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "error alloc_ev_chan:%d\n", ret);
+ goto error_alloc_ev_ring;
+ }
+
+ /* Allocate all channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_alloc_chan(&gpii->gchan[i], true);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error allocating chan:%d\n", ret);
+ goto error_alloc_chan;
+ }
+ }
+
+ /* start channels */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
+ ret = gpi_start_chan(&gpii->gchan[i]);
+ if (ret) {
+ dev_err(gpii->gpi_dev->dev, "Error start chan:%d\n", ret);
+ goto error_start_chan;
+ }
+ }
+ return ret;
+
+error_start_chan:
+ for (i = i - 1; i >= 0; i--) {
+ gpi_stop_chan(&gpii->gchan[i]);
+ gpi_send_cmd(gpii, &gpii->gchan[i], GPI_CH_CMD_RESET);
+ }
+ i = MAX_CHANNELS_PER_GPII;
+error_alloc_chan:
+ for (i = i - 1; i >= 0; i--)
+ gpi_reset_chan(&gpii->gchan[i], GPI_CH_CMD_DE_ALLOC);
+error_alloc_ev_ring:
+ gpi_disable_interrupts(gpii);
+error_config_int:
+ gpi_free_ring(&gpii->ev_ring, gpii);
+exit_gpi_init:
+ return ret;
+}
+
+/* release all channel resources */
+static void gpi_free_chan_resources(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ enum gpi_pm_state cur_state;
+ int ret, i;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ cur_state = gchan->pm_state;
+
+ /* disable ch state so no more TRE processing for this channel */
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* attempt to do graceful hardware shutdown */
+ if (cur_state == ACTIVE_STATE) {
+ gpi_stop_chan(gchan);
+
+ ret = gpi_send_cmd(gpii, gchan, GPI_CH_CMD_RESET);
+ if (ret)
+ dev_err(gpii->gpi_dev->dev, "error resetting channel:%d\n", ret);
+
+ gpi_reset_chan(gchan, GPI_CH_CMD_DE_ALLOC);
+ }
+
+ /* free all allocated memory */
+ gpi_free_ring(&gchan->ch_ring, gpii);
+ vchan_free_chan_resources(&gchan->vc);
+ kfree(gchan->config);
+
+ write_lock_irq(&gpii->pm_lock);
+ gchan->pm_state = DISABLE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* if other rings are still active exit */
+ for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
+ if (gpii->gchan[i].ch_ring.configured)
+ goto exit_free;
+
+ /* deallocate EV Ring */
+ cur_state = gpii->pm_state;
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = PREPARE_TERMINATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+ /* wait for the event tasklet to finish */
+ tasklet_kill(&gpii->ev_task);
+
+ /* send command to de allocate event ring */
+ if (cur_state == ACTIVE_STATE)
+ gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
+
+ gpi_free_ring(&gpii->ev_ring, gpii);
+
+ /* disable interrupts */
+ if (cur_state == ACTIVE_STATE)
+ gpi_disable_interrupts(gpii);
+
+ /* set final state to disable */
+ write_lock_irq(&gpii->pm_lock);
+ gpii->pm_state = DISABLE_STATE;
+ write_unlock_irq(&gpii->pm_lock);
+
+exit_free:
+ mutex_unlock(&gpii->ctrl_lock);
+}
+
+/* allocate channel resources */
+static int gpi_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct gchan *gchan = to_gchan(chan);
+ struct gpii *gpii = gchan->gpii;
+ int ret;
+
+ mutex_lock(&gpii->ctrl_lock);
+
+ /* allocate memory for transfer ring */
+ ret = gpi_alloc_ring(&gchan->ch_ring, CHAN_TRES,
+ sizeof(struct gpi_tre), gpii);
+ if (ret)
+ goto xfer_alloc_err;
+
+ ret = gpi_ch_init(gchan);
+
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return ret;
+xfer_alloc_err:
+ mutex_unlock(&gpii->ctrl_lock);
+
+ return ret;
+}
+
+static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
+{
+ struct gchan *tx_chan, *rx_chan;
+ unsigned int gpii;
+
+ /* check if same seid is already configured for another chid */
+ for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
+ if (!((1 << gpii) & gpi_dev->gpii_mask))
+ continue;
+
+ tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
+ rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
+
+ if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
+ return gpii;
+ if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
+ return gpii;
+ }
+
+ /* no channels configured with same seid, return next avail gpii */
+ for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
+ if (!((1 << gpii) & gpi_dev->gpii_mask))
+ continue;
+
+ tx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_TX_CHAN];
+ rx_chan = &gpi_dev->gpiis[gpii].gchan[GPI_RX_CHAN];
+
+ /* check if gpii is configured */
+ if (tx_chan->vc.chan.client_count ||
+ rx_chan->vc.chan.client_count)
+ continue;
+
+ /* found a free gpii */
+ return gpii;
+ }
+
+ /* no gpii instance available to use */
+ return -EIO;
+}
+
+/* gpi_of_dma_xlate: open client requested channel */
+static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
+ struct of_dma *of_dma)
+{
+ struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
+ u32 seid, chid;
+ int gpii;
+ struct gchan *gchan;
+
+ if (args->args_count < 3) {
+ dev_err(gpi_dev->dev, "gpii requires a minimum of 3 args, client passed:%d args\n",
+ args->args_count);
+ return NULL;
+ }
+
+ chid = args->args[0];
+ if (chid >= MAX_CHANNELS_PER_GPII) {
+ dev_err(gpi_dev->dev, "gpii channel:%d not valid\n", chid);
+ return NULL;
+ }
+
+ seid = args->args[1];
+
+ /* find next available gpii to use */
+ gpii = gpi_find_avail_gpii(gpi_dev, seid);
+ if (gpii < 0) {
+ dev_err(gpi_dev->dev, "no available gpii instances\n");
+ return NULL;
+ }
+
+ gchan = &gpi_dev->gpiis[gpii].gchan[chid];
+ if (gchan->vc.chan.client_count) {
+ dev_err(gpi_dev->dev, "gpii:%d chid:%d seid:%d already configured\n",
+ gpii, chid, gchan->seid);
+ return NULL;
+ }
+
+ gchan->seid = seid;
+ gchan->protocol = args->args[2];
+
+ return dma_get_slave_channel(&gchan->vc.chan);
+}
+
+static int gpi_probe(struct platform_device *pdev)
+{
+ struct gpi_dev *gpi_dev;
+ unsigned int i;
+ int ret;
+
+ gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
+ if (!gpi_dev)
+ return -ENOMEM;
+
+ gpi_dev->dev = &pdev->dev;
+ gpi_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ gpi_dev->regs = devm_ioremap_resource(gpi_dev->dev, gpi_dev->res);
+ if (IS_ERR(gpi_dev->regs))
+ return PTR_ERR(gpi_dev->regs);
+ gpi_dev->ee_base = gpi_dev->regs;
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channels",
+ &gpi_dev->max_gpii);
+ if (ret) {
+ dev_err(gpi_dev->dev, "missing 'dma-channels' DT property\n");
+ return ret;
+ }
+
+ ret = of_property_read_u32(gpi_dev->dev->of_node, "dma-channel-mask",
+ &gpi_dev->gpii_mask);
+ if (ret) {
+ dev_err(gpi_dev->dev, "missing 'dma-channel-mask' DT property\n");
+ return ret;
+ }
+
+ gpi_dev->ev_factor = EV_FACTOR;
+
+ ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(gpi_dev->dev, "Error setting dma_mask to 64, ret:%d\n", ret);
+ return ret;
+ }
+
+ gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev, sizeof(*gpi_dev->gpiis) *
+ gpi_dev->max_gpii, GFP_KERNEL);
+ if (!gpi_dev->gpiis)
+ return -ENOMEM;
+
+ /* setup all the supported gpii */
+ INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
+ for (i = 0; i < gpi_dev->max_gpii; i++) {
+ struct gpii *gpii = &gpi_dev->gpiis[i];
+ int chan;
+
+ if (!((1 << i) & gpi_dev->gpii_mask))
+ continue;
+
+ /* set up ev cntxt register map */
+ gpii->ev_cntxt_base_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
+ gpii->ev_cntxt_db_reg = gpi_dev->ee_base + GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
+ gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg + CNTXT_4_RING_RP_LSB;
+ gpii->ev_cmd_reg = gpi_dev->ee_base + GPII_n_EV_CH_CMD_OFFS(i);
+ gpii->ieob_clr_reg = gpi_dev->ee_base + GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
+
+ /* set up irq */
+ ret = platform_get_irq(pdev, i);
+ if (ret < 0) {
+ dev_err(gpi_dev->dev, "platform_get_irq failed for %d:%d\n", i, ret);
+ return ret;
+ }
+ gpii->irq = ret;
+
+ /* set up channel specific register info */
+ for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
+ struct gchan *gchan = &gpii->gchan[chan];
+
+ /* set up ch cntxt register map */
+ gchan->ch_cntxt_base_reg = gpi_dev->ee_base +
+ GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
+ gchan->ch_cntxt_db_reg = gpi_dev->ee_base +
+ GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
+ gchan->ch_cmd_reg = gpi_dev->ee_base + GPII_n_CH_CMD_OFFS(i);
+
+ /* vchan setup */
+ vchan_init(&gchan->vc, &gpi_dev->dma_device);
+ gchan->vc.desc_free = gpi_desc_free;
+ gchan->chid = chan;
+ gchan->gpii = gpii;
+ gchan->dir = GPII_CHAN_DIR[chan];
+ }
+ mutex_init(&gpii->ctrl_lock);
+ rwlock_init(&gpii->pm_lock);
+ tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
+ (unsigned long)gpii);
+ init_completion(&gpii->cmd_completion);
+ gpii->gpii_id = i;
+ gpii->regs = gpi_dev->ee_base;
+ gpii->gpi_dev = gpi_dev;
+ }
+
+ platform_set_drvdata(pdev, gpi_dev);
+
+ /* clear and set capabilities */
+ dma_cap_zero(gpi_dev->dma_device.cap_mask);
+ dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
+
+ /* configure dmaengine apis */
+ gpi_dev->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ gpi_dev->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
+ gpi_dev->dma_device.device_alloc_chan_resources = gpi_alloc_chan_resources;
+ gpi_dev->dma_device.device_free_chan_resources = gpi_free_chan_resources;
+ gpi_dev->dma_device.device_tx_status = dma_cookie_status;
+ gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
+ gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
+ gpi_dev->dma_device.device_config = gpi_peripheral_config;
+ gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
+ gpi_dev->dma_device.dev = gpi_dev->dev;
+ gpi_dev->dma_device.device_pause = gpi_pause;
+ gpi_dev->dma_device.device_resume = gpi_resume;
+
+ /* register with dmaengine framework */
+ ret = dma_async_device_register(&gpi_dev->dma_device);
+ if (ret) {
+ dev_err(gpi_dev->dev, "async_device_register failed ret:%d\n", ret);
+ return ret;
+ }
+
+ ret = of_dma_controller_register(gpi_dev->dev->of_node,
+ gpi_of_dma_xlate, gpi_dev);
+ if (ret) {
+ dev_err(gpi_dev->dev, "of_dma_controller_reg failed ret:%d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static const struct of_device_id gpi_of_match[] = {
+ { .compatible = "qcom,sdm845-gpi-dma" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, gpi_of_match);
+
+static struct platform_driver gpi_driver = {
+ .probe = gpi_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = gpi_of_match,
+ },
+};
+
+static int __init gpi_init(void)
+{
+ return platform_driver_register(&gpi_driver);
+}
+subsys_initcall(gpi_init);
+
+MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/qcom_adm.c b/drivers/dma/qcom/qcom_adm.c
new file mode 100644
index 000000000000..ee78bed8d60d
--- /dev/null
+++ b/drivers/dma/qcom/qcom_adm.c
@@ -0,0 +1,905 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_dma.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+
+#include "../dmaengine.h"
+#include "../virt-dma.h"
+
+/* ADM registers - calculated from channel number and security domain */
+#define ADM_CHAN_MULTI 0x4
+#define ADM_CI_MULTI 0x4
+#define ADM_CRCI_MULTI 0x4
+#define ADM_EE_MULTI 0x800
+#define ADM_CHAN_OFFS(chan) (ADM_CHAN_MULTI * (chan))
+#define ADM_EE_OFFS(ee) (ADM_EE_MULTI * (ee))
+#define ADM_CHAN_EE_OFFS(chan, ee) (ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
+#define ADM_CI_OFFS(ci) (ADM_CHAN_OFFS(ci))
+#define ADM_CH_CMD_PTR(chan, ee) (ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_RSLT(chan, ee) (0x40 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_FLUSH_STATE0(chan, ee) (0x80 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_STATUS_SD(chan, ee) (0x200 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_CH_CONF(chan) (0x240 + ADM_CHAN_OFFS(chan))
+#define ADM_CH_RSLT_CONF(chan, ee) (0x300 + ADM_CHAN_EE_OFFS(chan, ee))
+#define ADM_SEC_DOMAIN_IRQ_STATUS(ee) (0x380 + ADM_EE_OFFS(ee))
+#define ADM_CI_CONF(ci) (0x390 + (ci) * ADM_CI_MULTI)
+#define ADM_GP_CTL 0x3d8
+#define ADM_CRCI_CTL(crci, ee) (0x400 + (crci) * ADM_CRCI_MULTI + \
+ ADM_EE_OFFS(ee))
+
+/* channel status */
+#define ADM_CH_STATUS_VALID BIT(1)
+
+/* channel result */
+#define ADM_CH_RSLT_VALID BIT(31)
+#define ADM_CH_RSLT_ERR BIT(3)
+#define ADM_CH_RSLT_FLUSH BIT(2)
+#define ADM_CH_RSLT_TPD BIT(1)
+
+/* channel conf */
+#define ADM_CH_CONF_SHADOW_EN BIT(12)
+#define ADM_CH_CONF_MPU_DISABLE BIT(11)
+#define ADM_CH_CONF_PERM_MPU_CONF BIT(9)
+#define ADM_CH_CONF_FORCE_RSLT_EN BIT(7)
+#define ADM_CH_CONF_SEC_DOMAIN(ee) ((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))
+
+/* channel result conf */
+#define ADM_CH_RSLT_CONF_FLUSH_EN BIT(1)
+#define ADM_CH_RSLT_CONF_IRQ_EN BIT(0)
+
+/* CRCI CTL */
+#define ADM_CRCI_CTL_MUX_SEL BIT(18)
+#define ADM_CRCI_CTL_RST BIT(17)
+
+/* CI configuration */
+#define ADM_CI_RANGE_END(x) ((x) << 24)
+#define ADM_CI_RANGE_START(x) ((x) << 16)
+#define ADM_CI_BURST_4_WORDS BIT(2)
+#define ADM_CI_BURST_8_WORDS BIT(3)
+
+/* GP CTL */
+#define ADM_GP_CTL_LP_EN BIT(12)
+#define ADM_GP_CTL_LP_CNT(x) ((x) << 8)
+
+/* Command pointer list entry */
+#define ADM_CPLE_LP BIT(31)
+#define ADM_CPLE_CMD_PTR_LIST BIT(29)
+
+/* Command list entry */
+#define ADM_CMD_LC BIT(31)
+#define ADM_CMD_DST_CRCI(n) (((n) & 0xf) << 7)
+#define ADM_CMD_SRC_CRCI(n) (((n) & 0xf) << 3)
+
+#define ADM_CMD_TYPE_SINGLE 0x0
+#define ADM_CMD_TYPE_BOX 0x3
+
+#define ADM_CRCI_MUX_SEL BIT(4)
+#define ADM_DESC_ALIGN 8
+#define ADM_MAX_XFER (SZ_64K - 1)
+#define ADM_MAX_ROWS (SZ_64K - 1)
+#define ADM_MAX_CHANNELS 16
+
+struct adm_desc_hw_box {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 row_len;
+ u32 num_rows;
+ u32 row_offset;
+};
+
+struct adm_desc_hw_single {
+ u32 cmd;
+ u32 src_addr;
+ u32 dst_addr;
+ u32 len;
+};
+
+struct adm_async_desc {
+ struct virt_dma_desc vd;
+ struct adm_device *adev;
+
+ size_t length;
+ enum dma_transfer_direction dir;
+ dma_addr_t dma_addr;
+ size_t dma_len;
+
+ void *cpl;
+ dma_addr_t cp_addr;
+ u32 crci;
+ u32 mux;
+ u32 blk_size;
+};
+
+struct adm_chan {
+ struct virt_dma_chan vc;
+ struct adm_device *adev;
+
+ /* parsed from DT */
+ u32 id; /* channel id */
+
+ struct adm_async_desc *curr_txd;
+ struct dma_slave_config slave;
+ struct list_head node;
+
+ int error;
+ int initialized;
+};
+
+static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
+{
+ return container_of(common, struct adm_chan, vc.chan);
+}
+
+struct adm_device {
+ void __iomem *regs;
+ struct device *dev;
+ struct dma_device common;
+ struct device_dma_parameters dma_parms;
+ struct adm_chan *channels;
+
+ u32 ee;
+
+ struct clk *core_clk;
+ struct clk *iface_clk;
+
+ struct reset_control *clk_reset;
+ struct reset_control *c0_reset;
+ struct reset_control *c1_reset;
+ struct reset_control *c2_reset;
+ int irq;
+};
+
+/**
+ * adm_free_chan - Frees dma resources associated with the specific channel
+ *
+ * @chan: dma channel
+ *
+ * Free all allocated descriptors associated with this channel
+ */
+static void adm_free_chan(struct dma_chan *chan)
+{
+ /* free all queued descriptors */
+ vchan_free_chan_resources(to_virt_chan(chan));
+}
+
+/**
+ * adm_get_blksize - Get block size from burst value
+ *
+ * @burst: Burst size of transaction
+ */
+static int adm_get_blksize(unsigned int burst)
+{
+ int ret;
+
+ switch (burst) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ ret = ffs(burst >> 4) - 1;
+ break;
+ case 192:
+ ret = 4;
+ break;
+ case 256:
+ ret = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
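
The encoding is easier to see with numbers: the power-of-two bursts reduce to ffs(burst >> 4) - 1, while 192 and 256 are special-cased. A stand-alone check (hypothetical harness, not driver code):

#include <assert.h>
#include <strings.h> /* ffs() */

int main(void)
{
	assert(ffs(16 >> 4) - 1 == 0);
	assert(ffs(32 >> 4) - 1 == 1);
	assert(ffs(64 >> 4) - 1 == 2);
	assert(ffs(128 >> 4) - 1 == 3);
	/* 192 -> 4 and 256 -> 5 come from the switch; 256 is a power of
	 * two but the formula would give 4, not the required 5 */
	return 0;
}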
+
+/**
+ * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @crci: CRCI value
+ * @burst: Burst size of transaction
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
+ struct scatterlist *sg, u32 crci,
+ u32 burst,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_box *box_desc = NULL;
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 rows, row_offset, crci_cmd;
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ crci_cmd = ADM_CMD_SRC_CRCI(crci);
+ row_offset = burst;
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ crci_cmd = ADM_CMD_DST_CRCI(crci);
+ row_offset = burst << 16;
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ while (remainder >= burst) {
+ box_desc = desc;
+ box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
+ box_desc->row_offset = row_offset;
+ box_desc->src_addr = *src;
+ box_desc->dst_addr = *dst;
+
+ rows = remainder / burst;
+ rows = min_t(u32, rows, ADM_MAX_ROWS);
+ box_desc->num_rows = rows << 16 | rows;
+ box_desc->row_len = burst << 16 | burst;
+
+ *incr_addr += burst * rows;
+ remainder -= burst * rows;
+ desc += sizeof(*box_desc);
+ }
+
+ /* if leftover bytes, do one single descriptor */
+ if (remainder) {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
+ single_desc->len = remainder;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ desc += sizeof(*single_desc);
+
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+ } else {
+ if (box_desc && sg_is_last(sg))
+ box_desc->cmd |= ADM_CMD_LC;
+ }
+
+ return desc;
+}
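
A worked example of the split with hypothetical sizes: a 70000-byte sg entry at a 64-byte burst yields one box descriptor of 1093 full rows (well under ADM_MAX_ROWS) plus one single descriptor for the tail:

#include <assert.h>

int main(void)
{
	unsigned int remainder = 70000, burst = 64;
	unsigned int rows = remainder / burst;

	assert(rows == 1093);      /* one box: 1093 rows x 64 bytes */
	remainder -= rows * burst; /* 70000 - 69952 */
	assert(remainder == 48);   /* tail becomes a single descriptor */
	return 0;
}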
+
+/**
+ * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
+ *
+ * @achan: ADM channel
+ * @desc: Descriptor memory pointer
+ * @sg: Scatterlist entry
+ * @direction: DMA transfer direction
+ */
+static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
+ struct scatterlist *sg,
+ enum dma_transfer_direction direction)
+{
+ struct adm_desc_hw_single *single_desc;
+ u32 remainder = sg_dma_len(sg);
+ u32 mem_addr = sg_dma_address(sg);
+ u32 *incr_addr = &mem_addr;
+ u32 *src, *dst;
+
+ if (direction == DMA_DEV_TO_MEM) {
+ src = &achan->slave.src_addr;
+ dst = &mem_addr;
+ } else {
+ src = &mem_addr;
+ dst = &achan->slave.dst_addr;
+ }
+
+ do {
+ single_desc = desc;
+ single_desc->cmd = ADM_CMD_TYPE_SINGLE;
+ single_desc->src_addr = *src;
+ single_desc->dst_addr = *dst;
+ single_desc->len = (remainder > ADM_MAX_XFER) ?
+ ADM_MAX_XFER : remainder;
+
+ remainder -= single_desc->len;
+ *incr_addr += single_desc->len;
+ desc += sizeof(*single_desc);
+ } while (remainder);
+
+ /* set last command if this is the end of the whole transaction */
+ if (sg_is_last(sg))
+ single_desc->cmd |= ADM_CMD_LC;
+
+ return desc;
+}
+
+/**
+ * adm_prep_slave_sg - Prep slave sg transaction
+ *
+ * @chan: dma channel
+ * @sgl: scatter gather list
+ * @sg_len: length of sg
+ * @direction: DMA transfer direction
+ * @flags: DMA flags
+ * @context: transfer context (unused)
+ */
+static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
+ struct scatterlist *sgl,
+ unsigned int sg_len,
+ enum dma_transfer_direction direction,
+ unsigned long flags,
+ void *context)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+ struct scatterlist *sg;
+ dma_addr_t cple_addr;
+ u32 i, burst;
+ u32 single_count = 0, box_count = 0, crci = 0;
+ void *desc;
+ u32 *cple;
+ int blk_size = 0;
+
+ if (!is_slave_direction(direction)) {
+ dev_err(adev->dev, "invalid dma direction\n");
+ return NULL;
+ }
+
+ /*
+ * get burst value from slave configuration
+ */
+ burst = (direction == DMA_MEM_TO_DEV) ?
+ achan->slave.dst_maxburst :
+ achan->slave.src_maxburst;
+
+ /* if using flow control, validate burst and crci values */
+ if (achan->slave.device_fc) {
+ blk_size = adm_get_blksize(burst);
+ if (blk_size < 0) {
+ dev_err(adev->dev, "invalid burst value: %d\n",
+ burst);
+ return ERR_PTR(-EINVAL);
+ }
+
+ crci = achan->slave.slave_id & 0xf;
+ if (!crci || achan->slave.slave_id > 0x1f) {
+ dev_err(adev->dev, "invalid crci value\n");
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /* iterate through sgs and compute allocation size of structures */
+ for_each_sg(sgl, sg, sg_len, i) {
+ if (achan->slave.device_fc) {
+ box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
+ ADM_MAX_ROWS);
+ if (sg_dma_len(sg) % burst)
+ single_count++;
+ } else {
+ single_count += DIV_ROUND_UP(sg_dma_len(sg),
+ ADM_MAX_XFER);
+ }
+ }
+
+ async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
+ if (!async_desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (crci)
+ async_desc->mux = achan->slave.slave_id & ADM_CRCI_MUX_SEL ?
+ ADM_CRCI_CTL_MUX_SEL : 0;
+ async_desc->crci = crci;
+ async_desc->blk_size = blk_size;
+ async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
+ box_count * sizeof(struct adm_desc_hw_box) +
+ sizeof(*cple) + 2 * ADM_DESC_ALIGN;
+
+ async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
+ if (!async_desc->cpl)
+ goto free;
+
+ async_desc->adev = adev;
+
+ /* both command list entry and descriptors must be 8 byte aligned */
+ cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
+ desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);
+
+ for_each_sg(sgl, sg, sg_len, i) {
+ async_desc->length += sg_dma_len(sg);
+
+ if (achan->slave.device_fc)
+ desc = adm_process_fc_descriptors(achan, desc, sg, crci,
+ burst, direction);
+ else
+ desc = adm_process_non_fc_descriptors(achan, desc, sg,
+ direction);
+ }
+
+ async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
+ async_desc->dma_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(adev->dev, async_desc->dma_addr))
+ goto free;
+
+ cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);
+
+ /* init cmd list */
+ dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
+ DMA_TO_DEVICE);
+ *cple = ADM_CPLE_LP;
+ *cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
+ dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
+ DMA_TO_DEVICE);
+
+ return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);
+
+free:
+ kfree(async_desc);
+ return ERR_PTR(-ENOMEM);
+}
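
The *cple write above packs a bus address and flag bits into one word: descriptors are 8-byte aligned, so the address is stored shifted right by 3 and the freed low bits carry flags such as ADM_CPLE_LP. A stand-alone round-trip with a hypothetical address (not driver code):

#include <assert.h>
#include <stdint.h>

#define CPLE_LP    (1u << 31) /* mirrors ADM_CPLE_LP */
#define DESC_ALIGN 8          /* mirrors ADM_DESC_ALIGN */

int main(void)
{
	uint32_t dma_addr = 0x40001000; /* hypothetical, 8-byte aligned */
	uint32_t cple = CPLE_LP | ((dma_addr + DESC_ALIGN) >> 3);

	assert(((cple & ~CPLE_LP) << 3) == dma_addr + DESC_ALIGN);
	return 0;
}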
+
+/**
+ * adm_terminate_all - terminate all transactions on a channel
+ * @chan: dma channel
+ *
+ * Dequeues and frees all transactions, aborts current transaction
+ * No callbacks are done
+ *
+ */
+static int adm_terminate_all(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct adm_device *adev = achan->adev;
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ vchan_get_all_descriptors(&achan->vc, &head);
+
+ /* send flush command to terminate current transaction */
+ writel_relaxed(0x0,
+ adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ vchan_dma_desc_free_list(&achan->vc, &head);
+
+ return 0;
+}
+
+static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flag;
+
+ spin_lock_irqsave(&achan->vc.lock, flag);
+ memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
+ spin_unlock_irqrestore(&achan->vc.lock, flag);
+
+ return 0;
+}
+
+/**
+ * adm_start_dma - start next transaction
+ * @achan: ADM dma channel
+ */
+static void adm_start_dma(struct adm_chan *achan)
+{
+ struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
+ struct adm_device *adev = achan->adev;
+ struct adm_async_desc *async_desc;
+
+ lockdep_assert_held(&achan->vc.lock);
+
+ if (!vd)
+ return;
+
+ list_del(&vd->node);
+
+ /* write next command list out to the CMD FIFO */
+ async_desc = container_of(vd, struct adm_async_desc, vd);
+ achan->curr_txd = async_desc;
+
+ /* reset channel error */
+ achan->error = 0;
+
+ if (!achan->initialized) {
+ /* enable interrupts */
+ writel(ADM_CH_CONF_SHADOW_EN |
+ ADM_CH_CONF_PERM_MPU_CONF |
+ ADM_CH_CONF_MPU_DISABLE |
+ ADM_CH_CONF_SEC_DOMAIN(adev->ee),
+ adev->regs + ADM_CH_CONF(achan->id));
+
+ writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
+ adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ achan->initialized = 1;
+ }
+
+ /* set the crci block size if this transaction requires CRCI */
+ if (async_desc->crci) {
+ writel(async_desc->mux | async_desc->blk_size,
+ adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
+ }
+
+ /* make sure IRQ enable doesn't get reordered */
+ wmb();
+
+ /* write next command list out to the CMD FIFO */
+ writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
+ adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
+}
+
+/**
+ * adm_dma_irq - irq handler for ADM controller
+ * @irq: IRQ of interrupt
+ * @data: callback data
+ *
+ * IRQ handler for the ADM controller
+ */
+static irqreturn_t adm_dma_irq(int irq, void *data)
+{
+ struct adm_device *adev = data;
+ u32 srcs, i;
+ struct adm_async_desc *async_desc;
+ unsigned long flags;
+
+ srcs = readl_relaxed(adev->regs +
+ ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ struct adm_chan *achan = &adev->channels[i];
+ u32 status, result;
+
+ if (srcs & BIT(i)) {
+ status = readl_relaxed(adev->regs +
+ ADM_CH_STATUS_SD(i, adev->ee));
+
+ /* if no result present, skip */
+ if (!(status & ADM_CH_STATUS_VALID))
+ continue;
+
+ result = readl_relaxed(adev->regs +
+ ADM_CH_RSLT(i, adev->ee));
+
+ /* no valid results, skip */
+ if (!(result & ADM_CH_RSLT_VALID))
+ continue;
+
+ /* flag error if transaction was flushed or failed */
+ if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
+ achan->error = 1;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+ async_desc = achan->curr_txd;
+
+ achan->curr_txd = NULL;
+
+ if (async_desc) {
+ vchan_cookie_complete(&async_desc->vd);
+
+ /* kick off next DMA */
+ adm_start_dma(achan);
+ }
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * adm_tx_status - returns status of transaction
+ * @chan: dma channel
+ * @cookie: transaction cookie
+ * @txstate: DMA transaction state
+ *
+ * Return status of dma transaction
+ */
+static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ struct virt_dma_desc *vd;
+ enum dma_status ret;
+ unsigned long flags;
+ size_t residue = 0;
+
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (ret == DMA_COMPLETE || !txstate)
+ return ret;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ vd = vchan_find_desc(&achan->vc, cookie);
+ if (vd)
+ residue = container_of(vd, struct adm_async_desc, vd)->length;
+
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+
+ /*
+ * residue is either the full length if it is in the issued list, or 0
+ * if it is in progress. We have no reliable way of determining
+ * anything in between
+ */
+ dma_set_residue(txstate, residue);
+
+ if (achan->error)
+ return DMA_ERROR;
+
+ return ret;
+}
+
+/**
+ * adm_issue_pending - starts pending transactions
+ * @chan: dma channel
+ *
+ * Issues all pending transactions and starts DMA
+ */
+static void adm_issue_pending(struct dma_chan *chan)
+{
+ struct adm_chan *achan = to_adm_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&achan->vc.lock, flags);
+
+ if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
+ adm_start_dma(achan);
+ spin_unlock_irqrestore(&achan->vc.lock, flags);
+}
+
+/**
+ * adm_dma_free_desc - free descriptor memory
+ * @vd: virtual descriptor
+ *
+ */
+static void adm_dma_free_desc(struct virt_dma_desc *vd)
+{
+ struct adm_async_desc *async_desc = container_of(vd,
+ struct adm_async_desc, vd);
+
+ dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
+ async_desc->dma_len, DMA_TO_DEVICE);
+ kfree(async_desc->cpl);
+ kfree(async_desc);
+}
+
+static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
+ u32 index)
+{
+ achan->id = index;
+ achan->adev = adev;
+
+ vchan_init(&achan->vc, &adev->common);
+ achan->vc.desc_free = adm_dma_free_desc;
+}
+
+static int adm_dma_probe(struct platform_device *pdev)
+{
+ struct adm_device *adev;
+ int ret;
+ u32 i;
+
+ adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
+ if (!adev)
+ return -ENOMEM;
+
+ adev->dev = &pdev->dev;
+
+ adev->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(adev->regs))
+ return PTR_ERR(adev->regs);
+
+ adev->irq = platform_get_irq(pdev, 0);
+ if (adev->irq < 0)
+ return adev->irq;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
+ if (ret) {
+ dev_err(adev->dev, "Execution environment unspecified\n");
+ return ret;
+ }
+
+ adev->core_clk = devm_clk_get(adev->dev, "core");
+ if (IS_ERR(adev->core_clk))
+ return PTR_ERR(adev->core_clk);
+
+ adev->iface_clk = devm_clk_get(adev->dev, "iface");
+ if (IS_ERR(adev->iface_clk))
+ return PTR_ERR(adev->iface_clk);
+
+ adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
+ if (IS_ERR(adev->clk_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 reset\n");
+ return PTR_ERR(adev->clk_reset);
+ }
+
+ adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
+ if (IS_ERR(adev->c0_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
+ return PTR_ERR(adev->c0_reset);
+ }
+
+ adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
+ if (IS_ERR(adev->c1_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
+ return PTR_ERR(adev->c1_reset);
+ }
+
+ adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
+ if (IS_ERR(adev->c2_reset)) {
+ dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
+ return PTR_ERR(adev->c2_reset);
+ }
+
+ ret = clk_prepare_enable(adev->core_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adev->iface_clk);
+ if (ret) {
+ dev_err(adev->dev, "failed to prepare/enable iface clock\n");
+ goto err_disable_core_clk;
+ }
+
+ reset_control_assert(adev->clk_reset);
+ reset_control_assert(adev->c0_reset);
+ reset_control_assert(adev->c1_reset);
+ reset_control_assert(adev->c2_reset);
+
+ udelay(2);
+
+ reset_control_deassert(adev->clk_reset);
+ reset_control_deassert(adev->c0_reset);
+ reset_control_deassert(adev->c1_reset);
+ reset_control_deassert(adev->c2_reset);
+
+ adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
+ sizeof(*adev->channels), GFP_KERNEL);
+
+ if (!adev->channels) {
+ ret = -ENOMEM;
+ goto err_disable_clks;
+ }
+
+ /* allocate and initialize channels */
+ INIT_LIST_HEAD(&adev->common.channels);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++)
+ adm_channel_init(adev, &adev->channels[i], i);
+
+ /* reset CRCIs */
+ for (i = 0; i < 16; i++)
+ writel(ADM_CRCI_CTL_RST, adev->regs +
+ ADM_CRCI_CTL(i, adev->ee));
+
+ /* configure client interfaces */
+ writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
+ writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
+ writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
+ ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
+ writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
+ adev->regs + ADM_GP_CTL);
+
+ ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
+ 0, "adm_dma", adev);
+ if (ret)
+ goto err_disable_clks;
+
+ platform_set_drvdata(pdev, adev);
+
+ adev->common.dev = adev->dev;
+ adev->common.dev->dma_parms = &adev->dma_parms;
+
+ /* set capabilities */
+ dma_cap_zero(adev->common.cap_mask);
+ dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
+ dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);
+
+ /* initialize dmaengine apis */
+ adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
+ adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ adev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ adev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ adev->common.device_free_chan_resources = adm_free_chan;
+ adev->common.device_prep_slave_sg = adm_prep_slave_sg;
+ adev->common.device_issue_pending = adm_issue_pending;
+ adev->common.device_tx_status = adm_tx_status;
+ adev->common.device_terminate_all = adm_terminate_all;
+ adev->common.device_config = adm_slave_config;
+
+ ret = dma_async_device_register(&adev->common);
+ if (ret) {
+ dev_err(adev->dev, "failed to register dma async device\n");
+ goto err_disable_clks;
+ }
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ of_dma_xlate_by_chan_id,
+ &adev->common);
+ if (ret)
+ goto err_unregister_dma;
+
+ return 0;
+
+err_unregister_dma:
+ dma_async_device_unregister(&adev->common);
+err_disable_clks:
+ clk_disable_unprepare(adev->iface_clk);
+err_disable_core_clk:
+ clk_disable_unprepare(adev->core_clk);
+
+ return ret;
+}
+
+static int adm_dma_remove(struct platform_device *pdev)
+{
+ struct adm_device *adev = platform_get_drvdata(pdev);
+ struct adm_chan *achan;
+ u32 i;
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&adev->common);
+
+ for (i = 0; i < ADM_MAX_CHANNELS; i++) {
+ achan = &adev->channels[i];
+
+ /* mask IRQs for this channel/EE pair */
+ writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));
+
+ tasklet_kill(&adev->channels[i].vc.task);
+ adm_terminate_all(&adev->channels[i].vc.chan);
+ }
+
+ devm_free_irq(adev->dev, adev->irq, adev);
+
+ clk_disable_unprepare(adev->core_clk);
+ clk_disable_unprepare(adev->iface_clk);
+
+ return 0;
+}
+
+static const struct of_device_id adm_of_match[] = {
+ { .compatible = "qcom,adm", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, adm_of_match);
+
+static struct platform_driver adm_dma_driver = {
+ .probe = adm_dma_probe,
+ .remove = adm_dma_remove,
+ .driver = {
+ .name = "adm-dma-engine",
+ .of_match_table = adm_of_match,
+ },
+};
+
+module_platform_driver(adm_dma_driver);
+
+MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
+MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index 528deb5d9f31..c4c4e8575764 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -326,10 +326,9 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
struct sf_pdma_chan *chan = dev_id;
struct pdma_regs *regs = &chan->regs;
- unsigned long flags;
u64 residue;
- spin_lock_irqsave(&chan->vchan.lock, flags);
+ spin_lock(&chan->vchan.lock);
writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
residue = readq(regs->residue);
@@ -346,7 +345,7 @@ static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
sf_pdma_xfer_desc(chan);
}
- spin_unlock_irqrestore(&chan->vchan.lock, flags);
+ spin_unlock(&chan->vchan.lock);
return IRQ_HANDLED;
}
@@ -355,11 +354,10 @@ static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
struct sf_pdma_chan *chan = dev_id;
struct pdma_regs *regs = &chan->regs;
- unsigned long flags;
- spin_lock_irqsave(&chan->lock, flags);
+ spin_lock(&chan->lock);
writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
- spin_unlock_irqrestore(&chan->lock, flags);
+ spin_unlock(&chan->lock);
tasklet_schedule(&chan->err_tasklet);
@@ -584,7 +582,7 @@ static struct platform_driver sf_pdma_driver = {
.remove = sf_pdma_remove,
.driver = {
.name = "sf-pdma",
- .of_match_table = of_match_ptr(sf_pdma_dt_ids),
+ .of_match_table = sf_pdma_dt_ids,
},
};
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 77ab1f4730be..4256e55bbf25 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1643,13 +1643,12 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
u32 row;
long chan = -1;
struct d40_chan *d40c;
- unsigned long flags;
struct d40_base *base = data;
u32 *regs = base->regs_interrupt;
struct d40_interrupt_lookup *il = base->gen_dmac.il;
u32 il_size = base->gen_dmac.il_size;
- spin_lock_irqsave(&base->interrupt_lock, flags);
+ spin_lock(&base->interrupt_lock);
/* Read interrupt status of both logical and physical channels */
for (i = 0; i < il_size; i++)
@@ -1694,7 +1693,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
spin_unlock(&d40c->lock);
}
- spin_unlock_irqrestore(&base->interrupt_lock, flags);
+ spin_unlock(&base->interrupt_lock);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index d0055d2f0b9a..f54ecb123a52 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -264,9 +264,11 @@ static int stm32_dma_get_width(struct stm32_dma_chan *chan,
}
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
+ dma_addr_t buf_addr,
u32 threshold)
{
enum dma_slave_buswidth max_width;
+ u64 addr = buf_addr;
if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -277,6 +279,9 @@ static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
max_width = max_width >> 1;
+ if (do_div(addr, max_width))
+ max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
return max_width;
}
@@ -648,21 +653,12 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
- if (status & STM32_DMA_TCI) {
- stm32_dma_irq_clear(chan, STM32_DMA_TCI);
- if (scr & STM32_DMA_SCR_TCIE)
- stm32_dma_handle_chan_done(chan);
- status &= ~STM32_DMA_TCI;
- }
- if (status & STM32_DMA_HTI) {
- stm32_dma_irq_clear(chan, STM32_DMA_HTI);
- status &= ~STM32_DMA_HTI;
- }
if (status & STM32_DMA_FEI) {
stm32_dma_irq_clear(chan, STM32_DMA_FEI);
status &= ~STM32_DMA_FEI;
if (sfcr & STM32_DMA_SFCR_FEIE) {
- if (!(scr & STM32_DMA_SCR_EN))
+ if (!(scr & STM32_DMA_SCR_EN) &&
+ !(status & STM32_DMA_TCI))
dev_err(chan2dev(chan), "FIFO Error\n");
else
dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
@@ -674,6 +670,19 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
if (sfcr & STM32_DMA_SCR_DMEIE)
dev_dbg(chan2dev(chan), "Direct mode overrun\n");
}
+
+ if (status & STM32_DMA_TCI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_TCI);
+ if (scr & STM32_DMA_SCR_TCIE)
+ stm32_dma_handle_chan_done(chan);
+ status &= ~STM32_DMA_TCI;
+ }
+
+ if (status & STM32_DMA_HTI) {
+ stm32_dma_irq_clear(chan, STM32_DMA_HTI);
+ status &= ~STM32_DMA_HTI;
+ }
+
if (status) {
stm32_dma_irq_clear(chan, status);
dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
@@ -703,7 +712,7 @@ static void stm32_dma_issue_pending(struct dma_chan *c)
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
enum dma_transfer_direction direction,
enum dma_slave_buswidth *buswidth,
- u32 buf_len)
+ u32 buf_len, dma_addr_t buf_addr)
{
enum dma_slave_buswidth src_addr_width, dst_addr_width;
int src_bus_width, dst_bus_width;
@@ -735,7 +744,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return dst_burst_size;
/* Set memory data size */
- src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+ src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+ fifoth);
chan->mem_width = src_addr_width;
src_bus_width = stm32_dma_get_width(chan, src_addr_width);
if (src_bus_width < 0)
@@ -784,7 +794,8 @@ static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
return src_burst_size;
/* Set memory data size */
- dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
+ dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
+ fifoth);
chan->mem_width = dst_addr_width;
dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
if (dst_bus_width < 0)
@@ -872,7 +883,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
for_each_sg(sgl, sg, sg_len, i) {
ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
- sg_dma_len(sg));
+ sg_dma_len(sg),
+ sg_dma_address(sg));
if (ret < 0)
goto err;
@@ -940,7 +952,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
return NULL;
}
- ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
+ ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
+ buf_addr);
if (ret < 0)
return NULL;
@@ -1216,6 +1229,8 @@ static void stm32_dma_free_chan_resources(struct dma_chan *c)
pm_runtime_put(dmadev->ddev.dev);
vchan_free_chan_resources(to_virt_chan(c));
+ stm32_dma_clear_reg(&chan->chan_reg);
+ chan->threshold = 0;
}
static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
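
The buf_addr parameter threaded through stm32_dma_get_max_width() above clamps the memory-side bus width when the buffer address is not aligned to it; do_div() is used because dma_addr_t may be 64-bit. A minimal sketch of that clamp under those assumptions, with illustrative names and widths in bytes:

#include <asm/div64.h>
#include <linux/types.h>

static u32 demo_max_width(u32 buf_len, dma_addr_t buf_addr, u32 max_width)
{
	u64 addr = buf_addr;

	/* Halve the width until it no longer exceeds the buffer length. */
	while (max_width > 1 && buf_len < max_width)
		max_width >>= 1;

	/* do_div() divides addr in place and returns the remainder. */
	if (do_div(addr, max_width))
		max_width = 1;	/* misaligned: fall back to byte accesses */

	return max_width;
}
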
diff --git a/drivers/dma/stm32-dmamux.c b/drivers/dma/stm32-dmamux.c
index a10ccd964376..ef0d0555103d 100644
--- a/drivers/dma/stm32-dmamux.c
+++ b/drivers/dma/stm32-dmamux.c
@@ -168,7 +168,7 @@ error_chan_id:
return ERR_PTR(ret);
}
-static const struct of_device_id stm32_stm32dma_master_match[] = {
+static const struct of_device_id stm32_stm32dma_master_match[] __maybe_unused = {
{ .compatible = "st,stm32-dma", },
{},
};
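
Two recurring cleanups in this series address the same !CONFIG_OF warning: of_match_ptr() expands to NULL when CONFIG_OF is disabled, leaving the match table defined but unreferenced. The sf-pdma and ti/dma-crossbar hunks drop of_match_ptr() so the table is always referenced; tables used only under CONFIG_OF, as here, are instead annotated __maybe_unused. A minimal sketch of both forms, with hypothetical names:

#include <linux/mod_devicetable.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Referenced only under CONFIG_OF: silence the unused-variable warning. */
static const struct of_device_id demo_aux_match[] __maybe_unused = {
	{ .compatible = "vendor,demo-aux", },
	{ /* sentinel */ }
};

static const struct of_device_id demo_match[] = {
	{ .compatible = "vendor,demo", },
	{ /* sentinel */ }
};

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		/* Reference the table directly instead of of_match_ptr(). */
		.of_match_table = demo_match,
	},
};
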
diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
index 08cfbfab837b..36ba8b43e78d 100644
--- a/drivers/dma/stm32-mdma.c
+++ b/drivers/dma/stm32-mdma.c
@@ -199,7 +199,7 @@
#define STM32_MDMA_MAX_CHANNELS 63
#define STM32_MDMA_MAX_REQUESTS 256
#define STM32_MDMA_MAX_BURST 128
-#define STM32_MDMA_VERY_HIGH_PRIORITY 0x11
+#define STM32_MDMA_VERY_HIGH_PRIORITY 0x3
enum stm32_mdma_trigger_mode {
STM32_MDMA_BUFFER,
@@ -339,7 +339,7 @@ static struct stm32_mdma_desc *stm32_mdma_alloc_desc(
struct stm32_mdma_desc *desc;
int i;
- desc = kzalloc(offsetof(typeof(*desc), node[count]), GFP_NOWAIT);
+ desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
if (!desc)
return NULL;
@@ -1346,7 +1346,7 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
{
struct stm32_mdma_device *dmadev = devid;
struct stm32_mdma_chan *chan = devid;
- u32 reg, id, ien, status, flag;
+ u32 reg, id, ccr, ien, status;
/* Find out which channel generates the interrupt */
status = readl_relaxed(dmadev->base + STM32_MDMA_GISR0);
@@ -1368,67 +1368,71 @@ static irqreturn_t stm32_mdma_irq_handler(int irq, void *devid)
chan = &dmadev->chan[id];
if (!chan) {
- dev_dbg(mdma2dev(dmadev), "MDMA channel not initialized\n");
- goto exit;
+ dev_warn(mdma2dev(dmadev), "MDMA channel not initialized\n");
+ return IRQ_NONE;
}
/* Handle interrupt for the channel */
spin_lock(&chan->vchan.lock);
- status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(chan->id));
- ien = stm32_mdma_read(dmadev, STM32_MDMA_CCR(chan->id));
- ien &= STM32_MDMA_CCR_IRQ_MASK;
- ien >>= 1;
+ status = stm32_mdma_read(dmadev, STM32_MDMA_CISR(id));
+ /* Mask Channel ReQuest Active bit which can be set in case of MEM2MEM */
+ status &= ~STM32_MDMA_CISR_CRQA;
+ ccr = stm32_mdma_read(dmadev, STM32_MDMA_CCR(id));
+ ien = (ccr & STM32_MDMA_CCR_IRQ_MASK) >> 1;
if (!(status & ien)) {
spin_unlock(&chan->vchan.lock);
- dev_dbg(chan2dev(chan),
- "spurious it (status=0x%04x, ien=0x%04x)\n",
- status, ien);
+ dev_warn(chan2dev(chan),
+ "spurious it (status=0x%04x, ien=0x%04x)\n",
+ status, ien);
return IRQ_NONE;
}
- flag = __ffs(status & ien);
- reg = STM32_MDMA_CIFCR(chan->id);
+ reg = STM32_MDMA_CIFCR(id);
- switch (1 << flag) {
- case STM32_MDMA_CISR_TEIF:
- id = chan->id;
- status = readl_relaxed(dmadev->base + STM32_MDMA_CESR(id));
- dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n", status);
+ if (status & STM32_MDMA_CISR_TEIF) {
+ dev_err(chan2dev(chan), "Transfer Err: stat=0x%08x\n",
+ readl_relaxed(dmadev->base + STM32_MDMA_CESR(id)));
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CTEIF);
- break;
+ status &= ~STM32_MDMA_CISR_TEIF;
+ }
- case STM32_MDMA_CISR_CTCIF:
+ if (status & STM32_MDMA_CISR_CTCIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CCTCIF);
+ status &= ~STM32_MDMA_CISR_CTCIF;
stm32_mdma_xfer_end(chan);
- break;
+ }
- case STM32_MDMA_CISR_BRTIF:
+ if (status & STM32_MDMA_CISR_BRTIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBRTIF);
- break;
+ status &= ~STM32_MDMA_CISR_BRTIF;
+ }
- case STM32_MDMA_CISR_BTIF:
+ if (status & STM32_MDMA_CISR_BTIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CBTIF);
+ status &= ~STM32_MDMA_CISR_BTIF;
chan->curr_hwdesc++;
if (chan->desc && chan->desc->cyclic) {
if (chan->curr_hwdesc == chan->desc->count)
chan->curr_hwdesc = 0;
vchan_cyclic_callback(&chan->desc->vdesc);
}
- break;
+ }
- case STM32_MDMA_CISR_TCIF:
+ if (status & STM32_MDMA_CISR_TCIF) {
stm32_mdma_set_bits(dmadev, reg, STM32_MDMA_CIFCR_CLTCIF);
- break;
+ status &= ~STM32_MDMA_CISR_TCIF;
+ }
- default:
- dev_err(chan2dev(chan), "it %d unhandled (status=0x%04x)\n",
- 1 << flag, status);
+ if (status) {
+ stm32_mdma_set_bits(dmadev, reg, status);
+ dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
+ if (!(ccr & STM32_MDMA_CCR_EN))
+ dev_err(chan2dev(chan), "chan disabled by HW\n");
}
spin_unlock(&chan->vchan.lock);
-exit:
return IRQ_HANDLED;
}
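
Among the stm32-mdma changes above, the kzalloc() hunk swaps the open-coded offsetof(typeof(*desc), node[count]) for struct_size(). Both compute the size of a structure with a trailing flexible array, but struct_size() saturates to SIZE_MAX on multiplication overflow, so the allocation fails cleanly instead of being undersized. A minimal sketch, assuming a hypothetical descriptor type:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_desc {
	unsigned int count;
	u64 node[];		/* one entry per hardware descriptor */
};

static struct demo_desc *demo_alloc_desc(unsigned int count)
{
	struct demo_desc *desc;

	/* sizeof(*desc) + count * sizeof(desc->node[0]), overflow-safe */
	desc = kzalloc(struct_size(desc, node, count), GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->count = count;
	return desc;
}
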
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index f5f9c86c50bc..5cadd4d2b824 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -1174,6 +1174,30 @@ static struct sun6i_dma_config sun50i_a64_dma_cfg = {
};
/*
+ * TODO: Add support for more than 4 GiB of physical addressing.
+ *
+ * The A100 binding uses the number of dma channels from the
+ * device tree node.
+ */
+static struct sun6i_dma_config sun50i_a100_dma_cfg = {
+ .clock_autogate_enable = sun6i_enable_clock_autogate_h3,
+ .set_burst_length = sun6i_set_burst_length_h3,
+ .set_drq = sun6i_set_drq_h6,
+ .set_mode = sun6i_set_mode_h6,
+ .src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
+ .src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
+ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
+ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
+ .has_mbus_clk = true,
+};
+
+/*
* The H6 binding uses the number of dma channels from the
* device tree node.
*/
@@ -1225,6 +1249,7 @@ static const struct of_device_id sun6i_dma_match[] = {
{ .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
{ .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
{ .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
+ { .compatible = "allwinner,sun50i-a100-dma", .data = &sun50i_a100_dma_cfg },
{ .compatible = "allwinner,sun50i-h6-dma", .data = &sun50i_h6_dma_cfg },
{ /* sentinel */ }
};
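
The A100 configuration above follows the driver's existing convention: supported burst lengths and bus widths are capability bitmasks where each legal value v is encoded as BIT(v) (the DMA_SLAVE_BUSWIDTH_* enum values equal their width in bytes). A query then reduces to a single mask test; a minimal sketch with illustrative names:

#include <linux/bitops.h>
#include <linux/types.h>

/* e.g. mask = BIT(1) | BIT(4) | BIT(8) | BIT(16) for bursts of 1/4/8/16 */
static bool demo_value_supported(u32 mask, unsigned int value)
{
	return value < BITS_PER_TYPE(mask) && (mask & BIT(value));
}
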
diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
index c5fa2ef74abc..4735742e826d 100644
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -408,19 +408,18 @@ static irqreturn_t tegra_adma_isr(int irq, void *dev_id)
{
struct tegra_adma_chan *tdc = dev_id;
unsigned long status;
- unsigned long flags;
- spin_lock_irqsave(&tdc->vc.lock, flags);
+ spin_lock(&tdc->vc.lock);
status = tegra_adma_irq_clear(tdc);
if (status == 0 || !tdc->desc) {
- spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ spin_unlock(&tdc->vc.lock);
return IRQ_NONE;
}
vchan_cyclic_callback(&tdc->desc->vd);
- spin_unlock_irqrestore(&tdc->vc.lock, flags);
+ spin_unlock(&tdc->vc.lock);
return IRQ_HANDLED;
}
diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile
index 0c67254caee6..bd496efadff7 100644
--- a/drivers/dma/ti/Makefile
+++ b/drivers/dma/ti/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o
obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o \
k3-psil-am654.o \
k3-psil-j721e.o \
- k3-psil-j7200.o
+ k3-psil-j7200.o \
+ k3-psil-am64.o
obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index 4ba8fa5d9c36..71d24fc07c00 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -122,7 +122,7 @@ static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
return map;
}
-static const struct of_device_id ti_am335x_master_match[] = {
+static const struct of_device_id ti_am335x_master_match[] __maybe_unused = {
{ .compatible = "ti,edma3-tpcc", },
{},
};
@@ -292,7 +292,7 @@ static const u32 ti_dma_offset[] = {
[TI_XBAR_SDMA_OFFSET] = 1,
};
-static const struct of_device_id ti_dra7_master_match[] = {
+static const struct of_device_id ti_dra7_master_match[] __maybe_unused = {
{
.compatible = "ti,omap4430-sdma",
.data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
@@ -460,7 +460,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev)
static struct platform_driver ti_dma_xbar_driver = {
.driver = {
.name = "ti-dma-crossbar",
- .of_match_table = of_match_ptr(ti_dma_xbar_match),
+ .of_match_table = ti_dma_xbar_match,
},
.probe = ti_dma_xbar_probe,
};
diff --git a/drivers/dma/ti/k3-psil-am64.c b/drivers/dma/ti/k3-psil-am64.c
new file mode 100644
index 000000000000..9fdeaa11a4fc
--- /dev/null
+++ b/drivers/dma/ti/k3-psil-am64.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com
+ * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
+ */
+
+#include <linux/kernel.h>
+
+#include "k3-psil-priv.h"
+
+#define PSIL_PDMA_XY_TR(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ }, \
+ }
+
+#define PSIL_PDMA_XY_PKT(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_PDMA_XY, \
+ .mapped_channel_id = -1, \
+ .default_flow_id = -1, \
+ .pkt_mode = 1, \
+ }, \
+ }
+
+#define PSIL_ETHERNET(x, ch, flow_base, flow_cnt) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 16, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = flow_base, \
+ }, \
+ }
+
+#define PSIL_SAUL(x, ch, flow_base, flow_cnt, default_flow, tx) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ .pkt_mode = 1, \
+ .needs_epib = 1, \
+ .psd_size = 64, \
+ .mapped_channel_id = ch, \
+ .flow_start = flow_base, \
+ .flow_num = flow_cnt, \
+ .default_flow_id = default_flow, \
+ .notdpkt = tx, \
+ }, \
+ }
+
+/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
+static struct psil_ep am64_src_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0x4000, 17, 32, 8, 32, 0),
+ PSIL_SAUL(0x4001, 18, 32, 8, 33, 0),
+ PSIL_SAUL(0x4002, 19, 40, 8, 40, 0),
+ PSIL_SAUL(0x4003, 20, 40, 8, 41, 0),
+ /* ICSS_G0 */
+ PSIL_ETHERNET(0x4100, 21, 48, 16),
+ PSIL_ETHERNET(0x4101, 22, 64, 16),
+ PSIL_ETHERNET(0x4102, 23, 80, 16),
+ PSIL_ETHERNET(0x4103, 24, 96, 16),
+ /* ICSS_G1 */
+ PSIL_ETHERNET(0x4200, 25, 112, 16),
+ PSIL_ETHERNET(0x4201, 26, 128, 16),
+ PSIL_ETHERNET(0x4202, 27, 144, 16),
+ PSIL_ETHERNET(0x4203, 28, 160, 16),
+ /* PDMA_MAIN0 - SPI0-3 */
+ PSIL_PDMA_XY_PKT(0x4300),
+ PSIL_PDMA_XY_PKT(0x4301),
+ PSIL_PDMA_XY_PKT(0x4302),
+ PSIL_PDMA_XY_PKT(0x4303),
+ PSIL_PDMA_XY_PKT(0x4304),
+ PSIL_PDMA_XY_PKT(0x4305),
+ PSIL_PDMA_XY_PKT(0x4306),
+ PSIL_PDMA_XY_PKT(0x4307),
+ PSIL_PDMA_XY_PKT(0x4308),
+ PSIL_PDMA_XY_PKT(0x4309),
+ PSIL_PDMA_XY_PKT(0x430a),
+ PSIL_PDMA_XY_PKT(0x430b),
+ PSIL_PDMA_XY_PKT(0x430c),
+ PSIL_PDMA_XY_PKT(0x430d),
+ PSIL_PDMA_XY_PKT(0x430e),
+ PSIL_PDMA_XY_PKT(0x430f),
+ /* PDMA_MAIN0 - USART0-1 */
+ PSIL_PDMA_XY_PKT(0x4310),
+ PSIL_PDMA_XY_PKT(0x4311),
+ /* PDMA_MAIN1 - SPI4 */
+ PSIL_PDMA_XY_PKT(0x4400),
+ PSIL_PDMA_XY_PKT(0x4401),
+ PSIL_PDMA_XY_PKT(0x4402),
+ PSIL_PDMA_XY_PKT(0x4403),
+ /* PDMA_MAIN1 - USART2-6 */
+ PSIL_PDMA_XY_PKT(0x4404),
+ PSIL_PDMA_XY_PKT(0x4405),
+ PSIL_PDMA_XY_PKT(0x4406),
+ PSIL_PDMA_XY_PKT(0x4407),
+ PSIL_PDMA_XY_PKT(0x4408),
+ /* PDMA_MAIN1 - ADCs */
+ PSIL_PDMA_XY_TR(0x440f),
+ PSIL_PDMA_XY_TR(0x4410),
+ /* CPSW2 */
+ PSIL_ETHERNET(0x4500, 16, 16, 16),
+};
+
+/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */
+static struct psil_ep am64_dst_ep_map[] = {
+ /* SAUL */
+ PSIL_SAUL(0xc000, 24, 80, 8, 80, 1),
+ PSIL_SAUL(0xc001, 25, 88, 8, 88, 1),
+ /* ICSS_G0 */
+ PSIL_ETHERNET(0xc100, 26, 96, 1),
+ PSIL_ETHERNET(0xc101, 27, 97, 1),
+ PSIL_ETHERNET(0xc102, 28, 98, 1),
+ PSIL_ETHERNET(0xc103, 29, 99, 1),
+ PSIL_ETHERNET(0xc104, 30, 100, 1),
+ PSIL_ETHERNET(0xc105, 31, 101, 1),
+ PSIL_ETHERNET(0xc106, 32, 102, 1),
+ PSIL_ETHERNET(0xc107, 33, 103, 1),
+ /* ICSS_G1 */
+ PSIL_ETHERNET(0xc200, 34, 104, 1),
+ PSIL_ETHERNET(0xc201, 35, 105, 1),
+ PSIL_ETHERNET(0xc202, 36, 106, 1),
+ PSIL_ETHERNET(0xc203, 37, 107, 1),
+ PSIL_ETHERNET(0xc204, 38, 108, 1),
+ PSIL_ETHERNET(0xc205, 39, 109, 1),
+ PSIL_ETHERNET(0xc206, 40, 110, 1),
+ PSIL_ETHERNET(0xc207, 41, 111, 1),
+ /* CPSW2 */
+ PSIL_ETHERNET(0xc500, 16, 16, 8),
+ PSIL_ETHERNET(0xc501, 17, 24, 8),
+ PSIL_ETHERNET(0xc502, 18, 32, 8),
+ PSIL_ETHERNET(0xc503, 19, 40, 8),
+ PSIL_ETHERNET(0xc504, 20, 48, 8),
+ PSIL_ETHERNET(0xc505, 21, 56, 8),
+ PSIL_ETHERNET(0xc506, 22, 64, 8),
+ PSIL_ETHERNET(0xc507, 23, 72, 8),
+};
+
+struct psil_ep_map am64_ep_map = {
+ .name = "am64",
+ .src = am64_src_ep_map,
+ .src_count = ARRAY_SIZE(am64_src_ep_map),
+ .dst = am64_dst_ep_map,
+ .dst_count = ARRAY_SIZE(am64_dst_ep_map),
+};
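
The am64 map above is consumed through psil_get_ep_config(): the SoC is matched via k3_soc_devices (see the k3-psil.c hunk below) and the requested thread ID is then looked up linearly in the src or dst table. A minimal sketch of that style of lookup, assuming destination threads are distinguished by a high offset bit, as the 0xc000-range IDs suggest:

#include <linux/kernel.h>
#include <linux/types.h>

#define DEMO_DST_THREAD_OFFSET	0x8000	/* assumed destination marker bit */

struct demo_ep {
	u32 thread_id;
	int cfg;
};

static const struct demo_ep demo_src_map[] = {
	{ .thread_id = 0x4300, .cfg = 1 },
};

static const struct demo_ep demo_dst_map[] = {
	{ .thread_id = 0x4500, .cfg = 2 },	/* stored without the offset */
};

static const struct demo_ep *demo_get_ep(u32 thread_id)
{
	const struct demo_ep *map = demo_src_map;
	size_t i, n = ARRAY_SIZE(demo_src_map);

	if (thread_id & DEMO_DST_THREAD_OFFSET) {
		thread_id &= ~DEMO_DST_THREAD_OFFSET;
		map = demo_dst_map;
		n = ARRAY_SIZE(demo_dst_map);
	}

	for (i = 0; i < n; i++)
		if (map[i].thread_id == thread_id)
			return &map[i];

	return NULL;
}
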
diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h
index b4b0fb359eff..b74e192e3c2d 100644
--- a/drivers/dma/ti/k3-psil-priv.h
+++ b/drivers/dma/ti/k3-psil-priv.h
@@ -40,5 +40,6 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id);
extern struct psil_ep_map am654_ep_map;
extern struct psil_ep_map j721e_ep_map;
extern struct psil_ep_map j7200_ep_map;
+extern struct psil_ep_map am64_ep_map;
#endif /* K3_PSIL_PRIV_H_ */
diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c
index 837853aab95a..13ce7367d870 100644
--- a/drivers/dma/ti/k3-psil.c
+++ b/drivers/dma/ti/k3-psil.c
@@ -20,6 +20,7 @@ static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", .data = &am654_ep_map },
{ .family = "J721E", .data = &j721e_ep_map },
{ .family = "J7200", .data = &j7200_ep_map },
+ { .family = "AM64X", .data = &am64_ep_map },
{ /* sentinel */ }
};
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index a367584f0d7b..4fdd9f06b723 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -22,6 +22,7 @@
struct k3_udma_glue_common {
struct device *dev;
+ struct device chan_dev;
struct udma_dev *udmax;
const struct udma_tisci_rm *tisci_rm;
struct k3_ringacc *ringacc;
@@ -32,7 +33,8 @@ struct k3_udma_glue_common {
bool epib;
u32 psdata_size;
u32 swdata_size;
- u32 atype;
+ u32 atype_asel;
+ struct psil_endpoint_config *ep_config;
};
struct k3_udma_glue_tx_channel {
@@ -53,6 +55,8 @@ struct k3_udma_glue_tx_channel {
bool tx_filt_einfo;
bool tx_filt_pswords;
bool tx_supr_tdpkt;
+
+ int udma_tflow_id;
};
struct k3_udma_glue_rx_flow {
@@ -81,20 +85,26 @@ struct k3_udma_glue_rx_channel {
u32 flows_ready;
};
+static void k3_udma_chan_dev_release(struct device *dev)
+{
+ /* The struct containing the device is devm managed */
+}
+
+static struct class k3_udma_glue_devclass = {
+ .name = "k3_udma_glue_chan",
+ .dev_release = k3_udma_chan_dev_release,
+};
+
#define K3_UDMAX_TDOWN_TIMEOUT_US 1000
static int of_k3_udma_glue_parse(struct device_node *udmax_np,
struct k3_udma_glue_common *common)
{
- common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np,
- "ti,ringacc");
- if (IS_ERR(common->ringacc))
- return PTR_ERR(common->ringacc);
-
common->udmax = of_xudma_dev_get(udmax_np, NULL);
if (IS_ERR(common->udmax))
return PTR_ERR(common->udmax);
+ common->ringacc = xudma_get_ringacc(common->udmax);
common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);
return 0;
@@ -104,7 +114,6 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
{
- struct psil_endpoint_config *ep_config;
struct of_phandle_args dma_spec;
u32 thread_id;
int ret = 0;
@@ -121,15 +130,26 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
&dma_spec))
return -ENOENT;
+ ret = of_k3_udma_glue_parse(dma_spec.np, common);
+ if (ret)
+ goto out_put_spec;
+
thread_id = dma_spec.args[0];
if (dma_spec.args_count == 2) {
- if (dma_spec.args[1] > 2) {
+ if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
dev_err(common->dev, "Invalid channel atype: %u\n",
dma_spec.args[1]);
ret = -EINVAL;
goto out_put_spec;
}
- common->atype = dma_spec.args[1];
+ if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
+ dev_err(common->dev, "Invalid channel asel: %u\n",
+ dma_spec.args[1]);
+ ret = -EINVAL;
+ goto out_put_spec;
+ }
+
+ common->atype_asel = dma_spec.args[1];
}
if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
@@ -143,25 +163,23 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
}
/* get psil endpoint config */
- ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(ep_config)) {
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
dev_err(common->dev,
"No configuration for psi-l thread 0x%04x\n",
thread_id);
- ret = PTR_ERR(ep_config);
+ ret = PTR_ERR(common->ep_config);
goto out_put_spec;
}
- common->epib = ep_config->needs_epib;
- common->psdata_size = ep_config->psd_size;
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
if (tx_chn)
common->dst_thread = thread_id;
else
common->src_thread = thread_id;
- ret = of_k3_udma_glue_parse(dma_spec.np, common);
-
out_put_spec:
of_node_put(dma_spec.np);
return ret;
@@ -227,7 +245,7 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
req.tx_supr_tdpkt = 1;
req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
- req.tx_atype = tx_chn->common.atype;
+ req.tx_atype = tx_chn->common.atype_asel;
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
@@ -259,8 +277,14 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
+ if (xudma_is_pktdma(tx_chn->common.udmax))
+ tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
+ else
+ tx_chn->udma_tchan_id = -1;
+
/* request and cfg UDMAP TX channel */
- tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1);
+ tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
+ tx_chn->udma_tchan_id);
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
@@ -268,11 +292,34 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
+ tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
+ dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
+ tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
+ ret = device_register(&tx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ tx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
+
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ tx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);
+ if (xudma_is_pktdma(tx_chn->common.udmax))
+ tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
+ else
+ tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;
+
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
- tx_chn->udma_tchan_id, -1,
+ tx_chn->udma_tflow_id, -1,
&tx_chn->ringtx,
&tx_chn->ringtxcq);
if (ret) {
@@ -280,6 +327,16 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
goto err;
}
+ /* Set the dma_dev for the rings to be configured */
+ cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
+ cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;
+
+ /* Set the ASEL value for DMA rings of PKTDMA */
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ cfg->tx_cfg.asel = tx_chn->common.atype_asel;
+ cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
+ }
+
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
@@ -303,19 +360,6 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
goto err;
}
- ret = xudma_navss_psil_pair(tx_chn->common.udmax,
- tx_chn->common.src_thread,
- tx_chn->common.dst_thread);
- if (ret) {
- dev_err(dev, "PSI-L request err %d\n", ret);
- goto err;
- }
-
- tx_chn->psil_paired = true;
-
- /* reset TX RT registers */
- k3_udma_glue_disable_tx_chn(tx_chn);
-
k3_udma_glue_dump_tx_chn(tx_chn);
return tx_chn;
@@ -344,6 +388,11 @@ void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
if (tx_chn->ringtx)
k3_ringacc_ring_free(tx_chn->ringtx);
+
+ if (tx_chn->common.chan_dev.parent) {
+ device_unregister(&tx_chn->common.chan_dev);
+ tx_chn->common.chan_dev.parent = NULL;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);
@@ -378,6 +427,18 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
+ int ret;
+
+ ret = xudma_navss_psil_pair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
+ return ret;
+ }
+
+ tx_chn->psil_paired = true;
+
xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
UDMA_PEER_RT_EN_ENABLE);
@@ -398,6 +459,13 @@ void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
xudma_tchanrt_write(tx_chn->udma_tchanx,
UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");
+
+ if (tx_chn->psil_paired) {
+ xudma_navss_psil_unpair(tx_chn->common.udmax,
+ tx_chn->common.src_thread,
+ tx_chn->common.dst_thread);
+ tx_chn->psil_paired = false;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);
@@ -437,13 +505,10 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
void *data,
void (*cleanup)(void *data, dma_addr_t desc_dma))
{
+ struct device *dev = tx_chn->common.dev;
dma_addr_t desc_dma;
int occ_tx, i, ret;
- /* reset TXCQ as it is not input for udma - expected to be empty */
- if (tx_chn->ringtxcq)
- k3_ringacc_ring_reset(tx_chn->ringtxcq);
-
/*
* TXQ reset need to be special way as it is input for udma and its
* state cached by udma, so:
@@ -452,17 +517,20 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
* 3) reset TXQ in a special way
*/
occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
- dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx);
+ dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);
for (i = 0; i < occ_tx; i++) {
ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
if (ret) {
- dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(dev, "TX reset pop %d\n", ret);
break;
}
cleanup(data, desc_dma);
}
+ /* reset TXCQ as it is not input for udma - expected to be empty */
+ k3_ringacc_ring_reset(tx_chn->ringtxcq);
k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);
@@ -481,12 +549,50 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
- tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+ if (xudma_is_pktdma(tx_chn->common.udmax)) {
+ tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
+ tx_chn->udma_tflow_id);
+ } else {
+ tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
+ }
return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
+struct device *
+ k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
+{
+ if (xudma_is_pktdma(tx_chn->common.udmax) &&
+ (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
+ return &tx_chn->common.chan_dev;
+
+ return xudma_get_device(tx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
+
+void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+ !tx_chn->common.atype_asel)
+ return;
+
+ *addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);
+
+void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(tx_chn->common.udmax) ||
+ !tx_chn->common.atype_asel)
+ return;
+
+ *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);
+
static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
@@ -498,8 +604,6 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
- TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
req.nav_id = tisci_rm->tisci_dev_id;
@@ -511,13 +615,16 @@ static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
* req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
*/
req.rxcq_qnum = 0xFFFF;
- if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
+ if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
+ rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
/* Default flow + extra ones */
+ req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
req.flowid_start = rx_chn->flow_id_base;
req.flowid_cnt = rx_chn->flow_num;
}
req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
- req.rx_atype = rx_chn->common.atype;
+ req.rx_atype = rx_chn->common.atype_asel;
ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
if (ret)
@@ -571,10 +678,18 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
goto err_rflow_put;
}
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ rx_ringfdq_id = flow->udma_rflow_id +
+ xudma_get_rflow_ring_offset(rx_chn->common.udmax);
+ rx_ring_id = 0;
+ } else {
+ rx_ring_id = flow_cfg->ring_rxq_id;
+ rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
+ }
+
/* request and cfg rings */
ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
- flow_cfg->ring_rxfdq0_id,
- flow_cfg->ring_rxq_id,
+ rx_ringfdq_id, rx_ring_id,
&flow->ringrxfdq,
&flow->ringrx);
if (ret) {
@@ -582,6 +697,16 @@ static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
goto err_rflow_put;
}
+ /* Set the dma_dev for the rings to be configured */
+ flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
+ flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;
+
+ /* Set the ASEL value for DMA rings of PKTDMA */
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
+ flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
+ }
+
ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringrx %d\n", ret);
@@ -740,6 +865,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
+ struct psil_endpoint_config *ep_cfg;
int ret, i;
if (cfg->flow_id_num <= 0)
@@ -767,8 +893,16 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
rx_chn->common.psdata_size,
rx_chn->common.swdata_size);
+ ep_cfg = rx_chn->common.ep_config;
+
+ if (xudma_is_pktdma(rx_chn->common.udmax))
+ rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
+ else
+ rx_chn->udma_rchan_id = -1;
+
/* request and cfg UDMAP RX channel */
- rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1);
+ rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
+ rx_chn->udma_rchan_id);
if (IS_ERR(rx_chn->udma_rchanx)) {
ret = PTR_ERR(rx_chn->udma_rchanx);
dev_err(dev, "UDMAX rchanx get err %d\n", ret);
@@ -776,12 +910,48 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
}
rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);
- rx_chn->flow_num = cfg->flow_id_num;
- rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
+ rx_chn->udma_rchan_id, rx_chn->common.src_thread);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ rx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
- /* Use RX channel id as flow id: target dev can't generate flow_id */
- if (cfg->flow_id_use_rxchan_id)
- rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ int flow_start = cfg->flow_id_base;
+ int flow_end;
+
+ if (flow_start == -1)
+ flow_start = ep_cfg->flow_start;
+
+ flow_end = flow_start + cfg->flow_id_num - 1;
+ if (flow_start < ep_cfg->flow_start ||
+ flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
+ dev_err(dev, "Invalid flow range requested\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ rx_chn->flow_id_base = flow_start;
+ } else {
+ rx_chn->flow_id_base = cfg->flow_id_base;
+
+ /* Use RX channel id as flow id: target dev can't generate flow_id */
+ if (cfg->flow_id_use_rxchan_id)
+ rx_chn->flow_id_base = rx_chn->udma_rchan_id;
+ }
+
+ rx_chn->flow_num = cfg->flow_id_num;
rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
sizeof(*rx_chn->flows), GFP_KERNEL);
@@ -815,19 +985,6 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
goto err;
}
- ret = xudma_navss_psil_pair(rx_chn->common.udmax,
- rx_chn->common.src_thread,
- rx_chn->common.dst_thread);
- if (ret) {
- dev_err(dev, "PSI-L request err %d\n", ret);
- goto err;
- }
-
- rx_chn->psil_paired = true;
-
- /* reset RX RT registers */
- k3_udma_glue_disable_rx_chn(rx_chn);
-
k3_udma_glue_dump_rx_chn(rx_chn);
return rx_chn;
@@ -884,6 +1041,24 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
goto err;
}
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
+ rx_chn->common.src_thread);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ rx_chn->common.chan_dev.parent = NULL;
+ goto err;
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
if (ret)
goto err;
@@ -936,6 +1111,11 @@ void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
xudma_rchan_put(rx_chn->common.udmax,
rx_chn->udma_rchanx);
+
+ if (rx_chn->common.chan_dev.parent) {
+ device_unregister(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);
@@ -1052,12 +1232,24 @@ EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);
int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
+ int ret;
+
if (rx_chn->remote)
return -EINVAL;
if (rx_chn->flows_ready < rx_chn->flow_num)
return -EINVAL;
+ ret = xudma_navss_psil_pair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ if (ret) {
+ dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
+ return ret;
+ }
+
+ rx_chn->psil_paired = true;
+
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
UDMA_CHAN_RT_CTL_EN);
@@ -1078,6 +1270,13 @@ void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);
k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");
+
+ if (rx_chn->psil_paired) {
+ xudma_navss_psil_unpair(rx_chn->common.udmax,
+ rx_chn->common.src_thread,
+ rx_chn->common.dst_thread);
+ rx_chn->psil_paired = false;
+ }
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);
@@ -1128,12 +1327,10 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
/* reset RXCQ as it is not input for udma - expected to be empty */
occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);
- if (flow->ringrx)
- k3_ringacc_ring_reset(flow->ringrx);
/* Skip RX FDQ in case one FDQ is used for the set of flows */
if (skip_fdq)
- return;
+ goto do_reset;
/*
* RX FDQ reset need to be special way as it is input for udma and its
@@ -1148,13 +1345,17 @@ void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
for (i = 0; i < occ_rx; i++) {
ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
if (ret) {
- dev_err(dev, "RX reset pop %d\n", ret);
+ if (ret != -ENODATA)
+ dev_err(dev, "RX reset pop %d\n", ret);
break;
}
cleanup(data, desc_dma);
}
k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);
+
+do_reset:
+ k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);
@@ -1184,8 +1385,52 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
flow = &rx_chn->flows[flow_num];
- flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
+ flow->udma_rflow_id);
+ } else {
+ flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
+ }
return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
+
+struct device *
+ k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
+{
+ if (xudma_is_pktdma(rx_chn->common.udmax) &&
+ (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
+ return &rx_chn->common.chan_dev;
+
+ return xudma_get_device(rx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
+
+void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+ !rx_chn->common.atype_asel)
+ return;
+
+ *addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);
+
+void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
+ dma_addr_t *addr)
+{
+ if (!xudma_is_pktdma(rx_chn->common.udmax) ||
+ !rx_chn->common.atype_asel)
+ return;
+
+ *addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);
+
+static int __init k3_udma_glue_class_init(void)
+{
+ return class_register(&k3_udma_glue_devclass);
+}
+arch_initcall(k3_udma_glue_class_init);
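
The new *_dma_to_cppi5_addr()/*_cppi5_to_dma_addr() helpers above implement PKTDMA's ASEL tagging: the selected asel value rides in the address bits above K3_ADDRESS_ASEL_SHIFT and is stripped again before the address is handed back to the DMA API. A minimal sketch, assuming a shift of 48 to match the DMA_BIT_MASK(48) coercion used for the channel devices:

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_ASEL_SHIFT	48	/* assumed value of K3_ADDRESS_ASEL_SHIFT */

static inline u64 demo_dma_to_cppi5_addr(u64 addr, u32 asel)
{
	return asel ? addr | ((u64)asel << DEMO_ASEL_SHIFT) : addr;
}

static inline u64 demo_cppi5_to_dma_addr(u64 addr)
{
	return addr & GENMASK_ULL(DEMO_ASEL_SHIFT - 1, 0);
}
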
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
index 8563a392f30b..aada84f40723 100644
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -50,6 +50,18 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
}
EXPORT_SYMBOL(of_xudma_dev_get);
+struct device *xudma_get_device(struct udma_dev *ud)
+{
+ return ud->dev;
+}
+EXPORT_SYMBOL(xudma_get_device);
+
+struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud)
+{
+ return ud->ringacc;
+}
+EXPORT_SYMBOL(xudma_get_ringacc);
+
u32 xudma_dev_get_psil_base(struct udma_dev *ud)
{
return ud->psil_base;
@@ -76,6 +88,9 @@ EXPORT_SYMBOL(xudma_free_gp_rflow_range);
bool xudma_rflow_is_gp(struct udma_dev *ud, int id)
{
+ if (!ud->rflow_gp_map)
+ return false;
+
return !test_bit(id, ud->rflow_gp_map);
}
EXPORT_SYMBOL(xudma_rflow_is_gp);
@@ -107,6 +122,12 @@ void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p)
}
EXPORT_SYMBOL(xudma_rflow_put);
+int xudma_get_rflow_ring_offset(struct udma_dev *ud)
+{
+ return ud->tflow_cnt;
+}
+EXPORT_SYMBOL(xudma_get_rflow_ring_offset);
+
#define XUDMA_GET_RESOURCE_ID(res) \
int xudma_##res##_get_id(struct udma_##res *p) \
{ \
@@ -136,3 +157,27 @@ void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \
EXPORT_SYMBOL(xudma_##res##rt_write)
XUDMA_RT_IO_FUNCTIONS(tchan);
XUDMA_RT_IO_FUNCTIONS(rchan);
+
+int xudma_is_pktdma(struct udma_dev *ud)
+{
+ return ud->match_data->type == DMA_TYPE_PKTDMA;
+}
+EXPORT_SYMBOL(xudma_is_pktdma);
+
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id)
+{
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+ return ti_sci_inta_msi_get_virq(ud->dev, udma_tflow_id +
+ oes->pktdma_tchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_tflow_get_irq);
+
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id)
+{
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+
+ return ti_sci_inta_msi_get_virq(ud->dev, udma_rflow_id +
+ oes->pktdma_rchan_flow);
+}
+EXPORT_SYMBOL(xudma_pktdma_rflow_get_irq);
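
k3-udma-private.c keeps struct udma_dev opaque to the glue layer by exporting small accessors, and the per-resource ID and register helpers are stamped out with macros (XUDMA_GET_RESOURCE_ID, XUDMA_RT_IO_FUNCTIONS). A minimal sketch of that token-pasting pattern, with hypothetical types:

struct demo_tchan { int id; };
struct demo_rchan { int id; };

#define DEMO_GET_RESOURCE_ID(res)			\
int demo_##res##_get_id(struct demo_##res *p)		\
{							\
	return p->id;					\
}

/* Expands to demo_tchan_get_id() and demo_rchan_get_id(). */
DEMO_GET_RESOURCE_ID(tchan);
DEMO_GET_RESOURCE_ID(rchan);
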
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index 82cf6c77f5c9..298460438bb4 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -26,6 +26,7 @@
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
+#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>
#include "../virt-dma.h"
@@ -55,14 +56,26 @@ struct udma_static_tr {
struct udma_chan;
+enum k3_dma_type {
+ DMA_TYPE_UDMA = 0,
+ DMA_TYPE_BCDMA,
+ DMA_TYPE_PKTDMA,
+};
+
enum udma_mmr {
MMR_GCFG = 0,
+ MMR_BCHANRT,
MMR_RCHANRT,
MMR_TCHANRT,
MMR_LAST,
};
-static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" };
+static const char * const mmr_names[] = {
+ [MMR_GCFG] = "gcfg",
+ [MMR_BCHANRT] = "bchanrt",
+ [MMR_RCHANRT] = "rchanrt",
+ [MMR_TCHANRT] = "tchanrt",
+};
struct udma_tchan {
void __iomem *reg_rt;
@@ -70,8 +83,12 @@ struct udma_tchan {
int id;
struct k3_ring *t_ring; /* Transmit ring */
struct k3_ring *tc_ring; /* Transmit Completion ring */
+ int tflow_id; /* applicable only for PKTDMA */
+
};
+#define udma_bchan udma_tchan
+
struct udma_rflow {
int id;
struct k3_ring *fd_ring; /* Free Descriptor ring */
@@ -84,10 +101,29 @@ struct udma_rchan {
int id;
};
+struct udma_oes_offsets {
+ /* K3 UDMA Output Event Offset */
+ u32 udma_rchan;
+
+ /* BCDMA Output Event Offsets */
+ u32 bcdma_bchan_data;
+ u32 bcdma_bchan_ring;
+ u32 bcdma_tchan_data;
+ u32 bcdma_tchan_ring;
+ u32 bcdma_rchan_data;
+ u32 bcdma_rchan_ring;
+
+ /* PKTDMA Output Event Offsets */
+ u32 pktdma_tchan_flow;
+ u32 pktdma_rchan_flow;
+};
+
#define UDMA_FLAG_PDMA_ACC32 BIT(0)
#define UDMA_FLAG_PDMA_BURST BIT(1)
+#define UDMA_FLAG_TDTYPE BIT(2)
struct udma_match_data {
+ enum k3_dma_type type;
u32 psil_base;
bool enable_memcpy_support;
u32 flags;
@@ -95,7 +131,8 @@ struct udma_match_data {
};
struct udma_soc_data {
- u32 rchan_oes_offset;
+ struct udma_oes_offsets oes;
+ u32 bcdma_trigger_event_offset;
};
struct udma_hwdesc {
@@ -116,6 +153,11 @@ struct udma_rx_flush {
dma_addr_t buffer_paddr;
};
+struct udma_tpl {
+ u8 levels;
+ u32 start_idx[3];
+};
+
struct udma_dev {
struct dma_device ddev;
struct device *dev;
@@ -123,8 +165,9 @@ struct udma_dev {
const struct udma_match_data *match_data;
const struct udma_soc_data *soc_data;
- u8 tpl_levels;
- u32 tpl_start_idx[3];
+ struct udma_tpl bchan_tpl;
+ struct udma_tpl tchan_tpl;
+ struct udma_tpl rchan_tpl;
size_t desc_align; /* alignment to use for descriptors */
@@ -138,16 +181,21 @@ struct udma_dev {
struct udma_rx_flush rx_flush;
+ int bchan_cnt;
int tchan_cnt;
int echan_cnt;
int rchan_cnt;
int rflow_cnt;
+ int tflow_cnt;
+ unsigned long *bchan_map;
unsigned long *tchan_map;
unsigned long *rchan_map;
unsigned long *rflow_gp_map;
unsigned long *rflow_gp_map_allocated;
unsigned long *rflow_in_use;
+ unsigned long *tflow_map;
+ struct udma_bchan *bchans;
struct udma_tchan *tchans;
struct udma_rchan *rchans;
struct udma_rflow *rflows;
@@ -155,6 +203,7 @@ struct udma_dev {
struct udma_chan *channels;
u32 psil_base;
u32 atype;
+ u32 asel;
};
struct udma_desc {
@@ -199,6 +248,7 @@ struct udma_chan_config {
bool notdpkt; /* Suppress sending TDC packet */
int remote_thread_id;
u32 atype;
+ u32 asel;
u32 src_thread;
u32 dst_thread;
enum psil_endpoint_type ep_type;
@@ -206,6 +256,13 @@ struct udma_chan_config {
bool enable_burst;
enum udma_tp_level channel_tpl; /* Channel Throughput Level */
+ u32 tr_trigger_type;
+
+	/* PKTDMA mapped channel */
+ int mapped_channel_id;
+ /* PKTDMA default tflow or rflow for mapped channel */
+ int default_flow_id;
+
enum dma_transfer_direction dir;
};
@@ -213,11 +270,13 @@ struct udma_chan {
struct virt_dma_chan vc;
struct dma_slave_config cfg;
struct udma_dev *ud;
+ struct device *dma_dev;
struct udma_desc *desc;
struct udma_desc *terminated_desc;
struct udma_static_tr static_tr;
char *name;
+ struct udma_bchan *bchan;
struct udma_tchan *tchan;
struct udma_rchan *rchan;
struct udma_rflow *rflow;
@@ -353,10 +412,36 @@ static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
src_thread, dst_thread);
}
+static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
+{
+ struct device *chan_dev = &chan->dev->device;
+
+ if (asel == 0) {
+ /* No special handling for the channel */
+ chan->dev->chan_dma_dev = false;
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ } else if (asel == 14 || asel == 15) {
+ chan->dev->chan_dma_dev = true;
+
+ chan_dev->dma_coherent = true;
+ dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
+ chan_dev->dma_parms = chan_dev->parent->dma_parms;
+ } else {
+ dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
+
+ chan_dev->dma_coherent = false;
+ chan_dev->dma_parms = NULL;
+ }
+}
+
static void udma_reset_uchan(struct udma_chan *uc)
{
memset(&uc->config, 0, sizeof(uc->config));
uc->config.remote_thread_id = -1;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
uc->state = UDMA_CHAN_IS_IDLE;
}
@@ -439,9 +524,7 @@ static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
d->hwdesc[i].cppi5_desc_vaddr = NULL;
}
} else if (d->hwdesc[0].cppi5_desc_vaddr) {
- struct udma_dev *ud = uc->ud;
-
- dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size,
+ dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
d->hwdesc[0].cppi5_desc_vaddr,
d->hwdesc[0].cppi5_desc_paddr);
@@ -670,8 +753,10 @@ static void udma_reset_counters(struct udma_chan *uc)
val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
- val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
- udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ if (!uc->bchan) {
+ val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
+ udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+ }
}
if (uc->rchan) {
@@ -746,10 +831,16 @@ static void udma_start_desc(struct udma_chan *uc)
{
struct udma_chan_config *ucc = &uc->config;
- if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
+ (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
int i;
- /* Push all descriptors to ring for packet mode cyclic or RX */
+ /*
+	 * UDMA only: push all descriptors to the ring for packet mode
+	 * cyclic or RX.
+	 * PKTDMA supports pre-linked descriptors and does not support
+	 * cyclic transfers.
+ */
for (i = 0; i < uc->desc->sglen; i++)
udma_push_to_ring(uc, i);
} else {
@@ -1020,13 +1111,12 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
struct udma_chan *uc = data;
struct udma_desc *d;
- unsigned long flags;
dma_addr_t paddr = 0;
if (udma_pop_from_ring(uc, &paddr) || !paddr)
return IRQ_HANDLED;
- spin_lock_irqsave(&uc->vc.lock, flags);
+ spin_lock(&uc->vc.lock);
/* Teardown completion message */
if (cppi5_desc_is_tdcm(paddr)) {
@@ -1077,7 +1167,7 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
}
}
out:
- spin_unlock_irqrestore(&uc->vc.lock, flags);
+ spin_unlock(&uc->vc.lock);
return IRQ_HANDLED;
}
@@ -1086,9 +1176,8 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
struct udma_chan *uc = data;
struct udma_desc *d;
- unsigned long flags;
- spin_lock_irqsave(&uc->vc.lock, flags);
+ spin_lock(&uc->vc.lock);
d = uc->desc;
if (d) {
d->tr_idx = (d->tr_idx + 1) % d->sglen;
@@ -1103,7 +1192,7 @@ static irqreturn_t udma_udma_irq_handler(int irq, void *data)
}
}
- spin_unlock_irqrestore(&uc->vc.lock, flags);
+ spin_unlock(&uc->vc.lock);
return IRQ_HANDLED;
}
@@ -1181,10 +1270,12 @@ static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
if (test_bit(id, ud->rflow_in_use))
return ERR_PTR(-ENOENT);
- /* GP rflow has to be allocated first */
- if (!test_bit(id, ud->rflow_gp_map) &&
- !test_bit(id, ud->rflow_gp_map_allocated))
- return ERR_PTR(-EINVAL);
+ if (ud->rflow_gp_map) {
+ /* GP rflow has to be allocated first */
+ if (!test_bit(id, ud->rflow_gp_map) &&
+ !test_bit(id, ud->rflow_gp_map_allocated))
+ return ERR_PTR(-EINVAL);
+ }
dev_dbg(ud->dev, "get rflow%d\n", id);
set_bit(id, ud->rflow_in_use);
@@ -1215,10 +1306,10 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
} else { \
int start; \
\
- if (tpl >= ud->tpl_levels) \
- tpl = ud->tpl_levels - 1; \
+ if (tpl >= ud->res##_tpl.levels) \
+ tpl = ud->res##_tpl.levels - 1; \
\
- start = ud->tpl_start_idx[tpl]; \
+ start = ud->res##_tpl.start_idx[tpl]; \
\
id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
start); \
@@ -1231,9 +1322,39 @@ static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
return &ud->res##s[id]; \
}
+UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
+static int bcdma_get_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ enum udma_tp_level tpl;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
+ uc->id, uc->bchan->id);
+ return 0;
+ }
+
+ /*
+ * Use normal channels for peripherals, and highest TPL channel for
+ * mem2mem
+ */
+ if (uc->config.tr_trigger_type)
+ tpl = 0;
+ else
+ tpl = ud->bchan_tpl.levels - 1;
+
+ uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
+ if (IS_ERR(uc->bchan))
+ return PTR_ERR(uc->bchan);
+
+ uc->tchan = uc->bchan;
+
+ return 0;
+}
+
static int udma_get_tchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1244,9 +1365,39 @@ static int udma_get_tchan(struct udma_chan *uc)
return 0;
}
- uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1);
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
+ if (IS_ERR(uc->tchan))
+ return PTR_ERR(uc->tchan);
+
+ if (ud->tflow_cnt) {
+ int tflow_id;
+
+		/* Only PKTDMA has support for tx flows */
+ if (uc->config.default_flow_id >= 0)
+ tflow_id = uc->config.default_flow_id;
+ else
+ tflow_id = uc->tchan->id;
+
+ if (test_bit(tflow_id, ud->tflow_map)) {
+ dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
+ clear_bit(uc->tchan->id, ud->tchan_map);
+ uc->tchan = NULL;
+ return -ENOENT;
+ }
- return PTR_ERR_OR_ZERO(uc->tchan);
+ uc->tchan->tflow_id = tflow_id;
+ set_bit(tflow_id, ud->tflow_map);
+ } else {
+ uc->tchan->tflow_id = -1;
+ }
+
+ return 0;
}
static int udma_get_rchan(struct udma_chan *uc)
@@ -1259,7 +1410,13 @@ static int udma_get_rchan(struct udma_chan *uc)
return 0;
}
- uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1);
+ /*
+ * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
+ * For PKTDMA mapped channels it is configured to a channel which must
+ * be used to service the peripheral.
+ */
+ uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
+ uc->config.mapped_channel_id);
return PTR_ERR_OR_ZERO(uc->rchan);
}
@@ -1287,8 +1444,11 @@ static int udma_get_chan_pair(struct udma_chan *uc)
/* Can be optimized, but let's have it like this for now */
end = min(ud->tchan_cnt, ud->rchan_cnt);
- /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */
- chan_id = ud->tpl_start_idx[ud->tpl_levels - 1];
+ /*
+ * Try to use the highest TPL channel pair for MEM_TO_MEM channels
+ * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
+ */
+ chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
for (; chan_id < end; chan_id++) {
if (!test_bit(chan_id, ud->tchan_map) &&
!test_bit(chan_id, ud->rchan_map))
@@ -1303,6 +1463,9 @@ static int udma_get_chan_pair(struct udma_chan *uc)
uc->tchan = &ud->tchans[chan_id];
uc->rchan = &ud->rchans[chan_id];
+ /* UDMA does not use tx flows */
+ uc->tchan->tflow_id = -1;
+
return 0;
}
@@ -1326,6 +1489,19 @@ static int udma_get_rflow(struct udma_chan *uc, int flow_id)
return PTR_ERR_OR_ZERO(uc->rflow);
}
+static void bcdma_put_bchan(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+
+ if (uc->bchan) {
+ dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
+ uc->bchan->id);
+ clear_bit(uc->bchan->id, ud->bchan_map);
+ uc->bchan = NULL;
+ uc->tchan = NULL;
+ }
+}
+
static void udma_put_rchan(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1346,6 +1522,10 @@ static void udma_put_tchan(struct udma_chan *uc)
dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
uc->tchan->id);
clear_bit(uc->tchan->id, ud->tchan_map);
+
+ if (uc->tchan->tflow_id >= 0)
+ clear_bit(uc->tchan->tflow_id, ud->tflow_map);
+
uc->tchan = NULL;
}
}
@@ -1362,6 +1542,65 @@ static void udma_put_rflow(struct udma_chan *uc)
}
}
+static void bcdma_free_bchan_resources(struct udma_chan *uc)
+{
+ if (!uc->bchan)
+ return;
+
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->tc_ring = NULL;
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+
+ bcdma_put_bchan(uc);
+}
+
+static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
+{
+ struct k3_ring_cfg ring_cfg;
+ struct udma_dev *ud = uc->ud;
+ int ret;
+
+ ret = bcdma_get_bchan(uc);
+ if (ret)
+ return ret;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
+ &uc->bchan->t_ring,
+ &uc->bchan->tc_ring);
+ if (ret) {
+ ret = -EBUSY;
+ goto err_ring;
+ }
+
+ memset(&ring_cfg, 0, sizeof(ring_cfg));
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
+ ring_cfg.asel = ud->asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+
+ ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
+ if (ret)
+ goto err_ringcfg;
+
+ return 0;
+
+err_ringcfg:
+ k3_ringacc_ring_free(uc->bchan->tc_ring);
+ uc->bchan->tc_ring = NULL;
+ k3_ringacc_ring_free(uc->bchan->t_ring);
+ uc->bchan->t_ring = NULL;
+ k3_configure_chan_coherency(&uc->vc.chan, 0);
+err_ring:
+ bcdma_put_bchan(uc);
+
+ return ret;
+}
+
static void udma_free_tx_resources(struct udma_chan *uc)
{
if (!uc->tchan)
@@ -1379,15 +1618,22 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
{
struct k3_ring_cfg ring_cfg;
struct udma_dev *ud = uc->ud;
- int ret;
+ struct udma_tchan *tchan;
+ int ring_idx, ret;
ret = udma_get_tchan(uc);
if (ret)
return ret;
- ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->tchan->id, -1,
- &uc->tchan->t_ring,
- &uc->tchan->tc_ring);
+ tchan = uc->tchan;
+ if (tchan->tflow_id >= 0)
+ ring_idx = tchan->tflow_id;
+ else
+ ring_idx = ud->bchan_cnt + tchan->id;
+
+ ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
+ &tchan->t_ring,
+ &tchan->tc_ring);
if (ret) {
ret = -EBUSY;
goto err_ring;
@@ -1396,10 +1642,18 @@ static int udma_alloc_tx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
+
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
- ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg);
- ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg);
+ ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
+ ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
if (ret)
goto err_ringcfg;
@@ -1452,14 +1706,23 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
if (uc->config.dir == DMA_MEM_TO_MEM)
return 0;
- ret = udma_get_rflow(uc, uc->rchan->id);
+ if (uc->config.default_flow_id >= 0)
+ ret = udma_get_rflow(uc, uc->config.default_flow_id);
+ else
+ ret = udma_get_rflow(uc, uc->rchan->id);
+
if (ret) {
ret = -EBUSY;
goto err_rflow;
}
rflow = uc->rflow;
- fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id;
+ if (ud->tflow_cnt)
+ fd_ring_id = ud->tflow_cnt + rflow->id;
+ else
+ fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
+ uc->rchan->id;
+
ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
&rflow->fd_ring, &rflow->r_ring);
if (ret) {
@@ -1469,15 +1732,25 @@ static int udma_alloc_rx_resources(struct udma_chan *uc)
memset(&ring_cfg, 0, sizeof(ring_cfg));
- if (uc->config.pkt_mode)
- ring_cfg.size = SG_MAX_SEGMENTS;
- else
+ ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ if (uc->config.pkt_mode)
+ ring_cfg.size = SG_MAX_SEGMENTS;
+ else
+ ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+
+ ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ } else {
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
+ ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
- ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
- ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
+ k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
+ ring_cfg.asel = uc->config.asel;
+ ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
+ }
ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
+
ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
@@ -1499,7 +1772,18 @@ err_rflow:
return ret;
}
-#define TISCI_TCHAN_VALID_PARAMS ( \
+#define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
+
+#define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
+
+#define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
+
+#define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
@@ -1509,7 +1793,7 @@ err_rflow:
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
-#define TISCI_RCHAN_VALID_PARAMS ( \
+#define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
@@ -1534,7 +1818,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
@@ -1548,7 +1832,7 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
@@ -1563,6 +1847,27 @@ static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ struct udma_bchan *bchan = uc->bchan;
+ int ret = 0;
+
+ req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
+ req_tx.index = bchan->id;
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
+
+ return ret;
+}
+
static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1583,7 +1888,7 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS;
+ req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
req_tx.nav_id = tisci_rm->tisci_dev_id;
req_tx.index = tchan->id;
req_tx.tx_chan_type = mode;
@@ -1591,6 +1896,13 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
req_tx.tx_fetch_size = fetch_size >> 2;
req_tx.txcq_qnum = tc_ring;
req_tx.tx_atype = uc->config.atype;
+ if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
+ ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
if (ret)
@@ -1599,6 +1911,35 @@ static int udma_tisci_tx_channel_config(struct udma_chan *uc)
return ret;
}
+static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_tchan *tchan = uc->tchan;
+ struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
+ int ret = 0;
+
+ req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
+ req_tx.nav_id = tisci_rm->tisci_dev_id;
+ req_tx.index = tchan->id;
+ req_tx.tx_supr_tdpkt = uc->config.notdpkt;
+ if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
+ /* wait for peer to complete the teardown for PDMAs */
+ req_tx.valid_params |=
+ TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
+ req_tx.tx_tdtype = 1;
+ }
+
+ ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
+ if (ret)
+ dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
+
+ return ret;
+}
+
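+/* PKTDMA tchans take the same TISCI channel configuration as BCDMA tchans */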
+#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
+
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
struct udma_dev *ud = uc->ud;
@@ -1621,7 +1962,7 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
fetch_size = sizeof(struct cppi5_desc_hdr_t);
}
- req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS;
+ req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
req_rx.nav_id = tisci_rm->tisci_dev_id;
req_rx.index = rchan->id;
req_rx.rx_fetch_size = fetch_size >> 2;
@@ -1680,6 +2021,72 @@ static int udma_tisci_rx_channel_config(struct udma_chan *uc)
return 0;
}
+static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct udma_rchan *rchan = uc->rchan;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ int ret = 0;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = rchan->id;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret)
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
+
+ return ret;
+}
+
+static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
+{
+ struct udma_dev *ud = uc->ud;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
+ struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
+ struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
+ int ret = 0;
+
+ req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
+ req_rx.nav_id = tisci_rm->tisci_dev_id;
+ req_rx.index = uc->rchan->id;
+
+ ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
+ if (ret) {
+ dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
+ return ret;
+ }
+
+ flow_req.valid_params =
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
+ TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
+
+ flow_req.nav_id = tisci_rm->tisci_dev_id;
+ flow_req.flow_index = uc->rflow->id;
+
+ if (uc->config.needs_epib)
+ flow_req.rx_einfo_present = 1;
+ else
+ flow_req.rx_einfo_present = 0;
+ if (uc->config.psd_size)
+ flow_req.rx_psinfo_present = 1;
+ else
+ flow_req.rx_psinfo_present = 0;
+ flow_req.rx_error_handling = 1;
+
+ ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
+
+ if (ret)
+ dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
+ ret);
+
+ return ret;
+}
+
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
@@ -1689,6 +2096,8 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
u32 irq_udma_idx;
int ret;
+ uc->dma_dev = ud->dev;
+
if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
uc->use_dma_pool = true;
/* in case of MEM_TO_MEM we have maximum of two TRs */
@@ -1784,7 +2193,7 @@ static int udma_alloc_chan_resources(struct dma_chan *chan)
K3_PSIL_DST_THREAD_ID_OFFSET;
irq_ring = uc->rflow->r_ring;
- irq_udma_idx = soc_data->rchan_oes_offset + uc->rchan->id;
+ irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
ret = udma_tisci_rx_channel_config(uc);
break;
@@ -1884,6 +2293,369 @@ err_cleanup:
return ret;
}
+static int bcdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_udma_idx, irq_ring_idx;
+ int ret;
+
+ /* Only TR mode is supported */
+ uc->config.pkt_mode = false;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_MEM:
+ /* Non-synchronized - mem to mem type of transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
+ uc->id);
+
+ ret = bcdma_alloc_bchan_resources(uc);
+ if (ret)
+ return ret;
+
+ irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
+ irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
+
+ ret = bcdma_tisci_m2m_channel_config(uc);
+ break;
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
+ irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
+
+ ret = bcdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
+ irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
+
+ ret = bcdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
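+ /*
+ * Non-triggered memcpy runs on the bchan; size the pool for the
+ * two type15 TRs a single copy may need.
+ */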
+ uc->config.hdesc_size = cppi5_trdesc_calc_size(
+ sizeof(struct cppi5_tr_type15_t), 2);
+
+ uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
+ uc->config.hdesc_size,
+ ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ return -ENOMEM;
+ }
+
+ uc->use_dma_pool = true;
+ } else if (uc->config.dir != DMA_MEM_TO_MEM) {
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev,
+ "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+ }
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ /* Event from BCDMA (TR events) only needed for slave channels */
+ if (is_slave_direction(uc->config.dir)) {
+ uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev,
+ irq_udma_idx);
+ if (uc->irq_num_udma <= 0) {
+ dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
+ irq_udma_idx);
+ free_irq(uc->irq_num_ring, uc);
+ ret = -EINVAL;
+ goto err_irq_free;
+ }
+
+ ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
+ uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
+ uc->id);
+ free_irq(uc->irq_num_ring, uc);
+ goto err_irq_free;
+ }
+ } else {
+ uc->irq_num_udma = 0;
+ }
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+ uc->irq_num_udma = 0;
+err_psi_free:
+ if (uc->psil_paired)
+ navss_psil_unpair(ud, uc->config.src_thread,
+ uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ bcdma_free_bchan_resources(uc);
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ if (uc->use_dma_pool) {
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+ }
+
+ return ret;
+}
+
+static int bcdma_router_config(struct dma_chan *chan)
+{
+ struct k3_event_route_data *router_data = chan->route_data;
+ struct udma_chan *uc = to_udma_chan(chan);
+ u32 trigger_event;
+
+ if (!uc->bchan)
+ return -EINVAL;
+
+ if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
+ return -EINVAL;
+
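+ /* each bchan exposes two trigger events; type 1 selects the first, type 2 the second */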
+ trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
+ trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
+
+ return router_data->set_event(router_data->priv, trigger_event);
+}
+
+static int pktdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct udma_chan *uc = to_udma_chan(chan);
+ struct udma_dev *ud = to_udma_dev(chan->device);
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 irq_ring_idx;
+ int ret;
+
+ /*
+ * Make sure that the completion is in a known state:
+ * No teardown, the channel is idle
+ */
+ reinit_completion(&uc->teardown_completed);
+ complete_all(&uc->teardown_completed);
+ uc->state = UDMA_CHAN_IS_IDLE;
+
+ switch (uc->config.dir) {
+ case DMA_MEM_TO_DEV:
+ /* Slave transfer synchronized - mem to dev (TX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_tx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = ud->psil_base + uc->tchan->id;
+ uc->config.dst_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
+
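+ /* PKTDMA signals completion per flow; no separate TR event is requested (irq_num_udma stays 0) */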
+ irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
+
+ ret = pktdma_tisci_tx_channel_config(uc);
+ break;
+ case DMA_DEV_TO_MEM:
+ /* Slave transfer synchronized - dev to mem (RX) transfer */
+ dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
+ uc->id);
+
+ ret = udma_alloc_rx_resources(uc);
+ if (ret) {
+ uc->config.remote_thread_id = -1;
+ return ret;
+ }
+
+ uc->config.src_thread = uc->config.remote_thread_id;
+ uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
+ K3_PSIL_DST_THREAD_ID_OFFSET;
+
+ irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
+
+ ret = pktdma_tisci_rx_channel_config(uc);
+ break;
+ default:
+ /* Cannot happen */
+ dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
+ __func__, uc->id, uc->config.dir);
+ return -EINVAL;
+ }
+
+ /* check if the channel configuration was successful */
+ if (ret)
+ goto err_res_free;
+
+ if (udma_is_chan_running(uc)) {
+ dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
+ udma_reset_chan(uc, false);
+ if (udma_is_chan_running(uc)) {
+ dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
+ ret = -EBUSY;
+ goto err_res_free;
+ }
+ }
+
+ uc->dma_dev = dmaengine_get_dma_device(chan);
+ uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
+ uc->config.hdesc_size, ud->desc_align,
+ 0);
+ if (!uc->hdesc_pool) {
+ dev_err(ud->ddev.dev,
+ "Descriptor pool allocation failed\n");
+ uc->use_dma_pool = false;
+ ret = -ENOMEM;
+ goto err_res_free;
+ }
+
+ uc->use_dma_pool = true;
+
+ /* PSI-L pairing */
+ ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
+ if (ret) {
+ dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
+ uc->config.src_thread, uc->config.dst_thread);
+ goto err_res_free;
+ }
+
+ uc->psil_paired = true;
+
+ uc->irq_num_ring = ti_sci_inta_msi_get_virq(ud->dev, irq_ring_idx);
+ if (uc->irq_num_ring <= 0) {
+ dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
+ irq_ring_idx);
+ ret = -EINVAL;
+ goto err_psi_free;
+ }
+
+ ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
+ IRQF_TRIGGER_HIGH, uc->name, uc);
+ if (ret) {
+ dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
+ goto err_irq_free;
+ }
+
+ uc->irq_num_udma = 0;
+
+ udma_reset_rings(uc);
+
+ INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
+ udma_check_tx_completion);
+
+ if (uc->tchan)
+ dev_dbg(ud->dev,
+ "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
+ uc->id, uc->tchan->id, uc->tchan->tflow_id,
+ uc->config.remote_thread_id);
+ else if (uc->rchan)
+ dev_dbg(ud->dev,
+ "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
+ uc->id, uc->rchan->id, uc->rflow->id,
+ uc->config.remote_thread_id);
+ return 0;
+
+err_irq_free:
+ uc->irq_num_ring = 0;
+err_psi_free:
+ navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
+ uc->psil_paired = false;
+err_res_free:
+ udma_free_tx_resources(uc);
+ udma_free_rx_resources(uc);
+
+ udma_reset_uchan(uc);
+
+ dma_pool_destroy(uc->hdesc_pool);
+ uc->use_dma_pool = false;
+
+ return ret;
+}
+
static int udma_slave_config(struct dma_chan *chan,
struct dma_slave_config *cfg)
{
@@ -2028,6 +2800,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
size_t tr_size;
int num_tr = 0;
int tr_idx = 0;
+ u64 asel;
/* estimate the number of TRs we will need */
for_each_sg(sgl, sgent, sglen, i) {
@@ -2045,6 +2818,11 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
d->sglen = sglen;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
tr_req = d->hwdesc[0].tr_req_base;
for_each_sg(sgl, sgent, sglen, i) {
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2063,6 +2841,7 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ sg_addr |= asel;
tr_req[tr_idx].addr = sg_addr;
tr_req[tr_idx].icnt0 = tr0_cnt0;
tr_req[tr_idx].icnt1 = tr0_cnt1;
@@ -2092,6 +2871,205 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
return d;
}
+static struct udma_desc *
+udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
+ unsigned int sglen,
+ enum dma_transfer_direction dir,
+ unsigned long tx_flags, void *context)
+{
+ struct scatterlist *sgent;
+ struct cppi5_tr_type15_t *tr_req = NULL;
+ enum dma_slave_buswidth dev_width;
+ u16 tr_cnt0, tr_cnt1;
+ dma_addr_t dev_addr;
+ struct udma_desc *d;
+ unsigned int i;
+ size_t tr_size, sg_len;
+ int num_tr = 0;
+ int tr_idx = 0;
+ u32 burst, trigger_size, port_window;
+ u64 asel;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ dev_addr = uc->cfg.src_addr;
+ dev_width = uc->cfg.src_addr_width;
+ burst = uc->cfg.src_maxburst;
+ port_window = uc->cfg.src_port_window_size;
+ } else if (dir == DMA_MEM_TO_DEV) {
+ dev_addr = uc->cfg.dst_addr;
+ dev_width = uc->cfg.dst_addr_width;
+ burst = uc->cfg.dst_maxburst;
+ port_window = uc->cfg.dst_port_window_size;
+ } else {
+ dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ return NULL;
+ }
+
+ if (!burst)
+ burst = 1;
+
+ if (port_window) {
+ if (port_window != burst) {
+ dev_err(uc->ud->dev,
+ "The burst must be equal to port_window\n");
+ return NULL;
+ }
+
+ tr_cnt0 = dev_width * port_window;
+ tr_cnt1 = 1;
+ } else {
+ tr_cnt0 = dev_width;
+ tr_cnt1 = burst;
+ }
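+ /* bytes moved per trigger: dev_width * burst, or the whole port window */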
+ trigger_size = tr_cnt0 * tr_cnt1;
+
+ /* estimate the number of TRs we will need */
+ for_each_sg(sgl, sgent, sglen, i) {
+ sg_len = sg_dma_len(sgent);
+
+ if (sg_len % trigger_size) {
+ dev_err(uc->ud->dev,
+ "Not aligned SG entry (%zu for %u)\n", sg_len,
+ trigger_size);
+ return NULL;
+ }
+
+ if (sg_len / trigger_size < SZ_64K)
+ num_tr++;
+ else
+ num_tr += 2;
+ }
+
+ /* Now allocate and setup the descriptor. */
+ tr_size = sizeof(struct cppi5_tr_type15_t);
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
+ if (!d)
+ return NULL;
+
+ d->sglen = sglen;
+
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
+ asel = 0;
+ } else {
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+ dev_addr |= asel;
+ }
+
+ tr_req = d->hwdesc[0].tr_req_base;
+ for_each_sg(sgl, sgent, sglen, i) {
+ u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+
+ sg_len = sg_dma_len(sgent);
+ num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
+ &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ sg_len);
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
+
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
+ true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
+
+ sg_addr |= asel;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr0_cnt2;
+ tr_req[tr_idx].icnt3 = tr0_cnt3;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+ tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr0_cnt2;
+ tr_req[tr_idx].dicnt3 = tr0_cnt3;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+
+ tr_idx++;
+
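+ /* a second TR covers the remainder that did not fit in the first TR's icnt2/icnt3 counters */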
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
+ false, true,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
+ uc->config.tr_trigger_type,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ 0, 0);
+
+ sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
+ if (dir == DMA_DEV_TO_MEM) {
+ tr_req[tr_idx].addr = dev_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
+
+ tr_req[tr_idx].daddr = sg_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = tr_cnt0;
+ tr_req[tr_idx].ddim2 = trigger_size;
+ } else {
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr_cnt0;
+ tr_req[tr_idx].icnt1 = tr_cnt1;
+ tr_req[tr_idx].icnt2 = tr1_cnt2;
+ tr_req[tr_idx].icnt3 = 1;
+ tr_req[tr_idx].dim1 = tr_cnt0;
+ tr_req[tr_idx].dim2 = trigger_size;
+
+ tr_req[tr_idx].daddr = dev_addr;
+ tr_req[tr_idx].dicnt0 = tr_cnt0;
+ tr_req[tr_idx].dicnt1 = tr_cnt1;
+ tr_req[tr_idx].dicnt2 = tr1_cnt2;
+ tr_req[tr_idx].dicnt3 = 1;
+ tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
+ }
+ tr_idx++;
+ }
+
+ d->residue += sg_len;
+ }
+
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+ CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
+
+ return d;
+}
+
static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
enum dma_slave_buswidth dev_width,
u16 elcnt)
@@ -2156,6 +3134,7 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
struct udma_desc *d;
u32 ring_id;
unsigned int i;
+ u64 asel;
d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
if (!d)
@@ -2169,6 +3148,11 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
else
ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ asel = 0;
+ else
+ asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
for_each_sg(sgl, sgent, sglen, i) {
struct udma_hwdesc *hwdesc = &d->hwdesc[i];
dma_addr_t sg_addr = sg_dma_address(sgent);
@@ -2203,14 +3187,16 @@ udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
}
/* attach the sg buffer to the descriptor */
+ sg_addr |= asel;
cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
/* Attach link as host buffer descriptor */
if (h_desc)
cppi5_hdesc_link_hbdesc(h_desc,
- hwdesc->cppi5_desc_paddr);
+ hwdesc->cppi5_desc_paddr | asel);
- if (dir == DMA_MEM_TO_DEV)
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
+ dir == DMA_MEM_TO_DEV)
h_desc = desc;
}
@@ -2333,7 +3319,8 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct udma_desc *d;
u32 burst;
- if (dir != uc->config.dir) {
+ if (dir != uc->config.dir &&
+ (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
dev_err(chan->device->dev,
"%s: chan%d is for %s, not supporting %s\n",
__func__, uc->id,
@@ -2359,9 +3346,12 @@ udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
if (uc->config.pkt_mode)
d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
context);
- else
+ else if (is_slave_direction(uc->config.dir))
d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
context);
+ else
+ d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
+ tx_flags, context);
if (!d)
return NULL;
@@ -2415,7 +3405,12 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
return NULL;
tr_req = d->hwdesc[0].tr_req_base;
- period_addr = buf_addr;
+ if (uc->ud->match_data->type == DMA_TYPE_UDMA)
+ period_addr = buf_addr;
+ else
+ period_addr = buf_addr |
+ ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
+
for (i = 0; i < periods; i++) {
int tr_idx = i * num_tr;
@@ -2480,6 +3475,9 @@ udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
else
ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA)
+ buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
+
for (i = 0; i < periods; i++) {
struct udma_hwdesc *hwdesc = &d->hwdesc[i];
dma_addr_t period_addr = buf_addr + (period_len * i);
@@ -2621,6 +3619,11 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
d->tr_idx = 0;
d->residue = len;
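+ /* DMSS variants carry the ASEL (coherency) attribute in the upper address bits */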
+ if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
+ src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
+ }
+
tr_req = d->hwdesc[0].tr_req_base;
cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
@@ -2978,6 +3981,7 @@ static void udma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&uc->vc);
tasklet_kill(&uc->vc.task);
+ bcdma_free_bchan_resources(uc);
udma_free_tx_resources(uc);
udma_free_rx_resources(uc);
udma_reset_uchan(uc);
@@ -2989,10 +3993,14 @@ static void udma_free_chan_resources(struct dma_chan *chan)
}
static struct platform_driver udma_driver;
+static struct platform_driver bcdma_driver;
+static struct platform_driver pktdma_driver;
struct udma_filter_param {
int remote_thread_id;
u32 atype;
+ u32 asel;
+ u32 tr_trigger_type;
};
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
@@ -3003,7 +4011,9 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
struct udma_chan *uc;
struct udma_dev *ud;
- if (chan->device->dev->driver != &udma_driver.driver)
+ if (chan->device->dev->driver != &udma_driver.driver &&
+ chan->device->dev->driver != &bcdma_driver.driver &&
+ chan->device->dev->driver != &pktdma_driver.driver)
return false;
uc = to_udma_chan(chan);
@@ -3017,13 +4027,25 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
return false;
}
+ if (filter_param->asel > 15) {
+ dev_err(ud->dev, "Invalid channel asel: %u\n",
+ filter_param->asel);
+ return false;
+ }
+
ucc->remote_thread_id = filter_param->remote_thread_id;
ucc->atype = filter_param->atype;
+ ucc->asel = filter_param->asel;
+ ucc->tr_trigger_type = filter_param->tr_trigger_type;
- if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)
+ if (ucc->tr_trigger_type) {
+ ucc->dir = DMA_MEM_TO_MEM;
+ goto triggered_bchan;
+ } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
ucc->dir = DMA_MEM_TO_DEV;
- else
+ } else {
ucc->dir = DMA_DEV_TO_MEM;
+ }
ep_config = psil_get_ep_config(ucc->remote_thread_id);
if (IS_ERR(ep_config)) {
@@ -3032,6 +4054,19 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->dir = DMA_MEM_TO_MEM;
ucc->remote_thread_id = -1;
ucc->atype = 0;
+ ucc->asel = 0;
+ return false;
+ }
+
+ if (ud->match_data->type == DMA_TYPE_BCDMA &&
+ ep_config->pkt_mode) {
+ dev_err(ud->dev,
+ "Only TR mode is supported (psi-l thread 0x%04x)\n",
+ ucc->remote_thread_id);
+ ucc->dir = DMA_MEM_TO_MEM;
+ ucc->remote_thread_id = -1;
+ ucc->atype = 0;
+ ucc->asel = 0;
return false;
}
@@ -3040,6 +4075,15 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->notdpkt = ep_config->notdpkt;
ucc->ep_type = ep_config->ep_type;
+ if (ud->match_data->type == DMA_TYPE_PKTDMA &&
+ ep_config->mapped_channel_id >= 0) {
+ ucc->mapped_channel_id = ep_config->mapped_channel_id;
+ ucc->default_flow_id = ep_config->default_flow_id;
+ } else {
+ ucc->mapped_channel_id = -1;
+ ucc->default_flow_id = -1;
+ }
+
if (ucc->ep_type != PSIL_EP_NATIVE) {
const struct udma_match_data *match_data = ud->match_data;
@@ -3063,6 +4107,13 @@ static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
return true;
+
+triggered_bchan:
+ dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
+ ucc->tr_trigger_type);
+
+ return true;
}
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
@@ -3073,14 +4124,33 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
struct udma_filter_param filter_param;
struct dma_chan *chan;
- if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
- return NULL;
+ if (ud->match_data->type == DMA_TYPE_BCDMA) {
+ if (dma_spec->args_count != 3)
+ return NULL;
- filter_param.remote_thread_id = dma_spec->args[0];
- if (dma_spec->args_count == 2)
- filter_param.atype = dma_spec->args[1];
- else
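+ /*
+ * BCDMA #dma-cells are: TR trigger type, PSI-L thread ID, ASEL.
+ * An illustrative (hypothetical) client entry:
+ * dmas = <&main_bcdma 1 0x4400 0>;
+ */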
+ filter_param.tr_trigger_type = dma_spec->args[0];
+ filter_param.remote_thread_id = dma_spec->args[1];
+ filter_param.asel = dma_spec->args[2];
filter_param.atype = 0;
+ } else {
+ if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
+ return NULL;
+
+ filter_param.remote_thread_id = dma_spec->args[0];
+ filter_param.tr_trigger_type = 0;
+ if (dma_spec->args_count == 2) {
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ filter_param.atype = dma_spec->args[1];
+ filter_param.asel = 0;
+ } else {
+ filter_param.atype = 0;
+ filter_param.asel = dma_spec->args[1];
+ }
+ } else {
+ filter_param.atype = 0;
+ filter_param.asel = 0;
+ }
+ }
chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
ofdma->of_node);
@@ -3093,28 +4163,48 @@ static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
}
static struct udma_match_data am654_main_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
.statictr_z_mask = GENMASK(11, 0),
};
static struct udma_match_data am654_mcu_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false,
.statictr_z_mask = GENMASK(11, 0),
};
static struct udma_match_data j721e_main_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x1000,
.enable_memcpy_support = true,
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
};
static struct udma_match_data j721e_mcu_data = {
+ .type = DMA_TYPE_UDMA,
.psil_base = 0x6000,
.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
- .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST,
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+};
+
+static struct udma_match_data am64_bcdma_data = {
+ .type = DMA_TYPE_BCDMA,
+ .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
+ .enable_memcpy_support = true, /* Supported via bchan */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
+ .statictr_z_mask = GENMASK(23, 0),
+};
+
+static struct udma_match_data am64_pktdma_data = {
+ .type = DMA_TYPE_PKTDMA,
+ .psil_base = 0x1000,
+ .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
+ .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST | UDMA_FLAG_TDTYPE,
.statictr_z_mask = GENMASK(23, 0),
};
@@ -3136,30 +4226,105 @@ static const struct of_device_id udma_of_match[] = {
{ /* Sentinel */ },
};
+static const struct of_device_id bcdma_of_match[] = {
+ {
+ .compatible = "ti,am64-dmss-bcdma",
+ .data = &am64_bcdma_data,
+ },
+ { /* Sentinel */ },
+};
+
+static const struct of_device_id pktdma_of_match[] = {
+ {
+ .compatible = "ti,am64-dmss-pktdma",
+ .data = &am64_pktdma_data,
+ },
+ { /* Sentinel */ },
+};
+
static struct udma_soc_data am654_soc_data = {
- .rchan_oes_offset = 0x200,
+ .oes = {
+ .udma_rchan = 0x200,
+ },
};
static struct udma_soc_data j721e_soc_data = {
- .rchan_oes_offset = 0x400,
+ .oes = {
+ .udma_rchan = 0x400,
+ },
};
static struct udma_soc_data j7200_soc_data = {
- .rchan_oes_offset = 0x80,
+ .oes = {
+ .udma_rchan = 0x80,
+ },
+};
+
+static struct udma_soc_data am64_soc_data = {
+ .oes = {
+ .bcdma_bchan_data = 0x2200,
+ .bcdma_bchan_ring = 0x2400,
+ .bcdma_tchan_data = 0x2800,
+ .bcdma_tchan_ring = 0x2a00,
+ .bcdma_rchan_data = 0x2e00,
+ .bcdma_rchan_ring = 0x3000,
+ .pktdma_tchan_flow = 0x1200,
+ .pktdma_rchan_flow = 0x1600,
+ },
+ .bcdma_trigger_event_offset = 0xc400,
};
static const struct soc_device_attribute k3_soc_devices[] = {
{ .family = "AM65X", .data = &am654_soc_data },
{ .family = "J721E", .data = &j721e_soc_data },
{ .family = "J7200", .data = &j7200_soc_data },
+ { .family = "AM64X", .data = &am64_soc_data },
{ /* sentinel */ }
};
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
+ u32 cap2, cap3, cap4;
int i;
- for (i = 0; i < MMR_LAST; i++) {
+ ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
+ if (IS_ERR(ud->mmrs[MMR_GCFG]))
+ return PTR_ERR(ud->mmrs[MMR_GCFG]);
+
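+ /* channel counts are advertised in the GCFG capability registers (CAP2 @0x28, CAP3 @0x2c, CAP4 @0x30) */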
+ cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
+ ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
+ ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ break;
+ case DMA_TYPE_BCDMA:
+ ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
+ ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
+ break;
+ case DMA_TYPE_PKTDMA:
+ cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+ ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
+ ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
+ ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
+ ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 1; i < MMR_LAST; i++) {
+ if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
+ continue;
+ if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
+ continue;
+ if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
+ continue;
+
ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
if (IS_ERR(ud->mmrs[i]))
return PTR_ERR(ud->mmrs[i]);
@@ -3168,48 +4333,58 @@ static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
return 0;
}
+static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
+ struct ti_sci_resource_desc *rm_desc,
+ char *name)
+{
+ bitmap_clear(map, rm_desc->start, rm_desc->num);
+ bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
+ dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
+ rm_desc->start, rm_desc->num, rm_desc->start_sec,
+ rm_desc->num_sec);
+}
+
+static const char * const range_names[] = {
+ [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
+ [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
+ [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
+ [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
+ [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
+};
+
static int udma_setup_resources(struct udma_dev *ud)
{
+ int ret, i, j;
struct device *dev = ud->dev;
- int ch_count, ret, i, j;
- u32 cap2, cap3;
- struct ti_sci_resource_desc *rm_desc;
struct ti_sci_resource *rm_res, irq_res;
struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
- static const char * const range_names[] = { "ti,sci-rm-range-tchan",
- "ti,sci-rm-range-rchan",
- "ti,sci-rm-range-rflow" };
-
- cap2 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(2));
- cap3 = udma_read(ud->mmrs[MMR_GCFG], UDMA_CAP_REG(3));
-
- ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
- ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
- ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
- ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
- ch_count = ud->tchan_cnt + ud->rchan_cnt;
+ u32 cap3;
/* Set up the throughput level start indexes */
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
if (of_device_is_compatible(dev->of_node,
"ti,am654-navss-main-udmap")) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = 8;
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = 8;
} else if (of_device_is_compatible(dev->of_node,
"ti,am654-navss-mcu-udmap")) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = 2;
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = 2;
} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
- ud->tpl_levels = 3;
- ud->tpl_start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
- ud->tpl_start_idx[0] = ud->tpl_start_idx[1] +
- UDMA_CAP3_HCHAN_CNT(cap3);
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
- ud->tpl_levels = 2;
- ud->tpl_start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
} else {
- ud->tpl_levels = 1;
+ ud->tchan_tpl.levels = 1;
}
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+
ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
sizeof(unsigned long), GFP_KERNEL);
ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
@@ -3247,11 +4422,15 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
/* Get resource ranges from tisci */
- for (i = 0; i < RM_RANGE_LAST; i++)
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
+ continue;
+
tisci_rm->rm_ranges[i] =
devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
tisci_rm->tisci_dev_id,
(char *)range_names[i]);
+ }
/* tchan ranges */
rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
@@ -3259,13 +4438,9 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_zero(ud->tchan_map, ud->tchan_cnt);
} else {
bitmap_fill(ud->tchan_map, ud->tchan_cnt);
- for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->tchan_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n",
- rm_desc->start, rm_desc->num);
- }
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i], "tchan");
}
irq_res.sets = rm_res->sets;
@@ -3275,13 +4450,9 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_zero(ud->rchan_map, ud->rchan_cnt);
} else {
bitmap_fill(ud->rchan_map, ud->rchan_cnt);
- for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->rchan_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n",
- rm_desc->start, rm_desc->num);
- }
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i], "rchan");
}
irq_res.sets += rm_res->sets;
@@ -3290,12 +4461,21 @@ static int udma_setup_resources(struct udma_dev *ud)
for (i = 0; i < rm_res->sets; i++) {
irq_res.desc[i].start = rm_res->desc[i].start;
irq_res.desc[i].num = rm_res->desc[i].num;
+ irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
+ irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
}
rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
for (j = 0; j < rm_res->sets; j++, i++) {
- irq_res.desc[i].start = rm_res->desc[j].start +
- ud->soc_data->rchan_oes_offset;
- irq_res.desc[i].num = rm_res->desc[j].num;
+ if (rm_res->desc[j].num) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ if (rm_res->desc[j].num_sec) {
+ irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
+ ud->soc_data->oes.udma_rchan;
+ irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
+ }
}
ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
kfree(irq_res.desc);
@@ -3311,15 +4491,344 @@ static int udma_setup_resources(struct udma_dev *ud)
bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
ud->rflow_cnt - ud->rchan_cnt);
} else {
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rflow_gp_map,
+ &rm_res->desc[i], "gp-rflow");
+ }
+
+ return 0;
+}
+
+static int bcdma_setup_resources(struct udma_dev *ud)
+{
+ int ret, i, j;
+ struct device *dev = ud->dev;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 cap;
+
+ /* Set up the throughput level start indexes */
+ cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+ if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
+ ud->bchan_tpl.levels = 3;
+ ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
+ ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
+ } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
+ ud->bchan_tpl.levels = 2;
+ ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
+ } else {
+ ud->bchan_tpl.levels = 1;
+ }
+
+ cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
+ if (BCDMA_CAP4_URCHAN_CNT(cap)) {
+ ud->rchan_tpl.levels = 3;
+ ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
+ ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
+ } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
+ ud->rchan_tpl.levels = 2;
+ ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
+ } else {
+ ud->rchan_tpl.levels = 1;
+ }
+
+ if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
+ ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
+ } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
+ } else {
+ ud->tchan_tpl.levels = 1;
+ }
+
+ ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
+ GFP_KERNEL);
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ /* BCDMA does not really have flows, but the driver expects them */
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+
+ if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
+ !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
+ !ud->rflows)
+ return -ENOMEM;
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
+ continue;
+ if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
+ continue;
+ if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
+ continue;
+ if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
+ continue;
+
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+ }
+
+ irq_res.sets = 0;
+
+ /* bchan ranges */
+ if (ud->bchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->bchan_map, ud->bchan_cnt);
+ } else {
+ bitmap_fill(ud->bchan_map, ud->bchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->bchan_map,
+ &rm_res->desc[i],
+ "bchan");
+ }
+ irq_res.sets += rm_res->sets;
+ }
+
+ /* tchan ranges */
+ if (ud->tchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i],
+ "tchan");
+ }
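+ /* tchans and rchans each need two event ranges: TR completion (data) and ring */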
+ irq_res.sets += rm_res->sets * 2;
+ }
+
+ /* rchan ranges */
+ if (ud->rchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i],
+ "rchan");
+ }
+ irq_res.sets += rm_res->sets * 2;
+ }
+
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ if (ud->bchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
for (i = 0; i < rm_res->sets; i++) {
- rm_desc = &rm_res->desc[i];
- bitmap_clear(ud->rflow_gp_map, rm_desc->start,
- rm_desc->num);
- dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n",
- rm_desc->start, rm_desc->num);
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->bcdma_bchan_ring;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ }
+ if (ud->tchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_tchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
+ }
+ }
+ if (ud->rchan_cnt) {
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ for (j = 0; j < rm_res->sets; j++, i += 2) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_data;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+
+ irq_res.desc[i + 1].start = rm_res->desc[j].start +
+ oes->bcdma_rchan_ring;
+ irq_res.desc[i + 1].num = rm_res->desc[j].num;
}
}
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int pktdma_setup_resources(struct udma_dev *ud)
+{
+ int ret, i, j;
+ struct device *dev = ud->dev;
+ struct ti_sci_resource *rm_res, irq_res;
+ struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
+ const struct udma_oes_offsets *oes = &ud->soc_data->oes;
+ u32 cap3;
+
+ /* Set up the throughput level start indexes */
+ cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
+ if (UDMA_CAP3_UCHAN_CNT(cap3)) {
+ ud->tchan_tpl.levels = 3;
+ ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
+ ud->tchan_tpl.levels = 2;
+ ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
+ } else {
+ ud->tchan_tpl.levels = 1;
+ }
+
+ ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+ ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+ ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+
+ ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
+ GFP_KERNEL);
+ ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+ ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
+ GFP_KERNEL);
+ ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
+ GFP_KERNEL);
+ ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
+ !ud->rchans || !ud->rflows || !ud->rflow_in_use)
+ return -ENOMEM;
+
+ /* Get resource ranges from tisci */
+ for (i = 0; i < RM_RANGE_LAST; i++) {
+ if (i == RM_RANGE_BCHAN)
+ continue;
+
+ tisci_rm->rm_ranges[i] =
+ devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
+ tisci_rm->tisci_dev_id,
+ (char *)range_names[i]);
+ }
+
+ /* tchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->tchan_map, ud->tchan_cnt);
+ } else {
+ bitmap_fill(ud->tchan_map, ud->tchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tchan_map,
+ &rm_res->desc[i], "tchan");
+ }
+
+ /* rchan ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
+ if (IS_ERR(rm_res)) {
+ bitmap_zero(ud->rchan_map, ud->rchan_cnt);
+ } else {
+ bitmap_fill(ud->rchan_map, ud->rchan_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rchan_map,
+ &rm_res->desc[i], "rchan");
+ }
+
+ /* rflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all rflows are assigned exclusively to Linux */
+ bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
+ } else {
+ bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->rflow_in_use,
+ &rm_res->desc[i], "rflow");
+ }
+ irq_res.sets = rm_res->sets;
+
+ /* tflow ranges */
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+ if (IS_ERR(rm_res)) {
+ /* all tflows are assigned exclusively to Linux */
+ bitmap_zero(ud->tflow_map, ud->tflow_cnt);
+ } else {
+ bitmap_fill(ud->tflow_map, ud->tflow_cnt);
+ for (i = 0; i < rm_res->sets; i++)
+ udma_mark_resource_ranges(ud, ud->tflow_map,
+ &rm_res->desc[i], "tflow");
+ }
+ irq_res.sets += rm_res->sets;
+
+ irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
+ for (i = 0; i < rm_res->sets; i++) {
+ irq_res.desc[i].start = rm_res->desc[i].start +
+ oes->pktdma_tchan_flow;
+ irq_res.desc[i].num = rm_res->desc[i].num;
+ }
+ rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
+ for (j = 0; j < rm_res->sets; j++, i++) {
+ irq_res.desc[i].start = rm_res->desc[j].start +
+ oes->pktdma_rchan_flow;
+ irq_res.desc[i].num = rm_res->desc[j].num;
+ }
+ ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
+ kfree(irq_res.desc);
+ if (ret) {
+ dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int setup_resources(struct udma_dev *ud)
+{
+ struct device *dev = ud->dev;
+ int ch_count, ret;
+
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ret = udma_setup_resources(ud);
+ break;
+ case DMA_TYPE_BCDMA:
+ ret = bcdma_setup_resources(ud);
+ break;
+ case DMA_TYPE_PKTDMA:
+ ret = pktdma_setup_resources(ud);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
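+ /* usable channel count: everything not reserved for other hosts in the bitmaps */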
+ ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
+ if (ud->bchan_cnt)
+ ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
if (!ch_count)
@@ -3330,12 +4839,40 @@ static int udma_setup_resources(struct udma_dev *ud)
if (!ud->channels)
return -ENOMEM;
- dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
- ch_count,
- ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt),
- ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt),
- ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
- ud->rflow_cnt));
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ dev_info(dev,
+ "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt),
+ ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
+ ud->rflow_cnt));
+ break;
+ case DMA_TYPE_BCDMA:
+ dev_info(dev,
+ "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->bchan_cnt - bitmap_weight(ud->bchan_map,
+ ud->bchan_cnt),
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ case DMA_TYPE_PKTDMA:
+ dev_info(dev,
+ "Channels: %d (tchan: %u, rchan: %u)\n",
+ ch_count,
+ ud->tchan_cnt - bitmap_weight(ud->tchan_map,
+ ud->tchan_cnt),
+ ud->rchan_cnt - bitmap_weight(ud->rchan_map,
+ ud->rchan_cnt));
+ break;
+ default:
+ break;
+ }
return ch_count;
}
@@ -3444,20 +4981,33 @@ static void udma_dbg_summary_show_chan(struct seq_file *s,
seq_printf(s, " %-13s| %s", dma_chan_name(chan),
chan->dbg_client_name ?: "in-use");
- seq_printf(s, " (%s, ", dmaengine_get_direction_text(uc->config.dir));
+ if (ucc->tr_trigger_type)
+ seq_puts(s, " (triggered, ");
+ else
+ seq_printf(s, " (%s, ",
+ dmaengine_get_direction_text(uc->config.dir));
switch (uc->config.dir) {
case DMA_MEM_TO_MEM:
+ if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
+ seq_printf(s, "bchan%d)\n", uc->bchan->id);
+ return;
+ }
+
seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
ucc->src_thread, ucc->dst_thread);
break;
case DMA_DEV_TO_MEM:
seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
ucc->src_thread, ucc->dst_thread);
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
+ seq_printf(s, "rflow%d, ", uc->rflow->id);
break;
case DMA_MEM_TO_DEV:
seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
ucc->src_thread, ucc->dst_thread);
+ if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
+ seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
break;
default:
seq_printf(s, ")\n");
@@ -3519,6 +5069,25 @@ static int udma_probe(struct platform_device *pdev)
if (!ud)
return -ENOMEM;
+ match = of_match_node(udma_of_match, dev->of_node);
+ if (!match)
+ match = of_match_node(bcdma_of_match, dev->of_node);
+ if (!match) {
+ match = of_match_node(pktdma_of_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "No compatible match found\n");
+ return -ENODEV;
+ }
+ }
+ ud->match_data = match->data;
+
+ soc = soc_device_match(k3_soc_devices);
+ if (!soc) {
+ dev_err(dev, "No compatible SoC found\n");
+ return -ENODEV;
+ }
+ ud->soc_data = soc->data;
+
ret = udma_get_mmrs(pdev, ud);
if (ret)
return ret;
@@ -3542,16 +5111,44 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(dev->of_node, "ti,udma-atype", &ud->atype);
- if (!ret && ud->atype > 2) {
- dev_err(dev, "Invalid atype: %u\n", ud->atype);
- return -EINVAL;
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
+ &ud->atype);
+ if (!ret && ud->atype > 2) {
+ dev_err(dev, "Invalid atype: %u\n", ud->atype);
+ return -EINVAL;
+ }
+ } else {
+ ret = of_property_read_u32(dev->of_node, "ti,asel",
+ &ud->asel);
+ if (!ret && ud->asel > 15) {
+ dev_err(dev, "Invalid asel: %u\n", ud->asel);
+ return -EINVAL;
+ }
}
ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
- ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ if (ud->match_data->type == DMA_TYPE_UDMA) {
+ ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
+ } else {
+ struct k3_ringacc_init_data ring_init_data;
+
+ ring_init_data.tisci = ud->tisci_rm.tisci;
+ ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
+ if (ud->match_data->type == DMA_TYPE_BCDMA) {
+ ring_init_data.num_rings = ud->bchan_cnt +
+ ud->tchan_cnt +
+ ud->rchan_cnt;
+ } else {
+ ring_init_data.num_rings = ud->rflow_cnt +
+ ud->tflow_cnt;
+ }
+
+ ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
+ }
+
if (IS_ERR(ud->ringacc))
return PTR_ERR(ud->ringacc);
@@ -3562,27 +5159,15 @@ static int udma_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
- match = of_match_node(udma_of_match, dev->of_node);
- if (!match) {
- dev_err(dev, "No compatible match found\n");
- return -ENODEV;
- }
- ud->match_data = match->data;
-
- soc = soc_device_match(k3_soc_devices);
- if (!soc) {
- dev_err(dev, "No compatible SoC found\n");
- return -ENODEV;
- }
- ud->soc_data = soc->data;
-
dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
- dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+ /* cyclic operation is not supported via PKTDMA */
+ if (ud->match_data->type != DMA_TYPE_PKTDMA) {
+ dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
+ ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
+ }
- ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources;
ud->ddev.device_config = udma_slave_config;
ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
- ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
ud->ddev.device_issue_pending = udma_issue_pending;
ud->ddev.device_tx_status = udma_tx_status;
ud->ddev.device_pause = udma_pause;
@@ -3593,7 +5178,25 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif
+ switch (ud->match_data->type) {
+ case DMA_TYPE_UDMA:
+ ud->ddev.device_alloc_chan_resources =
+ udma_alloc_chan_resources;
+ break;
+ case DMA_TYPE_BCDMA:
+ ud->ddev.device_alloc_chan_resources =
+ bcdma_alloc_chan_resources;
+ ud->ddev.device_router_config = bcdma_router_config;
+ break;
+ case DMA_TYPE_PKTDMA:
+ ud->ddev.device_alloc_chan_resources =
+ pktdma_alloc_chan_resources;
+ break;
+ default:
+ return -EINVAL;
+ }
ud->ddev.device_free_chan_resources = udma_free_chan_resources;
+
ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
@@ -3601,7 +5204,8 @@ static int udma_probe(struct platform_device *pdev)
ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES;
ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
DESC_METADATA_ENGINE;
- if (ud->match_data->enable_memcpy_support) {
+ if (ud->match_data->enable_memcpy_support &&
+ !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
@@ -3614,7 +5218,7 @@ static int udma_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&ud->ddev.channels);
INIT_LIST_HEAD(&ud->desc_to_purge);
- ch_count = udma_setup_resources(ud);
+ ch_count = setup_resources(ud);
if (ch_count <= 0)
return ch_count;
@@ -3629,6 +5233,13 @@ static int udma_probe(struct platform_device *pdev)
if (ret)
return ret;
+ for (i = 0; i < ud->bchan_cnt; i++) {
+ struct udma_bchan *bchan = &ud->bchans[i];
+
+ bchan->id = i;
+ bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
+ }
+
for (i = 0; i < ud->tchan_cnt; i++) {
struct udma_tchan *tchan = &ud->tchans[i];
@@ -3655,9 +5266,12 @@ static int udma_probe(struct platform_device *pdev)
uc->ud = ud;
uc->vc.desc_free = udma_desc_free;
uc->id = i;
+ uc->bchan = NULL;
uc->tchan = NULL;
uc->rchan = NULL;
uc->config.remote_thread_id = -1;
+ uc->config.mapped_channel_id = -1;
+ uc->config.default_flow_id = -1;
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
@@ -3696,5 +5310,25 @@ static struct platform_driver udma_driver = {
};
builtin_platform_driver(udma_driver);
+static struct platform_driver bcdma_driver = {
+ .driver = {
+ .name = "ti-bcdma",
+ .of_match_table = bcdma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(bcdma_driver);
+
+static struct platform_driver pktdma_driver = {
+ .driver = {
+ .name = "ti-pktdma",
+ .of_match_table = pktdma_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = udma_probe,
+};
+builtin_platform_driver(pktdma_driver);
+
/* Private interfaces to UDMA */
#include "k3-udma-private.c"
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
index 09c4529e013d..d349c6d482ae 100644
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -18,7 +18,7 @@
#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80
#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88
-/* TCHANRT/RCHANRT registers */
+/* BCHANRT/TCHANRT/RCHANRT registers */
#define UDMA_CHAN_RT_CTL_REG 0x0
#define UDMA_CHAN_RT_SWTRIG_REG 0x8
#define UDMA_CHAN_RT_STDATA_REG 0x80
@@ -45,6 +45,18 @@
#define UDMA_CAP3_HCHAN_CNT(val) (((val) >> 14) & 0x1ff)
#define UDMA_CAP3_UCHAN_CNT(val) (((val) >> 23) & 0x1ff)
+#define BCDMA_CAP2_BCHAN_CNT(val) ((val) & 0x1ff)
+#define BCDMA_CAP2_TCHAN_CNT(val) (((val) >> 9) & 0x1ff)
+#define BCDMA_CAP2_RCHAN_CNT(val) (((val) >> 18) & 0x1ff)
+#define BCDMA_CAP3_HBCHAN_CNT(val) (((val) >> 14) & 0x1ff)
+#define BCDMA_CAP3_UBCHAN_CNT(val) (((val) >> 23) & 0x1ff)
+#define BCDMA_CAP4_HRCHAN_CNT(val) ((val) & 0xff)
+#define BCDMA_CAP4_URCHAN_CNT(val) (((val) >> 8) & 0xff)
+#define BCDMA_CAP4_HTCHAN_CNT(val) (((val) >> 16) & 0xff)
+#define BCDMA_CAP4_UTCHAN_CNT(val) (((val) >> 24) & 0xff)
+
+#define PKTDMA_CAP4_TFLOW_CNT(val) ((val) & 0x3fff)
+
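As a quick sanity check of the new capability macros, decoding one illustrative (made-up) BCDMA CAP2 value:

/*
 * Illustrative CAP2 value 0x00a04810:
 *   BCDMA_CAP2_BCHAN_CNT(0x00a04810) =  0x00a04810        & 0x1ff = 16
 *   BCDMA_CAP2_TCHAN_CNT(0x00a04810) = (0x00a04810 >>  9) & 0x1ff = 36
 *   BCDMA_CAP2_RCHAN_CNT(0x00a04810) = (0x00a04810 >> 18) & 0x1ff = 40
 */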
/* UDMA_CHAN_RT_CTL_REG */
#define UDMA_CHAN_RT_CTL_EN BIT(31)
#define UDMA_CHAN_RT_CTL_TDOWN BIT(30)
@@ -82,15 +94,20 @@
*/
#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask))
+/* Address Space Select */
+#define K3_ADDRESS_ASEL_SHIFT 48
+
struct udma_dev;
struct udma_tchan;
struct udma_rchan;
struct udma_rflow;
enum udma_rm_range {
- RM_RANGE_TCHAN = 0,
+ RM_RANGE_BCHAN = 0,
+ RM_RANGE_TCHAN,
RM_RANGE_RCHAN,
RM_RANGE_RFLOW,
+ RM_RANGE_TFLOW,
RM_RANGE_LAST,
};
@@ -112,6 +129,8 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
u32 dst_thread);
struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+struct device *xudma_get_device(struct udma_dev *ud);
+struct k3_ringacc *xudma_get_ringacc(struct udma_dev *ud);
void xudma_dev_put(struct udma_dev *ud);
u32 xudma_dev_get_psil_base(struct udma_dev *ud);
struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
@@ -136,5 +155,10 @@ void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val);
u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg);
void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val);
bool xudma_rflow_is_gp(struct udma_dev *ud, int id);
+int xudma_get_rflow_ring_offset(struct udma_dev *ud);
+
+int xudma_is_pktdma(struct udma_dev *ud);
+int xudma_pktdma_tflow_get_irq(struct udma_dev *ud, int udma_tflow_id);
+int xudma_pktdma_rflow_get_irq(struct udma_dev *ud, int udma_rflow_id);
#endif /* K3_UDMA_H_ */
diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
index 22faea653ea8..79777550a6ff 100644
--- a/drivers/dma/xilinx/xilinx_dma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -2781,7 +2781,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
has_dre = false;
if (!has_dre)
- xdev->common.copy_align = fls(width - 1);
+ xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
@@ -2900,7 +2900,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
struct device_node *node)
{
- int ret, i, nr_channels = 1;
+ int ret, i;
+ u32 nr_channels = 1;
ret = of_property_read_u32(node, "dma-channels", &nr_channels);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
@@ -3112,7 +3113,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
/* Register the DMA engine with the core */
- dma_async_device_register(&xdev->common);
+ err = dma_async_device_register(&xdev->common);
+ if (err) {
+ dev_err(xdev->dev, "failed to register the dma device\n");
+ goto error;
+ }
err = of_dma_controller_register(node, of_dma_xilinx_xlate,
xdev);
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index ce336899d636..66196b293b6c 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1474,17 +1474,17 @@ int scmi_notification_init(struct scmi_handle *handle)
ni->gid = gid;
ni->handle = handle;
+ ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
+ sizeof(char *), GFP_KERNEL);
+ if (!ni->registered_protocols)
+ goto err;
+
ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
0);
if (!ni->notify_wq)
goto err;
- ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
- sizeof(char *), GFP_KERNEL);
- if (!ni->registered_protocols)
- goto err;
-
mutex_init(&ni->pending_mtx);
hash_init(ni->pending_events_handlers);
diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c
index b4232d611033..4541b891b733 100644
--- a/drivers/firmware/arm_scmi/sensors.c
+++ b/drivers/firmware/arm_scmi/sensors.c
@@ -2,21 +2,30 @@
/*
* System Control and Management Interface (SCMI) Sensor Protocol
*
- * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2018-2020 ARM Ltd.
*/
#define pr_fmt(fmt) "SCMI Notifications SENSOR - " fmt
+#include <linux/bitfield.h>
#include <linux/scmi_protocol.h>
#include "common.h"
#include "notify.h"
+#define SCMI_MAX_NUM_SENSOR_AXIS 63
+#define SCMIv2_SENSOR_PROTOCOL 0x10000
+
enum scmi_sensor_protocol_cmd {
SENSOR_DESCRIPTION_GET = 0x3,
SENSOR_TRIP_POINT_NOTIFY = 0x4,
SENSOR_TRIP_POINT_CONFIG = 0x5,
SENSOR_READING_GET = 0x6,
+ SENSOR_AXIS_DESCRIPTION_GET = 0x7,
+ SENSOR_LIST_UPDATE_INTERVALS = 0x8,
+ SENSOR_CONFIG_GET = 0x9,
+ SENSOR_CONFIG_SET = 0xA,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY = 0xB,
};
struct scmi_msg_resp_sensor_attributes {
@@ -28,29 +37,106 @@ struct scmi_msg_resp_sensor_attributes {
__le32 reg_size;
};
+/* v3 attributes_low macros */
+#define SUPPORTS_UPDATE_NOTIFY(x) FIELD_GET(BIT(30), (x))
+#define SENSOR_TSTAMP_EXP(x) FIELD_GET(GENMASK(14, 10), (x))
+#define SUPPORTS_TIMESTAMP(x) FIELD_GET(BIT(9), (x))
+#define SUPPORTS_EXTEND_ATTRS(x) FIELD_GET(BIT(8), (x))
+
+/* v2 attributes_high macros */
+#define SENSOR_UPDATE_BASE(x) FIELD_GET(GENMASK(31, 27), (x))
+#define SENSOR_UPDATE_SCALE(x) FIELD_GET(GENMASK(26, 22), (x))
+
+/* v3 attributes_high macros */
+#define SENSOR_AXIS_NUMBER(x) FIELD_GET(GENMASK(21, 16), (x))
+#define SUPPORTS_AXIS(x) FIELD_GET(BIT(8), (x))
+
+/* v3 resolution macros */
+#define SENSOR_RES(x) FIELD_GET(GENMASK(26, 0), (x))
+#define SENSOR_RES_EXP(x) FIELD_GET(GENMASK(31, 27), (x))
+
+struct scmi_msg_resp_attrs {
+ __le32 min_range_low;
+ __le32 min_range_high;
+ __le32 max_range_low;
+ __le32 max_range_high;
+};
+
struct scmi_msg_resp_sensor_description {
__le16 num_returned;
__le16 num_remaining;
- struct {
+ struct scmi_sensor_descriptor {
__le32 id;
__le32 attributes_low;
-#define SUPPORTS_ASYNC_READ(x) ((x) & BIT(31))
-#define NUM_TRIP_POINTS(x) ((x) & 0xff)
+/* Common attributes_low macros */
+#define SUPPORTS_ASYNC_READ(x) FIELD_GET(BIT(31), (x))
+#define NUM_TRIP_POINTS(x) FIELD_GET(GENMASK(7, 0), (x))
__le32 attributes_high;
-#define SENSOR_TYPE(x) ((x) & 0xff)
-#define SENSOR_SCALE(x) (((x) >> 11) & 0x1f)
-#define SENSOR_SCALE_SIGN BIT(4)
-#define SENSOR_SCALE_EXTEND GENMASK(7, 5)
-#define SENSOR_UPDATE_SCALE(x) (((x) >> 22) & 0x1f)
-#define SENSOR_UPDATE_BASE(x) (((x) >> 27) & 0x1f)
- u8 name[SCMI_MAX_STR_SIZE];
- } desc[0];
+/* Common attributes_high macros */
+#define SENSOR_SCALE(x) FIELD_GET(GENMASK(15, 11), (x))
+#define SENSOR_SCALE_SIGN BIT(4)
+#define SENSOR_SCALE_EXTEND GENMASK(31, 5)
+#define SENSOR_TYPE(x) FIELD_GET(GENMASK(7, 0), (x))
+ u8 name[SCMI_MAX_STR_SIZE];
+ /* only for version > 2.0 */
+ __le32 power;
+ __le32 resolution;
+ struct scmi_msg_resp_attrs scalar_attrs;
+ } desc[];
};
-struct scmi_msg_sensor_trip_point_notify {
+/* Base scmi_sensor_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_SENS_DESCR_BASE_SZ 28
+
+/* Sign extend to a full s32 */
+#define S32_EXT(v) \
+ ({ \
+ int __v = (v); \
+ \
+ if (__v & SENSOR_SCALE_SIGN) \
+ __v |= SENSOR_SCALE_EXTEND; \
+ __v; \
+ })
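A worked example of the sign-extension macro above, assuming the usual 5-bit two's-complement scale encoding:

/*
 * A raw 5-bit scale of 0x1e encodes -2: SENSOR_SCALE_SIGN (BIT(4)) is
 * set, so S32_EXT() ORs in SENSOR_SCALE_EXTEND (GENMASK(31, 5)),
 * yielding 0xfffffffe, i.e. -2 as a signed 32-bit value.
 */
int scale = S32_EXT(0x1e);	/* -2 */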
+
+struct scmi_msg_sensor_axis_description_get {
+ __le32 id;
+ __le32 axis_desc_index;
+};
+
+struct scmi_msg_resp_sensor_axis_description {
+ __le32 num_axis_flags;
+#define NUM_AXIS_RETURNED(x) FIELD_GET(GENMASK(5, 0), (x))
+#define NUM_AXIS_REMAINING(x) FIELD_GET(GENMASK(31, 26), (x))
+ struct scmi_axis_descriptor {
+ __le32 id;
+ __le32 attributes_low;
+ __le32 attributes_high;
+ u8 name[SCMI_MAX_STR_SIZE];
+ __le32 resolution;
+ struct scmi_msg_resp_attrs attrs;
+ } desc[];
+};
+
+/* Base scmi_axis_descriptor size excluding extended attrs after name */
+#define SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ 28
+
+struct scmi_msg_sensor_list_update_intervals {
+ __le32 id;
+ __le32 index;
+};
+
+struct scmi_msg_resp_sensor_list_update_intervals {
+ __le32 num_intervals_flags;
+#define NUM_INTERVALS_RETURNED(x) FIELD_GET(GENMASK(11, 0), (x))
+#define SEGMENTED_INTVL_FORMAT(x) FIELD_GET(BIT(12), (x))
+#define NUM_INTERVALS_REMAINING(x) FIELD_GET(GENMASK(31, 16), (x))
+ __le32 intervals[];
+};
+
+struct scmi_msg_sensor_request_notify {
__le32 id;
__le32 event_control;
-#define SENSOR_TP_NOTIFY_ALL BIT(0)
+#define SENSOR_NOTIFY_ALL BIT(0)
};
struct scmi_msg_set_sensor_trip_point {
@@ -66,18 +152,46 @@ struct scmi_msg_set_sensor_trip_point {
__le32 value_high;
};
+struct scmi_msg_sensor_config_set {
+ __le32 id;
+ __le32 sensor_config;
+};
+
struct scmi_msg_sensor_reading_get {
__le32 id;
__le32 flags;
#define SENSOR_READ_ASYNC BIT(0)
};
+struct scmi_resp_sensor_reading_complete {
+ __le32 id;
+ __le64 readings;
+};
+
+struct scmi_sensor_reading_resp {
+ __le32 sensor_value_low;
+ __le32 sensor_value_high;
+ __le32 timestamp_low;
+ __le32 timestamp_high;
+};
+
+struct scmi_resp_sensor_reading_complete_v3 {
+ __le32 id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
struct scmi_sensor_trip_notify_payld {
__le32 agent_id;
__le32 sensor_id;
__le32 trip_point_desc;
};
+struct scmi_sensor_update_notify_payld {
+ __le32 agent_id;
+ __le32 sensor_id;
+ struct scmi_sensor_reading_resp readings[];
+};
+
struct sensors_info {
u32 version;
int num_sensors;
@@ -114,6 +228,194 @@ static int scmi_sensor_attributes_get(const struct scmi_handle *handle,
return ret;
}
+static inline void scmi_parse_range_attrs(struct scmi_range_attrs *out,
+ struct scmi_msg_resp_attrs *in)
+{
+ out->min_range = get_unaligned_le64((void *)&in->min_range_low);
+ out->max_range = get_unaligned_le64((void *)&in->max_range_low);
+}
+
+static int scmi_sensor_update_intervals(const struct scmi_handle *handle,
+ struct scmi_sensor_info *s)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *ti;
+ struct scmi_msg_resp_sensor_list_update_intervals *buf;
+ struct scmi_msg_sensor_list_update_intervals *msg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_LIST_UPDATE_INTERVALS,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &ti);
+ if (ret)
+ return ret;
+
+ buf = ti->rx.buf;
+ do {
+ u32 flags;
+
+ msg = ti->tx.buf;
+ /* Set the number of intervals to be skipped/already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->index = cpu_to_le32(desc_index);
+
+ ret = scmi_do_xfer(handle, ti);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(buf->num_intervals_flags);
+ num_returned = NUM_INTERVALS_RETURNED(flags);
+ num_remaining = NUM_INTERVALS_REMAINING(flags);
+
+ /*
+ * The maximum number of intervals is not advertised anywhere
+ * beforehand, so assume it is returned + remaining as reported
+ * by the first response.
+ */
+ if (!s->intervals.count) {
+ s->intervals.segmented = SEGMENTED_INTVL_FORMAT(flags);
+ s->intervals.count = num_returned + num_remaining;
+ /* segmented intervals are reported in one triplet */
+ if (s->intervals.segmented &&
+ (num_remaining || num_returned != 3)) {
+ dev_err(handle->dev,
+ "Sensor ID:%d advertises an invalid segmented interval (%d)\n",
+ s->id, s->intervals.count);
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ ret = -EINVAL;
+ break;
+ }
+ /* Allocate directly when exceeding the pre-allocated pool */
+ if (s->intervals.count >= SCMI_MAX_PREALLOC_POOL) {
+ s->intervals.desc =
+ devm_kcalloc(handle->dev,
+ s->intervals.count,
+ sizeof(*s->intervals.desc),
+ GFP_KERNEL);
+ if (!s->intervals.desc) {
+ s->intervals.segmented = false;
+ s->intervals.count = 0;
+ ret = -ENOMEM;
+ break;
+ }
+ }
+ } else if (desc_index + num_returned > s->intervals.count) {
+ dev_err(handle->dev,
+ "No. of update intervals can't exceed %d\n",
+ s->intervals.count);
+ ret = -EINVAL;
+ break;
+ }
+
+ for (cnt = 0; cnt < num_returned; cnt++)
+ s->intervals.desc[desc_index + cnt] =
+ le32_to_cpu(buf->intervals[cnt]);
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, ti);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, ti);
+ return ret;
+}
+
+static int scmi_sensor_axis_description(const struct scmi_handle *handle,
+ struct scmi_sensor_info *s)
+{
+ int ret, cnt;
+ u32 desc_index = 0;
+ u16 num_returned, num_remaining;
+ struct scmi_xfer *te;
+ struct scmi_msg_resp_sensor_axis_description *buf;
+ struct scmi_msg_sensor_axis_description_get *msg;
+
+ s->axis = devm_kcalloc(handle->dev, s->num_axis,
+ sizeof(*s->axis), GFP_KERNEL);
+ if (!s->axis)
+ return -ENOMEM;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_AXIS_DESCRIPTION_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &te);
+ if (ret)
+ return ret;
+
+ buf = te->rx.buf;
+ do {
+ u32 flags;
+ struct scmi_axis_descriptor *adesc;
+
+ msg = te->tx.buf;
+ /* Set the number of axis descriptors to be skipped/already read */
+ msg->id = cpu_to_le32(s->id);
+ msg->axis_desc_index = cpu_to_le32(desc_index);
+
+ ret = scmi_do_xfer(handle, te);
+ if (ret)
+ break;
+
+ flags = le32_to_cpu(buf->num_axis_flags);
+ num_returned = NUM_AXIS_RETURNED(flags);
+ num_remaining = NUM_AXIS_REMAINING(flags);
+
+ if (desc_index + num_returned > s->num_axis) {
+ dev_err(handle->dev, "No. of axis can't exceed %d\n",
+ s->num_axis);
+ break;
+ }
+
+ adesc = &buf->desc[0];
+ for (cnt = 0; cnt < num_returned; cnt++) {
+ u32 attrh, attrl;
+ struct scmi_sensor_axis_info *a;
+ size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ;
+
+ attrl = le32_to_cpu(adesc->attributes_low);
+
+ a = &s->axis[desc_index + cnt];
+
+ a->id = le32_to_cpu(adesc->id);
+ a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(adesc->attributes_high);
+ a->scale = S32_EXT(SENSOR_SCALE(attrh));
+ a->type = SENSOR_TYPE(attrh);
+ strlcpy(a->name, adesc->name, SCMI_MAX_STR_SIZE);
+
+ if (a->extended_attrs) {
+ unsigned int ares =
+ le32_to_cpu(adesc->resolution);
+
+ a->resolution = SENSOR_RES(ares);
+ a->exponent =
+ S32_EXT(SENSOR_RES_EXP(ares));
+ dsize += sizeof(adesc->resolution);
+
+ scmi_parse_range_attrs(&a->attrs,
+ &adesc->attrs);
+ dsize += sizeof(adesc->attrs);
+ }
+
+ adesc = (typeof(adesc))((u8 *)adesc + dsize);
+ }
+
+ desc_index += num_returned;
+
+ scmi_reset_rx_to_maxsz(handle, te);
+ /*
+ * check for both returned and remaining to avoid infinite
+ * loop due to buggy firmware
+ */
+ } while (num_returned && num_remaining);
+
+ scmi_xfer_put(handle, te);
+ return ret;
+}
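The loop above walks back-to-back variable-size records by advancing a byte pointer past each record's base size plus whatever optional tail it flags; a condensed sketch of the pattern (BASE_SZ, EXT_SZ, parse_fixed_part() and has_extended_attrs() are hypothetical names):

const u8 *rec = (const u8 *)buf->desc;

for (cnt = 0; cnt < num_returned; cnt++) {
	size_t dsize = BASE_SZ;

	parse_fixed_part(rec);
	if (has_extended_attrs(rec))
		dsize += EXT_SZ;	/* account for the optional tail */
	rec += dsize;			/* next record starts right after */
}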
+
static int scmi_sensor_description_get(const struct scmi_handle *handle,
struct sensors_info *si)
{
@@ -131,9 +433,10 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
buf = t->rx.buf;
do {
+ struct scmi_sensor_descriptor *sdesc;
+
/* Set the number of sensors to be skipped/already read */
put_unaligned_le32(desc_index, t->tx.buf);
-
ret = scmi_do_xfer(handle, t);
if (ret)
break;
@@ -147,22 +450,97 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
break;
}
+ sdesc = &buf->desc[0];
for (cnt = 0; cnt < num_returned; cnt++) {
u32 attrh, attrl;
struct scmi_sensor_info *s;
+ size_t dsize = SCMI_MSG_RESP_SENS_DESCR_BASE_SZ;
- attrl = le32_to_cpu(buf->desc[cnt].attributes_low);
- attrh = le32_to_cpu(buf->desc[cnt].attributes_high);
s = &si->sensors[desc_index + cnt];
- s->id = le32_to_cpu(buf->desc[cnt].id);
- s->type = SENSOR_TYPE(attrh);
- s->scale = SENSOR_SCALE(attrh);
- /* Sign extend to a full s8 */
- if (s->scale & SENSOR_SCALE_SIGN)
- s->scale |= SENSOR_SCALE_EXTEND;
+ s->id = le32_to_cpu(sdesc->id);
+
+ attrl = le32_to_cpu(sdesc->attributes_low);
+ /* common bitfields parsing */
s->async = SUPPORTS_ASYNC_READ(attrl);
s->num_trip_points = NUM_TRIP_POINTS(attrl);
- strlcpy(s->name, buf->desc[cnt].name, SCMI_MAX_STR_SIZE);
+ /*
+ * Only SCMIv3.0-specific bitfields below; these are
+ * assumed to be zeroed on earlier fw versions,
+ * provided the fw is not buggy.
+ */
+ s->update = SUPPORTS_UPDATE_NOTIFY(attrl);
+ s->timestamped = SUPPORTS_TIMESTAMP(attrl);
+ if (s->timestamped)
+ s->tstamp_scale =
+ S32_EXT(SENSOR_TSTAMP_EXP(attrl));
+ s->extended_scalar_attrs =
+ SUPPORTS_EXTEND_ATTRS(attrl);
+
+ attrh = le32_to_cpu(sdesc->attributes_high);
+ /* common bitfields parsing */
+ s->scale = S32_EXT(SENSOR_SCALE(attrh));
+ s->type = SENSOR_TYPE(attrh);
+ /* Use pre-allocated pool wherever possible */
+ s->intervals.desc = s->intervals.prealloc_pool;
+ if (si->version == SCMIv2_SENSOR_PROTOCOL) {
+ s->intervals.segmented = false;
+ s->intervals.count = 1;
+ /*
+ * Convert SCMIv2.0 update interval format to
+ * SCMIv3.0 to be used as the common exposed
+ * descriptor, accessible via common macros.
+ */
+ s->intervals.desc[0] =
+ (SENSOR_UPDATE_BASE(attrh) << 5) |
+ SENSOR_UPDATE_SCALE(attrh);
+ } else {
+ /*
+ * From SCMIv3.0 update intervals are retrieved
+ * via a dedicated (optional) command.
+ * Since the command is optional, on error carry
+ * on without any update interval.
+ */
+ if (scmi_sensor_update_intervals(handle, s))
+ dev_dbg(handle->dev,
+ "Update Intervals not available for sensor ID:%d\n",
+ s->id);
+ }
+ /*
+ * Only post-SCMIv2.0 bitfields below; these are
+ * assumed to be zeroed on earlier fw versions,
+ * provided the fw is not buggy.
+ */
+ s->num_axis = min_t(unsigned int,
+ SUPPORTS_AXIS(attrh) ?
+ SENSOR_AXIS_NUMBER(attrh) : 0,
+ SCMI_MAX_NUM_SENSOR_AXIS);
+ strlcpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE);
+
+ if (s->extended_scalar_attrs) {
+ s->sensor_power = le32_to_cpu(sdesc->power);
+ dsize += sizeof(sdesc->power);
+ /* Only for sensors reporting scalar values */
+ if (s->num_axis == 0) {
+ unsigned int sres =
+ le32_to_cpu(sdesc->resolution);
+
+ s->resolution = SENSOR_RES(sres);
+ s->exponent =
+ S32_EXT(SENSOR_RES_EXP(sres));
+ dsize += sizeof(sdesc->resolution);
+
+ scmi_parse_range_attrs(&s->scalar_attrs,
+ &sdesc->scalar_attrs);
+ dsize += sizeof(sdesc->scalar_attrs);
+ }
+ }
+ if (s->num_axis > 0) {
+ ret = scmi_sensor_axis_description(handle, s);
+ if (ret)
+ goto out;
+ }
+
+ sdesc = (typeof(sdesc))((u8 *)sdesc + dsize);
}
desc_index += num_returned;
@@ -174,19 +552,21 @@ static int scmi_sensor_description_get(const struct scmi_handle *handle,
*/
} while (num_returned && num_remaining);
+out:
scmi_xfer_put(handle, t);
return ret;
}
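To make the v2-to-v3 interval conversion above concrete, a worked example with illustrative field values (assuming the base * 10^scale interval encoding):

/*
 * Illustrative v2 attributes_high describing an update interval of
 * 5 * 10^2 seconds:
 *   SENSOR_UPDATE_BASE(attrh)  = 5
 *   SENSOR_UPDATE_SCALE(attrh) = 2
 * packed v3 form: (5 << 5) | 2 = 0xa2
 */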
-static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
- u32 sensor_id, bool enable)
+static inline int
+scmi_sensor_request_notify(const struct scmi_handle *handle, u32 sensor_id,
+ u8 message_id, bool enable)
{
int ret;
- u32 evt_cntl = enable ? SENSOR_TP_NOTIFY_ALL : 0;
+ u32 evt_cntl = enable ? SENSOR_NOTIFY_ALL : 0;
struct scmi_xfer *t;
- struct scmi_msg_sensor_trip_point_notify *cfg;
+ struct scmi_msg_sensor_request_notify *cfg;
- ret = scmi_xfer_get_init(handle, SENSOR_TRIP_POINT_NOTIFY,
+ ret = scmi_xfer_get_init(handle, message_id,
SCMI_PROTOCOL_SENSOR, sizeof(*cfg), 0, &t);
if (ret)
return ret;
@@ -201,6 +581,23 @@ static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
return ret;
}
+static int scmi_sensor_trip_point_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(handle, sensor_id,
+ SENSOR_TRIP_POINT_NOTIFY,
+ enable);
+}
+
+static int
+scmi_sensor_continuous_update_notify(const struct scmi_handle *handle,
+ u32 sensor_id, bool enable)
+{
+ return scmi_sensor_request_notify(handle, sensor_id,
+ SENSOR_CONTINUOUS_UPDATE_NOTIFY,
+ enable);
+}
+
static int
scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
u8 trip_id, u64 trip_value)
@@ -227,6 +624,75 @@ scmi_sensor_trip_point_config(const struct scmi_handle *handle, u32 sensor_id,
return ret;
}
+static int scmi_sensor_config_get(const struct scmi_handle *handle,
+ u32 sensor_id, u32 *sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(__le32),
+ sizeof(__le32), &t);
+ if (ret)
+ return ret;
+
+ put_unaligned_le32(sensor_id, t->tx.buf);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ *sensor_config = get_unaligned_le32(t->rx.buf);
+ s->sensor_config = *sensor_config;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+static int scmi_sensor_config_set(const struct scmi_handle *handle,
+ u32 sensor_id, u32 sensor_config)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_config_set *msg;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_CONFIG_SET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*msg), 0, &t);
+ if (ret)
+ return ret;
+
+ msg = t->tx.buf;
+ msg->id = cpu_to_le32(sensor_id);
+ msg->sensor_config = cpu_to_le32(sensor_config);
+
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ s->sensor_config = sensor_config;
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
+
+/**
+ * scmi_sensor_reading_get - Read scalar sensor value
+ * @handle: Platform handle
+ * @sensor_id: Sensor ID
+ * @value: The 64bit value sensor reading
+ *
+ * This function returns a single 64-bit reading value representing the sensor
+ * value; if the platform SCMI Protocol implementation and the sensor support
+ * multiple axes and timestamped reads, this just returns the first axis while
+ * dropping the timestamp value.
+ * Use @scmi_sensor_reading_get_timestamped() instead to retrieve the array of
+ * timestamped multi-axis values.
+ *
+ * Return: 0 on Success
+ */
static int scmi_sensor_reading_get(const struct scmi_handle *handle,
u32 sensor_id, u64 *value)
{
@@ -237,20 +703,24 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
struct scmi_sensor_info *s = si->sensors + sensor_id;
ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
- SCMI_PROTOCOL_SENSOR, sizeof(*sensor),
- sizeof(u64), &t);
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
if (ret)
return ret;
sensor = t->tx.buf;
sensor->id = cpu_to_le32(sensor_id);
-
if (s->async) {
sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
ret = scmi_do_xfer_with_response(handle, t);
- if (!ret)
- *value = get_unaligned_le64((void *)
- ((__le32 *)t->rx.buf + 1));
+ if (!ret) {
+ struct scmi_resp_sensor_reading_complete *resp;
+
+ resp = t->rx.buf;
+ if (le32_to_cpu(resp->id) == sensor_id)
+ *value = get_unaligned_le64(&resp->readings);
+ else
+ ret = -EPROTO;
+ }
} else {
sensor->flags = cpu_to_le32(0);
ret = scmi_do_xfer(handle, t);
@@ -262,6 +732,84 @@ static int scmi_sensor_reading_get(const struct scmi_handle *handle,
return ret;
}
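A hypothetical caller of the scalar read above (handle, dev and sensor_id are assumptions for illustration):

u64 val;
int err;

err = handle->sensor_ops->reading_get(handle, sensor_id, &val);
if (!err)
	dev_dbg(dev, "sensor %u reads %llu\n", sensor_id, val);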
+static inline void
+scmi_parse_sensor_readings(struct scmi_sensor_reading *out,
+ const struct scmi_sensor_reading_resp *in)
+{
+ out->value = get_unaligned_le64((void *)&in->sensor_value_low);
+ out->timestamp = get_unaligned_le64((void *)&in->timestamp_low);
+}
+
+/**
+ * scmi_sensor_reading_get_timestamped - Read multiple-axis timestamped values
+ * @handle: Platform handle
+ * @sensor_id: Sensor ID
+ * @count: The length of the provided @readings array
+ * @readings: An array of elements each representing a timestamped per-axis
+ * reading of type @struct scmi_sensor_reading.
+ * Returned readings are ordered as in the @axis descriptors array
+ * of @struct scmi_sensor_info, and the max number of returned
+ * elements is min(@count, @num_axis); ideally @count should equal
+ * @num_axis.
+ *
+ * Return: 0 on Success
+ */
+static int
+scmi_sensor_reading_get_timestamped(const struct scmi_handle *handle,
+ u32 sensor_id, u8 count,
+ struct scmi_sensor_reading *readings)
+{
+ int ret;
+ struct scmi_xfer *t;
+ struct scmi_msg_sensor_reading_get *sensor;
+ struct sensors_info *si = handle->sensor_priv;
+ struct scmi_sensor_info *s = si->sensors + sensor_id;
+
+ if (!count || !readings ||
+ (!s->num_axis && count > 1) || (s->num_axis && count > s->num_axis))
+ return -EINVAL;
+
+ ret = scmi_xfer_get_init(handle, SENSOR_READING_GET,
+ SCMI_PROTOCOL_SENSOR, sizeof(*sensor), 0, &t);
+ if (ret)
+ return ret;
+
+ sensor = t->tx.buf;
+ sensor->id = cpu_to_le32(sensor_id);
+ if (s->async) {
+ sensor->flags = cpu_to_le32(SENSOR_READ_ASYNC);
+ ret = scmi_do_xfer_with_response(handle, t);
+ if (!ret) {
+ int i;
+ struct scmi_resp_sensor_reading_complete_v3 *resp;
+
+ resp = t->rx.buf;
+ /* Retrieve only the requested number of axes */
+ if (le32_to_cpu(resp->id) == sensor_id)
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp->readings[i]);
+ else
+ ret = -EPROTO;
+ }
+ } else {
+ sensor->flags = cpu_to_le32(0);
+ ret = scmi_do_xfer(handle, t);
+ if (!ret) {
+ int i;
+ struct scmi_sensor_reading_resp *resp_readings;
+
+ resp_readings = t->rx.buf;
+ for (i = 0; i < count; i++)
+ scmi_parse_sensor_readings(&readings[i],
+ &resp_readings[i]);
+ }
+ }
+
+ scmi_xfer_put(handle, t);
+ return ret;
+}
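And a hypothetical caller of the timestamped variant, reading every axis of a previously looked-up sensor s:

struct scmi_sensor_reading buf[SCMI_MAX_NUM_SENSOR_AXIS];
u8 n = s->num_axis ?: 1;	/* scalar sensors report one reading */
int err;

err = handle->sensor_ops->reading_get_timestamped(handle, s->id, n, buf);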
+
static const struct scmi_sensor_info *
scmi_sensor_info_get(const struct scmi_handle *handle, u32 sensor_id)
{
@@ -282,6 +830,9 @@ static const struct scmi_sensor_ops sensor_ops = {
.info_get = scmi_sensor_info_get,
.trip_point_config = scmi_sensor_trip_point_config,
.reading_get = scmi_sensor_reading_get,
+ .reading_get_timestamped = scmi_sensor_reading_get_timestamped,
+ .config_get = scmi_sensor_config_get,
+ .config_set = scmi_sensor_config_set,
};
static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
@@ -289,7 +840,19 @@ static int scmi_sensor_set_notify_enabled(const struct scmi_handle *handle,
{
int ret;
- ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ ret = scmi_sensor_trip_point_notify(handle, src_id, enable);
+ break;
+ case SCMI_EVENT_SENSOR_UPDATE:
+ ret = scmi_sensor_continuous_update_notify(handle, src_id,
+ enable);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
if (ret)
pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
evt_id, src_id, ret);
@@ -302,20 +865,59 @@ static void *scmi_sensor_fill_custom_report(const struct scmi_handle *handle,
const void *payld, size_t payld_sz,
void *report, u32 *src_id)
{
- const struct scmi_sensor_trip_notify_payld *p = payld;
- struct scmi_sensor_trip_point_report *r = report;
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_SENSOR_TRIP_POINT_EVENT:
+ {
+ const struct scmi_sensor_trip_notify_payld *p = payld;
+ struct scmi_sensor_trip_point_report *r = report;
- if (evt_id != SCMI_EVENT_SENSOR_TRIP_POINT_EVENT ||
- sizeof(*p) != payld_sz)
- return NULL;
+ if (sizeof(*p) != payld_sz)
+ break;
- r->timestamp = timestamp;
- r->agent_id = le32_to_cpu(p->agent_id);
- r->sensor_id = le32_to_cpu(p->sensor_id);
- r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
- *src_id = r->sensor_id;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ r->trip_point_desc = le32_to_cpu(p->trip_point_desc);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_SENSOR_UPDATE:
+ {
+ int i;
+ struct scmi_sensor_info *s;
+ const struct scmi_sensor_update_notify_payld *p = payld;
+ struct scmi_sensor_update_report *r = report;
+ struct sensors_info *sinfo = handle->sensor_priv;
+
+ /* payld_sz is variable for this event */
+ r->sensor_id = le32_to_cpu(p->sensor_id);
+ if (r->sensor_id >= sinfo->num_sensors)
+ break;
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ s = &sinfo->sensors[r->sensor_id];
+ /*
+ * The generated report r (@struct scmi_sensor_update_report)
+ * was pre-allocated to contain up to SCMI_MAX_NUM_SENSOR_AXIS
+ * readings: here it is filled with the effective @num_axis
+ * readings defined for this sensor or 1 for scalar sensors.
+ */
+ r->readings_count = s->num_axis ?: 1;
+ for (i = 0; i < r->readings_count; i++)
+ scmi_parse_sensor_readings(&r->readings[i],
+ &p->readings[i]);
+ *src_id = r->sensor_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
- return r;
+ return rep;
}
static const struct scmi_event sensor_events[] = {
@@ -324,6 +926,16 @@ static const struct scmi_event sensor_events[] = {
.max_payld_sz = sizeof(struct scmi_sensor_trip_notify_payld),
.max_report_sz = sizeof(struct scmi_sensor_trip_point_report),
},
+ {
+ .id = SCMI_EVENT_SENSOR_UPDATE,
+ .max_payld_sz =
+ sizeof(struct scmi_sensor_update_notify_payld) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading_resp),
+ .max_report_sz = sizeof(struct scmi_sensor_update_report) +
+ SCMI_MAX_NUM_SENSOR_AXIS *
+ sizeof(struct scmi_sensor_reading),
+ },
};
static const struct scmi_event_ops sensor_event_ops = {
@@ -334,6 +946,7 @@ static const struct scmi_event_ops sensor_event_ops = {
static int scmi_sensors_protocol_init(struct scmi_handle *handle)
{
u32 version;
+ int ret;
struct sensors_info *sinfo;
scmi_version_get(handle, SCMI_PROTOCOL_SENSOR, &version);
@@ -344,15 +957,19 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
sinfo = devm_kzalloc(handle->dev, sizeof(*sinfo), GFP_KERNEL);
if (!sinfo)
return -ENOMEM;
+ sinfo->version = version;
- scmi_sensor_attributes_get(handle, sinfo);
-
+ ret = scmi_sensor_attributes_get(handle, sinfo);
+ if (ret)
+ return ret;
sinfo->sensors = devm_kcalloc(handle->dev, sinfo->num_sensors,
sizeof(*sinfo->sensors), GFP_KERNEL);
if (!sinfo->sensors)
return -ENOMEM;
- scmi_sensor_description_get(handle, sinfo);
+ ret = scmi_sensor_description_get(handle, sinfo);
+ if (ret)
+ return ret;
scmi_register_protocol_events(handle,
SCMI_PROTOCOL_SENSOR, SCMI_PROTO_QUEUE_SZ,
@@ -360,9 +977,8 @@ static int scmi_sensors_protocol_init(struct scmi_handle *handle)
ARRAY_SIZE(sensor_events),
sinfo->num_sensors);
- sinfo->version = version;
- handle->sensor_ops = &sensor_ops;
handle->sensor_priv = sinfo;
+ handle->sensor_ops = &sensor_ops;
return 0;
}
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index d9895491ff34..2c3dac5ecb36 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -122,7 +122,7 @@ config EFI_ARMSTUB_DTB_LOADER
config EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER
bool "Enable the command line initrd loader" if !X86
depends on EFI_STUB && (EFI_GENERIC_STUB || X86)
- default y
+ default y if X86
depends on !RISCV
help
Select this config option to add support for the initrd= command
@@ -147,7 +147,7 @@ config EFI_BOOTLOADER_CONTROL
config EFI_CAPSULE_LOADER
tristate "EFI capsule loader"
- depends on EFI
+ depends on EFI && !IA64
help
This option exposes a loader interface "/dev/efi_capsule_loader" for
users to load EFI capsules. This driver requires working runtime
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index d6ca2da19339..467e94259679 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -12,7 +12,10 @@ KASAN_SANITIZE_runtime-wrappers.o := n
obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o
obj-$(CONFIG_EFI) += efi.o vars.o reboot.o memattr.o tpm.o
-obj-$(CONFIG_EFI) += capsule.o memmap.o
+obj-$(CONFIG_EFI) += memmap.o
+ifneq ($(CONFIG_EFI_CAPSULE_LOADER),)
+obj-$(CONFIG_EFI) += capsule.o
+endif
obj-$(CONFIG_EFI_PARAMS_FROM_FDT) += fdtparams.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_ESRT) += esrt.o
diff --git a/drivers/firmware/efi/capsule.c b/drivers/firmware/efi/capsule.c
index 598b7800d14e..768430293669 100644
--- a/drivers/firmware/efi/capsule.c
+++ b/drivers/firmware/efi/capsule.c
@@ -12,6 +12,7 @@
#include <linux/highmem.h>
#include <linux/efi.h>
#include <linux/vmalloc.h>
+#include <asm/efi.h>
#include <asm/io.h>
typedef struct {
@@ -244,7 +245,7 @@ int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
for (i = 0; i < sg_count; i++) {
efi_capsule_block_desc_t *sglist;
- sglist = kmap(sg_pages[i]);
+ sglist = kmap_atomic(sg_pages[i]);
for (j = 0; j < SGLIST_PER_PAGE && count > 0; j++) {
u64 sz = min_t(u64, imagesize,
@@ -265,7 +266,18 @@ int efi_capsule_update(efi_capsule_header_t *capsule, phys_addr_t *pages)
else
sglist[j].data = page_to_phys(sg_pages[i + 1]);
- kunmap(sg_pages[i]);
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ /*
+ * At runtime, the firmware has no way to find out where the
+ * sglist elements are mapped, if they are mapped in the first
+ * place. Therefore, on architectures that can only perform
+ * cache maintenance by virtual address, the firmware is unable
+ * to perform this maintenance, and so it is up to the OS to do
+ * it instead.
+ */
+ efi_capsule_flush_cache_range(sglist, PAGE_SIZE);
+#endif
+ kunmap_atomic(sglist);
}
mutex_lock(&capsule_mutex);
diff --git a/drivers/firmware/efi/libstub/efi-stub.c b/drivers/firmware/efi/libstub/efi-stub.c
index 914a343c7785..ec2f3985bef3 100644
--- a/drivers/firmware/efi/libstub/efi-stub.c
+++ b/drivers/firmware/efi/libstub/efi-stub.c
@@ -273,7 +273,6 @@ efi_status_t __efiapi efi_pe_entry(efi_handle_t handle,
install_memreserve_table();
status = allocate_new_fdt_and_exit_boot(handle, &fdt_addr,
- efi_get_max_fdt_addr(image_addr),
initrd_addr, initrd_size,
cmdline_ptr, fdt_addr, fdt_size);
if (status != EFI_SUCCESS)
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index 2d7abcd99de9..b50a6c67d9bd 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -750,7 +750,6 @@ efi_status_t efi_exit_boot_services(void *handle,
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
- unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
char *cmdline_ptr,
unsigned long fdt_addr,
@@ -848,4 +847,6 @@ asmlinkage void __noreturn efi_enter_kernel(unsigned long entrypoint,
void efi_handle_post_ebs_state(void);
+enum efi_secureboot_mode efi_get_secureboot(void);
+
#endif
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 368cd60000ee..365c3a43a198 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -238,7 +238,6 @@ static efi_status_t exit_boot_func(struct efi_boot_memmap *map,
efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
unsigned long *new_fdt_addr,
- unsigned long max_addr,
u64 initrd_addr, u64 initrd_size,
char *cmdline_ptr,
unsigned long fdt_addr,
@@ -275,7 +274,7 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle,
efi_info("Exiting boot services and installing virtual address map...\n");
map.map = &memory_map;
- status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, max_addr);
+ status = efi_allocate_pages(MAX_FDT_SIZE, new_fdt_addr, ULONG_MAX);
if (status != EFI_SUCCESS) {
efi_err("Unable to allocate memory for new device tree.\n");
goto fail;
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c
index 5efc524b14be..8a18930f3eb6 100644
--- a/drivers/firmware/efi/libstub/secureboot.c
+++ b/drivers/firmware/efi/libstub/secureboot.c
@@ -12,44 +12,34 @@
#include "efistub.h"
-/* BIOS variables */
-static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
-static const efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
-static const efi_char16_t efi_SetupMode_name[] = L"SetupMode";
-
/* SHIM variables */
static const efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID;
static const efi_char16_t shim_MokSBState_name[] = L"MokSBState";
+static efi_status_t get_var(efi_char16_t *name, efi_guid_t *vendor, u32 *attr,
+ unsigned long *data_size, void *data)
+{
+ return get_efi_var(name, vendor, attr, data_size, data);
+}
+
/*
* Determine whether we're in secure boot mode.
- *
- * Please keep the logic in sync with
- * arch/x86/xen/efi.c:xen_efi_get_secureboot().
*/
enum efi_secureboot_mode efi_get_secureboot(void)
{
u32 attr;
- u8 secboot, setupmode, moksbstate;
unsigned long size;
+ enum efi_secureboot_mode mode;
efi_status_t status;
+ u8 moksbstate;
- size = sizeof(secboot);
- status = get_efi_var(efi_SecureBoot_name, &efi_variable_guid,
- NULL, &size, &secboot);
- if (status == EFI_NOT_FOUND)
- return efi_secureboot_mode_disabled;
- if (status != EFI_SUCCESS)
- goto out_efi_err;
-
- size = sizeof(setupmode);
- status = get_efi_var(efi_SetupMode_name, &efi_variable_guid,
- NULL, &size, &setupmode);
- if (status != EFI_SUCCESS)
- goto out_efi_err;
-
- if (secboot == 0 || setupmode == 1)
- return efi_secureboot_mode_disabled;
+ mode = efi_get_secureboot_mode(get_var);
+ if (mode == efi_secureboot_mode_unknown) {
+ efi_err("Could not determine UEFI Secure Boot status.\n");
+ return efi_secureboot_mode_unknown;
+ }
+ if (mode != efi_secureboot_mode_enabled)
+ return mode;
/*
* See if a user has put the shim into insecure mode. If so, and if the
@@ -69,8 +59,4 @@ enum efi_secureboot_mode efi_get_secureboot(void)
secure_boot_enabled:
efi_info("UEFI Secure Boot is enabled.\n");
return efi_secureboot_mode_enabled;
-
-out_efi_err:
- efi_err("Could not determine UEFI Secure Boot status.\n");
- return efi_secureboot_mode_unknown;
}
diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c
index 3672539cb96e..f14c4ff5839f 100644
--- a/drivers/firmware/efi/libstub/x86-stub.c
+++ b/drivers/firmware/efi/libstub/x86-stub.c
@@ -715,8 +715,11 @@ unsigned long efi_main(efi_handle_t handle,
(IS_ENABLED(CONFIG_X86_32) && buffer_end > KERNEL_IMAGE_SIZE) ||
(IS_ENABLED(CONFIG_X86_64) && buffer_end > MAXMEM_X86_64_4LEVEL) ||
(image_offset == 0)) {
+ extern char _bss[];
+
status = efi_relocate_kernel(&bzimage_addr,
- hdr->init_size, hdr->init_size,
+ (unsigned long)_bss - bzimage_addr,
+ hdr->init_size,
hdr->pref_address,
hdr->kernel_alignment,
LOAD_PHYSICAL_ADDR);
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c
index ddf9eae396fe..47d67bb0a516 100644
--- a/drivers/firmware/efi/test/efi_test.c
+++ b/drivers/firmware/efi/test/efi_test.c
@@ -663,6 +663,19 @@ out:
return rv;
}
+static long efi_runtime_get_supported_mask(unsigned long arg)
+{
+ unsigned int __user *supported_mask;
+ int rv = 0;
+
+ supported_mask = (unsigned int __user *)arg;
+
+ if (put_user(efi.runtime_supported_mask, supported_mask))
+ rv = -EFAULT;
+
+ return rv;
+}
+
static long efi_test_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -699,6 +712,9 @@ static long efi_test_ioctl(struct file *file, unsigned int cmd,
case EFI_RUNTIME_RESET_SYSTEM:
return efi_runtime_reset_system(arg);
+
+ case EFI_RUNTIME_GET_SUPPORTED_MASK:
+ return efi_runtime_get_supported_mask(arg);
}
return -ENOTTY;
diff --git a/drivers/firmware/efi/test/efi_test.h b/drivers/firmware/efi/test/efi_test.h
index f2446aa1c2e3..117349e57993 100644
--- a/drivers/firmware/efi/test/efi_test.h
+++ b/drivers/firmware/efi/test/efi_test.h
@@ -118,4 +118,7 @@ struct efi_resetsystem {
#define EFI_RUNTIME_RESET_SYSTEM \
_IOW('p', 0x0B, struct efi_resetsystem)
+#define EFI_RUNTIME_GET_SUPPORTED_MASK \
+ _IOR('p', 0x0C, unsigned int)
+
#endif /* _DRIVERS_FIRMWARE_EFI_TEST_H_ */
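A hedged sketch of how userspace might exercise the new ioctl, assuming the efi_test module is loaded, exposes /dev/efi_test, and the _IOR definition above is available to the program:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>

int main(void)
{
	unsigned int mask = 0;
	int fd = open("/dev/efi_test", O_RDWR);

	if (fd < 0)
		return 1;
	if (!ioctl(fd, EFI_RUNTIME_GET_SUPPORTED_MASK, &mask))
		printf("runtime_supported_mask: 0x%x\n", mask);
	close(fd);
	return 0;
}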
diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c
index 4265e9dbed84..a6c06d7476c3 100644
--- a/drivers/firmware/imx/imx-dsp.c
+++ b/drivers/firmware/imx/imx-dsp.c
@@ -60,22 +60,40 @@ static void imx_dsp_handle_rx(struct mbox_client *c, void *msg)
}
}
-static int imx_dsp_probe(struct platform_device *pdev)
+struct mbox_chan *imx_dsp_request_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
{
- struct device *dev = &pdev->dev;
- struct imx_dsp_ipc *dsp_ipc;
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return ERR_PTR(-EINVAL);
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ dsp_chan->ch = mbox_request_channel_byname(&dsp_chan->cl, dsp_chan->name);
+ return dsp_chan->ch;
+}
+EXPORT_SYMBOL(imx_dsp_request_channel);
+
+void imx_dsp_free_channel(struct imx_dsp_ipc *dsp_ipc, int idx)
+{
+ struct imx_dsp_chan *dsp_chan;
+
+ if (idx >= DSP_MU_CHAN_NUM)
+ return;
+
+ dsp_chan = &dsp_ipc->chans[idx];
+ mbox_free_channel(dsp_chan->ch);
+}
+EXPORT_SYMBOL(imx_dsp_free_channel);
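A hypothetical client of the two exported helpers above (the channel index 2 is an assumption for illustration):

struct mbox_chan *ch;

ch = imx_dsp_request_channel(dsp_ipc, 2);
if (!IS_ERR(ch)) {
	/* ... exchange messages over the channel ... */
	imx_dsp_free_channel(dsp_ipc, 2);
}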
+
+static int imx_dsp_setup_channels(struct imx_dsp_ipc *dsp_ipc)
+{
+ struct device *dev = dsp_ipc->dev;
struct imx_dsp_chan *dsp_chan;
struct mbox_client *cl;
char *chan_name;
int ret;
int i, j;
- device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
-
- dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
- if (!dsp_ipc)
- return -ENOMEM;
-
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
if (i < 2)
chan_name = kasprintf(GFP_KERNEL, "txdb%d", i);
@@ -86,6 +104,7 @@ static int imx_dsp_probe(struct platform_device *pdev)
return -ENOMEM;
dsp_chan = &dsp_ipc->chans[i];
+ dsp_chan->name = chan_name;
cl = &dsp_chan->cl;
cl->dev = dev;
cl->tx_block = false;
@@ -104,27 +123,43 @@ static int imx_dsp_probe(struct platform_device *pdev)
}
dev_dbg(dev, "request mbox chan %s\n", chan_name);
- /* chan_name is not used anymore by framework */
- kfree(chan_name);
}
- dsp_ipc->dev = dev;
-
- dev_set_drvdata(dev, dsp_ipc);
-
- dev_info(dev, "NXP i.MX DSP IPC initialized\n");
-
return 0;
out:
- kfree(chan_name);
for (j = 0; j < i; j++) {
dsp_chan = &dsp_ipc->chans[j];
mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
}
return ret;
}
+static int imx_dsp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct imx_dsp_ipc *dsp_ipc;
+ int ret;
+
+ device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);
+
+ dsp_ipc = devm_kzalloc(dev, sizeof(*dsp_ipc), GFP_KERNEL);
+ if (!dsp_ipc)
+ return -ENOMEM;
+
+ dsp_ipc->dev = dev;
+ dev_set_drvdata(dev, dsp_ipc);
+
+ ret = imx_dsp_setup_channels(dsp_ipc);
+ if (ret < 0)
+ return ret;
+
+ dev_info(dev, "NXP i.MX DSP IPC initialized\n");
+
+ return 0;
+}
+
static int imx_dsp_remove(struct platform_device *pdev)
{
struct imx_dsp_chan *dsp_chan;
@@ -136,6 +171,7 @@ static int imx_dsp_remove(struct platform_device *pdev)
for (i = 0; i < DSP_MU_CHAN_NUM; i++) {
dsp_chan = &dsp_ipc->chans[i];
mbox_free_channel(dsp_chan->ch);
+ kfree(dsp_chan->name);
}
return 0;
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index 946eea292b52..08533ee67626 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -160,12 +160,18 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "mipi0-pwm0", IMX_SC_R_MIPI_0_PWM_0, 1, false, 0 },
{ "mipi0-i2c", IMX_SC_R_MIPI_0_I2C_0, 2, true, 0 },
+ { "mipi1", IMX_SC_R_MIPI_1, 1, false, 0 },
+ { "mipi1-pwm0", IMX_SC_R_MIPI_1_PWM_0, 1, false, 0 },
+ { "mipi1-i2c", IMX_SC_R_MIPI_1_I2C_0, 2, true, 0 },
+
/* LVDS SS */
{ "lvds0", IMX_SC_R_LVDS_0, 1, false, 0 },
+ { "lvds1", IMX_SC_R_LVDS_1, 1, false, 0 },
/* DC SS */
{ "dc0", IMX_SC_R_DC_0, 1, false, 0 },
{ "dc0-pll", IMX_SC_R_DC_0_PLL_0, 2, true, 0 },
+ { "dc0-video", IMX_SC_R_DC_0_VIDEO0, 2, true, 0 },
/* CM40 SS */
{ "cm40-i2c", IMX_SC_R_M4_0_I2C, 1, false, 0 },
@@ -180,6 +186,12 @@ static const struct imx_sc_pd_range imx8qxp_scu_pd_ranges[] = {
{ "cm41-pid", IMX_SC_R_M4_1_PID0, 5, true, 0},
{ "cm41-mu-a1", IMX_SC_R_M4_1_MU_1A, 1, false, 0},
{ "cm41-lpuart", IMX_SC_R_M4_1_UART, 1, false, 0},
+
+ /* IMAGE SS */
+ { "img-jpegdec-mp", IMX_SC_R_MJPEG_DEC_MP, 1, false, 0 },
+ { "img-jpegdec-s0", IMX_SC_R_MJPEG_DEC_S0, 4, true, 0 },
+ { "img-jpegenc-mp", IMX_SC_R_MJPEG_ENC_MP, 1, false, 0 },
+ { "img-jpegenc-s0", IMX_SC_R_MJPEG_ENC_S0, 4, true, 0 },
};
static const struct imx_sc_pd_soc imx8qxp_scu_pd = {
diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig
index 2671dcd0ad92..f2fdd3756648 100644
--- a/drivers/firmware/meson/Kconfig
+++ b/drivers/firmware/meson/Kconfig
@@ -3,8 +3,9 @@
# Amlogic Secure Monitor driver
#
config MESON_SM
- bool
- default ARCH_MESON
+ tristate "Amlogic Secure Monitor driver"
+ depends on ARCH_MESON || COMPILE_TEST
+ default y
depends on ARM64_4K_PAGES
help
Say y here to enable the Amlogic secure monitor driver
diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c
index 2854b56f6e0b..77aa5c6398aa 100644
--- a/drivers/firmware/meson/meson_sm.c
+++ b/drivers/firmware/meson/meson_sm.c
@@ -331,3 +331,4 @@ static struct platform_driver meson_sm_driver = {
},
};
module_platform_driver_probe(meson_sm_driver, meson_sm_probe);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 00af99b6f97c..f5fc429cae3f 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -58,15 +58,12 @@ typedef unsigned long (psci_fn)(unsigned long, unsigned long,
unsigned long, unsigned long);
static psci_fn *invoke_psci_fn;
-enum psci_function {
- PSCI_FN_CPU_SUSPEND,
- PSCI_FN_CPU_ON,
- PSCI_FN_CPU_OFF,
- PSCI_FN_MIGRATE,
- PSCI_FN_MAX,
-};
+static struct psci_0_1_function_ids psci_0_1_function_ids;
-static u32 psci_function_id[PSCI_FN_MAX];
+struct psci_0_1_function_ids get_psci_0_1_function_ids(void)
+{
+ return psci_0_1_function_ids;
+}
#define PSCI_0_2_POWER_STATE_MASK \
(PSCI_0_2_POWER_STATE_ID_MASK | \
@@ -146,7 +143,12 @@ static int psci_to_linux_errno(int errno)
return -EINVAL;
}
-static u32 psci_get_version(void)
+static u32 psci_0_1_get_version(void)
+{
+ return PSCI_VERSION(0, 1);
+}
+
+static u32 psci_0_2_get_version(void)
{
return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
}
@@ -163,46 +165,80 @@ int psci_set_osi_mode(bool enable)
return psci_to_linux_errno(err);
}
-static int psci_cpu_suspend(u32 state, unsigned long entry_point)
+static int __psci_cpu_suspend(u32 fn, u32 state, unsigned long entry_point)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
err = invoke_psci_fn(fn, state, entry_point, 0);
return psci_to_linux_errno(err);
}
-static int psci_cpu_off(u32 state)
+static int psci_0_1_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ return __psci_cpu_suspend(psci_0_1_function_ids.cpu_suspend,
+ state, entry_point);
+}
+
+static int psci_0_2_cpu_suspend(u32 state, unsigned long entry_point)
+{
+ return __psci_cpu_suspend(PSCI_FN_NATIVE(0_2, CPU_SUSPEND),
+ state, entry_point);
+}
+
+static int __psci_cpu_off(u32 fn, u32 state)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_CPU_OFF];
err = invoke_psci_fn(fn, state, 0, 0);
return psci_to_linux_errno(err);
}
-static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
+static int psci_0_1_cpu_off(u32 state)
+{
+ return __psci_cpu_off(psci_0_1_function_ids.cpu_off, state);
+}
+
+static int psci_0_2_cpu_off(u32 state)
+{
+ return __psci_cpu_off(PSCI_0_2_FN_CPU_OFF, state);
+}
+
+static int __psci_cpu_on(u32 fn, unsigned long cpuid, unsigned long entry_point)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_CPU_ON];
err = invoke_psci_fn(fn, cpuid, entry_point, 0);
return psci_to_linux_errno(err);
}
-static int psci_migrate(unsigned long cpuid)
+static int psci_0_1_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ return __psci_cpu_on(psci_0_1_function_ids.cpu_on, cpuid, entry_point);
+}
+
+static int psci_0_2_cpu_on(unsigned long cpuid, unsigned long entry_point)
+{
+ return __psci_cpu_on(PSCI_FN_NATIVE(0_2, CPU_ON), cpuid, entry_point);
+}
+
+static int __psci_migrate(u32 fn, unsigned long cpuid)
{
int err;
- u32 fn;
- fn = psci_function_id[PSCI_FN_MIGRATE];
err = invoke_psci_fn(fn, cpuid, 0, 0);
return psci_to_linux_errno(err);
}
+static int psci_0_1_migrate(unsigned long cpuid)
+{
+ return __psci_migrate(psci_0_1_function_ids.migrate, cpuid);
+}
+
+static int psci_0_2_migrate(unsigned long cpuid)
+{
+ return __psci_migrate(PSCI_FN_NATIVE(0_2, MIGRATE), cpuid);
+}
+
static int psci_affinity_info(unsigned long target_affinity,
unsigned long lowest_affinity_level)
{
@@ -347,7 +383,7 @@ static void __init psci_init_system_suspend(void)
static void __init psci_init_cpu_suspend(void)
{
- int feature = psci_features(psci_function_id[PSCI_FN_CPU_SUSPEND]);
+ int feature = psci_features(PSCI_FN_NATIVE(0_2, CPU_SUSPEND));
if (feature != PSCI_RET_NOT_SUPPORTED)
psci_cpu_suspend_feature = feature;
@@ -421,24 +457,16 @@ static void __init psci_init_smccc(void)
static void __init psci_0_2_set_functions(void)
{
pr_info("Using standard PSCI v0.2 function IDs\n");
- psci_ops.get_version = psci_get_version;
-
- psci_function_id[PSCI_FN_CPU_SUSPEND] =
- PSCI_FN_NATIVE(0_2, CPU_SUSPEND);
- psci_ops.cpu_suspend = psci_cpu_suspend;
-
- psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
- psci_ops.cpu_off = psci_cpu_off;
-
- psci_function_id[PSCI_FN_CPU_ON] = PSCI_FN_NATIVE(0_2, CPU_ON);
- psci_ops.cpu_on = psci_cpu_on;
- psci_function_id[PSCI_FN_MIGRATE] = PSCI_FN_NATIVE(0_2, MIGRATE);
- psci_ops.migrate = psci_migrate;
-
- psci_ops.affinity_info = psci_affinity_info;
-
- psci_ops.migrate_info_type = psci_migrate_info_type;
+ psci_ops = (struct psci_operations){
+ .get_version = psci_0_2_get_version,
+ .cpu_suspend = psci_0_2_cpu_suspend,
+ .cpu_off = psci_0_2_cpu_off,
+ .cpu_on = psci_0_2_cpu_on,
+ .migrate = psci_0_2_migrate,
+ .affinity_info = psci_affinity_info,
+ .migrate_info_type = psci_migrate_info_type,
+ };
arm_pm_restart = psci_sys_reset;
@@ -450,7 +478,7 @@ static void __init psci_0_2_set_functions(void)
*/
static int __init psci_probe(void)
{
- u32 ver = psci_get_version();
+ u32 ver = psci_0_2_get_version();
pr_info("PSCIv%d.%d detected in firmware.\n",
PSCI_VERSION_MAJOR(ver),
@@ -514,24 +542,26 @@ static int __init psci_0_1_init(struct device_node *np)
pr_info("Using PSCI v0.1 Function IDs from DT\n");
+ psci_ops.get_version = psci_0_1_get_version;
+
if (!of_property_read_u32(np, "cpu_suspend", &id)) {
- psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
- psci_ops.cpu_suspend = psci_cpu_suspend;
+ psci_0_1_function_ids.cpu_suspend = id;
+ psci_ops.cpu_suspend = psci_0_1_cpu_suspend;
}
if (!of_property_read_u32(np, "cpu_off", &id)) {
- psci_function_id[PSCI_FN_CPU_OFF] = id;
- psci_ops.cpu_off = psci_cpu_off;
+ psci_0_1_function_ids.cpu_off = id;
+ psci_ops.cpu_off = psci_0_1_cpu_off;
}
if (!of_property_read_u32(np, "cpu_on", &id)) {
- psci_function_id[PSCI_FN_CPU_ON] = id;
- psci_ops.cpu_on = psci_cpu_on;
+ psci_0_1_function_ids.cpu_on = id;
+ psci_ops.cpu_on = psci_0_1_cpu_on;
}
if (!of_property_read_u32(np, "migrate", &id)) {
- psci_function_id[PSCI_FN_MIGRATE] = id;
- psci_ops.migrate = psci_migrate;
+ psci_0_1_function_ids.migrate = id;
+ psci_ops.migrate = psci_0_1_migrate;
}
return 0;
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index c1bbba9ee93a..440d99c63638 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -412,16 +412,12 @@ static int bpmp_populate_debugfs_inband(struct tegra_bpmp *bpmp,
goto out;
}
- len = strlen(ppath) + strlen(name) + 1;
+ len = snprintf(pathbuf, pathlen, "%s%s/", ppath, name);
if (len >= pathlen) {
err = -EINVAL;
goto out;
}
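+ /*
+ * Sketch of the pattern relied on here: snprintf() returns the
+ * length the fully formatted string would have needed, so a
+ * return value >= the buffer size means the output was truncated
+ * and pathbuf must not be used, e.g.:
+ *
+ *	len = snprintf(buf, size, "%s%s/", parent, name);
+ *	if (len >= size)
+ *		return -EINVAL;
+ */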
- strncpy(pathbuf, ppath, pathlen);
- strncat(pathbuf, name, strlen(name));
- strcat(pathbuf, "/");
-
err = bpmp_populate_debugfs_inband(bpmp, dentry,
pathbuf);
if (err < 0)
diff --git a/drivers/firmware/ti_sci.c b/drivers/firmware/ti_sci.c
index 896f53ec7857..235c7e7869aa 100644
--- a/drivers/firmware/ti_sci.c
+++ b/drivers/firmware/ti_sci.c
@@ -1703,14 +1703,14 @@ fail:
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
struct ti_sci_msg_resp_get_resource_range *resp;
struct ti_sci_msg_req_get_resource_range *req;
@@ -1721,7 +1721,7 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (IS_ERR(handle))
return PTR_ERR(handle);
- if (!handle)
+ if (!handle || !desc)
return -EINVAL;
info = handle_to_ti_sci_info(handle);
@@ -1751,11 +1751,14 @@ static int ti_sci_get_resource_range(const struct ti_sci_handle *handle,
if (!ti_sci_is_response_ack(resp)) {
ret = -ENODEV;
- } else if (!resp->range_start && !resp->range_num) {
+ } else if (!resp->range_num && !resp->range_num_sec) {
+ /* Neither of the two resource ranges is valid */
ret = -ENODEV;
} else {
- *range_start = resp->range_start;
- *range_num = resp->range_num;
+ desc->start = resp->range_start;
+ desc->num = resp->range_num;
+ desc->start_sec = resp->range_start_sec;
+ desc->num_sec = resp->range_num_sec;
};
fail:
@@ -1771,18 +1774,18 @@ fail:
* @dev_id: TISCI device ID.
* @subtype: Resource assignment subtype that is being requested
* from the given device.
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
return ti_sci_get_resource_range(handle, dev_id, subtype,
TI_SCI_IRQ_SECONDARY_HOST_INVALID,
- range_start, range_num);
+ desc);
}
/**
@@ -1793,18 +1796,17 @@ static int ti_sci_cmd_get_resource_range(const struct ti_sci_handle *handle,
* @subtype: Resource assignment subtype that is being requested
* from the given device.
* @s_host: Host processor ID to which the resources are allocated
- * @range_start: Start index of the resource range
- * @range_num: Number of resources in the range
+ * @desc: Pointer to ti_sci_resource_desc to be updated with the
+ * resource range start index and number of resources
*
* Return: 0 if all went fine, else return appropriate error.
*/
static
int ti_sci_cmd_get_resource_range_from_shost(const struct ti_sci_handle *handle,
u32 dev_id, u8 subtype, u8 s_host,
- u16 *range_start, u16 *range_num)
+ struct ti_sci_resource_desc *desc)
{
- return ti_sci_get_resource_range(handle, dev_id, subtype, s_host,
- range_start, range_num);
+ return ti_sci_get_resource_range(handle, dev_id, subtype, s_host, desc);
}
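
With both wrappers now funnelling into one descriptor, a caller reads the primary and the new secondary range from the same struct. A hedged sketch of such a call site (error handling and names illustrative):

    struct ti_sci_resource_desc desc;
    int ret;

    ret = handle->ops.rm_core_ops.get_range(handle, dev_id, subtype, &desc);
    if (!ret)
            pr_debug("primary %u/%u, secondary %u/%u\n",
                     desc.start, desc.num, desc.start_sec, desc.num_sec);
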
/**
@@ -2047,28 +2049,17 @@ static int ti_sci_cmd_free_event_map(const struct ti_sci_handle *handle,
}
/**
- * ti_sci_cmd_ring_config() - configure RA ring
- * @handle: Pointer to TI SCI handle.
- * @valid_params: Bitfield defining validity of ring configuration
- * parameters
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: The ring base address lo 32 bits
- * @addr_hi: The ring base address hi 32 bits
- * @count: Number of ring elements
- * @mode: The mode of the ring
- * @size: The ring element size.
- * @order_id: Specifies the ring's bus order ID
+ * ti_sci_cmd_rm_ring_cfg() - Configure a NAVSS ring
+ * @handle: Pointer to TI SCI handle.
+ * @params: Pointer to ti_sci_msg_rm_ring_cfg ring config structure
*
* Return: 0 if all went well, else returns appropriate error value.
*
- * See @ti_sci_msg_rm_ring_cfg_req for more info.
+ * See @ti_sci_msg_rm_ring_cfg and @ti_sci_msg_rm_ring_cfg_req for
+ * more info.
*/
-static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
- u32 valid_params, u16 nav_id, u16 index,
- u32 addr_lo, u32 addr_hi, u32 count,
- u8 mode, u8 size, u8 order_id)
+static int ti_sci_cmd_rm_ring_cfg(const struct ti_sci_handle *handle,
+ const struct ti_sci_msg_rm_ring_cfg *params)
{
struct ti_sci_msg_rm_ring_cfg_req *req;
struct ti_sci_msg_hdr *resp;
@@ -2092,15 +2083,17 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
return ret;
}
req = (struct ti_sci_msg_rm_ring_cfg_req *)xfer->xfer_buf;
- req->valid_params = valid_params;
- req->nav_id = nav_id;
- req->index = index;
- req->addr_lo = addr_lo;
- req->addr_hi = addr_hi;
- req->count = count;
- req->mode = mode;
- req->size = size;
- req->order_id = order_id;
+ req->valid_params = params->valid_params;
+ req->nav_id = params->nav_id;
+ req->index = params->index;
+ req->addr_lo = params->addr_lo;
+ req->addr_hi = params->addr_hi;
+ req->count = params->count;
+ req->mode = params->mode;
+ req->size = params->size;
+ req->order_id = params->order_id;
+ req->virtid = params->virtid;
+ req->asel = params->asel;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2109,90 +2102,11 @@ static int ti_sci_cmd_ring_config(const struct ti_sci_handle *handle,
}
resp = (struct ti_sci_msg_hdr *)xfer->xfer_buf;
- ret = ti_sci_is_response_ack(resp) ? 0 : -ENODEV;
-
-fail:
- ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", index, ret);
- return ret;
-}
-
-/**
- * ti_sci_cmd_ring_get_config() - get RA ring configuration
- * @handle: Pointer to TI SCI handle.
- * @nav_id: Device ID of Navigator Subsystem from which the ring is
- * allocated
- * @index: Ring index
- * @addr_lo: Returns ring's base address lo 32 bits
- * @addr_hi: Returns ring's base address hi 32 bits
- * @count: Returns number of ring elements
- * @mode: Returns mode of the ring
- * @size: Returns ring element size
- * @order_id: Returns ring's bus order ID
- *
- * Return: 0 if all went well, else returns appropriate error value.
- *
- * See @ti_sci_msg_rm_ring_get_cfg_req for more info.
- */
-static int ti_sci_cmd_ring_get_config(const struct ti_sci_handle *handle,
- u32 nav_id, u32 index, u8 *mode,
- u32 *addr_lo, u32 *addr_hi,
- u32 *count, u8 *size, u8 *order_id)
-{
- struct ti_sci_msg_rm_ring_get_cfg_resp *resp;
- struct ti_sci_msg_rm_ring_get_cfg_req *req;
- struct ti_sci_xfer *xfer;
- struct ti_sci_info *info;
- struct device *dev;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(handle))
- return -EINVAL;
-
- info = handle_to_ti_sci_info(handle);
- dev = info->dev;
-
- xfer = ti_sci_get_one_xfer(info, TI_SCI_MSG_RM_RING_GET_CFG,
- TI_SCI_FLAG_REQ_ACK_ON_PROCESSED,
- sizeof(*req), sizeof(*resp));
- if (IS_ERR(xfer)) {
- ret = PTR_ERR(xfer);
- dev_err(dev,
- "RM_RA:Message get config failed(%d)\n", ret);
- return ret;
- }
- req = (struct ti_sci_msg_rm_ring_get_cfg_req *)xfer->xfer_buf;
- req->nav_id = nav_id;
- req->index = index;
-
- ret = ti_sci_do_xfer(info, xfer);
- if (ret) {
- dev_err(dev, "RM_RA:Mbox get config send fail %d\n", ret);
- goto fail;
- }
-
- resp = (struct ti_sci_msg_rm_ring_get_cfg_resp *)xfer->xfer_buf;
-
- if (!ti_sci_is_response_ack(resp)) {
- ret = -ENODEV;
- } else {
- if (mode)
- *mode = resp->mode;
- if (addr_lo)
- *addr_lo = resp->addr_lo;
- if (addr_hi)
- *addr_hi = resp->addr_hi;
- if (count)
- *count = resp->count;
- if (size)
- *size = resp->size;
- if (order_id)
- *order_id = resp->order_id;
- };
+ ret = ti_sci_is_response_ack(resp) ? 0 : -EINVAL;
fail:
ti_sci_put_one_xfer(&info->minfo, xfer);
- dev_dbg(dev, "RM_RA:get config ring %u ret:%d\n", index, ret);
+ dev_dbg(dev, "RM_RA:config ring %u ret:%d\n", params->index, ret);
return ret;
}
@@ -2362,6 +2276,8 @@ static int ti_sci_cmd_rm_udmap_tx_ch_cfg(const struct ti_sci_handle *handle,
req->fdepth = params->fdepth;
req->tx_sched_priority = params->tx_sched_priority;
req->tx_burst_size = params->tx_burst_size;
+ req->tx_tdtype = params->tx_tdtype;
+ req->extended_ch_type = params->extended_ch_type;
ret = ti_sci_do_xfer(info, xfer);
if (ret) {
@@ -2921,8 +2837,7 @@ static void ti_sci_setup_ops(struct ti_sci_info *info)
iops->free_irq = ti_sci_cmd_free_irq;
iops->free_event_map = ti_sci_cmd_free_event_map;
- rops->config = ti_sci_cmd_ring_config;
- rops->get_config = ti_sci_cmd_ring_get_config;
+ rops->set_cfg = ti_sci_cmd_rm_ring_cfg;
psilops->pair = ti_sci_cmd_rm_psil_pair;
psilops->unpair = ti_sci_cmd_rm_psil_unpair;
@@ -3157,12 +3072,18 @@ u16 ti_sci_get_free_resource(struct ti_sci_resource *res)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- free_bit = find_first_zero_bit(res->desc[set].res_map,
- res->desc[set].num);
- if (free_bit != res->desc[set].num) {
- set_bit(free_bit, res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+ int res_count = desc->num + desc->num_sec;
+
+ free_bit = find_first_zero_bit(desc->res_map, res_count);
+ if (free_bit != res_count) {
+ set_bit(free_bit, desc->res_map);
raw_spin_unlock_irqrestore(&res->lock, flags);
- return res->desc[set].start + free_bit;
+
+ if (desc->num && free_bit < desc->num)
+ return desc->start + free_bit;
+ else
+ return desc->start_sec + free_bit;
}
}
raw_spin_unlock_irqrestore(&res->lock, flags);
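
A worked example of the mapping above, assuming descriptors where only one of the two ranges is populated (the common firmware case): with a primary-only set (start = 100, num = 8, num_sec = 0), free_bit 3 yields ID 100 + 3 = 103; with a secondary-only set (num = 0, start_sec = 200, num_sec = 8), free_bit 3 yields ID 200 + 3 = 203. ti_sci_release_resource() below inverts the same mapping by testing which range the ID falls into.
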
@@ -3183,10 +3104,14 @@ void ti_sci_release_resource(struct ti_sci_resource *res, u16 id)
raw_spin_lock_irqsave(&res->lock, flags);
for (set = 0; set < res->sets; set++) {
- if (res->desc[set].start <= id &&
- (res->desc[set].num + res->desc[set].start) > id)
- clear_bit(id - res->desc[set].start,
- res->desc[set].res_map);
+ struct ti_sci_resource_desc *desc = &res->desc[set];
+
+ if (desc->num && desc->start <= id &&
+ (desc->start + desc->num) > id)
+ clear_bit(id - desc->start, desc->res_map);
+ else if (desc->num_sec && desc->start_sec <= id &&
+ (desc->start_sec + desc->num_sec) > id)
+ clear_bit(id - desc->start_sec, desc->res_map);
}
raw_spin_unlock_irqrestore(&res->lock, flags);
}
@@ -3203,7 +3128,7 @@ u32 ti_sci_get_num_resources(struct ti_sci_resource *res)
u32 set, count = 0;
for (set = 0; set < res->sets; set++)
- count += res->desc[set].num;
+ count += res->desc[set].num + res->desc[set].num_sec;
return count;
}
@@ -3227,7 +3152,7 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
{
struct ti_sci_resource *res;
bool valid_set = false;
- int i, ret;
+ int i, ret, res_count;
res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL);
if (!res)
@@ -3242,23 +3167,23 @@ devm_ti_sci_get_resource_sets(const struct ti_sci_handle *handle,
for (i = 0; i < res->sets; i++) {
ret = handle->ops.rm_core_ops.get_range(handle, dev_id,
sub_types[i],
- &res->desc[i].start,
- &res->desc[i].num);
+ &res->desc[i]);
if (ret) {
dev_dbg(dev, "dev = %d subtype %d not allocated for this host\n",
dev_id, sub_types[i]);
- res->desc[i].start = 0;
- res->desc[i].num = 0;
+ memset(&res->desc[i], 0, sizeof(res->desc[i]));
continue;
}
- dev_dbg(dev, "dev = %d, subtype = %d, start = %d, num = %d\n",
+ dev_dbg(dev, "dev/sub_type: %d/%d, start/num: %d/%d | %d/%d\n",
dev_id, sub_types[i], res->desc[i].start,
- res->desc[i].num);
+ res->desc[i].num, res->desc[i].start_sec,
+ res->desc[i].num_sec);
valid_set = true;
+ res_count = res->desc[i].num + res->desc[i].num_sec;
res->desc[i].res_map =
- devm_kzalloc(dev, BITS_TO_LONGS(res->desc[i].num) *
+ devm_kzalloc(dev, BITS_TO_LONGS(res_count) *
sizeof(*res->desc[i].res_map), GFP_KERNEL);
if (!res->desc[i].res_map)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/firmware/ti_sci.h b/drivers/firmware/ti_sci.h
index 57cd04062994..ef3a8214d002 100644
--- a/drivers/firmware/ti_sci.h
+++ b/drivers/firmware/ti_sci.h
@@ -49,7 +49,6 @@
#define TI_SCI_MSG_RM_RING_RECONFIG 0x1102
#define TI_SCI_MSG_RM_RING_RESET 0x1103
#define TI_SCI_MSG_RM_RING_CFG 0x1110
-#define TI_SCI_MSG_RM_RING_GET_CFG 0x1111
/* PSI-L requests */
#define TI_SCI_MSG_RM_PSIL_PAIR 0x1280
@@ -574,8 +573,10 @@ struct ti_sci_msg_req_get_resource_range {
/**
* struct ti_sci_msg_resp_get_resource_range - Response to resource get range.
* @hdr: Generic Header
- * @range_start: Start index of the resource range.
- * @range_num: Number of resources in the range.
+ * @range_start: Start index of the first resource range.
+ * @range_num: Number of resources in the first range.
+ * @range_start_sec: Start index of the second resource range.
+ * @range_num_sec: Number of resources in the second range.
*
* Response to request TI_SCI_MSG_GET_RESOURCE_RANGE.
*/
@@ -583,6 +584,8 @@ struct ti_sci_msg_resp_get_resource_range {
struct ti_sci_msg_hdr hdr;
u16 range_start;
u16 range_num;
+ u16 range_start_sec;
+ u16 range_num_sec;
} __packed;
/**
@@ -656,6 +659,8 @@ struct ti_sci_msg_req_manage_irq {
* 3 - Valid bit for @tisci_msg_rm_ring_cfg_req mode
* 4 - Valid bit for @tisci_msg_rm_ring_cfg_req size
* 5 - Valid bit for @tisci_msg_rm_ring_cfg_req order_id
+ * 6 - Valid bit for @tisci_msg_rm_ring_cfg_req virtid
+ * 7 - Valid bit for @tisci_msg_rm_ring_cfg_req ASEL
* @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
* @index: ring index to be configured.
* @addr_lo: 32 LSBs of ring base address to be programmed into the ring's
@@ -669,6 +674,9 @@ struct ti_sci_msg_req_manage_irq {
* the formula (log2(size_bytes) - 2), where size_bytes cannot be
* greater than 256.
* @order_id: Specifies the ring's bus order ID.
+ * @virtid: Ring virt ID value
+ * @asel: Ring ASEL (address select) value to be set into the ASEL field of the
+ * ring's RING_BA_HI register.
*/
struct ti_sci_msg_rm_ring_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -681,49 +689,8 @@ struct ti_sci_msg_rm_ring_cfg_req {
u8 mode;
u8 size;
u8 order_id;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_req - Get RA ring's configuration
- *
- * Gets the configuration of the non-real-time register fields of a ring. The
- * host, or a supervisor of the host, who owns the ring must be the requesting
- * host. The values of the non-real-time registers are returned in
- * @ti_sci_msg_rm_ring_get_cfg_resp.
- *
- * @hdr: Generic Header
- * @nav_id: Device ID of Navigator Subsystem from which the ring is allocated
- * @index: ring index.
- */
-struct ti_sci_msg_rm_ring_get_cfg_req {
- struct ti_sci_msg_hdr hdr;
- u16 nav_id;
- u16 index;
-} __packed;
-
-/**
- * struct ti_sci_msg_rm_ring_get_cfg_resp - Ring get configuration response
- *
- * Response received by host processor after RM has handled
- * @ti_sci_msg_rm_ring_get_cfg_req. The response contains the ring's
- * non-real-time register values.
- *
- * @hdr: Generic Header
- * @addr_lo: Ring 32 LSBs of base address
- * @addr_hi: Ring 16 MSBs of base address.
- * @count: Ring number of elements.
- * @mode: Ring mode.
- * @size: encoded Ring element size
- * @order_id: ing order ID.
- */
-struct ti_sci_msg_rm_ring_get_cfg_resp {
- struct ti_sci_msg_hdr hdr;
- u32 addr_lo;
- u32 addr_hi;
- u32 count;
- u8 mode;
- u8 size;
- u8 order_id;
+ u16 virtid;
+ u8 asel;
} __packed;
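
Bits 6 and 7 of valid_params gate the two new fields, mirroring the list in the comment above. A hedged sketch of a caller populating the new config struct (raw BIT() values are used because the official TI_SCI_MSG_VALUE_* macro names are not shown in this patch; field values are illustrative, and the ring ops are assumed reachable via handle->ops.rm_ring_ops):

    struct ti_sci_msg_rm_ring_cfg params = {
            .valid_params = BIT(6) | BIT(7), /* virtid and ASEL are valid */
            .nav_id       = nav_id,
            .index        = ring_index,
            .virtid       = 1,
            .asel         = 2,
    };

    /* set_cfg is the op wired up in ti_sci_setup_ops() above. */
    ret = handle->ops.rm_ring_ops.set_cfg(handle, &params);
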
/**
@@ -910,6 +877,8 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
* 12 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_credit_count
* 13 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::fdepth
* 14 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_burst_size
+ * 15 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::tx_tdtype
+ * 16 - Valid bit for @ref ti_sci_msg_rm_udmap_tx_ch_cfg::extended_ch_type
*
* @nav_id: SoC device ID of Navigator Subsystem where tx channel is located
*
@@ -973,6 +942,15 @@ struct rm_ti_sci_msg_udmap_rx_flow_opt_cfg {
*
* @tx_burst_size: UDMAP transmit channel burst size configuration to be
* programmed into the tx_burst_size field of the TCHAN_TCFG register.
+ *
+ * @tx_tdtype: UDMAP transmit channel teardown type configuration to be
+ * programmed into the tdtype field of the TCHAN_TCFG register:
+ * 0 - Return immediately
+ * 1 - Wait for completion message from remote peer
+ *
+ * @extended_ch_type: Valid for BCDMA.
+ * 0 - the channel is split tx channel (tchan)
+ * 1 - the channel is block copy channel (bchan)
*/
struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
struct ti_sci_msg_hdr hdr;
@@ -994,6 +972,8 @@ struct ti_sci_msg_rm_udmap_tx_ch_cfg_req {
u16 fdepth;
u8 tx_sched_priority;
u8 tx_burst_size;
+ u8 tx_tdtype;
+ u8 extended_ch_type;
} __packed;
/**
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index fd95edeb702b..7eb9958662dd 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -615,13 +615,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data);
/**
* zynqmp_pm_set_sd_tapdelay() - Set tap delay for the SD device
*
- * @node_id Node ID of the device
- * @type Type of tap delay to set (input/output)
- * @value Value to set fot the tap delay
+ * @node_id: Node ID of the device
+ * @type: Type of tap delay to set (input/output)
+ * @value: Value to set for the tap delay
*
* This function sets input/output tap delay for the SD device.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value)
{
@@ -633,12 +633,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay);
/**
* zynqmp_pm_sd_dll_reset() - Reset DLL logic
*
- * @node_id Node ID of the device
- * @type Reset type
+ * @node_id: Node ID of the device
+ * @type: Reset type
*
* This function resets DLL logic for the SD device.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
{
@@ -649,12 +649,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
/**
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
- * @index GGS register index
- * @value Register value to be written
+ * @index: GGS register index
+ * @value: Register value to be written
*
* This function writes value to GGS register.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_ggs(u32 index, u32 value)
{
@@ -665,12 +665,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs);
/**
* zynqmp_pm_read_ggs() - PM API for reading global general storage (ggs)
- * @index GGS register index
- * @value Register value to be written
+ * @index: GGS register index
+ * @value: Address to store the register value
*
* This function returns GGS register value.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_ggs(u32 index, u32 *value)
{
@@ -682,12 +682,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs);
/**
* zynqmp_pm_write_pggs() - PM API for writing persistent global general
* storage (pggs)
- * @index PGGS register index
- * @value Register value to be written
+ * @index: PGGS register index
+ * @value: Register value to be written
*
* This function writes value to PGGS register.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_write_pggs(u32 index, u32 value)
{
@@ -699,12 +699,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs);
/**
* zynqmp_pm_read_pggs() - PM API for reading persistent global general
* storage (pggs)
- * @index PGGS register index
- * @value Register value to be written
+ * @index: PGGS register index
+ * @value: Address to store the register value
*
* This function returns PGGS register value.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_read_pggs(u32 index, u32 *value)
{
@@ -715,12 +715,12 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs);
/**
* zynqmp_pm_set_boot_health_status() - PM API for setting healthy boot status
- * @value Status value to be written
+ * @value: Status value to be written
*
* This function sets healthy bit value to indicate boot health status
* to firmware.
*
- * @return Returns status, either success or error+reason
+ * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_set_boot_health_status(u32 value)
{
@@ -815,10 +815,10 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_status);
* zynqmp_pm_init_finalize() - PM call to inform firmware that the caller
* master has initialized its own power management
*
+ * Return: Returns status, either success or error+reason
+ *
* This API function is to be used to notify the power management controller
* about the completed power management initialization.
- *
- * Return: Returns status, either success or error+reason
*/
int zynqmp_pm_init_finalize(void)
{
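
All of the zynqmp hunks above fix the same class of kernel-doc breakage: parameter tags need a colon after the name, and the return description belongs in a "Return:" section rather than a "@return" tag. A minimal sketch of the expected layout (function name hypothetical):

    /**
     * zynqmp_example_call() - One-line summary of the call
     * @index: First parameter, described after a colon
     * @value: Second parameter
     *
     * Optional longer description of what the call does.
     *
     * Return: Returns status, either success or error+reason
     */
    int zynqmp_example_call(u32 index, u32 *value);
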
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 5d4de5cd6759..c70f46e80a3b 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -59,8 +59,9 @@ config DEBUG_GPIO
that are most common when setting up new platforms or boards.
config GPIO_SYSFS
- bool "/sys/class/gpio/... (sysfs interface)"
+ bool "/sys/class/gpio/... (sysfs interface)" if EXPERT
depends on SYSFS
+ select GPIO_CDEV # We need to encourage the new ABI
help
Say Y here to add the legacy sysfs interface for GPIOs.
@@ -255,6 +256,7 @@ config GPIO_EP93XX
config GPIO_EXAR
tristate "Support for GPIO pins on XR17V352/354/358"
depends on SERIAL_8250_EXAR
+ select REGMAP_MMIO
help
Selecting this option will enable handling of GPIO pins present
on Exar XR17V352/354/358 chips.
@@ -296,6 +298,17 @@ config GPIO_GRGPIO
Select this to support Aeroflex Gaisler GRGPIO cores from the GRLIB
VHDL IP core library.
+config GPIO_HISI
+ tristate "HiSilicon GPIO controller driver"
+ depends on (ARM64 && ACPI) || COMPILE_TEST
+ select GPIO_GENERIC
+ select GPIOLIB_IRQCHIP
+ help
+ Say Y or M here to build support for the HiSilicon GPIO controller.
+ This GPIO controller supports double-edge interrupts and multi-core
+ concurrent access.
+
config GPIO_HLWD
tristate "Nintendo Wii (Hollywood) GPIO"
depends on OF_GPIO
@@ -737,6 +750,17 @@ config GPIO_AMD_FCH
Note: This driver doesn't register itself automatically, as it
needs to be provided with platform specific configuration.
(See eg. CONFIG_PCENGINES_APU2.)
+
+config GPIO_MSC313
+ bool "MStar MSC313 GPIO support"
+ depends on ARCH_MSTARV7
+ default ARCH_MSTARV7
+ select GPIOLIB_IRQCHIP
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Say Y here to support the main GPIO block on MStar/SigmaStar
+ ARMv7 based SoCs.
+
endmenu
menu "Port-mapped I/O GPIO drivers"
@@ -1590,6 +1614,8 @@ config GPIO_VIPERBOARD
endmenu
+menu "Virtual GPIO drivers"
+
config GPIO_AGGREGATOR
tristate "GPIO Aggregator"
help
@@ -1613,4 +1639,6 @@ config GPIO_MOCKUP
tools/testing/selftests/gpio/gpio-mockup.sh. Reference the usage in
it.
+endmenu
+
endif
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 09dada80ac34..35e3b6026665 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
obj-$(CONFIG_GPIO_GPIO_MM) += gpio-gpio-mm.o
obj-$(CONFIG_GPIO_GRGPIO) += gpio-grgpio.o
obj-$(CONFIG_GPIO_GW_PLD) += gpio-gw-pld.o
+obj-$(CONFIG_GPIO_HISI) += gpio-hisi.o
obj-$(CONFIG_GPIO_HLWD) += gpio-hlwd.o
obj-$(CONFIG_HTC_EGPIO) += gpio-htc-egpio.o
obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
@@ -101,6 +102,7 @@ obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o
obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o
obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
+obj-$(CONFIG_GPIO_MSC313) += gpio-msc313.o
obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
obj-$(CONFIG_GPIO_MT7621) += gpio-mt7621.o
obj-$(CONFIG_GPIO_MVEBU) += gpio-mvebu.o
diff --git a/drivers/gpio/TODO b/drivers/gpio/TODO
index e560e45e84f8..0229fa79499e 100644
--- a/drivers/gpio/TODO
+++ b/drivers/gpio/TODO
@@ -129,58 +129,9 @@ GPIOLIB irqchip
The GPIOLIB irqchip is a helper irqchip for "simple cases" that should
try to cover any generic kind of irqchip cascaded from a GPIO.
-- Convert all the GPIOLIB_IRQCHIP users to pass an irqchip template,
- parent and flags before calling [devm_]gpiochip_add[_data]().
- Currently we set up the irqchip after setting up the gpiochip
- using gpiochip_irqchip_add() and gpiochip_set_[chained|nested]_irqchip().
- This is too complex, so convert all users over to just set up
- the irqchip before registering the gpio_chip, typical example:
-
- /* Typical state container with dynamic irqchip */
- struct my_gpio {
- struct gpio_chip gc;
- struct irq_chip irq;
- };
-
- int irq; /* from platform etc */
- struct my_gpio *g;
- struct gpio_irq_chip *girq;
-
- /* Set up the irqchip dynamically */
- g->irq.name = "my_gpio_irq";
- g->irq.irq_ack = my_gpio_ack_irq;
- g->irq.irq_mask = my_gpio_mask_irq;
- g->irq.irq_unmask = my_gpio_unmask_irq;
- g->irq.irq_set_type = my_gpio_set_irq_type;
-
- /* Get a pointer to the gpio_irq_chip */
- girq = &g->gc.irq;
- girq->chip = &g->irq;
- girq->parent_handler = ftgpio_gpio_irq_handler;
- girq->num_parents = 1;
- girq->parents = devm_kcalloc(dev, 1, sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents)
- return -ENOMEM;
- girq->default_type = IRQ_TYPE_NONE;
- girq->handler = handle_bad_irq;
- girq->parents[0] = irq;
-
- When this is done, we will delete the old APIs for instatiating
- GPIOLIB_IRQCHIP and simplify the code.
-
- Look over and identify any remaining easily converted drivers and
dry-code conversions to gpiolib irqchip for maintainers to test
-- Drop gpiochip_set_chained_irqchip() when all the chained irqchips
- have been converted to the above infrastructure.
-
-- Add more infrastructure to make it possible to also pass a threaded
- irqchip in struct gpio_irq_chip.
-
-- Drop gpiochip_irqchip_add_nested() when all the chained irqchips
- have been converted to the above infrastructure.
-
Increase integration with pin control
@@ -191,3 +142,39 @@ use of the global GPIO numbers. Once the above is complete, it may
make sense to simply join the subsystems into one and make pin
multiplexing, pin configuration, GPIO, etc selectable options in one
and the same pin control and GPIO subsystem.
+
+
+Debugfs in place of sysfs
+
+The old sysfs code that enables simple uses of GPIOs from the
+command line is still popular despite the existence of the proper
+character device. The reason is that it is simple to use on
+root filesystems where you only have a minimal set of tools such
+as "cat", "echo" etc.
+
+The old sysfs still needs to be strongly deprecated and removed,
+as it relies on the global GPIO number space, which assumes a strict
+order of global GPIO numbers that does not change between boots
+and is independent of probe order.
+
+To solve this and provide an ABI that people can use for hacks
+and development, implement a debugfs interface to manipulate
+GPIO lines that can do everything that sysfs can do today: one
+directory per gpiochip and one file entry per line:
+
+/sys/kernel/debug/gpiochip/gpiochip0
+/sys/kernel/debug/gpiochip/gpiochip0/gpio0
+/sys/kernel/debug/gpiochip/gpiochip0/gpio1
+/sys/kernel/debug/gpiochip/gpiochip0/gpio2
+/sys/kernel/debug/gpiochip/gpiochip0/gpio3
+...
+/sys/kernel/debug/gpiochip/gpiochip1
+/sys/kernel/debug/gpiochip/gpiochip1/gpio0
+/sys/kernel/debug/gpiochip/gpiochip1/gpio1
+...
+
+The exact files and design of the debugfs interface can be
+discussed but the idea is to provide a low-level access point
+for debugging and hacking and to expose all lines without the
+need for any exporting. Also provide ample ammunition to shoot
+oneself in the foot, because this is debugfs after all.
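
A minimal sketch of what one such per-line debugfs entry could look like, assuming a simple boolean backing store per line (everything here is hypothetical; a real interface would need proper get/set hooks into gpiolib):

    #include <linux/debugfs.h>
    #include <linux/gpio/driver.h>

    static void gpio_debugfs_add_chip(struct gpio_chip *gc, bool *line_vals)
    {
            struct dentry *dir = debugfs_create_dir(gc->label, NULL);
            char name[16];
            int i;

            /* One r/w "gpioN" file per line under the chip directory. */
            for (i = 0; i < gc->ngpio; i++) {
                    snprintf(name, sizeof(name), "gpio%d", i);
                    debugfs_create_bool(name, 0600, dir, &line_vals[i]);
            }
    }
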
diff --git a/drivers/gpio/gpio-104-idi-48.c b/drivers/gpio/gpio-104-idi-48.c
index 94c3a9bc4e75..b132afaf7d99 100644
--- a/drivers/gpio/gpio-104-idi-48.c
+++ b/drivers/gpio/gpio-104-idi-48.c
@@ -132,8 +132,7 @@ static void idi_48_irq_mask(struct irq_data *data)
outb(idi48gpio->cos_enb, idi48gpio->base + 7);
- raw_spin_unlock_irqrestore(&idi48gpio->lock,
- flags);
+ raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
return;
@@ -166,8 +165,7 @@ static void idi_48_irq_unmask(struct irq_data *data)
outb(idi48gpio->cos_enb, idi48gpio->base + 7);
- raw_spin_unlock_irqrestore(&idi48gpio->lock,
- flags);
+ raw_spin_unlock_irqrestore(&idi48gpio->lock, flags);
}
return;
diff --git a/drivers/gpio/gpio-amd8111.c b/drivers/gpio/gpio-amd8111.c
index fdcebe59510d..14e6b3e64add 100644
--- a/drivers/gpio/gpio-amd8111.c
+++ b/drivers/gpio/gpio-amd8111.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* GPIO driver for AMD 8111 south bridges
*
@@ -20,10 +21,6 @@
* Hardware driver for Intel i810 Random Number Generator (RNG)
* Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com>
* Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
*/
#include <linux/ioport.h>
#include <linux/module.h>
@@ -179,7 +176,6 @@ static int __init amd_gpio_init(void)
struct pci_dev *pdev = NULL;
const struct pci_device_id *ent;
-
/* We look for our device - AMD South Bridge
* I don't know about a system with two such bridges,
* so we can assume that there is max. one device.
@@ -223,11 +219,10 @@ found:
spin_lock_init(&gp.lock);
- printk(KERN_INFO "AMD-8111 GPIO detected\n");
+ dev_info(&pdev->dev, "AMD-8111 GPIO detected\n");
err = gpiochip_add_data(&gp.chip, &gp);
if (err) {
- printk(KERN_ERR "GPIO registering failed (%d)\n",
- err);
+ dev_err(&pdev->dev, "GPIO registering failed (%d)\n", err);
ioport_unmap(gp.pm);
goto out;
}
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index d5359341cc6b..678ddd375891 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -123,6 +123,7 @@ static int ath79_gpio_irq_set_type(struct irq_data *data,
switch (flow_type) {
case IRQ_TYPE_EDGE_RISING:
polarity |= mask;
+ fallthrough;
case IRQ_TYPE_EDGE_FALLING:
case IRQ_TYPE_EDGE_BOTH:
break;
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index a6f30ad6750f..7920cf256798 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -175,13 +175,13 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
err = pci_enable_device(dev);
if (err) {
- printk(KERN_ERR "bt8xxgpio: Can't enable device.\n");
+ dev_err(&dev->dev, "can't enable device.\n");
return err;
}
if (!devm_request_mem_region(&dev->dev, pci_resource_start(dev, 0),
pci_resource_len(dev, 0),
"bt8xxgpio")) {
- printk(KERN_WARNING "bt8xxgpio: Can't request iomem (0x%llx).\n",
+ dev_warn(&dev->dev, "can't request iomem (0x%llx).\n",
(unsigned long long)pci_resource_start(dev, 0));
err = -EBUSY;
goto err_disable;
@@ -191,7 +191,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
bg->mmio = devm_ioremap(&dev->dev, pci_resource_start(dev, 0), 0x1000);
if (!bg->mmio) {
- printk(KERN_ERR "bt8xxgpio: ioremap() failed\n");
+ dev_err(&dev->dev, "ioremap() failed\n");
err = -EIO;
goto err_disable;
}
@@ -207,7 +207,7 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
bt8xxgpio_gpio_setup(bg);
err = gpiochip_add_data(&bg->gpio, bg);
if (err) {
- printk(KERN_ERR "bt8xxgpio: Failed to register GPIOs\n");
+ dev_err(&dev->dev, "failed to register GPIOs\n");
goto err_disable;
}
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 53b24e3ae7de..6da3a247614a 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -345,12 +345,8 @@ static int cs5535_gpio_probe(struct platform_device *pdev)
mask_orig, mask);
/* finally, register with the generic GPIO API */
- err = devm_gpiochip_add_data(&pdev->dev, &cs5535_gpio_chip.chip,
- &cs5535_gpio_chip);
- if (err)
- return err;
-
- return 0;
+ return devm_gpiochip_add_data(&pdev->dev, &cs5535_gpio_chip.chip,
+ &cs5535_gpio_chip);
}
static struct platform_driver cs5535_gpio_driver = {
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index 4275c18a097a..d3233cc4b76b 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -616,10 +616,9 @@ static int dwapb_get_reset(struct dwapb_gpio *gpio)
int err;
gpio->rst = devm_reset_control_get_optional_shared(gpio->dev, NULL);
- if (IS_ERR(gpio->rst)) {
- dev_err(gpio->dev, "Cannot get reset descriptor\n");
- return PTR_ERR(gpio->rst);
- }
+ if (IS_ERR(gpio->rst))
+ return dev_err_probe(gpio->dev, PTR_ERR(gpio->rst),
+ "Cannot get reset descriptor\n");
err = reset_control_deassert(gpio->rst);
if (err) {
diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
index b1accfba017d..d37de78247a6 100644
--- a/drivers/gpio/gpio-exar.c
+++ b/drivers/gpio/gpio-exar.c
@@ -4,14 +4,17 @@
*
* Copyright (C) 2015 Sudip Mukherjee <sudip.mukherjee@codethink.co.uk>
*/
+
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/gpio/driver.h>
+#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#define EXAR_OFFSET_MPIOLVL_LO 0x90
#define EXAR_OFFSET_MPIOSEL_LO 0x93
@@ -24,60 +27,39 @@ static DEFINE_IDA(ida_index);
struct exar_gpio_chip {
struct gpio_chip gpio_chip;
- struct mutex lock;
+ struct regmap *regmap;
int index;
- void __iomem *regs;
char name[20];
unsigned int first_pin;
};
-static void exar_update(struct gpio_chip *chip, unsigned int reg, int val,
- unsigned int offset)
+static unsigned int
+exar_offset_to_sel_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- int temp;
-
- mutex_lock(&exar_gpio->lock);
- temp = readb(exar_gpio->regs + reg);
- temp &= ~BIT(offset);
- if (val)
- temp |= BIT(offset);
- writeb(temp, exar_gpio->regs + reg);
- mutex_unlock(&exar_gpio->lock);
+ return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOSEL_HI
+ : EXAR_OFFSET_MPIOSEL_LO;
}
-static int exar_set_direction(struct gpio_chip *chip, int direction,
- unsigned int offset)
+static unsigned int
+exar_offset_to_lvl_addr(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
-
- exar_update(chip, addr, direction, bit);
- return 0;
+ return (offset + exar_gpio->first_pin) / 8 ? EXAR_OFFSET_MPIOLVL_HI
+ : EXAR_OFFSET_MPIOLVL_LO;
}
-static int exar_get(struct gpio_chip *chip, unsigned int reg)
+static unsigned int
+exar_offset_to_bit(struct exar_gpio_chip *exar_gpio, unsigned int offset)
{
- struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- int value;
-
- mutex_lock(&exar_gpio->lock);
- value = readb(exar_gpio->regs + reg);
- mutex_unlock(&exar_gpio->lock);
-
- return value;
+ return (offset + exar_gpio->first_pin) % 8;
}
static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOSEL_HI : EXAR_OFFSET_MPIOSEL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
+ unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
- if (exar_get(chip, addr) & BIT(bit))
+ if (regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)))
return GPIO_LINE_DIRECTION_IN;
return GPIO_LINE_DIRECTION_OUT;
@@ -86,39 +68,66 @@ static int exar_get_direction(struct gpio_chip *chip, unsigned int offset)
static int exar_get_value(struct gpio_chip *chip, unsigned int offset)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
+ unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
- return !!(exar_get(chip, addr) & BIT(bit));
+ return !!(regmap_test_bits(exar_gpio->regmap, addr, BIT(bit)));
}
static void exar_set_value(struct gpio_chip *chip, unsigned int offset,
int value)
{
struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
- unsigned int addr = (offset + exar_gpio->first_pin) / 8 ?
- EXAR_OFFSET_MPIOLVL_HI : EXAR_OFFSET_MPIOLVL_LO;
- unsigned int bit = (offset + exar_gpio->first_pin) % 8;
+ unsigned int addr = exar_offset_to_lvl_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
- exar_update(chip, addr, value, bit);
+ if (value)
+ regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
+ else
+ regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
}
static int exar_direction_output(struct gpio_chip *chip, unsigned int offset,
int value)
{
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+
exar_set_value(chip, offset, value);
- return exar_set_direction(chip, 0, offset);
+ regmap_clear_bits(exar_gpio->regmap, addr, BIT(bit));
+
+ return 0;
}
static int exar_direction_input(struct gpio_chip *chip, unsigned int offset)
{
- return exar_set_direction(chip, 1, offset);
+ struct exar_gpio_chip *exar_gpio = gpiochip_get_data(chip);
+ unsigned int addr = exar_offset_to_sel_addr(exar_gpio, offset);
+ unsigned int bit = exar_offset_to_bit(exar_gpio, offset);
+
+ regmap_set_bits(exar_gpio->regmap, addr, BIT(bit));
+
+ return 0;
}
+static void exar_devm_ida_free(void *data)
+{
+ struct exar_gpio_chip *exar_gpio = data;
+
+ ida_free(&ida_index, exar_gpio->index);
+}
+
+static const struct regmap_config exar_regmap_config = {
+ .name = "exar-gpio",
+ .reg_bits = 16,
+ .val_bits = 8,
+};
+
static int gpio_exar_probe(struct platform_device *pdev)
{
- struct pci_dev *pcidev = to_pci_dev(pdev->dev.parent);
+ struct device *dev = &pdev->dev;
+ struct pci_dev *pcidev = to_pci_dev(dev->parent);
struct exar_gpio_chip *exar_gpio;
u32 first_pin, ngpios;
void __iomem *p;
@@ -132,30 +141,37 @@ static int gpio_exar_probe(struct platform_device *pdev)
if (!p)
return -ENOMEM;
- ret = device_property_read_u32(&pdev->dev, "exar,first-pin",
- &first_pin);
+ ret = device_property_read_u32(dev, "exar,first-pin", &first_pin);
if (ret)
return ret;
- ret = device_property_read_u32(&pdev->dev, "ngpios", &ngpios);
+ ret = device_property_read_u32(dev, "ngpios", &ngpios);
if (ret)
return ret;
- exar_gpio = devm_kzalloc(&pdev->dev, sizeof(*exar_gpio), GFP_KERNEL);
+ exar_gpio = devm_kzalloc(dev, sizeof(*exar_gpio), GFP_KERNEL);
if (!exar_gpio)
return -ENOMEM;
- mutex_init(&exar_gpio->lock);
+ /*
+ * We don't need to check the return values of MMIO regmap operations
+ * (unless the regmap has a clock attached, which is not the case here).
+ */
+ exar_gpio->regmap = devm_regmap_init_mmio(dev, p, &exar_regmap_config);
+ if (IS_ERR(exar_gpio->regmap))
+ return PTR_ERR(exar_gpio->regmap);
+
+ index = ida_alloc(&ida_index, GFP_KERNEL);
+ if (index < 0)
+ return index;
- index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
- if (index < 0) {
- ret = index;
- goto err_mutex_destroy;
- }
+ ret = devm_add_action_or_reset(dev, exar_devm_ida_free, exar_gpio);
+ if (ret)
+ return ret;
sprintf(exar_gpio->name, "exar_gpio%d", index);
exar_gpio->gpio_chip.label = exar_gpio->name;
- exar_gpio->gpio_chip.parent = &pdev->dev;
+ exar_gpio->gpio_chip.parent = dev;
exar_gpio->gpio_chip.direction_output = exar_direction_output;
exar_gpio->gpio_chip.direction_input = exar_direction_input;
exar_gpio->gpio_chip.get_direction = exar_get_direction;
@@ -163,39 +179,20 @@ static int gpio_exar_probe(struct platform_device *pdev)
exar_gpio->gpio_chip.set = exar_set_value;
exar_gpio->gpio_chip.base = -1;
exar_gpio->gpio_chip.ngpio = ngpios;
- exar_gpio->regs = p;
exar_gpio->index = index;
exar_gpio->first_pin = first_pin;
- ret = devm_gpiochip_add_data(&pdev->dev,
- &exar_gpio->gpio_chip, exar_gpio);
+ ret = devm_gpiochip_add_data(dev, &exar_gpio->gpio_chip, exar_gpio);
if (ret)
- goto err_destroy;
+ return ret;
platform_set_drvdata(pdev, exar_gpio);
return 0;
-
-err_destroy:
- ida_simple_remove(&ida_index, index);
-err_mutex_destroy:
- mutex_destroy(&exar_gpio->lock);
- return ret;
-}
-
-static int gpio_exar_remove(struct platform_device *pdev)
-{
- struct exar_gpio_chip *exar_gpio = platform_get_drvdata(pdev);
-
- ida_simple_remove(&ida_index, exar_gpio->index);
- mutex_destroy(&exar_gpio->lock);
-
- return 0;
}
static struct platform_driver gpio_exar_driver = {
.probe = gpio_exar_probe,
- .remove = gpio_exar_remove,
.driver = {
.name = DRIVER_NAME,
},
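
The regmap conversion above works because regmap_set_bits() and regmap_clear_bits() are thin wrappers around regmap_update_bits(), which performs the read-modify-write under regmap's own lock; that is what lets the driver drop its private mutex, the exar_update() helper, and with them the whole remove() callback. In rough equivalence (illustrative, not driver code):

    /* Each of these performs a locked read-modify-write internally: */
    regmap_set_bits(map, addr, BIT(bit));    /* reg |= BIT(bit)  */
    regmap_clear_bits(map, addr, BIT(bit));  /* reg &= ~BIT(bit) */
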
diff --git a/drivers/gpio/gpio-hisi.c b/drivers/gpio/gpio-hisi.c
new file mode 100644
index 000000000000..ad3d4da25160
--- /dev/null
+++ b/drivers/gpio/gpio-hisi.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2020 HiSilicon Limited. */
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+
+#define HISI_GPIO_SWPORT_DR_SET_WX 0x000
+#define HISI_GPIO_SWPORT_DR_CLR_WX 0x004
+#define HISI_GPIO_SWPORT_DDR_SET_WX 0x010
+#define HISI_GPIO_SWPORT_DDR_CLR_WX 0x014
+#define HISI_GPIO_SWPORT_DDR_ST_WX 0x018
+#define HISI_GPIO_INTEN_SET_WX 0x020
+#define HISI_GPIO_INTEN_CLR_WX 0x024
+#define HISI_GPIO_INTMASK_SET_WX 0x030
+#define HISI_GPIO_INTMASK_CLR_WX 0x034
+#define HISI_GPIO_INTTYPE_EDGE_SET_WX 0x040
+#define HISI_GPIO_INTTYPE_EDGE_CLR_WX 0x044
+#define HISI_GPIO_INT_POLARITY_SET_WX 0x050
+#define HISI_GPIO_INT_POLARITY_CLR_WX 0x054
+#define HISI_GPIO_DEBOUNCE_SET_WX 0x060
+#define HISI_GPIO_DEBOUNCE_CLR_WX 0x064
+#define HISI_GPIO_INTSTATUS_WX 0x070
+#define HISI_GPIO_PORTA_EOI_WX 0x078
+#define HISI_GPIO_EXT_PORT_WX 0x080
+#define HISI_GPIO_INTCOMB_MASK_WX 0x0a0
+#define HISI_GPIO_INT_DEDGE_SET 0x0b0
+#define HISI_GPIO_INT_DEDGE_CLR 0x0b4
+#define HISI_GPIO_INT_DEDGE_ST 0x0b8
+
+#define HISI_GPIO_LINE_NUM_MAX 32
+#define HISI_GPIO_DRIVER_NAME "gpio-hisi"
+
+struct hisi_gpio {
+ struct gpio_chip chip;
+ struct device *dev;
+ void __iomem *reg_base;
+ unsigned int line_num;
+ struct irq_chip irq_chip;
+ int irq;
+};
+
+static inline u32 hisi_gpio_read_reg(struct gpio_chip *chip,
+ unsigned int off)
+{
+ struct hisi_gpio *hisi_gpio =
+ container_of(chip, struct hisi_gpio, chip);
+ void __iomem *reg = hisi_gpio->reg_base + off;
+
+ return readl(reg);
+}
+
+static inline void hisi_gpio_write_reg(struct gpio_chip *chip,
+ unsigned int off, u32 val)
+{
+ struct hisi_gpio *hisi_gpio =
+ container_of(chip, struct hisi_gpio, chip);
+ void __iomem *reg = hisi_gpio->reg_base + off;
+
+ writel(val, reg);
+}
+
+static void hisi_gpio_set_debounce(struct gpio_chip *chip, unsigned int off,
+ u32 debounce)
+{
+ if (debounce)
+ hisi_gpio_write_reg(chip, HISI_GPIO_DEBOUNCE_SET_WX, BIT(off));
+ else
+ hisi_gpio_write_reg(chip, HISI_GPIO_DEBOUNCE_CLR_WX, BIT(off));
+}
+
+static int hisi_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
+ unsigned long config)
+{
+ u32 config_para = pinconf_to_config_param(config);
+ u32 config_arg;
+
+ switch (config_para) {
+ case PIN_CONFIG_INPUT_DEBOUNCE:
+ config_arg = pinconf_to_config_argument(config);
+ hisi_gpio_set_debounce(chip, offset, config_arg);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static void hisi_gpio_set_ack(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_write_reg(chip, HISI_GPIO_PORTA_EOI_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_set_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_SET_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_clr_mask(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTMASK_CLR_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static int hisi_gpio_irq_set_type(struct irq_data *d, u32 type)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+ unsigned int mask = BIT(irqd_to_hwirq(d));
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_BOTH:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_DEDGE_SET, mask);
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_SET_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_SET_WX, mask);
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_SET_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_CLR_WX, mask);
+ break;
+ case IRQ_TYPE_LEVEL_HIGH:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_CLR_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_SET_WX, mask);
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTTYPE_EDGE_CLR_WX, mask);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_POLARITY_CLR_WX, mask);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * The dual-edge interrupt registers and the other interrupt type
+ * registers do not take effect at the same time. The dual-edge
+ * interrupt registers have higher priority, so any dual-edge
+ * configuration must be cleared before configuring another
+ * interrupt type.
+ */
+ if (type != IRQ_TYPE_EDGE_BOTH) {
+ unsigned int both = hisi_gpio_read_reg(chip, HISI_GPIO_INT_DEDGE_ST);
+
+ if (both & mask)
+ hisi_gpio_write_reg(chip, HISI_GPIO_INT_DEDGE_CLR, mask);
+ }
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ irq_set_handler_locked(d, handle_level_irq);
+ else if (type & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(d, handle_edge_irq);
+
+ return 0;
+}
+
+static void hisi_gpio_irq_enable(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_irq_clr_mask(d);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTEN_SET_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_disable(struct irq_data *d)
+{
+ struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
+
+ hisi_gpio_irq_set_mask(d);
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTEN_CLR_WX, BIT(irqd_to_hwirq(d)));
+}
+
+static void hisi_gpio_irq_handler(struct irq_desc *desc)
+{
+ struct hisi_gpio *hisi_gpio = irq_desc_get_handler_data(desc);
+ unsigned long irq_msk = hisi_gpio_read_reg(&hisi_gpio->chip,
+ HISI_GPIO_INTSTATUS_WX);
+ struct irq_chip *irq_c = irq_desc_get_chip(desc);
+ int hwirq;
+
+ chained_irq_enter(irq_c, desc);
+ for_each_set_bit(hwirq, &irq_msk, HISI_GPIO_LINE_NUM_MAX)
+ generic_handle_irq(irq_find_mapping(hisi_gpio->chip.irq.domain,
+ hwirq));
+ chained_irq_exit(irq_c, desc);
+}
+
+static void hisi_gpio_init_irq(struct hisi_gpio *hisi_gpio)
+{
+ struct gpio_chip *chip = &hisi_gpio->chip;
+ struct gpio_irq_chip *girq_chip = &chip->irq;
+
+ /* Set hooks for irq_chip */
+ hisi_gpio->irq_chip.irq_ack = hisi_gpio_set_ack;
+ hisi_gpio->irq_chip.irq_mask = hisi_gpio_irq_set_mask;
+ hisi_gpio->irq_chip.irq_unmask = hisi_gpio_irq_clr_mask;
+ hisi_gpio->irq_chip.irq_set_type = hisi_gpio_irq_set_type;
+ hisi_gpio->irq_chip.irq_enable = hisi_gpio_irq_enable;
+ hisi_gpio->irq_chip.irq_disable = hisi_gpio_irq_disable;
+
+ girq_chip->chip = &hisi_gpio->irq_chip;
+ girq_chip->default_type = IRQ_TYPE_NONE;
+ girq_chip->num_parents = 1;
+ girq_chip->parents = &hisi_gpio->irq;
+ girq_chip->parent_handler = hisi_gpio_irq_handler;
+ girq_chip->parent_handler_data = hisi_gpio;
+
+ /* Clear Mask of GPIO controller combine IRQ */
+ hisi_gpio_write_reg(chip, HISI_GPIO_INTCOMB_MASK_WX, 1);
+}
+
+static const struct acpi_device_id hisi_gpio_acpi_match[] = {
+ {"HISI0184", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(acpi, hisi_gpio_acpi_match);
+
+static void hisi_gpio_get_pdata(struct device *dev,
+ struct hisi_gpio *hisi_gpio)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct fwnode_handle *fwnode;
+ int idx = 0;
+
+ device_for_each_child_node(dev, fwnode) {
+ /* The loop runs only once, so no array is needed to save line_num */
+ if (fwnode_property_read_u32(fwnode, "ngpios",
+ &hisi_gpio->line_num)) {
+ dev_err(dev,
+ "failed to get number of lines for port%d and use default value instead\n",
+ idx);
+ hisi_gpio->line_num = HISI_GPIO_LINE_NUM_MAX;
+ }
+
+ if (WARN_ON(hisi_gpio->line_num > HISI_GPIO_LINE_NUM_MAX))
+ hisi_gpio->line_num = HISI_GPIO_LINE_NUM_MAX;
+
+ hisi_gpio->irq = platform_get_irq(pdev, idx);
+
+ dev_info(dev,
+ "get hisi_gpio[%d] with %d lines\n", idx,
+ hisi_gpio->line_num);
+
+ idx++;
+ }
+}
+
+static int hisi_gpio_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_gpio *hisi_gpio;
+ int port_num;
+ int ret;
+
+ /*
+ * Currently one GPIO controller owns one port; if we get
+ * more from the ACPI table, return an error.
+ */
+ port_num = device_get_child_node_count(dev);
+ if (WARN_ON(port_num != 1))
+ return -ENODEV;
+
+ hisi_gpio = devm_kzalloc(dev, sizeof(*hisi_gpio), GFP_KERNEL);
+ if (!hisi_gpio)
+ return -ENOMEM;
+
+ hisi_gpio->reg_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hisi_gpio->reg_base))
+ return PTR_ERR(hisi_gpio->reg_base);
+
+ hisi_gpio_get_pdata(dev, hisi_gpio);
+
+ hisi_gpio->dev = dev;
+
+ ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev, 0x4,
+ hisi_gpio->reg_base + HISI_GPIO_EXT_PORT_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_SET_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_SET_WX,
+ hisi_gpio->reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX,
+ BGPIOF_NO_SET_ON_INPUT);
+ if (ret) {
+ dev_err(dev, "failed to init, ret = %d\n", ret);
+ return ret;
+ }
+
+ hisi_gpio->chip.set_config = hisi_gpio_set_config;
+ hisi_gpio->chip.ngpio = hisi_gpio->line_num;
+ hisi_gpio->chip.bgpio_dir_unreadable = 1;
+ hisi_gpio->chip.base = -1;
+
+ if (hisi_gpio->irq > 0)
+ hisi_gpio_init_irq(hisi_gpio);
+
+ ret = devm_gpiochip_add_data(dev, &hisi_gpio->chip, hisi_gpio);
+ if (ret) {
+ dev_err(dev, "failed to register gpiochip, ret = %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct platform_driver hisi_gpio_driver = {
+ .driver = {
+ .name = HISI_GPIO_DRIVER_NAME,
+ .acpi_match_table = hisi_gpio_acpi_match,
+ },
+ .probe = hisi_gpio_probe,
+};
+
+module_platform_driver(hisi_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Luo Jiaxing <luojiaxing@huawei.com>");
+MODULE_DESCRIPTION("HiSilicon GPIO controller driver");
+MODULE_ALIAS("platform:" HISI_GPIO_DRIVER_NAME);
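
For readers new to the generic MMIO GPIO library this driver builds on: the bgpio_init() call in the probe maps the controller's set/clear register pairs onto gpio-generic's slots. An annotated restatement of the same call (annotations are editorial, reg_base abbreviates hisi_gpio->reg_base; the signature is from include/linux/gpio/driver.h):

    ret = bgpio_init(&hisi_gpio->chip, hisi_gpio->dev,
                     0x4,                                    /* register width: 4 bytes */
                     reg_base + HISI_GPIO_EXT_PORT_WX,       /* dat: input level */
                     reg_base + HISI_GPIO_SWPORT_DR_SET_WX,  /* set: drive high */
                     reg_base + HISI_GPIO_SWPORT_DR_CLR_WX,  /* clr: drive low */
                     reg_base + HISI_GPIO_SWPORT_DDR_SET_WX, /* dirout: to output */
                     reg_base + HISI_GPIO_SWPORT_DDR_CLR_WX, /* dirin: to input */
                     BGPIOF_NO_SET_ON_INPUT);
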
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index 67ed4f238d43..28b757d34046 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -15,6 +15,7 @@
#include <linux/irq.h>
#include <linux/irq_sim.h>
#include <linux/irqdomain.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/property.h>
@@ -460,9 +461,16 @@ static int gpio_mockup_probe(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id gpio_mockup_of_match[] = {
+ { .compatible = "gpio-mockup", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, gpio_mockup_of_match);
+
static struct platform_driver gpio_mockup_driver = {
.driver = {
.name = "gpio-mockup",
+ .of_match_table = gpio_mockup_of_match,
},
.probe = gpio_mockup_probe,
};
@@ -556,8 +564,7 @@ static int __init gpio_mockup_init(void)
{
int i, num_chips, err;
- if ((gpio_mockup_num_ranges < 2) ||
- (gpio_mockup_num_ranges % 2) ||
+ if ((gpio_mockup_num_ranges % 2) ||
(gpio_mockup_num_ranges > GPIO_MOCKUP_MAX_RANGES))
return -EINVAL;
diff --git a/drivers/gpio/gpio-msc313.c b/drivers/gpio/gpio-msc313.c
new file mode 100644
index 000000000000..da31a5ff7a2b
--- /dev/null
+++ b/drivers/gpio/gpio-msc313.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Daniel Palmer <daniel@thingy.jp> */
+
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/gpio/msc313-gpio.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define DRIVER_NAME "gpio-msc313"
+
+#define MSC313_GPIO_IN BIT(0)
+#define MSC313_GPIO_OUT BIT(4)
+#define MSC313_GPIO_OEN BIT(5)
+
+/*
+ * These bits need to be saved to correctly restore the
+ * gpio state when resuming from suspend to memory.
+ */
+#define MSC313_GPIO_BITSTOSAVE (MSC313_GPIO_OUT | MSC313_GPIO_OEN)
+
+/* pad names for fuart, same for all SoCs so far */
+#define MSC313_PINNAME_FUART_RX "fuart_rx"
+#define MSC313_PINNAME_FUART_TX "fuart_tx"
+#define MSC313_PINNAME_FUART_CTS "fuart_cts"
+#define MSC313_PINNAME_FUART_RTS "fuart_rts"
+
+/* pad names for sr, mercury5 is different */
+#define MSC313_PINNAME_SR_IO2 "sr_io2"
+#define MSC313_PINNAME_SR_IO3 "sr_io3"
+#define MSC313_PINNAME_SR_IO4 "sr_io4"
+#define MSC313_PINNAME_SR_IO5 "sr_io5"
+#define MSC313_PINNAME_SR_IO6 "sr_io6"
+#define MSC313_PINNAME_SR_IO7 "sr_io7"
+#define MSC313_PINNAME_SR_IO8 "sr_io8"
+#define MSC313_PINNAME_SR_IO9 "sr_io9"
+#define MSC313_PINNAME_SR_IO10 "sr_io10"
+#define MSC313_PINNAME_SR_IO11 "sr_io11"
+#define MSC313_PINNAME_SR_IO12 "sr_io12"
+#define MSC313_PINNAME_SR_IO13 "sr_io13"
+#define MSC313_PINNAME_SR_IO14 "sr_io14"
+#define MSC313_PINNAME_SR_IO15 "sr_io15"
+#define MSC313_PINNAME_SR_IO16 "sr_io16"
+#define MSC313_PINNAME_SR_IO17 "sr_io17"
+
+/* pad names for sd, same for all SoCs so far */
+#define MSC313_PINNAME_SD_CLK "sd_clk"
+#define MSC313_PINNAME_SD_CMD "sd_cmd"
+#define MSC313_PINNAME_SD_D0 "sd_d0"
+#define MSC313_PINNAME_SD_D1 "sd_d1"
+#define MSC313_PINNAME_SD_D2 "sd_d2"
+#define MSC313_PINNAME_SD_D3 "sd_d3"
+
+/* pad names for i2c1, same for all SoCs so far */
+#define MSC313_PINNAME_I2C1_SCL "i2c1_scl"
+#define MSC313_PINNAME_I2C1_SCA "i2c1_sda"
+
+/* pad names for spi0, same for all SoCs so far */
+#define MSC313_PINNAME_SPI0_CZ "spi0_cz"
+#define MSC313_PINNAME_SPI0_CK "spi0_ck"
+#define MSC313_PINNAME_SPI0_DI "spi0_di"
+#define MSC313_PINNAME_SPI0_DO "spi0_do"
+
+#define FUART_NAMES \
+ MSC313_PINNAME_FUART_RX, \
+ MSC313_PINNAME_FUART_TX, \
+ MSC313_PINNAME_FUART_CTS, \
+ MSC313_PINNAME_FUART_RTS
+
+#define OFF_FUART_RX 0x50
+#define OFF_FUART_TX 0x54
+#define OFF_FUART_CTS 0x58
+#define OFF_FUART_RTS 0x5c
+
+#define FUART_OFFSETS \
+ OFF_FUART_RX, \
+ OFF_FUART_TX, \
+ OFF_FUART_CTS, \
+ OFF_FUART_RTS
+
+#define SR_NAMES \
+ MSC313_PINNAME_SR_IO2, \
+ MSC313_PINNAME_SR_IO3, \
+ MSC313_PINNAME_SR_IO4, \
+ MSC313_PINNAME_SR_IO5, \
+ MSC313_PINNAME_SR_IO6, \
+ MSC313_PINNAME_SR_IO7, \
+ MSC313_PINNAME_SR_IO8, \
+ MSC313_PINNAME_SR_IO9, \
+ MSC313_PINNAME_SR_IO10, \
+ MSC313_PINNAME_SR_IO11, \
+ MSC313_PINNAME_SR_IO12, \
+ MSC313_PINNAME_SR_IO13, \
+ MSC313_PINNAME_SR_IO14, \
+ MSC313_PINNAME_SR_IO15, \
+ MSC313_PINNAME_SR_IO16, \
+ MSC313_PINNAME_SR_IO17
+
+#define OFF_SR_IO2 0x88
+#define OFF_SR_IO3 0x8c
+#define OFF_SR_IO4 0x90
+#define OFF_SR_IO5 0x94
+#define OFF_SR_IO6 0x98
+#define OFF_SR_IO7 0x9c
+#define OFF_SR_IO8 0xa0
+#define OFF_SR_IO9 0xa4
+#define OFF_SR_IO10 0xa8
+#define OFF_SR_IO11 0xac
+#define OFF_SR_IO12 0xb0
+#define OFF_SR_IO13 0xb4
+#define OFF_SR_IO14 0xb8
+#define OFF_SR_IO15 0xbc
+#define OFF_SR_IO16 0xc0
+#define OFF_SR_IO17 0xc4
+
+#define SR_OFFSETS \
+ OFF_SR_IO2, \
+ OFF_SR_IO3, \
+ OFF_SR_IO4, \
+ OFF_SR_IO5, \
+ OFF_SR_IO6, \
+ OFF_SR_IO7, \
+ OFF_SR_IO8, \
+ OFF_SR_IO9, \
+ OFF_SR_IO10, \
+ OFF_SR_IO11, \
+ OFF_SR_IO12, \
+ OFF_SR_IO13, \
+ OFF_SR_IO14, \
+ OFF_SR_IO15, \
+ OFF_SR_IO16, \
+ OFF_SR_IO17
+
+#define SD_NAMES \
+ MSC313_PINNAME_SD_CLK, \
+ MSC313_PINNAME_SD_CMD, \
+ MSC313_PINNAME_SD_D0, \
+ MSC313_PINNAME_SD_D1, \
+ MSC313_PINNAME_SD_D2, \
+ MSC313_PINNAME_SD_D3
+
+#define OFF_SD_CLK 0x140
+#define OFF_SD_CMD 0x144
+#define OFF_SD_D0 0x148
+#define OFF_SD_D1 0x14c
+#define OFF_SD_D2 0x150
+#define OFF_SD_D3 0x154
+
+#define SD_OFFSETS \
+ OFF_SD_CLK, \
+ OFF_SD_CMD, \
+ OFF_SD_D0, \
+ OFF_SD_D1, \
+ OFF_SD_D2, \
+ OFF_SD_D3
+
+#define I2C1_NAMES \
+ MSC313_PINNAME_I2C1_SCL, \
+ MSC313_PINNAME_I2C1_SCA
+
+#define OFF_I2C1_SCL 0x188
+#define OFF_I2C1_SCA 0x18c
+
+#define I2C1_OFFSETS \
+ OFF_I2C1_SCL, \
+ OFF_I2C1_SCA
+
+#define SPI0_NAMES \
+ MSC313_PINNAME_SPI0_CZ, \
+ MSC313_PINNAME_SPI0_CK, \
+ MSC313_PINNAME_SPI0_DI, \
+ MSC313_PINNAME_SPI0_DO
+
+#define OFF_SPI0_CZ 0x1c0
+#define OFF_SPI0_CK 0x1c4
+#define OFF_SPI0_DI 0x1c8
+#define OFF_SPI0_DO 0x1cc
+
+#define SPI0_OFFSETS \
+ OFF_SPI0_CZ, \
+ OFF_SPI0_CK, \
+ OFF_SPI0_DI, \
+ OFF_SPI0_DO
+
+struct msc313_gpio_data {
+ const char * const *names;
+ const unsigned int *offsets;
+ const unsigned int num;
+};
+
+#define MSC313_GPIO_CHIPDATA(_chip) \
+static const struct msc313_gpio_data _chip##_data = { \
+ .names = _chip##_names, \
+ .offsets = _chip##_offsets, \
+ .num = ARRAY_SIZE(_chip##_offsets), \
+}
+
+#ifdef CONFIG_MACH_INFINITY
+static const char * const msc313_names[] = {
+ FUART_NAMES,
+ SR_NAMES,
+ SD_NAMES,
+ I2C1_NAMES,
+ SPI0_NAMES,
+};
+
+static const unsigned int msc313_offsets[] = {
+ FUART_OFFSETS,
+ SR_OFFSETS,
+ SD_OFFSETS,
+ I2C1_OFFSETS,
+ SPI0_OFFSETS,
+};
+
+MSC313_GPIO_CHIPDATA(msc313);
+#endif
+
+struct msc313_gpio {
+ void __iomem *base;
+ const struct msc313_gpio_data *gpio_data;
+ u8 *saved;
+};
+
+static void msc313_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+ u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
+
+ if (value)
+ gpioreg |= MSC313_GPIO_OUT;
+ else
+ gpioreg &= ~MSC313_GPIO_OUT;
+
+ writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+}
+
+static int msc313_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+
+ return readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]) & MSC313_GPIO_IN;
+}
+
+static int msc313_gpio_direction_input(struct gpio_chip *chip, unsigned int offset)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+ u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
+
+ gpioreg |= MSC313_GPIO_OEN;
+ writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
+}
+
+static int msc313_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+ struct msc313_gpio *gpio = gpiochip_get_data(chip);
+ u8 gpioreg = readb_relaxed(gpio->base + gpio->gpio_data->offsets[offset]);
+
+ gpioreg &= ~MSC313_GPIO_OEN;
+ if (value)
+ gpioreg |= MSC313_GPIO_OUT;
+ else
+ gpioreg &= ~MSC313_GPIO_OUT;
+ writeb_relaxed(gpioreg, gpio->base + gpio->gpio_data->offsets[offset]);
+
+ return 0;
+}
+
+/*
+ * The interrupt handling happens in the parent interrupt controller,
+ * so we don't do anything here.
+ */
+static struct irq_chip msc313_gpio_irqchip = {
+ .name = "GPIO",
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+};
+
+/*
+ * The parent interrupt controller needs the GIC interrupt type set to GIC_SPI,
+ * so we must provide the fwspec ourselves. This is essentially
+ * gpiochip_populate_parent_fwspec_twocell() with GIC_SPI put into the first cell.
+ */
+static void *msc313_gpio_populate_parent_fwspec(struct gpio_chip *gc,
+ unsigned int parent_hwirq,
+ unsigned int parent_type)
+{
+ struct irq_fwspec *fwspec;
+
+ fwspec = kmalloc(sizeof(*fwspec), GFP_KERNEL);
+ if (!fwspec)
+ return NULL;
+
+ fwspec->fwnode = gc->irq.parent_domain->fwnode;
+ fwspec->param_count = 3;
+ fwspec->param[0] = GIC_SPI;
+ fwspec->param[1] = parent_hwirq;
+ fwspec->param[2] = parent_type;
+
+ return fwspec;
+}
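
For illustration (hypothetical values): for parent hwirq 28 and an edge-rising trigger, the fwspec built above is equivalent to the device-tree interrupt specifier <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>. A rough sketch of how the irqchip core consumes the allocation argument, with the domain/virq names simplified:

    struct irq_fwspec *fwspec;

    fwspec = msc313_gpio_populate_parent_fwspec(gc, 28, IRQ_TYPE_EDGE_RISING);
    if (fwspec) {
    	/* hand the 3-cell specifier to the parent (GIC) domain */
    	irq_domain_alloc_irqs_parent(domain, virq, 1, fwspec);
    	kfree(fwspec);
    }
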
+
+static int msc313e_gpio_child_to_parent_hwirq(struct gpio_chip *chip,
+ unsigned int child,
+ unsigned int child_type,
+ unsigned int *parent,
+ unsigned int *parent_type)
+{
+ struct msc313_gpio *priv = gpiochip_get_data(chip);
+ unsigned int offset = priv->gpio_data->offsets[child];
+
+ /*
+ * Only the spi0 pins have interrupts on the parent interrupt
+ * controller on all of the known chips, and so far they are
+ * all mapped to the same place.
+ */
+ if (offset >= OFF_SPI0_CZ && offset <= OFF_SPI0_DO) {
+ *parent_type = child_type;
+ *parent = ((offset - OFF_SPI0_CZ) >> 2) + 28;
+ return 0;
+ }
+
+ return -EINVAL;
+}
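
A worked instance of the mapping above, using the offsets defined earlier: spi0_di lives at 0x1c8, so ((0x1c8 - 0x1c0) >> 2) + 28 = 30. The four spi0 pins therefore land on consecutive parent hwirqs:

    /*
     * Offset -> parent hwirq, per the formula above:
     *   spi0_cz  0x1c0 -> 28
     *   spi0_ck  0x1c4 -> 29
     *   spi0_di  0x1c8 -> 30
     *   spi0_do  0x1cc -> 31
     */
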
+
+static int msc313_gpio_probe(struct platform_device *pdev)
+{
+ const struct msc313_gpio_data *match_data;
+ struct msc313_gpio *gpio;
+ struct gpio_chip *gpiochip;
+ struct gpio_irq_chip *gpioirqchip;
+ struct irq_domain *parent_domain;
+ struct device_node *parent_node;
+ struct device *dev = &pdev->dev;
+
+ match_data = of_device_get_match_data(dev);
+ if (!match_data)
+ return -EINVAL;
+
+ parent_node = of_irq_find_parent(dev->of_node);
+ if (!parent_node)
+ return -ENODEV;
+
+ parent_domain = irq_find_host(parent_node);
+ if (!parent_domain)
+ return -ENODEV;
+
+ gpio = devm_kzalloc(dev, sizeof(*gpio), GFP_KERNEL);
+ if (!gpio)
+ return -ENOMEM;
+
+ gpio->gpio_data = match_data;
+
+ gpio->saved = devm_kcalloc(dev, gpio->gpio_data->num, sizeof(*gpio->saved), GFP_KERNEL);
+ if (!gpio->saved)
+ return -ENOMEM;
+
+ gpio->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(gpio->base))
+ return PTR_ERR(gpio->base);
+
+ platform_set_drvdata(pdev, gpio);
+
+ gpiochip = devm_kzalloc(dev, sizeof(*gpiochip), GFP_KERNEL);
+ if (!gpiochip)
+ return -ENOMEM;
+
+ gpiochip->label = DRIVER_NAME;
+ gpiochip->parent = dev;
+ gpiochip->request = gpiochip_generic_request;
+ gpiochip->free = gpiochip_generic_free;
+ gpiochip->direction_input = msc313_gpio_direction_input;
+ gpiochip->direction_output = msc313_gpio_direction_output;
+ gpiochip->get = msc313_gpio_get;
+ gpiochip->set = msc313_gpio_set;
+ gpiochip->base = -1;
+ gpiochip->ngpio = gpio->gpio_data->num;
+ gpiochip->names = gpio->gpio_data->names;
+
+ gpioirqchip = &gpiochip->irq;
+ gpioirqchip->chip = &msc313_gpio_irqchip;
+ gpioirqchip->fwnode = of_node_to_fwnode(dev->of_node);
+ gpioirqchip->parent_domain = parent_domain;
+ gpioirqchip->child_to_parent_hwirq = msc313e_gpio_child_to_parent_hwirq;
+ gpioirqchip->populate_parent_alloc_arg = msc313_gpio_populate_parent_fwspec;
+ gpioirqchip->handler = handle_bad_irq;
+ gpioirqchip->default_type = IRQ_TYPE_NONE;
+
+ return devm_gpiochip_add_data(dev, gpiochip, gpio);
+}
+
+static int msc313_gpio_remove(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static const struct of_device_id msc313_gpio_of_match[] = {
+#ifdef CONFIG_MACH_INFINITY
+ {
+ .compatible = "mstar,msc313-gpio",
+ .data = &msc313_data,
+ },
+#endif
+ { }
+};
+
+/*
+ * The GPIO controller loses the state of its registers when the
+ * SoC goes into suspend-to-memory mode, so we need to save some
+ * of the register bits before suspending and restore them when resuming.
+ */
+static int __maybe_unused msc313_gpio_suspend(struct device *dev)
+{
+ struct msc313_gpio *gpio = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < gpio->gpio_data->num; i++)
+ gpio->saved[i] = readb_relaxed(gpio->base + gpio->gpio_data->offsets[i]) & MSC313_GPIO_BITSTOSAVE;
+
+ return 0;
+}
+
+static int __maybe_unused msc313_gpio_resume(struct device *dev)
+{
+ struct msc313_gpio *gpio = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < gpio->gpio_data->num; i++)
+ writeb_relaxed(gpio->saved[i], gpio->base + gpio->gpio_data->offsets[i]);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(msc313_gpio_ops, msc313_gpio_suspend, msc313_gpio_resume);
+
+static struct platform_driver msc313_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = msc313_gpio_of_match,
+ .pm = &msc313_gpio_ops,
+ },
+ .probe = msc313_gpio_probe,
+ .remove = msc313_gpio_remove,
+};
+
+builtin_platform_driver(msc313_gpio_driver);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index 2f245594a90a..672681a976f5 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -78,8 +78,7 @@
/*
* The Armada XP has per-CPU registers for interrupt cause, interrupt
- * mask and interrupt level mask. Those are relative to the
- * percpu_membase.
+ * mask and interrupt level mask. Those are in percpu_regs range.
*/
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu) (0x10 + (cpu) * 0x4)
@@ -93,7 +92,7 @@
#define MVEBU_MAX_GPIO_PER_BANK 32
struct mvebu_pwm {
- void __iomem *membase;
+ struct regmap *regs;
unsigned long clk_rate;
struct gpio_desc *gpiod;
struct pwm_chip chip;
@@ -279,17 +278,17 @@ mvebu_gpio_write_level_mask(struct mvebu_gpio_chip *mvchip, u32 val)
}
/*
- * Functions returning addresses of individual registers for a given
+ * Functions returning offsets of individual registers for a given
* PWM controller.
*/
-static void __iomem *mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
+static unsigned int mvebu_pwmreg_blink_on_duration(struct mvebu_pwm *mvpwm)
{
- return mvpwm->membase + PWM_BLINK_ON_DURATION_OFF;
+ return PWM_BLINK_ON_DURATION_OFF;
}
-static void __iomem *mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
+static unsigned int mvebu_pwmreg_blink_off_duration(struct mvebu_pwm *mvpwm)
{
- return mvpwm->membase + PWM_BLINK_OFF_DURATION_OFF;
+ return PWM_BLINK_OFF_DURATION_OFF;
}
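
With the helpers now returning plain register offsets, every access goes through the MMIO regmap created in mvebu_pwm_probe() below. A minimal sketch of the resulting access pattern (mvpwm is assumed to be an initialised struct mvebu_pwm):

    unsigned int on;

    /* read-modify-write via the regmap rather than a membase pointer */
    regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &on);
    regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), on * 2);
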
/*
@@ -600,6 +599,13 @@ static void mvebu_gpio_irq_handler(struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
+static const struct regmap_config mvebu_gpio_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
/*
* Functions implementing the pwm_chip methods
*/
@@ -660,9 +666,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
spin_lock_irqsave(&mvpwm->lock, flags);
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
- val *= NSEC_PER_SEC;
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), &u);
+ val = (unsigned long long) u * NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
if (val > UINT_MAX)
state->duty_cycle = UINT_MAX;
@@ -671,9 +676,8 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip,
else
state->duty_cycle = 1;
- val = (unsigned long long)
- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
- val *= NSEC_PER_SEC;
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u);
+ val = (unsigned long long) u * NSEC_PER_SEC;
do_div(val, mvpwm->clk_rate);
if (val < state->duty_cycle) {
state->period = 1;
@@ -726,8 +730,8 @@ static int mvebu_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
spin_lock_irqsave(&mvpwm->lock, flags);
- writel_relaxed(on, mvebu_pwmreg_blink_on_duration(mvpwm));
- writel_relaxed(off, mvebu_pwmreg_blink_off_duration(mvpwm));
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm), on);
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), off);
if (state->enabled)
mvebu_gpio_blink(&mvchip->chip, pwm->hwpwm, 1);
else
@@ -752,10 +756,10 @@ static void __maybe_unused mvebu_pwm_suspend(struct mvebu_gpio_chip *mvchip)
regmap_read(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset,
&mvpwm->blink_select);
- mvpwm->blink_on_duration =
- readl_relaxed(mvebu_pwmreg_blink_on_duration(mvpwm));
- mvpwm->blink_off_duration =
- readl_relaxed(mvebu_pwmreg_blink_off_duration(mvpwm));
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm),
+ &mvpwm->blink_on_duration);
+ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm),
+ &mvpwm->blink_off_duration);
}
static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip)
@@ -764,10 +768,10 @@ static void __maybe_unused mvebu_pwm_resume(struct mvebu_gpio_chip *mvchip)
regmap_write(mvchip->regs, GPIO_BLINK_CNT_SELECT_OFF + mvchip->offset,
mvpwm->blink_select);
- writel_relaxed(mvpwm->blink_on_duration,
- mvebu_pwmreg_blink_on_duration(mvpwm));
- writel_relaxed(mvpwm->blink_off_duration,
- mvebu_pwmreg_blink_off_duration(mvpwm));
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_on_duration(mvpwm),
+ mvpwm->blink_on_duration);
+ regmap_write(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm),
+ mvpwm->blink_off_duration);
}
static int mvebu_pwm_probe(struct platform_device *pdev,
@@ -776,6 +780,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
{
struct device *dev = &pdev->dev;
struct mvebu_pwm *mvpwm;
+ void __iomem *base;
u32 set;
if (!of_device_is_compatible(mvchip->chip.of_node,
@@ -813,9 +818,14 @@ static int mvebu_pwm_probe(struct platform_device *pdev,
mvchip->mvpwm = mvpwm;
mvpwm->mvchip = mvchip;
- mvpwm->membase = devm_platform_ioremap_resource_byname(pdev, "pwm");
- if (IS_ERR(mvpwm->membase))
- return PTR_ERR(mvpwm->membase);
+ base = devm_platform_ioremap_resource_byname(pdev, "pwm");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ mvpwm->regs = devm_regmap_init_mmio(&pdev->dev, base,
+ &mvebu_gpio_regmap_config);
+ if (IS_ERR(mvpwm->regs))
+ return PTR_ERR(mvpwm->regs);
mvpwm->clk_rate = clk_get_rate(mvchip->clk);
if (!mvpwm->clk_rate) {
@@ -1022,13 +1032,6 @@ static int mvebu_gpio_resume(struct platform_device *pdev)
return 0;
}
-static const struct regmap_config mvebu_gpio_regmap_config = {
- .reg_bits = 32,
- .reg_stride = 4,
- .val_bits = 32,
- .fast_io = true,
-};
-
static int mvebu_gpio_probe_raw(struct platform_device *pdev,
struct mvebu_gpio_chip *mvchip)
{
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 643f4c557ac2..157106e1e438 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -24,13 +24,6 @@
#include <linux/of_device.h>
#include <linux/bug.h>
-enum mxc_gpio_hwtype {
- IMX1_GPIO, /* runs on i.mx1 */
- IMX21_GPIO, /* runs on i.mx21 and i.mx27 */
- IMX31_GPIO, /* runs on i.mx31 */
- IMX35_GPIO, /* runs on all other i.mx */
-};
-
/* device type dependent stuff */
struct mxc_gpio_hwdata {
unsigned dr_reg;
@@ -68,6 +61,7 @@ struct mxc_gpio_port {
u32 both_edges;
struct mxc_gpio_reg_saved gpio_saved_reg;
bool power_off;
+ const struct mxc_gpio_hwdata *hwdata;
};
static struct mxc_gpio_hwdata imx1_imx21_gpio_hwdata = {
@@ -115,48 +109,27 @@ static struct mxc_gpio_hwdata imx35_gpio_hwdata = {
.fall_edge = 0x03,
};
-static enum mxc_gpio_hwtype mxc_gpio_hwtype;
-static struct mxc_gpio_hwdata *mxc_gpio_hwdata;
-
-#define GPIO_DR (mxc_gpio_hwdata->dr_reg)
-#define GPIO_GDIR (mxc_gpio_hwdata->gdir_reg)
-#define GPIO_PSR (mxc_gpio_hwdata->psr_reg)
-#define GPIO_ICR1 (mxc_gpio_hwdata->icr1_reg)
-#define GPIO_ICR2 (mxc_gpio_hwdata->icr2_reg)
-#define GPIO_IMR (mxc_gpio_hwdata->imr_reg)
-#define GPIO_ISR (mxc_gpio_hwdata->isr_reg)
-#define GPIO_EDGE_SEL (mxc_gpio_hwdata->edge_sel_reg)
-
-#define GPIO_INT_LOW_LEV (mxc_gpio_hwdata->low_level)
-#define GPIO_INT_HIGH_LEV (mxc_gpio_hwdata->high_level)
-#define GPIO_INT_RISE_EDGE (mxc_gpio_hwdata->rise_edge)
-#define GPIO_INT_FALL_EDGE (mxc_gpio_hwdata->fall_edge)
+#define GPIO_DR (port->hwdata->dr_reg)
+#define GPIO_GDIR (port->hwdata->gdir_reg)
+#define GPIO_PSR (port->hwdata->psr_reg)
+#define GPIO_ICR1 (port->hwdata->icr1_reg)
+#define GPIO_ICR2 (port->hwdata->icr2_reg)
+#define GPIO_IMR (port->hwdata->imr_reg)
+#define GPIO_ISR (port->hwdata->isr_reg)
+#define GPIO_EDGE_SEL (port->hwdata->edge_sel_reg)
+
+#define GPIO_INT_LOW_LEV (port->hwdata->low_level)
+#define GPIO_INT_HIGH_LEV (port->hwdata->high_level)
+#define GPIO_INT_RISE_EDGE (port->hwdata->rise_edge)
+#define GPIO_INT_FALL_EDGE (port->hwdata->fall_edge)
#define GPIO_INT_BOTH_EDGES 0x4
-static const struct platform_device_id mxc_gpio_devtype[] = {
- {
- .name = "imx1-gpio",
- .driver_data = IMX1_GPIO,
- }, {
- .name = "imx21-gpio",
- .driver_data = IMX21_GPIO,
- }, {
- .name = "imx31-gpio",
- .driver_data = IMX31_GPIO,
- }, {
- .name = "imx35-gpio",
- .driver_data = IMX35_GPIO,
- }, {
- /* sentinel */
- }
-};
-
static const struct of_device_id mxc_gpio_dt_ids[] = {
- { .compatible = "fsl,imx1-gpio", .data = &mxc_gpio_devtype[IMX1_GPIO], },
- { .compatible = "fsl,imx21-gpio", .data = &mxc_gpio_devtype[IMX21_GPIO], },
- { .compatible = "fsl,imx31-gpio", .data = &mxc_gpio_devtype[IMX31_GPIO], },
- { .compatible = "fsl,imx35-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
- { .compatible = "fsl,imx7d-gpio", .data = &mxc_gpio_devtype[IMX35_GPIO], },
+ { .compatible = "fsl,imx1-gpio", .data = &imx1_imx21_gpio_hwdata },
+ { .compatible = "fsl,imx21-gpio", .data = &imx1_imx21_gpio_hwdata },
+ { .compatible = "fsl,imx31-gpio", .data = &imx31_gpio_hwdata },
+ { .compatible = "fsl,imx35-gpio", .data = &imx35_gpio_hwdata },
+ { .compatible = "fsl,imx7d-gpio", .data = &imx35_gpio_hwdata },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_gpio_dt_ids);
@@ -372,36 +345,6 @@ static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
return rv;
}
-static void mxc_gpio_get_hw(struct platform_device *pdev)
-{
- const struct of_device_id *of_id =
- of_match_device(mxc_gpio_dt_ids, &pdev->dev);
- enum mxc_gpio_hwtype hwtype;
-
- if (of_id)
- pdev->id_entry = of_id->data;
- hwtype = pdev->id_entry->driver_data;
-
- if (mxc_gpio_hwtype) {
- /*
- * The driver works with a reasonable presupposition,
- * that is all gpio ports must be the same type when
- * running on one soc.
- */
- BUG_ON(mxc_gpio_hwtype != hwtype);
- return;
- }
-
- if (hwtype == IMX35_GPIO)
- mxc_gpio_hwdata = &imx35_gpio_hwdata;
- else if (hwtype == IMX31_GPIO)
- mxc_gpio_hwdata = &imx31_gpio_hwdata;
- else
- mxc_gpio_hwdata = &imx1_imx21_gpio_hwdata;
-
- mxc_gpio_hwtype = hwtype;
-}
-
static int mxc_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct mxc_gpio_port *port = gpiochip_get_data(gc);
@@ -417,14 +360,14 @@ static int mxc_gpio_probe(struct platform_device *pdev)
int irq_base;
int err;
- mxc_gpio_get_hw(pdev);
-
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
port->dev = &pdev->dev;
+ port->hwdata = device_get_match_data(&pdev->dev);
+
port->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(port->base))
return PTR_ERR(port->base);
@@ -461,7 +404,7 @@ static int mxc_gpio_probe(struct platform_device *pdev)
writel(0, port->base + GPIO_IMR);
writel(~0, port->base + GPIO_ISR);
- if (mxc_gpio_hwtype == IMX21_GPIO) {
+ if (of_device_is_compatible(np, "fsl,imx21-gpio")) {
/*
* Setup one handler for all GPIO interrupts. Actually setting
* the handler is needed only once, but doing it for every port
@@ -596,7 +539,6 @@ static struct platform_driver mxc_gpio_driver = {
.suppress_bind_attrs = true,
},
.probe = mxc_gpio_probe,
- .id_table = mxc_gpio_devtype,
};
static int __init gpio_mxc_init(void)
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index c4a314c68555..dfc0c1eb1b33 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -254,19 +254,6 @@ static int mxs_gpio_get_direction(struct gpio_chip *gc, unsigned offset)
return GPIO_LINE_DIRECTION_IN;
}
-static const struct platform_device_id mxs_gpio_ids[] = {
- {
- .name = "imx23-gpio",
- .driver_data = IMX23_GPIO,
- }, {
- .name = "imx28-gpio",
- .driver_data = IMX28_GPIO,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
-
static const struct of_device_id mxs_gpio_dt_ids[] = {
{ .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
{ .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
@@ -370,7 +357,6 @@ static struct platform_driver mxs_gpio_driver = {
.suppress_bind_attrs = true,
},
.probe = mxs_gpio_probe,
- .id_table = mxs_gpio_ids,
};
static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index f7ceb2b11afc..41952bb818ad 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1049,11 +1049,8 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
irq->first = irq_base;
ret = gpiochip_add_data(&bank->chip, bank);
- if (ret) {
- dev_err(bank->chip.parent,
- "Could not register gpio chip %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");
ret = devm_request_irq(bank->chip.parent, bank->irq,
omap_gpio_irq_handler,
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 3ef19cef8da9..0b572dbc4a36 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -32,6 +32,11 @@ struct gpio_rcar_bank_info {
u32 intmsk;
};
+struct gpio_rcar_info {
+ bool has_outdtsel;
+ bool has_both_edge_trigger;
+};
+
struct gpio_rcar_priv {
void __iomem *base;
spinlock_t lock;
@@ -40,24 +45,23 @@ struct gpio_rcar_priv {
struct irq_chip irq_chip;
unsigned int irq_parent;
atomic_t wakeup_path;
- bool has_outdtsel;
- bool has_both_edge_trigger;
+ struct gpio_rcar_info info;
struct gpio_rcar_bank_info bank_info;
};
-#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
-#define INOUTSEL 0x04 /* General Input/Output Switching Register */
-#define OUTDT 0x08 /* General Output Register */
-#define INDT 0x0c /* General Input Register */
-#define INTDT 0x10 /* Interrupt Display Register */
-#define INTCLR 0x14 /* Interrupt Clear Register */
-#define INTMSK 0x18 /* Interrupt Mask Register */
-#define MSKCLR 0x1c /* Interrupt Mask Clear Register */
-#define POSNEG 0x20 /* Positive/Negative Logic Select Register */
-#define EDGLEVEL 0x24 /* Edge/level Select Register */
-#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
-#define OUTDTSEL 0x40 /* Output Data Select Register */
-#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
+#define IOINTSEL 0x00 /* General IO/Interrupt Switching Register */
+#define INOUTSEL 0x04 /* General Input/Output Switching Register */
+#define OUTDT 0x08 /* General Output Register */
+#define INDT 0x0c /* General Input Register */
+#define INTDT 0x10 /* Interrupt Display Register */
+#define INTCLR 0x14 /* Interrupt Clear Register */
+#define INTMSK 0x18 /* Interrupt Mask Register */
+#define MSKCLR 0x1c /* Interrupt Mask Clear Register */
+#define POSNEG 0x20 /* Positive/Negative Logic Select Register */
+#define EDGLEVEL 0x24 /* Edge/level Select Register */
+#define FILONOFF 0x28 /* Chattering Prevention On/Off Register */
+#define OUTDTSEL 0x40 /* Output Data Select Register */
+#define BOTHEDGE 0x4c /* One Edge/Both Edge Select Register */
#define RCAR_MAX_GPIO_PER_BANK 32
@@ -123,7 +127,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p,
gpio_rcar_modify_bit(p, EDGLEVEL, hwirq, !level_trigger);
/* Select one edge or both edges in BOTHEDGE */
- if (p->has_both_edge_trigger)
+ if (p->info.has_both_edge_trigger)
gpio_rcar_modify_bit(p, BOTHEDGE, hwirq, both);
/* Select "Interrupt Input Mode" in IOINTSEL */
@@ -162,7 +166,7 @@ static int gpio_rcar_irq_set_type(struct irq_data *d, unsigned int type)
false);
break;
case IRQ_TYPE_EDGE_BOTH:
- if (!p->has_both_edge_trigger)
+ if (!p->info.has_both_edge_trigger)
return -EINVAL;
gpio_rcar_config_interrupt_input_mode(p, hwirq, true, false,
true);
@@ -238,7 +242,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip,
gpio_rcar_modify_bit(p, INOUTSEL, gpio, output);
/* Select General Output Register to output data in OUTDTSEL */
- if (p->has_outdtsel && output)
+ if (p->info.has_outdtsel && output)
gpio_rcar_modify_bit(p, OUTDTSEL, gpio, false);
spin_unlock_irqrestore(&p->lock, flags);
@@ -295,14 +299,44 @@ static int gpio_rcar_direction_input(struct gpio_chip *chip, unsigned offset)
static int gpio_rcar_get(struct gpio_chip *chip, unsigned offset)
{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
u32 bit = BIT(offset);
/* testing on r8a7790 shows that INDT does not show correct pin state
* when configured as output, so use OUTDT in case of output pins */
- if (gpio_rcar_read(gpiochip_get_data(chip), INOUTSEL) & bit)
- return !!(gpio_rcar_read(gpiochip_get_data(chip), OUTDT) & bit);
+ if (gpio_rcar_read(p, INOUTSEL) & bit)
+ return !!(gpio_rcar_read(p, OUTDT) & bit);
else
- return !!(gpio_rcar_read(gpiochip_get_data(chip), INDT) & bit);
+ return !!(gpio_rcar_read(p, INDT) & bit);
+}
+
+static int gpio_rcar_get_multiple(struct gpio_chip *chip, unsigned long *mask,
+ unsigned long *bits)
+{
+ struct gpio_rcar_priv *p = gpiochip_get_data(chip);
+ u32 bankmask, outputs, m, val = 0;
+ unsigned long flags;
+
+ bankmask = mask[0] & GENMASK(chip->ngpio - 1, 0);
+ if (chip->valid_mask)
+ bankmask &= chip->valid_mask[0];
+
+ if (!bankmask)
+ return 0;
+
+ spin_lock_irqsave(&p->lock, flags);
+ outputs = gpio_rcar_read(p, INOUTSEL);
+ m = outputs & bankmask;
+ if (m)
+ val |= gpio_rcar_read(p, OUTDT) & m;
+
+ m = ~outputs & bankmask;
+ if (m)
+ val |= gpio_rcar_read(p, INDT) & m;
+ spin_unlock_irqrestore(&p->lock, flags);
+
+ bits[0] = val;
+ return 0;
}
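
The new get_multiple handler merges OUTDT for lines currently configured as outputs with INDT for inputs, under a single lock, honouring the same r8a7790 quirk as gpio_rcar_get(). A hedged sketch of the contract it implements (chip is assumed to be this bank's struct gpio_chip):

    /* read lines 0 and 5 of the bank in a single register pass */
    unsigned long mask = BIT(0) | BIT(5);
    unsigned long bits = 0;

    gpio_rcar_get_multiple(chip, &mask, &bits);
    /* bits now holds the levels of lines 0 and 5; all other bits are 0 */
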
static void gpio_rcar_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -346,11 +380,6 @@ static int gpio_rcar_direction_output(struct gpio_chip *chip, unsigned offset,
return 0;
}
-struct gpio_rcar_info {
- bool has_outdtsel;
- bool has_both_edge_trigger;
-};
-
static const struct gpio_rcar_info gpio_rcar_info_gen1 = {
.has_outdtsel = false,
.has_both_edge_trigger = false,
@@ -417,8 +446,7 @@ static int gpio_rcar_parse_dt(struct gpio_rcar_priv *p, unsigned int *npins)
int ret;
info = of_device_get_match_data(p->dev);
- p->has_outdtsel = info->has_outdtsel;
- p->has_both_edge_trigger = info->has_both_edge_trigger;
+ p->info = *info;
ret = of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args);
*npins = ret == 0 ? args.args[2] : RCAR_MAX_GPIO_PER_BANK;
@@ -479,6 +507,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
gpio_chip->get_direction = gpio_rcar_get_direction;
gpio_chip->direction_input = gpio_rcar_direction_input;
gpio_chip->get = gpio_rcar_get;
+ gpio_chip->get_multiple = gpio_rcar_get_multiple;
gpio_chip->direction_output = gpio_rcar_direction_output;
gpio_chip->set = gpio_rcar_set;
gpio_chip->set_multiple = gpio_rcar_set_multiple;
@@ -552,7 +581,7 @@ static int gpio_rcar_suspend(struct device *dev)
p->bank_info.intmsk = gpio_rcar_read(p, INTMSK);
p->bank_info.posneg = gpio_rcar_read(p, POSNEG);
p->bank_info.edglevel = gpio_rcar_read(p, EDGLEVEL);
- if (p->has_both_edge_trigger)
+ if (p->info.has_both_edge_trigger)
p->bank_info.bothedge = gpio_rcar_read(p, BOTHEDGE);
if (atomic_read(&p->wakeup_path))
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index d5eb9ca11901..403f9e833d6a 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -29,7 +29,6 @@
#define SIFIVE_GPIO_OUTPUT_XOR 0x40
#define SIFIVE_GPIO_MAX 32
-#define SIFIVE_GPIO_IRQ_OFFSET 7
struct sifive_gpio {
void __iomem *base;
@@ -37,7 +36,7 @@ struct sifive_gpio {
struct regmap *regs;
unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
- unsigned int irq_parent[SIFIVE_GPIO_MAX];
+ unsigned int irq_number[SIFIVE_GPIO_MAX];
};
static void sifive_gpio_set_ie(struct sifive_gpio *chip, unsigned int offset)
@@ -128,6 +127,16 @@ static void sifive_gpio_irq_eoi(struct irq_data *d)
irq_chip_eoi_parent(d);
}
+static int sifive_gpio_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
static struct irq_chip sifive_gpio_irqchip = {
.name = "sifive-gpio",
.irq_set_type = sifive_gpio_irq_set_type,
@@ -136,6 +145,7 @@ static struct irq_chip sifive_gpio_irqchip = {
.irq_enable = sifive_gpio_irq_enable,
.irq_disable = sifive_gpio_irq_disable,
.irq_eoi = sifive_gpio_irq_eoi,
+ .irq_set_affinity = sifive_gpio_irq_set_affinity,
};
static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
@@ -144,8 +154,12 @@ static int sifive_gpio_child_to_parent_hwirq(struct gpio_chip *gc,
unsigned int *parent,
unsigned int *parent_type)
{
+ struct sifive_gpio *chip = gpiochip_get_data(gc);
+ struct irq_data *d = irq_get_irq_data(chip->irq_number[child]);
+
*parent_type = IRQ_TYPE_NONE;
- *parent = child + SIFIVE_GPIO_IRQ_OFFSET;
+ *parent = irqd_to_hwirq(d);
+
return 0;
}
@@ -165,7 +179,7 @@ static int sifive_gpio_probe(struct platform_device *pdev)
struct irq_domain *parent;
struct gpio_irq_chip *girq;
struct sifive_gpio *chip;
- int ret, ngpio;
+ int ret, ngpio, i;
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
@@ -200,6 +214,9 @@ static int sifive_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
+ for (i = 0; i < ngpio; i++)
+ chip->irq_number[i] = platform_get_irq(pdev, i);
+
ret = bgpio_init(&chip->gc, dev, 4,
chip->base + SIFIVE_GPIO_INPUT_VAL,
chip->base + SIFIVE_GPIO_OUTPUT_VAL,
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index b0155d6007c8..b94ef8181428 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -474,15 +474,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip.parent = &pdev->dev;
stmpe_gpio->chip.of_node = np;
stmpe_gpio->chip.base = -1;
- /*
- * REVISIT: this makes sure the valid mask gets allocated and
- * filled in when adding the gpio_chip, but the rest of the
- * gpio_irqchip is still filled in using the old method
- * in gpiochip_irqchip_add_nested() so clean this up once we
- * get the gpio_irqchip to initialize while adding the
- * gpio_chip also for threaded irqchips.
- */
- stmpe_gpio->chip.irq.init_valid_mask = stmpe_init_irq_valid_mask;
if (IS_ENABLED(CONFIG_DEBUG_FS))
stmpe_gpio->chip.dbg_show = stmpe_dbg_show;
@@ -520,6 +511,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
girq->default_type = IRQ_TYPE_NONE;
girq->handler = handle_simple_irq;
girq->threaded = true;
+ girq->init_valid_mask = stmpe_init_irq_valid_mask;
}
ret = gpiochip_add_data(&stmpe_gpio->chip, stmpe_gpio);
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 86568154cdb3..e19ebff6018c 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -61,8 +61,16 @@ struct tegra_gpio_info;
struct tegra_gpio_bank {
unsigned int bank;
unsigned int irq;
- spinlock_t lvl_lock[4];
- spinlock_t dbc_lock[4]; /* Lock for updating debounce count register */
+
+ /*
+ * The IRQ core uses raw locking, and thus nested locking should
+ * also be raw in order not to trip spinlock debug warnings.
+ */
+ raw_spinlock_t lvl_lock[4];
+
+ /* Lock for updating debounce count register */
+ spinlock_t dbc_lock[4];
+
#ifdef CONFIG_PM_SLEEP
u32 cnf[4];
u32 out[4];
@@ -334,14 +342,14 @@ static int tegra_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return -EINVAL;
}
- spin_lock_irqsave(&bank->lvl_lock[port], flags);
+ raw_spin_lock_irqsave(&bank->lvl_lock[port], flags);
val = tegra_gpio_readl(tgi, GPIO_INT_LVL(tgi, gpio));
val &= ~(GPIO_INT_LVL_MASK << GPIO_BIT(gpio));
val |= lvl_type << GPIO_BIT(gpio);
tegra_gpio_writel(tgi, val, GPIO_INT_LVL(tgi, gpio));
- spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
+ raw_spin_unlock_irqrestore(&bank->lvl_lock[port], flags);
tegra_gpio_mask_write(tgi, GPIO_MSK_OE(tgi, gpio), gpio, 0);
tegra_gpio_enable(tgi, gpio);
@@ -560,6 +568,9 @@ static const struct dev_pm_ops tegra_gpio_pm_ops = {
SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(tegra_gpio_suspend, tegra_gpio_resume)
};
+static struct lock_class_key gpio_lock_class;
+static struct lock_class_key gpio_request_class;
+
static int tegra_gpio_probe(struct platform_device *pdev)
{
struct tegra_gpio_info *tgi;
@@ -661,6 +672,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
bank = &tgi->bank_info[GPIO_BANK(gpio)];
irq_set_chip_data(irq, bank);
+ irq_set_lockdep_class(irq, &gpio_lock_class, &gpio_request_class);
irq_set_chip_and_handler(irq, &tgi->ic, handle_simple_irq);
}
@@ -671,7 +683,7 @@ static int tegra_gpio_probe(struct platform_device *pdev)
tegra_gpio_irq_handler, bank);
for (j = 0; j < 4; j++) {
- spin_lock_init(&bank->lvl_lock[j]);
+ raw_spin_lock_init(&bank->lvl_lock[j]);
spin_lock_init(&bank->dbc_lock[j]);
}
}
diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c
index 9500074b1f1b..286e0b1f46e4 100644
--- a/drivers/gpio/gpio-tegra186.c
+++ b/drivers/gpio/gpio-tegra186.c
@@ -444,6 +444,16 @@ static int tegra186_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
+static int tegra186_irq_set_affinity(struct irq_data *data,
+ const struct cpumask *dest,
+ bool force)
+{
+ if (data->parent_data)
+ return irq_chip_set_affinity_parent(data, dest, force);
+
+ return -EINVAL;
+}
+
static void tegra186_gpio_irq(struct irq_desc *desc)
{
struct tegra_gpio *gpio = irq_desc_get_handler_data(desc);
@@ -690,6 +700,7 @@ static int tegra186_gpio_probe(struct platform_device *pdev)
gpio->intc.irq_unmask = tegra186_irq_unmask;
gpio->intc.irq_set_type = tegra186_irq_set_type;
gpio->intc.irq_set_wake = tegra186_irq_set_wake;
+ gpio->intc.irq_set_affinity = tegra186_irq_set_affinity;
irq = &gpio->gpio.irq;
irq->chip = &gpio->intc;
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 67f9f82e0db0..be539381fd82 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -6,13 +6,14 @@
*/
#include <linux/bitops.h>
-#include <linux/init.h>
+#include <linux/clk.h>
#include <linux/errno.h>
+#include <linux/gpio/driver.h>
+#include <linux/init.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <linux/io.h>
-#include <linux/gpio/driver.h>
#include <linux/slab.h>
/* Register Offset Definitions */
@@ -38,6 +39,7 @@
* @gpio_state: GPIO state shadow register
* @gpio_dir: GPIO direction shadow register
* @gpio_lock: Lock used for synchronization
+ * @clk: clock resource for this driver
*/
struct xgpio_instance {
struct gpio_chip gc;
@@ -46,6 +48,7 @@ struct xgpio_instance {
u32 gpio_state[2];
u32 gpio_dir[2];
spinlock_t gpio_lock[2];
+ struct clk *clk;
};
static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
@@ -257,6 +260,23 @@ static void xgpio_save_regs(struct xgpio_instance *chip)
}
/**
+ * xgpio_remove - Remove method for the GPIO device.
+ * @pdev: pointer to the platform device
+ *
+ * This function removes the gpiochip and frees all the allocated resources.
+ *
+ * Return: 0 always
+ */
+static int xgpio_remove(struct platform_device *pdev)
+{
+ struct xgpio_instance *gpio = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(gpio->clk);
+
+ return 0;
+}
+
+/**
* xgpio_of_probe - Probe method for the GPIO device.
* @pdev: pointer to the platform device
*
@@ -278,7 +298,8 @@ static int xgpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, chip);
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]);
+ if (of_property_read_u32(np, "xlnx,dout-default", &chip->gpio_state[0]))
+ chip->gpio_state[0] = 0x0;
/* Update GPIO direction shadow register with default value */
if (of_property_read_u32(np, "xlnx,tri-default", &chip->gpio_dir[0]))
@@ -298,8 +319,9 @@ static int xgpio_probe(struct platform_device *pdev)
if (is_dual) {
/* Update GPIO state shadow register with default value */
- of_property_read_u32(np, "xlnx,dout-default-2",
- &chip->gpio_state[1]);
+ if (of_property_read_u32(np, "xlnx,dout-default-2",
+ &chip->gpio_state[1]))
+ chip->gpio_state[1] = 0x0;
/* Update GPIO direction shadow register with default value */
if (of_property_read_u32(np, "xlnx,tri-default-2",
@@ -334,11 +356,25 @@ static int xgpio_probe(struct platform_device *pdev)
return PTR_ERR(chip->regs);
}
+ chip->clk = devm_clk_get_optional(&pdev->dev, NULL);
+ if (IS_ERR(chip->clk)) {
+ if (PTR_ERR(chip->clk) != -EPROBE_DEFER)
+ dev_dbg(&pdev->dev, "Input clock not found\n");
+ return PTR_ERR(chip->clk);
+ }
+
+ status = clk_prepare_enable(chip->clk);
+ if (status < 0) {
+ dev_err(&pdev->dev, "Failed to prepare clk\n");
+ return status;
+ }
+
xgpio_save_regs(chip);
status = devm_gpiochip_add_data(&pdev->dev, &chip->gc, chip);
if (status) {
dev_err(&pdev->dev, "failed to add GPIO chip\n");
+ clk_disable_unprepare(chip->clk);
return status;
}
@@ -354,6 +390,7 @@ MODULE_DEVICE_TABLE(of, xgpio_of_match);
static struct platform_driver xgpio_plat_driver = {
.probe = xgpio_probe,
+ .remove = xgpio_remove,
.driver = {
.name = "gpio-xilinx",
.of_match_table = xgpio_of_match,
diff --git a/drivers/gpio/gpio-xra1403.c b/drivers/gpio/gpio-xra1403.c
index e2cac12092af..49c878cfd5c6 100644
--- a/drivers/gpio/gpio-xra1403.c
+++ b/drivers/gpio/gpio-xra1403.c
@@ -186,15 +186,7 @@ static int xra1403_probe(struct spi_device *spi)
return ret;
}
- ret = devm_gpiochip_add_data(&spi->dev, &xra->chip, xra);
- if (ret < 0) {
- dev_err(&spi->dev, "Unable to register gpiochip\n");
- return ret;
- }
-
- spi_set_drvdata(spi, xra);
-
- return 0;
+ return devm_gpiochip_add_data(&spi->dev, &xra->chip, xra);
}
static const struct spi_device_id xra1403_ids[] = {
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 834a12f3219e..e37a57d0a2f0 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -205,6 +205,68 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
acpi_gpiochip_request_irq(acpi_gpio, event);
}
+static enum gpiod_flags
+acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio, int polarity)
+{
+ /* GpioInt() implies input configuration */
+ if (agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT)
+ return GPIOD_IN;
+
+ switch (agpio->io_restriction) {
+ case ACPI_IO_RESTRICT_INPUT:
+ return GPIOD_IN;
+ case ACPI_IO_RESTRICT_OUTPUT:
+ /*
+ * ACPI GPIO resources don't contain an initial value for the
+ * GPIO. Therefore we deduce that value from the pull field
+ * and the polarity instead. If the pin is pulled up we assume
+ * the default to be high, if it is pulled down we assume the
+ * default to be low, and otherwise we leave the pin untouched.
+ * For active-low polarity the values are inverted. See also
+ * Documentation/firmware-guide/acpi/gpio-properties.rst.
+ */
+ switch (agpio->pin_config) {
+ case ACPI_PIN_CONFIG_PULLUP:
+ return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_LOW : GPIOD_OUT_HIGH;
+ case ACPI_PIN_CONFIG_PULLDOWN:
+ return polarity == GPIO_ACTIVE_LOW ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Assume that the BIOS has configured the direction and pull
+ * accordingly.
+ */
+ return GPIOD_ASIS;
+}
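
The pull/polarity deduction above reduces to a small truth table; the rows follow directly from the nested switch:

    /*
     * io_restriction == ACPI_IO_RESTRICT_OUTPUT:
     *   PULLUP   + active-high -> GPIOD_OUT_HIGH
     *   PULLUP   + active-low  -> GPIOD_OUT_LOW
     *   PULLDOWN + active-high -> GPIOD_OUT_LOW
     *   PULLDOWN + active-low  -> GPIOD_OUT_HIGH
     *   no pull                -> GPIOD_ASIS
     *   GpioInt()              -> GPIOD_IN
     */
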
+
+static struct gpio_desc *acpi_request_own_gpiod(struct gpio_chip *chip,
+ struct acpi_resource_gpio *agpio,
+ unsigned int index,
+ const char *label)
+{
+ int polarity = GPIO_ACTIVE_HIGH;
+ enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio, polarity);
+ unsigned int pin = agpio->pin_table[index];
+ struct gpio_desc *desc;
+ int ret;
+
+ desc = gpiochip_request_own_desc(chip, pin, label, polarity, flags);
+ if (IS_ERR(desc))
+ return desc;
+
+ ret = gpio_set_debounce_timeout(desc, agpio->debounce_timeout);
+ if (ret)
+ gpiochip_free_own_desc(desc);
+
+ return ret ? ERR_PTR(ret) : desc;
+}
+
static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
{
const char *controller, *pin_str;
@@ -290,8 +352,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
if (!handler)
return AE_OK;
- desc = gpiochip_request_own_desc(chip, pin, "ACPI:Event",
- GPIO_ACTIVE_HIGH, GPIOD_IN);
+ desc = acpi_request_own_gpiod(chip, agpio, 0, "ACPI:Event");
if (IS_ERR(desc)) {
dev_err(chip->parent,
"Failed to request GPIO for pin 0x%04X, err %ld\n",
@@ -526,39 +587,6 @@ static bool acpi_get_driver_gpio_data(struct acpi_device *adev,
return false;
}
-static enum gpiod_flags
-acpi_gpio_to_gpiod_flags(const struct acpi_resource_gpio *agpio)
-{
- switch (agpio->io_restriction) {
- case ACPI_IO_RESTRICT_INPUT:
- return GPIOD_IN;
- case ACPI_IO_RESTRICT_OUTPUT:
- /*
- * ACPI GPIO resources don't contain an initial value for the
- * GPIO. Therefore we deduce that value from the pull field
- * instead. If the pin is pulled up we assume default to be
- * high, if it is pulled down we assume default to be low,
- * otherwise we leave pin untouched.
- */
- switch (agpio->pin_config) {
- case ACPI_PIN_CONFIG_PULLUP:
- return GPIOD_OUT_HIGH;
- case ACPI_PIN_CONFIG_PULLDOWN:
- return GPIOD_OUT_LOW;
- default:
- break;
- }
- default:
- break;
- }
-
- /*
- * Assume that the BIOS has configured the direction and pull
- * accordingly.
- */
- return GPIOD_ASIS;
-}
-
static int
__acpi_gpio_update_gpiod_flags(enum gpiod_flags *flags, enum gpiod_flags update)
{
@@ -633,7 +661,7 @@ int acpi_gpio_update_gpiod_lookup_flags(unsigned long *lookupflags,
struct acpi_gpio_lookup {
struct acpi_gpio_info info;
int index;
- int pin_index;
+ u16 pin_index;
bool active_low;
struct gpio_desc *desc;
int n;
@@ -649,7 +677,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
if (!lookup->desc) {
const struct acpi_resource_gpio *agpio = &ares->data.gpio;
bool gpioint = agpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT;
- int pin_index;
+ u16 pin_index;
if (lookup->info.quirks & ACPI_GPIO_QUIRK_ONLY_GPIOIO && gpioint)
lookup->index++;
@@ -664,6 +692,7 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
lookup->desc = acpi_get_gpiod(agpio->resource_source.string_ptr,
agpio->pin_table[pin_index]);
lookup->info.pin_config = agpio->pin_config;
+ lookup->info.debounce = agpio->debounce_timeout;
lookup->info.gpioint = gpioint;
/*
@@ -674,13 +703,13 @@ static int acpi_populate_gpio_lookup(struct acpi_resource *ares, void *data)
* - ACPI_ACTIVE_HIGH == GPIO_ACTIVE_HIGH
*/
if (lookup->info.gpioint) {
- lookup->info.flags = GPIOD_IN;
lookup->info.polarity = agpio->polarity;
lookup->info.triggering = agpio->triggering;
} else {
- lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio);
lookup->info.polarity = lookup->active_low;
}
+
+ lookup->info.flags = acpi_gpio_to_gpiod_flags(agpio, lookup->info.polarity);
}
return 1;
@@ -794,7 +823,7 @@ static struct gpio_desc *acpi_get_gpiod_by_index(struct acpi_device *adev,
if (ret)
return ERR_PTR(ret);
- dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %d %u\n",
+ dev_dbg(&adev->dev, "GPIO: _DSD returned %s %d %u %u\n",
dev_name(&lookup.info.adev->dev), lookup.index,
lookup.pin_index, lookup.active_low);
} else {
@@ -942,6 +971,7 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
if (info.gpioint && idx++ == index) {
unsigned long lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
+ enum gpiod_flags dflags = GPIOD_ASIS;
char label[32];
int irq;
@@ -952,11 +982,18 @@ int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
if (irq < 0)
return irq;
+ acpi_gpio_update_gpiod_flags(&dflags, &info);
+ acpi_gpio_update_gpiod_lookup_flags(&lflags, &info);
+
snprintf(label, sizeof(label), "GpioInt() %d", index);
- ret = gpiod_configure_flags(desc, label, lflags, info.flags);
+ ret = gpiod_configure_flags(desc, label, lflags, dflags);
if (ret < 0)
return ret;
+ ret = gpio_set_debounce_timeout(desc, info.debounce);
+ if (ret)
+ return ret;
+
irq_flags = acpi_dev_get_irq_type(info.triggering,
info.polarity);
@@ -982,7 +1019,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
struct gpio_chip *chip = achip->chip;
struct acpi_resource_gpio *agpio;
struct acpi_resource *ares;
- int pin_index = (int)address;
+ u16 pin_index = address;
acpi_status status;
int length;
int i;
@@ -1005,7 +1042,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
return AE_BAD_PARAMETER;
}
- length = min(agpio->pin_table_length, (u16)(pin_index + bits));
+ length = min_t(u16, agpio->pin_table_length, pin_index + bits);
for (i = pin_index; i < length; ++i) {
int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
@@ -1042,23 +1079,18 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
}
if (!found) {
- enum gpiod_flags flags = acpi_gpio_to_gpiod_flags(agpio);
- const char *label = "ACPI:OpRegion";
-
- desc = gpiochip_request_own_desc(chip, pin, label,
- GPIO_ACTIVE_HIGH,
- flags);
+ desc = acpi_request_own_gpiod(chip, agpio, i, "ACPI:OpRegion");
if (IS_ERR(desc)) {
- status = AE_ERROR;
mutex_unlock(&achip->conn_lock);
+ status = AE_ERROR;
goto out;
}
conn = kzalloc(sizeof(*conn), GFP_KERNEL);
if (!conn) {
- status = AE_NO_MEMORY;
gpiochip_free_own_desc(desc);
mutex_unlock(&achip->conn_lock);
+ status = AE_NO_MEMORY;
goto out;
}
@@ -1070,8 +1102,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
mutex_unlock(&achip->conn_lock);
if (function == ACPI_WRITE)
- gpiod_set_raw_value_cansleep(desc,
- !!((1 << i) & *value));
+ gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i)));
else
*value |= (u64)gpiod_get_raw_value_cansleep(desc) << i;
}
@@ -1132,7 +1163,7 @@ acpi_gpiochip_parse_own_gpio(struct acpi_gpio_chip *achip,
int ret;
*lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- *dflags = 0;
+ *dflags = GPIOD_ASIS;
*name = NULL;
ret = fwnode_property_read_u32_array(fwnode, "gpios", gpios,
diff --git a/drivers/gpio/gpiolib-acpi.h b/drivers/gpio/gpiolib-acpi.h
index 1c6d65cf0629..e2edb632b2cc 100644
--- a/drivers/gpio/gpiolib-acpi.h
+++ b/drivers/gpio/gpiolib-acpi.h
@@ -18,6 +18,7 @@ struct acpi_device;
* @pin_config: pin bias as provided by ACPI
* @polarity: interrupt polarity as provided by ACPI
* @triggering: triggering type as provided by ACPI
+ * @debounce: debounce timeout as provided by ACPI
* @quirks: Linux specific quirks as provided by struct acpi_gpio_mapping
*/
struct acpi_gpio_info {
@@ -27,6 +28,7 @@ struct acpi_gpio_info {
int pin_config;
int polarity;
int triggering;
+ unsigned int debounce;
unsigned int quirks;
};
diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c
index e9faeaf65d14..12b679ca552c 100644
--- a/drivers/gpio/gpiolib-cdev.c
+++ b/drivers/gpio/gpiolib-cdev.c
@@ -428,6 +428,12 @@ struct line {
*/
struct linereq *req;
unsigned int irq;
+ /*
+ * eflags is set by edge_detector_setup(), edge_detector_stop() and
+ * edge_detector_update(), which are themselves mutually exclusive,
+ * and is accessed by edge_irq_thread() and debounce_work_func(),
+ * which can both live with a slightly stale value.
+ */
u64 eflags;
/*
* timestamp_ns and req_seqno are accessed only by
@@ -504,11 +510,14 @@ struct linereq {
(GPIO_V2_LINE_FLAG_EDGE_RISING | \
GPIO_V2_LINE_FLAG_EDGE_FALLING)
+#define GPIO_V2_LINE_FLAG_EDGE_BOTH GPIO_V2_LINE_EDGE_FLAGS
+
#define GPIO_V2_LINE_VALID_FLAGS \
(GPIO_V2_LINE_FLAG_ACTIVE_LOW | \
GPIO_V2_LINE_DIRECTION_FLAGS | \
GPIO_V2_LINE_DRIVE_FLAGS | \
GPIO_V2_LINE_EDGE_FLAGS | \
+ GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME | \
GPIO_V2_LINE_BIAS_FLAGS)
static void linereq_put_event(struct linereq *lr,
@@ -529,11 +538,20 @@ static void linereq_put_event(struct linereq *lr,
pr_debug_ratelimited("event FIFO is full - event dropped\n");
}
+static u64 line_event_timestamp(struct line *line)
+{
+ if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &line->desc->flags))
+ return ktime_get_real_ns();
+
+ return ktime_get_ns();
+}
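
With line_event_timestamp() in place, userspace can opt into CLOCK_REALTIME stamps per line via the new flag. A hedged uAPI sketch (error handling omitted; the struct, flag, and ioctl names are from the <linux/gpio.h> v2 interface, the line offset is hypothetical):

    #include <linux/gpio.h>
    #include <sys/ioctl.h>

    struct gpio_v2_line_request req = { 0 };

    req.num_lines = 1;
    req.offsets[0] = 3;
    req.config.flags = GPIO_V2_LINE_FLAG_INPUT |
    		GPIO_V2_LINE_FLAG_EDGE_RISING |
    		GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
    ioctl(chip_fd, GPIO_V2_GET_LINE_IOCTL, &req);
    /* events read from req.fd now carry CLOCK_REALTIME timestamps */
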
+
static irqreturn_t edge_irq_thread(int irq, void *p)
{
struct line *line = p;
struct linereq *lr = line->req;
struct gpio_v2_line_event le;
+ u64 eflags;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
@@ -546,14 +564,14 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
* which case we didn't get the timestamp from
* edge_irq_handler().
*/
- le.timestamp_ns = ktime_get_ns();
+ le.timestamp_ns = line_event_timestamp(line);
if (lr->num_lines != 1)
line->req_seqno = atomic_inc_return(&lr->seqno);
}
line->timestamp_ns = 0;
- if (line->eflags == (GPIO_V2_LINE_FLAG_EDGE_RISING |
- GPIO_V2_LINE_FLAG_EDGE_FALLING)) {
+ eflags = READ_ONCE(line->eflags);
+ if (eflags == GPIO_V2_LINE_FLAG_EDGE_BOTH) {
int level = gpiod_get_value_cansleep(line->desc);
if (level)
@@ -562,10 +580,10 @@ static irqreturn_t edge_irq_thread(int irq, void *p)
else
/* Emit high-to-low event */
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
- } else if (line->eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) {
/* Emit low-to-high event */
le.id = GPIO_V2_LINE_EVENT_RISING_EDGE;
- } else if (line->eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
+ } else if (eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) {
/* Emit high-to-low event */
le.id = GPIO_V2_LINE_EVENT_FALLING_EDGE;
} else {
@@ -590,7 +608,7 @@ static irqreturn_t edge_irq_handler(int irq, void *p)
* Just store the timestamp in hardirq context so we get it as
* close in time as possible to the actual event.
*/
- line->timestamp_ns = ktime_get_ns();
+ line->timestamp_ns = line_event_timestamp(line);
if (lr->num_lines != 1)
line->req_seqno = atomic_inc_return(&lr->seqno);
@@ -634,6 +652,7 @@ static void debounce_work_func(struct work_struct *work)
struct line *line = container_of(work, struct line, work.work);
struct linereq *lr;
int level;
+ u64 eflags;
level = gpiod_get_raw_value_cansleep(line->desc);
if (level < 0) {
@@ -647,7 +666,8 @@ static void debounce_work_func(struct work_struct *work)
WRITE_ONCE(line->level, level);
/* -- edge detection -- */
- if (!line->eflags)
+ eflags = READ_ONCE(line->eflags);
+ if (!eflags)
return;
/* switch from physical level to logical - if they differ */
@@ -655,15 +675,15 @@ static void debounce_work_func(struct work_struct *work)
level = !level;
/* ignore edges that are not being monitored */
- if (((line->eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
- ((line->eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
+ if (((eflags == GPIO_V2_LINE_FLAG_EDGE_RISING) && !level) ||
+ ((eflags == GPIO_V2_LINE_FLAG_EDGE_FALLING) && level))
return;
/* Do not leak kernel stack to userspace */
memset(&le, 0, sizeof(le));
lr = line->req;
- le.timestamp_ns = ktime_get_ns();
+ le.timestamp_ns = line_event_timestamp(line);
le.offset = gpio_chip_hwgpio(line->desc);
line->line_seqno++;
le.line_seqno = line->line_seqno;
@@ -755,7 +775,7 @@ static void edge_detector_stop(struct line *line)
cancel_delayed_work_sync(&line->work);
WRITE_ONCE(line->sw_debounced, 0);
- line->eflags = 0;
+ WRITE_ONCE(line->eflags, 0);
/* do not change line->level - see comment in debounced_value() */
}
@@ -774,7 +794,7 @@ static int edge_detector_setup(struct line *line,
if (ret)
return ret;
}
- line->eflags = eflags;
+ WRITE_ONCE(line->eflags, eflags);
if (gpio_v2_line_config_debounced(lc, line_idx)) {
debounce_period_us = gpio_v2_line_config_debounce_period(lc, line_idx);
ret = debounce_setup(line, debounce_period_us);
@@ -817,13 +837,13 @@ static int edge_detector_update(struct line *line,
unsigned int debounce_period_us =
gpio_v2_line_config_debounce_period(lc, line_idx);
- if ((line->eflags == eflags) && !polarity_change &&
+ if ((READ_ONCE(line->eflags) == eflags) && !polarity_change &&
(READ_ONCE(line->desc->debounce_period_us) == debounce_period_us))
return 0;
/* sw debounced and still will be...*/
if (debounce_period_us && READ_ONCE(line->sw_debounced)) {
- line->eflags = eflags;
+ WRITE_ONCE(line->eflags, eflags);
WRITE_ONCE(line->desc->debounce_period_us, debounce_period_us);
return 0;
}
@@ -967,6 +987,9 @@ static void gpio_v2_line_config_flags_to_desc_flags(u64 flags,
flags & GPIO_V2_LINE_FLAG_BIAS_PULL_DOWN);
assign_bit(FLAG_BIAS_DISABLE, flagsp,
flags & GPIO_V2_LINE_FLAG_BIAS_DISABLED);
+
+ assign_bit(FLAG_EVENT_CLOCK_REALTIME, flagsp,
+ flags & GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME);
}
static long linereq_get_values(struct linereq *lr, void __user *ip)
@@ -1479,21 +1502,10 @@ static __poll_t lineevent_poll(struct file *file,
return events;
}
-static ssize_t lineevent_get_size(void)
-{
-#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
- /* i386 has no padding after 'id' */
- if (in_ia32_syscall()) {
- struct compat_gpioeevent_data {
- compat_u64 timestamp;
- u32 id;
- };
-
- return sizeof(struct compat_gpioeevent_data);
- }
-#endif
- return sizeof(struct gpioevent_data);
-}
+struct compat_gpioeevent_data {
+ compat_u64 timestamp;
+ u32 id;
+};
static ssize_t lineevent_read(struct file *file,
char __user *buf,
@@ -1515,7 +1527,10 @@ static ssize_t lineevent_read(struct file *file,
* actual sizeof() and pass this as an argument to copy_to_user() to
* drop unneeded bytes from the output.
*/
- ge_size = lineevent_get_size();
+ if (compat_need_64bit_alignment_fixup())
+ ge_size = sizeof(struct compat_gpioeevent_data);
+ else
+ ge_size = sizeof(struct gpioevent_data);
if (count < ge_size)
return -EINVAL;
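
The size being computed: struct gpioevent_data is a __u64 followed by a __u32, so a 64-bit kernel pads it to 16 bytes, while i386 (which aligns u64 to 4 bytes) packs it into 12. The compat struct pins down the latter layout; a sketch of the two:

    /* native 64-bit layout: 4 bytes of tail padding -> sizeof == 16 */
    struct gpioevent_data {
    	__u64 timestamp;	/* offset 0 */
    	__u32 id;		/* offset 8 */
    };

    /* i386 layout via compat_u64 (4-byte aligned) -> sizeof == 12 */
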
@@ -1910,6 +1925,7 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
test_bit(FLAG_USED_AS_IRQ, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags) ||
test_bit(FLAG_SYSFS, &desc->flags) ||
+ !gpiochip_line_is_valid(gc, info->offset) ||
!ok_for_pinctrl)
info->flags |= GPIO_V2_LINE_FLAG_USED;
@@ -1938,6 +1954,9 @@ static void gpio_desc_to_lineinfo(struct gpio_desc *desc,
if (test_bit(FLAG_EDGE_FALLING, &desc->flags))
info->flags |= GPIO_V2_LINE_FLAG_EDGE_FALLING;
+ if (test_bit(FLAG_EVENT_CLOCK_REALTIME, &desc->flags))
+ info->flags |= GPIO_V2_LINE_FLAG_EVENT_CLOCK_REALTIME;
+
debounce_period_us = READ_ONCE(desc->debounce_period_us);
if (debounce_period_us) {
info->attrs[num_attrs].id = GPIO_V2_LINE_ATTR_ID_DEBOUNCE;
diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c
index 7dbce4c4ebdf..4a517e5dedf0 100644
--- a/drivers/gpio/gpiolib-devres.c
+++ b/drivers/gpio/gpiolib-devres.c
@@ -246,10 +246,8 @@ struct gpio_desc *__must_check devm_gpiod_get_index_optional(struct device *dev,
struct gpio_desc *desc;
desc = devm_gpiod_get_index(dev, con_id, index, flags);
- if (IS_ERR(desc)) {
- if (PTR_ERR(desc) == -ENOENT)
- return NULL;
- }
+ if (gpiod_not_found(desc))
+ return NULL;
return desc;
}
@@ -308,7 +306,7 @@ devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
struct gpio_descs *descs;
descs = devm_gpiod_get_array(dev, con_id, flags);
- if (PTR_ERR(descs) == -ENOENT)
+ if (gpiod_not_found(descs))
return NULL;
return descs;
@@ -479,9 +477,9 @@ void devm_gpio_free(struct device *dev, unsigned int gpio)
}
EXPORT_SYMBOL_GPL(devm_gpio_free);
-static void devm_gpio_chip_release(struct device *dev, void *res)
+static void devm_gpio_chip_release(void *data)
{
- struct gpio_chip *gc = *(struct gpio_chip **)res;
+ struct gpio_chip *gc = data;
gpiochip_remove(gc);
}
@@ -507,23 +505,12 @@ int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, vo
struct lock_class_key *lock_key,
struct lock_class_key *request_key)
{
- struct gpio_chip **ptr;
int ret;
- ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr),
- GFP_KERNEL);
- if (!ptr)
- return -ENOMEM;
-
ret = gpiochip_add_data_with_key(gc, data, lock_key, request_key);
- if (ret < 0) {
- devres_free(ptr);
+ if (ret < 0)
return ret;
- }
- *ptr = gc;
- devres_add(dev, ptr);
-
- return 0;
+ return devm_add_action_or_reset(dev, devm_gpio_chip_release, gc);
}
EXPORT_SYMBOL_GPL(devm_gpiochip_add_data_with_key);
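
The conversion above swaps the open-coded devres_alloc()/devres_add() pair for devm_add_action_or_reset(), which registers a release callback and, if registration fails, runs it immediately so the caller never needs a manual unwind. The same pattern in miniature (all names hypothetical):

    static void foo_disable(void *data)
    {
    	struct foo *foo = data;

    	foo_hw_disable(foo);
    }

    /* in probe: cleanup runs on detach, or right away if registration fails */
    ret = devm_add_action_or_reset(dev, foo_disable, foo);
    if (ret)
    	return ret;
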
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 2f895a2b8411..b4a71119a4b0 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -509,31 +509,31 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
&of_flags);
- if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
+ if (!gpiod_not_found(desc))
break;
}
- if (PTR_ERR(desc) == -ENOENT) {
+ if (gpiod_not_found(desc)) {
/* Special handling for SPI GPIOs if used */
desc = of_find_spi_gpio(dev, con_id, &of_flags);
}
- if (PTR_ERR(desc) == -ENOENT) {
+ if (gpiod_not_found(desc)) {
/* This quirk looks up flags and all */
desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
if (!IS_ERR(desc))
return desc;
}
- if (PTR_ERR(desc) == -ENOENT) {
+ if (gpiod_not_found(desc)) {
/* Special handling for regulator GPIOs if used */
desc = of_find_regulator_gpio(dev, con_id, &of_flags);
}
- if (PTR_ERR(desc) == -ENOENT)
+ if (gpiod_not_found(desc))
desc = of_find_arizona_gpio(dev, con_id, &of_flags);
- if (PTR_ERR(desc) == -ENOENT)
+ if (gpiod_not_found(desc))
desc = of_find_usb_gpio(dev, con_id, &of_flags);
if (IS_ERR(desc))
@@ -593,7 +593,7 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np,
xlate_flags = 0;
*lflags = GPIO_LOOKUP_FLAGS_DEFAULT;
- *dflags = 0;
+ *dflags = GPIOD_ASIS;
ret = of_property_read_u32(chip_np, "#gpio-cells", &tmp);
if (ret)
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 728f6c687182..26c5466b8179 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -476,7 +476,7 @@ static ssize_t export_store(struct class *class,
*/
status = gpiod_request(desc, "sysfs");
- if (status < 0) {
+ if (status) {
if (status == -EPROBE_DEFER)
status = -ENODEV;
goto done;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 6e3c4d7a7d14..b02cc2abd3b6 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -119,7 +120,7 @@ struct gpio_desc *gpio_to_desc(unsigned gpio)
spin_unlock_irqrestore(&gpio_lock, flags);
if (!gpio_is_valid(gpio))
- WARN(1, "invalid GPIO %d\n", gpio);
+ pr_warn("invalid GPIO %d\n", gpio);
return NULL;
}
@@ -211,7 +212,7 @@ static int gpiochip_find_base(int ngpio)
int gpiod_get_direction(struct gpio_desc *desc)
{
struct gpio_chip *gc;
- unsigned offset;
+ unsigned int offset;
int ret;
gc = gpiod_to_chip(desc);
@@ -771,9 +772,11 @@ err_free_ida:
ida_free(&gpio_ida, gdev->id);
err_free_gdev:
/* failures here can mean systems won't boot... */
- pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
- gdev->base, gdev->base + gdev->ngpio - 1,
- gc->label ? : "generic", ret);
+ if (ret != -EPROBE_DEFER) {
+ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
+ gdev->base, gdev->base + gdev->ngpio - 1,
+ gc->label ? : "generic", ret);
+ }
kfree(gdev);
return ret;
}
@@ -936,67 +939,6 @@ bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc,
}
EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid);
-/**
- * gpiochip_set_cascaded_irqchip() - connects a cascaded irqchip to a gpiochip
- * @gc: the gpiochip to set the irqchip chain to
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * cascaded irqchip
- * @parent_handler: the parent interrupt handler for the accumulated IRQ
- * coming out of the gpiochip. If the interrupt is nested rather than
- * cascaded, pass NULL in this handler argument
- */
-static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler)
-{
- struct gpio_irq_chip *girq = &gc->irq;
- struct device *dev = &gc->gpiodev->dev;
-
- if (!girq->domain) {
- chip_err(gc, "called %s before setting up irqchip\n",
- __func__);
- return;
- }
-
- if (parent_handler) {
- if (gc->can_sleep) {
- chip_err(gc,
- "you cannot have chained interrupts on a chip that may sleep\n");
- return;
- }
- girq->parents = devm_kcalloc(dev, 1,
- sizeof(*girq->parents),
- GFP_KERNEL);
- if (!girq->parents) {
- chip_err(gc, "out of memory allocating parent IRQ\n");
- return;
- }
- girq->parents[0] = parent_irq;
- girq->num_parents = 1;
- /*
- * The parent irqchip is already using the chip_data for this
- * irqchip, so our callbacks simply use the handler_data.
- */
- irq_set_chained_handler_and_data(parent_irq, parent_handler,
- gc);
- }
-}
-
-/**
- * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip
- * @gc: the gpiochip to set the irqchip nested handler to
- * @irqchip: the irqchip to nest to the gpiochip
- * @parent_irq: the irq number corresponding to the parent IRQ for this
- * nested irqchip
- */
-void gpiochip_set_nested_irqchip(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int parent_irq)
-{
- gpiochip_set_cascaded_irqchip(gc, parent_irq, NULL);
-}
-EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip);
-
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
@@ -1394,7 +1336,7 @@ void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate);
-static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset)
+static int gpiochip_to_irq(struct gpio_chip *gc, unsigned int offset)
{
struct irq_domain *domain = gc->irq.domain;
@@ -1477,7 +1419,8 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gc)
if (WARN_ON(gc->irq.irq_enable))
return;
/* Check if the irqchip already has this hook... */
- if (irqchip->irq_enable == gpiochip_irq_enable) {
+ if (irqchip->irq_enable == gpiochip_irq_enable ||
+ irqchip->irq_mask == gpiochip_irq_mask) {
/*
* ...and if so, give a gentle warning that this is bad
* practice.
@@ -1648,98 +1591,6 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gc)
}
/**
- * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
- * @gc: the gpiochip to add the irqchip to
- * @irqchip: the irqchip to add to the gpiochip
- * @first_irq: if not dynamically assigned, the base (first) IRQ to
- * allocate gpiochip irqs from
- * @handler: the irq handler to use (often a predefined irq core function)
- * @type: the default type for IRQs on this irqchip, pass IRQ_TYPE_NONE
- * to have the core avoid setting up any default type in the hardware.
- * @threaded: whether this irqchip uses a nested thread handler
- * @lock_key: lockdep class for IRQ lock
- * @request_key: lockdep class for IRQ request
- *
- * This function closely associates a certain irqchip with a certain
- * gpiochip, providing an irq domain to translate the local IRQs to
- * global irqs in the gpiolib core, and making sure that the gpiochip
- * is passed as chip data to all related functions. Driver callbacks
- * need to use gpiochip_get_data() to get their local state containers back
- * from the gpiochip passed as chip data. An irqdomain will be stored
- * in the gpiochip that shall be used by the driver to handle IRQ number
- * translation. The gpiochip will need to be initialized and registered
- * before calling this function.
- *
- * This function will handle two cell:ed simple IRQs and assumes all
- * the pins on the gpiochip can generate a unique IRQ. Everything else
- * need to be open coded.
- */
-int gpiochip_irqchip_add_key(struct gpio_chip *gc,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool threaded,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key)
-{
- struct device_node *of_node;
-
- if (!gc || !irqchip)
- return -EINVAL;
-
- if (!gc->parent) {
- chip_err(gc, "missing gpiochip .dev parent pointer\n");
- return -EINVAL;
- }
- gc->irq.threaded = threaded;
- of_node = gc->parent->of_node;
-#ifdef CONFIG_OF_GPIO
- /*
- * If the gpiochip has an assigned OF node this takes precedence
- * FIXME: get rid of this and use gc->parent->of_node
- * everywhere
- */
- if (gc->of_node)
- of_node = gc->of_node;
-#endif
- /*
- * Specifying a default trigger is a terrible idea if DT or ACPI is
- * used to configure the interrupts, as you may end-up with
- * conflicting triggers. Tell the user, and reset to NONE.
- */
- if (WARN(of_node && type != IRQ_TYPE_NONE,
- "%pOF: Ignoring %d default trigger\n", of_node, type))
- type = IRQ_TYPE_NONE;
- if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) {
- acpi_handle_warn(ACPI_HANDLE(gc->parent),
- "Ignoring %d default trigger\n", type);
- type = IRQ_TYPE_NONE;
- }
-
- gc->irq.chip = irqchip;
- gc->irq.handler = handler;
- gc->irq.default_type = type;
- gc->to_irq = gpiochip_to_irq;
- gc->irq.lock_key = lock_key;
- gc->irq.request_key = request_key;
- gc->irq.domain = irq_domain_add_simple(of_node,
- gc->ngpio, first_irq,
- &gpiochip_domain_ops, gc);
- if (!gc->irq.domain) {
- gc->irq.chip = NULL;
- return -EINVAL;
- }
-
- gpiochip_set_irq_hooks(gc);
-
- acpi_gpiochip_request_interrupts(gc);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
-
-/**
* gpiochip_irqchip_add_domain() - adds an irqdomain to a gpiochip
* @gc: the gpiochip to add the irqchip to
* @domain: the irqdomain to add to the gpiochip
@@ -1788,7 +1639,7 @@ static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc)
* @gc: the gpiochip owning the GPIO
* @offset: the offset of the GPIO to request for GPIO function
*/
-int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset)
+int gpiochip_generic_request(struct gpio_chip *gc, unsigned int offset)
{
#ifdef CONFIG_PINCTRL
if (list_empty(&gc->gpiodev->pin_ranges))
@@ -1804,7 +1655,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_request);
* @gc: the gpiochip to request the gpio function for
* @offset: the offset of the GPIO to free from GPIO function
*/
-void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset)
+void gpiochip_generic_free(struct gpio_chip *gc, unsigned int offset)
{
#ifdef CONFIG_PINCTRL
if (list_empty(&gc->gpiodev->pin_ranges))
@@ -1821,7 +1672,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_free);
* @offset: the offset of the GPIO to apply the configuration
* @config: the configuration to be applied
*/
-int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset,
+int gpiochip_generic_config(struct gpio_chip *gc, unsigned int offset,
unsigned long config)
{
return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config);
@@ -1985,11 +1836,9 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
desc_set_label(desc, label ? : "?");
- ret = 0;
} else {
- kfree_const(label);
ret = -EBUSY;
- goto done;
+ goto out_free_unlock;
}
if (gc->request) {
@@ -2002,11 +1851,10 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
ret = -EINVAL;
spin_lock_irqsave(&gpio_lock, flags);
- if (ret < 0) {
+ if (ret) {
desc_set_label(desc, NULL);
- kfree_const(label);
clear_bit(FLAG_REQUESTED, &desc->flags);
- goto done;
+ goto out_free_unlock;
}
}
if (gc->get_direction) {
@@ -2015,8 +1863,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
gpiod_get_direction(desc);
spin_lock_irqsave(&gpio_lock, flags);
}
-done:
spin_unlock_irqrestore(&gpio_lock, flags);
+ return 0;
+
+out_free_unlock:
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ kfree_const(label);
return ret;
}
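
The gpiod_request_commit() rework above replaces the shared "done:" label
with a dedicated error exit, so the label string is freed exactly once, and
only after the spinlock has been dropped. A minimal sketch of the pattern,
with a hypothetical do_claim() callback standing in for the chip hooks:

	#include <linux/spinlock.h>
	#include <linux/string.h>

	static int request_with_label(spinlock_t *lock, const char *label,
				      int (*do_claim)(void))
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(lock, flags);
		ret = do_claim();
		if (ret)
			goto out_free_unlock;
		spin_unlock_irqrestore(lock, flags);
		return 0;

	out_free_unlock:
		spin_unlock_irqrestore(lock, flags);
		kfree_const(label);	/* free once, outside the lock */
		return ret;
	}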
@@ -2068,7 +1920,7 @@ int gpiod_request(struct gpio_desc *desc, const char *label)
if (try_module_get(gdev->owner)) {
ret = gpiod_request_commit(desc, label);
- if (ret < 0)
+ if (ret)
module_put(gdev->owner);
else
get_device(&gdev->dev);
@@ -2151,7 +2003,7 @@ void gpiod_free(struct gpio_desc *desc)
* help with diagnostics, and knowing that the signal is used as a GPIO
* can help avoid accidentally multiplexing it to another controller.
*/
-const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset)
+const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset)
{
struct gpio_desc *desc;
@@ -2251,30 +2103,49 @@ static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
return gc->set_config(gc, offset, config);
}
-static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
+static int gpio_set_config_with_argument(struct gpio_desc *desc,
+ enum pin_config_param mode,
+ u32 argument)
{
struct gpio_chip *gc = desc->gdev->chip;
unsigned long config;
- unsigned arg;
+
+ config = pinconf_to_config_packed(mode, argument);
+ return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+}
+
+static int gpio_set_config_with_argument_optional(struct gpio_desc *desc,
+ enum pin_config_param mode,
+ u32 argument)
+{
+ struct device *dev = &desc->gdev->dev;
+ int gpio = gpio_chip_hwgpio(desc);
+ int ret;
+
+ ret = gpio_set_config_with_argument(desc, mode, argument);
+ if (ret != -ENOTSUPP)
+ return ret;
switch (mode) {
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_BIAS_PULL_UP:
- arg = 1;
+ case PIN_CONFIG_PERSIST_STATE:
+ dev_dbg(dev, "Persistence not supported for GPIO %d\n", gpio);
break;
-
default:
- arg = 0;
+ break;
}
- config = PIN_CONF_PACKED(mode, arg);
- return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config);
+ return 0;
+}
+
+static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode)
+{
+ return gpio_set_config_with_argument(desc, mode, 0);
}
static int gpio_set_bias(struct gpio_desc *desc)
{
- int bias = 0;
- int ret = 0;
+ enum pin_config_param bias;
+ unsigned int arg;
if (test_bit(FLAG_BIAS_DISABLE, &desc->flags))
bias = PIN_CONFIG_BIAS_DISABLE;
@@ -2282,13 +2153,28 @@ static int gpio_set_bias(struct gpio_desc *desc)
bias = PIN_CONFIG_BIAS_PULL_UP;
else if (test_bit(FLAG_PULL_DOWN, &desc->flags))
bias = PIN_CONFIG_BIAS_PULL_DOWN;
+ else
+ return 0;
- if (bias) {
- ret = gpio_set_config(desc, bias);
- if (ret != -ENOTSUPP)
- return ret;
+ switch (bias) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+
+ default:
+ arg = 0;
+ break;
}
- return 0;
+
+ return gpio_set_config_with_argument_optional(desc, bias, arg);
+}
+
+int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce)
+{
+ return gpio_set_config_with_argument_optional(desc,
+ PIN_CONFIG_INPUT_DEBOUNCE,
+ debounce);
}
/**
@@ -2510,7 +2396,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_config);
* 0 on success, %-ENOTSUPP if the controller doesn't support setting the
* debounce time.
*/
-int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
+int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce)
{
unsigned long config;
@@ -2529,11 +2415,6 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce);
*/
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
{
- struct gpio_chip *gc;
- unsigned long packed;
- int gpio;
- int rc;
-
VALIDATE_DESC(desc);
/*
* Handle FLAG_TRANSITORY first, enabling queries to gpiolib for
@@ -2542,21 +2423,9 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
assign_bit(FLAG_TRANSITORY, &desc->flags, transitory);
/* If the driver supports it, set the persistence state now */
- gc = desc->gdev->chip;
- if (!gc->set_config)
- return 0;
-
- packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
- !transitory);
- gpio = gpio_chip_hwgpio(desc);
- rc = gpio_do_set_config(gc, gpio, packed);
- if (rc == -ENOTSUPP) {
- dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
- gpio);
- return 0;
- }
-
- return rc;
+ return gpio_set_config_with_argument_optional(desc,
+ PIN_CONFIG_PERSIST_STATE,
+ !transitory);
}
EXPORT_SYMBOL_GPL(gpiod_set_transitory);
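
All three users above — bias, the new gpio_set_debounce_timeout(), and
persistence — now funnel through gpio_set_config_with_argument_optional(),
which packs the pinconf parameter and argument into a single unsigned long
and treats -ENOTSUPP from the driver as a soft failure. A small sketch of
the packing round-trip (the 5000 us value is illustrative only):

	#include <linux/pinctrl/pinconf-generic.h>

	static void pinconf_pack_demo(void)
	{
		/* pack PIN_CONFIG_INPUT_DEBOUNCE with a 5000 us argument... */
		unsigned long config =
			pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, 5000);

		/* ...and recover both halves, as a provider would */
		enum pin_config_param param = pinconf_to_config_param(config);
		u32 arg = pinconf_to_config_argument(config);

		(void)param;
		(void)arg;
	}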
@@ -3784,7 +3653,7 @@ struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
desc = fwnode_get_named_gpiod(fwnode, prop_name, index, flags,
label);
- if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+ if (!gpiod_not_found(desc))
break;
}
@@ -3960,7 +3829,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* Either we are not using DT or ACPI, or their lookup did not return
* a result. In that case, use platform lookup as a fallback.
*/
- if (!desc || desc == ERR_PTR(-ENOENT)) {
+ if (!desc || gpiod_not_found(desc)) {
dev_dbg(dev, "using lookup tables for GPIO lookup\n");
desc = gpiod_find(dev, con_id, idx, &lookupflags);
}
@@ -3975,7 +3844,7 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
* the device name as label
*/
ret = gpiod_request(desc, con_id ? con_id : devname);
- if (ret < 0) {
+ if (ret) {
if (ret == -EBUSY && flags & GPIOD_FLAGS_BIT_NONEXCLUSIVE) {
/*
* This happens when there are several consumers for
@@ -4095,10 +3964,8 @@ struct gpio_desc *__must_check gpiod_get_index_optional(struct device *dev,
struct gpio_desc *desc;
desc = gpiod_get_index(dev, con_id, index, flags);
- if (IS_ERR(desc)) {
- if (PTR_ERR(desc) == -ENOENT)
- return NULL;
- }
+ if (gpiod_not_found(desc))
+ return NULL;
return desc;
}
@@ -4300,7 +4167,7 @@ struct gpio_descs *__must_check gpiod_get_array_optional(struct device *dev,
struct gpio_descs *descs;
descs = gpiod_get_array(dev, con_id, flags);
- if (PTR_ERR(descs) == -ENOENT)
+ if (gpiod_not_found(descs))
return NULL;
return descs;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index b674b5bb980e..30bc3f80f83e 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -116,6 +116,7 @@ struct gpio_desc {
#define FLAG_BIAS_DISABLE 15 /* GPIO has pull disabled */
#define FLAG_EDGE_RISING 16 /* GPIO CDEV detects rising edge events */
#define FLAG_EDGE_FALLING 17 /* GPIO CDEV detects falling edge events */
+#define FLAG_EVENT_CLOCK_REALTIME 18 /* GPIO CDEV reports REALTIME timestamps in events */
/* Connection label */
const char *label;
@@ -130,10 +131,13 @@ struct gpio_desc {
#endif
};
+#define gpiod_not_found(desc) (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
+
int gpiod_request(struct gpio_desc *desc, const char *label);
void gpiod_free(struct gpio_desc *desc);
int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
unsigned long lflags, enum gpiod_flags dflags);
+int gpio_set_debounce_timeout(struct gpio_desc *desc, unsigned int debounce);
int gpiod_hog(struct gpio_desc *desc, const char *name,
unsigned long lflags, enum gpiod_flags dflags);
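
The new gpiod_not_found() macro names the recurring
IS_ERR() && PTR_ERR() == -ENOENT test that the lookup quirks above repeat.
A consumer-side sketch of the same idiom (the "reset" con_id is
hypothetical):

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>

	static struct gpio_desc *try_get_reset_gpio(struct device *dev)
	{
		struct gpio_desc *desc = gpiod_get(dev, "reset", GPIOD_OUT_LOW);

		/* absent is acceptable; any other error is not */
		if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
			return NULL;
		return desc;
	}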
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 6e2953233231..5993dd0fdd8e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1024,6 +1024,7 @@ struct amdgpu_device {
/* enable runtime pm on the device */
bool runpm;
bool in_runpm;
+ bool has_pr3;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -1230,6 +1231,7 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
const u32 *registers,
const u32 array_size);
+bool amdgpu_device_supports_atpx(struct drm_device *dev);
bool amdgpu_device_supports_boco(struct drm_device *dev);
bool amdgpu_device_supports_baco(struct drm_device *dev);
bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
@@ -1280,6 +1282,8 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
+int amdgpu_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
/*
* functions used by amdgpu_encoder.c
@@ -1311,11 +1315,11 @@ int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_device *adev,
struct amdgpu_dm_backlight_caps *caps);
-bool amdgpu_acpi_is_s0ix_supported(void);
+bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
-static inline bool amdgpu_acpi_is_s0ix_supported(void) { return false; }
+static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
#endif
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 4f4fda53c08a..8155c54392c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -901,10 +901,12 @@ void amdgpu_acpi_fini(struct amdgpu_device *adev)
*
* returns true if supported, false if not.
*/
-bool amdgpu_acpi_is_s0ix_supported(void)
+bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
{
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
- return true;
+ if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {
+ if (adev->flags & AMD_IS_APU)
+ return true;
+ }
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 7791d074bd32..2d991da2cead 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1213,7 +1213,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
ret = amdgpu_amdkfd_reserve_mem_limit(adev, size, alloc_domain, !!sg);
if (ret) {
- pr_debug("Insufficient system memory\n");
+ pr_debug("Insufficient memory\n");
goto err_reserve_limit;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 65d1b23d7e74..b9c11c2b2885 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1414,10 +1414,12 @@ out:
pm_runtime_put_autosuspend(connector->dev->dev);
}
- drm_dp_set_subconnector_property(&amdgpu_connector->base,
- ret,
- amdgpu_dig_connector->dpcd,
- amdgpu_dig_connector->downstream_ports);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP)
+ drm_dp_set_subconnector_property(&amdgpu_connector->base,
+ ret,
+ amdgpu_dig_connector->dpcd,
+ amdgpu_dig_connector->downstream_ports);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 79dd85f71fab..b69c34074d8d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -212,14 +212,14 @@ static DEVICE_ATTR(serial_number, S_IRUGO,
amdgpu_device_get_serial_number, NULL);
/**
- * amdgpu_device_supports_boco - Is the device a dGPU with HG/PX power control
+ * amdgpu_device_supports_atpx - Is the device a dGPU with HG/PX power control
*
* @dev: drm_device pointer
*
* Returns true if the device is a dGPU with HG/PX power control,
* otherwise return false.
*/
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_atpx(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -229,6 +229,23 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
}
/**
+ * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
+ *
+ * @dev: drm_device pointer
+ *
+ * Returns true if the device is a dGPU with ACPI power resources (_PR3),
+ * otherwise return false.
+ */
+bool amdgpu_device_supports_boco(struct drm_device *dev)
+{
+ struct amdgpu_device *adev = drm_to_adev(dev);
+
+ if (adev->has_pr3)
+ return true;
+ return false;
+}
+
+/**
* amdgpu_device_supports_baco - Does the device support BACO
*
* @dev: drm_device pointer
@@ -1398,7 +1415,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
struct drm_device *dev = pci_get_drvdata(pdev);
int r;
- if (amdgpu_device_supports_boco(dev) && state == VGA_SWITCHEROO_OFF)
+ if (amdgpu_device_supports_atpx(dev) && state == VGA_SWITCHEROO_OFF)
return;
if (state == VGA_SWITCHEROO_ON) {
@@ -2531,11 +2548,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (adev->gmc.xgmi.num_physical_nodes > 1)
amdgpu_xgmi_remove_device(adev);
- amdgpu_amdkfd_device_fini(adev);
-
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
+ amdgpu_amdkfd_device_fini(adev);
+
/* need to disable SMC first */
for (i = 0; i < adev->num_ip_blocks; i++) {
if (!adev->ip_blocks[i].status.hw)
@@ -2650,7 +2667,7 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
int i, r;
- if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev)) {
+ if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev)) {
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
}
@@ -3177,7 +3194,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
struct drm_device *ddev = adev_to_drm(adev);
struct pci_dev *pdev = adev->pdev;
int r, i;
- bool boco = false;
+ bool atpx = false;
u32 max_MBps;
adev->shutdown = false;
@@ -3349,15 +3366,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
- if (amdgpu_device_supports_boco(ddev))
- boco = true;
+ if (amdgpu_device_supports_atpx(ddev))
+ atpx = true;
if (amdgpu_has_atpx() &&
(amdgpu_is_atpx_hybrid() ||
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_register_client(adev->pdev,
- &amdgpu_switcheroo_ops, boco);
- if (boco)
+ &amdgpu_switcheroo_ops, atpx);
+ if (atpx)
vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
if (amdgpu_emu_mode == 1) {
@@ -3540,7 +3557,7 @@ fence_driver_init:
failed:
amdgpu_vf_error_trans_all(adev);
- if (boco)
+ if (atpx)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
failed_unmap:
@@ -3604,7 +3621,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
amdgpu_has_atpx_dgpu_power_cntl()) &&
!pci_is_thunderbolt_attached(adev->pdev))
vga_switcheroo_unregister_client(adev->pdev);
- if (amdgpu_device_supports_boco(adev_to_drm(adev)))
+ if (amdgpu_device_supports_atpx(adev_to_drm(adev)))
vga_switcheroo_fini_domain_pm_ops(adev->dev);
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
@@ -3710,7 +3727,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
amdgpu_fence_driver_suspend(adev);
- if (!amdgpu_acpi_is_s0ix_supported() || amdgpu_in_reset(adev))
+ if (!amdgpu_acpi_is_s0ix_supported(adev) || amdgpu_in_reset(adev))
r = amdgpu_device_ip_suspend_phase2(adev);
else
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);
@@ -3744,7 +3761,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (amdgpu_acpi_is_s0ix_supported())
+ if (amdgpu_acpi_is_s0ix_supported(adev))
amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);
/* post card */
@@ -5052,8 +5069,7 @@ out:
* @pdev: pointer to PCI device
*
* Called when the error recovery driver tells us that it's
- * OK to resume normal operation. Use completion to allow
- * halted scsi ops to resume.
+ * OK to resume normal operation.
*/
void amdgpu_pci_resume(struct pci_dev *pdev)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index ebdab31f9de9..72efd579ec5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1340,7 +1340,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
adev->in_runpm = true;
- if (amdgpu_device_supports_boco(drm_dev))
+ if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@@ -1348,13 +1348,11 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
if (ret)
return ret;
- if (amdgpu_device_supports_boco(drm_dev)) {
+ if (amdgpu_device_supports_atpx(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX;
* PCI core handles it for _PR3.
*/
- if (amdgpu_is_atpx_hybrid()) {
- pci_ignore_hotplug(pdev);
- } else {
+ if (!amdgpu_is_atpx_hybrid()) {
amdgpu_device_cache_pci_state(pdev);
pci_disable_device(pdev);
pci_ignore_hotplug(pdev);
@@ -1378,28 +1376,31 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
if (!adev->runpm)
return -EINVAL;
- if (amdgpu_device_supports_boco(drm_dev)) {
+ if (amdgpu_device_supports_atpx(drm_dev)) {
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
/* Only need to handle PCI state in the driver for ATPX;
* PCI core handles it for _PR3.
*/
- if (amdgpu_is_atpx_hybrid()) {
- pci_set_master(pdev);
- } else {
+ if (!amdgpu_is_atpx_hybrid()) {
pci_set_power_state(pdev, PCI_D0);
amdgpu_device_load_pci_state(pdev);
ret = pci_enable_device(pdev);
if (ret)
return ret;
- pci_set_master(pdev);
}
+ pci_set_master(pdev);
+ } else if (amdgpu_device_supports_boco(drm_dev)) {
+ /* PCI core handles PCI state for _PR3; the driver only
+ * needs to re-enable bus mastering here.
+ */
+ pci_set_master(pdev);
} else if (amdgpu_device_supports_baco(drm_dev)) {
amdgpu_device_baco_exit(drm_dev);
}
ret = amdgpu_device_resume(drm_dev, false);
drm_kms_helper_poll_enable(drm_dev);
- if (amdgpu_device_supports_boco(drm_dev))
+ if (amdgpu_device_supports_atpx(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
adev->in_runpm = false;
return 0;
@@ -1533,8 +1534,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
-
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 02af47ddddbc..6e679db5e46f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -496,13 +496,15 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev)
break;
}
- if (!amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE))
+ if (amdgpu_sriov_vf(adev) ||
+ !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) {
size = 0;
- else
+ } else {
size = amdgpu_gmc_get_vbios_fb_size(adev);
- if (adev->mman.keep_stolen_vga_memory)
- size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+ if (adev->mman.keep_stolen_vga_memory)
+ size = max(size, (unsigned)AMDGPU_VBIOS_VGA_ALLOCATION);
+ }
/* set to 0 if the pre-OS buffer uses up most of vram */
if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fc12fc72366f..b16b32797624 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -133,6 +133,7 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev)
int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
{
struct drm_device *dev;
+ struct pci_dev *parent;
int r, acpi_status;
dev = adev_to_drm(adev);
@@ -144,6 +145,9 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
!pci_is_thunderbolt_attached(dev->pdev))
flags |= AMD_IS_PX;
+ parent = pci_upstream_bridge(adev->pdev);
+ adev->has_pr3 = parent ? pci_pr3_present(parent) : false;
+
/* amdgpu_device_init should report only fatal error
* like memory allocation failure or iomapping failure,
* or memory manager initialization failure, it must
@@ -156,9 +160,14 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
goto out;
}
- if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
+ if (amdgpu_device_supports_atpx(dev) &&
+ (amdgpu_runtime_pm != 0)) { /* enable runpm by default for atpx */
+ adev->runpm = true;
+ dev_info(adev->dev, "Using ATPX for runtime pm\n");
+ } else if (amdgpu_device_supports_boco(dev) &&
+ (amdgpu_runtime_pm != 0)) { /* enable runpm by default for boco */
adev->runpm = true;
+ dev_info(adev->dev, "Using BOCO for runtime pm\n");
} else if (amdgpu_device_supports_baco(dev) &&
(amdgpu_runtime_pm != 0)) {
switch (adev->asic_type) {
@@ -180,6 +189,8 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
adev->runpm = true;
break;
}
+ if (adev->runpm)
+ dev_info(adev->dev, "Using BACO for runtime pm\n");
}
/* Call ACPI methods: require modeset init
@@ -192,7 +203,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags)
if (adev->runpm) {
/* only need to skip on ATPX */
- if (amdgpu_device_supports_boco(dev) &&
+ if (amdgpu_device_supports_atpx(dev) &&
!amdgpu_is_atpx_hybrid())
dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
pm_runtime_use_autosuspend(dev->dev);
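
The load path above now distinguishes three runtime-pm backends and logs
which one it selected, in strict precedence order: ATPX (vendor ACPI power
control), then BOCO (standard ACPI _PR3 on the upstream bridge), then BACO
(bus active, chip off). A hedged sketch of that ladder with the predicates
reduced to booleans:

	#include <stdbool.h>

	enum runpm_backend { RUNPM_NONE, RUNPM_ATPX, RUNPM_BOCO, RUNPM_BACO };

	static enum runpm_backend pick_runpm_backend(bool atpx, bool pr3,
						     bool baco)
	{
		if (atpx)
			return RUNPM_ATPX;	/* vendor ACPI (HG/PX) */
		if (pr3)
			return RUNPM_BOCO;	/* standard ACPI power resources */
		if (baco)
			return RUNPM_BACO;	/* bus active, chip off */
		return RUNPM_NONE;
	}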
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 523d22db094b..347fec669424 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -563,7 +563,7 @@ static int psp_asd_load(struct psp_context *psp)
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd_fw)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_ucode_size)
return 0;
cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
@@ -1315,8 +1315,12 @@ static int psp_hdcp_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp_context.hdcp_initialized)
- return 0;
+ if (!psp->hdcp_context.hdcp_initialized) {
+ if (psp->hdcp_context.hdcp_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_hdcp_unload(psp);
if (ret)
@@ -1324,6 +1328,7 @@ static int psp_hdcp_terminate(struct psp_context *psp)
psp->hdcp_context.hdcp_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo,
&psp->hdcp_context.hdcp_shared_mc_addr,
@@ -1462,8 +1467,12 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm_context.dtm_initialized)
- return 0;
+ if (!psp->dtm_context.dtm_initialized) {
+ if (psp->dtm_context.dtm_shared_buf)
+ goto out;
+ else
+ return 0;
+ }
ret = psp_dtm_unload(psp);
if (ret)
@@ -1471,6 +1480,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
psp->dtm_context.dtm_initialized = false;
+out:
/* free hdcp shared memory */
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo,
&psp->dtm_context.dtm_shared_mc_addr,
@@ -2589,11 +2599,10 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd_fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_fw_version = le32_to_cpu(desc->fw_version);
psp->asd_feature_version = le32_to_cpu(desc->fw_version);
- psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
+ psp->asd_ucode_size = le32_to_cpu(desc->size_bytes);
psp->asd_start_addr = ucode_start_addr;
- psp->asd_fw = psp->ta_fw;
break;
case TA_FW_TYPE_PSP_XGMI:
psp->ta_xgmi_ucode_version = le32_to_cpu(desc->fw_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index c136bd449744..82e952696d24 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -1518,7 +1518,7 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
struct ras_err_handler_data *data;
int i = 0;
- int ret = 0;
+ int ret = 0, status;
if (!con || !con->eh_data || !bps || !count)
return -EINVAL;
@@ -1543,12 +1543,12 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev,
.size = AMDGPU_GPU_PAGE_SIZE,
.flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED,
};
- ret = amdgpu_vram_mgr_query_page_status(
+ status = amdgpu_vram_mgr_query_page_status(
ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM),
data->bps[i].retired_page);
- if (ret == -EBUSY)
+ if (status == -EBUSY)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING;
- else if (ret == -ENOENT)
+ else if (status == -ENOENT)
(*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 1dd040166c63..19d9aa76cfbf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -30,6 +30,7 @@
#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
+#define EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -62,7 +63,8 @@
static bool __is_ras_eeprom_supported(struct amdgpu_device *adev)
{
if ((adev->asic_type == CHIP_VEGA20) ||
- (adev->asic_type == CHIP_ARCTURUS))
+ (adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID))
return true;
return false;
@@ -100,6 +102,10 @@ static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
case CHIP_ARCTURUS:
return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+ case CHIP_SIENNA_CICHLID:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_SIENNA_CICHLID;
+ break;
+
default:
return false;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 324d5e3f3579..6752d8b13118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -358,10 +358,11 @@ TRACE_EVENT(amdgpu_vm_update_ptes,
}
),
TP_printk("pid:%u vm_ctx:0x%llx start:0x%010llx end:0x%010llx,"
- " flags:0x%llx, incr:%llu, dst:\n%s", __entry->pid,
+ " flags:0x%llx, incr:%llu, dst:\n%s%s", __entry->pid,
__entry->vm_ctx, __entry->start, __entry->end,
__entry->flags, __entry->incr, __print_array(
- __get_dynamic_array(dst), __entry->nptes, 8))
+ __get_dynamic_array(dst), min(__entry->nptes, 32u), 8),
+ __entry->nptes > 32 ? "..." : "")
);
TRACE_EVENT(amdgpu_vm_set_ptes,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 7c5b60e53482..8b989670ed66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -240,7 +240,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
/*
@@ -267,7 +267,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
dec_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
enc_minor = (le32_to_cpu(hdr->ucode_version) >> 24) & 0x3f;
enc_major = (le32_to_cpu(hdr->ucode_version) >> 30) & 0x3;
- DRM_INFO("Found UVD firmware ENC: %hu.%hu DEC: .%hu Family ID: %hu\n",
+ DRM_INFO("Found UVD firmware ENC: %u.%u DEC: .%u Family ID: %u\n",
enc_major, enc_minor, dec_minor, family_id);
adev->uvd.max_handles = AMDGPU_MAX_UVD_HANDLES;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 9791a4057e8b..0d5284b936e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -179,7 +179,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
version_major = (ucode_version >> 20) & 0xfff;
version_minor = (ucode_version >> 8) & 0xfff;
binary_id = ucode_version & 0xff;
- DRM_INFO("Found VCE firmware Version: %hhd.%hhd Binary ID: %hhd\n",
+ DRM_INFO("Found VCE firmware Version: %d.%d Binary ID: %d\n",
version_major, version_minor, binary_id);
adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
(binary_id << 8));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 1c97244e0d74..4a77c7424dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -181,7 +181,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
enc_major = fw_check;
dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
- DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+ DRM_INFO("Found VCN firmware Version ENC: %u.%u DEC: %u VEP: %u Revision: %u\n",
enc_major, enc_minor, dec_ver, vep, fw_rev);
} else {
unsigned int version_major, version_minor, family_id;
@@ -189,7 +189,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+ DRM_INFO("Found VCN firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index fc9bb94eaaf4..5f4805e4d04a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -1647,7 +1647,7 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
}
/* No CPG in Arcturus */
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_init_cp_gfx_microcode(adev, chip_name);
if (r)
return r;
@@ -2633,7 +2633,14 @@ static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
static void gfx_v9_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
bool enable)
{
- u32 tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
+ u32 tmp;
+
+ /* don't toggle interrupts that are only applicable
+ * to me0 pipe0 on ASICs that have me0 removed */
+ if (!adev->gfx.num_gfx_rings)
+ return;
+
+ tmp = RREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE, enable ? 1 : 0);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL_RING0, CNTX_EMPTY_INT_ENABLE, enable ? 1 : 0);
@@ -3822,7 +3829,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
gfx_v9_0_enable_gui_idle_interrupt(adev, false);
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
/* legacy firmware loading */
r = gfx_v9_0_cp_gfx_load_microcode(adev);
if (r)
@@ -3838,7 +3845,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
r = gfx_v9_0_cp_gfx_resume(adev);
if (r)
return r;
@@ -3848,7 +3855,7 @@ static int gfx_v9_0_cp_resume(struct amdgpu_device *adev)
if (r)
return r;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->gfx.num_gfx_rings) {
ring = &adev->gfx.gfx_ring[0];
r = amdgpu_ring_test_helper(ring);
if (r)
@@ -3884,7 +3891,7 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
static void gfx_v9_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
- if (adev->asic_type != CHIP_ARCTURUS)
+ if (adev->gfx.num_gfx_rings)
gfx_v9_0_cp_gfx_enable(adev, enable);
gfx_v9_0_cp_compute_enable(adev, enable);
}
@@ -4025,7 +4032,7 @@ static int gfx_v9_0_soft_reset(void *handle)
/* stop the rlc */
adev->gfx.rlc.funcs->stop(adev);
- if (adev->asic_type != CHIP_ARCTURUS)
+ if (adev->gfx.num_gfx_rings)
/* Disable GFX parsing/prefetching */
gfx_v9_0_cp_gfx_enable(adev, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index e1531d97f486..e22268f9dba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1577,13 +1577,10 @@ static int gmc_v9_0_hw_init(void *handle)
gmc_v9_0_init_golden_registers(adev);
if (adev->mode_info.num_crtc) {
- if (adev->asic_type != CHIP_ARCTURUS) {
- /* Lockout access through VGA aperture*/
- WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-
- /* disable VGA render */
- WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
- }
+ /* Lockout access through VGA aperture */
+ WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
+ /* disable VGA render */
+ WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
}
amdgpu_device_program_register_sequence(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 092ff2c43658..f107385faba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -136,6 +136,7 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
break;
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index b72c8e4ca36b..07104a1de308 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -310,7 +310,7 @@ static void mmhub_v2_3_setup_vmid_config(struct amdgpu_device *adev)
/* Send no-retry XNACK on fault to suppress VM fault storm. */
tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
- !amdgpu_noretry);
+ !adev->gmc.noretry);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_CNTL,
i * hub->ctx_distance, tmp);
WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index f5ce9a9f4cf5..7767ccca526b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -187,7 +187,16 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
- return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ int ret, i = 0;
+
+ while (i < AI_MAILBOX_POLL_MSG_REP_MAX) {
+ ret = xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ if (!ret)
+ break;
+ i++;
+ }
+
+ return ret;
}
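
With the per-message timeout halved to 6000 ms, a single reset request can
now plausibly time out, so both the AI and NV mailboxes retry up to
*_MAILBOX_POLL_MSG_REP_MAX (11) attempts. A self-contained sketch of the
bounded-retry shape, initialized so the error path is defined even for a
zero retry budget:

	#include <errno.h>

	static int retry_request(int (*send_once)(void *ctx), void *ctx,
				 int max_tries)
	{
		int ret = -ETIMEDOUT;	/* reported if every attempt fails */

		for (int i = 0; i < max_tries; i++) {
			ret = send_once(ctx);
			if (!ret)
				break;
		}
		return ret;
	}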
static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
index 83b453f5d717..50572635d0f8 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.h
@@ -25,8 +25,9 @@
#define __MXGPU_AI_H__
#define AI_MAILBOX_POLL_ACK_TIMEDOUT 500
-#define AI_MAILBOX_POLL_MSG_TIMEDOUT 12000
+#define AI_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define AI_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define AI_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
index 666ed99cc14b..dd5c1e6ce009 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
@@ -200,7 +200,16 @@ static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
- return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ int ret, i = 0;
+
+ while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
+ ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
+ if (!ret)
+ break;
+ i++;
+ }
+
+ return ret;
}
static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
index 52605e14a1a5..9f5808616174 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
@@ -27,6 +27,7 @@
#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500
#define NV_MAILBOX_POLL_MSG_TIMEDOUT 6000
#define NV_MAILBOX_POLL_FLR_TIMEDOUT 5000
+#define NV_MAILBOX_POLL_MSG_REP_MAX 11
enum idh_request {
IDH_REQ_GPU_INIT_ACCESS = 1,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ac02dd707c44..6bee3677394a 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -362,6 +362,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
+ case CHIP_DIMGREY_CAVEFISH:
return AMD_RESET_METHOD_MODE1;
default:
if (smu_baco_is_support(smu))
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index 39e17aae655f..f1ba36a094da 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -153,6 +153,9 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
struct amdgpu_firmware_info *info = NULL;
const struct common_firmware_header *header = NULL;
+ if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+ return 0;
+
DRM_DEBUG("\n");
switch (adev->asic_type) {
@@ -807,6 +810,37 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev)
return 0;
}
+static int sdma_v5_2_soft_reset(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ u32 grbm_soft_reset;
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < adev->sdma.num_instances; i++) {
+ grbm_soft_reset = REG_SET_FIELD(0,
+ GRBM_SOFT_RESET, SOFT_RESET_SDMA0,
+ 1);
+ grbm_soft_reset <<= i;
+
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+ tmp |= grbm_soft_reset;
+ DRM_DEBUG("GRBM_SOFT_RESET=0x%08X\n", tmp);
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+
+ tmp &= ~grbm_soft_reset;
+ WREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET, tmp);
+ tmp = RREG32_SOC15(GC, 0, mmGRBM_SOFT_RESET);
+
+ udelay(50);
+ }
+
+ return 0;
+}
+
/**
* sdma_v5_2_start - setup and start the async dma engines
*
@@ -838,6 +872,7 @@ static int sdma_v5_2_start(struct amdgpu_device *adev)
msleep(1000);
}
+ sdma_v5_2_soft_reset(adev);
/* unhalt the MEs */
sdma_v5_2_enable(adev, true);
/* enable sdma ring preemption */
@@ -1366,13 +1401,6 @@ static int sdma_v5_2_wait_for_idle(void *handle)
return -ETIMEDOUT;
}
-static int sdma_v5_2_soft_reset(void *handle)
-{
- /* todo */
-
- return 0;
-}
-
static int sdma_v5_2_ring_preempt_ib(struct amdgpu_ring *ring)
{
int i, r = 0;
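
sdma_v5_2_soft_reset() above builds the SOFT_RESET_SDMA0 field once and
shifts it by the instance index, holding each edge of the reset for 50 us.
A toy illustration of the mask math; the bit position is hypothetical,
chosen only for the example:

	/* assume SOFT_RESET_SDMA0 is a single bit; 20 is illustrative only */
	#define SOFT_RESET_SDMA0_SHIFT 20

	static inline unsigned int sdma_soft_reset_mask(int instance)
	{
		/* instance 0 -> bit 20, instance 1 -> bit 21, ... */
		return (1u << SOFT_RESET_SDMA0_SHIFT) << instance;
	}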
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index b3672d10ea54..e8fb10c41f16 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: MIT
#
-# Heterogenous system architecture configuration
+# Heterogeneous system architecture configuration
#
config HSA_AMD
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 50922ff2927b..72c893fff61a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -422,7 +422,7 @@ static const struct kfd_device_info navi10_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -440,7 +440,7 @@ static const struct kfd_device_info navi12_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -458,7 +458,7 @@ static const struct kfd_device_info navi14_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -476,7 +476,7 @@ static const struct kfd_device_info sienna_cichlid_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 4,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -494,7 +494,7 @@ static const struct kfd_device_info navy_flounder_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
@@ -530,7 +530,7 @@ static const struct kfd_device_info dimgrey_cavefish_device_info = {
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.needs_iommu_device = false,
.supports_cwsr = true,
- .needs_pci_atomics = false,
+ .needs_pci_atomics = true,
.num_sdma_engines = 2,
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 8,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f0a6f6665c81..e686ce2bf3b3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -72,8 +72,8 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
int i;
- int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
- + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
+ int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
+ + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
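
The old expression scaled only the pipe term, so queue bits from different
(mec, pipe) pairs could alias; the fix scales the whole pipe index. With,
say, 4 pipes per MEC and 8 queues per pipe (hypothetical numbers), mec 1 /
pipe 2 must start at queue (1*4 + 2)*8 = 48, while the old formula yielded
1*4 + 2*8 = 20. A standalone check:

	#include <assert.h>

	int main(void)
	{
		int pipes_per_mec = 4, queues_per_pipe = 8, mec = 1, pipe = 2;
		int old = mec * pipes_per_mec + pipe * queues_per_pipe;	/* 20 */
		int fixed = (mec * pipes_per_mec + pipe) * queues_per_pipe; /* 48 */

		assert(old == 20 && fixed == 48);
		return 0;
	}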
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 797b5d4b43e5..e509a175ed17 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -6,7 +6,7 @@ config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
select SND_HDA_COMPONENT if SND_HDA_CORE
- select DRM_AMD_DC_DCN if (X86 || PPC64 || (ARM64 && KERNEL_MODE_NEON)) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
+ select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index c23896207e9d..146486071d01 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -196,10 +196,6 @@ static int amdgpu_dm_encoder_init(struct drm_device *dev,
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
-static int amdgpu_dm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock);
-
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
@@ -2212,7 +2208,7 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.get_format_info = amd_get_format_info,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
- .atomic_commit = amdgpu_dm_atomic_commit,
+ .atomic_commit = drm_atomic_helper_commit,
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
@@ -5124,9 +5120,8 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
int preferred_refresh = 0;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
-#endif
uint32_t link_bandwidth_kbps;
-
+#endif
struct dc_sink *sink = NULL;
if (aconnector == NULL) {
DRM_ERROR("aconnector is NULL!\n");
@@ -5208,11 +5203,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
&dsc_caps);
-#endif
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
-#if defined(CONFIG_DRM_AMD_DC_DCN)
if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported) {
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
@@ -5349,7 +5342,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
}
#ifdef CONFIG_DEBUG_FS
-int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
+static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state,
struct drm_property *property,
uint64_t val)
@@ -5373,7 +5366,7 @@ int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
return 0;
}
-int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
+static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
const struct drm_crtc_state *state,
struct drm_property *property,
uint64_t *val)
@@ -8070,20 +8063,6 @@ static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_stat
stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
}
-static int amdgpu_dm_atomic_commit(struct drm_device *dev,
- struct drm_atomic_state *state,
- bool nonblock)
-{
- /*
- * Add check here for SoC's that support hardware cursor plane, to
- * unset legacy_cursor_update
- */
-
- return drm_atomic_helper_commit(dev, state, nonblock);
-
- /*TODO Handle EINTR, reenable IRQ*/
-}
-
/**
* amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
* @state: The atomic state to commit
@@ -8399,8 +8378,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
manage_dm_interrupts(adev, acrtc, true);
}
-#ifdef CONFIG_DEBUG_FS
- if (new_crtc_state->active &&
+ if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
/**
* Frontend may have changed so reapply the CRC capture
@@ -8421,7 +8399,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
amdgpu_dm_crtc_configure_crc_source(
crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
}
-#endif
}
for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
@@ -9388,7 +9365,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- if (dm_old_crtc_state->dsc_force_changed && new_crtc_state)
+ if (dm_old_crtc_state->dsc_force_changed)
new_crtc_state->mode_changed = true;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 0b31779a0485..2ee6edb3df93 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -337,10 +337,29 @@ struct amdgpu_display_manager {
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
#ifdef CONFIG_DEBUG_FS
- /* set the crc calculation window*/
+ /**
+ * @crc_win_x_start_property:
+ *
+ * X start of the crc calculation window
+ */
struct drm_property *crc_win_x_start_property;
+ /**
+ * @crc_win_y_start_property:
+ *
+ * Y start of the crc calculation window
+ */
struct drm_property *crc_win_y_start_property;
+ /**
+ * @crc_win_x_end_property:
+ *
+ * X end of the crc calculation window
+ */
struct drm_property *crc_win_x_end_property;
+ /**
+ * @crc_win_y_end_property:
+ *
+ * Y end of the crc calculation window
+ */
struct drm_property *crc_win_y_end_property;
#endif
/**
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index ff6db26626ea..7b886a779a8c 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -81,6 +81,14 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
return pipe_crc_sources;
}
+static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
+{
+ dm_crtc_state->crc_window.x_start = 0;
+ dm_crtc_state->crc_window.y_start = 0;
+ dm_crtc_state->crc_window.x_end = 0;
+ dm_crtc_state->crc_window.y_end = 0;
+}
+
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
{
bool ret = true;
@@ -141,7 +149,10 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
mutex_lock(&adev->dm.dc_lock);
/* Enable CRTC CRC generation if necessary. */
- if (dm_is_crc_source_crtc(source)) {
+ if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
+ if (!enable)
+ amdgpu_dm_set_crc_window_default(dm_crtc_state);
+
if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
crc_window = &tmp_window;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
index 0235bfb246e5..eba2f1d35d07 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
@@ -46,13 +46,13 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
}
/* amdgpu_dm_crc.c */
-#ifdef CONFIG_DEBUG_FS
bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
struct dm_crtc_state *dm_old_crtc_state);
int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
struct dm_crtc_state *dm_crtc_state,
enum amdgpu_dm_pipe_crc_source source);
+#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *src_name,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
index 357778556b06..26ed70e5538a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -165,7 +165,10 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
list);
- if (ih == handler) {
+ if (handler == NULL)
+ continue;
+
+ if (ih == handler->handler) {
/* Found our handler. Remove it from the list. */
list_del(&handler->list);
handler_removed = true;
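The bug this hunk fixes is a type confusion in the comparison: ih is the registered interrupt callback, but the old code compared it against the list node itself rather than the node's handler member, and never guarded against a NULL entry. A minimal sketch of the corrected lookup over a singly linked list; the types are simplified stand-ins, the driver walks struct amdgpu_dm_irq_handler_data with the kernel's list_entry() machinery:

#include <stddef.h>

typedef void (*irq_handler_t)(void *ctx);

/* Simplified stand-in for struct amdgpu_dm_irq_handler_data. */
struct handler_data {
	irq_handler_t handler;      /* the registered callback */
	struct handler_data *next;  /* stand-in for the kernel list head */
};

/* Unlink and return the entry whose ->handler matches ih, if any. */
static struct handler_data *remove_handler(struct handler_data **head,
					   irq_handler_t ih)
{
	for (struct handler_data **p = head; *p; p = &(*p)->next) {
		if ((*p)->handler != ih)  /* compare the member, not the node */
			continue;
		struct handler_data *found = *p;
		*p = found->next;         /* unlink from the list */
		return found;
	}
	return NULL;
}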
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 6f975c16779d..8ab0b9060d2b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -24,6 +24,7 @@
*/
#include <linux/version.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_dp_helper.h>
@@ -252,8 +253,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
static struct drm_encoder *
dm_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *connector_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 64f515d74410..f3c00f479e1c 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
calcs_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-calcs_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index d59b380e7b7f..ff96bee57bfc 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -104,13 +104,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
@@ -125,13 +118,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn30/dcn30_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN30)
@@ -146,13 +132,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
endif
-# prevent build errors:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# this file is unused on arm64, just like on ppc64
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/clk_mgr/dcn301/vg_clk_mgr.o := -mgeneral-regs-only
-endif
-
AMD_DAL_CLK_MGR_DCN301 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn301/,$(CLK_MGR_DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6f4fe8fce6b7..01b1853b7750 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -75,15 +75,8 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@@ -234,12 +227,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
// always update dtos unless clock is lowered and not safe to lower
- if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
- rn_update_clocks_update_dpp_dto(
- clk_mgr,
- context,
- clk_mgr_base->clks.actual_dppclk_khz,
- safe_to_lower);
+ rn_update_clocks_update_dpp_dto(
+ clk_mgr,
+ context,
+ clk_mgr_base->clks.actual_dppclk_khz,
+ safe_to_lower);
}
if (update_dispclk &&
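Dropping the ">= current dppclk" guard means the per-pipe DTO dividers are now refreshed on every clock update rather than only when the DPP clock rises; lowering is still gated inside rn_update_clocks_update_dpp_dto() by the safe_to_lower flag. A hedged sketch of that general safe-to-lower pattern, illustrative rather than the driver's exact helper:

#include <stdbool.h>

/* Raise a clock immediately, but only lower it when the caller says the
 * consumers of the higher clock are already gone (safe_to_lower). */
static int apply_clock_khz(int current_khz, int new_khz, bool safe_to_lower)
{
	if (new_khz > current_khz ||
	    (safe_to_lower && new_khz < current_khz))
		return new_khz;    /* program the new value */
	return current_khz;        /* hold the higher clock for now */
}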
@@ -738,32 +730,32 @@ static struct wm_table ddr4_wm_table_rn = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 9.09,
- .sr_enter_plus_exit_time_us = 10.14,
+ .sr_exit_time_us = 11.90,
+ .sr_enter_plus_exit_time_us = 12.80,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 13.18,
+ .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 13.18,
+ .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 10.12,
- .sr_enter_plus_exit_time_us = 11.48,
+ .sr_exit_time_us = 13.18,
+ .sr_enter_plus_exit_time_us = 14.30,
.valid = true,
},
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
index 11a7b583d561..7deeec9d1c7c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c
@@ -99,7 +99,7 @@ int rn_vbios_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, unsigned
/* Trigger the message transaction by writing the message ID */
REG_WRITE(MP1_SMN_C2PMSG_67, msg_id);
- result = rn_smu_wait_for_response(clk_mgr, 10, 1000);
+ result = rn_smu_wait_for_response(clk_mgr, 10, 200000);
ASSERT(result == VBIOSSMC_Result_OK || result == VBIOSSMC_Result_UnknownCmd);
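The retry budget of rn_smu_wait_for_response() is delay_us multiplied by the retry count, so this one-line change raises the worst-case wait from 10 us x 1000 = 10 ms to 10 us x 200000 = 2 s. A sketch of such a poll loop, with the response-register read and the delay abstracted behind function pointers (an assumed shape; the real helper re-reads the SMU response register each iteration):

#include <stdint.h>

/* Poll for a nonzero SMU response every delay_us microseconds, giving up
 * after max_retries attempts. Worst case: delay_us * max_retries. */
static uint32_t wait_for_response(uint32_t (*read_resp)(void),
				  void (*udelay_us)(unsigned int),
				  unsigned int delay_us,
				  unsigned int max_retries)
{
	for (unsigned int i = 0; i < max_retries; i++) {
		uint32_t resp = read_resp();
		if (resp)          /* SMU posted a result code */
			return resp;
		udelay_us(delay_us);
	}
	return 0;                  /* timed out, no response */
}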
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 9a8e66bba9c0..991b9c5beaa3 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -74,15 +74,8 @@ int vg_get_active_display_cnt_wa(
for (i = 0; i < dc->link_count; i++) {
const struct dc_link *link = dc->links[i];
- /*
- * Only notify active stream or virtual stream.
- * Need to notify virtual stream to work around
- * headless case. HPD does not fire when system is in
- * S0i2.
- */
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->connector_signal == SIGNAL_TYPE_VIRTUAL ||
- link->link_enc->funcs->is_dig_enabled(link->link_enc))
+ if (link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a901baf2aaef..f4a2088ab179 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2487,9 +2487,14 @@ enum dc_status dc_link_validate_mode_timing(
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
- struct dc *dc = link->ctx->dc;
+ struct dc *dc = NULL;
struct abm *abm = NULL;
+ if (!link || !link->ctx)
+ return NULL;
+
+ dc = link->ctx->dc;
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i];
struct dc_stream_state *stream = pipe_ctx.stream;
@@ -3267,9 +3272,6 @@ void core_link_enable_stream(
}
}
-#if defined(CONFIG_DRM_AMD_DC_DCN3_0)
-#endif
-
/* turn off otg test pattern if enable */
if (pipe_ctx->stream_res.tg->funcs->set_test_pattern)
pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6b11d4af54af..2fc12239b22c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -3173,13 +3173,7 @@ static void get_active_converter_info(
}
/* DPCD 0x5 bit 0 = 1, it indicate it's branch device */
- if (ds_port.fields.PORT_TYPE == DOWNSTREAM_DP) {
- link->dpcd_caps.is_branch_dev = false;
- }
-
- else {
- link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
- }
+ link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT;
switch (ds_port.fields.PORT_TYPE) {
case DOWNSTREAM_VGA:
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index b8f1e2d33423..3aedadb34548 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -42,7 +42,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.115"
+#define DC_VER "3.2.116"
#define MAX_SURFACES 3
#define MAX_PLANES 6
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index b409f6b2bfd8..210466b2d863 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -119,7 +119,8 @@ static const struct link_encoder_funcs dce110_lnk_enc_funcs = {
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
- .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+ .get_dig_frontend = dce110_get_dig_frontend,
};
static enum bp_result link_transmitter_control(
@@ -235,6 +236,44 @@ static void set_link_training_complete(
}
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc)
+{
+ struct dce110_link_encoder *enc110 = TO_DCE110_LINK_ENC(enc);
+ u32 value;
+ enum engine_id result;
+
+ REG_GET(DIG_BE_CNTL, DIG_FE_SOURCE_SELECT, &value);
+
+ switch (value) {
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGA:
+ result = ENGINE_ID_DIGA;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGB:
+ result = ENGINE_ID_DIGB;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGC:
+ result = ENGINE_ID_DIGC;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGD:
+ result = ENGINE_ID_DIGD;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGE:
+ result = ENGINE_ID_DIGE;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGF:
+ result = ENGINE_ID_DIGF;
+ break;
+ case DCE110_DIG_FE_SOURCE_SELECT_DIGG:
+ result = ENGINE_ID_DIGG;
+ break;
+ default:
+ // invalid source select DIG
+ result = ENGINE_ID_UNKNOWN;
+ }
+
+ return result;
+}
+
void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index)
@@ -1665,7 +1704,8 @@ static const struct link_encoder_funcs dce60_lnk_enc_funcs = {
.disable_hpd = dce110_link_encoder_disable_hpd,
.is_dig_enabled = dce110_is_dig_enabled,
.destroy = dce110_link_encoder_destroy,
- .get_max_link_cap = dce110_link_encoder_get_max_link_cap
+ .get_max_link_cap = dce110_link_encoder_get_max_link_cap,
+ .get_dig_frontend = dce110_get_dig_frontend
};
void dce60_link_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
index cb714a48b171..fc6ade824c23 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h
@@ -295,6 +295,8 @@ void dce110_link_encoder_connect_dig_be_to_fe(
enum engine_id engine,
bool connect);
+unsigned int dce110_get_dig_frontend(struct link_encoder *enc);
+
void dce110_link_encoder_set_dp_phy_pattern_training_pattern(
struct link_encoder *enc,
uint32_t index);
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 82bc4e192bbf..915fbb8e8168 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -1268,7 +1268,7 @@ void dce120_timing_generator_construct(
tg110->min_h_front_porch = 0;
tg110->min_h_back_porch = 0;
- tg110->min_h_sync_width = 8;
+ tg110->min_h_sync_width = 4;
tg110->min_v_sync_width = 1;
tg110->min_v_blank = 3;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 733e6e6e43bd..62ad1a11bff9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -31,11 +31,4 @@ DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o dcn10_hw_sequencer.o \
AMD_DAL_DCN10 = $(addprefix $(AMDDALPATH)/dc/dcn10/,$(DCN10))
-# fix:
-# ...: '-mgeneral-regs-only' is incompatible with the use of floating-point types
-# aarch64 does not support soft-float, so use hard-float and handle this in code
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn10/dcn10_resource.o := -mgeneral-regs-only
-endif
-
AMD_DISPLAY_FILES += $(AMD_DAL_DCN10)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index 75637c291e75..6f42d10dd772 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -124,11 +124,11 @@ bool hubbub1_verify_allow_pstate_change_high(
* still not asserted, we are probably stuck and going to hang
*
* TODO: Figure out why it takes ~100us on linux
- * pstate takes around ~100us on linux. Unknown currently as to
- * why it takes that long on linux
+ * pstate takes around ~100us (up to 200us) on linux. Unknown currently
+ * as to why it takes that long on linux
*/
const unsigned int pstate_wait_timeout_us = 200;
- const unsigned int pstate_wait_expected_timeout_us = 40;
+ const unsigned int pstate_wait_expected_timeout_us = 180;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 41679ad531c5..9e796dfeac20 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -1241,6 +1241,22 @@ void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst)
REG_UPDATE(DCHUBP_CNTL, HUBP_VTG_SEL, otg_inst);
}
+bool hubp1_in_blank(struct hubp *hubp)
+{
+ uint32_t in_blank;
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_GET(DCHUBP_CNTL, HUBP_IN_BLANK, &in_blank);
+ return in_blank ? true : false;
+}
+
+void hubp1_soft_reset(struct hubp *hubp, bool reset)
+{
+ struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
+
+ REG_UPDATE(DCHUBP_CNTL, HUBP_DISABLE, reset ? 1 : 0);
+}
+
void hubp1_init(struct hubp *hubp)
{
//do nothing
@@ -1272,6 +1288,8 @@ static const struct hubp_funcs dcn10_hubp_funcs = {
.dmdata_set_attributes = NULL,
.dmdata_load = NULL,
+ .hubp_soft_reset = hubp1_soft_reset,
+ .hubp_in_blank = hubp1_in_blank,
};
/*****************************************/
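hubp1_in_blank() and hubp1_soft_reset() above are thin wrappers around the register field accessors. For readers unfamiliar with REG_GET/REG_UPDATE, this is their general shape, shown as a simplified stand-in; the driver generates per-ASIC shift/mask tables instead of passing them explicitly:

#include <stdint.h>

/* REG_GET analog: extract a field from a register value. */
static uint32_t field_get(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

/* REG_UPDATE analog: read-modify-write a field into a register value. */
static uint32_t field_update(uint32_t reg, uint32_t mask,
			     unsigned int shift, uint32_t value)
{
	return (reg & ~mask) | ((value << shift) & mask);
}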
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
index 780af5b3c16f..a9a6ed7f4f99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
@@ -260,6 +260,7 @@
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_NO_OUTSTANDING_REQ, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VTG_SEL, mask_sh),\
HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_IN_BLANK, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_PIPES, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, NUM_BANKS, mask_sh),\
HUBP_SF(HUBP0_DCSURF_ADDR_CONFIG, PIPE_INTERLEAVE, mask_sh),\
@@ -455,6 +456,7 @@
type HUBP_VTG_SEL;\
type HUBP_UNDERFLOW_STATUS;\
type HUBP_UNDERFLOW_CLEAR;\
+ type HUBP_IN_BLANK;\
type NUM_PIPES;\
type NUM_BANKS;\
type PIPE_INTERLEAVE;\
@@ -772,5 +774,7 @@ void hubp1_vready_workaround(struct hubp *hubp,
void hubp1_init(struct hubp *hubp);
void hubp1_read_state_common(struct hubp *hubp);
+bool hubp1_in_blank(struct hubp *hubp);
+void hubp1_soft_reset(struct hubp *hubp, bool reset);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 9f7d6b087553..cfc130e2d6fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -2736,7 +2736,7 @@ static void dcn10_program_all_pipe_in_tree(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
index 3fcd408e9103..100ce0e28fd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
@@ -467,6 +467,17 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
REG_SET(CUR[opp_id], 0, CUR_VUPDATE_LOCK_SET, lock ? 1 : 0);
}
+unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
+{
+ struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
+ uint32_t val = 0;
+
+ if (opp_id < MAX_OPP && REG(MUX[opp_id]))
+ REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
+
+ return val;
+}
+
static const struct mpc_funcs dcn10_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -483,6 +494,7 @@ static const struct mpc_funcs dcn10_mpc_funcs = {
.set_denorm_clamp = NULL,
.set_output_csc = NULL,
.set_output_gamma = NULL,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
void dcn10_mpc_construct(struct dcn10_mpc *mpc10,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
index 66a4719c22a0..dbfffc6383dc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.h
@@ -200,4 +200,5 @@ void mpc1_read_mpcc_state(
void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock);
+unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a125d3f05c81..f033397a84e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -272,7 +272,7 @@ void optc1_program_timing(
vupdate_offset,
vupdate_width);
- optc->funcs->set_vtg_params(optc, dc_crtc_timing);
+ optc->funcs->set_vtg_params(optc, dc_crtc_timing, true);
/* TODO
* patched_crtc_timing.flags.HORZ_COUNT_BY_TWO == 1
@@ -312,7 +312,7 @@ void optc1_program_timing(
}
void optc1_set_vtg_params(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing)
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2)
{
struct dc_crtc_timing patched_crtc_timing;
uint32_t asic_blank_end;
@@ -348,9 +348,12 @@ void optc1_set_vtg_params(struct timing_generator *optc,
}
}
- REG_UPDATE_2(CONTROL,
- VTG0_FP2, v_fp2,
- VTG0_VCOUNT_INIT, v_init);
+ if (program_fp2)
+ REG_UPDATE_2(CONTROL,
+ VTG0_FP2, v_fp2,
+ VTG0_VCOUNT_INIT, v_init);
+ else
+ REG_UPDATE(CONTROL, VTG0_VCOUNT_INIT, v_init);
}
void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable)
@@ -1540,7 +1543,7 @@ void dcn10_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+ optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
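With the new program_fp2 flag, one entry point has two register footprints: mode programming passes true and rewrites both VTG0_FP2 and VTG0_VCOUNT_INIT, while the bandwidth-update path (see dcn20_update_bandwidth below) passes false and leaves FP2 untouched mid-stream. A toy analog of that switch, not the driver's register model:

#include <stdbool.h>
#include <stdio.h>

struct vtg_regs { int fp2; int vcount_init; };

/* One setter, two footprints, keyed by program_fp2. */
static void set_vtg(struct vtg_regs *r, int fp2, int vinit, bool program_fp2)
{
	if (program_fp2)
		r->fp2 = fp2;       /* full reprogram (mode set path) */
	r->vcount_init = vinit;     /* always refreshed */
}

int main(void)
{
	struct vtg_regs r = { 5, 0 };

	set_vtg(&r, 7, 100, false);                /* bandwidth update */
	printf("%d %d\n", r.fp2, r.vcount_init);   /* prints "5 100" */
	return 0;
}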
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
index 344eb487219e..b12bd9aae52f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
@@ -700,6 +700,6 @@ bool optc1_get_crc(struct timing_generator *optc,
bool optc1_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
void optc1_set_vtg_params(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing);
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
#endif /* __DC_TIMING_GENERATOR_DCN10_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index bdc37831535e..36745193c391 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1534,15 +1534,8 @@ static bool dcn10_resource_construct(
memcpy(dc->dcn_ip, &dcn10_ip_defaults, sizeof(dcn10_ip_defaults));
memcpy(dc->dcn_soc, &dcn10_soc_defaults, sizeof(dcn10_soc_defaults));
-#if defined(CONFIG_ARM64)
- /* Aarch64 does not support -msoft-float/-mfloat-abi=soft */
- DC_FP_START();
- dcn10_resource_construct_fp(dc);
- DC_FP_END();
-#else
/* Other architectures we build for build this with soft-float */
dcn10_resource_construct_fp(dc);
-#endif
pool->base.pp_smu = dcn10_pp_smu_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 9e38c37c1d73..76b334644f9e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -81,7 +81,9 @@
SRI(DP_MSE_RATE_UPDATE, DP, id), \
SRI(DP_PIXEL_FORMAT, DP, id), \
SRI(DP_SEC_CNTL, DP, id), \
+ SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_SEC_CNTL2, DP, id), \
+ SRI(DP_SEC_CNTL5, DP, id), \
SRI(DP_SEC_CNTL6, DP, id), \
SRI(DP_STEER_FIFO, DP, id), \
SRI(DP_VID_M, DP, id), \
@@ -126,7 +128,9 @@ struct dcn10_stream_enc_registers {
uint32_t DP_MSE_RATE_UPDATE;
uint32_t DP_PIXEL_FORMAT;
uint32_t DP_SEC_CNTL;
+ uint32_t DP_SEC_CNTL1;
uint32_t DP_SEC_CNTL2;
+ uint32_t DP_SEC_CNTL5;
uint32_t DP_SEC_CNTL6;
uint32_t DP_STEER_FIFO;
uint32_t DP_VID_M;
@@ -411,6 +415,8 @@ struct dcn10_stream_enc_registers {
type DP_SEC_GSP3_ENABLE;\
type DP_SEC_GSP4_ENABLE;\
type DP_SEC_GSP5_ENABLE;\
+ type DP_SEC_GSP5_LINE_NUM;\
+ type DP_SEC_GSP5_LINE_REFERENCE;\
type DP_SEC_GSP6_ENABLE;\
type DP_SEC_GSP7_ENABLE;\
type DP_SEC_GSP7_PPS;\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index 624cb1341ef1..5fcaf78334ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -17,10 +17,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
index b7e44e53a342..0df0da2e6a4d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c
@@ -1595,6 +1595,8 @@ static struct hubp_funcs dcn20_hubp_funcs = {
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp1_init,
.validate_dml_output = hubp2_validate_dml_output,
+ .hubp_in_blank = hubp1_in_blank,
+ .hubp_soft_reset = hubp1_soft_reset,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index abcb06044e6e..cb822df21b7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -1586,7 +1586,10 @@ static void dcn20_program_pipe(
&& !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
hws->funcs.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible);
- if (pipe_ctx->update_flags.bits.global_sync) {
+ /* Only update TG on top pipe */
+ if (pipe_ctx->update_flags.bits.global_sync && !pipe_ctx->top_pipe
+ && !pipe_ctx->prev_odm_pipe) {
+
pipe_ctx->stream_res.tg->funcs->program_global_sync(
pipe_ctx->stream_res.tg,
pipe_ctx->pipe_dlg_param.vready_offset,
@@ -1594,8 +1597,11 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, true);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
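The two wait_for_state() calls added here bracket a frame boundary: the OTG must first enter vertical blank and then return to active video before the VTG parameters are rewritten, so the new global-sync offsets land on a clean frame. A sketch of the assumed polling behavior behind tg->funcs->wait_for_state():

/* Spin until the timing generator reports the requested state; a real
 * implementation bounds this with a register-poll timeout. */
enum crtc_state_sketch { STATE_VBLANK_SKETCH, STATE_VACTIVE_SKETCH };

static void wait_for_state_sketch(enum crtc_state_sketch (*read_state)(void),
				  enum crtc_state_sketch wanted)
{
	while (read_state() != wanted)
		;
}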
@@ -1695,14 +1701,6 @@ void dcn20_program_front_end_for_ctx(
&& context->res_ctx.pipe_ctx[i].stream)
hws->funcs.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true);
- /* wait for outstanding pending changes before adding or removing planes */
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- context->res_ctx.pipe_ctx[i].update_flags.bits.enable) {
- dc->hwss.wait_for_pending_cleared(dc, context);
- break;
- }
- }
/* Disconnect mpcc */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1856,7 +1854,7 @@ bool dcn20_update_bandwidth(
pipe_ctx->pipe_dlg_param.vupdate_width);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
- pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
+ pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing, false);
if (pipe_ctx->prev_odm_pipe == NULL)
hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
@@ -2251,11 +2249,11 @@ void dcn20_get_mpctree_visual_confirm_color(
{
const struct tg_color pipe_colors[6] = {
{MAX_TG_COLOR_VALUE, 0, 0}, // red
- {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow
- {0, MAX_TG_COLOR_VALUE, 0}, // blue
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE / 4, 0}, // orange
+ {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, // yellow
+ {0, MAX_TG_COLOR_VALUE, 0}, // green
+ {0, 0, MAX_TG_COLOR_VALUE}, // blue
{MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple
- {0, 0, MAX_TG_COLOR_VALUE}, // green
- {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange
};
struct pipe_ctx *top_pipe = pipe_ctx;
@@ -2280,14 +2278,11 @@ void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
// input to MPCC is always RGB, by default leave black_color at 0
if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
- hws->funcs.get_hdr_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
+ hws->funcs.get_hdr_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
- hws->funcs.get_surface_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
+ hws->funcs.get_surface_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) {
- dcn20_get_mpctree_visual_confirm_color(
- pipe_ctx, &blnd_cfg.black_color);
+ dcn20_get_mpctree_visual_confirm_color(pipe_ctx, &blnd_cfg.black_color);
}
if (per_pixel_alpha)
@@ -2581,4 +2576,4 @@ void dcn20_set_disp_pattern_generator(const struct dc *dc,
{
pipe_ctx->stream_res.opp->funcs->opp_set_disp_pattern_generator(pipe_ctx->stream_res.opp, test_pattern,
color_space, color_depth, solid_color, width, height, offset);
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
index 99cc095dc33c..6a99fdd55e8c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mpc.c
@@ -556,6 +556,7 @@ const struct mpc_funcs dcn20_mpc_funcs = {
.set_ocsc_default = mpc2_set_ocsc_default,
.set_output_gamma = mpc2_set_output_gamma,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
void dcn20_mpc_construct(struct dcn20_mpc *mpc20,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index ff36db5edf6c..e04ecf0fc0db 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -1933,7 +1933,7 @@ bool dcn20_split_stream_for_odm(
next_odm_pipe->stream_res.opp = pool->opps[next_odm_pipe->pipe_idx];
else
next_odm_pipe->stream_res.opp = next_odm_pipe->top_pipe->stream_res.opp;
- if (next_odm_pipe->stream->timing.flags.DSC == 1) {
+ if (next_odm_pipe->stream->timing.flags.DSC == 1 && !next_odm_pipe->top_pipe) {
dcn20_acquire_dsc(dc, res_ctx, &next_odm_pipe->stream_res.dsc, next_odm_pipe->pipe_idx);
ASSERT(next_odm_pipe->stream_res.dsc);
if (next_odm_pipe->stream_res.dsc == NULL)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index d2a805bd4573..9a881e639709 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -83,6 +83,8 @@
SE_SF(DIG0_HDMI_METADATA_PACKET_CONTROL, HDMI_METADATA_PACKET_LINE, mask_sh),\
SE_SF(DIG0_DIG_FE_CNTL, DOLBY_VISION_EN, mask_sh),\
SE_SF(DP0_DP_PIXEL_FORMAT, DP_PIXEL_COMBINE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL1, DP_SEC_GSP5_LINE_REFERENCE, mask_sh),\
+ SE_SF(DP0_DP_SEC_CNTL5, DP_SEC_GSP5_LINE_NUM, mask_sh),\
SE_SF(DP0_DP_SEC_FRAMING4, DP_SST_SDP_SPLITTING, mask_sh)
void dcn20_stream_encoder_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index 1ee5fc03b7b3..bb8c95141082 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -13,10 +13,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
index b7efa777ec73..e44a37491c1e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_dccg.h
@@ -32,5 +32,6 @@ struct dccg *dccg21_create(
const struct dccg_shift *dccg_shift,
const struct dccg_mask *dccg_mask);
+void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk);
#endif /* __DCN21_DCCG_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index 248c2711aace..c20331eb62e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -41,11 +41,6 @@ CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 2ae159e2dd6e..46ea39f5ef8d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -51,7 +51,7 @@
(enc10->link_regs->index)
-static bool dcn30_link_encoder_validate_output_with_stream(
+bool dcn30_link_encoder_validate_output_with_stream(
struct link_encoder *enc,
const struct dc_stream_state *stream)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
index 2fbf879cd327..f2d90f2b8bf1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h
@@ -78,4 +78,8 @@ void dcn30_link_encoder_construct(
void enc3_hw_init(struct link_encoder *enc);
+bool dcn30_link_encoder_validate_output_with_stream(
+ struct link_encoder *enc,
+ const struct dc_stream_state *stream);
+
#endif /* __DC_LINK_ENCODER__DCN30_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index af462fe4260d..88ffa9ff1ed1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -509,6 +509,8 @@ static struct hubp_funcs dcn30_hubp_funcs = {
.hubp_clear_underflow = hubp2_clear_underflow,
.hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
.hubp_init = hubp3_init,
+ .hubp_in_blank = hubp1_in_blank,
+ .hubp_soft_reset = hubp1_soft_reset,
};
bool hubp3_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index 283995ab9eeb..3deb3fb1724d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -668,7 +668,7 @@ void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal);
is_dp = dc_is_dp_signal(pipe_ctx->stream->signal);
- if (!is_hdmi_tmds)
+ if (!is_hdmi_tmds && !is_dp)
return;
if (is_hdmi_tmds)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index d7d053fc6e91..3e6f76096119 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -1428,6 +1428,7 @@ const struct mpc_funcs dcn30_mpc_funcs = {
.program_3dlut = mpc3_program_3dlut,
.release_rmu = mpcc3_release_rmu,
.power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index b1f228fc119a..3ba3991ee612 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -350,7 +350,7 @@ void dcn30_timing_generator_init(struct optc *optc1)
optc1->min_h_blank = 32;
optc1->min_v_blank = 3;
optc1->min_v_blank_interlace = 5;
- optc1->min_h_sync_width = 8;
+ optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 2fd5d34e4ba6..3ca7d911d25c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -21,10 +21,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
index 36e44e1b07fa..8d4924b7dc22 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
@@ -20,10 +20,6 @@ ifdef CONFIG_PPC64
CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index a02a33dcd70b..6bb7f2905821 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -33,10 +33,6 @@ ifdef CONFIG_PPC64
dml_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dml_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index f2624a1156e5..8d31eb75c6a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -10,10 +10,6 @@ ifdef CONFIG_PPC64
dsc_ccflags := -mhard-float -maltivec
endif
-ifdef CONFIG_ARM64
-dsc_rcflags := -mgeneral-regs-only
-endif
-
ifdef CONFIG_CC_IS_GCC
ifeq ($(call cc-ifversion, -lt, 0701, y), y)
IS_OLD_GCC = 1
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index 315e3061c592..22f3f643ed1b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -188,6 +188,8 @@ struct hubp_funcs {
void (*set_unbounded_requesting)(
struct hubp *hubp,
bool enable);
+ bool (*hubp_in_blank)(struct hubp *hubp);
+ void (*hubp_soft_reset)(struct hubp *hubp, bool reset);
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 879f502ae530..75c77ad9cbfe 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -359,6 +359,10 @@ struct mpc_funcs {
int (*release_rmu)(struct mpc *mpc, int mpcc_id);
+ unsigned int (*get_mpc_out_mux)(
+ struct mpc *mpc,
+ int opp_id);
+
};
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 12d5718caea8..f7632fe25976 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -271,7 +271,7 @@ struct timing_generator_funcs {
struct dc_crtc_timing *hw_crtc_timing);
void (*set_vtg_params)(struct timing_generator *optc,
- const struct dc_crtc_timing *dc_crtc_timing);
+ const struct dc_crtc_timing *dc_crtc_timing, bool program_fp2);
void (*set_dsc_config)(struct timing_generator *optc,
enum optc_dsc_mode dsc_mode,
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index 95cb56929e79..126c2f3a4dd3 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -55,10 +55,6 @@
#include <asm/fpu/api.h>
#define DC_FP_START() kernel_fpu_begin()
#define DC_FP_END() kernel_fpu_end()
-#elif defined(CONFIG_ARM64)
-#include <asm/neon.h>
-#define DC_FP_START() kernel_neon_begin()
-#define DC_FP_END() kernel_neon_end()
#elif defined(CONFIG_PPC64)
#include <asm/switch_to.h>
#include <asm/cputable.h>
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index b20a39f488ae..249a076d6f69 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -47,10 +47,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x931573111
+#define DMUB_FW_VERSION_GIT_HASH 0xf51b86a
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 45
+#define DMUB_FW_VERSION_REVISION 47
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -514,12 +514,20 @@ enum dp_aux_request_action {
enum aux_return_code_type {
AUX_RET_SUCCESS = 0,
+ AUX_RET_ERROR_UNKNOWN,
+ AUX_RET_ERROR_INVALID_REPLY,
AUX_RET_ERROR_TIMEOUT,
- AUX_RET_ERROR_NO_DATA,
+ AUX_RET_ERROR_HPD_DISCON,
+ AUX_RET_ERROR_ENGINE_ACQUIRE,
AUX_RET_ERROR_INVALID_OPERATION,
AUX_RET_ERROR_PROTOCOL_ERROR,
};
+enum aux_channel_type {
+ AUX_CHANNEL_LEGACY_DDC,
+ AUX_CHANNEL_DPIA
+};
+
/* DP AUX command */
struct aux_transaction_parameters {
uint8_t is_i2c_over_aux;
@@ -532,9 +540,10 @@ struct aux_transaction_parameters {
struct dmub_cmd_dp_aux_control_data {
uint32_t handle;
- uint8_t port_index;
+ uint8_t instance;
uint8_t sw_crc_enabled;
uint16_t timeout;
+ enum aux_channel_type type;
struct aux_transaction_parameters dpaux;
};
@@ -558,7 +567,7 @@ struct aux_reply_data {
struct aux_reply_control_data {
uint32_t handle;
- uint8_t phy_port_index;
+ uint8_t instance;
uint8_t result;
uint16_t pad;
};
@@ -581,7 +590,7 @@ enum dp_hpd_status {
};
struct dp_hpd_data {
- uint8_t phy_port_index;
+ uint8_t instance;
uint8_t hpd_type;
uint8_t hpd_status;
uint8_t pad;
@@ -732,27 +741,30 @@ enum dmub_cmd_abm_type {
struct abm_config_table {
/* Parameters for crgb conversion */
uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; // 0B
- uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 15B
- uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 31B
+ uint16_t crgb_offset[NUM_POWER_FN_SEGS]; // 16B
+ uint16_t crgb_slope[NUM_POWER_FN_SEGS]; // 32B
/* Parameters for custom curve */
- uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 47B
- uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 79B
-
- uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 111B
- uint16_t min_abm_backlight; // 121B
-
- uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 123B
- uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 143B
- uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 163B
- uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 183B
- uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 203B
- uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 207B
- uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 211B
- uint8_t min_knee[NUM_AGGR_LEVEL]; // 215B
- uint8_t max_knee[NUM_AGGR_LEVEL]; // 219B
- uint8_t iir_curve[NUM_AMBI_LEVEL]; // 223B
- uint8_t pad3[3]; // 228B
+ uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; // 48B
+ uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; // 78B
+
+ uint16_t ambient_thresholds_lux[NUM_AMBI_LEVEL]; // 112B
+ uint16_t min_abm_backlight; // 122B
+
+ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 124B
+ uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 144B
+ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 164B
+ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; // 184B
+ uint8_t hybrid_factor[NUM_AGGR_LEVEL]; // 204B
+ uint8_t contrast_factor[NUM_AGGR_LEVEL]; // 208B
+ uint8_t deviation_gain[NUM_AGGR_LEVEL]; // 212B
+ uint8_t min_knee[NUM_AGGR_LEVEL]; // 216B
+ uint8_t max_knee[NUM_AGGR_LEVEL]; // 220B
+ uint8_t iir_curve[NUM_AMBI_LEVEL]; // 224B
+ uint8_t pad3[3]; // 229B
+
+ uint16_t blRampReduction[NUM_AGGR_LEVEL]; // 232B
+ uint16_t blRampStart[NUM_AGGR_LEVEL]; // 240B
};
struct dmub_cmd_abm_set_pipe_data {
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index eced40a2fce4..5c67e12b2e55 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -30,6 +30,14 @@
#include "opp.h"
#include "color_gamma.h"
+/* When calculating LUT values the first region and at least one subsequent
+ * region are calculated with full precision. These defines are a demarcation
+ * of where the second region starts and ends.
+ * These are hardcoded values to avoid recalculating them in loops.
+ */
+#define PRECISE_LUT_REGION_START 224
+#define PRECISE_LUT_REGION_END 239
+
static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
// these are helpers for calculations to reduce stack usage
@@ -346,7 +354,13 @@ static struct fixed31_32 translate_from_linear_space(
dc_fixpt_recip(args->gamma));
}
scratch_1 = dc_fixpt_add(one, args->a3);
- if (cal_buffer->buffer_index < 16)
+ /* In the first region (first 16 points) and in the
+ * region delimited by START/END we calculate with
+ * full precision to avoid error accumulation.
+ */
+ if ((cal_buffer->buffer_index >= PRECISE_LUT_REGION_START &&
+ cal_buffer->buffer_index <= PRECISE_LUT_REGION_END) ||
+ (cal_buffer->buffer_index < 16))
scratch_2 = dc_fixpt_pow(args->arg,
dc_fixpt_recip(args->gamma));
else
@@ -397,9 +411,7 @@ static struct fixed31_32 translate_from_linear_space_long(
dc_fixpt_recip(args->gamma))),
args->a2);
else
- return dc_fixpt_mul(
- args->arg,
- args->a1);
+ return dc_fixpt_mul(args->arg, args->a1);
}
static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf, struct calculate_buffer *cal_buffer)
@@ -717,7 +729,6 @@ static struct fixed31_32 calculate_mapped_value(
BREAK_TO_DEBUGGER();
result = dc_fixpt_zero;
} else {
- BREAK_TO_DEBUGGER();
result = dc_fixpt_one;
}
@@ -976,6 +987,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma,
cal_buffer->buffer_index = 0; // see var definition for more info
rgb += 32; // first 32 points have problems with fixed point, too small
coord_x += 32;
+
for (i = 32; i <= hw_points_num; i++) {
if (!is_clipped) {
if (use_eetf) {
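In plain form, the precision guard this hunk introduces: exact dc_fixpt_pow() evaluation is used for the first 16 LUT points and again for points 224 through 239, with the cheaper incremental approximation everywhere else. A standalone restatement of the region test, constants copied from the hunk:

#include <stdbool.h>

#define PRECISE_LUT_REGION_START 224
#define PRECISE_LUT_REGION_END   239

/* True when a LUT point must take the full-precision pow() path. */
static bool needs_full_precision(unsigned int buffer_index)
{
	return buffer_index < 16 ||
	       (buffer_index >= PRECISE_LUT_REGION_START &&
		buffer_index <= PRECISE_LUT_REGION_END);
}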
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index f244b72e74e0..73ca49f05bd3 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -128,8 +128,12 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
- /* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ /* Some MST displays may report the internal panel as an HDCP RX, and
+ * the immediate repeater's internal panel is possibly not included in
+ * DEVICE_COUNT, so compare against 1 + get_device_count(hdcp): device
+ * count plus one must be greater than or equal to tracked hdcp displays.
+ */
+ return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index 549c113abcf7..a0895a7efda2 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -207,8 +207,11 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
- /* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
+ /* Some MST displays may report the internal panel as an HDCP RX, and the */
+ /* immediate repeater's internal panel is possibly not included in */
+ /* DEVICE_COUNT, so compare against 1 + get_device_count(hdcp): device */
+ /* count plus one must be greater than or equal to tracked hdcp displays. */
+ return ((1 + get_device_count(hdcp)) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
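A worked example of why the slack of one matters in both HDCP checks above: an MST repeater with an internal panel may report DEVICE_COUNT = 2 while the driver tracks 3 active displays, the internal panel included. The relaxed comparison only flags a mismatch when even DEVICE_COUNT plus one falls short. Restated standalone:

#include <stdio.h>

/* Relaxed topology check: tolerate the repeater's internal panel being
 * absent from DEVICE_COUNT. Mirrors the two hunks above. */
static int device_count_ok(unsigned int device_count,
			   unsigned int active_displays)
{
	return (1 + device_count) >= active_displays;
}

int main(void)
{
	printf("%d\n", device_count_ok(2, 3)); /* 1: previously a mismatch */
	printf("%d\n", device_count_ok(2, 5)); /* 0: still a real mismatch */
	return 0;
}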
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index cc983f662157..4fd8bce95d84 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -82,22 +82,24 @@ struct abm_parameters {
unsigned char deviation_gain;
unsigned char min_knee;
unsigned char max_knee;
+ unsigned short blRampReduction;
+ unsigned short blRampStart;
};
static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = {
-// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0},
- {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf},
- {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0},
- {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
+ {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0, 0xCCCC, 0xCCCC},
+ {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf, 0xCCCC, 0xCCCC},
+ {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0, 0xCCCC, 0xCCCC},
+ {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
};
static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = {
-// min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee
- {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
- {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
- {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
- {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70},
+// min_red max_red bright_pos dark_pos bright_gain contrast dev min_knee max_knee blRed blStart
+ {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+ {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+ {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
+ {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70, 0xCCCC, 0xCCCC},
};
static const struct abm_parameters * const abm_settings[] = {
@@ -662,6 +664,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
{
struct iram_table_v_2_2 ram_table;
struct abm_config_table config;
+ unsigned int set = params.set;
bool result = false;
uint32_t i, j = 0;
@@ -710,6 +713,18 @@ bool dmub_init_abm_config(struct resource_pool *res_pool,
config.max_knee[i] = ram_table.max_knee[i];
}
+ if (params.backlight_ramping_override) {
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ config.blRampReduction[i] = params.backlight_ramping_reduction;
+ config.blRampStart[i] = params.backlight_ramping_start;
+ }
+ } else {
+ for (i = 0; i < NUM_AGGR_LEVEL; i++) {
+ config.blRampReduction[i] = abm_settings[set][i].blRampReduction;
+ config.blRampStart[i] = abm_settings[set][i].blRampStart;
+ }
+ }
+
config.min_abm_backlight = ram_table.min_abm_backlight;
#if defined(CONFIG_DRM_AMD_DC_DCN)
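The override plumbing above, stated in plain form: when backlight_ramping_override is set, one caller-supplied reduction/start pair fans out to every aggression level; otherwise each level keeps its per-table default (0xCCCC in both config tables). Field and table names follow the hunk, and the types are simplified stand-ins:

#include <stdbool.h>
#include <stdint.h>

#define NUM_AGGR_LEVEL 4

struct abm_level_params { uint16_t bl_ramp_reduction, bl_ramp_start; };

/* Fill per-level ramp values, honoring a single global override. */
static void fill_bl_ramp(uint16_t red[], uint16_t start[],
			 const struct abm_level_params defaults[],
			 bool override, uint16_t o_red, uint16_t o_start)
{
	for (int i = 0; i < NUM_AGGR_LEVEL; i++) {
		red[i]   = override ? o_red   : defaults[i].bl_ramp_reduction;
		start[i] = override ? o_start : defaults[i].bl_ramp_start;
	}
}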
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
index fa4728d88092..6f2eecce6baa 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h
@@ -39,6 +39,7 @@ enum abm_defines {
struct dmcu_iram_parameters {
unsigned int *backlight_lut_array;
unsigned int backlight_lut_array_size;
+ bool backlight_ramping_override;
unsigned int backlight_ramping_reduction;
unsigned int backlight_ramping_start;
unsigned int min_abm_backlight;
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index c38635992101..3cb8d4c5c1a3 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -499,6 +499,7 @@ enum atombios_firmware_capability
ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100,
ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200,
ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400,
+ ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT = 0x0008000,
};
enum atom_cooling_solution_id{
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 89be49a43500..4bdbcce7092d 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -227,6 +227,7 @@ struct smu_bios_boot_up_values
uint32_t content_revision;
uint32_t fclk;
uint32_t lclk;
+ uint32_t firmware_caps;
};
enum smu_table_id
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/inc/smu_types.h
index 4a6d1381df16..720d15612fe1 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_types.h
@@ -178,7 +178,7 @@
__SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \
__SMU_DUMMY_MAP(GET_UMC_FW_WA), \
__SMU_DUMMY_MAP(Mode1Reset), \
- __SMU_DUMMY_MAP(Spare), \
+ __SMU_DUMMY_MAP(RlcPowerNotify), \
__SMU_DUMMY_MAP(SetHardMinIspiclkByFreq), \
__SMU_DUMMY_MAP(SetHardMinIspxclkByFreq), \
__SMU_DUMMY_MAP(SetSoftMinSocclkByFreq), \
@@ -209,6 +209,8 @@
__SMU_DUMMY_MAP(SetSoftMinCclk), \
__SMU_DUMMY_MAP(SetSoftMaxCclk), \
__SMU_DUMMY_MAP(SetGpoFeaturePMask), \
+ __SMU_DUMMY_MAP(DisallowGpo), \
+ __SMU_DUMMY_MAP(Enable2ndUSB20Port), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index e5aa0725147c..13de692a4213 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -30,7 +30,7 @@
#define SMU11_DRIVER_IF_VERSION_NV10 0x36
#define SMU11_DRIVER_IF_VERSION_NV12 0x36
#define SMU11_DRIVER_IF_VERSION_NV14 0x36
-#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3B
+#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x3D
#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xC
#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x02
#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
index 35dd6072cc45..d2e10a724560 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
@@ -134,6 +134,10 @@
#define PPSMC_MSG_SetGpoFeaturePMask 0x45
#define PPSMC_MSG_SetSMBUSInterrupt 0x46
-#define PPSMC_Message_Count 0x47
+#define PPSMC_MSG_DisallowGpo 0x56
+
+#define PPSMC_MSG_Enable2ndUSB20Port 0x57
+
+#define PPSMC_Message_Count 0x58
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
index 7e69b3bd311b..55d7892e4e0e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
@@ -41,7 +41,7 @@
#define PPSMC_MSG_PowerUpIspByTile 0x7
#define PPSMC_MSG_PowerDownVcn 0x8 // VCN is power gated by default
#define PPSMC_MSG_PowerUpVcn 0x9
-#define PPSMC_MSG_spare 0xA
+#define PPSMC_MSG_RlcPowerNotify 0xA
#define PPSMC_MSG_SetHardMinVcn 0xB // For wireless display
#define PPSMC_MSG_SetSoftMinGfxclk 0xC //Sets SoftMin for GFXCLK. Arg is in MHz
#define PPSMC_MSG_ActiveProcessNotify 0xD
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
index e57e64bbacdc..88322781e447 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.c
@@ -251,7 +251,7 @@ static int smu10_set_hard_min_gfxclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cl
smu10_data->gfx_actual_soft_min_freq = clock;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
- smu10_data->gfx_actual_soft_min_freq,
+ clock,
NULL);
}
return 0;
@@ -558,7 +558,8 @@ static int smu10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* enable the pp_od_clk_voltage sysfs file */
hwmgr->od_enabled = 1;
-
+ /* disabled fine grain tuning function by default */
+ data->fine_grain_enabled = 0;
return result;
}
@@ -597,6 +598,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100;
uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1;
uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1;
+ uint32_t fine_grain_min_freq = 0, fine_grain_max_freq = 0;
if (hwmgr->smu_version < 0x1E3700) {
pr_info("smu firmware version too old, can not set dpm level\n");
@@ -613,6 +615,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_max_freq_limit/100,
@@ -648,6 +658,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -658,6 +676,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
min_mclk,
@@ -668,6 +694,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
SMU10_UMD_PSTATE_GFXCLK,
@@ -703,6 +737,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
min_sclk,
@@ -741,6 +783,14 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_LOW:
+ data->fine_grain_enabled = 0;
+
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &fine_grain_min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &fine_grain_max_freq);
+
+ data->gfx_actual_soft_min_freq = fine_grain_min_freq;
+ data->gfx_actual_soft_max_freq = fine_grain_max_freq;
+
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinGfxClk,
data->gfx_min_freq_limit/100,
@@ -759,6 +809,7 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
+ data->fine_grain_enabled = 1;
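+		/* deliberate fallthrough: manual mode only unlocks fine-grain tuning */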
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
break;
@@ -948,6 +999,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
struct smu10_voltage_dependency_table *mclk_table =
data->clock_vol_info.vdd_dep_on_fclk;
uint32_t i, now, size = 0;
+ uint32_t min_freq, max_freq = 0;
+ uint32_t ret = 0;
switch (type) {
case PP_SCLK:
@@ -983,18 +1036,28 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
- size = sprintf(buf, "%s:\n", "OD_SCLK");
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
+ size = sprintf(buf, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
- (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : data->gfx_min_freq_limit/100);
- size += sprintf(buf + size, "1: %10uMhz\n", data->gfx_max_freq_limit/100);
+ (data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
+ size += sprintf(buf + size, "1: %10uMhz\n",
+ (data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
- uint32_t min_freq, max_freq = 0;
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
- smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (ret)
+ return ret;
+ ret = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (ret)
+ return ret;
size = sprintf(buf, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
@@ -1414,23 +1477,96 @@ static int smu10_set_fine_grain_clk_vol(struct pp_hwmgr *hwmgr,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
{
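+	/*
+	 * Assumed mapping to the pp_od_clk_voltage sysfs interface: the
+	 * "s <0|1> <MHz>" command edits the soft min/max (EDIT_SCLK_VDDC_TABLE),
+	 * "r" restores firmware defaults (RESTORE_DEFAULT_TABLE) and "c"
+	 * pushes the staged values to the SMU (COMMIT_DPM_TABLE).
+	 */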
+ uint32_t min_freq, max_freq = 0;
+ struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
+ int ret = 0;
+
if (!hwmgr->od_enabled) {
pr_err("Fine grain not support\n");
return -EINVAL;
}
- if (size != 2) {
- pr_err("Input parameter number not correct\n");
+ if (!smu10_data->fine_grain_enabled) {
+ pr_err("Fine grain not started\n");
return -EINVAL;
}
if (type == PP_OD_EDIT_SCLK_VDDC_TABLE) {
- if (input[0] == 0)
- smu10_set_hard_min_gfxclk_by_freq(hwmgr, input[1]);
- else if (input[0] == 1)
- smu10_set_soft_max_gfxclk_by_freq(hwmgr, input[1]);
- else
+ if (size != 2) {
+ pr_err("Input parameter number not correct\n");
return -EINVAL;
+ }
+
+ if (input[0] == 0) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ if (input[1] < min_freq) {
+ pr_err("Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], min_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_min_freq = input[1];
+ } else if (input[0] == 1) {
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+ if (input[1] > max_freq) {
+ pr_err("Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], max_freq);
+ return -EINVAL;
+ }
+ smu10_data->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ } else if (type == PP_OD_RESTORE_DEFAULT_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMinGfxclkFrequency, &min_freq);
+ smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxGfxclkFrequency, &max_freq);
+
+ smu10_data->gfx_actual_soft_min_freq = min_freq;
+ smu10_data->gfx_actual_soft_max_freq = max_freq;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else if (type == PP_OD_COMMIT_DPM_TABLE) {
+ if (size != 0) {
+ pr_err("Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
+ if (smu10_data->gfx_actual_soft_min_freq > smu10_data->gfx_actual_soft_max_freq) {
+ pr_err("The setting minimun sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu10_data->gfx_actual_soft_min_freq, smu10_data->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetHardMinGfxClk,
+ smu10_data->gfx_actual_soft_min_freq,
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smum_send_msg_to_smc_with_parameter(hwmgr,
+ PPSMC_MSG_SetSoftMaxGfxClk,
+ smu10_data->gfx_actual_soft_max_freq,
+ NULL);
+ if (ret)
+ return ret;
+ } else {
+ return -EINVAL;
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
index 6c9b5f060902..808e0ecbe1f0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu10_hwmgr.h
@@ -283,6 +283,7 @@ struct smu10_hwmgr {
uint32_t vclk_soft_min;
uint32_t dclk_soft_min;
uint32_t gfx_actual_soft_min_freq;
+ uint32_t gfx_actual_soft_max_freq;
uint32_t gfx_min_freq_limit;
uint32_t gfx_max_freq_limit; /* in 10Khz*/
@@ -299,6 +300,8 @@ struct smu10_hwmgr {
bool need_min_deep_sleep_dcefclk;
uint32_t deep_sleep_dcefclk;
uint32_t num_active_display;
+
+ bool fine_grain_enabled;
};
struct pp_hwmgr;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index cf999b7a2164..8b867a6d52b5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -847,12 +847,10 @@ static int smu_sw_init(void *handle)
smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
- if (!amdgpu_sriov_vf(adev) || (adev->asic_type != CHIP_NAVI12)) {
- ret = smu_init_microcode(smu);
- if (ret) {
- dev_err(adev->dev, "Failed to load smu firmware!\n");
- return ret;
- }
+ ret = smu_init_microcode(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to load smu firmware!\n");
+ return ret;
}
ret = smu_smc_table_sw_init(smu);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 3f20f77afdd2..12b36eb0ff6a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -128,6 +128,8 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(SetGpoFeaturePMask, PPSMC_MSG_SetGpoFeaturePMask, 0),
+ MSG_MAP(DisallowGpo, PPSMC_MSG_DisallowGpo, 0),
+ MSG_MAP(Enable2ndUSB20Port, PPSMC_MSG_Enable2ndUSB20Port, 0),
};
static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = {
@@ -302,6 +304,9 @@ static int sienna_cichlid_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC)
+ smu->dc_controlled_by_gpio = true;
+
if (powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_BACO ||
powerplay_table->platform_caps & SMU_11_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
@@ -377,7 +382,7 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
- SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
@@ -386,10 +391,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
- sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
+ sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
- smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
goto err0_out;
smu_table->metrics_time = 0;
@@ -418,7 +423,8 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
uint32_t *value)
{
struct smu_table_context *smu_table= &smu->smu_table;
- SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ SmuMetrics_t *metrics =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
int ret = 0;
mutex_lock(&smu->metrics_lock);
@@ -1065,12 +1071,18 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
+ if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
+ if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
+ if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+ pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
return 0;
}
@@ -1156,7 +1168,9 @@ static int sienna_cichlid_get_fan_parameters(struct smu_context *smu)
static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *buf)
{
- DpmActivityMonitorCoeffInt_t activity_monitor;
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
uint32_t i, size = 0;
int16_t workload_type = 0;
static const char *profile_name[] = {
@@ -1198,7 +1212,7 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
result = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, workload_type,
- (void *)(&activity_monitor), false);
+ (void *)(&activity_monitor_external), false);
if (result) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return result;
@@ -1211,43 +1225,43 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
" ",
0,
"GFXCLK",
- activity_monitor.Gfx_FPS,
- activity_monitor.Gfx_MinFreqStep,
- activity_monitor.Gfx_MinActiveFreqType,
- activity_monitor.Gfx_MinActiveFreq,
- activity_monitor.Gfx_BoosterFreqType,
- activity_monitor.Gfx_BoosterFreq,
- activity_monitor.Gfx_PD_Data_limit_c,
- activity_monitor.Gfx_PD_Data_error_coeff,
- activity_monitor.Gfx_PD_Data_error_rate_coeff);
+ activity_monitor->Gfx_FPS,
+ activity_monitor->Gfx_MinFreqStep,
+ activity_monitor->Gfx_MinActiveFreqType,
+ activity_monitor->Gfx_MinActiveFreq,
+ activity_monitor->Gfx_BoosterFreqType,
+ activity_monitor->Gfx_BoosterFreq,
+ activity_monitor->Gfx_PD_Data_limit_c,
+ activity_monitor->Gfx_PD_Data_error_coeff,
+ activity_monitor->Gfx_PD_Data_error_rate_coeff);
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
1,
"SOCCLK",
- activity_monitor.Fclk_FPS,
- activity_monitor.Fclk_MinFreqStep,
- activity_monitor.Fclk_MinActiveFreqType,
- activity_monitor.Fclk_MinActiveFreq,
- activity_monitor.Fclk_BoosterFreqType,
- activity_monitor.Fclk_BoosterFreq,
- activity_monitor.Fclk_PD_Data_limit_c,
- activity_monitor.Fclk_PD_Data_error_coeff,
- activity_monitor.Fclk_PD_Data_error_rate_coeff);
+ activity_monitor->Fclk_FPS,
+ activity_monitor->Fclk_MinFreqStep,
+ activity_monitor->Fclk_MinActiveFreqType,
+ activity_monitor->Fclk_MinActiveFreq,
+ activity_monitor->Fclk_BoosterFreqType,
+ activity_monitor->Fclk_BoosterFreq,
+ activity_monitor->Fclk_PD_Data_limit_c,
+ activity_monitor->Fclk_PD_Data_error_coeff,
+ activity_monitor->Fclk_PD_Data_error_rate_coeff);
size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
" ",
2,
"MEMLK",
- activity_monitor.Mem_FPS,
- activity_monitor.Mem_MinFreqStep,
- activity_monitor.Mem_MinActiveFreqType,
- activity_monitor.Mem_MinActiveFreq,
- activity_monitor.Mem_BoosterFreqType,
- activity_monitor.Mem_BoosterFreq,
- activity_monitor.Mem_PD_Data_limit_c,
- activity_monitor.Mem_PD_Data_error_coeff,
- activity_monitor.Mem_PD_Data_error_rate_coeff);
+ activity_monitor->Mem_FPS,
+ activity_monitor->Mem_MinFreqStep,
+ activity_monitor->Mem_MinActiveFreqType,
+ activity_monitor->Mem_MinActiveFreq,
+ activity_monitor->Mem_BoosterFreqType,
+ activity_monitor->Mem_BoosterFreq,
+ activity_monitor->Mem_PD_Data_limit_c,
+ activity_monitor->Mem_PD_Data_error_coeff,
+ activity_monitor->Mem_PD_Data_error_rate_coeff);
}
return size;
@@ -1255,7 +1269,10 @@ static int sienna_cichlid_get_power_profile_mode(struct smu_context *smu, char *
static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
- DpmActivityMonitorCoeffInt_t activity_monitor;
+
+ DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
+ DpmActivityMonitorCoeffInt_t *activity_monitor =
+ &(activity_monitor_external.DpmActivityMonitorCoeffInt);
int workload_type, ret = 0;
smu->power_profile_mode = input[size];
@@ -1269,7 +1286,7 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), false);
+ (void *)(&activity_monitor_external), false);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
return ret;
@@ -1277,43 +1294,43 @@ static int sienna_cichlid_set_power_profile_mode(struct smu_context *smu, long *
switch (input[0]) {
case 0: /* Gfxclk */
- activity_monitor.Gfx_FPS = input[1];
- activity_monitor.Gfx_MinFreqStep = input[2];
- activity_monitor.Gfx_MinActiveFreqType = input[3];
- activity_monitor.Gfx_MinActiveFreq = input[4];
- activity_monitor.Gfx_BoosterFreqType = input[5];
- activity_monitor.Gfx_BoosterFreq = input[6];
- activity_monitor.Gfx_PD_Data_limit_c = input[7];
- activity_monitor.Gfx_PD_Data_error_coeff = input[8];
- activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
+ activity_monitor->Gfx_FPS = input[1];
+ activity_monitor->Gfx_MinFreqStep = input[2];
+ activity_monitor->Gfx_MinActiveFreqType = input[3];
+ activity_monitor->Gfx_MinActiveFreq = input[4];
+ activity_monitor->Gfx_BoosterFreqType = input[5];
+ activity_monitor->Gfx_BoosterFreq = input[6];
+ activity_monitor->Gfx_PD_Data_limit_c = input[7];
+ activity_monitor->Gfx_PD_Data_error_coeff = input[8];
+ activity_monitor->Gfx_PD_Data_error_rate_coeff = input[9];
break;
case 1: /* Socclk */
- activity_monitor.Fclk_FPS = input[1];
- activity_monitor.Fclk_MinFreqStep = input[2];
- activity_monitor.Fclk_MinActiveFreqType = input[3];
- activity_monitor.Fclk_MinActiveFreq = input[4];
- activity_monitor.Fclk_BoosterFreqType = input[5];
- activity_monitor.Fclk_BoosterFreq = input[6];
- activity_monitor.Fclk_PD_Data_limit_c = input[7];
- activity_monitor.Fclk_PD_Data_error_coeff = input[8];
- activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
+ activity_monitor->Fclk_FPS = input[1];
+ activity_monitor->Fclk_MinFreqStep = input[2];
+ activity_monitor->Fclk_MinActiveFreqType = input[3];
+ activity_monitor->Fclk_MinActiveFreq = input[4];
+ activity_monitor->Fclk_BoosterFreqType = input[5];
+ activity_monitor->Fclk_BoosterFreq = input[6];
+ activity_monitor->Fclk_PD_Data_limit_c = input[7];
+ activity_monitor->Fclk_PD_Data_error_coeff = input[8];
+ activity_monitor->Fclk_PD_Data_error_rate_coeff = input[9];
break;
case 2: /* Memlk */
- activity_monitor.Mem_FPS = input[1];
- activity_monitor.Mem_MinFreqStep = input[2];
- activity_monitor.Mem_MinActiveFreqType = input[3];
- activity_monitor.Mem_MinActiveFreq = input[4];
- activity_monitor.Mem_BoosterFreqType = input[5];
- activity_monitor.Mem_BoosterFreq = input[6];
- activity_monitor.Mem_PD_Data_limit_c = input[7];
- activity_monitor.Mem_PD_Data_error_coeff = input[8];
- activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
+ activity_monitor->Mem_FPS = input[1];
+ activity_monitor->Mem_MinFreqStep = input[2];
+ activity_monitor->Mem_MinActiveFreqType = input[3];
+ activity_monitor->Mem_MinActiveFreq = input[4];
+ activity_monitor->Mem_BoosterFreqType = input[5];
+ activity_monitor->Mem_BoosterFreq = input[6];
+ activity_monitor->Mem_PD_Data_limit_c = input[7];
+ activity_monitor->Mem_PD_Data_error_coeff = input[8];
+ activity_monitor->Mem_PD_Data_error_rate_coeff = input[9];
break;
}
ret = smu_cmn_update_table(smu,
SMU_TABLE_ACTIVITY_MONITOR_COEFF, WORKLOAD_PPLIB_CUSTOM_BIT,
- (void *)(&activity_monitor), true);
+ (void *)(&activity_monitor_external), true);
if (ret) {
dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
return ret;
@@ -2355,7 +2372,7 @@ static void sienna_cichlid_fill_i2c_req(SwI2cRequest_t *req, bool write,
{
int i;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = 1;
req->I2CSpeed = 2;
req->SlaveAddress = address;
req->NumCmds = numbytes;
@@ -2582,52 +2599,54 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
struct smu_table_context *smu_table = &smu->smu_table;
struct gpu_metrics_v1_0 *gpu_metrics =
(struct gpu_metrics_v1_0 *)smu_table->gpu_metrics_table;
- SmuMetrics_t metrics;
+ SmuMetricsExternal_t metrics_external;
+ SmuMetrics_t *metrics =
+ &(metrics_external.SmuMetrics);
int ret = 0;
ret = smu_cmn_get_metrics_table(smu,
- &metrics,
+ &metrics_external,
true);
if (ret)
return ret;
smu_v11_0_init_gpu_metrics_v1_0(gpu_metrics);
- gpu_metrics->temperature_edge = metrics.TemperatureEdge;
- gpu_metrics->temperature_hotspot = metrics.TemperatureHotspot;
- gpu_metrics->temperature_mem = metrics.TemperatureMem;
- gpu_metrics->temperature_vrgfx = metrics.TemperatureVrGfx;
- gpu_metrics->temperature_vrsoc = metrics.TemperatureVrSoc;
- gpu_metrics->temperature_vrmem = metrics.TemperatureVrMem0;
+ gpu_metrics->temperature_edge = metrics->TemperatureEdge;
+ gpu_metrics->temperature_hotspot = metrics->TemperatureHotspot;
+ gpu_metrics->temperature_mem = metrics->TemperatureMem;
+ gpu_metrics->temperature_vrgfx = metrics->TemperatureVrGfx;
+ gpu_metrics->temperature_vrsoc = metrics->TemperatureVrSoc;
+ gpu_metrics->temperature_vrmem = metrics->TemperatureVrMem0;
- gpu_metrics->average_gfx_activity = metrics.AverageGfxActivity;
- gpu_metrics->average_umc_activity = metrics.AverageUclkActivity;
- gpu_metrics->average_mm_activity = metrics.VcnActivityPercentage;
+ gpu_metrics->average_gfx_activity = metrics->AverageGfxActivity;
+ gpu_metrics->average_umc_activity = metrics->AverageUclkActivity;
+ gpu_metrics->average_mm_activity = metrics->VcnActivityPercentage;
- gpu_metrics->average_socket_power = metrics.AverageSocketPower;
- gpu_metrics->energy_accumulator = metrics.EnergyAccumulator;
+ gpu_metrics->average_socket_power = metrics->AverageSocketPower;
+ gpu_metrics->energy_accumulator = metrics->EnergyAccumulator;
- if (metrics.AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
- gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPostDs;
+ if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
+ gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPostDs;
else
- gpu_metrics->average_gfxclk_frequency = metrics.AverageGfxclkFrequencyPreDs;
- gpu_metrics->average_uclk_frequency = metrics.AverageUclkFrequencyPostDs;
- gpu_metrics->average_vclk0_frequency = metrics.AverageVclk0Frequency;
- gpu_metrics->average_dclk0_frequency = metrics.AverageDclk0Frequency;
- gpu_metrics->average_vclk1_frequency = metrics.AverageVclk1Frequency;
- gpu_metrics->average_dclk1_frequency = metrics.AverageDclk1Frequency;
+ gpu_metrics->average_gfxclk_frequency = metrics->AverageGfxclkFrequencyPreDs;
+ gpu_metrics->average_uclk_frequency = metrics->AverageUclkFrequencyPostDs;
+ gpu_metrics->average_vclk0_frequency = metrics->AverageVclk0Frequency;
+ gpu_metrics->average_dclk0_frequency = metrics->AverageDclk0Frequency;
+ gpu_metrics->average_vclk1_frequency = metrics->AverageVclk1Frequency;
+ gpu_metrics->average_dclk1_frequency = metrics->AverageDclk1Frequency;
- gpu_metrics->current_gfxclk = metrics.CurrClock[PPCLK_GFXCLK];
- gpu_metrics->current_socclk = metrics.CurrClock[PPCLK_SOCCLK];
- gpu_metrics->current_uclk = metrics.CurrClock[PPCLK_UCLK];
- gpu_metrics->current_vclk0 = metrics.CurrClock[PPCLK_VCLK_0];
- gpu_metrics->current_dclk0 = metrics.CurrClock[PPCLK_DCLK_0];
- gpu_metrics->current_vclk1 = metrics.CurrClock[PPCLK_VCLK_1];
- gpu_metrics->current_dclk1 = metrics.CurrClock[PPCLK_DCLK_1];
+ gpu_metrics->current_gfxclk = metrics->CurrClock[PPCLK_GFXCLK];
+ gpu_metrics->current_socclk = metrics->CurrClock[PPCLK_SOCCLK];
+ gpu_metrics->current_uclk = metrics->CurrClock[PPCLK_UCLK];
+ gpu_metrics->current_vclk0 = metrics->CurrClock[PPCLK_VCLK_0];
+ gpu_metrics->current_dclk0 = metrics->CurrClock[PPCLK_DCLK_0];
+ gpu_metrics->current_vclk1 = metrics->CurrClock[PPCLK_VCLK_1];
+ gpu_metrics->current_dclk1 = metrics->CurrClock[PPCLK_DCLK_1];
- gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->throttle_status = metrics->ThrottlerStatus;
- gpu_metrics->current_fan_speed = metrics.CurrFanSpeed;
+ gpu_metrics->current_fan_speed = metrics->CurrFanSpeed;
gpu_metrics->pcie_link_width =
smu_v11_0_get_current_pcie_link_width(smu);
@@ -2650,23 +2669,82 @@ static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
static int sienna_cichlid_gpo_control(struct smu_context *smu,
bool enablement)
{
+ uint32_t smu_version;
int ret = 0;
+
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
- if (enablement)
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetGpoFeaturePMask,
- GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
- NULL);
- else
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetGpoFeaturePMask,
- 0,
- NULL);
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
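+	/*
+	 * 0x003a2500 presumably decodes to PMFW 58.37.0; from that version on
+	 * the firmware replaces SetGpoFeaturePMask with the DisallowGpo message.
+	 */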
+ if (enablement) {
+ if (smu_version < 0x003a2500) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetGpoFeaturePMask,
+ GFX_GPO_PACE_MASK | GFX_GPO_DEM_MASK,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DisallowGpo,
+ 0,
+ NULL);
+ }
+ } else {
+ if (smu_version < 0x003a2500) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetGpoFeaturePMask,
+ 0,
+ NULL);
+ } else {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_DisallowGpo,
+ 1,
+ NULL);
+ }
+ }
}
return ret;
}
+
+static int sienna_cichlid_notify_2nd_usb20_port(struct smu_context *smu)
+{
+ uint32_t smu_version;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (ret)
+ return ret;
+
+ /*
+	 * Message SMU_MSG_Enable2ndUSB20Port is supported only by
+	 * PMFW versions 58.45 and later.
+ */
+ if (smu_version < 0x003A2D00)
+ return 0;
+
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_Enable2ndUSB20Port,
+ smu->smu_table.boot_values.firmware_caps & ATOM_FIRMWARE_CAP_ENABLE_2ND_USB20PORT ?
+ 1 : 0,
+ NULL);
+}
+
+static int sienna_cichlid_system_features_control(struct smu_context *smu,
+ bool en)
+{
+ int ret = 0;
+
+ if (en) {
+ ret = sienna_cichlid_notify_2nd_usb20_port(smu);
+ if (ret)
+ return ret;
+ }
+
+ return smu_v11_0_system_features_control(smu, en);
+}
+
static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_allowed_feature_mask = sienna_cichlid_get_allowed_feature_mask,
.set_default_dpm_table = sienna_cichlid_set_default_dpm_table,
@@ -2707,7 +2785,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.set_driver_table_location = smu_v11_0_set_driver_table_location,
.set_tool_table_location = smu_v11_0_set_tool_table_location,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
- .system_features_control = smu_v11_0_system_features_control,
+ .system_features_control = sienna_cichlid_system_features_control,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
.send_smc_msg = smu_cmn_send_smc_msg,
.init_display_count = NULL,
@@ -2740,6 +2818,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.get_dpm_ultimate_freq = sienna_cichlid_get_dpm_ultimate_freq,
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.run_btc = sienna_cichlid_run_btc,
+ .set_power_source = smu_v11_0_set_power_source,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
.get_gpu_metrics = sienna_cichlid_get_gpu_metrics,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 57e120c440ea..38cd0ece24f6 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -29,6 +29,10 @@ typedef enum {
POWER_SOURCE_COUNT,
} POWER_SOURCE_e;
+#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK 1825
+#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
+#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 624065d3c079..b279dbbbce6b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -91,6 +91,11 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
const struct common_firmware_header *header;
struct amdgpu_firmware_info *ucode = NULL;
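+	/* for SR-IOV VFs the host presumably provisions the SMU firmware, so skip loading it here */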
+ if (amdgpu_sriov_vf(adev) &&
+ ((adev->asic_type == CHIP_NAVI12) ||
+ (adev->asic_type == CHIP_SIENNA_CICHLID)))
+ return 0;
+
switch (adev->asic_type) {
case CHIP_ARCTURUS:
chip_name = "arcturus";
@@ -554,6 +559,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = 0;
+ smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
break;
case 3:
default:
@@ -569,6 +575,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
+ smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
}
smu->smu_table.boot_values.format_revision = header->format_revision;
@@ -929,9 +936,13 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
if (power_src < 0)
return -EINVAL;
+ /*
+ * BIT 24-31: ControllerId (only PPT0 is supported for now)
+ * BIT 16-23: PowerSource
+ */
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GetPptLimit,
- power_src << 16,
+ (0 << 24) | (power_src << 16),
power_limit);
if (ret)
dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
@@ -941,6 +952,7 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
+ int power_src;
int ret = 0;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
@@ -948,6 +960,22 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
+ power_src = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_PWR,
+ smu->adev->pm.ac_power ?
+ SMU_POWER_SOURCE_AC :
+ SMU_POWER_SOURCE_DC);
+ if (power_src < 0)
+ return -EINVAL;
+
+ /*
+ * BIT 24-31: ControllerId (only PPT0 is supported for now)
+ * BIT 16-23: PowerSource
+ * BIT 0-15: PowerLimit
+ */
+ n &= 0xFFFF;
+ n |= 0 << 24;
+ n |= (power_src) << 16;
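+	/* e.g. assuming AC maps to index 0, a 250W request encodes as 0x000000FA */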
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
@@ -2064,6 +2092,22 @@ int smu_v11_0_deep_sleep_control(struct smu_context *smu,
}
}
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
+ if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
+ ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
+ if (ret) {
+ dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
+ return ret;
+ }
+ }
+
if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
if (ret) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index a81e5c823211..5c1482d4ca43 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -64,7 +64,7 @@ static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
- MSG_MAP(Spare, PPSMC_MSG_spare, 0),
+ MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0),
MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0),
MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0),
MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0),
@@ -252,7 +252,8 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
*value = metrics->UvdActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = metrics->CurrentSocketPower;
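+		/* CurrentSocketPower is presumably reported in mW; scale to 8.8 fixed-point watts */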
+		*value = (metrics->CurrentSocketPower << 8) / 1000;
break;
case METRICS_TEMPERATURE_EDGE:
*value = metrics->GfxTemperature / 100 *
@@ -722,6 +723,17 @@ static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
return 0;
}
+static int vangogh_system_features_control(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+
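+	/* RlcPowerNotify is assumed to be understood only by PMFW 0x43f1700 and newer */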
+ if (adev->pm.fw_version >= 0x43f1700)
+ return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
+ en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
+ else
+ return 0;
+}
+
static const struct pptable_funcs vangogh_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
@@ -749,6 +761,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.print_clk_levels = vangogh_print_fine_grain_clk,
.set_default_dpm_table = vangogh_set_default_dpm_tables,
.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
+ .system_features_control = vangogh_system_features_control,
};
void vangogh_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
index 8756766296cd..eab455493076 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.h
@@ -32,4 +32,8 @@ extern void vangogh_set_ppt_funcs(struct smu_context *smu);
#define VANGOGH_UMD_PSTATE_SOCCLK 678
#define VANGOGH_UMD_PSTATE_FCLK 800
+/* RLC Power Status */
+#define RLC_STATUS_OFF 0
+#define RLC_STATUS_NORMAL 1
+
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index dc75db8af371..f743685a20e8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -188,6 +188,7 @@ static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type
return -EINVAL;
*freq = clk_table->SocClocks[dpm_level].Freq;
break;
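+	/* on Renoir, UCLK presumably shares the FCLK DPM table, hence the shared handling with MCLK */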
+ case SMU_UCLK:
case SMU_MCLK:
if (dpm_level >= NUM_FCLK_DPM_LEVELS)
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 522d55004655..06abf2a7ce9e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -225,6 +225,7 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
break;
case SMU_FCLK:
case SMU_MCLK:
+ case SMU_UCLK:
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
index 1f8195bad536..ca891ae14d36 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c
@@ -152,7 +152,6 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev)
ret = of_reserved_mem_device_init(dev);
if (ret && ret != -ENODEV)
return ret;
- ret = 0;
for_each_available_child_of_node(np, child) {
if (of_node_name_eq(child, "pipeline")) {
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index 6b99df696384..034ee08482e0 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -81,10 +81,10 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_commit_modeset_enables(dev, old_state);
- drm_atomic_helper_wait_for_flip_done(dev, old_state);
-
drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_flip_done(dev, old_state);
+
drm_atomic_helper_cleanup_planes(dev, old_state);
}
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
index 452e505a1fd3..719a79728e24 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c
@@ -137,9 +137,10 @@ komeda_pipeline_get_first_component(struct komeda_pipeline *pipe,
u32 comp_mask)
{
struct komeda_component *c = NULL;
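+	/* find_first_bit() expects an unsigned long; widening into a local avoids reading past the u32 on 64-bit */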
+ unsigned long comp_mask_local = (unsigned long)comp_mask;
int id;
- id = find_first_bit((unsigned long *)&comp_mask, 32);
+ id = find_first_bit(&comp_mask_local, 32);
if (id < 32)
c = komeda_pipeline_get_component(pipe, id);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
index 8f32ae7c25d0..5c085116de3f 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c
@@ -704,10 +704,10 @@ komeda_compiz_set_input(struct komeda_compiz *compiz,
cin->layer_alpha = dflow->layer_alpha;
old_st = komeda_component_get_old_state(&compiz->base, drm_st);
- WARN_ON(!old_st);
/* compare with old to check if this input has been changed */
- if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
+ if (WARN_ON(!old_st) ||
+ memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin)))
c_st->changed_active_inputs |= BIT(idx);
komeda_component_add_input(c_st, &dflow->input, idx);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddd0e3239150..ba1507036f26 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -122,7 +122,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
continue;
if (funcs->atomic_best_encoder)
- new_encoder = funcs->atomic_best_encoder(connector, new_conn_state);
+ new_encoder = funcs->atomic_best_encoder(connector,
+ state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
@@ -345,8 +346,7 @@ update_connector_routing(struct drm_atomic_state *state,
funcs = connector->helper_private;
if (funcs->atomic_best_encoder)
- new_encoder = funcs->atomic_best_encoder(connector,
- new_connector_state);
+ new_encoder = funcs->atomic_best_encoder(connector, state);
else if (funcs->best_encoder)
new_encoder = funcs->best_encoder(connector);
else
@@ -1313,7 +1313,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev,
if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) {
WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK);
- funcs->atomic_commit(connector, new_conn_state);
+ funcs->atomic_commit(connector, old_state);
}
}
}
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index ae2234aae93d..5c2141e9a9f4 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -196,10 +196,10 @@
* exposed and assumed to be black).
*
* SCALING_FILTER:
- *
* Indicates scaling filter to be used for plane scaler
*
* The value of this property can be one of the following:
+ *
* Default:
* Driver's default scaling filter
* Nearest Neighbor:
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 7a01d0918861..aeb1327e3077 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -77,6 +77,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
if ((entry->map->offset & 0xffffffff) ==
(map->offset & 0xffffffff))
return entry;
+ break;
default: /* Make gcc happy */
;
}
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index fe573acf1067..ce45e380f4a2 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -314,9 +314,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map
struct dma_buf_map *map = &buffer->map;
int ret;
- if (dma_buf_map_is_set(map))
- goto out;
-
/*
* FIXME: The dependency on GEM here isn't required, we could
* convert the driver handle to a dma-buf instead and use the
@@ -329,7 +326,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map
if (ret)
return ret;
-out:
*map_copy = *map;
return 0;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index f927976eca50..74090fc3aa55 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -230,14 +230,14 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc)
*
* Setting MODE_ID to 0 will release reserved resources for the CRTC.
* SCALING_FILTER:
- * Atomic property for setting the scaling filter for CRTC scaler
+ * Atomic property for setting the scaling filter for CRTC scaler
*
- * The value of this property can be one of the following:
- * Default:
- * Driver's default scaling filter
- * Nearest Neighbor:
- * Nearest Neighbor scaling filter
+ * The value of this property can be one of the following:
*
+ * Default:
+ * Driver's default scaling filter
+ * Nearest Neighbor:
+ * Nearest Neighbor scaling filter
*/
/**
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 74f5a3197214..e95cce8e736d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -3102,6 +3102,8 @@ static int drm_cvt_modes(struct drm_connector *connector,
height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
switch (cvt->code[1] & 0x0c) {
+ /* default - because compiler doesn't see that we've enumerated all cases */
+ default:
case 0x00:
width = height * 4 / 3;
break;
@@ -3114,8 +3116,6 @@ static int drm_cvt_modes(struct drm_connector *connector,
case 0x0c:
width = height * 15 / 9;
break;
- default:
- unreachable();
}
for (j = 1; j < 5; j++) {
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 25edf670867c..4b8119510687 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -371,9 +371,9 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
console_unlock();
}
-static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
- struct drm_clip_rect *clip,
- struct dma_buf_map *dst)
+static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip,
+ struct dma_buf_map *dst)
{
struct drm_framebuffer *fb = fb_helper->fb;
unsigned int cpp = fb->format->cpp[0];
@@ -391,40 +391,86 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
}
}
-static void drm_fb_helper_dirty_work(struct work_struct *work)
+static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper,
+ struct drm_clip_rect *clip)
+{
+ struct drm_client_buffer *buffer = fb_helper->buffer;
+ struct dma_buf_map map, dst;
+ int ret;
+
+ /*
+ * We have to pin the client buffer to its current location while
+ * flushing the shadow buffer. In the general case, concurrent
+ * modesetting operations could try to move the buffer and would
+ * fail. The modeset has to be serialized by acquiring the reservation
+ * object of the underlying BO here.
+ *
+ * For fbdev emulation, we only have to protect against fbdev modeset
+ * operations. Nothing else will involve the client buffer's BO. So it
+ * is sufficient to acquire struct drm_fb_helper.lock here.
+ */
+ mutex_lock(&fb_helper->lock);
+
+ ret = drm_client_buffer_vmap(buffer, &map);
+ if (ret)
+ goto out;
+
+ dst = map;
+ drm_fb_helper_damage_blit_real(fb_helper, clip, &dst);
+
+ drm_client_buffer_vunmap(buffer);
+
+out:
+ mutex_unlock(&fb_helper->lock);
+
+ return ret;
+}
+
+static void drm_fb_helper_damage_work(struct work_struct *work)
{
struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper,
- dirty_work);
- struct drm_clip_rect *clip = &helper->dirty_clip;
+ damage_work);
+ struct drm_device *dev = helper->dev;
+ struct drm_clip_rect *clip = &helper->damage_clip;
struct drm_clip_rect clip_copy;
unsigned long flags;
- struct dma_buf_map map;
int ret;
- spin_lock_irqsave(&helper->dirty_lock, flags);
+ spin_lock_irqsave(&helper->damage_lock, flags);
clip_copy = *clip;
clip->x1 = clip->y1 = ~0;
clip->x2 = clip->y2 = 0;
- spin_unlock_irqrestore(&helper->dirty_lock, flags);
+ spin_unlock_irqrestore(&helper->damage_lock, flags);
- /* call dirty callback only when it has been really touched */
- if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) {
-
- /* Generic fbdev uses a shadow buffer */
- if (helper->buffer) {
- ret = drm_client_buffer_vmap(helper->buffer, &map);
- if (ret)
- return;
- drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map);
- }
+ /* Call damage handlers only if necessary */
+ if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2))
+ return;
- if (helper->fb->funcs->dirty)
- helper->fb->funcs->dirty(helper->fb, NULL, 0, 0,
- &clip_copy, 1);
+ if (helper->buffer) {
+ ret = drm_fb_helper_damage_blit(helper, &clip_copy);
+ if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
+ goto err;
+ }
- if (helper->buffer)
- drm_client_buffer_vunmap(helper->buffer);
+ if (helper->fb->funcs->dirty) {
+ ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1);
+ if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
+ goto err;
}
+
+ return;
+
+err:
+ /*
+ * Restore damage clip rectangle on errors. The next run
+ * of the damage worker will perform the update.
+ */
+ spin_lock_irqsave(&helper->damage_lock, flags);
+ clip->x1 = min_t(u32, clip->x1, clip_copy.x1);
+ clip->y1 = min_t(u32, clip->y1, clip_copy.y1);
+ clip->x2 = max_t(u32, clip->x2, clip_copy.x2);
+ clip->y2 = max_t(u32, clip->y2, clip_copy.y2);
+ spin_unlock_irqrestore(&helper->damage_lock, flags);
}
/**
@@ -440,10 +486,10 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs)
{
INIT_LIST_HEAD(&helper->kernel_fb_list);
- spin_lock_init(&helper->dirty_lock);
+ spin_lock_init(&helper->damage_lock);
INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker);
- INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work);
- helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0;
+ INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work);
+ helper->damage_clip.x1 = helper->damage_clip.y1 = ~0;
mutex_init(&helper->lock);
helper->funcs = funcs;
helper->dev = dev;
@@ -579,7 +625,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
return;
cancel_work_sync(&fb_helper->resume_work);
- cancel_work_sync(&fb_helper->dirty_work);
+ cancel_work_sync(&fb_helper->damage_work);
info = fb_helper->fbdev;
if (info) {
@@ -614,30 +660,30 @@ static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
fb->funcs->dirty;
}
-static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y,
- u32 width, u32 height)
+static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y,
+ u32 width, u32 height)
{
struct drm_fb_helper *helper = info->par;
- struct drm_clip_rect *clip = &helper->dirty_clip;
+ struct drm_clip_rect *clip = &helper->damage_clip;
unsigned long flags;
if (!drm_fbdev_use_shadow_fb(helper))
return;
- spin_lock_irqsave(&helper->dirty_lock, flags);
+ spin_lock_irqsave(&helper->damage_lock, flags);
clip->x1 = min_t(u32, clip->x1, x);
clip->y1 = min_t(u32, clip->y1, y);
clip->x2 = max_t(u32, clip->x2, x + width);
clip->y2 = max_t(u32, clip->y2, y + height);
- spin_unlock_irqrestore(&helper->dirty_lock, flags);
+ spin_unlock_irqrestore(&helper->damage_lock, flags);
- schedule_work(&helper->dirty_work);
+ schedule_work(&helper->damage_work);
}
/**
* drm_fb_helper_deferred_io() - fbdev deferred_io callback function
* @info: fb_info struct pointer
- * @pagelist: list of dirty mmap framebuffer pages
+ * @pagelist: list of mmap framebuffer pages that have to be flushed
*
* This function is used as the &fb_deferred_io.deferred_io
* callback function for flushing the fbdev mmap writes.
@@ -662,7 +708,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info,
y1 = min / info->fix.line_length;
y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length),
info->var.yres);
- drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1);
+ drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1);
}
}
EXPORT_SYMBOL(drm_fb_helper_deferred_io);
@@ -699,8 +745,7 @@ ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf,
ret = fb_sys_write(info, buf, count, ppos);
if (ret > 0)
- drm_fb_helper_dirty(info, 0, 0, info->var.xres,
- info->var.yres);
+ drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres);
return ret;
}
@@ -717,8 +762,7 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
sys_fillrect(info, rect);
- drm_fb_helper_dirty(info, rect->dx, rect->dy,
- rect->width, rect->height);
+ drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_fillrect);
@@ -733,8 +777,7 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
sys_copyarea(info, area);
- drm_fb_helper_dirty(info, area->dx, area->dy,
- area->width, area->height);
+ drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_copyarea);
@@ -749,8 +792,7 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info,
const struct fb_image *image)
{
sys_imageblit(info, image);
- drm_fb_helper_dirty(info, image->dx, image->dy,
- image->width, image->height);
+ drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_sys_imageblit);
@@ -765,8 +807,7 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
- drm_fb_helper_dirty(info, rect->dx, rect->dy,
- rect->width, rect->height);
+ drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect);
@@ -781,8 +822,7 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
cfb_copyarea(info, area);
- drm_fb_helper_dirty(info, area->dx, area->dy,
- area->width, area->height);
+ drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea);
@@ -797,8 +837,7 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
cfb_imageblit(info, image);
- drm_fb_helper_dirty(info, image->dx, image->dy,
- image->width, image->height);
+ drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height);
}
EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit);
@@ -1988,14 +2027,19 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
if (!fb_helper->dev)
return;
- if (fbi && fbi->fbdefio) {
- fb_deferred_io_cleanup(fbi);
- shadow = fbi->screen_buffer;
+ if (fbi) {
+ if (fbi->fbdefio)
+ fb_deferred_io_cleanup(fbi);
+ if (drm_fbdev_use_shadow_fb(fb_helper))
+ shadow = fbi->screen_buffer;
}
drm_fb_helper_fini(fb_helper);
- vfree(shadow);
+ if (shadow)
+ vfree(shadow);
+ else
+ drm_client_buffer_vunmap(fb_helper->buffer);
drm_client_framebuffer_delete(fb_helper->buffer);
}
@@ -2189,6 +2233,9 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
if (ret > 0)
*ppos += ret;
+ if (ret > 0)
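+	/* ->write() bypasses the deferred-I/O page tracking, so mark the whole screen damaged */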
+ drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual);
+
return ret ? ret : err;
}
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 499189c48f0b..9825c378dfa6 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (!obj)
return ERR_PTR(-ENOMEM);
+ shmem = to_drm_gem_shmem_obj(obj);
+
if (!obj->funcs)
obj->funcs = &drm_gem_shmem_funcs;
- if (private)
+ if (private) {
drm_gem_private_object_init(dev, obj, size);
- else
+		shmem->map_wc = false; /* dma-buf mappings always use writecombine */
+ } else {
ret = drm_gem_object_init(dev, obj, size);
+ }
if (ret)
goto err_free;
@@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private)
if (ret)
goto err_release;
- shmem = to_drm_gem_shmem_obj(obj);
mutex_init(&shmem->pages_lock);
mutex_init(&shmem->vmap_lock);
INIT_LIST_HEAD(&shmem->madv_list);
@@ -284,7 +287,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct
if (ret)
goto err_zero_use;
- if (!shmem->map_cached)
+ if (shmem->map_wc)
prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
VM_MAP, prot);
@@ -477,33 +480,6 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj)
EXPORT_SYMBOL(drm_gem_shmem_purge);
/**
- * drm_gem_shmem_create_object_cached - Create a shmem buffer object with
- * cached mappings
- * @dev: DRM device
- * @size: Size of the object to allocate
- *
- * By default, shmem buffer objects use writecombine mappings. This
- * function implements struct drm_driver.gem_create_object for shmem
- * buffer objects with cached mappings.
- *
- * Returns:
- * A struct drm_gem_shmem_object * on success or NULL negative on failure.
- */
-struct drm_gem_object *
-drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size)
-{
- struct drm_gem_shmem_object *shmem;
-
- shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
- if (!shmem)
- return NULL;
- shmem->map_cached = true;
-
- return &shmem->base;
-}
-EXPORT_SYMBOL(drm_gem_shmem_create_object_cached);
-
-/**
* drm_gem_shmem_dumb_create - Create a dumb shmem buffer object
* @file: DRM file structure to create the dumb buffer for
* @dev: DRM device
@@ -626,7 +602,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- if (!shmem->map_cached)
+ if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index bbd235473645..6d38c5c17f23 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -145,10 +145,8 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- fput(vma->vm_file);
- get_file(etnaviv_obj->base.filp);
vma->vm_pgoff = 0;
- vma->vm_file = etnaviv_obj->base.filp;
+ vma_set_file(vma, etnaviv_obj->base.filp);
vma->vm_page_prot = vm_page_prot;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index ce82d654d0f2..34d78c654df3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1436,6 +1436,9 @@ struct intel_dp {
bool ycbcr_444_to_420;
} dfp;
+ /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
+ struct pm_qos_request pm_qos;
+
/* Display stream compression testing */
bool force_dsc_en;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 2165398d2c7c..37f1a10fd021 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -1489,7 +1489,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
- cpu_latency_qos_update_request(&i915->pm_qos, 0);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, 0);
intel_dp_check_edp(intel_dp);
@@ -1622,7 +1622,7 @@ done:
ret = recv_bytes;
out:
- cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);
+ cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
if (vdd)
edp_panel_vdd_off(intel_dp, false);
@@ -1898,6 +1898,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
+ if (cpu_latency_qos_request_active(&intel_dp->pm_qos))
+ cpu_latency_qos_remove_request(&intel_dp->pm_qos);
+
kfree(intel_dp->aux.name);
}
@@ -1950,6 +1953,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
+ cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE);
}
bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 0c8684634fca..27f04aed8764 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -23,6 +23,7 @@
*
*/
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -719,11 +720,13 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
}
static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_dp *intel_dp = intel_connector->mst_port;
- struct intel_crtc *crtc = to_intel_crtc(state->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc);
return &intel_dp->mst_encoders[crtc->pipe]->base.base;
}
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index ad5cc13037ae..1c939f9c9bc9 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -297,13 +297,9 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
*/
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
{
- struct irq_desc *desc;
-
if (!HAS_LPE_AUDIO(dev_priv))
return;
- desc = irq_to_desc(dev_priv->lpe_audio.irq);
-
lpe_audio_platdev_destroy(dev_priv);
irq_free_desc(dev_priv->lpe_audio.irq);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 0dd477e56573..04e9c04545ad 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -114,8 +114,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
if (ret)
return ret;
- fput(vma->vm_file);
- vma->vm_file = get_file(obj->base.filp);
+ vma_set_file(vma, obj->base.filp);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index b07dc1156a0e..bd3046e5a934 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -382,7 +382,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
return true;
if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
+ (vma->node.start + vma->node.size + 4095) >> 32)
return true;
if (flags & __EXEC_OBJECT_NEEDS_MAP &&
@@ -1046,7 +1046,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach
GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32));
cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
- __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1));
+ i915_gem_object_flush_map(obj);
i915_gem_object_unpin_map(obj);
intel_gt_chipset_flush(cache->rq->engine->gt);
@@ -1296,6 +1296,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto err_pool;
}
+ memset32(cmd, 0, pool->obj->base.size / sizeof(u32));
+
batch = i915_vma_instance(pool->obj, vma->vm, NULL);
if (IS_ERR(batch)) {
err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 3d69e51f3e4d..ec28a6cde49b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -893,8 +893,9 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
* requires avoiding extraneous references to their filp, hence why
* we prefer to use an anonymous file for their mmaps.
*/
- fput(vma->vm_file);
- vma->vm_file = anon;
+ vma_set_file(vma, anon);
+ /* Drop the initial creation reference, the vma is now holding one. */
+ fput(anon);
switch (mmo->mmap_type) {
case I915_MMAP_TYPE_WC:
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 180c23e2e25e..602f1a0bc587 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), huc_def(icl, 9, 0, 0)) \
fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \
+ fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \
fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 93265951fdbb..b0899b665e85 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1166,7 +1166,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
}
}
if (IS_ERR(src)) {
- unsigned long x, n;
+ unsigned long x, n, remain;
void *ptr;
/*
@@ -1177,14 +1177,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
* We don't care about copying too much here as we only
* validate up to the end of the batch.
*/
+ remain = length;
if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
- length = round_up(length,
+ remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset);
- for (n = offset >> PAGE_SHIFT; length; n++) {
- int len = min(length, PAGE_SIZE - x);
+ for (n = offset >> PAGE_SHIFT; remain; n++) {
+ int len = min(remain, PAGE_SIZE - x);
src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
if (needs_clflush)
@@ -1193,13 +1194,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
kunmap_atomic(src);
ptr += len;
- length -= len;
+ remain -= len;
x = 0;
}
}
i915_gem_object_unpin_pages(src_obj);
+ memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
/* dst_obj is returned with vmap pinned */
return dst;
}
@@ -1392,11 +1395,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)
#define LENGTH_BIAS 2
-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
- return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
/**
* intel_engine_cmd_parser() - parse a batch buffer for privilege violations
* @engine: the engine on which the batch is to execute
@@ -1538,16 +1536,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
ret = 0; /* allow execution */
}
}
-
- if (shadow_needs_clflush(shadow->obj))
- drm_clflush_virt_range(batch_end, 8);
}
- if (shadow_needs_clflush(shadow->obj)) {
- void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
- drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
- }
+ i915_gem_object_flush_map(shadow->obj);
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 320856b665a1..88ad754962af 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -578,8 +578,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
-
intel_gt_init_workarounds(dev_priv);
/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -626,7 +624,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
err_mem_regions:
intel_memory_regions_driver_release(dev_priv);
err_ggtt:
@@ -648,8 +645,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv)
if (pdev->msi_enabled)
pci_disable_msi(pdev);
-
- cpu_latency_qos_remove_request(&dev_priv->pm_qos);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 15be8debae54..632c713227dc 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,9 +891,6 @@ struct drm_i915_private {
bool display_irqs_enabled;
- /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
- struct pm_qos_request pm_qos;
-
/* Sideband mailbox protection */
struct mutex sb_lock;
struct pm_qos_request sb_qos;
@@ -1579,9 +1576,9 @@ static inline const struct i915_rev_steppings *
tgl_revids_get(struct drm_i915_private *dev_priv)
{
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv))
- return tgl_uy_revids;
+ return &tgl_uy_revids[INTEL_REVID(dev_priv)];
else
- return tgl_revids;
+ return &tgl_revids[INTEL_REVID(dev_priv)];
}
#define IS_TGL_DISP_REVID(p, since, until) \
@@ -1591,14 +1588,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv)
#define IS_TGL_UY_GT_REVID(p, since, until) \
((IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_uy_revids->gt_stepping >= (since) && \
- tgl_uy_revids->gt_stepping <= (until))
+ tgl_uy_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
+ tgl_uy_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define IS_TGL_GT_REVID(p, since, until) \
(IS_TIGERLAKE(p) && \
!(IS_TGL_U(p) || IS_TGL_Y(p)) && \
- tgl_revids->gt_stepping >= (since) && \
- tgl_revids->gt_stepping <= (until))
+ tgl_revids[INTEL_REVID(p)].gt_stepping >= (since) && \
+ tgl_revids[INTEL_REVID(p)].gt_stepping <= (until))
#define RKL_REVID_A0 0x0
#define RKL_REVID_B0 0x1
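
The old macros always dereferenced element zero of the stepping tables, so every chip was reported as the first stepping. The fix indexes by PCI revision; an illustrative sketch of the assumed table layout (entries and stepping values here are hypothetical):

static const struct i915_rev_steppings tgl_uy_revids[] = {
	[0] = { .gt_stepping = TGL_REVID_A0 },	/* illustrative values */
	[1] = { .gt_stepping = TGL_REVID_B0 },
};

static u8 tgl_uy_gt_stepping(struct drm_i915_private *dev_priv)
{
	/* The old code read tgl_uy_revids->gt_stepping, i.e. entry [0],
	 * for every chip; indexing by INTEL_REVID() picks the entry
	 * that matches the running hardware.
	 */
	return tgl_uy_revids[INTEL_REVID(dev_priv)].gt_stepping;
}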
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dc6febc63f1c..6cdb052e3850 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -60,6 +60,24 @@
* and related files, but that will be described in separate chapters.
*/
+/*
+ * Interrupt statistic for PMU. Increments the counter only if the
+ * interrupt originated from the GPU, so interrupts from a device which
+ * shares the interrupt line are not accounted.
+ */
+static inline void pmu_irq_stats(struct drm_i915_private *i915,
+ irqreturn_t res)
+{
+ if (unlikely(res != IRQ_HANDLED))
+ return;
+
+ /*
+	 * A clever compiler translates that into a single INC; with a less
+	 * clever one, WRITE_ONCE() at least prevents store tearing.
+ */
+ WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
+}
+
typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
enum hpd_pin pin);
@@ -1668,6 +1686,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -1745,6 +1765,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -2155,6 +2177,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
if (sde_ier)
raw_reg_write(regs, SDEIER, sde_ier);
+ pmu_irq_stats(i915, ret);
+
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -2541,6 +2565,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
+ pmu_irq_stats(dev_priv, IRQ_HANDLED);
+
return IRQ_HANDLED;
}
@@ -2636,6 +2662,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
gen11_gu_misc_irq_handler(gt, gu_misc_iir);
+ pmu_irq_stats(i915, IRQ_HANDLED);
+
return IRQ_HANDLED;
}
@@ -3934,6 +3962,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -4043,6 +4073,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, ret);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -4189,6 +4221,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
} while (0);
+ pmu_irq_stats(dev_priv, IRQ_HANDLED);
+
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return ret;
@@ -4242,18 +4276,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
*/
dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv);
- if (HAS_PCH_DG1(dev_priv))
- dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
- else if (INTEL_GEN(dev_priv) >= 11)
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
- else if (IS_GEN9_LP(dev_priv))
- dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
- else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
- else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- else
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ if (HAS_GMCH(dev_priv)) {
+ if (I915_HAS_HOTPLUG(dev_priv))
+ dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ } else {
+ if (HAS_PCH_DG1(dev_priv))
+ dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
+ else if (INTEL_GEN(dev_priv) >= 11)
+ dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ else if (IS_GEN9_LP(dev_priv))
+ dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
+ dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ else
+ dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ }
}
/**
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 3b12c8ff7182..649c26518d26 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -914,7 +914,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
intel_uncore_rmw(uncore, oastatus_reg,
GEN8_OASTATUS_COUNTER_OVERFLOW |
GEN8_OASTATUS_REPORT_LOST,
- IS_GEN_RANGE(uncore->i915, 8, 10) ?
+ IS_GEN_RANGE(uncore->i915, 8, 11) ?
(GEN8_OASTATUS_HEAD_POINTER_WRAP |
GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0);
}
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index cd786ad12be7..d76685ce0399 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -4,7 +4,6 @@
* Copyright © 2017-2018 Intel Corporation
*/
-#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include "gt/intel_engine.h"
@@ -424,22 +423,6 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}
-static u64 count_interrupts(struct drm_i915_private *i915)
-{
- /* open-coded kstat_irqs() */
- struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
- u64 sum = 0;
- int cpu;
-
- if (!desc || !desc->kstat_irqs)
- return 0;
-
- for_each_possible_cpu(cpu)
- sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
-
- return sum;
-}
-
static void i915_pmu_event_destroy(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -590,7 +573,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
USEC_PER_SEC /* to MHz */);
break;
case I915_PMU_INTERRUPTS:
- val = count_interrupts(i915);
+ val = READ_ONCE(pmu->irq_count);
break;
case I915_PMU_RC6_RESIDENCY:
val = get_rc6(&i915->gt);
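
The new interrupt count is a single-writer statistic: the irq handler is the only updater and the PMU read side only observes, so a WRITE_ONCE()/READ_ONCE() pairing replaces any locking. A minimal standalone sketch of the pattern (names are illustrative, not i915 symbols):

static unsigned long counter;

static void producer(void)		/* e.g. the interrupt handler */
{
	/* Plain increment, marked so the store cannot tear */
	WRITE_ONCE(counter, counter + 1);
}

static unsigned long consumer(void)	/* e.g. the PMU event read */
{
	/* Single load; no lock is needed with exactly one writer */
	return READ_ONCE(counter);
}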
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index a24885ab415c..8405d6da5b9a 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -112,6 +112,14 @@ struct i915_pmu {
*/
ktime_t sleep_last;
/**
+ * @irq_count: Number of interrupts
+ *
+ * Intentionally unsigned long to avoid atomics or heuristics on 32bit.
+ * 4e9 interrupts are a lot and postprocessing can really deal with an
+ * occasional wraparound easily. It's 32bit after all.
+ */
+ unsigned long irq_count;
+ /**
* @events_attr_group: Device events attribute group.
*/
struct attribute_group events_attr_group;
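
The wraparound argument in the new kernel-doc is the usual modular-arithmetic one: as long as a post-processor computes deltas between successive samples in the same unsigned width, a wrap cancels out. A worked sketch (hypothetical helper, not part of the patch):

/* cur - prev is taken modulo 2^32, so a single wrap between two
 * samples still yields the correct delta, e.g.
 * prev = 0xFFFFFFF0, cur = 0x10  =>  delta = 0x20.
 */
static inline u32 irq_count_delta(u32 prev, u32 cur)
{
	return cur - prev;
}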
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h
index c642ae17837f..1e582270c6ea 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-dev.h
+++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h
@@ -7,6 +7,7 @@
#define __DCSS_PRV_H__
#include <drm/drm_fourcc.h>
+#include <drm/drm_plane.h>
#include <linux/io.h>
#include <video/videomode.h>
@@ -165,6 +166,8 @@ void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm,
/* SCALER */
int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base);
void dcss_scaler_exit(struct dcss_scaler *scl);
+void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
+ enum drm_scaling_filter scaling_filter);
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c
index e13652e3a115..03ba88f7f995 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-plane.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c
@@ -103,15 +103,15 @@ static bool dcss_plane_can_rotate(const struct drm_format_info *format,
bool mod_present, u64 modifier,
unsigned int rotation)
{
- bool linear_format = !mod_present ||
- (mod_present && modifier == DRM_FORMAT_MOD_LINEAR);
+ bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR;
u32 supported_rotation = DRM_MODE_ROTATE_0;
if (!format->is_yuv && linear_format)
supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
DRM_MODE_REFLECT_MASK;
else if (!format->is_yuv &&
- modifier == DRM_FORMAT_MOD_VIVANTE_TILED)
+ (modifier == DRM_FORMAT_MOD_VIVANTE_TILED ||
+ modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED))
supported_rotation = DRM_MODE_ROTATE_MASK |
DRM_MODE_REFLECT_MASK;
else if (format->is_yuv && linear_format &&
@@ -257,7 +257,8 @@ static bool dcss_plane_needs_setup(struct drm_plane_state *state,
state->src_h != old_state->src_h ||
fb->format->format != old_fb->format->format ||
fb->modifier != old_fb->modifier ||
- state->rotation != old_state->rotation;
+ state->rotation != old_state->rotation ||
+ state->scaling_filter != old_state->scaling_filter;
}
static void dcss_plane_atomic_update(struct drm_plane *plane,
@@ -272,6 +273,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
u32 src_w, src_h, dst_w, dst_h;
struct drm_rect src, dst;
bool enable = true;
+ bool is_rotation_90_or_270;
if (!fb || !state->crtc || !state->visible)
return;
@@ -309,8 +311,16 @@ static void dcss_plane_atomic_update(struct drm_plane *plane,
dcss_plane_atomic_set_base(dcss_plane);
+ is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 |
+ DRM_MODE_ROTATE_270);
+
+ dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num,
+ state->scaling_filter);
+
dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num,
- state->fb->format, src_w, src_h,
+ state->fb->format,
+ is_rotation_90_or_270 ? src_h : src_w,
+ is_rotation_90_or_270 ? src_w : src_h,
dst_w, dst_h,
drm_mode_vrefresh(&crtc_state->mode));
@@ -388,6 +398,10 @@ struct dcss_plane *dcss_plane_init(struct drm_device *drm,
if (ret)
return ERR_PTR(ret);
+ drm_plane_create_scaling_filter_property(&dcss_plane->base,
+ BIT(DRM_SCALING_FILTER_DEFAULT) |
+ BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR));
+
drm_plane_create_rotation_property(&dcss_plane->base,
DRM_MODE_ROTATE_0,
DRM_MODE_ROTATE_0 |
diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
index cd21905de580..47852b9dd5ea 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c
@@ -77,6 +77,8 @@ struct dcss_scaler_ch {
u32 c_vstart;
u32 c_hstart;
+
+ bool use_nn_interpolation;
};
struct dcss_scaler {
@@ -243,6 +245,17 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
}
}
+static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps,
+ int coef[][PSC_NUM_TAPS])
+{
+ int i, j;
+
+ for (i = 0; i < PSC_STORED_PHASES; i++)
+ for (j = 0; j < PSC_NUM_TAPS; j++)
+ coef[i][j] = j == PSC_NUM_TAPS >> 1 ?
+ (1 << PSC_COEFF_PRECISION) : 0;
+}
+
/**
* dcss_scaler_filter_design() - Compute filter coefficients using
* Gaussian filter.
@@ -253,7 +266,8 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps,
*/
static void dcss_scaler_filter_design(int src_length, int dst_length,
bool use_5_taps, bool phase0_identity,
- int coef[][PSC_NUM_TAPS])
+ int coef[][PSC_NUM_TAPS],
+ bool nn_interpolation)
{
int fc_q;
@@ -263,8 +277,11 @@ static void dcss_scaler_filter_design(int src_length, int dst_length,
else
fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES);
- /* compute gaussian filter coefficients */
- dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
+ if (nn_interpolation)
+ dcss_scaler_nearest_neighbor_filter(use_5_taps, coef);
+ else
+ /* compute gaussian filter coefficients */
+ dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef);
}
static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs)
@@ -653,12 +670,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
/* horizontal luma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
- src_xres == dst_xres, coef);
+ src_xres == dst_xres, coef,
+ ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical luma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
- src_yres == dst_yres, coef);
+ src_yres == dst_yres, coef,
+ ch->use_nn_interpolation);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
@@ -678,14 +697,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch,
/* horizontal chroma */
dcss_scaler_filter_design(src_xres, dst_xres, false,
(src_xres == dst_xres) && (ch->c_hstart == 0),
- coef);
+ coef, ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef);
/* vertical chroma */
dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps,
(src_yres == dst_yres) && (ch->c_vstart == 0),
- coef);
+ coef, ch->use_nn_interpolation);
if (program_5_taps)
dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef);
else
@@ -700,12 +719,14 @@ static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch,
/* horizontal RGB */
dcss_scaler_filter_design(src_xres, dst_xres, false,
- src_xres == dst_xres, coef);
+ src_xres == dst_xres, coef,
+ ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef);
/* vertical RGB */
dcss_scaler_filter_design(src_yres, dst_yres, false,
- src_yres == dst_yres, coef);
+ src_yres == dst_yres, coef,
+ ch->use_nn_interpolation);
dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef);
}
@@ -751,6 +772,14 @@ static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch,
ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS;
}
+void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num,
+ enum drm_scaling_filter scaling_filter)
+{
+ struct dcss_scaler_ch *ch = &scl->ch[ch_num];
+
+ ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR;
+}
+
void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num,
const struct drm_format_info *format,
int src_xres, int src_yres, int dst_xres, int dst_yres,
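
The nearest-neighbor table built above is the identity kernel replicated across all phases: full fixed-point weight on the center tap, zero everywhere else, so each output pixel copies exactly one input pixel. A small self-check sketch, assuming PSC_NUM_TAPS is odd (7 in this driver) and PSC_COEFF_PRECISION is the fixed-point shift:

static void dcss_nn_filter_selfcheck(void)
{
	int coef[PSC_STORED_PHASES][PSC_NUM_TAPS];

	dcss_scaler_nearest_neighbor_filter(false, coef);

	/* Every phase is { 0, ..., 1 << PSC_COEFF_PRECISION, ..., 0 } */
	WARN_ON(coef[0][PSC_NUM_TAPS / 2] != 1 << PSC_COEFF_PRECISION);
	WARN_ON(coef[0][0] != 0);
}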
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 832e5280a6ed..de62966243cd 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -225,7 +225,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
-
+ bo->base.map_wc = true;
bo->base.base.funcs = &lima_gem_funcs;
return &bo->base.base;
diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig
index b3990126562c..71c689b573c9 100644
--- a/drivers/gpu/drm/mcde/Kconfig
+++ b/drivers/gpu/drm/mcde/Kconfig
@@ -4,6 +4,7 @@ config DRM_MCDE
depends on CMA
depends on ARM || COMPILE_TEST
depends on OF
+ depends on COMMON_CLK
select MFD_SYSCON
select DRM_MIPI_DSI
select DRM_BRIDGE
diff --git a/drivers/gpu/drm/mcde/Makefile b/drivers/gpu/drm/mcde/Makefile
index fe28f4e0fe46..15d9c89a3273 100644
--- a/drivers/gpu/drm/mcde/Makefile
+++ b/drivers/gpu/drm/mcde/Makefile
@@ -1,3 +1,3 @@
-mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_display.o
+mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_clk_div.o mcde_display.o
obj-$(CONFIG_DRM_MCDE) += mcde_drm.o
diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c
new file mode 100644
index 000000000000..038821d2ef80
--- /dev/null
+++ b/drivers/gpu/drm/mcde/mcde_clk_div.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/clk-provider.h>
+#include <linux/regulator/consumer.h>
+
+#include "mcde_drm.h"
+#include "mcde_display_regs.h"
+
+/* The MCDE internal clock dividers for FIFO A and B */
+struct mcde_clk_div {
+ struct clk_hw hw;
+ struct mcde *mcde;
+ u32 cr;
+ u32 cr_div;
+};
+
+static int mcde_clk_div_enable(struct clk_hw *hw)
+{
+ struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
+ struct mcde *mcde = cdiv->mcde;
+ u32 val;
+
+ spin_lock(&mcde->fifo_crx1_lock);
+ val = readl(mcde->regs + cdiv->cr);
+ /*
+ * Select the PLL72 (LCD) clock as parent
+ * FIXME: implement other parents.
+ */
+ val &= ~MCDE_CRX1_CLKSEL_MASK;
+ val |= MCDE_CRX1_CLKSEL_CLKPLL72 << MCDE_CRX1_CLKSEL_SHIFT;
+ /* Internal clock */
+ val |= MCDE_CRA1_CLKTYPE_TVXCLKSEL1;
+
+ /* Clear then set the divider */
+ val &= ~(MCDE_CRX1_BCD | MCDE_CRX1_PCD_MASK);
+ val |= cdiv->cr_div;
+
+ writel(val, mcde->regs + cdiv->cr);
+ spin_unlock(&mcde->fifo_crx1_lock);
+
+ return 0;
+}
+
+static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate, bool set_parent)
+{
+ int best_div = 1, div;
+ struct clk_hw *parent = clk_hw_get_parent(hw);
+ unsigned long best_prate = 0;
+ unsigned long best_diff = ~0ul;
+ int max_div = (1 << MCDE_CRX1_PCD_BITS) - 1;
+
+ for (div = 1; div < max_div; div++) {
+ unsigned long this_prate, div_rate, diff;
+
+ if (set_parent)
+ this_prate = clk_hw_round_rate(parent, rate * div);
+ else
+ this_prate = *prate;
+ div_rate = DIV_ROUND_UP_ULL(this_prate, div);
+ diff = abs(rate - div_rate);
+
+ if (diff < best_diff) {
+ best_div = div;
+ best_diff = diff;
+ best_prate = this_prate;
+ }
+ }
+
+ *prate = best_prate;
+ return best_div;
+}
+
+static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ int div = mcde_clk_div_choose_div(hw, rate, prate, true);
+
+ return DIV_ROUND_UP_ULL(*prate, div);
+}
+
+static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw,
+ unsigned long prate)
+{
+ struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
+ struct mcde *mcde = cdiv->mcde;
+ u32 cr;
+ int div;
+
+ /*
+ * If the MCDE is not powered we can't access registers.
+ * It will come up with 0 in the divider register bits, which
+ * means "divide by 2".
+ */
+ if (!regulator_is_enabled(mcde->epod))
+ return DIV_ROUND_UP_ULL(prate, 2);
+
+ cr = readl(mcde->regs + cdiv->cr);
+ if (cr & MCDE_CRX1_BCD)
+ return prate;
+
+ /* 0 in the PCD means "divide by 2", 1 means "divide by 3" etc */
+ div = cr & MCDE_CRX1_PCD_MASK;
+ div += 2;
+
+ return DIV_ROUND_UP_ULL(prate, div);
+}
+
+static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long prate)
+{
+ struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw);
+ int div = mcde_clk_div_choose_div(hw, rate, &prate, false);
+ u32 cr = 0;
+
+ /*
+ * We cache the CR bits to set the divide in the state so that
+ * we can call this before we can even write to the hardware.
+ */
+ if (div == 1) {
+ /* Bypass clock divider */
+ cr |= MCDE_CRX1_BCD;
+ } else {
+ div -= 2;
+ cr |= div & MCDE_CRX1_PCD_MASK;
+ }
+ cdiv->cr_div = cr;
+
+ return 0;
+}
+
+static const struct clk_ops mcde_clk_div_ops = {
+ .enable = mcde_clk_div_enable,
+ .recalc_rate = mcde_clk_div_recalc_rate,
+ .round_rate = mcde_clk_div_round_rate,
+ .set_rate = mcde_clk_div_set_rate,
+};
+
+int mcde_init_clock_divider(struct mcde *mcde)
+{
+ struct device *dev = mcde->dev;
+ struct mcde_clk_div *fifoa;
+ struct mcde_clk_div *fifob;
+ const char *parent_name;
+ struct clk_init_data fifoa_init = {
+ .name = "fifoa",
+ .ops = &mcde_clk_div_ops,
+ .parent_names = &parent_name,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ struct clk_init_data fifob_init = {
+ .name = "fifob",
+ .ops = &mcde_clk_div_ops,
+ .parent_names = &parent_name,
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ };
+ int ret;
+
+ spin_lock_init(&mcde->fifo_crx1_lock);
+ parent_name = __clk_get_name(mcde->lcd_clk);
+
+ /* Allocate 2 clocks */
+ fifoa = devm_kzalloc(dev, sizeof(*fifoa), GFP_KERNEL);
+ if (!fifoa)
+ return -ENOMEM;
+ fifob = devm_kzalloc(dev, sizeof(*fifob), GFP_KERNEL);
+ if (!fifob)
+ return -ENOMEM;
+
+ fifoa->mcde = mcde;
+ fifoa->cr = MCDE_CRA1;
+ fifoa->hw.init = &fifoa_init;
+ ret = devm_clk_hw_register(dev, &fifoa->hw);
+ if (ret) {
+ dev_err(dev, "error registering FIFO A clock divider\n");
+ return ret;
+ }
+ mcde->fifoa_clk = fifoa->hw.clk;
+
+ fifob->mcde = mcde;
+ fifob->cr = MCDE_CRB1;
+ fifob->hw.init = &fifob_init;
+ ret = devm_clk_hw_register(dev, &fifob->hw);
+ if (ret) {
+ dev_err(dev, "error registering FIFO B clock divider\n");
+ return ret;
+ }
+ mcde->fifob_clk = fifob->hw.clk;
+
+ return 0;
+}
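
The CR bits cached by mcde_clk_div_set_rate() encode the divider with an offset: PCD = 0 already means divide by 2, and divide by 1 is expressed through the bypass bit instead. An illustrative helper spelling out that encoding (not part of the patch):

static u32 mcde_encode_div(int div)
{
	if (div == 1)
		return MCDE_CRX1_BCD;		/* bypass, no division */

	/* PCD field: 0 => /2, 1 => /3, ... so store div - 2 */
	return (div - 2) & MCDE_CRX1_PCD_MASK;
}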
diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c
index c271e5bf042e..7c2e0b865441 100644
--- a/drivers/gpu/drm/mcde/mcde_display.c
+++ b/drivers/gpu/drm/mcde/mcde_display.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/regulator/consumer.h>
+#include <linux/media-bus-format.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
@@ -16,6 +17,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_simple_kms_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
@@ -57,10 +59,15 @@ enum mcde_overlay {
MCDE_OVERLAY_5,
};
-enum mcde_dsi_formatter {
+enum mcde_formatter {
MCDE_DSI_FORMATTER_0 = 0,
MCDE_DSI_FORMATTER_1,
MCDE_DSI_FORMATTER_2,
+ MCDE_DSI_FORMATTER_3,
+ MCDE_DSI_FORMATTER_4,
+ MCDE_DSI_FORMATTER_5,
+ MCDE_DPI_FORMATTER_0,
+ MCDE_DPI_FORMATTER_1,
};
void mcde_display_irq(struct mcde *mcde)
@@ -81,7 +88,7 @@ void mcde_display_irq(struct mcde *mcde)
*
* TODO: Currently only one DSI link is supported.
*/
- if (mcde_dsi_irq(mcde->mdsi)) {
+ if (!mcde->dpi_output && mcde_dsi_irq(mcde->mdsi)) {
u32 val;
/*
@@ -243,73 +250,70 @@ static int mcde_configure_extsrc(struct mcde *mcde, enum mcde_extsrc src,
val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT;
val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT;
val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT;
- /*
- * MCDE has inverse semantics from DRM on RBG/BGR which is why
- * all the modes are inversed here.
- */
+
switch (format) {
case DRM_FORMAT_ARGB8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ABGR8888:
val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR8888:
val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_BGR888:
val |= MCDE_EXTSRCXCONF_BPP_RGB888 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ARGB4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_ABGR4444:
val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR4444:
val |= MCDE_EXTSRCXCONF_BPP_RGB444 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XRGB1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_XBGR1555:
val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_RGB565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
- val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_BGR565:
val |= MCDE_EXTSRCXCONF_BPP_RGB565 <<
MCDE_EXTSRCXCONF_BPP_SHIFT;
+ val |= MCDE_EXTSRCXCONF_BGR;
break;
case DRM_FORMAT_YUV422:
val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 <<
@@ -556,6 +560,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
<< MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT;
break;
case MCDE_VIDEO_FORMATTER_FLOW:
+ case MCDE_DPI_FORMATTER_FLOW:
val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE
<< MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT;
val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER
@@ -564,7 +569,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
default:
dev_err(mcde->dev, "unknown flow mode %d\n",
mcde->flow_mode);
- break;
+ return;
}
writel(val, mcde->regs + sync);
@@ -594,10 +599,35 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch,
mcde->regs + mux);
break;
}
+
+ /*
+ * If using DPI configure the sync event.
+ * TODO: this is for LCD only, it does not cover TV out.
+ */
+ if (mcde->dpi_output) {
+ u32 stripwidth;
+
+ stripwidth = 0xF000 / (mode->vdisplay * 4);
+ dev_info(mcde->dev, "stripwidth: %d\n", stripwidth);
+
+ val = MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO |
+ (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_HWREQVCNT_SHIFT |
+ MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO |
+ (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_SWINTVCNT_SHIFT;
+
+ switch (fifo) {
+ case MCDE_FIFO_A:
+ writel(val, mcde->regs + MCDE_SYNCHCONFA);
+ break;
+ case MCDE_FIFO_B:
+ writel(val, mcde->regs + MCDE_SYNCHCONFB);
+ break;
+ }
+ }
}
static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
- enum mcde_dsi_formatter fmt,
+ enum mcde_formatter fmt,
int fifo_wtrmrk)
{
u32 val;
@@ -618,12 +648,49 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
}
val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT;
- /* We only support DSI formatting for now */
- val |= MCDE_CTRLX_FORMTYPE_DSI <<
- MCDE_CTRLX_FORMTYPE_SHIFT;
- /* Select the formatter to use for this FIFO */
- val |= fmt << MCDE_CTRLX_FORMID_SHIFT;
+ /*
+ * Select the formatter to use for this FIFO
+ *
+ * The register definitions imply that different IDs should be used
+ * by the DSI formatters depending on whether they are in VID or CMD
+ * mode, and the manual says they are dedicated but identical.
+ * The vendor code uses them as it seems fit.
+ */
+ switch (fmt) {
+ case MCDE_DSI_FORMATTER_0:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI0VID << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_1:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI0CMD << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_2:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI1VID << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_3:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI1CMD << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_4:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI2VID << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DSI_FORMATTER_5:
+ val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DSI2CMD << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DPI_FORMATTER_0:
+ val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DPIA << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ case MCDE_DPI_FORMATTER_1:
+ val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT;
+ val |= MCDE_CTRLX_FORMID_DPIB << MCDE_CTRLX_FORMID_SHIFT;
+ break;
+ }
writel(val, mcde->regs + ctrl);
/* Blend source with Alpha 0xff on FIFO */
@@ -631,17 +698,54 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo,
0xff << MCDE_CRX0_ALPHABLEND_SHIFT;
writel(val, mcde->regs + cr0);
- /* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
-
- /* Use the MCDE clock for this FIFO */
- val = MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
+ spin_lock(&mcde->fifo_crx1_lock);
+ val = readl(mcde->regs + cr1);
+ /*
+ * Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video()
+ * FIXME: a different clock needs to be selected for TV out.
+ */
+ if (mcde->dpi_output) {
+ struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
+ u32 bus_format;
+
+ /* Assume RGB888 24 bit if we have no further info */
+ if (!connector->display_info.num_bus_formats) {
+ dev_info(mcde->dev, "panel does not specify bus format, assume RGB888\n");
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ } else {
+ bus_format = connector->display_info.bus_formats[0];
+ }
- /* TODO: when adding DPI support add OUTBPP etc here */
+ /*
+ * Set up the CDWIN and OUTBPP for the LCD
+ *
+ * FIXME: fill this in if you know the correspondence between the MIPI
+ * DPI specification and the media bus formats.
+ */
+ val &= ~MCDE_CRX1_CDWIN_MASK;
+ val &= ~MCDE_CRX1_OUTBPP_MASK;
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
+ val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
+ break;
+ default:
+ dev_err(mcde->dev, "unknown bus format, assume RGB888\n");
+ val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT;
+ val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT;
+ break;
+ }
+ } else {
+ /* Use the MCDE clock for DSI */
+ val &= ~MCDE_CRX1_CLKSEL_MASK;
+ val |= MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT;
+ }
writel(val, mcde->regs + cr1);
+ spin_unlock(&mcde->fifo_crx1_lock);
};
static void mcde_configure_dsi_formatter(struct mcde *mcde,
- enum mcde_dsi_formatter fmt,
+ enum mcde_formatter fmt,
u32 formatter_frame,
int pkt_size)
{
@@ -681,6 +785,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde,
delay0 = MCDE_DSIVID2DELAY0;
delay1 = MCDE_DSIVID2DELAY1;
break;
+ default:
+ dev_err(mcde->dev, "tried to configure a non-DSI formatter as DSI\n");
+ return;
}
/*
@@ -700,7 +807,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde,
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB666_PACKED:
- val |= MCDE_DSICONF0_PACKING_RGB666_PACKED <<
+ dev_err(mcde->dev,
+ "we cannot handle the packed RGB666 format\n");
+ val |= MCDE_DSICONF0_PACKING_RGB666 <<
MCDE_DSICONF0_PACKING_SHIFT;
break;
case MIPI_DSI_FMT_RGB565:
@@ -860,73 +969,140 @@ static int mcde_dsi_get_pkt_div(int ppl, int fifo_size)
return 1;
}
-static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *cstate,
- struct drm_plane_state *plane_state)
+static void mcde_setup_dpi(struct mcde *mcde, const struct drm_display_mode *mode,
+ int *fifo_wtrmrk_lvl)
{
- struct drm_crtc *crtc = &pipe->crtc;
- struct drm_plane *plane = &pipe->plane;
- struct drm_device *drm = crtc->dev;
- struct mcde *mcde = to_mcde(drm);
- const struct drm_display_mode *mode = &cstate->mode;
- struct drm_framebuffer *fb = plane->state->fb;
- u32 format = fb->format->format;
- u32 formatter_ppl = mode->hdisplay; /* pixels per line */
- u32 formatter_lpf = mode->vdisplay; /* lines per frame */
- int pkt_size, fifo_wtrmrk;
- int cpp = fb->format->cpp[0];
- int formatter_cpp;
- struct drm_format_name_buf tmp;
- u32 formatter_frame;
- u32 pkt_div;
+ struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge);
+ u32 hsw, hfp, hbp;
+ u32 vsw, vfp, vbp;
u32 val;
- int ret;
- /* This powers up the entire MCDE block and the DSI hardware */
- ret = regulator_enable(mcde->epod);
- if (ret) {
- dev_err(drm->dev, "can't re-enable EPOD regulator\n");
- return;
- }
+ /* FIXME: we only support LCD, implement TV out */
+ hsw = mode->hsync_end - mode->hsync_start;
+ hfp = mode->hsync_start - mode->hdisplay;
+ hbp = mode->htotal - mode->hsync_end;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vfp = mode->vsync_start - mode->vdisplay;
+ vbp = mode->vtotal - mode->vsync_end;
- dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
- mode->hdisplay, mode->vdisplay,
- drm_get_format_name(format, &tmp));
- if (!mcde->mdsi) {
- /* TODO: deal with this for non-DSI output */
- dev_err(drm->dev, "no DSI master attached!\n");
- return;
- }
+ dev_info(mcde->dev, "output on DPI LCD from channel A\n");
+ /* Display actual values */
+ dev_info(mcde->dev, "HSW: %d, HFP: %d, HBP: %d, VSW: %d, VFP: %d, VBP: %d\n",
+ hsw, hfp, hbp, vsw, vfp, vbp);
+
+ /*
+ * The pixel fetcher is 128 64-bit words deep = 1024 bytes.
+ * One overlay of 32bpp (4 cpp) assumed, fetch 160 pixels.
+ * 160 * 4 = 640 bytes.
+ */
+ *fifo_wtrmrk_lvl = 640;
/* Set up the main control, watermark level at 7 */
val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
- /* 24 bits DPI: connect LSB Ch B to D[0:7] */
- val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
- /* TV out: connect LSB Ch B to D[8:15] */
- val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
+
+ /*
+ * This sets up the internal silicon muxing of the DPI
+ * lines. This is how the silicon connects out to the
+ * external pins, then the pins need to be further
+ * configured into "alternate functions" using pin control
+ * to actually get the signals out.
+ *
+ * FIXME: this is hardcoded to the only setting found in
+ * the wild. If we need to use different settings for
+ * different DPI displays, make this parameterizable from
+ * the device tree.
+ */
+ /* 24 bits DPI: connect Ch A LSB to D[0:7] */
+ val |= 0 << MCDE_CONF0_OUTMUX0_SHIFT;
+ /* 24 bits DPI: connect Ch A MID to D[8:15] */
+ val |= 1 << MCDE_CONF0_OUTMUX1_SHIFT;
/* Don't care about this muxing */
val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
- /* 24 bits DPI: connect MID Ch B to D[24:31] */
- val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
- /* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */
- val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
- /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */
+ /* Don't care about this muxing */
+ val |= 0 << MCDE_CONF0_OUTMUX3_SHIFT;
+ /* 24 bits DPI: connect Ch A MSB to D[32:39] */
+ val |= 2 << MCDE_CONF0_OUTMUX4_SHIFT;
+ /* Syncmux bits zero: DPI channel A */
writel(val, mcde->regs + MCDE_CONF0);
- /* Clear any pending interrupts */
- mcde_display_disable_irqs(mcde);
- writel(0, mcde->regs + MCDE_IMSCERR);
- writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
+ /* This hammers us into LCD mode */
+ writel(0, mcde->regs + MCDE_TVCRA);
+
+ /* Front porch and sync width */
+ val = (vsw << MCDE_TVBL1_BEL1_SHIFT);
+ val |= (vfp << MCDE_TVBL1_BSL1_SHIFT);
+ writel(val, mcde->regs + MCDE_TVBL1A);
+ /* The vendor driver sets the same value into TVBL2A */
+ writel(val, mcde->regs + MCDE_TVBL2A);
+
+ /* Vertical back porch */
+ val = (vbp << MCDE_TVDVO_DVO1_SHIFT);
+ /* The vendor driver sets the same value into DVO2 */
+ val |= (vbp << MCDE_TVDVO_DVO2_SHIFT);
+ writel(val, mcde->regs + MCDE_TVDVOA);
+
+ /* Horizontal back porch, as 0 = 1 cycle we need to subtract 1 */
+ writel((hbp - 1), mcde->regs + MCDE_TVTIM1A);
+
+ /* Horizontal sync width and horizontal front porch, 0 = 1 cycle */
+ val = ((hsw - 1) << MCDE_TVLBALW_LBW_SHIFT);
+ val |= ((hfp - 1) << MCDE_TVLBALW_ALW_SHIFT);
+ writel(val, mcde->regs + MCDE_TVLBALWA);
+
+ /* Blank some TV registers we don't use */
+ writel(0, mcde->regs + MCDE_TVISLA);
+ writel(0, mcde->regs + MCDE_TVBLUA);
+
+ /* Set up sync inversion etc */
+ val = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= MCDE_LCDTIM1B_IHS;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= MCDE_LCDTIM1B_IVS;
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW)
+ val |= MCDE_LCDTIM1B_IOE;
+ if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)
+ val |= MCDE_LCDTIM1B_IPC;
+ writel(val, mcde->regs + MCDE_LCDTIM1A);
+}
- dev_info(drm->dev, "output in %s mode, format %dbpp\n",
+static void mcde_setup_dsi(struct mcde *mcde, const struct drm_display_mode *mode,
+ int cpp, int *fifo_wtrmrk_lvl, int *dsi_formatter_frame,
+ int *dsi_pkt_size)
+{
+ u32 formatter_ppl = mode->hdisplay; /* pixels per line */
+ u32 formatter_lpf = mode->vdisplay; /* lines per frame */
+ int formatter_frame;
+ int formatter_cpp;
+ int fifo_wtrmrk;
+ u32 pkt_div;
+ int pkt_size;
+ u32 val;
+
+ dev_info(mcde->dev, "output in %s mode, format %dbpp\n",
(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ?
"VIDEO" : "CMD",
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format));
formatter_cpp =
mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8;
- dev_info(drm->dev, "overlay CPP %d bytes, DSI CPP %d bytes\n",
- cpp,
- formatter_cpp);
+ dev_info(mcde->dev, "Overlay CPP: %d bytes, DSI formatter CPP %d bytes\n",
+ cpp, formatter_cpp);
+
+ /* Set up the main control, watermark level at 7 */
+ val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT;
+
+ /*
+ * This is the internal silicon muxing of the DPI
+ * (parallel display) lines. Since we are not using
+ * this at all (we are using DSI), these are just
+ * dummy values from the vendor tree.
+ */
+ val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT;
+ val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT;
+ val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT;
+ val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT;
+ val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT;
+ writel(val, mcde->regs + MCDE_CONF0);
/* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */
@@ -948,9 +1124,9 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
/* The FIFO is 640 entries deep on this v3 hardware */
pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640);
}
- dev_dbg(drm->dev, "FIFO watermark after flooring: %d bytes\n",
+ dev_dbg(mcde->dev, "FIFO watermark after flooring: %d bytes\n",
fifo_wtrmrk);
- dev_dbg(drm->dev, "Packet divisor: %d bytes\n", pkt_div);
+ dev_dbg(mcde->dev, "Packet divisor: %d bytes\n", pkt_div);
/* NOTE: pkt_div is 1 for video mode */
pkt_size = (formatter_ppl * formatter_cpp) / pkt_div;
@@ -958,16 +1134,64 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO))
pkt_size++;
- dev_dbg(drm->dev, "DSI packet size: %d * %d bytes per line\n",
+ dev_dbg(mcde->dev, "DSI packet size: %d * %d bytes per line\n",
pkt_size, pkt_div);
- dev_dbg(drm->dev, "Overlay frame size: %u bytes\n",
+ dev_dbg(mcde->dev, "Overlay frame size: %u bytes\n",
mode->hdisplay * mode->vdisplay * cpp);
- mcde->stride = mode->hdisplay * cpp;
- dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
- mcde->stride);
/* NOTE: pkt_div is 1 for video mode */
formatter_frame = pkt_size * pkt_div * formatter_lpf;
- dev_dbg(drm->dev, "Formatter frame size: %u bytes\n", formatter_frame);
+ dev_dbg(mcde->dev, "Formatter frame size: %u bytes\n", formatter_frame);
+
+ *fifo_wtrmrk_lvl = fifo_wtrmrk;
+ *dsi_pkt_size = pkt_size;
+ *dsi_formatter_frame = formatter_frame;
+}
+
+static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *cstate,
+ struct drm_plane_state *plane_state)
+{
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_plane *plane = &pipe->plane;
+ struct drm_device *drm = crtc->dev;
+ struct mcde *mcde = to_mcde(drm);
+ const struct drm_display_mode *mode = &cstate->mode;
+ struct drm_framebuffer *fb = plane->state->fb;
+ u32 format = fb->format->format;
+ int dsi_pkt_size;
+ int fifo_wtrmrk;
+ int cpp = fb->format->cpp[0];
+ struct drm_format_name_buf tmp;
+ u32 dsi_formatter_frame;
+ u32 val;
+ int ret;
+
+ /* This powers up the entire MCDE block and the DSI hardware */
+ ret = regulator_enable(mcde->epod);
+ if (ret) {
+ dev_err(drm->dev, "can't re-enable EPOD regulator\n");
+ return;
+ }
+
+ dev_info(drm->dev, "enable MCDE, %d x %d format %s\n",
+ mode->hdisplay, mode->vdisplay,
+ drm_get_format_name(format, &tmp));
+
+ /* Clear any pending interrupts */
+ mcde_display_disable_irqs(mcde);
+ writel(0, mcde->regs + MCDE_IMSCERR);
+ writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR);
+
+ if (mcde->dpi_output)
+ mcde_setup_dpi(mcde, mode, &fifo_wtrmrk);
+ else
+ mcde_setup_dsi(mcde, mode, cpp, &fifo_wtrmrk,
+ &dsi_formatter_frame, &dsi_pkt_size);
+
+ mcde->stride = mode->hdisplay * cpp;
+ dev_dbg(drm->dev, "Overlay line stride: %u bytes\n",
+ mcde->stride);
/* Drain the FIFO A + channel 0 pipe so we have a clean slate */
mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0);
@@ -995,29 +1219,47 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe,
*/
mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode);
- /* Configure FIFO A to use DSI formatter 0 */
- mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
- fifo_wtrmrk);
+ if (mcde->dpi_output) {
+ unsigned long lcd_freq;
+
+ /* Configure FIFO A to use DPI formatter 0 */
+ mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DPI_FORMATTER_0,
+ fifo_wtrmrk);
+
+ /* Set up and enable the LCD clock */
+ lcd_freq = clk_round_rate(mcde->fifoa_clk, mode->clock * 1000);
+ ret = clk_set_rate(mcde->fifoa_clk, lcd_freq);
+ if (ret)
+ dev_err(mcde->dev, "failed to set LCD clock rate %lu Hz\n",
+ lcd_freq);
+ ret = clk_prepare_enable(mcde->fifoa_clk);
+ if (ret) {
+ dev_err(mcde->dev, "failed to enable FIFO A DPI clock\n");
+ return;
+ }
+ dev_info(mcde->dev, "LCD FIFO A clk rate %lu Hz\n",
+ clk_get_rate(mcde->fifoa_clk));
+ } else {
+ /* Configure FIFO A to use DSI formatter 0 */
+ mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0,
+ fifo_wtrmrk);
- /*
- * This brings up the DSI bridge which is tightly connected
- * to the MCDE DSI formatter.
- *
- * FIXME: if we want to use another formatter, such as DPI,
- * we need to be more elaborate here and select the appropriate
- * bridge.
- */
- mcde_dsi_enable(mcde->bridge);
+ /*
+ * This brings up the DSI bridge which is tightly connected
+ * to the MCDE DSI formatter.
+ */
+ mcde_dsi_enable(mcde->bridge);
- /* Configure the DSI formatter 0 for the DSI panel output */
- mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
- formatter_frame, pkt_size);
+ /* Configure the DSI formatter 0 for the DSI panel output */
+ mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0,
+ dsi_formatter_frame, dsi_pkt_size);
+ }
switch (mcde->flow_mode) {
case MCDE_COMMAND_TE_FLOW:
case MCDE_COMMAND_BTA_TE_FLOW:
case MCDE_VIDEO_TE_FLOW:
- /* We are using TE in some comination */
+ /* We are using TE in some combination */
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
val = MCDE_VSCRC_VSPOL;
else
@@ -1069,8 +1311,12 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe)
/* Disable FIFO A flow */
mcde_disable_fifo(mcde, MCDE_FIFO_A, true);
- /* This disables the DSI bridge */
- mcde_dsi_disable(mcde->bridge);
+ if (mcde->dpi_output) {
+ clk_disable_unprepare(mcde->fifoa_clk);
+ } else {
+ /* This disables the DSI bridge */
+ mcde_dsi_disable(mcde->bridge);
+ }
event = crtc->state->event;
if (event) {
@@ -1261,6 +1507,10 @@ int mcde_display_init(struct drm_device *drm)
DRM_FORMAT_YUV422,
};
+ ret = mcde_init_clock_divider(mcde);
+ if (ret)
+ return ret;
+
ret = drm_simple_display_pipe_init(drm, &mcde->pipe,
&mcde_display_funcs,
formats, ARRAY_SIZE(formats),
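
The DSI packet arithmetic in mcde_setup_dsi() is easiest to follow with concrete numbers. A worked example, assuming mcde_dsi_get_pkt_div() picks the smallest divisor that makes one line fit the 640-entry FIFO (the panel geometry is illustrative):

static void mcde_dsi_pkt_example(void)
{
	int ppl = 1024, lpf = 600, cpp = 3;	/* 1024x600, 24bpp formatter */
	int pkt_div, pkt_size, formatter_frame;

	pkt_div = 2;		/* assumed: 1024 / 2 = 512 <= 640 FIFO entries */
	pkt_size = (ppl * cpp) / pkt_div;	/* 1536 pixel bytes per packet */
	pkt_size++;		/* one extra byte for the command mode header */

	/* 1537 * 2 * 600 = 1844400 bytes per frame through the formatter */
	formatter_frame = pkt_size * pkt_div * lpf;
	(void)formatter_frame;
}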
diff --git a/drivers/gpu/drm/mcde/mcde_display_regs.h b/drivers/gpu/drm/mcde/mcde_display_regs.h
index d3ac7ef5ff9a..2ad78c59d627 100644
--- a/drivers/gpu/drm/mcde/mcde_display_regs.h
+++ b/drivers/gpu/drm/mcde/mcde_display_regs.h
@@ -215,6 +215,80 @@
#define MCDE_OVLXCOMP_Z_SHIFT 27
#define MCDE_OVLXCOMP_Z_MASK 0x78000000
+/* DPI/TV configuration registers, channel A and B */
+#define MCDE_TVCRA 0x00000838
+#define MCDE_TVCRB 0x00000A38
+#define MCDE_TVCR_MOD_TV BIT(0) /* 0 = LCD mode */
+#define MCDE_TVCR_INTEREN BIT(1)
+#define MCDE_TVCR_IFIELD BIT(2)
+#define MCDE_TVCR_TVMODE_SDTV_656P (0 << 3)
+#define MCDE_TVCR_TVMODE_SDTV_656P_LE (3 << 3)
+#define MCDE_TVCR_TVMODE_SDTV_656P_BE (4 << 3)
+#define MCDE_TVCR_SDTVMODE_Y0CBY1CR (0 << 6)
+#define MCDE_TVCR_SDTVMODE_CBY0CRY1 (1 << 6)
+#define MCDE_TVCR_AVRGEN BIT(8)
+#define MCDE_TVCR_CKINV BIT(9)
+
+/* TV blanking control register 1, channel A and B */
+#define MCDE_TVBL1A 0x0000083C
+#define MCDE_TVBL1B 0x00000A3C
+#define MCDE_TVBL1_BEL1_SHIFT 0 /* VFP vertical front porch 11 bits */
+#define MCDE_TVBL1_BSL1_SHIFT 16 /* VSW vertical sync pulse width 11 bits */
+
+/* Pixel processing TV start line, channel A and B */
+#define MCDE_TVISLA 0x00000840
+#define MCDE_TVISLB 0x00000A40
+#define MCDE_TVISL_FSL1_SHIFT 0 /* Field 1 identification start line 11 bits */
+#define MCDE_TVISL_FSL2_SHIFT 16 /* Field 2 identification start line 11 bits */
+
+/* Pixel processing TV DVO offset */
+#define MCDE_TVDVOA 0x00000844
+#define MCDE_TVDVOB 0x00000A44
+#define MCDE_TVDVO_DVO1_SHIFT 0 /* VBP vertical back porch 0 = 0 */
+#define MCDE_TVDVO_DVO2_SHIFT 16
+
+/*
+ * Pixel processing TV Timing 1
+ * HBP horizontal back porch 11 bits horizontal offset
+ * 0 = 1 pixel HBP, 255 = 256 pixels, so actual value - 1
+ */
+#define MCDE_TVTIM1A 0x0000084C
+#define MCDE_TVTIM1B 0x00000A4C
+
+/* Pixel processing TV LBALW */
+/* 0 = 1 clock cycle, 255 = 256 clock cycles */
+#define MCDE_TVLBALWA 0x00000850
+#define MCDE_TVLBALWB 0x00000A50
+#define MCDE_TVLBALW_LBW_SHIFT 0 /* HSW horizontal sync width, line blanking width 11 bits */
+#define MCDE_TVLBALW_ALW_SHIFT 16 /* HFP horizontal front porch, active line width 11 bits */
+
+/* TV blanking control register 1, channel A and B */
+#define MCDE_TVBL2A 0x00000854
+#define MCDE_TVBL2B 0x00000A54
+#define MCDE_TVBL2_BEL2_SHIFT 0 /* Field 2 blanking end line 11 bits */
+#define MCDE_TVBL2_BSL2_SHIFT 16 /* Field 2 blanking start line 11 bits */
+
+/* Pixel processing TV background */
+#define MCDE_TVBLUA 0x00000858
+#define MCDE_TVBLUB 0x00000A58
+#define MCDE_TVBLU_TVBLU_SHIFT 0 /* 8 bits luminance */
+#define MCDE_TVBLU_TVBCB_SHIFT 8 /* 8 bits Cb chrominance */
+#define MCDE_TVBLU_TVBCR_SHIFT 16 /* 8 bits Cr chrominance */
+
+/* Pixel processing LCD timing 1 */
+#define MCDE_LCDTIM1A 0x00000860
+#define MCDE_LCDTIM1B 0x00000A60
+/* inverted vertical sync pulse for HRTFT 0 = active low, 1 active high */
+#define MCDE_LCDTIM1B_IVP BIT(19)
+/* inverted vertical sync, 0 = active high (the normal), 1 = active low */
+#define MCDE_LCDTIM1B_IVS BIT(20)
+/* inverted horizontal sync, 0 = active high (the normal), 1 = active low */
+#define MCDE_LCDTIM1B_IHS BIT(21)
+/* inverted panel clock 0 = rising edge data out, 1 = falling edge data out */
+#define MCDE_LCDTIM1B_IPC BIT(22)
+/* invert output enable 0 = active high, 1 = active low */
+#define MCDE_LCDTIM1B_IOE BIT(23)
+
#define MCDE_CRC 0x00000C00
#define MCDE_CRC_C1EN BIT(2)
#define MCDE_CRC_C2EN BIT(3)
@@ -360,6 +434,7 @@
#define MCDE_CRB1 0x00000A04
#define MCDE_CRX1_PCD_SHIFT 0
#define MCDE_CRX1_PCD_MASK 0x000003FF
+#define MCDE_CRX1_PCD_BITS 10
#define MCDE_CRX1_CLKSEL_SHIFT 10
#define MCDE_CRX1_CLKSEL_MASK 0x00001C00
#define MCDE_CRX1_CLKSEL_CLKPLL72 0
@@ -421,8 +496,20 @@
#define MCDE_ROTACONF 0x0000087C
#define MCDE_ROTBCONF 0x00000A7C
+/* Synchronization event configuration */
#define MCDE_SYNCHCONFA 0x00000880
#define MCDE_SYNCHCONFB 0x00000A80
+#define MCDE_SYNCHCONF_HWREQVEVENT_SHIFT 0
+#define MCDE_SYNCHCONF_HWREQVEVENT_VSYNC (0 << 0)
+#define MCDE_SYNCHCONF_HWREQVEVENT_BACK_PORCH (1 << 0)
+#define MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO (2 << 0)
+#define MCDE_SYNCHCONF_HWREQVEVENT_FRONT_PORCH (3 << 0)
+#define MCDE_SYNCHCONF_HWREQVCNT_SHIFT 2 /* 14 bits */
+#define MCDE_SYNCHCONF_SWINTVEVENT_VSYNC (0 << 16)
+#define MCDE_SYNCHCONF_SWINTVEVENT_BACK_PORCH (1 << 16)
+#define MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO (2 << 16)
+#define MCDE_SYNCHCONF_SWINTVEVENT_FRONT_PORCH (3 << 16)
+#define MCDE_SYNCHCONF_SWINTVCNT_SHIFT 18 /* 14 bits */
/* Channel A+B control registers */
#define MCDE_CTRLA 0x00000884
@@ -465,8 +552,8 @@
#define MCDE_DSICONF0_PACKING_MASK 0x00700000
#define MCDE_DSICONF0_PACKING_RGB565 0
#define MCDE_DSICONF0_PACKING_RGB666 1
-#define MCDE_DSICONF0_PACKING_RGB666_PACKED 2
-#define MCDE_DSICONF0_PACKING_RGB888 3
+#define MCDE_DSICONF0_PACKING_RGB888 2
+#define MCDE_DSICONF0_PACKING_BGR888 3
#define MCDE_DSICONF0_PACKING_HDTV 4
#define MCDE_DSIVID0FRAME 0x00000E04
diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h
index 8253e2f9993e..ecb70b4b737c 100644
--- a/drivers/gpu/drm/mcde/mcde_drm.h
+++ b/drivers/gpu/drm/mcde/mcde_drm.h
@@ -62,6 +62,8 @@ enum mcde_flow_mode {
MCDE_VIDEO_TE_FLOW,
/* Video mode with the formatter itself as sync source */
MCDE_VIDEO_FORMATTER_FLOW,
+ /* DPI video with the formatter itself as sync source */
+ MCDE_DPI_FORMATTER_FLOW,
};
struct mcde {
@@ -72,6 +74,7 @@ struct mcde {
struct drm_connector *connector;
struct drm_simple_display_pipe pipe;
struct mipi_dsi_device *mdsi;
+ bool dpi_output;
s16 stride;
enum mcde_flow_mode flow_mode;
unsigned int flow_active;
@@ -82,6 +85,11 @@ struct mcde {
struct clk *mcde_clk;
struct clk *lcd_clk;
struct clk *hdmi_clk;
+ /* Handles to the clock dividers for FIFO A and B */
+ struct clk *fifoa_clk;
+ struct clk *fifob_clk;
+ /* Locks the MCDE FIFO control register A and B */
+ spinlock_t fifo_crx1_lock;
struct regulator *epod;
struct regulator *vana;
@@ -105,4 +113,6 @@ void mcde_display_irq(struct mcde *mcde);
void mcde_display_disable_irqs(struct mcde *mcde);
int mcde_display_init(struct drm_device *drm);
+int mcde_init_clock_divider(struct mcde *mcde);
+
#endif /* _MCDE_DRM_H_ */
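The two FIFO clock divider handles and fifo_crx1_lock exist because the
divider implementation and the display code both perform read-modify-write
cycles on the shared CRA1/CRB1 registers. A sketch of a guarded update,
assuming the driver's usual regs iomem pointer (the helper itself is
illustrative, not part of this patch):

static void mcde_set_clksel(struct mcde *mcde, u32 reg, u32 clksel)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&mcde->fifo_crx1_lock, flags);
	val = readl(mcde->regs + reg);	/* reg = MCDE_CRB1, for instance */
	val &= ~MCDE_CRX1_CLKSEL_MASK;
	val |= clksel << MCDE_CRX1_CLKSEL_SHIFT;
	writel(val, mcde->regs + reg);
	spin_unlock_irqrestore(&mcde->fifo_crx1_lock, flags);
}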
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9d25181bd7e2..e60566a5739c 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -22,13 +22,13 @@
* The hardware has four display pipes, and the layout is a little
* bit like this::
*
- * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External 0..5 0..3 A,B, 3 x DSI bridge
+ * Memory -> Overlay -> Channel -> FIFO -> 8 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 6 x DSI bridge
* source 0..9 C0,C1 2 x DPI
*
 * FIFOs A and B are for LCD and HDMI while FIFOs C0/C1 are for
* panels with embedded buffer.
- * 3 of the formatters are for DSI.
+ * 6 of the formatters are for DSI, in 3 VID/CMD pairs.
* 2 of the formatters are for DPI.
*
* Behind the formatters are the DSI or DPI ports that route to
@@ -130,9 +130,37 @@ static int mcde_modeset_init(struct drm_device *drm)
struct mcde *mcde = to_mcde(drm);
int ret;
+ /*
+ * If no other bridge was found, check if we have a DPI panel or
+ * any other bridge connected directly to the MCDE DPI output.
+ * If a DSI bridge is found, DSI will take precedence.
+ *
+ * TODO: more elaborate bridge selection if we have more than one
+ * thing attached to the system.
+ */
if (!mcde->bridge) {
- dev_err(drm->dev, "no display output bridge yet\n");
- return -EPROBE_DEFER;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+
+ ret = drm_of_find_panel_or_bridge(drm->dev->of_node,
+ 0, 0, &panel, &bridge);
+ if (ret) {
+ dev_err(drm->dev,
+ "Could not locate any output bridge or panel\n");
+ return ret;
+ }
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DPI);
+ if (IS_ERR(bridge)) {
+ dev_err(drm->dev,
+ "Could not connect panel bridge\n");
+ return PTR_ERR(bridge);
+ }
+ }
+ mcde->dpi_output = true;
+ mcde->bridge = bridge;
+ mcde->flow_mode = MCDE_DPI_FORMATTER_FLOW;
}
mode_config = &drm->mode_config;
@@ -156,13 +184,7 @@ static int mcde_modeset_init(struct drm_device *drm)
return ret;
}
- /*
- * Attach the DSI bridge
- *
- * TODO: when adding support for the DPI bridge or several DSI bridges,
- * we selectively connect the bridge(s) here instead of this simple
- * attachment.
- */
+ /* Attach the bridge. */
ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe,
mcde->bridge);
if (ret) {
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index bfe994230543..bdd37eadecd5 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -829,8 +829,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
mtk_crtc->cmdq_client =
cmdq_mbox_create(mtk_crtc->mmsys_dev,
- drm_crtc_index(&mtk_crtc->base),
- 2000);
+ drm_crtc_index(&mtk_crtc->base));
if (IS_ERR(mtk_crtc->cmdq_client)) {
dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
drm_crtc_index(&mtk_crtc->base));
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 1d9e00b69462..5aa52b7afeec 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -7,6 +7,7 @@
#define MTK_DRM_DDP_COMP_H
#include <linux/io.h>
+#include <linux/soc/mediatek/mtk-mmsys.h>
struct device;
struct device_node;
@@ -35,39 +36,6 @@ enum mtk_ddp_comp_type {
MTK_DDP_COMP_TYPE_MAX,
};
-enum mtk_ddp_comp_id {
- DDP_COMPONENT_AAL0,
- DDP_COMPONENT_AAL1,
- DDP_COMPONENT_BLS,
- DDP_COMPONENT_CCORR,
- DDP_COMPONENT_COLOR0,
- DDP_COMPONENT_COLOR1,
- DDP_COMPONENT_DITHER,
- DDP_COMPONENT_DPI0,
- DDP_COMPONENT_DPI1,
- DDP_COMPONENT_DSI0,
- DDP_COMPONENT_DSI1,
- DDP_COMPONENT_DSI2,
- DDP_COMPONENT_DSI3,
- DDP_COMPONENT_GAMMA,
- DDP_COMPONENT_OD0,
- DDP_COMPONENT_OD1,
- DDP_COMPONENT_OVL0,
- DDP_COMPONENT_OVL_2L0,
- DDP_COMPONENT_OVL_2L1,
- DDP_COMPONENT_OVL1,
- DDP_COMPONENT_PWM0,
- DDP_COMPONENT_PWM1,
- DDP_COMPONENT_PWM2,
- DDP_COMPONENT_RDMA0,
- DDP_COMPONENT_RDMA1,
- DDP_COMPONENT_RDMA2,
- DDP_COMPONENT_UFOE,
- DDP_COMPONENT_WDMA0,
- DDP_COMPONENT_WDMA1,
- DDP_COMPONENT_ID_MAX,
-};
-
struct mtk_ddp_comp;
struct cmdq_pkt;
struct mtk_ddp_comp_funcs {
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 7f8eea494147..aad75a22dc33 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -145,8 +145,6 @@ struct meson_dw_hdmi {
struct reset_control *hdmitx_apb;
struct reset_control *hdmitx_ctrl;
struct reset_control *hdmitx_phy;
- struct clk *hdmi_pclk;
- struct clk *venci_clk;
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
@@ -946,6 +944,29 @@ static void meson_disable_regulator(void *data)
regulator_disable(data);
}
+static void meson_disable_clk(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int meson_enable_clk(struct device *dev, char *name)
+{
+ struct clk *clk;
+ int ret;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Unable to get %s pclk\n", name);
+ return PTR_ERR(clk);
+ }
+
+ ret = clk_prepare_enable(clk);
+ if (!ret)
+ ret = devm_add_action_or_reset(dev, meson_disable_clk, clk);
+
+ return ret;
+}
+
static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
void *data)
{
@@ -1026,19 +1047,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
- meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr");
- if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) {
- dev_err(dev, "Unable to get HDMI pclk\n");
- return PTR_ERR(meson_dw_hdmi->hdmi_pclk);
- }
- clk_prepare_enable(meson_dw_hdmi->hdmi_pclk);
+ ret = meson_enable_clk(dev, "isfr");
+ if (ret)
+ return ret;
- meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci");
- if (IS_ERR(meson_dw_hdmi->venci_clk)) {
- dev_err(dev, "Unable to get venci clk\n");
- return PTR_ERR(meson_dw_hdmi->venci_clk);
- }
- clk_prepare_enable(meson_dw_hdmi->venci_clk);
+ ret = meson_enable_clk(dev, "iahb");
+ if (ret)
+ return ret;
+
+ ret = meson_enable_clk(dev, "venci");
+ if (ret)
+ return ret;
dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi,
&meson_dw_hdmi_regmap_config);
@@ -1071,6 +1090,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
encoder->possible_crtcs = BIT(0);
+ meson_dw_hdmi_init(meson_dw_hdmi);
+
DRM_DEBUG_DRIVER("encoder initialized\n");
/* Bridge / Connector */
@@ -1095,8 +1116,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
- meson_dw_hdmi_init(meson_dw_hdmi);
-
next_bridge = of_drm_find_bridge(pdev->dev.of_node);
if (next_bridge)
drm_bridge_attach(encoder, next_bridge,
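The meson_enable_clk() helper above pairs clk_prepare_enable() with a
devm cleanup action, so every clock it grabs is automatically disabled
and unprepared on unbind or on a later probe failure. Later kernels
(v5.19+) provide devm_clk_get_enabled(), which folds the same
get-prepare-enable-autodisable sequence into one call; a sketch of the
equivalent form:

static int example_enable_isfr(struct device *dev)
{
	/* One-call equivalent of meson_enable_clk(dev, "isfr") on
	 * kernels that have devm_clk_get_enabled() */
	struct clk *clk = devm_clk_get_enabled(dev, "isfr");

	return PTR_ERR_OR_ZERO(clk);
}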
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 0f07f259503d..a977c9f49719 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -37,7 +37,6 @@ static const struct drm_driver mgag200_driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
- .gem_create_object = drm_gem_shmem_create_object_cached,
DRM_GEM_SHMEM_DRIVER_OPS,
};
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index 7e82c41a85f1..bdc989183c64 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -534,8 +534,10 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev)
if (!gpu->aspace) {
dev_err(dev->dev, "No memory protection without MMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
return gpu;
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 93da6683a866..4534633fe7cd 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -564,8 +564,10 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index c0be3a0f36b2..82bebb40234d 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -692,8 +692,10 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
* implement a cmdstream validator.
*/
DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n");
- ret = -ENXIO;
- goto fail;
+ if (!allow_vram_carveout) {
+ ret = -ENXIO;
+ goto fail;
+ }
}
icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem");
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 87c8b033ad1a..12e75ba360f9 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -18,6 +18,10 @@ bool snapshot_debugbus = false;
MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)");
module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600);
+bool allow_vram_carveout = false;
+MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU");
+module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600);
+
static const struct adreno_info gpulist[] = {
{
.rev = ADRENO_REV(2, 0, 0, 0),
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6cf9975e951e..f09175698827 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -191,8 +191,6 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
struct platform_device *pdev)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
- struct io_pgtable_domain_attr pgtbl_cfg;
struct iommu_domain *iommu;
struct msm_mmu *mmu;
struct msm_gem_address_space *aspace;
@@ -202,13 +200,18 @@ adreno_iommu_create_address_space(struct msm_gpu *gpu,
if (!iommu)
return NULL;
- /*
- * This allows GPU to set the bus attributes required to use system
- * cache on behalf of the iommu page table walker.
- */
- if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
- pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
- iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+
+ if (adreno_is_a6xx(adreno_gpu)) {
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct io_pgtable_domain_attr pgtbl_cfg;
+ /*
+ * This allows GPU to set the bus attributes required to use system
+ * cache on behalf of the iommu page table walker.
+ */
+ if (!IS_ERR(a6xx_gpu->htw_llc_slice)) {
+ pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
+ iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+ }
}
mmu = msm_iommu_new(&pdev->dev, iommu);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index c3775f79525a..b3d9a333591b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -18,6 +18,7 @@
#include "adreno_pm4.xml.h"
extern bool snapshot_debugbus;
+extern bool allow_vram_carveout;
enum {
ADRENO_FW_PM4 = 0,
@@ -211,6 +212,11 @@ static inline int adreno_is_a540(struct adreno_gpu *gpu)
return gpu->revn == 540;
}
+static inline bool adreno_is_a6xx(struct adreno_gpu *gpu)
+{
+ return gpu->revn >= 600 && gpu->revn < 700;
+}
+
static inline int adreno_is_a618(struct adreno_gpu *gpu)
{
return gpu->revn == 618;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 6e971d552911..3bc7ed21de28 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -693,6 +693,13 @@ static int dp_irq_hpd_handle(struct dp_display_private *dp, u32 data)
return 0;
}
+ if (state == ST_CONNECT_PENDING) {
+ /* wait until ST_CONNECTED */
+ dp_add_event(dp, EV_IRQ_HPD_INT, 0, 1); /* delay = 1 */
+ mutex_unlock(&dp->event_mutex);
+ return 0;
+ }
+
ret = dp_display_usbpd_attention_cb(&dp->pdev->dev);
if (ret == -ECONNRESET) { /* cable unplugged */
dp->core_initialized = false;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 97dca3e378b7..d1780bcac8cc 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -167,12 +167,18 @@ int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
rc = dp_panel_read_dpcd(dp_panel);
+ if (rc) {
+ DRM_ERROR("read dpcd failed %d\n", rc);
+ return rc;
+ }
+
bw_code = drm_dp_link_rate_to_bw_code(dp_panel->link_info.rate);
- if (rc || !is_link_rate_valid(bw_code) ||
+ if (!is_link_rate_valid(bw_code) ||
!is_lane_count_valid(dp_panel->link_info.num_lanes) ||
(bw_code > dp_panel->max_bw_code)) {
- DRM_ERROR("read dpcd failed %d\n", rc);
- return rc;
+ DRM_ERROR("Illegal link rate=%d lane=%d\n", dp_panel->link_info.rate,
+ dp_panel->link_info.num_lanes);
+ return -EINVAL;
}
if (dp_panel->dfp_present) {
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 535a0263ceeb..108c405e03dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -457,14 +457,14 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
drm_mode_config_init(ddev);
- /* Bind all our sub-components: */
- ret = component_bind_all(dev, ddev);
+ ret = msm_init_vram(ddev);
if (ret)
goto err_destroy_mdss;
- ret = msm_init_vram(ddev);
+ /* Bind all our sub-components: */
+ ret = component_bind_all(dev, ddev);
if (ret)
- goto err_msm_uninit;
+ goto err_destroy_mdss;
dma_set_max_seg_size(dev, UINT_MAX);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 82cbaf337b50..9d10739c4eb2 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -96,6 +96,8 @@ static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!msm_gem_is_locked(obj));
+
if (!msm_obj->pages) {
struct drm_device *dev = obj->dev;
struct page **p;
@@ -211,10 +213,8 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- fput(vma->vm_file);
- get_file(obj->filp);
vma->vm_pgoff = 0;
- vma->vm_file = obj->filp;
+ vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
@@ -990,6 +990,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
if (msm_obj->pages)
kvfree(msm_obj->pages);
+ put_iova_vmas(obj);
+
/* dma_buf_detach() grabs resv lock, so we need to unlock
* prior to drm_prime_gem_destroy
*/
@@ -999,11 +1001,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
} else {
msm_gem_vunmap(obj);
put_pages(obj);
+ put_iova_vmas(obj);
msm_gem_unlock(obj);
}
- put_iova_vmas(obj);
-
drm_gem_object_release(obj);
kfree(msm_obj);
@@ -1117,6 +1118,8 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
struct msm_gem_vma *vma;
struct page **pages;
+ drm_gem_private_object_init(dev, obj, size);
+
msm_gem_lock(obj);
vma = add_vma(obj, NULL);
@@ -1128,9 +1131,9 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
to_msm_bo(obj)->vram_node = &vma->node;
- drm_gem_private_object_init(dev, obj, size);
-
+ msm_gem_lock(obj);
pages = get_pages(obj);
+ msm_gem_unlock(obj);
if (IS_ERR(pages)) {
ret = PTR_ERR(pages);
goto fail;
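The fput()/get_file() pairs replaced above are open-coded versions of the
new vma_set_file() helper. Roughly what it does (see mm/util.c): take the
reference on the new file before dropping the old one, so the swap is safe
even when old and new are the same struct file:

void vma_set_file(struct vm_area_struct *vma, struct file *file)
{
	get_file(file);			/* reference held by the vma */
	swap(vma->vm_file, file);
	fput(file);			/* drop the old vm_file's reference */
}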
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index 6faf17b6408d..6da93551e2e5 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -134,11 +134,8 @@ static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb)
return -ENODEV;
ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0);
- if (ret) {
- DRM_DEV_ERROR(drm->dev,
- "failed to attach bridge: %d\n", ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n");
mxsfb->bridge = bridge;
@@ -212,7 +209,8 @@ static int mxsfb_load(struct drm_device *drm,
ret = mxsfb_attach_bridge(mxsfb);
if (ret) {
- dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(drm->dev, "Cannot connect bridge: %d\n", ret);
goto err_vblank;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 36d6b6093d16..33fff388dd83 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -32,6 +32,7 @@
#include <linux/hdmi.h>
#include <linux/component.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>
@@ -1161,8 +1162,10 @@ nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id)
static struct drm_encoder *
nv50_mstc_atomic_best_encoder(struct drm_connector *connector,
- struct drm_connector_state *connector_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ connector);
struct nv50_mstc *mstc = nv50_mstc(connector);
struct drm_crtc *crtc = connector_state->crtc;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1386b0fc1640..c85b1af06b7b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -942,16 +942,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if ((old_reg->mem_type == TTM_PL_SYSTEM &&
- new_reg->mem_type == TTM_PL_VRAM) ||
- (old_reg->mem_type == TTM_PL_VRAM &&
- new_reg->mem_type == TTM_PL_SYSTEM)) {
- hop->fpfn = 0;
- hop->lpfn = 0;
- hop->mem_type = TTM_PL_TT;
- hop->flags = 0;
- return -EMULTIHOP;
- }
if (new_reg->mem_type == TTM_PL_TT) {
ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg);
@@ -995,14 +985,25 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
/* Hardware assisted copy. */
if (drm->ttm.move) {
+ if ((old_reg->mem_type == TTM_PL_SYSTEM &&
+ new_reg->mem_type == TTM_PL_VRAM) ||
+ (old_reg->mem_type == TTM_PL_VRAM &&
+ new_reg->mem_type == TTM_PL_SYSTEM)) {
+ hop->fpfn = 0;
+ hop->lpfn = 0;
+ hop->mem_type = TTM_PL_TT;
+ hop->flags = 0;
+ return -EMULTIHOP;
+ }
ret = nouveau_bo_move_m2mf(bo, evict, ctx,
new_reg);
- if (!ret)
- goto out;
- }
+ } else
+ ret = -ENODEV;
- /* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+ if (ret) {
+ /* Fallback to software copy. */
+ ret = ttm_bo_move_memcpy(bo, ctx, new_reg);
+ }
out:
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 68c271f4250b..30d299ca8795 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -564,9 +564,8 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj,
* address_space (so unmap_mapping_range does what we want,
* in particular in the case of mmap'd dmabufs)
*/
- fput(vma->vm_file);
vma->vm_pgoff = 0;
- vma->vm_file = get_file(obj->filp);
+ vma_set_file(vma, obj->filp);
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
}
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
index 210e70da3a15..6b4e97bfd46e 100644
--- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c
@@ -23,76 +23,254 @@
#include "panel-samsung-s6e63m0.h"
/* Manufacturer Command Set */
-#define MCS_ELVSS_ON 0xb1
-#define MCS_MIECTL1 0xc0
-#define MCS_BCMODE 0xc1
+#define MCS_ELVSS_ON 0xb1
+#define MCS_TEMP_SWIRE 0xb2
+#define MCS_MIECTL1 0xc0
+#define MCS_BCMODE 0xc1
#define MCS_ERROR_CHECK 0xd5
#define MCS_READ_ID1 0xda
#define MCS_READ_ID2 0xdb
#define MCS_READ_ID3 0xdc
#define MCS_LEVEL_2_KEY 0xf0
#define MCS_MTP_KEY 0xf1
-#define MCS_DISCTL 0xf2
-#define MCS_SRCCTL 0xf6
-#define MCS_IFCTL 0xf7
-#define MCS_PANELCTL 0xF8
-#define MCS_PGAMMACTL 0xfa
+#define MCS_DISCTL 0xf2
+#define MCS_SRCCTL 0xf6
+#define MCS_IFCTL 0xf7
+#define MCS_PANELCTL 0xf8
+#define MCS_PGAMMACTL 0xfa
#define S6E63M0_LCD_ID_VALUE_M2 0xA4
#define S6E63M0_LCD_ID_VALUE_SM2 0xB4
#define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6
-#define NUM_GAMMA_LEVELS 11
-#define GAMMA_TABLE_COUNT 23
+#define NUM_GAMMA_LEVELS 28
+#define GAMMA_TABLE_COUNT 23
-#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
+#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1)
/* array of gamma tables for gamma value 2.2 */
static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = {
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8,
- 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7,
- 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0,
- 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF,
- 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF,
- 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC,
- 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC,
- 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9,
- 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB,
- 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8,
- 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA,
- 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6,
- 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8,
- 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4,
- 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9,
- 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3,
- 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6,
- 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2,
- 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6,
- 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1,
- 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 },
- { MCS_PGAMMACTL, 0x00,
- 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6,
- 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0,
- 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb },
+ /* 30 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0xA1, 0x51, 0x7B, 0xCE,
+ 0xCB, 0xC2, 0xC7, 0xCB, 0xBC, 0xDA, 0xDD,
+ 0xD3, 0x00, 0x53, 0x00, 0x52, 0x00, 0x6F, },
+ /* 40 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x97, 0x58, 0x71, 0xCC,
+ 0xCB, 0xC0, 0xC5, 0xC9, 0xBA, 0xD9, 0xDC,
+ 0xD1, 0x00, 0x5B, 0x00, 0x5A, 0x00, 0x7A, },
+ /* 50 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x96, 0x58, 0x72, 0xCB,
+ 0xCA, 0xBF, 0xC6, 0xC9, 0xBA, 0xD6, 0xD9,
+ 0xCD, 0x00, 0x61, 0x00, 0x61, 0x00, 0x83, },
+ /* 60 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x91, 0x5E, 0x6E, 0xC9,
+ 0xC9, 0xBD, 0xC4, 0xC9, 0xB8, 0xD3, 0xD7,
+ 0xCA, 0x00, 0x69, 0x00, 0x67, 0x00, 0x8D, },
+ /* 70 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x8E, 0x62, 0x6B, 0xC7,
+ 0xC9, 0xBB, 0xC3, 0xC7, 0xB7, 0xD3, 0xD7,
+ 0xCA, 0x00, 0x6E, 0x00, 0x6C, 0x00, 0x94, },
+ /* 80 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x89, 0x68, 0x65, 0xC9,
+ 0xC9, 0xBC, 0xC1, 0xC5, 0xB6, 0xD2, 0xD5,
+ 0xC9, 0x00, 0x73, 0x00, 0x72, 0x00, 0x9A, },
+ /* 90 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x89, 0x69, 0x64, 0xC7,
+ 0xC8, 0xBB, 0xC0, 0xC5, 0xB4, 0xD2, 0xD5,
+ 0xC9, 0x00, 0x77, 0x00, 0x76, 0x00, 0xA0, },
+ /* 100 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x86, 0x69, 0x60, 0xC6,
+ 0xC8, 0xBA, 0xBF, 0xC4, 0xB4, 0xD0, 0xD4,
+ 0xC6, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0xA7, },
+ /* 110 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x86, 0x6A, 0x60, 0xC5,
+ 0xC7, 0xBA, 0xBD, 0xC3, 0xB2, 0xD0, 0xD4,
+ 0xC5, 0x00, 0x80, 0x00, 0x7E, 0x00, 0xAD, },
+ /* 120 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x82, 0x6B, 0x5E, 0xC4,
+ 0xC8, 0xB9, 0xBD, 0xC2, 0xB1, 0xCE, 0xD2,
+ 0xC4, 0x00, 0x85, 0x00, 0x82, 0x00, 0xB3, },
+ /* 130 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x8C, 0x6C, 0x60, 0xC3,
+ 0xC7, 0xB9, 0xBC, 0xC1, 0xAF, 0xCE, 0xD2,
+ 0xC3, 0x00, 0x88, 0x00, 0x86, 0x00, 0xB8, },
+ /* 140 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x80, 0x6C, 0x5F, 0xC1,
+ 0xC6, 0xB7, 0xBC, 0xC1, 0xAE, 0xCD, 0xD0,
+ 0xC2, 0x00, 0x8C, 0x00, 0x8A, 0x00, 0xBE, },
+ /* 150 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x80, 0x6E, 0x5F, 0xC1,
+ 0xC6, 0xB6, 0xBC, 0xC0, 0xAE, 0xCC, 0xD0,
+ 0xC2, 0x00, 0x8F, 0x00, 0x8D, 0x00, 0xC2, },
+ /* 160 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7F, 0x6E, 0x5F, 0xC0,
+ 0xC6, 0xB5, 0xBA, 0xBF, 0xAD, 0xCB, 0xCF,
+ 0xC0, 0x00, 0x94, 0x00, 0x91, 0x00, 0xC8, },
+ /* 170 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7C, 0x6D, 0x5C, 0xC0,
+ 0xC6, 0xB4, 0xBB, 0xBE, 0xAD, 0xCA, 0xCF,
+ 0xC0, 0x00, 0x96, 0x00, 0x94, 0x00, 0xCC, },
+ /* 180 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7B, 0x6D, 0x5B, 0xC0,
+ 0xC5, 0xB3, 0xBA, 0xBE, 0xAD, 0xCA, 0xCE,
+ 0xBF, 0x00, 0x99, 0x00, 0x97, 0x00, 0xD0, },
+ /* 190 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x7A, 0x6D, 0x59, 0xC1,
+ 0xC5, 0xB4, 0xB8, 0xBD, 0xAC, 0xC9, 0xCE,
+ 0xBE, 0x00, 0x9D, 0x00, 0x9A, 0x00, 0xD5, },
+ /* 200 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x79, 0x6D, 0x58, 0xC1,
+ 0xC4, 0xB4, 0xB6, 0xBD, 0xAA, 0xCA, 0xCD,
+ 0xBE, 0x00, 0x9F, 0x00, 0x9D, 0x00, 0xD9, },
+ /* 210 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x79, 0x6D, 0x57, 0xC0,
+ 0xC4, 0xB4, 0xB7, 0xBD, 0xAA, 0xC8, 0xCC,
+ 0xBD, 0x00, 0xA2, 0x00, 0xA0, 0x00, 0xDD, },
+ /* 220 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x78, 0x6F, 0x58, 0xBF,
+ 0xC4, 0xB3, 0xB5, 0xBB, 0xA9, 0xC8, 0xCC,
+ 0xBC, 0x00, 0xA6, 0x00, 0xA3, 0x00, 0xE2, },
+ /* 230 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x75, 0x6F, 0x56, 0xBF,
+ 0xC3, 0xB2, 0xB6, 0xBB, 0xA8, 0xC7, 0xCB,
+ 0xBC, 0x00, 0xA8, 0x00, 0xA6, 0x00, 0xE6, },
+ /* 240 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x76, 0x6F, 0x56, 0xC0,
+ 0xC3, 0xB2, 0xB5, 0xBA, 0xA8, 0xC6, 0xCB,
+ 0xBB, 0x00, 0xAA, 0x00, 0xA8, 0x00, 0xE9, },
+ /* 250 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x74, 0x6D, 0x54, 0xBF,
+ 0xC3, 0xB2, 0xB4, 0xBA, 0xA7, 0xC6, 0xCA,
+ 0xBA, 0x00, 0xAD, 0x00, 0xAB, 0x00, 0xED, },
+ /* 260 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x74, 0x6E, 0x54, 0xBD,
+ 0xC2, 0xB0, 0xB5, 0xBA, 0xA7, 0xC5, 0xC9,
+ 0xBA, 0x00, 0xB0, 0x00, 0xAE, 0x00, 0xF1, },
+ /* 270 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x71, 0x6C, 0x50, 0xBD,
+ 0xC3, 0xB0, 0xB4, 0xB8, 0xA6, 0xC6, 0xC9,
+ 0xBB, 0x00, 0xB2, 0x00, 0xB1, 0x00, 0xF4, },
+ /* 280 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x6E, 0x6C, 0x4D, 0xBE,
+ 0xC3, 0xB1, 0xB3, 0xB8, 0xA5, 0xC6, 0xC8,
+ 0xBB, 0x00, 0xB4, 0x00, 0xB3, 0x00, 0xF7, },
+ /* 290 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x71, 0x70, 0x50, 0xBD,
+ 0xC1, 0xB0, 0xB2, 0xB8, 0xA4, 0xC6, 0xC7,
+ 0xBB, 0x00, 0xB6, 0x00, 0xB6, 0x00, 0xFA, },
+ /* 300 cd */
+ { MCS_PGAMMACTL, 0x02,
+ 0x18, 0x08, 0x24, 0x70, 0x6E, 0x4E, 0xBC,
+ 0xC0, 0xAF, 0xB3, 0xB8, 0xA5, 0xC5, 0xC7,
+ 0xBB, 0x00, 0xB9, 0x00, 0xB8, 0x00, 0xFC, },
+};
+
+#define NUM_ACL_LEVELS 7
+#define ACL_TABLE_COUNT 28
+
+static u8 const s6e63m0_acl[NUM_ACL_LEVELS][ACL_TABLE_COUNT] = {
+ /* NULL ACL */
+ { MCS_BCMODE,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 },
+ /* 40P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1C, 0x21, 0x26,
+ 0x2B, 0x31, 0x36 },
+ /* 43P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x07, 0x0C, 0x12, 0x18, 0x1E, 0x23, 0x29,
+ 0x2F, 0x34, 0x3A },
+ /* 45P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x07, 0x0D, 0x13, 0x19, 0x1F, 0x25, 0x2B,
+ 0x31, 0x37, 0x3D },
+ /* 47P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x07, 0x0E, 0x14, 0x1B, 0x21, 0x27, 0x2E,
+ 0x34, 0x3B, 0x41 },
+ /* 48P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x08, 0x0E, 0x15, 0x1B, 0x22, 0x29, 0x2F,
+ 0x36, 0x3C, 0x43 },
+ /* 50P ACL */
+ { MCS_BCMODE,
+ 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00,
+ 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x01, 0x08, 0x0F, 0x16, 0x1D, 0x24, 0x2A, 0x31,
+ 0x38, 0x3F, 0x46 },
+};
+
+/* This tells us which ACL level goes with which gamma */
+static u8 const s6e63m0_acl_per_gamma[NUM_GAMMA_LEVELS] = {
+ /* 30 - 60 cd: ACL off/NULL */
+ 0, 0, 0, 0,
+ /* 70 - 250 cd: 40P ACL */
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 260 - 300 cd: 50P ACL */
+ 6, 6, 6, 6, 6,
+};
+
+/* The ELVSS backlight regulator has 5 levels */
+#define S6E63M0_ELVSS_LEVELS 5
+
+static u8 const s6e63m0_elvss_offsets[S6E63M0_ELVSS_LEVELS] = {
+ 0x00, /* not set */
+ 0x0D, /* 30 cd - 100 cd */
+ 0x09, /* 110 cd - 160 cd */
+ 0x07, /* 170 cd - 200 cd */
+ 0x00, /* 210 cd - 300 cd */
+};
+
+/* This tells us which ELVSS level goes with which gamma */
+static u8 const s6e63m0_elvss_per_gamma[NUM_GAMMA_LEVELS] = {
+ /* 30 - 100 cd */
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ /* 110 - 160 cd */
+ 2, 2, 2, 2, 2, 2,
+ /* 170 - 200 cd */
+ 3, 3, 3, 3,
+ /* 210 - 300 cd */
+ 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
};
struct s6e63m0 {
@@ -102,6 +280,7 @@ struct s6e63m0 {
struct drm_panel panel;
struct backlight_device *bl_dev;
u8 lcd_type;
+ u8 elvss_pulse;
struct regulator_bulk_data supplies[2];
struct gpio_desc *reset_gpio;
@@ -187,17 +366,25 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx)
dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
- /* We attempt to detect what panel is mounted on the controller */
+ /*
+ * We attempt to detect what panel is mounted on the controller.
+ * The third ID byte represents the desired ELVSS pulse for
+ * some displays.
+ */
switch (id2) {
case S6E63M0_LCD_ID_VALUE_M2:
dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n");
+ ctx->elvss_pulse = id3;
break;
case S6E63M0_LCD_ID_VALUE_SM2:
case S6E63M0_LCD_ID_VALUE_SM2_1:
dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n");
+ ctx->elvss_pulse = id3;
break;
default:
dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2);
+ /* Default ELVSS pulse level */
+ ctx->elvss_pulse = 0x16;
break;
}
@@ -210,7 +397,7 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
{
s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL,
0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f,
- 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00);
+ 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00);
s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL,
0x02, 0x03, 0x1c, 0x10, 0x10);
@@ -226,9 +413,8 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
0x01);
s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL,
- 0x00, 0x8c, 0x07);
- s6e63m0_dcs_write_seq_static(ctx, 0xb3,
- 0xc);
+ 0x00, 0x8e, 0x07);
+ s6e63m0_dcs_write_seq_static(ctx, 0xb3, 0x6c);
s6e63m0_dcs_write_seq_static(ctx, 0xb5,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
@@ -247,9 +433,12 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b,
0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a,
0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23,
- 0x21, 0x20, 0x1e, 0x1e, 0x00, 0x00, 0x11,
- 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55,
- 0x66, 0x66, 0x66, 0x66, 0x66, 0x66);
+ 0x21, 0x20, 0x1e, 0x1e);
+
+ s6e63m0_dcs_write_seq_static(ctx, 0xb8,
+ 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44,
+ 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66,
+ 0x66, 0x66);
s6e63m0_dcs_write_seq_static(ctx, 0xb9,
0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17,
@@ -269,7 +458,7 @@ static void s6e63m0_init(struct s6e63m0 *ctx)
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06,
0x09, 0x0d, 0x0f, 0x12, 0x15, 0x18);
- s6e63m0_dcs_write_seq_static(ctx, 0xb2,
+ s6e63m0_dcs_write_seq_static(ctx, MCS_TEMP_SWIRE,
0x10, 0x10, 0x0b, 0x05);
s6e63m0_dcs_write_seq_static(ctx, MCS_MIECTL1,
@@ -447,15 +636,33 @@ static const struct drm_panel_funcs s6e63m0_drm_funcs = {
static int s6e63m0_set_brightness(struct backlight_device *bd)
{
struct s6e63m0 *ctx = bl_get_data(bd);
-
int brightness = bd->props.brightness;
-
- /* disable and set new gamma */
+ u8 elvss_val;
+ u8 elvss_cmd_set[5];
+ int i;
+
+ /* Adjust ELVSS to candela level */
+ i = s6e63m0_elvss_per_gamma[brightness];
+ elvss_val = ctx->elvss_pulse + s6e63m0_elvss_offsets[i];
+ if (elvss_val > 0x1f)
+ elvss_val = 0x1f;
+ elvss_cmd_set[0] = MCS_TEMP_SWIRE;
+ elvss_cmd_set[1] = elvss_val;
+ elvss_cmd_set[2] = elvss_val;
+ elvss_cmd_set[3] = elvss_val;
+ elvss_cmd_set[4] = elvss_val;
+ s6e63m0_dcs_write(ctx, elvss_cmd_set, 5);
+
+ /* Update the ACL per gamma value */
+ i = s6e63m0_acl_per_gamma[brightness];
+ s6e63m0_dcs_write(ctx, s6e63m0_acl[i],
+ ARRAY_SIZE(s6e63m0_acl[i]));
+
+ /* Update gamma table */
s6e63m0_dcs_write(ctx, s6e63m0_gamma_22[brightness],
ARRAY_SIZE(s6e63m0_gamma_22[brightness]));
+ s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x03);
- /* update gamma table. */
- s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x01);
return s6e63m0_clear_error(ctx);
}
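A worked example of the three table lookups in s6e63m0_set_brightness(),
assuming the default ELVSS pulse of 0x16 that is used when the panel type
is unknown:

static u8 example_elvss_for_150cd(void)
{
	int b = 12;	/* 30 cd + 12 * 10 cd = the 150 cd gamma entry */
	u8 level = s6e63m0_elvss_per_gamma[b];		/* 2 */
	u8 val = 0x16 + s6e63m0_elvss_offsets[level];	/* 0x16 + 0x09 = 0x1f */

	/* s6e63m0_acl_per_gamma[b] is 1, selecting the 40P ACL table */
	return val > 0x1f ? 0x1f : val;	/* exactly at the clamp limit */
}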
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 597f676a6591..41bbec72b2da 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2267,6 +2267,31 @@ static const struct panel_desc innolux_n116bge = {
},
};
+static const struct drm_display_mode innolux_n125hce_gn1_mode = {
+ .clock = 162000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 4,
+ .vtotal = 1080 + 4 + 4 + 24,
+};
+
+static const struct panel_desc innolux_n125hce_gn1 = {
+ .modes = &innolux_n125hce_gn1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 276,
+ .height = 155,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode innolux_n156bge_l21_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -4123,6 +4148,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,n116bge",
.data = &innolux_n116bge,
}, {
+ .compatible = "innolux,n125hce-gn1",
+ .data = &innolux_n125hce_gn1,
+ }, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
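As a sanity check on the new Innolux N125HCE-GN1 entry: with
htotal = 2080 and vtotal = 1112, the 162 MHz pixel clock works out to
162,000,000 / (2080 * 1112) ~= 70 Hz with these timings.
drm_mode_vrefresh() performs the same computation on a
struct drm_display_mode:

static int innolux_n125hce_gn1_refresh(void)
{
	/* .clock is in kHz, hence the * 1000 */
	return DIV_ROUND_CLOSEST(162000 * 1000, 2080 * 1112);	/* 70 */
}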
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index 57a31dd0ffed..3e0723bc36bd 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -228,7 +228,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
INIT_LIST_HEAD(&obj->mappings.list);
mutex_init(&obj->mappings.lock);
obj->base.base.funcs = &panfrost_gem_funcs;
- obj->base.map_cached = pfdev->coherent;
+ obj->base.map_wc = !pfdev->coherent;
return &obj->base.base;
}
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 128c38c8a837..7dd0c69baa47 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -115,7 +115,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
if (ttm == NULL)
return NULL;
- if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) {
+ if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) {
kfree(ttm);
return NULL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d59ef6e92a40..23195d5d4e91 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -730,9 +730,6 @@ int radeon_ttm_init(struct radeon_device *rdev)
}
rdev->mman.initialized = true;
- ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
- dma_addressing_limited(&rdev->pdev->dev));
-
r = radeon_ttm_init_vram(rdev);
if (r) {
DRM_ERROR("Failed initializing VRAM heap.\n");
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 57fb3eb3a4b4..39c1c339be7b 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -155,7 +155,7 @@ int radeon_uvd_init(struct radeon_device *rdev)
family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
+ DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
version_major, version_minor, family_id);
/*
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 5e8006444704..a450497368b2 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -122,7 +122,7 @@ int radeon_vce_init(struct radeon_device *rdev)
if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
return -EINVAL;
- DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
+ DRM_INFO("Found VCE firmware/feedback version %d.%d.%d / %d!\n",
start, mid, end, rdev->vce.fb_version);
rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index 55960cbb1019..522e51a404cc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -805,25 +805,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
ret = of_dma_configure(drm->dev, dev->of_node, true);
if (ret)
return ret;
- } else {
- /*
- * If we don't have the interconnect property, most likely
- * because of an old DT, we need to set the DMA offset by hand
- * on our device since the RAM mapping is at 0 for the DMA bus,
- * unlike the CPU.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- *
- * If we have two subsequent calls to dma_direct_set_offset
- * returns -EINVAL. Unfortunately, this happens when we have two
- * backends in the system, and will result in the driver
- * reporting an error while it has been setup properly before.
- * Ignore EINVAL, but it should really be removed eventually.
- */
- ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret && ret != -EINVAL)
- return ret;
}
backend->engine.node = dev->of_node;
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 1b96780b4989..a00b7ab9c14c 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -63,6 +63,9 @@ static atomic_long_t allocated_pages;
static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+
static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -236,21 +239,6 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
return p;
}
-/* Count the number of pages available in a pool_type */
-static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
-{
- unsigned int count = 0;
- struct page *p;
-
- spin_lock(&pt->lock);
- /* Only used for debugfs, the overhead doesn't matter */
- list_for_each_entry(p, &pt->pages, lru)
- ++count;
- spin_unlock(&pt->lock);
-
- return count;
-}
-
/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
enum ttm_caching caching, unsigned int order)
@@ -290,8 +278,14 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
#ifdef CONFIG_X86
switch (caching) {
case ttm_write_combined:
+ if (pool->use_dma32)
+ return &global_dma32_write_combined[order];
+
return &global_write_combined[order];
case ttm_uncached:
+ if (pool->use_dma32)
+ return &global_dma32_uncached[order];
+
return &global_uncached[order];
default:
break;
@@ -513,7 +507,6 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
ttm_pool_type_init(&pool->caching[i].orders[j],
pool, i, j);
}
-EXPORT_SYMBOL(ttm_pool_init);
/**
* ttm_pool_fini - Cleanup a pool
@@ -531,9 +524,22 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
-EXPORT_SYMBOL(ttm_pool_fini);
#ifdef CONFIG_DEBUG_FS
+/* Count the number of pages available in a pool_type */
+static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
+{
+ unsigned int count = 0;
+ struct page *p;
+
+ spin_lock(&pt->lock);
+ /* Only used for debugfs, the overhead doesn't matter */
+ list_for_each_entry(p, &pt->pages, lru)
+ ++count;
+ spin_unlock(&pt->lock);
+
+ return count;
+}
/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
@@ -570,6 +576,11 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
seq_puts(m, "uc\t:");
ttm_pool_debugfs_orders(global_uncached, m);
+ seq_puts(m, "wc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_write_combined, m);
+ seq_puts(m, "uc 32\t:");
+ ttm_pool_debugfs_orders(global_dma32_uncached, m);
+
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -640,6 +651,11 @@ int ttm_pool_mgr_init(unsigned long num_pages)
ttm_pool_type_init(&global_write_combined[i], NULL,
ttm_write_combined, i);
ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
+
+ ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
+ ttm_write_combined, i);
+ ttm_pool_type_init(&global_dma32_uncached[i], NULL,
+ ttm_uncached, i);
}
mm_shrinker.count_objects = ttm_pool_shrinker_count;
@@ -660,6 +676,9 @@ void ttm_pool_mgr_fini(void)
for (i = 0; i < MAX_ORDER; ++i) {
ttm_pool_type_fini(&global_write_combined[i]);
ttm_pool_type_fini(&global_uncached[i]);
+
+ ttm_pool_type_fini(&global_dma32_write_combined[i]);
+ ttm_pool_type_fini(&global_dma32_uncached[i]);
}
unregister_shrinker(&mm_shrinker);
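The new global_dma32_* arrays keep pages allocated with GFP_DMA32 in
their own pools: if they were recycled through the ordinary global
pools, a pool created with use_dma32 set could be handed back a page
above 4 GiB (and 32-bit addressable pages would leak the other way). A
condensed restatement of the x86 branch of ttm_pool_select_type() for
the two global caching modes:

static struct ttm_pool_type *select_global(struct ttm_pool *pool,
					   enum ttm_caching caching,
					   unsigned int order)
{
	bool dma32 = pool->use_dma32;

	if (caching == ttm_write_combined)
		return dma32 ? &global_dma32_write_combined[order]
			     : &global_write_combined[order];
	return dma32 ? &global_dma32_uncached[order]
		     : &global_uncached[order];
}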
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index b5a8dd9fdf02..9269092697d8 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -38,8 +38,6 @@ static const struct drm_driver driver = {
.driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
/* GEM hooks */
- .gem_create_object = drm_gem_shmem_create_object_cached,
-
.fops = &udl_driver_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 8b52cb25877c..6a8731ab9d7d 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -78,7 +78,7 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size)
obj = &bo->base.base;
obj->funcs = &v3d_gem_funcs;
-
+ bo->base.map_wc = true;
INIT_LIST_HEAD(&bo->unref_head);
return &bo->base.base;
diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c
index 34612edcabbd..8aa5220885f4 100644
--- a/drivers/gpu/drm/vc4/vc4_txp.c
+++ b/drivers/gpu/drm/vc4/vc4_txp.c
@@ -273,8 +273,10 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn,
}
static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
- struct drm_connector_state *conn_state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state,
+ conn);
struct vc4_txp *txp = connector_to_vc4_txp(conn);
struct drm_gem_cma_object *gem;
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 9a413091abb6..f8635ccaf9a1 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -403,8 +403,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj,
if (ret)
return ret;
- fput(vma->vm_file);
- vma->vm_file = get_file(obj->filp);
+ vma_set_file(vma, obj->filp);
vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 24cc445169e2..a3e0fb5b8671 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -364,6 +364,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
irqwait->request.sequence +=
atomic_read(&cur_irq->irq_received);
irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
+ break;
case VIA_IRQ_ABSOLUTE:
break;
default:
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 8d8135f424ef..3d6e3a70f318 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -1001,8 +1001,8 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
state = via_check_vheader6(&buf, buf_end);
break;
case state_command:
- if ((HALCYON_HEADER2 == (cmd = *buf)) &&
- supported_3d)
+ cmd = *buf;
+ if ((cmd == HALCYON_HEADER2) && supported_3d)
state = state_header2;
else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
state = state_header1;
@@ -1064,7 +1064,8 @@ via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
state = via_parse_vheader6(dev_priv, &buf, buf_end);
break;
case state_command:
- if (HALCYON_HEADER2 == (cmd = *buf))
+ cmd = *buf;
+ if (cmd == HALCYON_HEADER2)
state = state_header2;
else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
state = state_header1;
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index f336a8fa6666..5fefc88d47e4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -67,8 +67,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data)
struct virtio_gpu_device *vgdev = node->minor->dev->dev_private;
seq_printf(m, "fence %llu %lld\n",
- (u64)atomic64_read(&vgdev->fence_drv.last_seq),
- vgdev->fence_drv.sync_seq);
+ (u64)atomic64_read(&vgdev->fence_drv.last_fence_id),
+ vgdev->fence_drv.current_fence_id);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 3c0e17212c33..6a232553c99b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -127,8 +127,8 @@ typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf);
struct virtio_gpu_fence_driver {
- atomic64_t last_seq;
- uint64_t sync_seq;
+ atomic64_t last_fence_id;
+ uint64_t current_fence_id;
uint64_t context;
struct list_head fences;
spinlock_t lock;
@@ -257,7 +257,7 @@ struct virtio_gpu_fpriv {
struct mutex context_lock;
};
-/* virtio_ioctl.c */
+/* virtgpu_ioctl.c */
#define DRM_VIRTIO_NUM_IOCTLS 11
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -420,7 +420,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
- u64 last_seq);
+ u64 fence_id);
/* virtgpu_object.c */
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index 5b2a4146c5bd..728ca36f6327 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -48,7 +48,7 @@ static bool virtio_fence_signaled(struct dma_fence *f)
/* leaked fence outside driver before completing
* initialization with virtio_gpu_fence_emit */
return false;
- if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno)
+ if (atomic64_read(&fence->drv->last_fence_id) >= fence->f.seqno)
return true;
return false;
}
@@ -62,7 +62,8 @@ static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size)
{
struct virtio_gpu_fence *fence = to_virtio_fence(f);
- snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq));
+ snprintf(str, size, "%llu",
+ (u64)atomic64_read(&fence->drv->last_fence_id));
}
static const struct dma_fence_ops virtio_fence_ops = {
@@ -100,7 +101,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- fence->f.seqno = ++drv->sync_seq;
+ fence->f.seqno = ++drv->current_fence_id;
dma_fence_get(&fence->f);
list_add_tail(&fence->node, &drv->fences);
spin_unlock_irqrestore(&drv->lock, irq_flags);
@@ -112,16 +113,16 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
- u64 last_seq)
+ u64 fence_id)
{
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence, *tmp;
unsigned long irq_flags;
spin_lock_irqsave(&drv->lock, irq_flags);
- atomic64_set(&vgdev->fence_drv.last_seq, last_seq);
+ atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
- if (last_seq < fence->f.seqno)
+ if (fence_id < fence->f.seqno)
continue;
dma_fence_signal_locked(&fence->f);
list_del(&fence->node);
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5417f365d1a3..23eb6d772e40 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -591,8 +591,9 @@ static int verify_blob(struct virtio_gpu_device *vgdev,
return 0;
}
-static int virtio_gpu_resource_create_blob(struct drm_device *dev,
- void *data, struct drm_file *file)
+static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
+ void *data,
+ struct drm_file *file)
{
int ret = 0;
uint32_t handle = 0;
@@ -696,6 +697,6 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
- virtio_gpu_resource_create_blob,
+ virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW),
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index d9ad27e00905..d69a5b6da553 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -144,7 +144,6 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
dshmem = &shmem->base.base;
dshmem->base.funcs = &virtio_gpu_shmem_funcs;
- dshmem->map_cached = true;
return &dshmem->base;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 1a1b5bc8e121..d4d39227f2ed 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -82,7 +82,6 @@ static const struct drm_driver vkms_driver = {
.driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM,
.release = vkms_release,
.fops = &vkms_driver_fops,
- .gem_create_object = drm_gem_shmem_create_object_cached,
DRM_GEM_SHMEM_DRIVER_OPS,
.name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c
index 67f80ab1e85f..78fdc1d59186 100644
--- a/drivers/gpu/drm/vkms/vkms_writeback.c
+++ b/drivers/gpu/drm/vkms/vkms_writeback.c
@@ -2,6 +2,7 @@
#include <linux/dma-buf-map.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_writeback.h>
#include <drm/drm_probe_helper.h>
@@ -105,8 +106,10 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector,
}
static void vkms_wb_atomic_commit(struct drm_connector *conn,
- struct drm_connector_state *state)
+ struct drm_atomic_state *state)
{
+ struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+ conn);
struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev);
struct vkms_output *output = &vkmsdev->output;
struct drm_writeback_connector *wb_conn = &output->wb_connector;
@@ -122,7 +125,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn,
crtc_state->active_writeback = conn_state->writeback_job->priv;
crtc_state->wb_pending = true;
spin_unlock_irq(&output->composer_lock);
- drm_writeback_queue_job(wb_conn, state);
+ drm_writeback_queue_job(wb_conn, connector_state);
}
static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = {
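The vkms change above, like the nouveau and vc4 hunks earlier, follows
the tree-wide conversion of connector helper callbacks from receiving a
drm_connector_state to receiving the whole drm_atomic_state, from which
each callback looks up its own new state. The pattern for any driver
(the foo_ prefix is a placeholder):

static void foo_connector_atomic_commit(struct drm_connector *conn,
					struct drm_atomic_state *state)
{
	struct drm_connector_state *conn_state =
		drm_atomic_get_new_connector_state(state, conn);

	/* ... use conn_state exactly as the old callback argument ... */
}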
diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c
index fa69b94debd9..7596dc164648 100644
--- a/drivers/hsi/controllers/omap_ssi_core.c
+++ b/drivers/hsi/controllers/omap_ssi_core.c
@@ -355,7 +355,7 @@ static int ssi_add_controller(struct hsi_controller *ssi,
err = ida_simple_get(&platform_omap_ssi_ida, 0, 0, GFP_KERNEL);
if (err < 0)
- goto out_err;
+ return err;
ssi->id = err;
ssi->owner = THIS_MODULE;
diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c
index 47f0208aa7c3..c3fb5beb846e 100644
--- a/drivers/hsi/hsi_core.c
+++ b/drivers/hsi/hsi_core.c
@@ -352,7 +352,7 @@ static void hsi_port_release(struct device *dev)
}
/**
- * hsi_unregister_port - Unregister an HSI port
+ * hsi_port_unregister_clients - Unregister the clients of an HSI port
* @port: The HSI port to unregister
*/
void hsi_port_unregister_clients(struct hsi_port *port)
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 502f8cd95f6d..d491fdcee61f 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -2550,7 +2550,6 @@ static void hv_kexec_handler(void)
/* Make sure conn_state is set as hv_synic_cleanup checks for it */
mb();
cpuhp_remove_state(hyperv_cpuhp_online);
- hyperv_cleanup();
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -2566,7 +2565,6 @@ static void hv_crash_handler(struct pt_regs *regs)
cpu = smp_processor_id();
hv_stimer_cleanup(cpu);
hv_synic_disable_regs(cpu);
- hyperv_cleanup();
};
static int hv_synic_suspend(void)
diff --git a/drivers/hwmon/amd_energy.c b/drivers/hwmon/amd_energy.c
index 9b306448b7a0..822c2e74b98d 100644
--- a/drivers/hwmon/amd_energy.c
+++ b/drivers/hwmon/amd_energy.c
@@ -222,7 +222,7 @@ static int amd_create_sensor(struct device *dev,
*/
cpus = num_present_cpus() / num_siblings;
- s_config = devm_kcalloc(dev, cpus + sockets,
+ s_config = devm_kcalloc(dev, cpus + sockets + 1,
sizeof(u32), GFP_KERNEL);
if (!s_config)
return -ENOMEM;
@@ -254,6 +254,7 @@ static int amd_create_sensor(struct device *dev,
scnprintf(label_l[i], 10, "Esocket%u", (i - cpus));
}
+ s_config[i] = 0;
return 0;
}
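The amd_energy fix above works because the hwmon core walks each
channel-config array until it reaches a zero entry: the allocation needs
one slot beyond the channel count, and that final slot must be cleared.
A minimal static illustration of the expected shape (channel flags
chosen for illustration):

static const u32 example_energy_config[] = {
	HWMON_E_INPUT | HWMON_E_LABEL,	/* channel 0 */
	HWMON_E_INPUT | HWMON_E_LABEL,	/* channel 1 */
	0,				/* terminator the core stops at */
};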
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index a250481b5a97..3bc2551577a3 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -11,13 +11,6 @@
* convert raw register values is from https://github.com/ocerman/zenpower.
* The information is not confirmed from chip datasheets, but experiments
* suggest that it provides reasonable temperature values.
- * - Register addresses to read chip voltage and current are also from
- * https://github.com/ocerman/zenpower, and not confirmed from chip
- * datasheets. Current calibration is board specific and not typically
- * shared by board vendors. For this reason, current values are
- * normalized to report 1A/LSB for core current and and 0.25A/LSB for SoC
- * current. Reported values can be adjusted using the sensors configuration
- * file.
*/
#include <linux/bitops.h>
@@ -109,10 +102,7 @@ struct k10temp_data {
int temp_offset;
u32 temp_adjust_mask;
u32 show_temp;
- u32 svi_addr[2];
bool is_zen;
- bool show_current;
- int cfactor[2];
};
#define TCTL_BIT 0
@@ -137,16 +127,6 @@ static const struct tctl_offset tctl_offset_table[] = {
{ 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */
};
-static bool is_threadripper(void)
-{
- return strstr(boot_cpu_data.x86_model_id, "Threadripper");
-}
-
-static bool is_epyc(void)
-{
- return strstr(boot_cpu_data.x86_model_id, "EPYC");
-}
-
static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval)
{
pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval);
@@ -211,16 +191,6 @@ static const char *k10temp_temp_label[] = {
"Tccd8",
};
-static const char *k10temp_in_label[] = {
- "Vcore",
- "Vsoc",
-};
-
-static const char *k10temp_curr_label[] = {
- "Icore",
- "Isoc",
-};
-
static int k10temp_read_labels(struct device *dev,
enum hwmon_sensor_types type,
u32 attr, int channel, const char **str)
@@ -229,50 +199,6 @@ static int k10temp_read_labels(struct device *dev,
case hwmon_temp:
*str = k10temp_temp_label[channel];
break;
- case hwmon_in:
- *str = k10temp_in_label[channel];
- break;
- case hwmon_curr:
- *str = k10temp_curr_label[channel];
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int k10temp_read_curr(struct device *dev, u32 attr, int channel,
- long *val)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- u32 regval;
-
- switch (attr) {
- case hwmon_curr_input:
- amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
- data->svi_addr[channel], &regval);
- *val = DIV_ROUND_CLOSEST(data->cfactor[channel] *
- (regval & 0xff),
- 1000);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val)
-{
- struct k10temp_data *data = dev_get_drvdata(dev);
- u32 regval;
-
- switch (attr) {
- case hwmon_in_input:
- amd_smn_read(amd_pci_dev_to_node_id(data->pdev),
- data->svi_addr[channel], &regval);
- regval = (regval >> 16) & 0xff;
- *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100);
- break;
default:
return -EOPNOTSUPP;
}
@@ -331,10 +257,6 @@ static int k10temp_read(struct device *dev, enum hwmon_sensor_types type,
switch (type) {
case hwmon_temp:
return k10temp_read_temp(dev, attr, channel, val);
- case hwmon_in:
- return k10temp_read_in(dev, attr, channel, val);
- case hwmon_curr:
- return k10temp_read_curr(dev, attr, channel, val);
default:
return -EOPNOTSUPP;
}
@@ -383,11 +305,6 @@ static umode_t k10temp_is_visible(const void *_data,
return 0;
}
break;
- case hwmon_in:
- case hwmon_curr:
- if (!data->show_current)
- return 0;
- break;
default:
return 0;
}
@@ -517,20 +434,10 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case 0x8: /* Zen+ */
case 0x11: /* Zen APU */
case 0x18: /* Zen+ APU */
- data->show_current = !is_threadripper() && !is_epyc();
- data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0;
- data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1;
- data->cfactor[0] = F17H_M01H_CFACTOR_ICORE;
- data->cfactor[1] = F17H_M01H_CFACTOR_ISOC;
k10temp_get_ccd_support(pdev, data, 4);
break;
case 0x31: /* Zen2 Threadripper */
case 0x71: /* Zen2 */
- data->show_current = !is_threadripper() && !is_epyc();
- data->cfactor[0] = F17H_M31H_CFACTOR_ICORE;
- data->cfactor[1] = F17H_M31H_CFACTOR_ISOC;
- data->svi_addr[0] = F17H_M31H_SVI_TEL_PLANE0;
- data->svi_addr[1] = F17H_M31H_SVI_TEL_PLANE1;
k10temp_get_ccd_support(pdev, data, 8);
break;
}
@@ -542,11 +449,6 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
switch (boot_cpu_data.x86_model) {
case 0x0 ... 0x1: /* Zen3 */
- data->show_current = true;
- data->svi_addr[0] = F19H_M01_SVI_TEL_PLANE0;
- data->svi_addr[1] = F19H_M01_SVI_TEL_PLANE1;
- data->cfactor[0] = F19H_M01H_CFACTOR_ICORE;
- data->cfactor[1] = F19H_M01H_CFACTOR_ISOC;
k10temp_get_ccd_support(pdev, data, 8);
break;
}
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 777439f48c14..111a91dc6b79 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -334,8 +334,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
ctx->pwm_value = MAX_PWM;
- /* Set duty cycle to maximum allowed and enable PWM output */
pwm_init_state(ctx->pwm, &state);
+ /*
+ * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
+ * long. Check this here to keep the fan from running at too low a
+ * frequency.
+ */
+ if (state.period > ULONG_MAX / MAX_PWM + 1) {
+ dev_err(dev, "Configured period too big\n");
+ return -EINVAL;
+ }
+
+ /* Set duty cycle to maximum allowed and enable PWM output */
state.duty_cycle = ctx->pwm->args.period - 1;
state.enabled = true;
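The bound in the new check is exact, not merely conservative: with Q = ULONG_MAX / MAX_PWM in integer division, period - 1 <= Q guarantees that MAX_PWM * (period - 1) fits in an unsigned long, while period - 1 = Q + 1 already overflows, so rejecting period > Q + 1 excludes precisely the unsafe values. A standalone sketch of the same check:

	#include <limits.h>
	#include <stdio.h>

	#define MAX_PWM 255

	int main(void)
	{
		/* deliberately one past the largest safe period */
		unsigned long period = ULONG_MAX / MAX_PWM + 2;

		if (period > ULONG_MAX / MAX_PWM + 1)
			printf("period %lu rejected: MAX_PWM * (period - 1) wraps\n",
			       period);
		else
			printf("period %lu is safe\n", period);
		return 0;
	}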
diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c
index 09ce30cba54b..17d064e58938 100644
--- a/drivers/hwmon/scmi-hwmon.c
+++ b/drivers/hwmon/scmi-hwmon.c
@@ -30,7 +30,7 @@ static inline u64 __pow10(u8 x)
static int scmi_hwmon_scale(const struct scmi_sensor_info *sensor, u64 *value)
{
- s8 scale = sensor->scale;
+ int scale = sensor->scale;
u64 f;
switch (sensor->type) {
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ae90713443fa..877fe3733a42 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1449,7 +1449,7 @@ static int i801_add_mux(struct i801_priv *priv)
/* Register GPIO descriptor lookup table */
lookup = devm_kzalloc(dev,
- struct_size(lookup, table, mux_config->n_gpios),
+ struct_size(lookup, table, mux_config->n_gpios + 1),
GFP_KERNEL);
if (!lookup)
return -ENOMEM;
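Same pattern as the amd_energy fix above: gpiod lookup tables are scanned until an all-zero entry, so struct_size() must count n_gpios + 1 elements, with devm_kzalloc()'s zeroing supplying the terminator. A hedged userspace sketch, where struct lookup_table is an illustrative stand-in for the real struct gpiod_lookup_table:

	#include <stdio.h>
	#include <stdlib.h>

	struct lookup_entry { const char *key; unsigned int chip_hwnum; };

	struct lookup_table {
		const char *dev_id;
		struct lookup_entry table[];	/* scanned until an all-zero entry */
	};

	static struct lookup_table *make_table(size_t n_gpios)
	{
		/* n_gpios + 1: the extra calloc'ed entry stays zero and ends the scan */
		return calloc(1, sizeof(struct lookup_table) +
				 (n_gpios + 1) * sizeof(struct lookup_entry));
	}

	int main(void)
	{
		struct lookup_table *t = make_table(2);

		if (!t)
			return 1;
		t->table[0].key = "gpio-a";
		t->table[1].key = "gpio-b";
		for (size_t i = 0; t->table[i].key; i++)	/* stops at entry 2 */
			printf("%zu: %s\n", i, t->table[i].key);
		free(t);
		return 0;
	}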
diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c
index 33de99b7bc20..0818d3e50734 100644
--- a/drivers/i2c/busses/i2c-mt65xx.c
+++ b/drivers/i2c/busses/i2c-mt65xx.c
@@ -38,6 +38,7 @@
#define I2C_IO_CONFIG_OPEN_DRAIN 0x0003
#define I2C_IO_CONFIG_PUSH_PULL 0x0000
#define I2C_SOFT_RST 0x0001
+#define I2C_HANDSHAKE_RST 0x0020
#define I2C_FIFO_ADDR_CLR 0x0001
#define I2C_DELAY_LEN 0x0002
#define I2C_TIME_CLR_VALUE 0x0000
@@ -45,6 +46,7 @@
#define I2C_WRRD_TRANAC_VALUE 0x0002
#define I2C_RD_TRANAC_VALUE 0x0001
#define I2C_SCL_MIS_COMP_VALUE 0x0000
+#define I2C_CHN_CLR_FLAG 0x0000
#define I2C_DMA_CON_TX 0x0000
#define I2C_DMA_CON_RX 0x0001
@@ -54,7 +56,9 @@
#define I2C_DMA_START_EN 0x0001
#define I2C_DMA_INT_FLAG_NONE 0x0000
#define I2C_DMA_CLR_FLAG 0x0000
+#define I2C_DMA_WARM_RST 0x0001
#define I2C_DMA_HARD_RST 0x0002
+#define I2C_DMA_HANDSHAKE_RST 0x0004
#define MAX_SAMPLE_CNT_DIV 8
#define MAX_STEP_CNT_DIV 64
@@ -475,11 +479,24 @@ static void mtk_i2c_init_hw(struct mtk_i2c *i2c)
{
u16 control_reg;
- writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
- udelay(50);
- writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
-
- mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ if (i2c->dev_comp->dma_sync) {
+ writel(I2C_DMA_WARM_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ udelay(10);
+ writel(I2C_DMA_HANDSHAKE_RST | I2C_DMA_HARD_RST,
+ i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_HANDSHAKE_RST | I2C_SOFT_RST,
+ OFFSET_SOFTRESET);
+ udelay(10);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_CHN_CLR_FLAG, OFFSET_SOFTRESET);
+ } else {
+ writel(I2C_DMA_HARD_RST, i2c->pdmabase + OFFSET_RST);
+ udelay(50);
+ writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_RST);
+ mtk_i2c_writew(i2c, I2C_SOFT_RST, OFFSET_SOFTRESET);
+ }
/* Set ioconfig */
if (i2c->use_push_pull)
diff --git a/drivers/i2c/busses/i2c-sprd.c b/drivers/i2c/busses/i2c-sprd.c
index 19cda6742423..2917fecf6c80 100644
--- a/drivers/i2c/busses/i2c-sprd.c
+++ b/drivers/i2c/busses/i2c-sprd.c
@@ -72,6 +72,8 @@
/* timeout (ms) for pm runtime autosuspend */
#define SPRD_I2C_PM_TIMEOUT 1000
+/* timeout (ms) for transfer message */
+#define I2C_XFER_TIMEOUT 1000
/* SPRD i2c data structure */
struct sprd_i2c {
@@ -244,6 +246,7 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, bool is_last_msg)
{
struct sprd_i2c *i2c_dev = i2c_adap->algo_data;
+ unsigned long time_left;
i2c_dev->msg = msg;
i2c_dev->buf = msg->buf;
@@ -273,7 +276,10 @@ static int sprd_i2c_handle_msg(struct i2c_adapter *i2c_adap,
sprd_i2c_opt_start(i2c_dev);
- wait_for_completion(&i2c_dev->complete);
+ time_left = wait_for_completion_timeout(&i2c_dev->complete,
+ msecs_to_jiffies(I2C_XFER_TIMEOUT));
+ if (!time_left)
+ return -ETIMEDOUT;
return i2c_dev->err;
}
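For reference, wait_for_completion_timeout() returns the remaining jiffies on success and 0 on timeout; the hunk converts a potentially unbounded wait into that pattern. A minimal kernel-style sketch, not the driver's exact code:

	#include <linux/completion.h>
	#include <linux/errno.h>
	#include <linux/jiffies.h>

	/* sketch of the pattern: bound the wait, map timeout to -ETIMEDOUT */
	static int wait_xfer_done(struct completion *done)
	{
		unsigned long time_left;

		time_left = wait_for_completion_timeout(done,
							msecs_to_jiffies(1000));
		if (!time_left)		/* 0 means the wait timed out */
			return -ETIMEDOUT;
		return 0;		/* else: the remaining jiffies */
	}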
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
index 1c6b78ad5ade..b61bf53ec07a 100644
--- a/drivers/i3c/master.c
+++ b/drivers/i3c/master.c
@@ -2537,7 +2537,7 @@ int i3c_master_register(struct i3c_master_controller *master,
ret = i3c_master_bus_init(master);
if (ret)
- goto err_put_dev;
+ goto err_destroy_wq;
ret = device_add(&master->dev);
if (ret)
@@ -2568,6 +2568,9 @@ err_del_dev:
err_cleanup_bus:
i3c_master_bus_cleanup(master);
+err_destroy_wq:
+ destroy_workqueue(master->wq);
+
err_put_dev:
put_device(&master->dev);
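The fix restores the usual goto-unwind invariant: cleanup labels run in the reverse order of acquisition, so a workqueue created before i3c_master_bus_init() needs its own label between err_cleanup_bus and err_put_dev. A kernel-style sketch of that shape, with hypothetical example_* helpers:

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	struct example { struct workqueue_struct *wq; };

	/* hypothetical helpers standing in for bus init/cleanup and device_add */
	int example_bus_init(struct example *ex);
	void example_bus_cleanup(struct example *ex);
	int example_device_add(struct example *ex);

	static int example_register(struct example *ex)
	{
		int ret;

		ex->wq = alloc_workqueue("example", 0, 0);
		if (!ex->wq)
			return -ENOMEM;

		ret = example_bus_init(ex);
		if (ret)
			goto err_destroy_wq;

		ret = example_device_add(ex);
		if (ret)
			goto err_cleanup_bus;

		return 0;

	err_cleanup_bus:
		example_bus_cleanup(ex);
	err_destroy_wq:
		destroy_workqueue(ex->wq);
		return ret;
	}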
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
index 4e80a1fcbf91..e68f15f4b4d0 100644
--- a/drivers/i3c/master/Kconfig
+++ b/drivers/i3c/master/Kconfig
@@ -21,3 +21,16 @@ config DW_I3C_MASTER
This driver can also be built as a module. If so, the module
will be called dw-i3c-master.
+
+config MIPI_I3C_HCI
+ tristate "MIPI I3C Host Controller Interface driver (EXPERIMENTAL)"
+ depends on I3C
+ help
+ Support for hardware following the MIPI Alliance's I3C Host Controller
+ Interface specification.
+
+ For details please see:
+ https://www.mipi.org/specifications/i3c-hci
+
+ This driver can also be built as a module. If so, the module will be
+ called mipi-i3c-hci.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
index 7eea9e086144..b892fd4cafad 100644
--- a/drivers/i3c/master/Makefile
+++ b/drivers/i3c/master/Makefile
@@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_CDNS_I3C_MASTER) += i3c-master-cdns.o
obj-$(CONFIG_DW_I3C_MASTER) += dw-i3c-master.o
+obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci/
diff --git a/drivers/i3c/master/mipi-i3c-hci/Makefile b/drivers/i3c/master/mipi-i3c-hci/Makefile
new file mode 100644
index 000000000000..a658e7b8262c
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: BSD-3-Clause
+
+obj-$(CONFIG_MIPI_I3C_HCI) += mipi-i3c-hci.o
+mipi-i3c-hci-y := core.o ext_caps.o pio.o dma.o \
+ cmd_v1.o cmd_v2.o \
+ dat_v1.o dct_v1.o
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd.h b/drivers/i3c/master/mipi-i3c-hci/cmd.h
new file mode 100644
index 000000000000..1d6dd2c5d01a
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common command/response related stuff
+ */
+
+#ifndef CMD_H
+#define CMD_H
+
+/*
+ * Those bits are common to all descriptor formats and
+ * may be manipulated by the core code.
+ */
+#define CMD_0_TOC W0_BIT_(31)
+#define CMD_0_ROC W0_BIT_(30)
+#define CMD_0_ATTR W0_MASK(2, 0)
+
+/*
+ * Response Descriptor Structure
+ */
+#define RESP_STATUS(resp) FIELD_GET(GENMASK(31, 28), resp)
+#define RESP_TID(resp) FIELD_GET(GENMASK(27, 24), resp)
+#define RESP_DATA_LENGTH(resp) FIELD_GET(GENMASK(21, 0), resp)
+
+#define RESP_ERR_FIELD GENMASK(31, 28)
+
+enum hci_resp_err {
+ RESP_SUCCESS = 0x0,
+ RESP_ERR_CRC = 0x1,
+ RESP_ERR_PARITY = 0x2,
+ RESP_ERR_FRAME = 0x3,
+ RESP_ERR_ADDR_HEADER = 0x4,
+ RESP_ERR_BCAST_NACK_7E = 0x4,
+ RESP_ERR_NACK = 0x5,
+ RESP_ERR_OVL = 0x6,
+ RESP_ERR_I3C_SHORT_READ = 0x7,
+ RESP_ERR_HC_TERMINATED = 0x8,
+ RESP_ERR_I2C_WR_DATA_NACK = 0x9,
+ RESP_ERR_BUS_XFER_ABORTED = 0x9,
+ RESP_ERR_NOT_SUPPORTED = 0xa,
+ RESP_ERR_ABORTED_WITH_CRC = 0xb,
+ /* 0xc to 0xf are reserved for transfer specific errors */
+};
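The RESP_* accessors above are plain GENMASK()/FIELD_GET() field extraction. A userspace approximation, with simplified stand-ins for the kernel macros, showing how a response word decodes (0x50000007 is an arbitrary example value):

	#include <stdio.h>

	/* simplified stand-ins for the kernel's GENMASK()/FIELD_GET() */
	#define GENMASK(h, l)	((~0u << (l)) & (~0u >> (31 - (h))))
	#define FIELD_GET(m, v)	(((v) & (m)) / ((m) & -(m)))

	int main(void)
	{
		unsigned int resp = 0x50000007;	/* status 5 (NACK), length 7 */

		printf("status=%u len=%u\n",
		       FIELD_GET(GENMASK(31, 28), resp),
		       FIELD_GET(GENMASK(21, 0), resp));
		return 0;
	}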
+
+/* TID generation (4 bits wide in all cases) */
+#define hci_get_tid(bits) \
+ (atomic_inc_return_relaxed(&hci->next_cmd_tid) % (1U << 4))
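Two things worth noting about hci_get_tid(): its bits argument is unused (TIDs are always 4 bits wide, as the comment says), and the macro body references a local hci pointer that must be in scope at every expansion site. A rough userspace equivalent of the wrapping counter; the kernel's atomic_inc_return_relaxed() returns the post-increment value, so the two sequences differ by one:

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_uint next_cmd_tid;

	/* 4-bit TIDs: relaxed atomic increment, wrapped modulo 16 */
	static unsigned int get_tid(void)
	{
		return atomic_fetch_add_explicit(&next_cmd_tid, 1,
						 memory_order_relaxed) % (1U << 4);
	}

	int main(void)
	{
		for (int i = 0; i < 20; i++)
			printf("%u ", get_tid());	/* 0..15, then wraps */
		printf("\n");
		return 0;
	}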
+
+/* This abstracts operations with our command descriptor formats */
+struct hci_cmd_ops {
+ int (*prep_ccc)(struct i3c_hci *hci, struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw);
+ void (*prep_i3c_xfer)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ void (*prep_i2c_xfer)(struct i3c_hci *hci, struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer);
+ int (*perform_daa)(struct i3c_hci *hci);
+};
+
+/* Our various instances */
+extern const struct hci_cmd_ops mipi_i3c_hci_cmd_v1;
+extern const struct hci_cmd_ops mipi_i3c_hci_cmd_v2;
+
+#endif
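The v1/v2 instances declared above are picked once at probe time (core.c later switches on the HC_CAP_CMD_SIZE capability) and everything afterwards dispatches through the table. A toy rendition of that indirection, all names illustrative:

	#include <stdio.h>

	struct cmd_ops { int (*prep_xfer)(unsigned int len); };

	/* v1 fits up to 4 immediate data bytes, v2 up to 5 (see the code below) */
	static int prep_v1(unsigned int len) { return len <= 4 ? 1 : 2; }
	static int prep_v2(unsigned int len) { return len <= 5 ? 1 : 2; }

	static const struct cmd_ops cmd_v1 = { .prep_xfer = prep_v1 };
	static const struct cmd_ops cmd_v2 = { .prep_xfer = prep_v2 };

	int main(void)
	{
		unsigned int cap_cmd_size = 0;	/* as if read from HC_CAPABILITIES */
		const struct cmd_ops *cmd = cap_cmd_size ? &cmd_v2 : &cmd_v1;

		/* 1: immediate descriptor, 2: regular descriptor */
		printf("descriptor kind: %d\n", cmd->prep_xfer(5));
		return 0;
	}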
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
new file mode 100644
index 000000000000..d97c3175e0e2
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * I3C HCI v1.0/v1.1 Command Descriptor Handling
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "dat.h"
+#include "dct.h"
+
+
+/*
+ * Address Assignment Command
+ */
+
+#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
+
+#define CMD_A0_TOC W0_BIT_(31)
+#define CMD_A0_ROC W0_BIT_(30)
+#define CMD_A0_DEV_COUNT(v) FIELD_PREP(W0_MASK(29, 26), v)
+#define CMD_A0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_A0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Immediate Data Transfer Command
+ */
+
+#define CMD_0_ATTR_I FIELD_PREP(CMD_0_ATTR, 0x1)
+
+#define CMD_I1_DATA_BYTE_4(v) FIELD_PREP(W1_MASK(63, 56), v)
+#define CMD_I1_DATA_BYTE_3(v) FIELD_PREP(W1_MASK(55, 48), v)
+#define CMD_I1_DATA_BYTE_2(v) FIELD_PREP(W1_MASK(47, 40), v)
+#define CMD_I1_DATA_BYTE_1(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_I1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_I0_TOC W0_BIT_(31)
+#define CMD_I0_ROC W0_BIT_(30)
+#define CMD_I0_RNW W0_BIT_(29)
+#define CMD_I0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_I0_DTT(v) FIELD_PREP(W0_MASK(25, 23), v)
+#define CMD_I0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_I0_CP W0_BIT_(15)
+#define CMD_I0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_I0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Regular Data Transfer Command
+ */
+
+#define CMD_0_ATTR_R FIELD_PREP(CMD_0_ATTR, 0x0)
+
+#define CMD_R1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
+#define CMD_R1_DEF_BYTE(v) FIELD_PREP(W1_MASK(39, 32), v)
+#define CMD_R0_TOC W0_BIT_(31)
+#define CMD_R0_ROC W0_BIT_(30)
+#define CMD_R0_RNW W0_BIT_(29)
+#define CMD_R0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_R0_DBP W0_BIT_(25)
+#define CMD_R0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_R0_CP W0_BIT_(15)
+#define CMD_R0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_R0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Combo Transfer (Write + Write/Read) Command
+ */
+
+#define CMD_0_ATTR_C FIELD_PREP(CMD_0_ATTR, 0x3)
+
+#define CMD_C1_DATA_LENGTH(v) FIELD_PREP(W1_MASK(63, 48), v)
+#define CMD_C1_OFFSET(v) FIELD_PREP(W1_MASK(47, 32), v)
+#define CMD_C0_TOC W0_BIT_(31)
+#define CMD_C0_ROC W0_BIT_(30)
+#define CMD_C0_RNW W0_BIT_(29)
+#define CMD_C0_MODE(v) FIELD_PREP(W0_MASK(28, 26), v)
+#define CMD_C0_16_BIT_SUBOFFSET W0_BIT_(25)
+#define CMD_C0_FIRST_PHASE_MODE W0_BIT_(24)
+#define CMD_C0_DATA_LENGTH_POSITION(v) FIELD_PREP(W0_MASK(23, 22), v)
+#define CMD_C0_DEV_INDEX(v) FIELD_PREP(W0_MASK(20, 16), v)
+#define CMD_C0_CP W0_BIT_(15)
+#define CMD_C0_CMD(v) FIELD_PREP(W0_MASK(14, 7), v)
+#define CMD_C0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Internal Control Command
+ */
+
+#define CMD_0_ATTR_M FIELD_PREP(CMD_0_ATTR, 0x7)
+
+#define CMD_M1_VENDOR_SPECIFIC W1_MASK(63, 32)
+#define CMD_M0_MIPI_RESERVED W0_MASK(31, 12)
+#define CMD_M0_MIPI_CMD W0_MASK(11, 8)
+#define CMD_M0_VENDOR_INFO_PRESENT W0_BIT_( 7)
+#define CMD_M0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+
+/* Data Transfer Speed and Mode */
+enum hci_cmd_mode {
+ MODE_I3C_SDR0 = 0x0,
+ MODE_I3C_SDR1 = 0x1,
+ MODE_I3C_SDR2 = 0x2,
+ MODE_I3C_SDR3 = 0x3,
+ MODE_I3C_SDR4 = 0x4,
+ MODE_I3C_HDR_TSx = 0x5,
+ MODE_I3C_HDR_DDR = 0x6,
+ MODE_I3C_HDR_BT = 0x7,
+ MODE_I3C_Fm_FmP = 0x8,
+ MODE_I2C_Fm = 0x0,
+ MODE_I2C_FmP = 0x1,
+ MODE_I2C_UD1 = 0x2,
+ MODE_I2C_UD2 = 0x3,
+ MODE_I2C_UD3 = 0x4,
+};
+
+static enum hci_cmd_mode get_i3c_mode(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i3c >= 12500000)
+ return MODE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+ return MODE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 6000000)
+ return MODE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 4000000)
+ return MODE_I3C_SDR3;
+ if (bus->scl_rate.i3c > 2000000)
+ return MODE_I3C_SDR4;
+ return MODE_I3C_Fm_FmP;
+}
+
+static enum hci_cmd_mode get_i2c_mode(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i2c >= 1000000)
+ return MODE_I2C_FmP;
+ return MODE_I2C_Fm;
+}
+
+static void fill_data_bytes(struct hci_xfer *xfer, u8 *data,
+ unsigned int data_len)
+{
+ xfer->cmd_desc[1] = 0;
+ switch (data_len) {
+ case 4:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_4(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_3(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_2(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[1] |= CMD_I1_DATA_BYTE_1(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+}
+
+static int hci_cmd_v1_prep_ccc(struct i3c_hci *hci,
+ struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw)
+{
+ unsigned int dat_idx = 0;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+ int ret;
+
+ /* this should never happen */
+ if (WARN_ON(raw))
+ return -EINVAL;
+
+ if (ccc_addr != I3C_BROADCAST_ADDR) {
+ ret = mipi_i3c_hci_dat_v1.get_index(hci, ccc_addr);
+ if (ret < 0)
+ return ret;
+ dat_idx = ret;
+ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_CMD(ccc_cmd) | CMD_I0_CP |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_CMD(ccc_cmd) | CMD_R0_CP |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+
+ return 0;
+}
+
+static void hci_cmd_v1_prep_i3c_xfer(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+ enum hci_cmd_mode mode = get_i3c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+}
+
+static void hci_cmd_v1_prep_i2c_xfer(struct i3c_hci *hci,
+ struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+ enum hci_cmd_mode mode = get_i2c_mode(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ /* we use an Immediate Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_I |
+ CMD_I0_TID(xfer->cmd_tid) |
+ CMD_I0_DEV_INDEX(dat_idx) |
+ CMD_I0_DTT(data_len) |
+ CMD_I0_MODE(mode);
+ fill_data_bytes(xfer, data, data_len);
+ } else {
+ /* we use a Regular Data Transfer Command */
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_R |
+ CMD_R0_TID(xfer->cmd_tid) |
+ CMD_R0_DEV_INDEX(dat_idx) |
+ CMD_R0_MODE(mode) |
+ (rnw ? CMD_R0_RNW : 0);
+ xfer->cmd_desc[1] =
+ CMD_R1_DATA_LENGTH(data_len);
+ }
+}
+
+static int hci_cmd_v1_daa(struct i3c_hci *hci)
+{
+ struct hci_xfer *xfer;
+ int ret, dat_idx = -1;
+ u8 next_addr = 0;
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ xfer = hci_alloc_xfer(2);
+ if (!xfer)
+ return -ENOMEM;
+
+ /*
+ * Simple for now: we allocate a temporary DAT entry, do a single
+ * DAA, register the device which will allocate its own DAT entry
+ * via the core callback, then free the temporary DAT entry.
+ * Loop until there are no more devices to assign an address to.
+ * Yes, there is room for improvement.
+ */
+ for (;;) {
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0)
+ break;
+ dat_idx = ret;
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
+
+ DBG("next_addr = 0x%02x, DAA using DAT %d", next_addr, dat_idx);
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dat_idx, next_addr);
+ mipi_i3c_hci_dct_index_reset(hci);
+
+ xfer->cmd_tid = hci_get_tid();
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer->cmd_tid) |
+ CMD_A0_CMD(I3C_CCC_ENTDAA) |
+ CMD_A0_DEV_INDEX(dat_idx) |
+ CMD_A0_DEV_COUNT(1) |
+ CMD_A0_ROC | CMD_A0_TOC;
+		xfer->cmd_desc[1] = 0;
+		xfer->completion = &done;
+		hci->io->queue_xfer(hci, xfer, 1);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 1)) {
+ ret = -ETIME;
+ break;
+ }
+		/* a NACK with the one device count left intact: nobody answered */
+		if (RESP_STATUS(xfer[0].response) == RESP_ERR_NACK &&
+		    RESP_DATA_LENGTH(xfer[0].response) == 1) {
+ ret = 0; /* no more devices to be assigned */
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ break;
+ }
+
+ i3c_hci_dct_get_val(hci, 0, &pid, &dcr, &bcr);
+ DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
+
+ mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
+ dat_idx = -1;
+
+ /*
+		 * TODO: Extend the subsystem layer to allow registering a
+		 * new device and providing its BCR/DCR/PID at the same time.
+ */
+ ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
+ if (ret)
+ break;
+ }
+
+ if (dat_idx >= 0)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dat_idx);
+ hci_free_xfer(xfer, 1);
+ return ret;
+}
+
+const struct hci_cmd_ops mipi_i3c_hci_cmd_v1 = {
+ .prep_ccc = hci_cmd_v1_prep_ccc,
+ .prep_i3c_xfer = hci_cmd_v1_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v1_prep_i2c_xfer,
+ .perform_daa = hci_cmd_v1_daa,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
new file mode 100644
index 000000000000..4493b2b067cb
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -0,0 +1,316 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * I3C HCI v2.0 Command Descriptor Handling
+ *
+ * Note: The I3C HCI v2.0 spec is still in flux. The code here will change.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "xfer_mode_rate.h"
+
+
+/*
+ * Unified Data Transfer Command
+ */
+
+#define CMD_0_ATTR_U FIELD_PREP(CMD_0_ATTR, 0x4)
+
+#define CMD_U3_HDR_TSP_ML_CTRL(v) FIELD_PREP(W3_MASK(107, 104), v)
+#define CMD_U3_IDB4(v) FIELD_PREP(W3_MASK(103, 96), v)
+#define CMD_U3_HDR_CMD(v) FIELD_PREP(W3_MASK(103, 96), v)
+#define CMD_U2_IDB3(v) FIELD_PREP(W2_MASK( 95, 88), v)
+#define CMD_U2_HDR_BT(v) FIELD_PREP(W2_MASK( 95, 88), v)
+#define CMD_U2_IDB2(v) FIELD_PREP(W2_MASK( 87, 80), v)
+#define CMD_U2_BT_CMD2(v) FIELD_PREP(W2_MASK( 87, 80), v)
+#define CMD_U2_IDB1(v) FIELD_PREP(W2_MASK( 79, 72), v)
+#define CMD_U2_BT_CMD1(v) FIELD_PREP(W2_MASK( 79, 72), v)
+#define CMD_U2_IDB0(v) FIELD_PREP(W2_MASK( 71, 64), v)
+#define CMD_U2_BT_CMD0(v) FIELD_PREP(W2_MASK( 71, 64), v)
+#define CMD_U1_ERR_HANDLING(v) FIELD_PREP(W1_MASK( 63, 62), v)
+#define CMD_U1_ADD_FUNC(v) FIELD_PREP(W1_MASK( 61, 56), v)
+#define CMD_U1_COMBO_XFER W1_BIT_( 55)
+#define CMD_U1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
+#define CMD_U0_TOC W0_BIT_( 31)
+#define CMD_U0_ROC W0_BIT_( 30)
+#define CMD_U0_MAY_YIELD W0_BIT_( 29)
+#define CMD_U0_NACK_RCNT(v) FIELD_PREP(W0_MASK( 28, 27), v)
+#define CMD_U0_IDB_COUNT(v) FIELD_PREP(W0_MASK( 26, 24), v)
+#define CMD_U0_MODE_INDEX(v) FIELD_PREP(W0_MASK( 22, 18), v)
+#define CMD_U0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
+#define CMD_U0_DEV_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
+#define CMD_U0_RnW W0_BIT_( 7)
+#define CMD_U0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+/*
+ * Address Assignment Command
+ */
+
+#define CMD_0_ATTR_A FIELD_PREP(CMD_0_ATTR, 0x2)
+
+#define CMD_A1_DATA_LENGTH(v) FIELD_PREP(W1_MASK( 53, 32), v)
+#define CMD_A0_TOC W0_BIT_( 31)
+#define CMD_A0_ROC W0_BIT_( 30)
+#define CMD_A0_XFER_RATE(v) FIELD_PREP(W0_MASK( 17, 15), v)
+#define CMD_A0_ASSIGN_ADDRESS(v) FIELD_PREP(W0_MASK( 14, 8), v)
+#define CMD_A0_TID(v) FIELD_PREP(W0_MASK( 6, 3), v)
+
+
+static unsigned int get_i3c_rate_idx(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i3c >= 12000000)
+ return XFERRATE_I3C_SDR0;
+ if (bus->scl_rate.i3c > 8000000)
+ return XFERRATE_I3C_SDR1;
+ if (bus->scl_rate.i3c > 6000000)
+ return XFERRATE_I3C_SDR2;
+ if (bus->scl_rate.i3c > 4000000)
+ return XFERRATE_I3C_SDR3;
+ if (bus->scl_rate.i3c > 2000000)
+ return XFERRATE_I3C_SDR4;
+ return XFERRATE_I3C_SDR_FM_FMP;
+}
+
+static unsigned int get_i2c_rate_idx(struct i3c_hci *hci)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+
+ if (bus->scl_rate.i2c >= 1000000)
+ return XFERRATE_I2C_FMP;
+ return XFERRATE_I2C_FM;
+}
+
+static void hci_cmd_v2_prep_private_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer,
+ u8 addr, unsigned int mode,
+ unsigned int rate)
+{
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 5) {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ CMD_U0_DEV_ADDRESS(addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(data_len);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(0);
+ xfer->cmd_desc[2] = 0;
+ xfer->cmd_desc[3] = 0;
+ switch (data_len) {
+ case 5:
+ xfer->cmd_desc[3] |= CMD_U3_IDB4(data[4]);
+ fallthrough;
+ case 4:
+ xfer->cmd_desc[2] |= CMD_U2_IDB3(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[2] |= CMD_U2_IDB2(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[2] |= CMD_U2_IDB1(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[2] |= CMD_U2_IDB0(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+ } else {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ (rnw ? CMD_U0_RnW : 0) |
+ CMD_U0_DEV_ADDRESS(addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(data_len);
+ xfer->cmd_desc[2] = 0;
+ xfer->cmd_desc[3] = 0;
+ }
+}
+
+static int hci_cmd_v2_prep_ccc(struct i3c_hci *hci, struct hci_xfer *xfer,
+ u8 ccc_addr, u8 ccc_cmd, bool raw)
+{
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+ u8 *data = xfer->data;
+ unsigned int data_len = xfer->data_len;
+ bool rnw = xfer->rnw;
+
+ if (raw && ccc_addr != I3C_BROADCAST_ADDR) {
+ hci_cmd_v2_prep_private_xfer(hci, xfer, ccc_addr, mode, rate);
+ return 0;
+ }
+
+ xfer->cmd_tid = hci_get_tid();
+
+ if (!rnw && data_len <= 4) {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ CMD_U0_DEV_ADDRESS(ccc_addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(data_len + (!raw ? 0 : 1));
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(0);
+ xfer->cmd_desc[2] =
+ CMD_U2_IDB0(ccc_cmd);
+ xfer->cmd_desc[3] = 0;
+ switch (data_len) {
+ case 4:
+ xfer->cmd_desc[3] |= CMD_U3_IDB4(data[3]);
+ fallthrough;
+ case 3:
+ xfer->cmd_desc[2] |= CMD_U2_IDB3(data[2]);
+ fallthrough;
+ case 2:
+ xfer->cmd_desc[2] |= CMD_U2_IDB2(data[1]);
+ fallthrough;
+ case 1:
+ xfer->cmd_desc[2] |= CMD_U2_IDB1(data[0]);
+ fallthrough;
+ case 0:
+ break;
+ }
+ /* we consumed all the data with the cmd descriptor */
+ xfer->data = NULL;
+ } else {
+ xfer->cmd_desc[0] =
+ CMD_0_ATTR_U |
+ CMD_U0_TID(xfer->cmd_tid) |
+ (rnw ? CMD_U0_RnW : 0) |
+ CMD_U0_DEV_ADDRESS(ccc_addr) |
+ CMD_U0_XFER_RATE(rate) |
+ CMD_U0_MODE_INDEX(mode) |
+ CMD_U0_IDB_COUNT(!raw ? 0 : 1);
+ xfer->cmd_desc[1] =
+ CMD_U1_DATA_LENGTH(data_len);
+ xfer->cmd_desc[2] =
+ CMD_U2_IDB0(ccc_cmd);
+ xfer->cmd_desc[3] = 0;
+ }
+
+ return 0;
+}
+
+static void hci_cmd_v2_prep_i3c_xfer(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ unsigned int mode = XFERMODE_IDX_I3C_SDR;
+ unsigned int rate = get_i3c_rate_idx(hci);
+ u8 addr = dev->info.dyn_addr;
+
+ hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
+}
+
+static void hci_cmd_v2_prep_i2c_xfer(struct i3c_hci *hci,
+ struct i2c_dev_desc *dev,
+ struct hci_xfer *xfer)
+{
+ unsigned int mode = XFERMODE_IDX_I2C;
+ unsigned int rate = get_i2c_rate_idx(hci);
+ u8 addr = dev->addr;
+
+ hci_cmd_v2_prep_private_xfer(hci, xfer, addr, mode, rate);
+}
+
+static int hci_cmd_v2_daa(struct i3c_hci *hci)
+{
+ struct hci_xfer *xfer;
+ int ret;
+ u8 next_addr = 0;
+ u32 device_id[2];
+ u64 pid;
+ unsigned int dcr, bcr;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ xfer = hci_alloc_xfer(2);
+ if (!xfer)
+ return -ENOMEM;
+
+ xfer[0].data = &device_id;
+ xfer[0].data_len = 8;
+ xfer[0].rnw = true;
+ xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
+ xfer[1].completion = &done;
+
+ for (;;) {
+ ret = i3c_master_get_free_addr(&hci->master, next_addr);
+ if (ret < 0)
+ break;
+ next_addr = ret;
+ DBG("next_addr = 0x%02x", next_addr);
+ xfer[0].cmd_tid = hci_get_tid();
+ xfer[0].cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer[0].cmd_tid) |
+ CMD_A0_ROC;
+ xfer[1].cmd_tid = hci_get_tid();
+ xfer[1].cmd_desc[0] =
+ CMD_0_ATTR_A |
+ CMD_A0_TID(xfer[1].cmd_tid) |
+ CMD_A0_ASSIGN_ADDRESS(next_addr) |
+ CMD_A0_ROC |
+ CMD_A0_TOC;
+ hci->io->queue_xfer(hci, xfer, 2);
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, 2)) {
+ ret = -ETIME;
+ break;
+ }
+ if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
+ ret = 0; /* no more devices to be assigned */
+ break;
+ }
+ if (RESP_STATUS(xfer[1].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ break;
+ }
+
+ pid = FIELD_GET(W1_MASK(47, 32), device_id[1]);
+ pid = (pid << 32) | device_id[0];
+ bcr = FIELD_GET(W1_MASK(55, 48), device_id[1]);
+ dcr = FIELD_GET(W1_MASK(63, 56), device_id[1]);
+ DBG("assigned address %#x to device PID=0x%llx DCR=%#x BCR=%#x",
+ next_addr, pid, dcr, bcr);
+ /*
+		 * TODO: Extend the subsystem layer to allow registering a
+		 * new device and providing its BCR/DCR/PID at the same time.
+ */
+ ret = i3c_master_add_i3c_dev_locked(&hci->master, next_addr);
+ if (ret)
+ break;
+ }
+
+ hci_free_xfer(xfer, 2);
+ return ret;
+}
+
+const struct hci_cmd_ops mipi_i3c_hci_cmd_v2 = {
+ .prep_ccc = hci_cmd_v2_prep_ccc,
+ .prep_i3c_xfer = hci_cmd_v2_prep_i3c_xfer,
+ .prep_i2c_xfer = hci_cmd_v2_prep_i2c_xfer,
+ .perform_daa = hci_cmd_v2_daa,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
new file mode 100644
index 000000000000..500abd27fb22
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -0,0 +1,798 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Core driver code with main interface to the I3C subsystem.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "hci.h"
+#include "ext_caps.h"
+#include "cmd.h"
+#include "dat.h"
+
+
+/*
+ * Host Controller Capabilities and Operation Registers
+ */
+
+#define reg_read(r) readl(hci->base_regs + (r))
+#define reg_write(r, v) writel(v, hci->base_regs + (r))
+#define reg_set(r, v) reg_write(r, reg_read(r) | (v))
+#define reg_clear(r, v) reg_write(r, reg_read(r) & ~(v))
+
+#define HCI_VERSION 0x00 /* HCI Version (in BCD) */
+
+#define HC_CONTROL 0x04
+#define HC_CONTROL_BUS_ENABLE BIT(31)
+#define HC_CONTROL_RESUME BIT(30)
+#define HC_CONTROL_ABORT BIT(29)
+#define HC_CONTROL_HALT_ON_CMD_TIMEOUT BIT(12)
+#define HC_CONTROL_HOT_JOIN_CTRL BIT(8) /* Hot-Join ACK/NACK Control */
+#define HC_CONTROL_I2C_TARGET_PRESENT BIT(7)
+#define HC_CONTROL_PIO_MODE BIT(6) /* DMA/PIO Mode Selector */
+#define HC_CONTROL_DATA_BIG_ENDIAN BIT(4)
+#define HC_CONTROL_IBA_INCLUDE BIT(0) /* Include I3C Broadcast Address */
+
+#define MASTER_DEVICE_ADDR 0x08 /* Master Device Address */
+#define MASTER_DYNAMIC_ADDR_VALID BIT(31) /* Dynamic Address is Valid */
+#define MASTER_DYNAMIC_ADDR(v) FIELD_PREP(GENMASK(22, 16), v)
+
+#define HC_CAPABILITIES 0x0c
+#define HC_CAP_SG_DC_EN BIT(30)
+#define HC_CAP_SG_IBI_EN BIT(29)
+#define HC_CAP_SG_CR_EN BIT(28)
+#define HC_CAP_MAX_DATA_LENGTH GENMASK(24, 22)
+#define HC_CAP_CMD_SIZE GENMASK(21, 20)
+#define HC_CAP_DIRECT_COMMANDS_EN BIT(18)
+#define HC_CAP_MULTI_LANE_EN BIT(15)
+#define HC_CAP_CMD_CCC_DEFBYTE BIT(10)
+#define HC_CAP_HDR_BT_EN BIT(8)
+#define HC_CAP_HDR_TS_EN BIT(7)
+#define HC_CAP_HDR_DDR_EN BIT(6)
+#define HC_CAP_NON_CURRENT_MASTER_CAP BIT(5) /* master handoff capable */
+#define HC_CAP_DATA_BYTE_CFG_EN BIT(4) /* endian selection possible */
+#define HC_CAP_AUTO_COMMAND BIT(3)
+#define HC_CAP_COMBO_COMMAND BIT(2)
+
+#define RESET_CONTROL 0x10
+#define BUS_RESET BIT(31)
+#define BUS_RESET_TYPE GENMASK(30, 29)
+#define IBI_QUEUE_RST BIT(5)
+#define RX_FIFO_RST BIT(4)
+#define TX_FIFO_RST BIT(3)
+#define RESP_QUEUE_RST BIT(2)
+#define CMD_QUEUE_RST BIT(1)
+#define SOFT_RST BIT(0) /* Core Reset */
+
+#define PRESENT_STATE 0x14
+#define STATE_CURRENT_MASTER BIT(2)
+
+#define INTR_STATUS 0x20
+#define INTR_STATUS_ENABLE 0x24
+#define INTR_SIGNAL_ENABLE 0x28
+#define INTR_FORCE 0x2c
+#define INTR_HC_CMD_SEQ_UFLOW_STAT BIT(12) /* Cmd Sequence Underflow */
+#define INTR_HC_RESET_CANCEL BIT(11) /* HC Cancelled Reset */
+#define INTR_HC_INTERNAL_ERR BIT(10) /* HC Internal Error */
+#define INTR_HC_PIO BIT(8) /* cascaded PIO interrupt */
+#define INTR_HC_RINGS GENMASK(7, 0)
+
+#define DAT_SECTION 0x30 /* Device Address Table */
+#define DAT_ENTRY_SIZE GENMASK(31, 28)
+#define DAT_TABLE_SIZE GENMASK(18, 12)
+#define DAT_TABLE_OFFSET GENMASK(11, 0)
+
+#define DCT_SECTION 0x34 /* Device Characteristics Table */
+#define DCT_ENTRY_SIZE GENMASK(31, 28)
+#define DCT_TABLE_INDEX GENMASK(23, 19)
+#define DCT_TABLE_SIZE GENMASK(18, 12)
+#define DCT_TABLE_OFFSET GENMASK(11, 0)
+
+#define RING_HEADERS_SECTION 0x38
+#define RING_HEADERS_OFFSET GENMASK(15, 0)
+
+#define PIO_SECTION 0x3c
+#define PIO_REGS_OFFSET GENMASK(15, 0) /* PIO Offset */
+
+#define EXT_CAPS_SECTION 0x40
+#define EXT_CAPS_OFFSET GENMASK(15, 0)
+
+#define IBI_NOTIFY_CTRL 0x58 /* IBI Notify Control */
+#define IBI_NOTIFY_SIR_REJECTED BIT(3) /* Rejected Target Interrupt Request */
+#define IBI_NOTIFY_MR_REJECTED BIT(1) /* Rejected Master Request Control */
+#define IBI_NOTIFY_HJ_REJECTED BIT(0) /* Rejected Hot-Join Control */
+
+#define DEV_CTX_BASE_LO 0x60
+#define DEV_CTX_BASE_HI 0x64
+
+
+static inline struct i3c_hci *to_i3c_hci(struct i3c_master_controller *m)
+{
+ return container_of(m, struct i3c_hci, master);
+}
+
+static int i3c_hci_bus_init(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_device_info info;
+ int ret;
+
+ DBG("");
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.init(hci);
+ if (ret)
+ return ret;
+ }
+
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ return ret;
+ reg_write(MASTER_DEVICE_ADDR,
+ MASTER_DYNAMIC_ADDR(ret) | MASTER_DYNAMIC_ADDR_VALID);
+ memset(&info, 0, sizeof(info));
+ info.dyn_addr = ret;
+ ret = i3c_master_set_info(m, &info);
+ if (ret)
+ return ret;
+
+ ret = hci->io->init(hci);
+ if (ret)
+ return ret;
+
+ reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ DBG("HC_CONTROL = %#x", reg_read(HC_CONTROL));
+
+ return 0;
+}
+
+static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ DBG("");
+
+ reg_clear(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ hci->io->cleanup(hci);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.cleanup(hci);
+}
+
+void mipi_i3c_hci_resume(struct i3c_hci *hci)
+{
+ /* the HC_CONTROL_RESUME bit is R/W1C so just read and write back */
+ reg_write(HC_CONTROL, reg_read(HC_CONTROL));
+}
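R/W1C means a bit reads back 1 while its event is pending and is cleared by writing a 1 to it; writing back the value just read therefore acks exactly the pending bits while rewriting the plain R/W bits with their current values. A toy model, simplified so that every bit behaves as W1C (in the real HC_CONTROL only RESUME does):

	#include <stdio.h>

	static unsigned int hw_reg = 1u << 30;	/* pretend RESUME is pending */

	static unsigned int reg_read(void) { return hw_reg; }
	static void reg_write(unsigned int v) { hw_reg &= ~v; }	/* 1s clear bits */

	int main(void)
	{
		reg_write(reg_read());		/* the read-back write from above */
		printf("reg now %#x\n", hw_reg);	/* 0: RESUME acked */
		return 0;
	}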
+
+/* located here rather than pio.c because needed bits are in core reg space */
+void mipi_i3c_hci_pio_reset(struct i3c_hci *hci)
+{
+ reg_write(RESET_CONTROL, RX_FIFO_RST | TX_FIFO_RST | RESP_QUEUE_RST);
+}
+
+/* located here rather than dct.c because needed bits are in core reg space */
+void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
+{
+ reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
+}
+
+static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *ccc)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ bool raw = !!(hci->quirks & HCI_QUIRK_RAW_CCC);
+ bool prefixed = raw && !!(ccc->id & I3C_CCC_DIRECT);
+ unsigned int nxfers = ccc->ndests + prefixed;
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+ DBG("cmd=%#x rnw=%d ndests=%d data[0].len=%d",
+ ccc->id, ccc->rnw, ccc->ndests, ccc->dests[0].payload.len);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ if (prefixed) {
+ xfer->data = NULL;
+ xfer->data_len = 0;
+ xfer->rnw = false;
+ hci->cmd->prep_ccc(hci, xfer, I3C_BROADCAST_ADDR,
+ ccc->id, true);
+ xfer++;
+ }
+
+ for (i = 0; i < nxfers - prefixed; i++) {
+ xfer[i].data = ccc->dests[i].payload.data;
+ xfer[i].data_len = ccc->dests[i].payload.len;
+ xfer[i].rnw = ccc->rnw;
+ ret = hci->cmd->prep_ccc(hci, &xfer[i], ccc->dests[i].addr,
+ ccc->id, raw);
+ if (ret)
+ goto out;
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ if (prefixed)
+ xfer--;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = prefixed; i < nxfers; i++) {
+ if (ccc->rnw)
+ ccc->dests[i - prefixed].payload.len =
+ RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (ccc->rnw)
+ DBG("got: %*ph",
+ ccc->dests[0].payload.len, ccc->dests[0].payload.data);
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_daa(struct i3c_master_controller *m)
+{
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ DBG("");
+
+ return hci->cmd->perform_daa(hci);
+}
+
+static int i3c_hci_priv_xfers(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *i3c_xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ DECLARE_COMPLETION_ONSTACK(done);
+ unsigned int size_limit;
+ int i, last, ret = 0;
+
+ DBG("nxfers = %d", nxfers);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ size_limit = 1U << (16 + FIELD_GET(HC_CAP_MAX_DATA_LENGTH, hci->caps));
+
+ for (i = 0; i < nxfers; i++) {
+ xfer[i].data_len = i3c_xfers[i].len;
+ ret = -EFBIG;
+ if (xfer[i].data_len >= size_limit)
+ goto out;
+ xfer[i].rnw = i3c_xfers[i].rnw;
+ if (i3c_xfers[i].rnw) {
+ xfer[i].data = i3c_xfers[i].data.in;
+ } else {
+ /* silence the const qualifier warning with a cast */
+ xfer[i].data = (void *) i3c_xfers[i].data.out;
+ }
+ hci->cmd->prep_i3c_xfer(hci, dev, &xfer[i]);
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (i3c_xfers[i].rnw)
+ i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
+ const struct i2c_msg *i2c_xfers, int nxfers)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct hci_xfer *xfer;
+ DECLARE_COMPLETION_ONSTACK(done);
+ int i, last, ret = 0;
+
+ DBG("nxfers = %d", nxfers);
+
+ xfer = hci_alloc_xfer(nxfers);
+ if (!xfer)
+ return -ENOMEM;
+
+ for (i = 0; i < nxfers; i++) {
+ xfer[i].data = i2c_xfers[i].buf;
+ xfer[i].data_len = i2c_xfers[i].len;
+ xfer[i].rnw = i2c_xfers[i].flags & I2C_M_RD;
+ hci->cmd->prep_i2c_xfer(hci, dev, &xfer[i]);
+ xfer[i].cmd_desc[0] |= CMD_0_ROC;
+ }
+ last = i - 1;
+ xfer[last].cmd_desc[0] |= CMD_0_TOC;
+ xfer[last].completion = &done;
+
+ ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ if (ret)
+ goto out;
+ if (!wait_for_completion_timeout(&done, HZ) &&
+ hci->io->dequeue_xfer(hci, xfer, nxfers)) {
+ ret = -ETIME;
+ goto out;
+ }
+ for (i = 0; i < nxfers; i++) {
+ if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+out:
+ hci_free_xfer(xfer, nxfers);
+ return ret;
+}
+
+static int i3c_hci_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data;
+ int ret;
+
+ DBG("");
+
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1) {
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, ret, dev->info.dyn_addr);
+ dev_data->dat_idx = ret;
+ }
+ i3c_dev_set_master_data(dev, dev_data);
+ return 0;
+}
+
+static int i3c_hci_reattach_i3c_dev(struct i3c_dev_desc *dev, u8 old_dyn_addr)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ DBG("");
+
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.set_dynamic_addr(hci, dev_data->dat_idx,
+ dev->info.dyn_addr);
+ return 0;
+}
+
+static void i3c_hci_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ DBG("");
+
+ i3c_dev_set_master_data(dev, NULL);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
+ kfree(dev_data);
+}
+
+static int i3c_hci_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data;
+ int ret;
+
+ DBG("");
+
+ if (hci->cmd != &mipi_i3c_hci_cmd_v1)
+ return 0;
+ dev_data = kzalloc(sizeof(*dev_data), GFP_KERNEL);
+ if (!dev_data)
+ return -ENOMEM;
+ ret = mipi_i3c_hci_dat_v1.alloc_entry(hci);
+ if (ret < 0) {
+ kfree(dev_data);
+ return ret;
+ }
+ mipi_i3c_hci_dat_v1.set_static_addr(hci, ret, dev->addr);
+ mipi_i3c_hci_dat_v1.set_flags(hci, ret, DAT_0_I2C_DEVICE, 0);
+ dev_data->dat_idx = ret;
+ i2c_dev_set_master_data(dev, dev_data);
+ return 0;
+}
+
+static void i3c_hci_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i2c_dev_get_master_data(dev);
+
+ DBG("");
+
+ if (dev_data) {
+ i2c_dev_set_master_data(dev, NULL);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v1)
+ mipi_i3c_hci_dat_v1.free_entry(hci, dev_data->dat_idx);
+ kfree(dev_data);
+ }
+}
+
+static int i3c_hci_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ unsigned int dat_idx = dev_data->dat_idx;
+
+ if (req->max_payload_len != 0)
+ mipi_i3c_hci_dat_v1.set_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
+ else
+ mipi_i3c_hci_dat_v1.clear_flags(hci, dat_idx, DAT_0_IBI_PAYLOAD, 0);
+ return hci->io->request_ibi(hci, dev, req);
+}
+
+static void i3c_hci_free_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ hci->io->free_ibi(hci, dev);
+}
+
+static int i3c_hci_enable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ mipi_i3c_hci_dat_v1.clear_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
+ return i3c_master_enec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static int i3c_hci_disable_ibi(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+
+ mipi_i3c_hci_dat_v1.set_flags(hci, dev_data->dat_idx, DAT_0_SIR_REJECT, 0);
+ return i3c_master_disec_locked(m, dev->info.dyn_addr, I3C_CCC_EVENT_SIR);
+}
+
+static void i3c_hci_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct i3c_hci *hci = to_i3c_hci(m);
+
+ hci->io->recycle_ibi_slot(hci, dev, slot);
+}
+
+static const struct i3c_master_controller_ops i3c_hci_ops = {
+ .bus_init = i3c_hci_bus_init,
+ .bus_cleanup = i3c_hci_bus_cleanup,
+ .do_daa = i3c_hci_daa,
+ .send_ccc_cmd = i3c_hci_send_ccc_cmd,
+ .priv_xfers = i3c_hci_priv_xfers,
+ .i2c_xfers = i3c_hci_i2c_xfers,
+ .attach_i3c_dev = i3c_hci_attach_i3c_dev,
+ .reattach_i3c_dev = i3c_hci_reattach_i3c_dev,
+ .detach_i3c_dev = i3c_hci_detach_i3c_dev,
+ .attach_i2c_dev = i3c_hci_attach_i2c_dev,
+ .detach_i2c_dev = i3c_hci_detach_i2c_dev,
+ .request_ibi = i3c_hci_request_ibi,
+ .free_ibi = i3c_hci_free_ibi,
+ .enable_ibi = i3c_hci_enable_ibi,
+ .disable_ibi = i3c_hci_disable_ibi,
+ .recycle_ibi_slot = i3c_hci_recycle_ibi_slot,
+};
+
+static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
+{
+ struct i3c_hci *hci = dev_id;
+ irqreturn_t result = IRQ_NONE;
+ u32 val;
+
+ val = reg_read(INTR_STATUS);
+ DBG("INTR_STATUS = %#x", val);
+
+ if (val) {
+ reg_write(INTR_STATUS, val);
+ } else {
+ /* v1.0 does not have PIO cascaded notification bits */
+ val |= INTR_HC_PIO;
+ }
+
+ if (val & INTR_HC_RESET_CANCEL) {
+ DBG("cancelled reset");
+ val &= ~INTR_HC_RESET_CANCEL;
+ }
+ if (val & INTR_HC_INTERNAL_ERR) {
+ dev_err(&hci->master.dev, "Host Controller Internal Error\n");
+ val &= ~INTR_HC_INTERNAL_ERR;
+ }
+ if (val & INTR_HC_PIO) {
+ hci->io->irq_handler(hci, 0);
+ val &= ~INTR_HC_PIO;
+ }
+ if (val & INTR_HC_RINGS) {
+ hci->io->irq_handler(hci, val & INTR_HC_RINGS);
+ val &= ~INTR_HC_RINGS;
+ }
+ if (val)
+ dev_err(&hci->master.dev, "unexpected INTR_STATUS %#x\n", val);
+ else
+ result = IRQ_HANDLED;
+
+ return result;
+}
+
+static int i3c_hci_init(struct i3c_hci *hci)
+{
+ u32 regval, offset;
+ int ret;
+
+ /* Validate HCI hardware version */
+ regval = reg_read(HCI_VERSION);
+ hci->version_major = (regval >> 8) & 0xf;
+ hci->version_minor = (regval >> 4) & 0xf;
+ hci->revision = regval & 0xf;
+ dev_notice(&hci->master.dev, "MIPI I3C HCI v%u.%u r%02u\n",
+ hci->version_major, hci->version_minor, hci->revision);
+ /* known versions */
+ switch (regval & ~0xf) {
+ case 0x100: /* version 1.0 */
+ case 0x110: /* version 1.1 */
+ case 0x200: /* version 2.0 */
+ break;
+ default:
+ dev_err(&hci->master.dev, "unsupported HCI version\n");
+ return -EPROTONOSUPPORT;
+ }
+
+ hci->caps = reg_read(HC_CAPABILITIES);
+ DBG("caps = %#x", hci->caps);
+
+ regval = reg_read(DAT_SECTION);
+ offset = FIELD_GET(DAT_TABLE_OFFSET, regval);
+ hci->DAT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DAT_entries = FIELD_GET(DAT_TABLE_SIZE, regval);
+ hci->DAT_entry_size = FIELD_GET(DAT_ENTRY_SIZE, regval);
+ dev_info(&hci->master.dev, "DAT: %u %u-bytes entries at offset %#x\n",
+ hci->DAT_entries, hci->DAT_entry_size * 4, offset);
+
+ regval = reg_read(DCT_SECTION);
+ offset = FIELD_GET(DCT_TABLE_OFFSET, regval);
+ hci->DCT_regs = offset ? hci->base_regs + offset : NULL;
+ hci->DCT_entries = FIELD_GET(DCT_TABLE_SIZE, regval);
+ hci->DCT_entry_size = FIELD_GET(DCT_ENTRY_SIZE, regval);
+ dev_info(&hci->master.dev, "DCT: %u %u-bytes entries at offset %#x\n",
+ hci->DCT_entries, hci->DCT_entry_size * 4, offset);
+
+ regval = reg_read(RING_HEADERS_SECTION);
+ offset = FIELD_GET(RING_HEADERS_OFFSET, regval);
+ hci->RHS_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "Ring Headers at offset %#x\n", offset);
+
+ regval = reg_read(PIO_SECTION);
+ offset = FIELD_GET(PIO_REGS_OFFSET, regval);
+ hci->PIO_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "PIO section at offset %#x\n", offset);
+
+ regval = reg_read(EXT_CAPS_SECTION);
+ offset = FIELD_GET(EXT_CAPS_OFFSET, regval);
+ hci->EXTCAPS_regs = offset ? hci->base_regs + offset : NULL;
+ dev_info(&hci->master.dev, "Extended Caps at offset %#x\n", offset);
+
+ ret = i3c_hci_parse_ext_caps(hci);
+ if (ret)
+ return ret;
+
+ /*
+ * Now let's reset the hardware.
+ * SOFT_RST must be clear before we write to it.
+ * Then we must wait until it clears again.
+ */
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 1, 10000);
+ if (ret)
+ return -ENXIO;
+ reg_write(RESET_CONTROL, SOFT_RST);
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 1, 10000);
+ if (ret)
+ return -ENXIO;
+
+ /* Disable all interrupts and allow all signal updates */
+ reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+
+ /* Make sure our data ordering fits the host's */
+ regval = reg_read(HC_CONTROL);
+ if (IS_ENABLED(CONFIG_BIG_ENDIAN)) {
+ if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
+ regval |= HC_CONTROL_DATA_BIG_ENDIAN;
+ reg_write(HC_CONTROL, regval);
+ regval = reg_read(HC_CONTROL);
+ if (!(regval & HC_CONTROL_DATA_BIG_ENDIAN)) {
+ dev_err(&hci->master.dev, "cannot set BE mode\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ } else {
+ if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
+ regval &= ~HC_CONTROL_DATA_BIG_ENDIAN;
+ reg_write(HC_CONTROL, regval);
+ regval = reg_read(HC_CONTROL);
+ if (regval & HC_CONTROL_DATA_BIG_ENDIAN) {
+ dev_err(&hci->master.dev, "cannot clear BE mode\n");
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* Select our command descriptor model */
+ switch (FIELD_GET(HC_CAP_CMD_SIZE, hci->caps)) {
+ case 0:
+ hci->cmd = &mipi_i3c_hci_cmd_v1;
+ break;
+ case 1:
+ hci->cmd = &mipi_i3c_hci_cmd_v2;
+ break;
+ default:
+ dev_err(&hci->master.dev, "wrong CMD_SIZE capability value\n");
+ return -EINVAL;
+ }
+
+ /* Try activating DMA operations first */
+ if (hci->RHS_regs) {
+ reg_clear(HC_CONTROL, HC_CONTROL_PIO_MODE);
+ if (reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE) {
+ dev_err(&hci->master.dev, "PIO mode is stuck\n");
+ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_dma;
+ dev_info(&hci->master.dev, "Using DMA\n");
+ }
+ }
+
+ /* If no DMA, try PIO */
+ if (!hci->io && hci->PIO_regs) {
+ reg_set(HC_CONTROL, HC_CONTROL_PIO_MODE);
+ if (!(reg_read(HC_CONTROL) & HC_CONTROL_PIO_MODE)) {
+ dev_err(&hci->master.dev, "DMA mode is stuck\n");
+ ret = -EIO;
+ } else {
+ hci->io = &mipi_i3c_hci_pio;
+ dev_info(&hci->master.dev, "Using PIO\n");
+ }
+ }
+
+ if (!hci->io) {
+ dev_err(&hci->master.dev, "neither DMA nor PIO can be used\n");
+ if (!ret)
+ ret = -EINVAL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i3c_hci_probe(struct platform_device *pdev)
+{
+ struct i3c_hci *hci;
+ int irq, ret;
+
+ hci = devm_kzalloc(&pdev->dev, sizeof(*hci), GFP_KERNEL);
+ if (!hci)
+ return -ENOMEM;
+ hci->base_regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(hci->base_regs))
+ return PTR_ERR(hci->base_regs);
+
+ platform_set_drvdata(pdev, hci);
+	/* temporary for dev_printk() calls, to be replaced in i3c_master_register */
+ hci->master.dev.init_name = dev_name(&pdev->dev);
+
+ ret = i3c_hci_init(hci);
+ if (ret)
+ return ret;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+	ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
+			       0, NULL, hci);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_register(&hci->master, &pdev->dev,
+ &i3c_hci_ops, false);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int i3c_hci_remove(struct platform_device *pdev)
+{
+ struct i3c_hci *hci = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = i3c_master_unregister(&hci->master);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id i3c_hci_of_match[] __maybe_unused = {
+ { .compatible = "mipi-i3c-hci", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i3c_hci_of_match);
+
+static struct platform_driver i3c_hci_driver = {
+ .probe = i3c_hci_probe,
+ .remove = i3c_hci_remove,
+ .driver = {
+ .name = "mipi-i3c-hci",
+ .of_match_table = of_match_ptr(i3c_hci_of_match),
+ },
+};
+module_platform_driver(i3c_hci_driver);
+
+MODULE_AUTHOR("Nicolas Pitre <npitre@baylibre.com>");
+MODULE_DESCRIPTION("MIPI I3C HCI driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat.h b/drivers/i3c/master/mipi-i3c-hci/dat.h
new file mode 100644
index 000000000000..1f0f345c3daf
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dat.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common DAT related stuff
+ */
+
+#ifndef DAT_H
+#define DAT_H
+
+/* Global DAT flags */
+#define DAT_0_I2C_DEVICE W0_BIT_(31)
+#define DAT_0_SIR_REJECT W0_BIT_(13)
+#define DAT_0_IBI_PAYLOAD W0_BIT_(12)
+
+struct hci_dat_ops {
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
+ int (*alloc_entry)(struct i3c_hci *hci);
+ void (*free_entry)(struct i3c_hci *hci, unsigned int dat_idx);
+ void (*set_dynamic_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_static_addr)(struct i3c_hci *hci, unsigned int dat_idx, u8 addr);
+ void (*set_flags)(struct i3c_hci *hci, unsigned int dat_idx, u32 w0, u32 w1);
+ void (*clear_flags)(struct i3c_hci *hci, unsigned int dat_idx, u32 w0, u32 w1);
+ int (*get_index)(struct i3c_hci *hci, u8 address);
+};
+
+extern const struct hci_dat_ops mipi_i3c_hci_dat_v1;
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/dat_v1.c b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
new file mode 100644
index 000000000000..783e551a2c85
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dat_v1.c
@@ -0,0 +1,184 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "dat.h"
+
+
+/*
+ * Device Address Table Structure
+ */
+
+#define DAT_1_AUTOCMD_HDR_CODE W1_MASK(58, 51)
+#define DAT_1_AUTOCMD_MODE W1_MASK(50, 48)
+#define DAT_1_AUTOCMD_VALUE W1_MASK(47, 40)
+#define DAT_1_AUTOCMD_MASK W1_MASK(39, 32)
+/* DAT_0_I2C_DEVICE W0_BIT_(31) */
+#define DAT_0_DEV_NACK_RETRY_CNT W0_MASK(30, 29)
+#define DAT_0_RING_ID W0_MASK(28, 26)
+#define DAT_0_DYNADDR_PARITY W0_BIT_(23)
+#define DAT_0_DYNAMIC_ADDRESS W0_MASK(22, 16)
+#define DAT_0_TS W0_BIT_(15)
+#define DAT_0_MR_REJECT W0_BIT_(14)
+/* DAT_0_SIR_REJECT W0_BIT_(13) */
+/* DAT_0_IBI_PAYLOAD W0_BIT_(12) */
+#define DAT_0_STATIC_ADDRESS W0_MASK(6, 0)
+
+#define dat_w0_read(i) readl(hci->DAT_regs + (i) * 8)
+#define dat_w1_read(i) readl(hci->DAT_regs + (i) * 8 + 4)
+#define dat_w0_write(i, v) writel(v, hci->DAT_regs + (i) * 8)
+#define dat_w1_write(i, v) writel(v, hci->DAT_regs + (i) * 8 + 4)
+
+static inline bool dynaddr_parity(unsigned int addr)
+{
+ addr |= 1 << 7;
+ addr += addr >> 4;
+ addr += addr >> 2;
+ addr += addr >> 1;
+ return (addr & 1);
+}
+
+static int hci_dat_v1_init(struct i3c_hci *hci)
+{
+ unsigned int dat_idx;
+
+ if (!hci->DAT_regs) {
+ dev_err(&hci->master.dev,
+ "only DAT in register space is supported at the moment\n");
+ return -EOPNOTSUPP;
+ }
+ if (hci->DAT_entry_size != 8) {
+ dev_err(&hci->master.dev,
+ "only 8-bytes DAT entries are supported at the moment\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* use a bitmap for faster free slot search */
+ hci->DAT_data = bitmap_zalloc(hci->DAT_entries, GFP_KERNEL);
+ if (!hci->DAT_data)
+ return -ENOMEM;
+
+ /* clear them */
+ for (dat_idx = 0; dat_idx < hci->DAT_entries; dat_idx++) {
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ }
+
+ return 0;
+}
+
+static void hci_dat_v1_cleanup(struct i3c_hci *hci)
+{
+ bitmap_free(hci->DAT_data);
+ hci->DAT_data = NULL;
+}
+
+static int hci_dat_v1_alloc_entry(struct i3c_hci *hci)
+{
+ unsigned int dat_idx;
+
+ dat_idx = find_first_zero_bit(hci->DAT_data, hci->DAT_entries);
+ if (dat_idx >= hci->DAT_entries)
+ return -ENOENT;
+ __set_bit(dat_idx, hci->DAT_data);
+
+ /* default flags */
+ dat_w0_write(dat_idx, DAT_0_SIR_REJECT | DAT_0_MR_REJECT);
+
+ return dat_idx;
+}
+
+static void hci_dat_v1_free_entry(struct i3c_hci *hci, unsigned int dat_idx)
+{
+ dat_w0_write(dat_idx, 0);
+ dat_w1_write(dat_idx, 0);
+ __clear_bit(dat_idx, hci->DAT_data);
+}
+
+static void hci_dat_v1_set_dynamic_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+{
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w0 &= ~(DAT_0_DYNAMIC_ADDRESS | DAT_0_DYNADDR_PARITY);
+ dat_w0 |= FIELD_PREP(DAT_0_DYNAMIC_ADDRESS, address) |
+ (dynaddr_parity(address) ? DAT_0_DYNADDR_PARITY : 0);
+ dat_w0_write(dat_idx, dat_w0);
+}
+
+static void hci_dat_v1_set_static_addr(struct i3c_hci *hci,
+ unsigned int dat_idx, u8 address)
+{
+ u32 dat_w0;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w0 &= ~DAT_0_STATIC_ADDRESS;
+ dat_w0 |= FIELD_PREP(DAT_0_STATIC_ADDRESS, address);
+ dat_w0_write(dat_idx, dat_w0);
+}
+
+static void hci_dat_v1_set_flags(struct i3c_hci *hci, unsigned int dat_idx,
+ u32 w0_flags, u32 w1_flags)
+{
+ u32 dat_w0, dat_w1;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w1 = dat_w1_read(dat_idx);
+ dat_w0 |= w0_flags;
+ dat_w1 |= w1_flags;
+ dat_w0_write(dat_idx, dat_w0);
+ dat_w1_write(dat_idx, dat_w1);
+}
+
+static void hci_dat_v1_clear_flags(struct i3c_hci *hci, unsigned int dat_idx,
+ u32 w0_flags, u32 w1_flags)
+{
+ u32 dat_w0, dat_w1;
+
+ dat_w0 = dat_w0_read(dat_idx);
+ dat_w1 = dat_w1_read(dat_idx);
+ dat_w0 &= ~w0_flags;
+ dat_w1 &= ~w1_flags;
+ dat_w0_write(dat_idx, dat_w0);
+ dat_w1_write(dat_idx, dat_w1);
+}
+
+static int hci_dat_v1_get_index(struct i3c_hci *hci, u8 dev_addr)
+{
+ unsigned int dat_idx;
+ u32 dat_w0;
+
+ for (dat_idx = find_first_bit(hci->DAT_data, hci->DAT_entries);
+ dat_idx < hci->DAT_entries;
+ dat_idx = find_next_bit(hci->DAT_data, hci->DAT_entries, dat_idx + 1)) {
+ dat_w0 = dat_w0_read(dat_idx);
+ if (FIELD_GET(DAT_0_DYNAMIC_ADDRESS, dat_w0) == dev_addr)
+ return dat_idx;
+ }
+
+ return -ENODEV;
+}
+
+const struct hci_dat_ops mipi_i3c_hci_dat_v1 = {
+ .init = hci_dat_v1_init,
+ .cleanup = hci_dat_v1_cleanup,
+ .alloc_entry = hci_dat_v1_alloc_entry,
+ .free_entry = hci_dat_v1_free_entry,
+ .set_dynamic_addr = hci_dat_v1_set_dynamic_addr,
+ .set_static_addr = hci_dat_v1_set_static_addr,
+ .set_flags = hci_dat_v1_set_flags,
+ .clear_flags = hci_dat_v1_clear_flags,
+ .get_index = hci_dat_v1_get_index,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/dct.h b/drivers/i3c/master/mipi-i3c-hci/dct.h
new file mode 100644
index 000000000000..1028e0b40d89
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dct.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common DCT related stuff
+ */
+
+#ifndef DCT_H
+#define DCT_H
+
+void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
+ u64 *pid, unsigned int *dcr, unsigned int *bcr);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/dct_v1.c b/drivers/i3c/master/mipi-i3c-hci/dct_v1.c
new file mode 100644
index 000000000000..acfd4d60f7b0
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dct_v1.c
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/device.h>
+#include <linux/bitfield.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "dct.h"
+
+/*
+ * Device Characteristic Table
+ */
+
+void i3c_hci_dct_get_val(struct i3c_hci *hci, unsigned int dct_idx,
+ u64 *pid, unsigned int *dcr, unsigned int *bcr)
+{
+ void __iomem *reg = hci->DCT_regs + dct_idx * 4 * 4;
+ u32 dct_entry_data[4];
+ unsigned int i;
+
+ for (i = 0; i < 4; i++) {
+ dct_entry_data[i] = readl(reg);
+ reg += 4;
+ }
+
+ *pid = ((u64)dct_entry_data[0]) << (47 - 32 + 1) |
+ FIELD_GET(W1_MASK(47, 32), dct_entry_data[1]);
+ *dcr = FIELD_GET(W2_MASK(71, 64), dct_entry_data[2]);
+ *bcr = FIELD_GET(W2_MASK(79, 72), dct_entry_data[2]);
+}
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
new file mode 100644
index 000000000000..af873a9be050
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -0,0 +1,784 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Note: The I3C HCI v2.0 spec is still in flux. The IBI support is based on
+ * v1.x of the spec and v2.0 will likely be split out.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "ibi.h"
+
+
+/*
+ * Software Parameter Values (somewhat arbitrary for now).
+ * Some of them could be determined at run time eventually.
+ */
+
+#define XFER_RINGS 1 /* max: 8 */
+#define XFER_RING_ENTRIES 16 /* max: 255 */
+
+#define IBI_RINGS 1 /* max: 8 */
+#define IBI_STATUS_RING_ENTRIES 32 /* max: 255 */
+#define IBI_CHUNK_CACHELINES 1 /* max: 256 bytes equivalent */
+#define IBI_CHUNK_POOL_SIZE 128 /* max: 1023 */
+
+/*
+ * Ring Header Preamble
+ */
+
+#define rhs_reg_read(r) readl(hci->RHS_regs + (RHS_##r))
+#define rhs_reg_write(r, v) writel(v, hci->RHS_regs + (RHS_##r))
+
+#define RHS_CONTROL 0x00
+#define PREAMBLE_SIZE GENMASK(31, 24) /* Preamble Section Size */
+#define HEADER_SIZE GENMASK(23, 16) /* Ring Header Size */
+#define MAX_HEADER_COUNT_CAP GENMASK(7, 4) /* HC Max Header Count */
+#define MAX_HEADER_COUNT GENMASK(3, 0) /* Driver Max Header Count */
+
+#define RHS_RHn_OFFSET(n) (0x04 + (n)*4)
+
+/*
+ * Ring Header (Per-Ring Bundle)
+ */
+
+#define rh_reg_read(r) readl(rh->regs + (RH_##r))
+#define rh_reg_write(r, v) writel(v, rh->regs + (RH_##r))
+
+#define RH_CR_SETUP 0x00 /* Command/Response Ring */
+#define CR_XFER_STRUCT_SIZE GENMASK(31, 24)
+#define CR_RESP_STRUCT_SIZE GENMASK(23, 16)
+#define CR_RING_SIZE GENMASK(8, 0)
+
+#define RH_IBI_SETUP 0x04
+#define IBI_STATUS_STRUCT_SIZE GENMASK(31, 24)
+#define IBI_STATUS_RING_SIZE GENMASK(23, 16)
+#define IBI_DATA_CHUNK_SIZE GENMASK(12, 10)
+#define IBI_DATA_CHUNK_COUNT GENMASK(9, 0)
+
+#define RH_CHUNK_CONTROL 0x08
+
+#define RH_INTR_STATUS 0x10
+#define RH_INTR_STATUS_ENABLE 0x14
+#define RH_INTR_SIGNAL_ENABLE 0x18
+#define RH_INTR_FORCE 0x1c
+#define INTR_IBI_READY BIT(12)
+#define INTR_TRANSFER_COMPLETION BIT(11)
+#define INTR_RING_OP BIT(10)
+#define INTR_TRANSFER_ERR BIT(9)
+#define INTR_WARN_INS_STOP_MODE BIT(7)
+#define INTR_IBI_RING_FULL BIT(6)
+#define INTR_TRANSFER_ABORT BIT(5)
+
+#define RH_RING_STATUS 0x20
+#define RING_STATUS_LOCKED BIT(3)
+#define RING_STATUS_ABORTED BIT(2)
+#define RING_STATUS_RUNNING BIT(1)
+#define RING_STATUS_ENABLED BIT(0)
+
+#define RH_RING_CONTROL 0x24
+#define RING_CTRL_ABORT BIT(2)
+#define RING_CTRL_RUN_STOP BIT(1)
+#define RING_CTRL_ENABLE BIT(0)
+
+#define RH_RING_OPERATION1 0x28
+#define RING_OP1_IBI_DEQ_PTR GENMASK(23, 16)
+#define RING_OP1_CR_SW_DEQ_PTR GENMASK(15, 8)
+#define RING_OP1_CR_ENQ_PTR GENMASK(7, 0)
+
+#define RH_RING_OPERATION2 0x2c
+#define RING_OP2_IBI_ENQ_PTR GENMASK(23, 16)
+#define RING_OP2_CR_DEQ_PTR GENMASK(7, 0)
+
+#define RH_CMD_RING_BASE_LO 0x30
+#define RH_CMD_RING_BASE_HI 0x34
+#define RH_RESP_RING_BASE_LO 0x38
+#define RH_RESP_RING_BASE_HI 0x3c
+#define RH_IBI_STATUS_RING_BASE_LO 0x40
+#define RH_IBI_STATUS_RING_BASE_HI 0x44
+#define RH_IBI_DATA_RING_BASE_LO 0x48
+#define RH_IBI_DATA_RING_BASE_HI 0x4c
+
+#define RH_CMD_RING_SG 0x50 /* Ring Scatter Gather Support */
+#define RH_RESP_RING_SG 0x54
+#define RH_IBI_STATUS_RING_SG 0x58
+#define RH_IBI_DATA_RING_SG 0x5c
+#define RING_SG_BLP BIT(31) /* Buffer Vs. List Pointer */
+#define RING_SG_LIST_SIZE GENMASK(15, 0)
+
+/*
+ * Data Buffer Descriptor (in memory)
+ */
+
+#define DATA_BUF_BLP BIT(31) /* Buffer Vs. List Pointer */
+#define DATA_BUF_IOC BIT(30) /* Interrupt on Completion */
+#define DATA_BUF_BLOCK_SIZE GENMASK(15, 0)
+
+
+struct hci_rh_data {
+ void __iomem *regs;
+ void *xfer, *resp, *ibi_status, *ibi_data;
+ dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
+ unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
+ unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
+ unsigned int done_ptr, ibi_chunk_ptr;
+ struct hci_xfer **src_xfers;
+ spinlock_t lock;
+ struct completion op_done;
+};
+
+struct hci_rings_data {
+ unsigned int total;
+ struct hci_rh_data headers[];
+};
+
+struct hci_dma_dev_ibi_data {
+ struct i3c_generic_ibi_pool *pool;
+ unsigned int max_len;
+};
+
+static inline u32 lo32(dma_addr_t physaddr)
+{
+ return physaddr;
+}
+
+static inline u32 hi32(dma_addr_t physaddr)
+{
+ /* trickery to avoid compiler warnings on 32-bit build targets */
+ if (sizeof(dma_addr_t) > 4) {
+ u64 hi = physaddr;
+ return hi >> 32;
+ }
+ return 0;
+}
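+
+/*
+ * Example (illustrative): for a 64-bit dma_addr_t of 0x0000000112345678,
+ * lo32() yields 0x12345678 and hi32() yields 0x1; on 32-bit targets
+ * hi32() is constant 0 and the shift is compiled away.
+ */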
+
+static void hci_dma_cleanup(struct i3c_hci *hci)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh;
+ unsigned int i;
+
+ if (!rings)
+ return;
+
+ for (i = 0; i < rings->total; i++) {
+ rh = &rings->headers[i];
+
+ rh_reg_write(RING_CONTROL, 0);
+ rh_reg_write(CR_SETUP, 0);
+ rh_reg_write(IBI_SETUP, 0);
+ rh_reg_write(INTR_SIGNAL_ENABLE, 0);
+
+ if (rh->xfer)
+ dma_free_coherent(&hci->master.dev,
+ rh->xfer_struct_sz * rh->xfer_entries,
+ rh->xfer, rh->xfer_dma);
+ if (rh->resp)
+ dma_free_coherent(&hci->master.dev,
+ rh->resp_struct_sz * rh->xfer_entries,
+ rh->resp, rh->resp_dma);
+ kfree(rh->src_xfers);
+ if (rh->ibi_status)
+ dma_free_coherent(&hci->master.dev,
+ rh->ibi_status_sz * rh->ibi_status_entries,
+ rh->ibi_status, rh->ibi_status_dma);
+ if (rh->ibi_data_dma)
+ dma_unmap_single(&hci->master.dev, rh->ibi_data_dma,
+ rh->ibi_chunk_sz * rh->ibi_chunks_total,
+ DMA_FROM_DEVICE);
+ kfree(rh->ibi_data);
+ }
+
+ rhs_reg_write(CONTROL, 0);
+
+ kfree(rings);
+ hci->io_data = NULL;
+}
+
+static int hci_dma_init(struct i3c_hci *hci)
+{
+ struct hci_rings_data *rings;
+ struct hci_rh_data *rh;
+ u32 regval;
+ unsigned int i, nr_rings, xfers_sz, resps_sz;
+ unsigned int ibi_status_ring_sz, ibi_data_ring_sz;
+ int ret;
+
+ regval = rhs_reg_read(CONTROL);
+ nr_rings = FIELD_GET(MAX_HEADER_COUNT_CAP, regval);
+ dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
+ if (unlikely(nr_rings > 8)) {
+ dev_err(&hci->master.dev, "number of rings should be <= 8\n");
+ nr_rings = 8;
+ }
+ if (nr_rings > XFER_RINGS)
+ nr_rings = XFER_RINGS;
+ rings = kzalloc(sizeof(*rings) + nr_rings * sizeof(*rh), GFP_KERNEL);
+ if (!rings)
+ return -ENOMEM;
+ hci->io_data = rings;
+ rings->total = nr_rings;
+
+ for (i = 0; i < rings->total; i++) {
+ u32 offset = rhs_reg_read(RHn_OFFSET(i));
+
+ dev_info(&hci->master.dev, "Ring %d at offset %#x\n", i, offset);
+ ret = -EINVAL;
+ if (!offset)
+ goto err_out;
+ rh = &rings->headers[i];
+ rh->regs = hci->base_regs + offset;
+ spin_lock_init(&rh->lock);
+ init_completion(&rh->op_done);
+
+ rh->xfer_entries = XFER_RING_ENTRIES;
+
+ regval = rh_reg_read(CR_SETUP);
+ rh->xfer_struct_sz = FIELD_GET(CR_XFER_STRUCT_SIZE, regval);
+ rh->resp_struct_sz = FIELD_GET(CR_RESP_STRUCT_SIZE, regval);
+ DBG("xfer_struct_sz = %d, resp_struct_sz = %d",
+ rh->xfer_struct_sz, rh->resp_struct_sz);
+ xfers_sz = rh->xfer_struct_sz * rh->xfer_entries;
+ resps_sz = rh->resp_struct_sz * rh->xfer_entries;
+
+ rh->xfer = dma_alloc_coherent(&hci->master.dev, xfers_sz,
+ &rh->xfer_dma, GFP_KERNEL);
+ rh->resp = dma_alloc_coherent(&hci->master.dev, resps_sz,
+ &rh->resp_dma, GFP_KERNEL);
+ rh->src_xfers =
+ kmalloc_array(rh->xfer_entries, sizeof(*rh->src_xfers),
+ GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!rh->xfer || !rh->resp || !rh->src_xfers)
+ goto err_out;
+
+ rh_reg_write(CMD_RING_BASE_LO, lo32(rh->xfer_dma));
+ rh_reg_write(CMD_RING_BASE_HI, hi32(rh->xfer_dma));
+ rh_reg_write(RESP_RING_BASE_LO, lo32(rh->resp_dma));
+ rh_reg_write(RESP_RING_BASE_HI, hi32(rh->resp_dma));
+
+ regval = FIELD_PREP(CR_RING_SIZE, rh->xfer_entries);
+ rh_reg_write(CR_SETUP, regval);
+
+ rh_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+ rh_reg_write(INTR_SIGNAL_ENABLE, INTR_IBI_READY |
+ INTR_TRANSFER_COMPLETION |
+ INTR_RING_OP |
+ INTR_TRANSFER_ERR |
+ INTR_WARN_INS_STOP_MODE |
+ INTR_IBI_RING_FULL |
+ INTR_TRANSFER_ABORT);
+
+ /* IBIs */
+
+ if (i >= IBI_RINGS)
+ goto ring_ready;
+
+ regval = rh_reg_read(IBI_SETUP);
+ rh->ibi_status_sz = FIELD_GET(IBI_STATUS_STRUCT_SIZE, regval);
+ rh->ibi_status_entries = IBI_STATUS_RING_ENTRIES;
+ rh->ibi_chunks_total = IBI_CHUNK_POOL_SIZE;
+
+ rh->ibi_chunk_sz = dma_get_cache_alignment();
+ rh->ibi_chunk_sz *= IBI_CHUNK_CACHELINES;
+ BUG_ON(rh->ibi_chunk_sz > 256);
+
+ ibi_status_ring_sz = rh->ibi_status_sz * rh->ibi_status_entries;
+ ibi_data_ring_sz = rh->ibi_chunk_sz * rh->ibi_chunks_total;
+
+ rh->ibi_status =
+ dma_alloc_coherent(&hci->master.dev, ibi_status_ring_sz,
+ &rh->ibi_status_dma, GFP_KERNEL);
+ rh->ibi_data = kmalloc(ibi_data_ring_sz, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!rh->ibi_status || !rh->ibi_data)
+ goto err_out;
+ rh->ibi_data_dma =
+ dma_map_single(&hci->master.dev, rh->ibi_data,
+ ibi_data_ring_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&hci->master.dev, rh->ibi_data_dma)) {
+ rh->ibi_data_dma = 0;
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
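+ /*
+ * Illustrative: IBI_DATA_CHUNK_SIZE appears to encode the chunk
+ * size as 2^(n+2) bytes, so a 64-byte chunk yields a field value
+ * of ilog2(64) - 2 = 4 below.
+ */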
+ regval = FIELD_PREP(IBI_STATUS_RING_SIZE,
+ rh->ibi_status_entries) |
+ FIELD_PREP(IBI_DATA_CHUNK_SIZE,
+ ilog2(rh->ibi_chunk_sz) - 2) |
+ FIELD_PREP(IBI_DATA_CHUNK_COUNT,
+ rh->ibi_chunks_total);
+ rh_reg_write(IBI_SETUP, regval);
+
+ regval = rh_reg_read(INTR_SIGNAL_ENABLE);
+ regval |= INTR_IBI_READY;
+ rh_reg_write(INTR_SIGNAL_ENABLE, regval);
+
+ring_ready:
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ }
+
+ regval = FIELD_PREP(MAX_HEADER_COUNT, rings->total);
+ rhs_reg_write(CONTROL, regval);
+ return 0;
+
+err_out:
+ hci_dma_cleanup(hci);
+ return ret;
+}
+
+static void hci_dma_unmap_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, unsigned int n)
+{
+ struct hci_xfer *xfer;
+ unsigned int i;
+
+ for (i = 0; i < n; i++) {
+ xfer = xfer_list + i;
+ dma_unmap_single(&hci->master.dev,
+ xfer->data_dma, xfer->data_len,
+ xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ }
+}
+
+static int hci_dma_queue_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, int n)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh;
+ unsigned int i, ring, enqueue_ptr;
+ u32 op1_val, op2_val;
+
+ /* For now we only use ring 0 */
+ ring = 0;
+ rh = &rings->headers[ring];
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
+
+ /* store cmd descriptor */
+ *ring_data++ = xfer->cmd_desc[0];
+ *ring_data++ = xfer->cmd_desc[1];
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = xfer->cmd_desc[2];
+ *ring_data++ = xfer->cmd_desc[3];
+ }
+
+ /* first word of Data Buffer Descriptor Structure */
+ if (!xfer->data)
+ xfer->data_len = 0;
+ *ring_data++ =
+ FIELD_PREP(DATA_BUF_BLOCK_SIZE, xfer->data_len) |
+ ((i == n - 1) ? DATA_BUF_IOC : 0);
+
+ /* 2nd and 3rd words of Data Buffer Descriptor Structure */
+ if (xfer->data) {
+ xfer->data_dma =
+ dma_map_single(&hci->master.dev,
+ xfer->data,
+ xfer->data_len,
+ xfer->rnw ?
+ DMA_FROM_DEVICE :
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&hci->master.dev,
+ xfer->data_dma)) {
+ hci_dma_unmap_xfer(hci, xfer_list, i);
+ return -ENOMEM;
+ }
+ *ring_data++ = lo32(xfer->data_dma);
+ *ring_data++ = hi32(xfer->data_dma);
+ } else {
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
+
+ /* remember corresponding xfer struct */
+ rh->src_xfers[enqueue_ptr] = xfer;
+ /* remember corresponding ring/entry for this xfer structure */
+ xfer->ring_number = ring;
+ xfer->ring_entry = enqueue_ptr;
+
+ enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
+
+ /*
+ * We may update the hardware view of the enqueue pointer
+ * only if we didn't reach its dequeue pointer.
+ */
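+ /*
+ * Note (derived from the check below): enqueue == dequeue denotes
+ * an empty ring, so a ring with N entries holds at most N - 1
+ * outstanding transfers.
+ */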
+ op2_val = rh_reg_read(RING_OPERATION2);
+ if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
+ /* the ring is full */
+ hci_dma_unmap_xfer(hci, xfer_list, i + 1);
+ return -EBUSY;
+ }
+ }
+
+ /* take care to update the hardware enqueue pointer atomically */
+ spin_lock_irq(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_ENQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock_irq(&rh->lock);
+
+ return 0;
+}
+
+static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
+ struct hci_xfer *xfer_list, int n)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
+ unsigned int i;
+ bool did_unqueue = false;
+
+ /* stop the ring */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
+ if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
+ /*
+ * We're in deep trouble if this condition is ever met.
+ * Hardware might still be writing to memory, etc.
+ * Better to suspend the world than risk silent corruption.
+ */
+ dev_crit(&hci->master.dev, "unable to abort the ring\n");
+ BUG();
+ }
+
+ for (i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+ int idx = xfer->ring_entry;
+
+ /*
+ * At the time the abort happened, the xfer might have
+ * completed already. If not, replace the corresponding
+ * descriptor entries with a no-op.
+ */
+ if (idx >= 0) {
+ u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
+
+ /* store no-op cmd descriptor */
+ *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
+ *ring_data++ = 0;
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ *ring_data++ = 0;
+ *ring_data++ = 0;
+ }
+
+ /* disassociate this xfer struct */
+ rh->src_xfers[idx] = NULL;
+
+ /* and unmap it */
+ hci_dma_unmap_xfer(hci, xfer, 1);
+
+ did_unqueue = true;
+ }
+ }
+
+ /* restart the ring */
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+
+ return did_unqueue;
+}
+
+static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
+{
+ u32 op1_val, op2_val, resp, *ring_resp;
+ unsigned int tid, done_ptr = rh->done_ptr;
+ struct hci_xfer *xfer;
+
+ for (;;) {
+ op2_val = rh_reg_read(RING_OPERATION2);
+ if (done_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val))
+ break;
+
+ ring_resp = rh->resp + rh->resp_struct_sz * done_ptr;
+ resp = *ring_resp;
+ tid = RESP_TID(resp);
+ DBG("resp = 0x%08x", resp);
+
+ xfer = rh->src_xfers[done_ptr];
+ if (!xfer) {
+ DBG("orphaned ring entry");
+ } else {
+ hci_dma_unmap_xfer(hci, xfer, 1);
+ xfer->ring_entry = -1;
+ xfer->response = resp;
+ if (tid != xfer->cmd_tid) {
+ dev_err(&hci->master.dev,
+ "response tid=%d when expecting %d\n",
+ tid, xfer->cmd_tid);
+ /* TODO: do something about it? */
+ }
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+
+ done_ptr = (done_ptr + 1) % rh->xfer_entries;
+ rh->done_ptr = done_ptr;
+ }
+
+ /* take care to update the software dequeue pointer atomically */
+ spin_lock(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+}
+
+static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_dma_dev_ibi_data *dev_ibi;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+ return -ENOMEM;
+ pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(pool)) {
+ kfree(dev_ibi);
+ return PTR_ERR(pool);
+ }
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
+ return 0;
+}
+
+static void hci_dma_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ dev_data->ibi_data = NULL;
+ i3c_generic_ibi_free_pool(dev_ibi->pool);
+ kfree(dev_ibi);
+}
+
+static void hci_dma_recycle_ibi_slot(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_dma_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
+}
+
+static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
+{
+ struct i3c_dev_desc *dev;
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_dma_dev_ibi_data *dev_ibi;
+ struct i3c_ibi_slot *slot;
+ u32 op1_val, op2_val, ibi_status_error;
+ unsigned int ptr, enq_ptr, deq_ptr;
+ unsigned int ibi_size, ibi_chunks, ibi_data_offset, first_part;
+ int ibi_addr, last_ptr;
+ void *ring_ibi_data;
+ dma_addr_t ring_ibi_data_dma;
+
+ op1_val = rh_reg_read(RING_OPERATION1);
+ deq_ptr = FIELD_GET(RING_OP1_IBI_DEQ_PTR, op1_val);
+
+ op2_val = rh_reg_read(RING_OPERATION2);
+ enq_ptr = FIELD_GET(RING_OP2_IBI_ENQ_PTR, op2_val);
+
+ ibi_status_error = 0;
+ ibi_addr = -1;
+ ibi_chunks = 0;
+ ibi_size = 0;
+ last_ptr = -1;
+
+ /* let's find all we can about this IBI */
+ for (ptr = deq_ptr; ptr != enq_ptr;
+ ptr = (ptr + 1) % rh->ibi_status_entries) {
+ u32 ibi_status, *ring_ibi_status;
+ unsigned int chunks;
+
+ ring_ibi_status = rh->ibi_status + rh->ibi_status_sz * ptr;
+ ibi_status = *ring_ibi_status;
+ DBG("status = %#x", ibi_status);
+
+ if (ibi_status_error) {
+ /* we no longer care */
+ } else if (ibi_status & IBI_ERROR) {
+ ibi_status_error = ibi_status;
+ } else if (ibi_addr == -1) {
+ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ } else if (ibi_addr != FIELD_GET(IBI_TARGET_ADDR, ibi_status)) {
+ /* the address changed unexpectedly */
+ ibi_status_error = ibi_status;
+ }
+
+ chunks = FIELD_GET(IBI_CHUNKS, ibi_status);
+ ibi_chunks += chunks;
+ if (!(ibi_status & IBI_LAST_STATUS)) {
+ ibi_size += chunks * rh->ibi_chunk_sz;
+ } else {
+ ibi_size += FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ last_ptr = ptr;
+ break;
+ }
+ }
+
+ /* validate what we've got */
+
+ if (last_ptr == -1) {
+ /* this IBI sequence is not yet complete */
+ DBG("no LAST_STATUS available (e=%d d=%d)", enq_ptr, deq_ptr);
+ return;
+ }
+ deq_ptr = last_ptr + 1;
+ deq_ptr %= rh->ibi_status_entries;
+
+ if (ibi_status_error) {
+ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi_addr);
+ goto done;
+ }
+
+ /* determine who this is for */
+ dev = i3c_hci_addr_to_dev(hci, ibi_addr);
+ if (!dev) {
+ dev_err(&hci->master.dev,
+ "IBI for unknown device %#x\n", ibi_addr);
+ goto done;
+ }
+
+ dev_data = i3c_dev_get_master_data(dev);
+ dev_ibi = dev_data->ibi_data;
+ if (ibi_size > dev_ibi->max_len) {
+ dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+ ibi_size, dev_ibi->max_len);
+ goto done;
+ }
+
+ /*
+ * This ring model is not suitable for zero-copy processing of IBIs.
+ * We have the data chunk ring wrap-around to deal with, meaning
+ * that the payload might span multiple chunks, beginning at the
+ * end of the ring and wrapping around to the start. Furthermore
+ * there is no guarantee that those chunks will be released in order
+ * and in a timely manner by the upper driver. So let's just copy
+ * them to a discrete buffer. In practice they're supposed to be
+ * small anyway.
+ */
+ slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+ if (!slot) {
+ dev_err(&hci->master.dev, "no free slot for IBI\n");
+ goto done;
+ }
+
+ /* copy first part of the payload */
+ ibi_data_offset = rh->ibi_chunk_sz * rh->ibi_chunk_ptr;
+ ring_ibi_data = rh->ibi_data + ibi_data_offset;
+ ring_ibi_data_dma = rh->ibi_data_dma + ibi_data_offset;
+ first_part = (rh->ibi_chunks_total - rh->ibi_chunk_ptr)
+ * rh->ibi_chunk_sz;
+ if (first_part > ibi_size)
+ first_part = ibi_size;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ first_part, DMA_FROM_DEVICE);
+ memcpy(slot->data, ring_ibi_data, first_part);
+
+ /* copy second part if any */
+ if (ibi_size > first_part) {
+ /* we wrap back to the start and copy remaining data */
+ ring_ibi_data = rh->ibi_data;
+ ring_ibi_data_dma = rh->ibi_data_dma;
+ dma_sync_single_for_cpu(&hci->master.dev, ring_ibi_data_dma,
+ ibi_size - first_part, DMA_FROM_DEVICE);
+ memcpy(slot->data + first_part, ring_ibi_data,
+ ibi_size - first_part);
+ }
+
+ /* submit it */
+ slot->dev = dev;
+ slot->len = ibi_size;
+ i3c_master_queue_ibi(dev, slot);
+
+done:
+ /* take care to update the ibi dequeue pointer atomically */
+ spin_lock(&rh->lock);
+ op1_val = rh_reg_read(RING_OPERATION1);
+ op1_val &= ~RING_OP1_IBI_DEQ_PTR;
+ op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
+ rh_reg_write(RING_OPERATION1, op1_val);
+ spin_unlock(&rh->lock);
+
+ /* update the chunk pointer */
+ rh->ibi_chunk_ptr += ibi_chunks;
+ rh->ibi_chunk_ptr %= rh->ibi_chunks_total;
+
+ /* and tell the hardware about freed chunks */
+ rh_reg_write(CHUNK_CONTROL, rh_reg_read(CHUNK_CONTROL) + ibi_chunks);
+}
+
+static bool hci_dma_irq_handler(struct i3c_hci *hci, unsigned int mask)
+{
+ struct hci_rings_data *rings = hci->io_data;
+ unsigned int i;
+ bool handled = false;
+
+ for (i = 0; mask && i < 8; i++) {
+ struct hci_rh_data *rh;
+ u32 status;
+
+ if (!(mask & BIT(i)))
+ continue;
+ mask &= ~BIT(i);
+
+ rh = &rings->headers[i];
+ status = rh_reg_read(INTR_STATUS);
+ DBG("rh%d status: %#x", i, status);
+ if (!status)
+ continue;
+ rh_reg_write(INTR_STATUS, status);
+
+ if (status & INTR_IBI_READY)
+ hci_dma_process_ibi(hci, rh);
+ if (status & (INTR_TRANSFER_COMPLETION | INTR_TRANSFER_ERR))
+ hci_dma_xfer_done(hci, rh);
+ if (status & INTR_RING_OP)
+ complete(&rh->op_done);
+
+ if (status & INTR_TRANSFER_ABORT)
+ dev_notice_ratelimited(&hci->master.dev,
+ "ring %d: Transfer Aborted\n", i);
+ if (status & INTR_WARN_INS_STOP_MODE)
+ dev_warn_ratelimited(&hci->master.dev,
+ "ring %d: Inserted Stop on Mode Change\n", i);
+ if (status & INTR_IBI_RING_FULL)
+ dev_err_ratelimited(&hci->master.dev,
+ "ring %d: IBI Ring Full Condition\n", i);
+
+ handled = true;
+ }
+
+ return handled;
+}
+
+const struct hci_io_ops mipi_i3c_hci_dma = {
+ .init = hci_dma_init,
+ .cleanup = hci_dma_cleanup,
+ .queue_xfer = hci_dma_queue_xfer,
+ .dequeue_xfer = hci_dma_dequeue_xfer,
+ .irq_handler = hci_dma_irq_handler,
+ .request_ibi = hci_dma_request_ibi,
+ .free_ibi = hci_dma_free_ibi,
+ .recycle_ibi_slot = hci_dma_recycle_ibi_slot,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.c b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
new file mode 100644
index 000000000000..2e9b23efdc45
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "ext_caps.h"
+#include "xfer_mode_rate.h"
+
+
+/* Extended Capability Header */
+#define CAP_HEADER_LENGTH GENMASK(23, 8)
+#define CAP_HEADER_ID GENMASK(7, 0)
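+
+/*
+ * Illustrative decode (assumed example value): a header word of
+ * 0x00000202 denotes extended capability ID 0x02 with a length of
+ * 2 DWORDs including the header, so the next capability starts
+ * 8 bytes further.
+ */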
+
+static int hci_extcap_hardware_id(struct i3c_hci *hci, void __iomem *base)
+{
+ hci->vendor_mipi_id = readl(base + 0x04);
+ hci->vendor_version_id = readl(base + 0x08);
+ hci->vendor_product_id = readl(base + 0x0c);
+
+ dev_info(&hci->master.dev, "vendor MIPI ID: %#x\n", hci->vendor_mipi_id);
+ dev_info(&hci->master.dev, "vendor version ID: %#x\n", hci->vendor_version_id);
+ dev_info(&hci->master.dev, "vendor product ID: %#x\n", hci->vendor_product_id);
+
+ /* ought to go in a table if this grows too much */
+ switch (hci->vendor_mipi_id) {
+ case MIPI_VENDOR_NXP:
+ hci->quirks |= HCI_QUIRK_RAW_CCC;
+ DBG("raw CCC quirks set");
+ break;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_master_config(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 master_config = readl(base + 0x04);
+ unsigned int operation_mode = FIELD_GET(GENMASK(5, 4), master_config);
+ static const char * const functionality[] = {
+ "(unknown)", "master only", "target only",
+ "primary/secondary master" };
+ dev_info(&hci->master.dev, "operation mode: %s\n", functionality[operation_mode]);
+ if (operation_mode & 0x1)
+ return 0;
+ dev_err(&hci->master.dev, "only master mode is currently supported\n");
+ return -EOPNOTSUPP;
+}
+
+static int hci_extcap_multi_bus(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 bus_instance = readl(base + 0x04);
+ unsigned int count = FIELD_GET(GENMASK(3, 0), bus_instance);
+
+ dev_info(&hci->master.dev, "%d bus instances\n", count);
+ return 0;
+}
+
+static int hci_extcap_xfer_modes(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 header = readl(base);
+ u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
+ unsigned int index;
+
+ dev_info(&hci->master.dev, "transfer mode table has %d entries\n",
+ entries);
+ base += 4; /* skip header */
+ for (index = 0; index < entries; index++) {
+ u32 mode_entry = readl(base);
+
+ DBG("mode %d: 0x%08x", index, mode_entry);
+ /* TODO: will be needed when I3C core does more than SDR */
+ base += 4;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_xfer_rates(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 header = readl(base);
+ u32 entries = FIELD_GET(CAP_HEADER_LENGTH, header) - 1;
+ u32 rate_entry;
+ unsigned int index, rate, rate_id, mode_id;
+
+ base += 4; /* skip header */
+
+ dev_info(&hci->master.dev, "available data rates:\n");
+ for (index = 0; index < entries; index++) {
+ rate_entry = readl(base);
+ DBG("entry %d: 0x%08x", index, rate_entry);
+ rate = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, rate_entry);
+ rate_id = FIELD_GET(XFERRATE_RATE_ID, rate_entry);
+ mode_id = FIELD_GET(XFERRATE_MODE_ID, rate_entry);
+ dev_info(&hci->master.dev, "rate %d for %s = %d kHz\n",
+ rate_id,
+ mode_id == XFERRATE_MODE_I3C ? "I3C" :
+ mode_id == XFERRATE_MODE_I2C ? "I2C" :
+ "unknown mode",
+ rate);
+ base += 4;
+ }
+
+ return 0;
+}
+
+static int hci_extcap_auto_command(struct i3c_hci *hci, void __iomem *base)
+{
+ u32 autocmd_ext_caps = readl(base + 0x04);
+ unsigned int max_count = FIELD_GET(GENMASK(3, 0), autocmd_ext_caps);
+ u32 autocmd_ext_config = readl(base + 0x08);
+ unsigned int count = FIELD_GET(GENMASK(3, 0), autocmd_ext_config);
+
+ dev_info(&hci->master.dev, "%d/%d active auto-command entries\n",
+ count, max_count);
+ /* remember auto-command register location for later use */
+ hci->AUTOCMD_regs = base;
+ return 0;
+}
+
+static int hci_extcap_debug(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "debug registers present\n");
+ hci->DEBUG_regs = base;
+ return 0;
+}
+
+static int hci_extcap_scheduled_cmd(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "scheduled commands available\n");
+ /* hci->schedcmd_regs = base; */
+ return 0;
+}
+
+static int hci_extcap_non_curr_master(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Non-Current Master support available\n");
+ /* hci->NCM_regs = base; */
+ return 0;
+}
+
+static int hci_extcap_ccc_resp_conf(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "CCC Response Configuration available\n");
+ return 0;
+}
+
+static int hci_extcap_global_DAT(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Global DAT available\n");
+ return 0;
+}
+
+static int hci_extcap_multilane(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "Master Multi-Lane support available\n");
+ return 0;
+}
+
+static int hci_extcap_ncm_multilane(struct i3c_hci *hci, void __iomem *base)
+{
+ dev_info(&hci->master.dev, "NCM Multi-Lane support available\n");
+ return 0;
+}
+
+struct hci_ext_caps {
+ u8 id;
+ u16 min_length;
+ int (*parser)(struct i3c_hci *hci, void __iomem *base);
+};
+
+#define EXT_CAP(_id, _highest_mandatory_reg_offset, _parser) \
+ { .id = (_id), .parser = (_parser), \
+ .min_length = (_highest_mandatory_reg_offset)/4 + 1 }
+
+static const struct hci_ext_caps ext_capabilities[] = {
+ EXT_CAP(0x01, 0x0c, hci_extcap_hardware_id),
+ EXT_CAP(0x02, 0x04, hci_extcap_master_config),
+ EXT_CAP(0x03, 0x04, hci_extcap_multi_bus),
+ EXT_CAP(0x04, 0x24, hci_extcap_xfer_modes),
+ EXT_CAP(0x05, 0x08, hci_extcap_auto_command),
+ EXT_CAP(0x08, 0x40, hci_extcap_xfer_rates),
+ EXT_CAP(0x0c, 0x10, hci_extcap_debug),
+ EXT_CAP(0x0d, 0x0c, hci_extcap_scheduled_cmd),
+ EXT_CAP(0x0e, 0x80, hci_extcap_non_curr_master), /* TODO confirm size */
+ EXT_CAP(0x0f, 0x04, hci_extcap_ccc_resp_conf),
+ EXT_CAP(0x10, 0x08, hci_extcap_global_DAT),
+ EXT_CAP(0x9d, 0x04, hci_extcap_multilane),
+ EXT_CAP(0x9e, 0x04, hci_extcap_ncm_multilane),
+};
+
+static int hci_extcap_vendor_NXP(struct i3c_hci *hci, void __iomem *base)
+{
+ hci->vendor_data = (__force void *)base;
+ dev_info(&hci->master.dev, "Build Date Info = %#x\n", readl(base + 1*4));
+ /* reset the FPGA */
+ writel(0xdeadbeef, base + 1*4);
+ return 0;
+}
+
+struct hci_ext_cap_vendor_specific {
+ u32 vendor;
+ u8 cap;
+ u16 min_length;
+ int (*parser)(struct i3c_hci *hci, void __iomem *base);
+};
+
+#define EXT_CAP_VENDOR(_vendor, _cap, _highest_mandatory_reg_offset) \
+ { .vendor = (MIPI_VENDOR_##_vendor), .cap = (_cap), \
+ .parser = (hci_extcap_vendor_##_vendor), \
+ .min_length = (_highest_mandatory_reg_offset)/4 + 1 }
+
+static const struct hci_ext_cap_vendor_specific vendor_ext_caps[] = {
+ EXT_CAP_VENDOR(NXP, 0xc0, 0x20),
+};
+
+static int hci_extcap_vendor_specific(struct i3c_hci *hci, void __iomem *base,
+ u32 cap_id, u32 cap_length)
+{
+ const struct hci_ext_cap_vendor_specific *vendor_cap_entry;
+ int i;
+
+ vendor_cap_entry = NULL;
+ for (i = 0; i < ARRAY_SIZE(vendor_ext_caps); i++) {
+ if (vendor_ext_caps[i].vendor == hci->vendor_mipi_id &&
+ vendor_ext_caps[i].cap == cap_id) {
+ vendor_cap_entry = &vendor_ext_caps[i];
+ break;
+ }
+ }
+
+ if (!vendor_cap_entry) {
+ dev_notice(&hci->master.dev,
+ "unknown ext_cap 0x%02x for vendor 0x%02x\n",
+ cap_id, hci->vendor_mipi_id);
+ return 0;
+ }
+ if (cap_length < vendor_cap_entry->min_length) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (expecting >= %d)\n",
+ cap_id, cap_length, vendor_cap_entry->min_length);
+ return -EINVAL;
+ }
+ return vendor_cap_entry->parser(hci, base);
+}
+
+int i3c_hci_parse_ext_caps(struct i3c_hci *hci)
+{
+ void __iomem *curr_cap = hci->EXTCAPS_regs;
+ void __iomem *end = curr_cap + 0x1000; /* some arbitrary limit */
+ u32 cap_header, cap_id, cap_length;
+ const struct hci_ext_caps *cap_entry;
+ int i, err = 0;
+
+ if (!curr_cap)
+ return 0;
+
+ for (; !err && curr_cap < end; curr_cap += cap_length * 4) {
+ cap_header = readl(curr_cap);
+ cap_id = FIELD_GET(CAP_HEADER_ID, cap_header);
+ cap_length = FIELD_GET(CAP_HEADER_LENGTH, cap_header);
+ DBG("id=0x%02x length=%d", cap_id, cap_length);
+ if (!cap_length)
+ break;
+ if (curr_cap + cap_length * 4 >= end) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (too big)\n",
+ cap_id, cap_length);
+ err = -EINVAL;
+ break;
+ }
+
+ if (cap_id >= 0xc0 && cap_id <= 0xcf) {
+ err = hci_extcap_vendor_specific(hci, curr_cap,
+ cap_id, cap_length);
+ continue;
+ }
+
+ cap_entry = NULL;
+ for (i = 0; i < ARRAY_SIZE(ext_capabilities); i++) {
+ if (ext_capabilities[i].id == cap_id) {
+ cap_entry = &ext_capabilities[i];
+ break;
+ }
+ }
+ if (!cap_entry) {
+ dev_notice(&hci->master.dev,
+ "unknown ext_cap 0x%02x\n", cap_id);
+ } else if (cap_length < cap_entry->min_length) {
+ dev_err(&hci->master.dev,
+ "ext_cap 0x%02x has size %d (expecting >= %d)\n",
+ cap_id, cap_length, cap_entry->min_length);
+ err = -EINVAL;
+ } else {
+ err = cap_entry->parser(hci, curr_cap);
+ }
+ }
+ return err;
+}
diff --git a/drivers/i3c/master/mipi-i3c-hci/ext_caps.h b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
new file mode 100644
index 000000000000..9df17822fdb4
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ext_caps.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Extended Capability Definitions
+ */
+
+#ifndef EXTCAPS_H
+#define EXTCAPS_H
+
+/* MIPI vendor IDs */
+#define MIPI_VENDOR_NXP 0x11b
+
+
+int i3c_hci_parse_ext_caps(struct i3c_hci *hci);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
new file mode 100644
index 000000000000..80beb1d5be8f
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common HCI stuff
+ */
+
+#ifndef HCI_H
+#define HCI_H
+
+
+/* Handy logging macro to save on line length */
+#define DBG(x, ...) pr_devel("%s: " x "\n", __func__, ##__VA_ARGS__)
+
+/* 32-bit word aware bit and mask macros */
+#define W0_MASK(h, l) GENMASK((h) - 0, (l) - 0)
+#define W1_MASK(h, l) GENMASK((h) - 32, (l) - 32)
+#define W2_MASK(h, l) GENMASK((h) - 64, (l) - 64)
+#define W3_MASK(h, l) GENMASK((h) - 96, (l) - 96)
+
+/* Same for single bit macros (trailing _ to align with W*_MASK width) */
+#define W0_BIT_(x) BIT((x) - 0)
+#define W1_BIT_(x) BIT((x) - 32)
+#define W2_BIT_(x) BIT((x) - 64)
+#define W3_BIT_(x) BIT((x) - 96)
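+
+/*
+ * Worked example (illustrative): a field at bits 58:51 of a 64-bit
+ * DAT entry, such as DAT_1_AUTOCMD_HDR_CODE, is W1_MASK(58, 51),
+ * i.e. GENMASK(26, 19) within the second 32-bit word.
+ */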
+
+
+struct hci_cmd_ops;
+
+/* Our main structure */
+struct i3c_hci {
+ struct i3c_master_controller master;
+ void __iomem *base_regs;
+ void __iomem *DAT_regs;
+ void __iomem *DCT_regs;
+ void __iomem *RHS_regs;
+ void __iomem *PIO_regs;
+ void __iomem *EXTCAPS_regs;
+ void __iomem *AUTOCMD_regs;
+ void __iomem *DEBUG_regs;
+ const struct hci_io_ops *io;
+ void *io_data;
+ const struct hci_cmd_ops *cmd;
+ atomic_t next_cmd_tid;
+ u32 caps;
+ unsigned int quirks;
+ unsigned int DAT_entries;
+ unsigned int DAT_entry_size;
+ void *DAT_data;
+ unsigned int DCT_entries;
+ unsigned int DCT_entry_size;
+ u8 version_major;
+ u8 version_minor;
+ u8 revision;
+ u32 vendor_mipi_id;
+ u32 vendor_version_id;
+ u32 vendor_product_id;
+ void *vendor_data;
+};
+
+
+/*
+ * Structure to represent a master initiated transfer.
+ * The rnw, data and data_len fields must be initialized before calling any
+ * hci->cmd->*() method. The cmd method will initialize cmd_desc[] and
+ * possibly modify (clear) the data field. Then xfer->cmd_desc[0] can
+ * be augmented with CMD_0_ROC and/or CMD_0_TOC.
+ * The completion field needs to be initialized before queueing with
+ * hci->io->queue_xfer(), and requires CMD_0_ROC to be set.
+ */
+struct hci_xfer {
+ u32 cmd_desc[4];
+ u32 response;
+ bool rnw;
+ void *data;
+ unsigned int data_len;
+ unsigned int cmd_tid;
+ struct completion *completion;
+ union {
+ struct {
+ /* PIO specific */
+ struct hci_xfer *next_xfer;
+ struct hci_xfer *next_data;
+ struct hci_xfer *next_resp;
+ unsigned int data_left;
+ u32 data_word_before_partial;
+ };
+ struct {
+ /* DMA specific */
+ dma_addr_t data_dma;
+ int ring_number;
+ int ring_entry;
+ };
+ };
+};
+
+static inline struct hci_xfer *hci_alloc_xfer(unsigned int n)
+{
+ return kzalloc(sizeof(struct hci_xfer) * n, GFP_KERNEL);
+}
+
+static inline void hci_free_xfer(struct hci_xfer *xfer, unsigned int n)
+{
+ kfree(xfer);
+}
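+
+/*
+ * Illustrative lifecycle sketch (assumed usage, not a definitive API):
+ *
+ *	DECLARE_COMPLETION_ONSTACK(done);
+ *	struct hci_xfer *xfer = hci_alloc_xfer(1);
+ *
+ *	xfer->rnw = true;
+ *	xfer->data = buf;
+ *	xfer->data_len = len;
+ *	(a hci->cmd->*() method then fills cmd_desc[])
+ *	xfer->cmd_desc[0] |= CMD_0_ROC | CMD_0_TOC;
+ *	xfer->completion = &done;
+ *	hci->io->queue_xfer(hci, xfer, 1);
+ *	wait_for_completion(&done);
+ *	hci_free_xfer(xfer, 1);
+ */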
+
+
+/* This abstracts PIO vs DMA operations */
+struct hci_io_ops {
+ bool (*irq_handler)(struct i3c_hci *hci, unsigned int mask);
+ int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+ int (*init)(struct i3c_hci *hci);
+ void (*cleanup)(struct i3c_hci *hci);
+};
+
+extern const struct hci_io_ops mipi_i3c_hci_pio;
+extern const struct hci_io_ops mipi_i3c_hci_dma;
+
+
+/* Our per device master private data */
+struct i3c_hci_dev_data {
+ int dat_idx;
+ void *ibi_data;
+};
+
+
+/* list of quirks */
+#define HCI_QUIRK_RAW_CCC BIT(1) /* CCC framing must be explicit */
+
+
+/* global functions */
+void mipi_i3c_hci_resume(struct i3c_hci *hci);
+void mipi_i3c_hci_pio_reset(struct i3c_hci *hci);
+void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/ibi.h b/drivers/i3c/master/mipi-i3c-hci/ibi.h
new file mode 100644
index 000000000000..e1f98e264da0
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/ibi.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Common IBI related stuff
+ */
+
+#ifndef IBI_H
+#define IBI_H
+
+/*
+ * IBI Status Descriptor bits
+ */
+#define IBI_STS BIT(31)
+#define IBI_ERROR BIT(30)
+#define IBI_STATUS_TYPE BIT(29)
+#define IBI_HW_CONTEXT GENMASK(28, 26)
+#define IBI_TS BIT(25)
+#define IBI_LAST_STATUS BIT(24)
+#define IBI_CHUNKS GENMASK(23, 16)
+#define IBI_ID GENMASK(15, 8)
+#define IBI_TARGET_ADDR GENMASK(15, 9)
+#define IBI_TARGET_RNW BIT(8)
+#define IBI_DATA_LENGTH GENMASK(7, 0)
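+
+/*
+ * Illustrative decode (assumed example value): a status word of
+ * 0x01011205 means last status of the sequence, no error, 1 data
+ * chunk, target address 0x09, RnW = 0 (write), 5 payload bytes.
+ */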
+
+/* handy helpers */
+static inline struct i3c_dev_desc *
+i3c_hci_addr_to_dev(struct i3c_hci *hci, unsigned int addr)
+{
+ struct i3c_bus *bus = i3c_master_get_bus(&hci->master);
+ struct i3c_dev_desc *dev;
+
+ i3c_bus_for_each_i3cdev(bus, dev) {
+ if (dev->info.dyn_addr == addr)
+ return dev;
+ }
+ return NULL;
+}
+
+#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
new file mode 100644
index 000000000000..d0272aa93599
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -0,0 +1,1041 @@
+// SPDX-License-Identifier: BSD-3-Clause
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i3c/master.h>
+#include <linux/io.h>
+
+#include "hci.h"
+#include "cmd.h"
+#include "ibi.h"
+
+
+/*
+ * PIO Access Area
+ */
+
+#define pio_reg_read(r) readl(hci->PIO_regs + (PIO_##r))
+#define pio_reg_write(r, v) writel(v, hci->PIO_regs + (PIO_##r))
+
+#define PIO_COMMAND_QUEUE_PORT 0x00
+#define PIO_RESPONSE_QUEUE_PORT 0x04
+#define PIO_XFER_DATA_PORT 0x08
+#define PIO_IBI_PORT 0x0c
+
+#define PIO_QUEUE_THLD_CTRL 0x10
+#define QUEUE_IBI_STATUS_THLD GENMASK(31, 24)
+#define QUEUE_IBI_DATA_THLD GENMASK(23, 16)
+#define QUEUE_RESP_BUF_THLD GENMASK(15, 8)
+#define QUEUE_CMD_EMPTY_BUF_THLD GENMASK(7, 0)
+
+#define PIO_DATA_BUFFER_THLD_CTRL 0x14
+#define DATA_RX_START_THLD GENMASK(26, 24)
+#define DATA_TX_START_THLD GENMASK(18, 16)
+#define DATA_RX_BUF_THLD GENMASK(10, 8)
+#define DATA_TX_BUF_THLD GENMASK(2, 0)
+
+#define PIO_QUEUE_SIZE 0x18
+#define TX_DATA_BUFFER_SIZE GENMASK(31, 24)
+#define RX_DATA_BUFFER_SIZE GENMASK(23, 16)
+#define IBI_STATUS_SIZE GENMASK(15, 8)
+#define CR_QUEUE_SIZE GENMASK(7, 0)
+
+#define PIO_INTR_STATUS 0x20
+#define PIO_INTR_STATUS_ENABLE 0x24
+#define PIO_INTR_SIGNAL_ENABLE 0x28
+#define PIO_INTR_FORCE 0x2c
+#define STAT_TRANSFER_BLOCKED BIT(25)
+#define STAT_PERR_RESP_UFLOW BIT(24)
+#define STAT_PERR_CMD_OFLOW BIT(23)
+#define STAT_PERR_IBI_UFLOW BIT(22)
+#define STAT_PERR_RX_UFLOW BIT(21)
+#define STAT_PERR_TX_OFLOW BIT(20)
+#define STAT_ERR_RESP_QUEUE_FULL BIT(19)
+#define STAT_WARN_RESP_QUEUE_FULL BIT(18)
+#define STAT_ERR_IBI_QUEUE_FULL BIT(17)
+#define STAT_WARN_IBI_QUEUE_FULL BIT(16)
+#define STAT_ERR_RX_DATA_FULL BIT(15)
+#define STAT_WARN_RX_DATA_FULL BIT(14)
+#define STAT_ERR_TX_DATA_EMPTY BIT(13)
+#define STAT_WARN_TX_DATA_EMPTY BIT(12)
+#define STAT_TRANSFER_ERR BIT(9)
+#define STAT_WARN_INS_STOP_MODE BIT(7)
+#define STAT_TRANSFER_ABORT BIT(5)
+#define STAT_RESP_READY BIT(4)
+#define STAT_CMD_QUEUE_READY BIT(3)
+#define STAT_IBI_STATUS_THLD BIT(2)
+#define STAT_RX_THLD BIT(1)
+#define STAT_TX_THLD BIT(0)
+
+#define PIO_QUEUE_CUR_STATUS 0x38
+#define CUR_IBI_Q_LEVEL GENMASK(28, 20)
+#define CUR_RESP_Q_LEVEL GENMASK(18, 10)
+#define CUR_CMD_Q_EMPTY_LEVEL GENMASK(8, 0)
+
+#define PIO_DATA_BUFFER_CUR_STATUS 0x3c
+#define CUR_RX_BUF_LVL GENMASK(26, 16)
+#define CUR_TX_BUF_LVL GENMASK(10, 0)
+
+/*
+ * Handy status bit combinations
+ */
+
+#define STAT_LATENCY_WARNINGS (STAT_WARN_RESP_QUEUE_FULL | \
+ STAT_WARN_IBI_QUEUE_FULL | \
+ STAT_WARN_RX_DATA_FULL | \
+ STAT_WARN_TX_DATA_EMPTY | \
+ STAT_WARN_INS_STOP_MODE)
+
+#define STAT_LATENCY_ERRORS (STAT_ERR_RESP_QUEUE_FULL | \
+ STAT_ERR_IBI_QUEUE_FULL | \
+ STAT_ERR_RX_DATA_FULL | \
+ STAT_ERR_TX_DATA_EMPTY)
+
+#define STAT_PROG_ERRORS (STAT_TRANSFER_BLOCKED | \
+ STAT_PERR_RESP_UFLOW | \
+ STAT_PERR_CMD_OFLOW | \
+ STAT_PERR_IBI_UFLOW | \
+ STAT_PERR_RX_UFLOW | \
+ STAT_PERR_TX_OFLOW)
+
+#define STAT_ALL_ERRORS (STAT_TRANSFER_ABORT | \
+ STAT_TRANSFER_ERR | \
+ STAT_LATENCY_ERRORS | \
+ STAT_PROG_ERRORS)
+
+struct hci_pio_dev_ibi_data {
+ struct i3c_generic_ibi_pool *pool;
+ unsigned int max_len;
+};
+
+struct hci_pio_ibi_data {
+ struct i3c_ibi_slot *slot;
+ void *data_ptr;
+ unsigned int addr;
+ unsigned int seg_len, seg_cnt;
+ unsigned int max_len;
+ bool last_seg;
+};
+
+struct hci_pio_data {
+ spinlock_t lock;
+ struct hci_xfer *curr_xfer, *xfer_queue;
+ struct hci_xfer *curr_rx, *rx_queue;
+ struct hci_xfer *curr_tx, *tx_queue;
+ struct hci_xfer *curr_resp, *resp_queue;
+ struct hci_pio_ibi_data ibi;
+ unsigned int rx_thresh_size, tx_thresh_size;
+ unsigned int max_ibi_thresh;
+ u32 reg_queue_thresh;
+ u32 enabled_irqs;
+};
+
+static int hci_pio_init(struct i3c_hci *hci)
+{
+ struct hci_pio_data *pio;
+ u32 val, size_val, rx_thresh, tx_thresh, ibi_val;
+
+ pio = kzalloc(sizeof(*pio), GFP_KERNEL);
+ if (!pio)
+ return -ENOMEM;
+
+ hci->io_data = pio;
+ spin_lock_init(&pio->lock);
+
+ size_val = pio_reg_read(QUEUE_SIZE);
+ dev_info(&hci->master.dev, "CMD/RESP FIFO = %ld entries\n",
+ FIELD_GET(CR_QUEUE_SIZE, size_val));
+ dev_info(&hci->master.dev, "IBI FIFO = %ld bytes\n",
+ 4 * FIELD_GET(IBI_STATUS_SIZE, size_val));
+ dev_info(&hci->master.dev, "RX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(RX_DATA_BUFFER_SIZE, size_val)));
+ dev_info(&hci->master.dev, "TX data FIFO = %d bytes\n",
+ 4 * (2 << FIELD_GET(TX_DATA_BUFFER_SIZE, size_val)));
+
+ /*
+ * Let's initialize data thresholds to half of the actual FIFO size.
+ * The start thresholds aren't used (set to 0) as the FIFO is always
+ * serviced before the corresponding command is queued.
+ */
+ rx_thresh = FIELD_GET(RX_DATA_BUFFER_SIZE, size_val);
+ tx_thresh = FIELD_GET(TX_DATA_BUFFER_SIZE, size_val);
+ if (hci->version_major == 1) {
+ /* thresholds are expressed as 2^(n+1), so just subtract 1 if not 0 */
+ if (rx_thresh)
+ rx_thresh -= 1;
+ if (tx_thresh)
+ tx_thresh -= 1;
+ pio->rx_thresh_size = 2 << rx_thresh;
+ pio->tx_thresh_size = 2 << tx_thresh;
+ } else {
+ /* size is 2^(n+1) and threshold is 2^n i.e. already halved */
+ pio->rx_thresh_size = 1 << rx_thresh;
+ pio->tx_thresh_size = 1 << tx_thresh;
+ }
+ val = FIELD_PREP(DATA_RX_BUF_THLD, rx_thresh) |
+ FIELD_PREP(DATA_TX_BUF_THLD, tx_thresh);
+ pio_reg_write(DATA_BUFFER_THLD_CTRL, val);
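+
+ /*
+ * Worked example (illustrative): an RX_DATA_BUFFER_SIZE field of 3
+ * means a 2 << 3 = 16-word (64-byte) FIFO; on a v1 controller the
+ * programmed threshold field becomes 2, i.e. rx_thresh_size is
+ * 2 << 2 = 8 words, half the FIFO as intended.
+ */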
+
+ /*
+ * Let's raise an interrupt as soon as there is one free cmd slot
+ * or one available response or IBI. For IBI data let's use half the
+ * IBI queue size within allowed bounds.
+ */
+ ibi_val = FIELD_GET(IBI_STATUS_SIZE, size_val);
+ pio->max_ibi_thresh = clamp_val(ibi_val/2, 1, 63);
+ val = FIELD_PREP(QUEUE_IBI_STATUS_THLD, 1) |
+ FIELD_PREP(QUEUE_IBI_DATA_THLD, pio->max_ibi_thresh) |
+ FIELD_PREP(QUEUE_RESP_BUF_THLD, 1) |
+ FIELD_PREP(QUEUE_CMD_EMPTY_BUF_THLD, 1);
+ pio_reg_write(QUEUE_THLD_CTRL, val);
+ pio->reg_queue_thresh = val;
+
+ /* Disable all IRQs but allow all status bits */
+ pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
+ pio_reg_write(INTR_STATUS_ENABLE, 0xffffffff);
+
+ /* Always accept error interrupts (will be activated on first xfer) */
+ pio->enabled_irqs = STAT_ALL_ERRORS;
+
+ return 0;
+}
+
+static void hci_pio_cleanup(struct i3c_hci *hci)
+{
+ struct hci_pio_data *pio = hci->io_data;
+
+ pio_reg_write(INTR_SIGNAL_ENABLE, 0x0);
+
+ if (pio) {
+ DBG("status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ BUG_ON(pio->curr_xfer);
+ BUG_ON(pio->curr_rx);
+ BUG_ON(pio->curr_tx);
+ BUG_ON(pio->curr_resp);
+ kfree(pio);
+ hci->io_data = NULL;
+ }
+}
+
+static void hci_pio_write_cmd(struct i3c_hci *hci, struct hci_xfer *xfer)
+{
+ DBG("cmd_desc[%d] = 0x%08x", 0, xfer->cmd_desc[0]);
+ DBG("cmd_desc[%d] = 0x%08x", 1, xfer->cmd_desc[1]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[0]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[1]);
+ if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
+ DBG("cmd_desc[%d] = 0x%08x", 2, xfer->cmd_desc[2]);
+ DBG("cmd_desc[%d] = 0x%08x", 3, xfer->cmd_desc[3]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[2]);
+ pio_reg_write(COMMAND_QUEUE_PORT, xfer->cmd_desc[3]);
+ }
+}
+
+static bool hci_pio_do_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_rx;
+ unsigned int nr_words;
+ u32 *p;
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
+ /* bail out if FIFO hasn't reached the threshold value yet */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_RX_THLD))
+ return false;
+ nr_words = min(xfer->data_left / 4, pio->rx_thresh_size);
+ /* extract data from FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ *p++ = pio_reg_read(XFER_DATA_PORT);
+ }
+
+ /* trailing data is retrieved upon response reception */
+ return !xfer->data_left;
+}
+
+static void hci_pio_do_trailing_rx(struct i3c_hci *hci,
+ struct hci_pio_data *pio, unsigned int count)
+{
+ struct hci_xfer *xfer = pio->curr_rx;
+ u32 *p;
+
+ DBG("%d remaining", count);
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ if (count >= 4) {
+ unsigned int nr_words = count / 4;
+ /* extract data from FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ *p++ = pio_reg_read(XFER_DATA_PORT);
+ }
+
+ count &= 3;
+ if (count) {
+ /*
+ * There are trailing bytes in the last word.
+ * Fetch it and extract bytes in an endian independent way.
+ * Unlike the TX case, we must not write memory past the
+ * end of the destination buffer.
+ */
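+ /*
+ * E.g. (illustrative, little-endian host): with 3 trailing bytes
+ * and a FIFO word of 0xddccbbaa, bytes 0xaa, 0xbb, 0xcc are
+ * stored, matching the prefix a full 32-bit store would leave.
+ */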
+ u8 *p_byte = (u8 *)p;
+ u32 data = pio_reg_read(XFER_DATA_PORT);
+
+ xfer->data_word_before_partial = data;
+ xfer->data_left -= count;
+ data = (__force u32) cpu_to_le32(data);
+ while (count--) {
+ *p_byte++ = data;
+ data >>= 8;
+ }
+ }
+}
+
+static bool hci_pio_do_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_tx;
+ unsigned int nr_words;
+ u32 *p;
+
+ p = xfer->data;
+ p += (xfer->data_len - xfer->data_left) / 4;
+
+ while (xfer->data_left >= 4) {
+ /* bail out if FIFO free space is below set threshold */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ /* we can fill up to that TX threshold */
+ nr_words = min(xfer->data_left / 4, pio->tx_thresh_size);
+ /* push data into the FIFO */
+ xfer->data_left -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, xfer->data_left);
+ while (nr_words--)
+ pio_reg_write(XFER_DATA_PORT, *p++);
+ }
+
+ if (xfer->data_left) {
+ /*
+ * There are trailing bytes to send. We can simply load
+ * them from memory as a word which will keep those bytes
+ * in their proper place even on a BE system. This may
+ * also fetch some bytes past the end of the buffer, but
+ * that's harmless as they won't be sent out.
+ */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_TX_THLD))
+ return false;
+ DBG("trailing %d", xfer->data_left);
+ pio_reg_write(XFER_DATA_PORT, *p);
+ xfer->data_left = 0;
+ }
+
+ return true;
+}
+
+static bool hci_pio_process_rx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_rx && hci_pio_do_rx(hci, pio))
+ pio->curr_rx = pio->curr_rx->next_data;
+ return !pio->curr_rx;
+}
+
+static bool hci_pio_process_tx(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_tx && hci_pio_do_tx(hci, pio))
+ pio->curr_tx = pio->curr_tx->next_data;
+ return !pio->curr_tx;
+}
+
+static void hci_pio_queue_data(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+ if (!xfer->data) {
+ xfer->data_len = xfer->data_left = 0;
+ return;
+ }
+
+ if (xfer->rnw) {
+ prev_queue_tail = pio->rx_queue;
+ pio->rx_queue = xfer;
+ if (pio->curr_rx) {
+ prev_queue_tail->next_data = xfer;
+ } else {
+ pio->curr_rx = xfer;
+ if (!hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs |= STAT_RX_THLD;
+ }
+ } else {
+ prev_queue_tail = pio->tx_queue;
+ pio->tx_queue = xfer;
+ if (pio->curr_tx) {
+ prev_queue_tail->next_data = xfer;
+ } else {
+ pio->curr_tx = xfer;
+ if (!hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs |= STAT_TX_THLD;
+ }
+ }
+}
+
+static void hci_pio_push_to_next_rx(struct i3c_hci *hci, struct hci_xfer *xfer,
+ unsigned int words_to_keep)
+{
+ u32 *from = xfer->data;
+ u32 from_last;
+ unsigned int received, count;
+
+ received = (xfer->data_len - xfer->data_left) / 4;
+ if ((xfer->data_len - xfer->data_left) & 3) {
+ from_last = xfer->data_word_before_partial;
+ received += 1;
+ } else {
+ from_last = from[received - 1];
+ }
+ from += words_to_keep;
+ count = received - words_to_keep;
+
+ while (count) {
+ unsigned int room, left, chunk, bytes_to_move;
+ u32 last_word;
+
+ xfer = xfer->next_data;
+ if (!xfer) {
+ dev_err(&hci->master.dev, "pushing RX data to unexistent xfer\n");
+ return;
+ }
+
+ room = DIV_ROUND_UP(xfer->data_len, 4);
+ left = DIV_ROUND_UP(xfer->data_left, 4);
+ chunk = min(count, room);
+ if (chunk > left) {
+ hci_pio_push_to_next_rx(hci, xfer, chunk - left);
+ left = chunk;
+ xfer->data_left = left * 4;
+ }
+
+ bytes_to_move = xfer->data_len - xfer->data_left;
+ if (bytes_to_move & 3) {
+ /* preserve word to become partial */
+ u32 *p = xfer->data;
+
+ xfer->data_word_before_partial = p[bytes_to_move / 4];
+ }
+ memmove(xfer->data + chunk * 4, xfer->data, bytes_to_move);
+
+ /* treat last word specially because of partial word issues */
+ chunk -= 1;
+
+ memcpy(xfer->data, from, chunk * 4);
+ xfer->data_left -= chunk * 4;
+ from += chunk;
+ count -= chunk;
+
+ last_word = (count == 1) ? from_last : *from++;
+ if (xfer->data_left < 4) {
+ /*
+ * Like in hci_pio_do_trailing_rx(), preserve the
+ * original word to be stored partially, then store
+ * its bytes in an endian independent way.
+ */
+ u8 *p_byte = xfer->data;
+
+ p_byte += chunk * 4;
+ xfer->data_word_before_partial = last_word;
+ last_word = (__force u32) cpu_to_le32(last_word);
+ while (xfer->data_left--) {
+ *p_byte++ = last_word;
+ last_word >>= 8;
+ }
+ } else {
+ u32 *p = xfer->data;
+
+ p[chunk] = last_word;
+ xfer->data_left -= 4;
+ }
+ count--;
+ }
+}
+
+static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ u32 status);
+
+static bool hci_pio_process_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_resp &&
+ (pio_reg_read(INTR_STATUS) & STAT_RESP_READY)) {
+ struct hci_xfer *xfer = pio->curr_resp;
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+ unsigned int tid = RESP_TID(resp);
+
+ DBG("resp = 0x%08x", resp);
+ if (tid != xfer->cmd_tid) {
+ dev_err(&hci->master.dev,
+ "response tid=%d when expecting %d\n",
+ tid, xfer->cmd_tid);
+ /* let's pretend it is a prog error... any of them */
+ hci_pio_err(hci, pio, STAT_PROG_ERRORS);
+ return false;
+ }
+ xfer->response = resp;
+
+ if (pio->curr_rx == xfer) {
+ /*
+ * Response availability implies RX completion.
+ * Retrieve trailing RX data if any.
+ * Note that short reads are possible.
+ */
+ unsigned int received, expected, to_keep;
+
+ received = xfer->data_len - xfer->data_left;
+ expected = RESP_DATA_LENGTH(xfer->response);
+ if (expected > received) {
+ hci_pio_do_trailing_rx(hci, pio,
+ expected - received);
+ } else if (received > expected) {
+ /* we consumed data meant for next xfer */
+ to_keep = DIV_ROUND_UP(expected, 4);
+ hci_pio_push_to_next_rx(hci, xfer, to_keep);
+ }
+
+ /* then process the RX list pointer */
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+ }
+
+ /*
+ * We're about to give back ownership of the xfer structure
+ * to the waiting instance. Make sure no reference to it
+ * still exists.
+ */
+ if (pio->curr_rx == xfer) {
+ DBG("short RX ?");
+ pio->curr_rx = pio->curr_rx->next_data;
+ } else if (pio->curr_tx == xfer) {
+ DBG("short TX ?");
+ pio->curr_tx = pio->curr_tx->next_data;
+ } else if (xfer->data_left) {
+ DBG("PIO xfer count = %d after response",
+ xfer->data_left);
+ }
+
+ pio->curr_resp = xfer->next_resp;
+ if (xfer->completion)
+ complete(xfer->completion);
+ }
+ return !pio->curr_resp;
+}
+
+static void hci_pio_queue_resp(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_xfer *xfer = pio->curr_xfer;
+ struct hci_xfer *prev_queue_tail;
+
+ if (!(xfer->cmd_desc[0] & CMD_0_ROC))
+ return;
+
+ prev_queue_tail = pio->resp_queue;
+ pio->resp_queue = xfer;
+ if (pio->curr_resp) {
+ prev_queue_tail->next_resp = xfer;
+ } else {
+ pio->curr_resp = xfer;
+ if (!hci_pio_process_resp(hci, pio))
+ pio->enabled_irqs |= STAT_RESP_READY;
+ }
+}
+
+static bool hci_pio_process_cmd(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ while (pio->curr_xfer &&
+ (pio_reg_read(INTR_STATUS) & STAT_CMD_QUEUE_READY)) {
+ /*
+ * Always process the data FIFO before sending the command
+ * so needed TX data or RX space is available upfront.
+ */
+ hci_pio_queue_data(hci, pio);
+ /*
+ * Then queue our response request. This will also process
+ * the response FIFO in case it got suddenly filled up
+ * with results from previous commands.
+ */
+ hci_pio_queue_resp(hci, pio);
+ /*
+ * Finally send the command.
+ */
+ hci_pio_write_cmd(hci, pio->curr_xfer);
+ /*
+ * And move on.
+ */
+ pio->curr_xfer = pio->curr_xfer->next_xfer;
+ }
+ return !pio->curr_xfer;
+}
+
+static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ struct hci_xfer *prev_queue_tail;
+ int i;
+
+ DBG("n = %d", n);
+
+ /* link xfer instances together and initialize data count */
+ for (i = 0; i < n; i++) {
+ xfer[i].next_xfer = (i + 1 < n) ? &xfer[i + 1] : NULL;
+ xfer[i].next_data = NULL;
+ xfer[i].next_resp = NULL;
+ xfer[i].data_left = xfer[i].data_len;
+ }
+
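+	/*
+	 * pio->xfer_queue tracks the tail of the command list. If commands
+	 * are already in flight, append behind the previous tail; otherwise
+	 * start processing at once, arming the CMD_QUEUE_READY interrupt
+	 * only if the hardware command queue fills up before we are done.
+	 */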
+ spin_lock_irq(&pio->lock);
+ prev_queue_tail = pio->xfer_queue;
+ pio->xfer_queue = &xfer[n - 1];
+ if (pio->curr_xfer) {
+ prev_queue_tail->next_xfer = xfer;
+ } else {
+ pio->curr_xfer = xfer;
+ if (!hci_pio_process_cmd(hci, pio))
+ pio->enabled_irqs |= STAT_CMD_QUEUE_READY;
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("status = %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ }
+ spin_unlock_irq(&pio->lock);
+ return 0;
+}
+
+static bool hci_pio_dequeue_xfer_common(struct i3c_hci *hci,
+ struct hci_pio_data *pio,
+ struct hci_xfer *xfer, int n)
+{
+ struct hci_xfer *p, **p_prev_next;
+ int i;
+
+	/*
+	 * To safely dequeue a transfer request, it must be either entirely
+	 * processed, or not yet processed at all. If one of our requests is
+	 * still reachable from the data or resp lists, that means the command
+	 * was submitted but not yet completed.
+	 */
+ for (p = pio->curr_resp; p; p = p->next_resp)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+ for (p = pio->curr_rx; p; p = p->next_data)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+ for (p = pio->curr_tx; p; p = p->next_data)
+ for (i = 0; i < n; i++)
+ if (p == &xfer[i])
+ goto pio_screwed;
+
+	/*
+	 * The command was either completed or not yet submitted.
+	 * Unlink it from the queue if the latter.
+	 */
+ p_prev_next = &pio->curr_xfer;
+ for (p = pio->curr_xfer; p; p = p->next_xfer) {
+ if (p == &xfer[0]) {
+ *p_prev_next = xfer[n - 1].next_xfer;
+ break;
+ }
+ p_prev_next = &p->next_xfer;
+ }
+
+ /* return true if we actually unqueued something */
+ return !!p;
+
+pio_screwed:
+ /*
+ * Life is tough. We must invalidate the hardware state and
+ * discard everything that is still queued.
+ */
+ for (p = pio->curr_resp; p; p = p->next_resp) {
+ p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
+ if (p->completion)
+ complete(p->completion);
+ }
+ for (p = pio->curr_xfer; p; p = p->next_xfer) {
+ p->response = FIELD_PREP(RESP_ERR_FIELD, RESP_ERR_HC_TERMINATED);
+ if (p->completion)
+ complete(p->completion);
+ }
+ pio->curr_xfer = pio->curr_rx = pio->curr_tx = pio->curr_resp = NULL;
+
+ return true;
+}
+
+static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ int ret;
+
+ spin_lock_irq(&pio->lock);
+ DBG("n=%d status=%#x/%#x", n,
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ DBG("main_status = %#x/%#x",
+ readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
+
+ ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
+ spin_unlock_irq(&pio->lock);
+ return ret;
+}
+
+static void hci_pio_err(struct i3c_hci *hci, struct hci_pio_data *pio,
+ u32 status)
+{
+ /* TODO: this ought to be more sophisticated eventually */
+
+ if (pio_reg_read(INTR_STATUS) & STAT_RESP_READY) {
+ /* this may happen when an error is signaled with ROC unset */
+ u32 resp = pio_reg_read(RESPONSE_QUEUE_PORT);
+
+ dev_err(&hci->master.dev,
+ "orphan response (%#x) on error\n", resp);
+ }
+
+ /* dump states on programming errors */
+ if (status & STAT_PROG_ERRORS) {
+ u32 queue = pio_reg_read(QUEUE_CUR_STATUS);
+ u32 data = pio_reg_read(DATA_BUFFER_CUR_STATUS);
+
+ dev_err(&hci->master.dev,
+ "prog error %#lx (C/R/I = %ld/%ld/%ld, TX/RX = %ld/%ld)\n",
+ status & STAT_PROG_ERRORS,
+ FIELD_GET(CUR_CMD_Q_EMPTY_LEVEL, queue),
+ FIELD_GET(CUR_RESP_Q_LEVEL, queue),
+ FIELD_GET(CUR_IBI_Q_LEVEL, queue),
+ FIELD_GET(CUR_TX_BUF_LVL, data),
+ FIELD_GET(CUR_RX_BUF_LVL, data));
+ }
+
+ /* just bust out everything with pending responses for now */
+ hci_pio_dequeue_xfer_common(hci, pio, pio->curr_resp, 1);
+ /* ... and half-way TX transfers if any */
+ if (pio->curr_tx && pio->curr_tx->data_left != pio->curr_tx->data_len)
+ hci_pio_dequeue_xfer_common(hci, pio, pio->curr_tx, 1);
+ /* then reset the hardware */
+ mipi_i3c_hci_pio_reset(hci);
+ mipi_i3c_hci_resume(hci);
+
+ DBG("status=%#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+}
+
+static void hci_pio_set_ibi_thresh(struct i3c_hci *hci,
+ struct hci_pio_data *pio,
+ unsigned int thresh_val)
+{
+ u32 regval = pio->reg_queue_thresh;
+
+ regval &= ~QUEUE_IBI_STATUS_THLD;
+ regval |= FIELD_PREP(QUEUE_IBI_STATUS_THLD, thresh_val);
+ /* write the threshold reg only if it changes */
+ if (regval != pio->reg_queue_thresh) {
+ pio_reg_write(QUEUE_THLD_CTRL, regval);
+ pio->reg_queue_thresh = regval;
+ DBG("%d", thresh_val);
+ }
+}
+
+static bool hci_pio_get_ibi_segment(struct i3c_hci *hci,
+ struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ unsigned int nr_words, thresh_val;
+ u32 *p;
+
+ p = ibi->data_ptr;
+ p += (ibi->seg_len - ibi->seg_cnt) / 4;
+
+	while ((nr_words = ibi->seg_cnt / 4)) {
+ /* determine our IBI queue threshold value */
+ thresh_val = min(nr_words, pio->max_ibi_thresh);
+ hci_pio_set_ibi_thresh(hci, pio, thresh_val);
+ /* bail out if we don't have that amount of data ready */
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ /* extract the data from the IBI port */
+ nr_words = thresh_val;
+ ibi->seg_cnt -= nr_words * 4;
+ DBG("now %d left %d", nr_words * 4, ibi->seg_cnt);
+ while (nr_words--)
+ *p++ = pio_reg_read(IBI_PORT);
+ }
+
+ if (ibi->seg_cnt) {
+		/*
+		 * There are trailing bytes in the last word.
+		 * Fetch that word and extract its bytes in an
+		 * endian-independent way. Unlike the TX case, we must
+		 * not write past the end of the destination buffer.
+		 */
+ u32 data;
+ u8 *p_byte = (u8 *)p;
+
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ DBG("trailing %d", ibi->seg_cnt);
+ data = pio_reg_read(IBI_PORT);
+ data = (__force u32) cpu_to_le32(data);
+ while (ibi->seg_cnt--) {
+ *p_byte++ = data;
+ data >>= 8;
+ }
+ }
+
+ return true;
+}
+
+static bool hci_pio_prep_new_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ struct i3c_dev_desc *dev;
+ struct i3c_hci_dev_data *dev_data;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+ u32 ibi_status;
+
+ /*
+ * We have a new IBI. Try to set up its payload retrieval.
+ * When returning true, the IBI data has to be consumed whether
+ * or not we are set up to capture it. If we return true with
+ * ibi->slot == NULL that means the data payload has to be
+ * drained out of the IBI port and dropped.
+ */
+
+ ibi_status = pio_reg_read(IBI_PORT);
+ DBG("status = %#x", ibi_status);
+ ibi->addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ if (ibi_status & IBI_ERROR) {
+ dev_err(&hci->master.dev, "IBI error from %#x\n", ibi->addr);
+ return false;
+ }
+
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+ ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ ibi->seg_cnt = ibi->seg_len;
+
+ dev = i3c_hci_addr_to_dev(hci, ibi->addr);
+ if (!dev) {
+ dev_err(&hci->master.dev,
+ "IBI for unknown device %#x\n", ibi->addr);
+ return true;
+ }
+
+ dev_data = i3c_dev_get_master_data(dev);
+ dev_ibi = dev_data->ibi_data;
+ ibi->max_len = dev_ibi->max_len;
+
+ if (ibi->seg_len > ibi->max_len) {
+ dev_err(&hci->master.dev, "IBI payload too big (%d > %d)\n",
+ ibi->seg_len, ibi->max_len);
+ return true;
+ }
+
+ ibi->slot = i3c_generic_ibi_get_free_slot(dev_ibi->pool);
+ if (!ibi->slot) {
+ dev_err(&hci->master.dev, "no free slot for IBI\n");
+ } else {
+ ibi->slot->len = 0;
+ ibi->data_ptr = ibi->slot->data;
+ }
+ return true;
+}
+
+static void hci_pio_free_ibi_slot(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+
+ if (ibi->slot) {
+ dev_ibi = ibi->slot->dev->common.master_priv;
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, ibi->slot);
+ ibi->slot = NULL;
+ }
+}
+
+static bool hci_pio_process_ibi(struct i3c_hci *hci, struct hci_pio_data *pio)
+{
+ struct hci_pio_ibi_data *ibi = &pio->ibi;
+
+ if (!ibi->slot && !ibi->seg_cnt && ibi->last_seg)
+ if (!hci_pio_prep_new_ibi(hci, pio))
+ return false;
+
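+	/*
+	 * Walk the IBI segments: consume the current one (into the slot if
+	 * we have one, draining it otherwise), then fetch the next segment
+	 * status word, until the last segment is seen or the FIFO runs dry,
+	 * in which case we'll resume from the saved state on a later
+	 * interrupt.
+	 */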
+ for (;;) {
+ u32 ibi_status;
+ unsigned int ibi_addr;
+
+ if (ibi->slot) {
+ if (!hci_pio_get_ibi_segment(hci, pio))
+ return false;
+ ibi->slot->len += ibi->seg_len;
+ ibi->data_ptr += ibi->seg_len;
+ if (ibi->last_seg) {
+ /* was the last segment: submit it and leave */
+ i3c_master_queue_ibi(ibi->slot->dev, ibi->slot);
+ ibi->slot = NULL;
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ return true;
+ }
+ } else if (ibi->seg_cnt) {
+			/*
+			 * No slot but a non-zero count. This is the result
+			 * of some error and the payload must be drained.
+			 * This normally does not happen, so there is no
+			 * need for extra optimization here.
+			 */
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ do {
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ pio_reg_read(IBI_PORT);
+ } while (--ibi->seg_cnt);
+ if (ibi->last_seg)
+ return true;
+ }
+
+ /* try to move to the next segment right away */
+ hci_pio_set_ibi_thresh(hci, pio, 1);
+ if (!(pio_reg_read(INTR_STATUS) & STAT_IBI_STATUS_THLD))
+ return false;
+ ibi_status = pio_reg_read(IBI_PORT);
+ ibi_addr = FIELD_GET(IBI_TARGET_ADDR, ibi_status);
+ if (ibi->addr != ibi_addr) {
+ /* target address changed before last segment */
+ dev_err(&hci->master.dev,
+				"unexpected IBI address change from %d to %d\n",
+ ibi->addr, ibi_addr);
+ hci_pio_free_ibi_slot(hci, pio);
+ }
+ ibi->last_seg = ibi_status & IBI_LAST_STATUS;
+ ibi->seg_len = FIELD_GET(IBI_DATA_LENGTH, ibi_status);
+ ibi->seg_cnt = ibi->seg_len;
+ if (ibi->slot && ibi->slot->len + ibi->seg_len > ibi->max_len) {
+ dev_err(&hci->master.dev,
+ "IBI payload too big (%d > %d)\n",
+ ibi->slot->len + ibi->seg_len, ibi->max_len);
+ hci_pio_free_ibi_slot(hci, pio);
+ }
+ }
+
+ return false;
+}
+
+static int hci_pio_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct i3c_generic_ibi_pool *pool;
+ struct hci_pio_dev_ibi_data *dev_ibi;
+
+ dev_ibi = kmalloc(sizeof(*dev_ibi), GFP_KERNEL);
+ if (!dev_ibi)
+ return -ENOMEM;
+ pool = i3c_generic_ibi_alloc_pool(dev, req);
+ if (IS_ERR(pool)) {
+ kfree(dev_ibi);
+ return PTR_ERR(pool);
+ }
+ dev_ibi->pool = pool;
+ dev_ibi->max_len = req->max_payload_len;
+ dev_data->ibi_data = dev_ibi;
+ return 0;
+}
+
+static void hci_pio_free_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ dev_data->ibi_data = NULL;
+ i3c_generic_ibi_free_pool(dev_ibi->pool);
+ kfree(dev_ibi);
+}
+
+static void hci_pio_recycle_ibi_slot(struct i3c_hci *hci,
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ struct i3c_hci_dev_data *dev_data = i3c_dev_get_master_data(dev);
+ struct hci_pio_dev_ibi_data *dev_ibi = dev_data->ibi_data;
+
+ i3c_generic_ibi_recycle_slot(dev_ibi->pool, slot);
+}
+
+static bool hci_pio_irq_handler(struct i3c_hci *hci, unsigned int unused)
+{
+ struct hci_pio_data *pio = hci->io_data;
+ u32 status;
+
+ spin_lock(&pio->lock);
+ status = pio_reg_read(INTR_STATUS);
+ DBG("(in) status: %#x/%#x", status, pio->enabled_irqs);
+ status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
+ if (!status) {
+ spin_unlock(&pio->lock);
+ return false;
+ }
+
+ if (status & STAT_IBI_STATUS_THLD)
+ hci_pio_process_ibi(hci, pio);
+
+ if (status & STAT_RX_THLD)
+ if (hci_pio_process_rx(hci, pio))
+ pio->enabled_irqs &= ~STAT_RX_THLD;
+ if (status & STAT_TX_THLD)
+ if (hci_pio_process_tx(hci, pio))
+ pio->enabled_irqs &= ~STAT_TX_THLD;
+ if (status & STAT_RESP_READY)
+ if (hci_pio_process_resp(hci, pio))
+ pio->enabled_irqs &= ~STAT_RESP_READY;
+
+ if (unlikely(status & STAT_LATENCY_WARNINGS)) {
+ pio_reg_write(INTR_STATUS, status & STAT_LATENCY_WARNINGS);
+ dev_warn_ratelimited(&hci->master.dev,
+ "encountered warning condition %#lx\n",
+ status & STAT_LATENCY_WARNINGS);
+ }
+
+ if (unlikely(status & STAT_ALL_ERRORS)) {
+ pio_reg_write(INTR_STATUS, status & STAT_ALL_ERRORS);
+ hci_pio_err(hci, pio, status & STAT_ALL_ERRORS);
+ }
+
+ if (status & STAT_CMD_QUEUE_READY)
+ if (hci_pio_process_cmd(hci, pio))
+ pio->enabled_irqs &= ~STAT_CMD_QUEUE_READY;
+
+ pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
+ DBG("(out) status: %#x/%#x",
+ pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
+ spin_unlock(&pio->lock);
+ return true;
+}
+
+const struct hci_io_ops mipi_i3c_hci_pio = {
+ .init = hci_pio_init,
+ .cleanup = hci_pio_cleanup,
+ .queue_xfer = hci_pio_queue_xfer,
+ .dequeue_xfer = hci_pio_dequeue_xfer,
+ .irq_handler = hci_pio_irq_handler,
+ .request_ibi = hci_pio_request_ibi,
+ .free_ibi = hci_pio_free_ibi,
+ .recycle_ibi_slot = hci_pio_recycle_ibi_slot,
+};
diff --git a/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h b/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h
new file mode 100644
index 000000000000..1e36b75afb16
--- /dev/null
+++ b/drivers/i3c/master/mipi-i3c-hci/xfer_mode_rate.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright (c) 2020, MIPI Alliance, Inc.
+ *
+ * Author: Nicolas Pitre <npitre@baylibre.com>
+ *
+ * Transfer Mode/Rate Table definitions as found in extended capability
+ * sections 0x04 and 0x08.
+ * This applies starting from I3C HCI v2.0.
+ */
+
+#ifndef XFER_MODE_RATE_H
+#define XFER_MODE_RATE_H
+
+/*
+ * Master Transfer Mode Table Fixed Indexes.
+ *
+ * Indexes 0x0 and 0x8 are mandatory. Availability for the rest must be
+ * obtained from the mode table in the extended capability area.
+ * Presence and definitions for indexes beyond these ones may vary.
+ */
+#define XFERMODE_IDX_I3C_SDR 0x00 /* I3C SDR Mode */
+#define XFERMODE_IDX_I3C_HDR_DDR 0x01 /* I3C HDR-DDR Mode */
+#define XFERMODE_IDX_I3C_HDR_T 0x02 /* I3C HDR-Ternary Mode */
+#define XFERMODE_IDX_I3C_HDR_BT 0x03 /* I3C HDR-BT Mode */
+#define XFERMODE_IDX_I2C 0x08 /* Legacy I2C Mode */
+
+/*
+ * Transfer Mode Table Entry Bits Definitions
+ */
+#define XFERMODE_VALID_XFER_ADD_FUNC GENMASK(21, 16)
+#define XFERMODE_ML_DATA_XFER_CODING GENMASK(15, 11)
+#define XFERMODE_ML_ADDL_LANES GENMASK(10, 8)
+#define XFERMODE_SUPPORTED BIT(7)
+#define XFERMODE_MODE GENMASK(3, 0)
+
+/*
+ * Master Data Transfer Rate Selector Values.
+ *
+ * These are the values to be used in the command descriptor XFER_RATE field
+ * and found in the RATE_ID field below.
+ * The I3C_SDR0, I3C_SDR1, I3C_SDR2, I3C_SDR3, I3C_SDR4 and I2C_FM rates
+ * are required; everything else is optional and discoverable in the
+ * Data Transfer Rate Table. The rates indicated here are typical ones;
+ * the actual rates may vary slightly and are also specified in the
+ * Data Transfer Rate Table.
+ */
+#define XFERRATE_I3C_SDR0 0x00 /* 12.5 MHz */
+#define XFERRATE_I3C_SDR1 0x01 /* 8 MHz */
+#define XFERRATE_I3C_SDR2 0x02 /* 6 MHz */
+#define XFERRATE_I3C_SDR3 0x03 /* 4 MHz */
+#define XFERRATE_I3C_SDR4 0x04 /* 2 MHz */
+#define XFERRATE_I3C_SDR_FM_FMP	0x05	/* 400 kHz / 1 MHz */
+#define XFERRATE_I3C_SDR_USER6 0x06 /* User Defined */
+#define XFERRATE_I3C_SDR_USER7 0x07 /* User Defined */
+
+#define XFERRATE_I2C_FM		0x00	/* 400 kHz */
+#define XFERRATE_I2C_FMP 0x01 /* 1 MHz */
+#define XFERRATE_I2C_USER2 0x02 /* User Defined */
+#define XFERRATE_I2C_USER3 0x03 /* User Defined */
+#define XFERRATE_I2C_USER4 0x04 /* User Defined */
+#define XFERRATE_I2C_USER5 0x05 /* User Defined */
+#define XFERRATE_I2C_USER6 0x06 /* User Defined */
+#define XFERRATE_I2C_USER7 0x07 /* User Defined */
+
+/*
+ * Master Data Transfer Rate Table Mode ID values.
+ */
+#define XFERRATE_MODE_I3C 0x00
+#define XFERRATE_MODE_I2C 0x08
+
+/*
+ * Master Data Transfer Rate Table Entry Bits Definitions
+ */
+#define XFERRATE_MODE_ID GENMASK(31, 28)
+#define XFERRATE_RATE_ID GENMASK(22, 20)
+#define XFERRATE_ACTUAL_RATE_KHZ GENMASK(19, 0)
+
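+/*
+ * Illustrative sketch only: decoding a Data Transfer Rate Table entry read
+ * from the extended capability area. The helper name and pr_info() report
+ * are assumptions made for illustration (and <linux/bitfield.h> plus
+ * <linux/printk.h> are assumed to be available); only the field masks
+ * above come from this header.
+ */
+static inline void xferrate_decode_entry(u32 entry)
+{
+	u32 mode_id = FIELD_GET(XFERRATE_MODE_ID, entry);
+	u32 rate_id = FIELD_GET(XFERRATE_RATE_ID, entry);
+	u32 rate_khz = FIELD_GET(XFERRATE_ACTUAL_RATE_KHZ, entry);
+
+	pr_info("rate table entry: mode %u, rate id %u, %u kHz\n",
+		mode_id, rate_id, rate_khz);
+}
+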
+#endif
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c
index 2162bc80f09e..013ad33fbbc8 100644
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
sense_rq->rq_disk = rq->rq_disk;
sense_rq->cmd_flags = REQ_OP_DRV_IN;
ide_req(sense_rq)->type = ATA_PRIV_SENSE;
- sense_rq->rq_flags |= RQF_PREEMPT;
req->cmd[0] = GPCMD_REQUEST_SENSE;
req->cmd[4] = cmd_len;
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 1a53c7a75224..4867b67b60d6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -515,15 +515,10 @@ repeat:
* above to return us whatever is in the queue. Since we call
* ide_do_request() ourselves, we end up taking requests while
* the queue is blocked...
- *
- * We let requests forced at head of queue with ide-preempt
- * though. I hope that doesn't happen too much, hopefully not
- * unless the subdriver triggers such a thing in its own PM
- * state machine.
*/
if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
ata_pm_request(rq) == 0 &&
- (rq->rq_flags & RQF_PREEMPT) == 0) {
+ (rq->rq_flags & RQF_PM) == 0) {
/* there should be no pending command at this point */
ide_unlock_port(hwif);
goto plug_device;
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c
index 192e6c65d34e..82ab308f1aaf 100644
--- a/drivers/ide/ide-pm.c
+++ b/drivers/ide/ide-pm.c
@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
}
memset(&rqpm, 0, sizeof(rqpm));
- rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
+ rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
ide_req(rq)->type = ATA_PRIV_PM_RESUME;
ide_req(rq)->special = &rqpm;
rqpm.pm_step = IDE_PM_START_RESUME;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index d79335506ecd..28f93b9aa51b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -963,6 +963,39 @@ static struct cpuidle_state dnv_cstates[] __initdata = {
.enter = NULL }
};
+/*
+ * Note that, depending on HW and FW revision, the SnowRidge SoC may or may
+ * not support C6, and this is indicated in the CPUID mwait leaf.
+ */
+static struct cpuidle_state snr_cstates[] __initdata = {
+ {
+ .name = "C1",
+ .desc = "MWAIT 0x00",
+ .flags = MWAIT2flg(0x00),
+ .exit_latency = 2,
+ .target_residency = 2,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C1E",
+ .desc = "MWAIT 0x01",
+ .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE,
+ .exit_latency = 15,
+ .target_residency = 25,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .name = "C6",
+ .desc = "MWAIT 0x20",
+ .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 130,
+ .target_residency = 500,
+ .enter = &intel_idle,
+ .enter_s2idle = intel_idle_s2idle, },
+ {
+ .enter = NULL }
+};
+
static const struct idle_cpu idle_cpu_nehalem __initconst = {
.state_table = nehalem_cstates,
.auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE,
@@ -1084,6 +1117,12 @@ static const struct idle_cpu idle_cpu_dnv __initconst = {
.use_acpi = true,
};
+static const struct idle_cpu idle_cpu_snr __initconst = {
+ .state_table = snr_cstates,
+ .disable_promotion_to_c1e = true,
+ .use_acpi = true,
+};
+
static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx),
X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem),
@@ -1122,7 +1161,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt),
X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_dnv),
+ X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr),
{}
};
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index 560865f65dc4..67f86c405a26 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -157,12 +157,6 @@ void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit)
}
EXPORT_SYMBOL(rtrs_clt_put_permit);
-void *rtrs_permit_to_pdu(struct rtrs_permit *permit)
-{
- return permit + 1;
-}
-EXPORT_SYMBOL(rtrs_permit_to_pdu);
-
/**
* rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit
* @sess: client session pointer
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h
index 9af750f4d783..8738e90e715a 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs.h
@@ -63,13 +63,6 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
void rtrs_clt_close(struct rtrs_clt *sess);
-/**
- * rtrs_permit_to_pdu() - converts rtrs_permit to opaque pdu pointer
- * @permit: RTRS permit pointer, it associates the memory allocation for future
- * RDMA operation.
- */
-void *rtrs_permit_to_pdu(struct rtrs_permit *permit);
-
enum {
RTRS_PERMIT_NOWAIT = 0,
RTRS_PERMIT_WAIT = 1,
diff --git a/drivers/interconnect/imx/imx.c b/drivers/interconnect/imx/imx.c
index 41dba7090c2a..c770951a909c 100644
--- a/drivers/interconnect/imx/imx.c
+++ b/drivers/interconnect/imx/imx.c
@@ -96,9 +96,10 @@ static int imx_icc_node_init_qos(struct icc_provider *provider,
return -ENODEV;
}
/* Allow scaling to be disabled on a per-node basis */
- if (!dn || !of_device_is_available(dn)) {
+ if (!of_device_is_available(dn)) {
dev_warn(dev, "Missing property %s, skip scaling %s\n",
adj->phandle_name, node->name);
+ of_node_put(dn);
return 0;
}
diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c
index ba43a15aefec..d7768d3c6d8a 100644
--- a/drivers/interconnect/imx/imx8mq.c
+++ b/drivers/interconnect/imx/imx8mq.c
@@ -7,6 +7,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/interconnect-provider.h>
#include <dt-bindings/interconnect/imx8mq.h>
#include "imx.h"
@@ -94,6 +95,7 @@ static struct platform_driver imx8mq_icc_driver = {
.remove = imx8mq_icc_remove,
.driver = {
.name = "imx8mq-interconnect",
+ .sync_state = icc_sync_state,
},
};
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index a8f93ba265f8..b3fb5b02bcf1 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -42,13 +42,23 @@ config INTERCONNECT_QCOM_QCS404
This is a driver for the Qualcomm Network-on-Chip on qcs404-based
platforms.
+config INTERCONNECT_QCOM_RPMH_POSSIBLE
+ tristate
+ default INTERCONNECT_QCOM
+ depends on QCOM_RPMH || (COMPILE_TEST && !QCOM_RPMH)
+ depends on QCOM_COMMAND_DB || (COMPILE_TEST && !QCOM_COMMAND_DB)
+ depends on OF || COMPILE_TEST
+ help
+ Compile-testing RPMH drivers is possible on other platforms,
+ but in order to avoid link failures, drivers must not be built-in
+	  when QCOM_RPMH or QCOM_COMMAND_DB are loadable modules.
+
config INTERCONNECT_QCOM_RPMH
tristate
config INTERCONNECT_QCOM_SC7180
tristate "Qualcomm SC7180 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -57,8 +67,7 @@ config INTERCONNECT_QCOM_SC7180
config INTERCONNECT_QCOM_SDM845
tristate "Qualcomm SDM845 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -67,8 +76,7 @@ config INTERCONNECT_QCOM_SDM845
config INTERCONNECT_QCOM_SM8150
tristate "Qualcomm SM8150 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
@@ -77,8 +85,7 @@ config INTERCONNECT_QCOM_SM8150
config INTERCONNECT_QCOM_SM8250
tristate "Qualcomm SM8250 interconnect driver"
- depends on INTERCONNECT_QCOM
- depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
select INTERCONNECT_QCOM_RPMH
select INTERCONNECT_QCOM_BCM_VOTER
help
diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c
index 5304aea3b058..366870150cbd 100644
--- a/drivers/interconnect/qcom/sdm845.c
+++ b/drivers/interconnect/qcom/sdm845.c
@@ -177,6 +177,7 @@ DEFINE_QBCM(bcm_sn15, "SN15", false, &qnm_memnoc);
static struct qcom_icc_bcm *aggre1_noc_bcms[] = {
&bcm_sn9,
+ &bcm_qup0,
};
static struct qcom_icc_node *aggre1_noc_nodes[] = {
@@ -190,6 +191,7 @@ static struct qcom_icc_node *aggre1_noc_nodes[] = {
[SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
[SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc,
+ [MASTER_QUP_1] = &qhm_qup1,
};
static const struct qcom_icc_desc sdm845_aggre1_noc = {
@@ -218,6 +220,7 @@ static struct qcom_icc_node *aggre2_noc_nodes[] = {
[SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
[SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc,
[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+ [MASTER_QUP_2] = &qhm_qup2,
};
static const struct qcom_icc_desc sdm845_aggre2_noc = {
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index f54cd79b43e4..6a1f7048dacc 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -1973,8 +1973,6 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
return r;
}
- iommu->int_enabled = true;
-
return 0;
}
@@ -2169,6 +2167,7 @@ static int iommu_init_irq(struct amd_iommu *iommu)
if (ret)
return ret;
+ iommu->int_enabled = true;
enable_faults:
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 7e2c445a1fae..f0adbc48fd17 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -3854,6 +3854,9 @@ static int irq_remapping_select(struct irq_domain *d, struct irq_fwspec *fwspec,
struct amd_iommu *iommu;
int devid = -1;
+ if (!amd_iommu_irq_remap)
+ return 0;
+
if (x86_fwspec_is_ioapic(fwspec))
devid = get_ioapic_devid(fwspec->param[0]);
else if (x86_fwspec_is_hpet(fwspec))
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 5dff7ffbef11..1b83d140742f 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -196,6 +196,8 @@ static int qcom_smmu_cfg_probe(struct arm_smmu_device *smmu)
set_bit(qsmmu->bypass_cbndx, smmu->context_map);
+ arm_smmu_cb_write(smmu, qsmmu->bypass_cbndx, ARM_SMMU_CB_SCTLR, 0);
+
reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, CBAR_TYPE_S1_TRANS_S2_BYPASS);
arm_smmu_gr1_write(smmu, ARM_SMMU_GR1_CBAR(qsmmu->bypass_cbndx), reg);
}
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index f0305e6aac1b..4078358ed66e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -863,33 +863,6 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
int i, count = 0;
- /*
- * The Intel graphic driver is used to assume that the returned
- * sg list is not combound. This blocks the efforts of converting
- * Intel IOMMU driver to dma-iommu api's. Add this quirk to make the
- * device driver work and should be removed once it's fixed in i915
- * driver.
- */
- if (IS_ENABLED(CONFIG_DRM_I915) && dev_is_pci(dev) &&
- to_pci_dev(dev)->vendor == PCI_VENDOR_ID_INTEL &&
- (to_pci_dev(dev)->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
- for_each_sg(sg, s, nents, i) {
- unsigned int s_iova_off = sg_dma_address(s);
- unsigned int s_length = sg_dma_len(s);
- unsigned int s_iova_len = s->length;
-
- s->offset += s_iova_off;
- s->length = s_length;
- sg_dma_address(s) = dma_addr + s_iova_off;
- sg_dma_len(s) = s_length;
- dma_addr += s_iova_len;
-
- pr_info_once("sg combining disabled due to i915 driver\n");
- }
-
- return nents;
- }
-
for_each_sg(sg, s, nents, i) {
/* Restore this segment's original unaligned fields first */
unsigned int s_iova_off = sg_dma_address(s);
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index b46dbfa6d0ed..004feaed3c72 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1461,8 +1461,8 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
int mask = ilog2(__roundup_pow_of_two(npages));
unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
- if (WARN_ON_ONCE(!ALIGN(addr, align)))
- addr &= ~(align - 1);
+ if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
+ addr = ALIGN_DOWN(addr, align);
desc.qw0 = QI_EIOTLB_PASID(pasid) |
QI_EIOTLB_DID(did) |
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 788119c5b021..65cf06d70bf4 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -719,6 +719,8 @@ static int domain_update_device_node(struct dmar_domain *domain)
return nid;
}
+static void domain_update_iotlb(struct dmar_domain *domain);
+
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
@@ -744,6 +746,8 @@ static void domain_update_iommu_cap(struct dmar_domain *domain)
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw - 1);
else
domain->domain.geometry.aperture_end = __DOMAIN_MAX_ADDR(domain->gaw);
+
+ domain_update_iotlb(domain);
}
struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
@@ -1464,17 +1468,22 @@ static void domain_update_iotlb(struct dmar_domain *domain)
assert_spin_locked(&device_domain_lock);
- list_for_each_entry(info, &domain->devices, link) {
- struct pci_dev *pdev;
-
- if (!info->dev || !dev_is_pci(info->dev))
- continue;
-
- pdev = to_pci_dev(info->dev);
- if (pdev->ats_enabled) {
+ list_for_each_entry(info, &domain->devices, link)
+ if (info->ats_enabled) {
has_iotlb_device = true;
break;
}
+
+ if (!has_iotlb_device) {
+ struct subdev_domain_info *sinfo;
+
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ if (info && info->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
}
domain->has_iotlb_device = has_iotlb_device;
@@ -1555,25 +1564,37 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
#endif
}
+static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
+ u64 addr, unsigned int mask)
+{
+ u16 sid, qdep;
+
+ if (!info || !info->ats_enabled)
+ return;
+
+ sid = info->bus << 8 | info->devfn;
+ qdep = info->ats_qdep;
+ qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
+ qdep, addr, mask);
+}
+
static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
u64 addr, unsigned mask)
{
- u16 sid, qdep;
unsigned long flags;
struct device_domain_info *info;
+ struct subdev_domain_info *sinfo;
if (!domain->has_iotlb_device)
return;
spin_lock_irqsave(&device_domain_lock, flags);
- list_for_each_entry(info, &domain->devices, link) {
- if (!info->ats_enabled)
- continue;
+ list_for_each_entry(info, &domain->devices, link)
+ __iommu_flush_dev_iotlb(info, addr, mask);
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ info = get_domain_info(sinfo->pdev);
+ __iommu_flush_dev_iotlb(info, addr, mask);
}
spin_unlock_irqrestore(&device_domain_lock, flags);
}
@@ -1877,6 +1898,7 @@ static struct dmar_domain *alloc_domain(int flags)
domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
+ INIT_LIST_HEAD(&domain->subdevices);
return domain;
}
@@ -2547,7 +2569,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
info->iommu = iommu;
info->pasid_table = NULL;
info->auxd_enabled = 0;
- INIT_LIST_HEAD(&info->auxiliary_domains);
+ INIT_LIST_HEAD(&info->subdevices);
if (dev && dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -4475,33 +4497,61 @@ is_aux_domain(struct device *dev, struct iommu_domain *domain)
domain->type == IOMMU_DOMAIN_UNMANAGED;
}
-static void auxiliary_link_device(struct dmar_domain *domain,
- struct device *dev)
+static inline struct subdev_domain_info *
+lookup_subdev_info(struct dmar_domain *domain, struct device *dev)
+{
+ struct subdev_domain_info *sinfo;
+
+ if (!list_empty(&domain->subdevices)) {
+ list_for_each_entry(sinfo, &domain->subdevices, link_domain) {
+ if (sinfo->pdev == dev)
+ return sinfo;
+ }
+ }
+
+ return NULL;
+}
+
+static int auxiliary_link_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
assert_spin_locked(&device_domain_lock);
if (WARN_ON(!info))
- return;
+ return -EINVAL;
+
+ if (!sinfo) {
+ sinfo = kzalloc(sizeof(*sinfo), GFP_ATOMIC);
+ sinfo->domain = domain;
+ sinfo->pdev = dev;
+ list_add(&sinfo->link_phys, &info->subdevices);
+ list_add(&sinfo->link_domain, &domain->subdevices);
+ }
- domain->auxd_refcnt++;
- list_add(&domain->auxd, &info->auxiliary_domains);
+ return ++sinfo->users;
}
-static void auxiliary_unlink_device(struct dmar_domain *domain,
- struct device *dev)
+static int auxiliary_unlink_device(struct dmar_domain *domain,
+ struct device *dev)
{
struct device_domain_info *info = get_domain_info(dev);
+ struct subdev_domain_info *sinfo = lookup_subdev_info(domain, dev);
+ int ret;
assert_spin_locked(&device_domain_lock);
- if (WARN_ON(!info))
- return;
+ if (WARN_ON(!info || !sinfo || sinfo->users <= 0))
+ return -EINVAL;
- list_del(&domain->auxd);
- domain->auxd_refcnt--;
+ ret = --sinfo->users;
+ if (!ret) {
+ list_del(&sinfo->link_phys);
+ list_del(&sinfo->link_domain);
+ kfree(sinfo);
+ }
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
- ioasid_put(domain->default_pasid);
+ return ret;
}
static int aux_domain_add_dev(struct dmar_domain *domain,
@@ -4530,6 +4580,19 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
}
spin_lock_irqsave(&device_domain_lock, flags);
+ ret = auxiliary_link_device(domain, dev);
+ if (ret <= 0)
+ goto link_failed;
+
+ /*
+ * Subdevices from the same physical device can be attached to the
+ * same domain. For such cases, only the first subdevice attachment
+ * needs to go through the full steps in this function. So if ret >
+ * 1, just goto out.
+ */
+ if (ret > 1)
+ goto out;
+
/*
* iommu->lock must be held to attach domain to iommu and setup the
* pasid entry for second level translation.
@@ -4548,10 +4611,9 @@ static int aux_domain_add_dev(struct dmar_domain *domain,
domain->default_pasid);
if (ret)
goto table_failed;
- spin_unlock(&iommu->lock);
-
- auxiliary_link_device(domain, dev);
+ spin_unlock(&iommu->lock);
+out:
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
@@ -4560,8 +4622,10 @@ table_failed:
domain_detach_iommu(domain, iommu);
attach_failed:
spin_unlock(&iommu->lock);
+ auxiliary_unlink_device(domain, dev);
+link_failed:
spin_unlock_irqrestore(&device_domain_lock, flags);
- if (!domain->auxd_refcnt && domain->default_pasid > 0)
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
ioasid_put(domain->default_pasid);
return ret;
@@ -4581,14 +4645,18 @@ static void aux_domain_remove_dev(struct dmar_domain *domain,
info = get_domain_info(dev);
iommu = info->iommu;
- auxiliary_unlink_device(domain, dev);
-
- spin_lock(&iommu->lock);
- intel_pasid_tear_down_entry(iommu, dev, domain->default_pasid, false);
- domain_detach_iommu(domain, iommu);
- spin_unlock(&iommu->lock);
+ if (!auxiliary_unlink_device(domain, dev)) {
+ spin_lock(&iommu->lock);
+ intel_pasid_tear_down_entry(iommu, dev,
+ domain->default_pasid, false);
+ domain_detach_iommu(domain, iommu);
+ spin_unlock(&iommu->lock);
+ }
spin_unlock_irqrestore(&device_domain_lock, flags);
+
+ if (list_empty(&domain->subdevices) && domain->default_pasid > 0)
+ ioasid_put(domain->default_pasid);
}
static int prepare_domain_attach_device(struct iommu_domain *domain,
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index aeffda92b10b..685200a5cff0 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1353,6 +1353,8 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
irq_data = irq_domain_get_irq_data(domain, virq + i);
irq_cfg = irqd_cfg(irq_data);
if (!irq_data || !irq_cfg) {
+ if (!i)
+ kfree(data);
ret = -EINVAL;
goto out_free_data;
}
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 4fa248b98031..790ef3497e7e 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -142,7 +142,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
if (sdev->dev_iotlb) {
desc.qw0 = QI_DEV_EIOTLB_PASID(svm->pasid) |
@@ -166,7 +166,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
}
desc.qw2 = 0;
desc.qw3 = 0;
- qi_submit_sync(svm->iommu, &desc, 1, 0);
+ qi_submit_sync(sdev->iommu, &desc, 1, 0);
}
}
@@ -211,7 +211,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
*/
rcu_read_lock();
list_for_each_entry_rcu(sdev, &svm->devs, list)
- intel_pasid_tear_down_entry(svm->iommu, sdev->dev,
+ intel_pasid_tear_down_entry(sdev->iommu, sdev->dev,
svm->pasid, true);
rcu_read_unlock();
@@ -281,6 +281,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
struct dmar_domain *dmar_domain;
struct device_domain_info *info;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int ret = 0;
if (WARN_ON(!iommu) || !data)
@@ -363,6 +364,7 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
}
sdev->dev = dev;
sdev->sid = PCI_DEVID(info->bus, info->devfn);
+ sdev->iommu = iommu;
/* Only count users if device has aux domains */
if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX))
@@ -381,12 +383,12 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev,
* each bind of a new device even with an existing PASID, we need to
* call the nested mode setup function here.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_nested(iommu, dev,
(pgd_t *)(uintptr_t)data->gpgd,
data->hpasid, &data->vendor.vtd, dmar_domain,
data->addr_width);
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
dev_err_ratelimited(dev, "Failed to set up PASID %llu in nested mode, Err %d\n",
data->hpasid, ret);
@@ -486,6 +488,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
struct device_domain_info *info;
struct intel_svm_dev *sdev;
struct intel_svm *svm = NULL;
+ unsigned long iflags;
int pasid_max;
int ret;
@@ -546,6 +549,7 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
goto out;
}
sdev->dev = dev;
+ sdev->iommu = iommu;
ret = intel_iommu_enable_pasid(iommu, dev);
if (ret) {
@@ -575,7 +579,6 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
kfree(sdev);
goto out;
}
- svm->iommu = iommu;
if (pasid_max > intel_pasid_max_id)
pasid_max = intel_pasid_max_id;
@@ -605,14 +608,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
}
}
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
if (mm)
mmu_notifier_unregister(&svm->notifier, mm);
@@ -632,14 +635,14 @@ intel_svm_bind_mm(struct device *dev, unsigned int flags,
* Binding a new device with existing PASID, need to setup
* the PASID entry.
*/
- spin_lock(&iommu->lock);
+ spin_lock_irqsave(&iommu->lock, iflags);
ret = intel_pasid_setup_first_level(iommu, dev,
mm ? mm->pgd : init_mm.pgd,
svm->pasid, FLPT_DEFAULT_DID,
(mm ? 0 : PASID_FLAG_SUPERVISOR_MODE) |
(cpu_feature_enabled(X86_FEATURE_LA57) ?
PASID_FLAG_FL5LP : 0));
- spin_unlock(&iommu->lock);
+ spin_unlock_irqrestore(&iommu->lock, iflags);
if (ret) {
kfree(sdev);
goto out;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 4bb3293ae4d7..d20b8b333d30 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -358,7 +358,7 @@ static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
* @iovad: - iova domain in question.
* @pfn: - page frame number
* This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
+ * given domain which matches the given pfn.
*/
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
{
@@ -601,7 +601,7 @@ void queue_iova(struct iova_domain *iovad,
EXPORT_SYMBOL_GPL(queue_iova);
/**
- * put_iova_domain - destroys the iova doamin
+ * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
*/
@@ -712,9 +712,9 @@ EXPORT_SYMBOL_GPL(reserve_iova);
/**
* copy_reserved_iova - copies the reserved between domains
- * @from: - source doamin from where to copy
+ * @from: - source domain from where to copy
* @to: - destination domin where to copy
- * This function copies reserved iova's from one doamin to
+ * This function copies reserved iova's from one domain to
* other.
*/
void
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c951ad24d377..ed46e6057e33 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3844,8 +3844,6 @@ static void its_vpe_schedule(struct its_vpe *vpe)
val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
val |= GICR_VPENDBASER_Valid;
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
- its_wait_vpt_parse_complete();
}
static void its_vpe_deschedule(struct its_vpe *vpe)
@@ -3893,6 +3891,10 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
its_vpe_deschedule(vpe);
return 0;
+ case COMMIT_VPE:
+ its_wait_vpt_parse_complete();
+ return 0;
+
case INVALL_VPE:
its_vpe_invall(vpe);
return 0;
@@ -4054,8 +4056,6 @@ static void its_vpe_4_1_schedule(struct its_vpe *vpe,
val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
-
- its_wait_vpt_parse_complete();
}
static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
@@ -4130,6 +4130,10 @@ static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
its_vpe_4_1_deschedule(vpe, info);
return 0;
+ case COMMIT_VPE:
+ its_wait_vpt_parse_complete();
+ return 0;
+
case INVALL_VPE:
its_vpe_4_1_invall(vpe);
return 0;
diff --git a/drivers/irqchip/irq-gic-v4.c b/drivers/irqchip/irq-gic-v4.c
index 0c18714ae13e..5d1dc9915272 100644
--- a/drivers/irqchip/irq-gic-v4.c
+++ b/drivers/irqchip/irq-gic-v4.c
@@ -232,6 +232,8 @@ int its_make_vpe_non_resident(struct its_vpe *vpe, bool db)
if (!ret)
vpe->resident = false;
+ vpe->ready = false;
+
return ret;
}
@@ -258,6 +260,23 @@ int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en)
return ret;
}
+int its_commit_vpe(struct its_vpe *vpe)
+{
+ struct its_cmd_info info = {
+ .cmd_type = COMMIT_VPE,
+ };
+ int ret;
+
+ WARN_ON(preemptible());
+
+ ret = its_send_vpe_cmd(vpe, &info);
+ if (!ret)
+ vpe->ready = true;
+
+ return ret;
+}
+
+
int its_invall_vpe(struct its_vpe *vpe)
{
struct its_cmd_info info = {
diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
index 26cf0ac9c4ad..c9a53c222472 100644
--- a/drivers/isdn/mISDN/Kconfig
+++ b/drivers/isdn/mISDN/Kconfig
@@ -13,6 +13,7 @@ if MISDN != n
config MISDN_DSP
tristate "Digital Audio Processing of transparent data"
depends on MISDN
+ select BITREVERSE
help
Enable support for digital audio processing capability.
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 8f39f9ba5c80..4c2ce210c123 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -19,6 +19,7 @@ if NVM
config NVM_PBLK
tristate "Physical Block Device Open-Channel SSD target"
+ select CRC32
help
Allows an open-channel SSD to be exposed as a block device to the
host. The target assumes the device exposes raw flash and must be
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 05b1009e2820..f4abe3529acd 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -16,6 +16,13 @@ config ARM_MHU
The controller has 3 mailbox channels, the last of which can be
used in Secure mode only.
+config ARM_MHU_V2
+ tristate "ARM MHUv2 Mailbox"
+ depends on ARM_AMBA
+ help
+ Say Y here if you want to build the ARM MHUv2 controller driver,
+ which provides unidirectional mailboxes between processing elements.
+
config IMX_MBOX
tristate "i.MX Mailbox"
depends on ARCH_MXC || COMPILE_TEST
@@ -201,7 +208,7 @@ config BCM_FLEXRM_MBOX
config STM32_IPCC
tristate "STM32 IPCC Mailbox"
- depends on MACH_STM32MP157
+ depends on MACH_STM32MP157 || COMPILE_TEST
help
	  Mailbox implementation for STMicroelectronics STM32 family chips
with hardware for Inter-Processor Communication Controller (IPCC)
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 2e06e02b2e03..7194fa92c787 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
obj-$(CONFIG_ARM_MHU) += arm_mhu.o arm_mhu_db.o
+obj-$(CONFIG_ARM_MHU_V2) += arm_mhuv2.o
+
obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
obj-$(CONFIG_ARMADA_37XX_RWTM_MBOX) += armada-37xx-rwtm-mailbox.o
diff --git a/drivers/mailbox/arm_mhu_db.c b/drivers/mailbox/arm_mhu_db.c
index 275efe4cca0c..8eb66c4ecf5b 100644
--- a/drivers/mailbox/arm_mhu_db.c
+++ b/drivers/mailbox/arm_mhu_db.c
@@ -180,7 +180,7 @@ static void mhu_db_shutdown(struct mbox_chan *chan)
/* Reset channel */
mhu_db_mbox_clear_irq(chan);
- kfree(chan->con_priv);
+ devm_kfree(mbox->dev, chan->con_priv);
chan->con_priv = NULL;
}
diff --git a/drivers/mailbox/arm_mhuv2.c b/drivers/mailbox/arm_mhuv2.c
new file mode 100644
index 000000000000..67fb10885bb4
--- /dev/null
+++ b/drivers/mailbox/arm_mhuv2.c
@@ -0,0 +1,1136 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Message Handling Unit Version 2 (MHUv2) driver.
+ *
+ * Copyright (C) 2020 ARM Ltd.
+ * Copyright (C) 2020 Linaro Ltd.
+ *
+ * An MHUv2 mailbox controller can provide up to 124 channel windows (each
+ * 32 bits wide), and the driver allows any combination of the two transport
+ * protocol modes, data-transfer and doorbell, to be used on those channel
+ * windows.
+ *
+ * The transport protocols should be specified in the device tree entry for the
+ * device. The transport protocols determine how the underlying hardware
+ * resources of the device are utilized when transmitting data. Refer to the
+ * device tree bindings of the ARM MHUv2 controller for more details.
+ *
+ * The number of registered mailbox channels depends on both the underlying
+ * hardware - mainly the number of channel windows implemented by the
+ * platform - and the selected transport protocols.
+ *
+ * The MHUv2 controller can work both as a sender and receiver, but the driver
+ * and the DT bindings support unidirectional transfers for better allocation of
+ * the channels. That is, this driver will be probed for two separate devices
+ * for each mailbox controller, a sender device and a receiver device.
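+ *
+ * Illustrative device tree fragment (a sketch: the node name, address and
+ * window counts below are assumptions, not taken from the binding document).
+ * Each <protocol windows> pair of "arm,mhuv2-protocols" describes one group
+ * of channel windows, with protocol 0 being doorbell and 1 data-transfer:
+ *
+ *	mhu_tx: mailbox@2b1f0000 {
+ *		compatible = "arm,mhuv2-tx", "arm,primecell";
+ *		reg = <0x2b1f0000 0x1000>;
+ *		clocks = <&clock 0>;
+ *		clock-names = "apb_pclk";
+ *		#mbox-cells = <2>;
+ *		arm,mhuv2-protocols = <1 5>, <0 2>;
+ *	};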
+ */
+
+#include <linux/amba/bus.h>
+#include <linux/interrupt.h>
+#include <linux/mailbox_controller.h>
+#include <linux/mailbox/arm_mhuv2_message.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+/* ====== MHUv2 Registers ====== */
+
+/* Maximum number of channel windows */
+#define MHUV2_CH_WN_MAX 124
+/* Number of combined interrupt status registers */
+#define MHUV2_CMB_INT_ST_REG_CNT 4
+#define MHUV2_STAT_BYTES (sizeof(u32))
+#define MHUV2_STAT_BITS (MHUV2_STAT_BYTES * __CHAR_BIT__)
+
+#define LSB_MASK(n) ((1 << (n * __CHAR_BIT__)) - 1)
+#define MHUV2_PROTOCOL_PROP "arm,mhuv2-protocols"
+
+/* Register Message Handling Unit Configuration fields */
+struct mhu_cfg_t {
+ u32 num_ch : 7;
+ u32 pad : 25;
+} __packed;
+
+/* Register Interrupt Status fields */
+struct int_st_t {
+ u32 nr2r : 1;
+ u32 r2nr : 1;
+ u32 pad : 30;
+} __packed;
+
+/* Register Interrupt Clear fields */
+struct int_clr_t {
+ u32 nr2r : 1;
+ u32 r2nr : 1;
+ u32 pad : 30;
+} __packed;
+
+/* Register Interrupt Enable fields */
+struct int_en_t {
+ u32 r2nr : 1;
+ u32 nr2r : 1;
+ u32 chcomb : 1;
+ u32 pad : 29;
+} __packed;
+
+/* Register Implementer Identification fields */
+struct iidr_t {
+ u32 implementer : 12;
+ u32 revision : 4;
+ u32 variant : 4;
+ u32 product_id : 12;
+} __packed;
+
+/* Register Architecture Identification Register fields */
+struct aidr_t {
+ u32 arch_minor_rev : 4;
+ u32 arch_major_rev : 4;
+ u32 pad : 24;
+} __packed;
+
+/* Sender Channel Window fields */
+struct mhu2_send_ch_wn_reg {
+ u32 stat;
+ u8 pad1[0x0C - 0x04];
+ u32 stat_set;
+ u32 int_st;
+ u32 int_clr;
+ u32 int_en;
+ u8 pad2[0x20 - 0x1C];
+} __packed;
+
+/* Sender frame register fields */
+struct mhu2_send_frame_reg {
+ struct mhu2_send_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
+ struct mhu_cfg_t mhu_cfg;
+ u32 resp_cfg;
+ u32 access_request;
+ u32 access_ready;
+ struct int_st_t int_st;
+ struct int_clr_t int_clr;
+ struct int_en_t int_en;
+ u32 reserved0;
+ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
+ u8 pad[0xFC8 - 0xFB0];
+ struct iidr_t iidr;
+ struct aidr_t aidr;
+} __packed;
+
+/* Receiver Channel Window fields */
+struct mhu2_recv_ch_wn_reg {
+ u32 stat;
+ u32 stat_masked;
+ u32 stat_clear;
+ u8 reserved0[0x10 - 0x0C];
+ u32 mask;
+ u32 mask_set;
+ u32 mask_clear;
+ u8 pad[0x20 - 0x1C];
+} __packed;
+
+/* Receiver frame register fields */
+struct mhu2_recv_frame_reg {
+ struct mhu2_recv_ch_wn_reg ch_wn[MHUV2_CH_WN_MAX];
+ struct mhu_cfg_t mhu_cfg;
+ u8 reserved0[0xF90 - 0xF84];
+ struct int_st_t int_st;
+ struct int_clr_t int_clr;
+ struct int_en_t int_en;
+ u32 pad;
+ u32 chcomb_int_st[MHUV2_CMB_INT_ST_REG_CNT];
+ u8 reserved2[0xFC8 - 0xFB0];
+ struct iidr_t iidr;
+ struct aidr_t aidr;
+} __packed;
+
+
+/* ====== MHUv2 data structures ====== */
+
+enum mhuv2_transport_protocol {
+ DOORBELL = 0,
+ DATA_TRANSFER = 1
+};
+
+enum mhuv2_frame {
+ RECEIVER_FRAME,
+ SENDER_FRAME
+};
+
+/**
+ * struct mhuv2 - MHUv2 mailbox controller data
+ *
+ * @mbox: Mailbox controller belonging to the MHU frame.
+ * @send/recv: Base address of the register mapping region.
+ * @frame: Frame type: RECEIVER_FRAME or SENDER_FRAME.
+ * @irq: Interrupt.
+ * @windows: Channel windows implemented by the platform.
+ * @minor: Minor version of the controller.
+ * @length: Length of the protocols array in bytes.
+ * @protocols: Raw protocol information, derived from device tree.
+ * @doorbell_pending_lock: spinlock required for correct operation of Tx
+ * interrupt for doorbells.
+ */
+struct mhuv2 {
+ struct mbox_controller mbox;
+ union {
+ struct mhu2_send_frame_reg __iomem *send;
+ struct mhu2_recv_frame_reg __iomem *recv;
+ };
+ enum mhuv2_frame frame;
+ unsigned int irq;
+ unsigned int windows;
+ unsigned int minor;
+ unsigned int length;
+ u32 *protocols;
+
+ spinlock_t doorbell_pending_lock;
+};
+
+#define mhu_from_mbox(_mbox) container_of(_mbox, struct mhuv2, mbox)
+
+/**
+ * struct mhuv2_protocol_ops - MHUv2 operations
+ *
+ * Each transport protocol must provide an implementation of the operations
+ * provided here.
+ *
+ * @rx_startup: Startup callback for receiver.
+ * @rx_shutdown: Shutdown callback for receiver.
+ * @read_data: Reads and clears newly available data.
+ * @tx_startup: Startup callback for transmitter.
+ * @tx_shutdown: Shutdown callback for transmitter.
+ * @last_tx_done: Report back if the last tx is completed or not.
+ * @send_data: Send data to the receiver.
+ */
+struct mhuv2_protocol_ops {
+ int (*rx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void (*rx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void *(*read_data)(struct mhuv2 *mhu, struct mbox_chan *chan);
+
+ void (*tx_startup)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ void (*tx_shutdown)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ int (*last_tx_done)(struct mhuv2 *mhu, struct mbox_chan *chan);
+ int (*send_data)(struct mhuv2 *mhu, struct mbox_chan *chan, void *arg);
+};
+
+/*
+ * MHUv2 mailbox channel's private information
+ *
+ * @ops: protocol specific ops for the channel.
+ * @ch_wn_idx: Channel window index allocated to the channel.
+ * @windows: Total number of windows consumed by the channel, only relevant
+ * in DATA_TRANSFER protocol.
+ * @doorbell: Doorbell bit number within the ch_wn_idx window, only relevant
+ * in DOORBELL protocol.
+ * @pending: Flag indicating pending doorbell interrupt, only relevant in
+ * DOORBELL protocol.
+ */
+struct mhuv2_mbox_chan_priv {
+ const struct mhuv2_protocol_ops *ops;
+ u32 ch_wn_idx;
+ union {
+ u32 windows;
+ struct {
+ u32 doorbell;
+ u32 pending;
+ };
+ };
+};
+
+/* Macro for reading a bitfield within a physically mapped packed struct */
+#define readl_relaxed_bitfield(_regptr, _field) \
+ ({ \
+ u32 _regval; \
+ _regval = readl_relaxed((_regptr)); \
+ (*(typeof((_regptr)))(&_regval))._field; \
+ })
+
+/* Macro for writing a bitfield within a physically mapped packed struct */
+#define writel_relaxed_bitfield(_value, _regptr, _field) \
+ ({ \
+ u32 _regval; \
+ _regval = readl_relaxed(_regptr); \
+ (*(typeof(_regptr))(&_regval))._field = _value; \
+ writel_relaxed(_regval, _regptr); \
+ })
+
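+/*
+ * Illustrative sketch (the helper name is hypothetical, not a path the
+ * driver takes as-is): using the accessor above to pull a bitfield out of
+ * a live register. The register and field are the ones defined earlier.
+ */
+static inline u32 mhuv2_num_ch_windows(struct mhuv2 *mhu)
+{
+	/* Number of channel windows implemented by this sender frame. */
+	return readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch);
+}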
+
+/* =================== Doorbell transport protocol operations =============== */
+
+static int mhuv2_doorbell_rx_startup(struct mhuv2 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_clear);
+ return 0;
+}
+
+static void mhuv2_doorbell_rx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].mask_set);
+}
+
+static void *mhuv2_doorbell_read_data(struct mhuv2 *mhu, struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->recv->ch_wn[priv->ch_wn_idx].stat_clear);
+ return NULL;
+}
+
+static int mhuv2_doorbell_last_tx_done(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return !(readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat) &
+ BIT(priv->doorbell));
+}
+
+static int mhuv2_doorbell_send_data(struct mhuv2 *mhu, struct mbox_chan *chan,
+ void *arg)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
+
+ priv->pending = 1;
+ writel_relaxed(BIT(priv->doorbell),
+ &mhu->send->ch_wn[priv->ch_wn_idx].stat_set);
+
+ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
+
+ return 0;
+}
+
+static const struct mhuv2_protocol_ops mhuv2_doorbell_ops = {
+ .rx_startup = mhuv2_doorbell_rx_startup,
+ .rx_shutdown = mhuv2_doorbell_rx_shutdown,
+ .read_data = mhuv2_doorbell_read_data,
+ .last_tx_done = mhuv2_doorbell_last_tx_done,
+ .send_data = mhuv2_doorbell_send_data,
+};
+#define IS_PROTOCOL_DOORBELL(_priv) (_priv->ops == &mhuv2_doorbell_ops)
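+
+/*
+ * In doorbell mode a single channel window is multiplexed into
+ * MHUV2_STAT_BITS independent doorbell channels, one per bit of the
+ * window's stat register: send_data() raises a doorbell by writing its
+ * bit to stat_set, and the receiver acknowledges it via stat_clear.
+ */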
+
+/* ============= Data transfer transport protocol operations ================ */
+
+static int mhuv2_data_transfer_rx_startup(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /*
+ * The protocol mandates that all but the last status register must be
+ * masked.
+ */
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_clear);
+ return 0;
+}
+
+static void mhuv2_data_transfer_rx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
+}
+
+static void *mhuv2_data_transfer_read_data(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ const int windows = priv->windows;
+ struct arm_mhuv2_mbox_msg *msg;
+ u32 *data;
+ int i, idx;
+
+ msg = kzalloc(sizeof(*msg) + windows * MHUV2_STAT_BYTES, GFP_KERNEL);
+ if (!msg)
+ return ERR_PTR(-ENOMEM);
+
+ data = msg->data = msg + 1;
+ msg->len = windows * MHUV2_STAT_BYTES;
+
+ /*
+ * Messages are expected in order of most significant word to least
+ * significant word. Refer to mhuv2_data_transfer_send_data() for more
+ * details.
+ *
+ * We also need to read the stat register instead of stat_masked, as we
+ * masked all but the last window.
+ *
+ * Last channel window must be cleared as the final operation. Upon
+ * clearing the last channel window register, which is unmasked in
+ * data-transfer protocol, the interrupt is de-asserted.
+ */
+ for (i = 0; i < windows; i++) {
+ idx = priv->ch_wn_idx + i;
+ data[windows - 1 - i] = readl_relaxed(&mhu->recv->ch_wn[idx].stat);
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[idx].stat_clear);
+ }
+
+ return msg;
+}
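+
+/*
+ * Note: the message allocated above is handed to the client via
+ * mbox_chan_received_data() in mhuv2_receiver_interrupt() and freed there
+ * right after, so clients must copy out any payload they need to keep.
+ */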
+
+static void mhuv2_data_transfer_tx_startup(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /* Enable interrupts only for the last window */
+ if (mhu->minor) {
+ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_clr);
+ writel_relaxed(0x1, &mhu->send->ch_wn[i].int_en);
+ }
+}
+
+static void mhuv2_data_transfer_tx_shutdown(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ if (mhu->minor)
+ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
+}
+
+static int mhuv2_data_transfer_last_tx_done(struct mhuv2 *mhu,
+ struct mbox_chan *chan)
+{
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int i = priv->ch_wn_idx + priv->windows - 1;
+
+ /* Just checking the last channel window should be enough */
+ return !readl_relaxed(&mhu->send->ch_wn[i].stat);
+}
+
+/*
+ * Message will be transmitted from most significant to least significant word.
+ * This is to allow for messages shorter than channel windows to still trigger
+ * the receiver interrupt which gets activated when the last stat register is
+ * written. As an example, a 6-word message is to be written on a 4-channel MHU
+ * connection: Registers marked with '*' are masked, and will not generate an
+ * interrupt on the receiver side once written.
+ *
+ * u32 *data = [0x00000001], [0x00000002], [0x00000003], [0x00000004],
+ * [0x00000005], [0x00000006]
+ *
+ * ROUND 1:
+ * stat reg To write Write sequence
+ * [ stat 3 ] <- [0x00000001] 4 <- triggers interrupt on receiver
+ * [ stat 2 ] <- [0x00000002] 3
+ * [ stat 1 ] <- [0x00000003] 2
+ * [ stat 0 ] <- [0x00000004] 1
+ *
+ * data += 4 // Increment data pointer by number of stat regs
+ *
+ * ROUND 2:
+ * stat reg To write Write sequence
+ * [ stat 3 ] <- [0x00000005] 2 <- triggers interrupt on receiver
+ * [ stat 2 ] <- [0x00000006] 1
+ * [ stat 1 ] <- [0x00000000]
+ * [ stat 0 ] <- [0x00000000]
+ */
+static int mhuv2_data_transfer_send_data(struct mhuv2 *mhu,
+ struct mbox_chan *chan, void *arg)
+{
+ const struct arm_mhuv2_mbox_msg *msg = arg;
+ int bytes_left = msg->len, bytes_to_send, bytes_in_round, i;
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+ int windows = priv->windows;
+ u32 *data = msg->data, word;
+
+ while (bytes_left) {
+ if (!data[0]) {
+ dev_err(mhu->mbox.dev, "Data aligned at first window can't be zero to guarantee interrupt generation at receiver\n");
+ return -EINVAL;
+ }
+
+ while (!mhuv2_data_transfer_last_tx_done(mhu, chan))
+ continue;
+
+ bytes_in_round = min(bytes_left, (int)(windows * MHUV2_STAT_BYTES));
+
+ for (i = windows - 1; i >= 0; i--) {
+ /* Skip this window if no data is left for it in this round */
+ if (unlikely(bytes_in_round <= i * MHUV2_STAT_BYTES))
+ continue;
+
+ word = data[i];
+ bytes_to_send = bytes_in_round & (MHUV2_STAT_BYTES - 1);
+ if (unlikely(bytes_to_send))
+ word &= LSB_MASK(bytes_to_send);
+ else
+ bytes_to_send = MHUV2_STAT_BYTES;
+
+ writel_relaxed(word, &mhu->send->ch_wn[priv->ch_wn_idx + windows - 1 - i].stat_set);
+ bytes_left -= bytes_to_send;
+ bytes_in_round -= bytes_to_send;
+ }
+
+ data += windows;
+ }
+
+ return 0;
+}
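+
+/*
+ * Client-side sketch (illustrative, assuming the message layout from
+ * include/linux/mailbox/arm_mhuv2_message.h used with this driver):
+ *
+ *   struct arm_mhuv2_mbox_msg msg = {
+ *       .data = buf,  // u32 words, most significant word first
+ *       .len = len,   // length in bytes
+ *   };
+ *   ret = mbox_send_message(chan, &msg);
+ *
+ * As enforced above, the first data word of each round, which lands in
+ * the unmasked window, must be non-zero.
+ */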
+
+static const struct mhuv2_protocol_ops mhuv2_data_transfer_ops = {
+ .rx_startup = mhuv2_data_transfer_rx_startup,
+ .rx_shutdown = mhuv2_data_transfer_rx_shutdown,
+ .read_data = mhuv2_data_transfer_read_data,
+ .tx_startup = mhuv2_data_transfer_tx_startup,
+ .tx_shutdown = mhuv2_data_transfer_tx_shutdown,
+ .last_tx_done = mhuv2_data_transfer_last_tx_done,
+ .send_data = mhuv2_data_transfer_send_data,
+};
+
+/* Interrupt handlers */
+
+static struct mbox_chan *get_irq_chan_comb(struct mhuv2 *mhu, u32 __iomem *reg)
+{
+ struct mbox_chan *chans = mhu->mbox.chans;
+ int channel = 0, i, offset = 0, windows, protocol, ch_wn;
+ u32 stat;
+
+ for (i = 0; i < MHUV2_CMB_INT_ST_REG_CNT; i++) {
+ stat = readl_relaxed(reg + i);
+ if (!stat)
+ continue;
+
+ ch_wn = i * MHUV2_STAT_BITS + __builtin_ctz(stat);
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (ch_wn >= offset + windows) {
+ if (protocol == DOORBELL)
+ channel += MHUV2_STAT_BITS * windows;
+ else
+ channel++;
+
+ offset += windows;
+ continue;
+ }
+
+ /* Return first chan of the window in doorbell mode */
+ if (protocol == DOORBELL)
+ channel += MHUV2_STAT_BITS * (ch_wn - offset);
+
+ return &chans[channel];
+ }
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static irqreturn_t mhuv2_sender_interrupt(int irq, void *data)
+{
+ struct mhuv2 *mhu = data;
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv2_mbox_chan_priv *priv;
+ struct mbox_chan *chan;
+ unsigned long flags;
+ int i, found = 0;
+ u32 stat;
+
+ chan = get_irq_chan_comb(mhu, mhu->send->chcomb_int_st);
+ if (IS_ERR(chan)) {
+ dev_warn(dev, "Failed to find channel for the Tx interrupt\n");
+ return IRQ_NONE;
+ }
+ priv = chan->con_priv;
+
+ if (!IS_PROTOCOL_DOORBELL(priv)) {
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx + priv->windows - 1].int_clr);
+
+ if (chan->cl) {
+ mbox_chan_txdone(chan, 0);
+ return IRQ_HANDLED;
+ }
+
+ dev_warn(dev, "Tx interrupt Received on channel (%u) not currently attached to a mailbox client\n",
+ priv->ch_wn_idx);
+ return IRQ_NONE;
+ }
+
+ /* Clear the interrupt first, so we don't miss any doorbell later */
+ writel_relaxed(1, &mhu->send->ch_wn[priv->ch_wn_idx].int_clr);
+
+ /*
+ * In doorbell mode, make sure no new transmissions happen while the
+ * interrupt handler is trying to find the finished doorbell tx
+ * operations, else we may think some of the transfers completed
+ * before they actually did.
+ */
+ spin_lock_irqsave(&mhu->doorbell_pending_lock, flags);
+
+ /*
+ * In case of doorbell mode, the first channel of the window is returned
+ * by get_irq_chan_comb(). Find all the pending channels here.
+ */
+ stat = readl_relaxed(&mhu->send->ch_wn[priv->ch_wn_idx].stat);
+
+ for (i = 0; i < MHUV2_STAT_BITS; i++) {
+ priv = chan[i].con_priv;
+
+ /* Find cases where pending was 1, but stat's bit is cleared */
+ if (priv->pending ^ ((stat >> i) & 0x1)) {
+ BUG_ON(!priv->pending);
+
+ if (!chan[i].cl) {
+ dev_warn(dev, "Tx interrupt received on doorbell (%u : %u) channel not currently attached to a mailbox client\n",
+ priv->ch_wn_idx, i);
+ continue;
+ }
+
+ mbox_chan_txdone(&chan[i], 0);
+ priv->pending = 0;
+ found++;
+ }
+ }
+
+ spin_unlock_irqrestore(&mhu->doorbell_pending_lock, flags);
+
+ if (!found) {
+ /*
+ * We may have already processed the doorbell in the previous
+ * iteration if the interrupt came right after we cleared it but
+ * before we read the stat register.
+ */
+ dev_dbg(dev, "Couldn't find the doorbell (%u) for the Tx interrupt\n",
+ priv->ch_wn_idx);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct mbox_chan *get_irq_chan_comb_rx(struct mhuv2 *mhu)
+{
+ struct mhuv2_mbox_chan_priv *priv;
+ struct mbox_chan *chan;
+ u32 stat;
+
+ chan = get_irq_chan_comb(mhu, mhu->recv->chcomb_int_st);
+ if (IS_ERR(chan))
+ return chan;
+
+ priv = chan->con_priv;
+ if (!IS_PROTOCOL_DOORBELL(priv))
+ return chan;
+
+ /*
+ * In case of doorbell mode, the first channel of the window is returned
+ * by the routine. Find the exact channel here.
+ */
+ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
+ BUG_ON(!stat);
+
+ return chan + __builtin_ctz(stat);
+}
+
+static struct mbox_chan *get_irq_chan_stat_rx(struct mhuv2 *mhu)
+{
+ struct mbox_chan *chans = mhu->mbox.chans;
+ struct mhuv2_mbox_chan_priv *priv;
+ u32 stat;
+ int i = 0;
+
+ while (i < mhu->mbox.num_chans) {
+ priv = chans[i].con_priv;
+ stat = readl_relaxed(&mhu->recv->ch_wn[priv->ch_wn_idx].stat_masked);
+
+ if (stat) {
+ if (IS_PROTOCOL_DOORBELL(priv))
+ i += __builtin_ctz(stat);
+ return &chans[i];
+ }
+
+ i += IS_PROTOCOL_DOORBELL(priv) ? MHUV2_STAT_BITS : 1;
+ }
+
+ return ERR_PTR(-EIO);
+}
+
+static struct mbox_chan *get_irq_chan_rx(struct mhuv2 *mhu)
+{
+ if (!mhu->minor)
+ return get_irq_chan_stat_rx(mhu);
+
+ return get_irq_chan_comb_rx(mhu);
+}
+
+static irqreturn_t mhuv2_receiver_interrupt(int irq, void *arg)
+{
+ struct mhuv2 *mhu = arg;
+ struct mbox_chan *chan = get_irq_chan_rx(mhu);
+ struct device *dev = mhu->mbox.dev;
+ struct mhuv2_mbox_chan_priv *priv;
+ int ret = IRQ_NONE;
+ void *data;
+
+ if (IS_ERR(chan)) {
+ dev_warn(dev, "Failed to find channel for the rx interrupt\n");
+ return IRQ_NONE;
+ }
+ priv = chan->con_priv;
+
+ /* Read and clear the data first */
+ data = priv->ops->read_data(mhu, chan);
+
+ if (!chan->cl) {
+ dev_warn(dev, "Received data on channel (%u) not currently attached to a mailbox client\n",
+ priv->ch_wn_idx);
+ } else if (IS_ERR(data)) {
+ dev_err(dev, "Failed to read data: %lu\n", PTR_ERR(data));
+ } else {
+ mbox_chan_received_data(chan, data);
+ ret = IRQ_HANDLED;
+ }
+
+ if (!IS_ERR(data))
+ kfree(data);
+ return ret;
+}
+
+/* Sender and receiver ops */
+static bool mhuv2_sender_last_tx_done(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return priv->ops->last_tx_done(mhu, chan);
+}
+
+static int mhuv2_sender_send_data(struct mbox_chan *chan, void *data)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (!priv->ops->last_tx_done(mhu, chan))
+ return -EBUSY;
+
+ return priv->ops->send_data(mhu, chan, data);
+}
+
+static int mhuv2_sender_startup(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (priv->ops->tx_startup)
+ priv->ops->tx_startup(mhu, chan);
+ return 0;
+}
+
+static void mhuv2_sender_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ if (priv->ops->tx_shutdown)
+ priv->ops->tx_shutdown(mhu, chan);
+}
+
+static const struct mbox_chan_ops mhuv2_sender_ops = {
+ .send_data = mhuv2_sender_send_data,
+ .startup = mhuv2_sender_startup,
+ .shutdown = mhuv2_sender_shutdown,
+ .last_tx_done = mhuv2_sender_last_tx_done,
+};
+
+static int mhuv2_receiver_startup(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ return priv->ops->rx_startup(mhu, chan);
+}
+
+static void mhuv2_receiver_shutdown(struct mbox_chan *chan)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(chan->mbox);
+ struct mhuv2_mbox_chan_priv *priv = chan->con_priv;
+
+ priv->ops->rx_shutdown(mhu, chan);
+}
+
+static int mhuv2_receiver_send_data(struct mbox_chan *chan, void *data)
+{
+ dev_err(chan->mbox->dev,
+ "Trying to transmit on a receiver MHU frame\n");
+ return -EIO;
+}
+
+static bool mhuv2_receiver_last_tx_done(struct mbox_chan *chan)
+{
+ dev_err(chan->mbox->dev, "Trying to Tx poll on a receiver MHU frame\n");
+ return true;
+}
+
+static const struct mbox_chan_ops mhuv2_receiver_ops = {
+ .send_data = mhuv2_receiver_send_data,
+ .startup = mhuv2_receiver_startup,
+ .shutdown = mhuv2_receiver_shutdown,
+ .last_tx_done = mhuv2_receiver_last_tx_done,
+};
+
+static struct mbox_chan *mhuv2_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *pa)
+{
+ struct mhuv2 *mhu = mhu_from_mbox(mbox);
+ struct mbox_chan *chans = mbox->chans;
+ int channel = 0, i, offset, doorbell, protocol, windows;
+
+ if (pa->args_count != 2)
+ return ERR_PTR(-EINVAL);
+
+ offset = pa->args[0];
+ doorbell = pa->args[1];
+ if (doorbell >= MHUV2_STAT_BITS)
+ goto out;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (protocol == DOORBELL) {
+ if (offset < windows)
+ return &chans[channel + MHUV2_STAT_BITS * offset + doorbell];
+
+ channel += MHUV2_STAT_BITS * windows;
+ offset -= windows;
+ } else {
+ if (offset == 0) {
+ if (doorbell)
+ goto out;
+
+ return &chans[channel];
+ }
+
+ channel++;
+ offset--;
+ }
+ }
+
+out:
+ dev_err(mbox->dev, "Couldn't xlate to a valid channel (%d: %d)\n",
+ pa->args[0], doorbell);
+ return ERR_PTR(-ENODEV);
+}
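+
+/*
+ * Device tree consumer sketch (illustrative): the two mbox cells are the
+ * window-group offset and, for doorbell groups, the doorbell number
+ * within the group:
+ *
+ *   mboxes = <&mhu_tx 1 0>;  // second group, doorbell/channel 0
+ */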
+
+static int mhuv2_verify_protocol(struct mhuv2 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ int protocol, windows, channels = 0, total_windows = 0, i;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (!windows) {
+ dev_err(dev, "Window size can't be zero (%d)\n", i);
+ return -EINVAL;
+ }
+ total_windows += windows;
+
+ if (protocol == DOORBELL) {
+ channels += MHUV2_STAT_BITS * windows;
+ } else if (protocol == DATA_TRANSFER) {
+ channels++;
+ } else {
+ dev_err(dev, "Invalid protocol (%d) present in %s property at index %d\n",
+ protocol, MHUV2_PROTOCOL_PROP, i);
+ return -EINVAL;
+ }
+ }
+
+ if (total_windows > mhu->windows) {
+ dev_err(dev, "Channel windows can't be more than what's implemented by the hardware ( %d: %d)\n",
+ total_windows, mhu->windows);
+ return -EINVAL;
+ }
+
+ mhu->mbox.num_chans = channels;
+ return 0;
+}
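+
+/*
+ * Illustrative protocols property (pairs of <protocol window-count>,
+ * assuming DOORBELL = 0 and DATA_TRANSFER = 1 as defined earlier in this
+ * file, and MHUV2_PROTOCOL_PROP naming the "arm,mhuv2-protocols"
+ * property): two doorbell windows followed by a three-window
+ * data-transfer channel would be described as
+ *
+ *   arm,mhuv2-protocols = <0 2>, <1 3>;
+ */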
+
+static int mhuv2_allocate_channels(struct mhuv2 *mhu)
+{
+ struct mbox_controller *mbox = &mhu->mbox;
+ struct mhuv2_mbox_chan_priv *priv;
+ struct device *dev = mbox->dev;
+ struct mbox_chan *chans;
+ int protocol, windows = 0, next_window = 0, i, j, k;
+
+ chans = devm_kcalloc(dev, mbox->num_chans, sizeof(*chans), GFP_KERNEL);
+ if (!chans)
+ return -ENOMEM;
+
+ mbox->chans = chans;
+
+ for (i = 0; i < mhu->length; i += 2) {
+ next_window += windows;
+
+ protocol = mhu->protocols[i];
+ windows = mhu->protocols[i + 1];
+
+ if (protocol == DATA_TRANSFER) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_wn_idx = next_window;
+ priv->ops = &mhuv2_data_transfer_ops;
+ priv->windows = windows;
+ chans++->con_priv = priv;
+ continue;
+ }
+
+ for (j = 0; j < windows; j++) {
+ for (k = 0; k < MHUV2_STAT_BITS; k++) {
+ priv = devm_kmalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->ch_wn_idx = next_window + j;
+ priv->ops = &mhuv2_doorbell_ops;
+ priv->doorbell = k;
+ chans++->con_priv = priv;
+ }
+
+ /*
+ * Permanently enable interrupt as we can't
+ * control it per doorbell.
+ */
+ if (mhu->frame == SENDER_FRAME && mhu->minor)
+ writel_relaxed(0x1, &mhu->send->ch_wn[priv->ch_wn_idx].int_en);
+ }
+ }
+
+ /* Make sure we have initialized all channels */
+ BUG_ON(chans - mbox->chans != mbox->num_chans);
+
+ return 0;
+}
+
+static int mhuv2_parse_channels(struct mhuv2 *mhu)
+{
+ struct device *dev = mhu->mbox.dev;
+ const struct device_node *np = dev->of_node;
+ int ret, count;
+ u32 *protocols;
+
+ count = of_property_count_u32_elems(np, MHUV2_PROTOCOL_PROP);
+ if (count <= 0 || count % 2) {
+ dev_err(dev, "Invalid %s property (%d)\n", MHUV2_PROTOCOL_PROP,
+ count);
+ return -EINVAL;
+ }
+
+ protocols = devm_kmalloc_array(dev, count, sizeof(*protocols), GFP_KERNEL);
+ if (!protocols)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(np, MHUV2_PROTOCOL_PROP, protocols, count);
+ if (ret) {
+ dev_err(dev, "Failed to read %s property: %d\n",
+ MHUV2_PROTOCOL_PROP, ret);
+ return ret;
+ }
+
+ mhu->protocols = protocols;
+ mhu->length = count;
+
+ ret = mhuv2_verify_protocol(mhu);
+ if (ret)
+ return ret;
+
+ return mhuv2_allocate_channels(mhu);
+}
+
+static int mhuv2_tx_init(struct amba_device *adev, struct mhuv2 *mhu,
+ void __iomem *reg)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->frame = SENDER_FRAME;
+ mhu->mbox.ops = &mhuv2_sender_ops;
+ mhu->send = reg;
+
+ mhu->windows = readl_relaxed_bitfield(&mhu->send->mhu_cfg, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->send->aidr, arch_minor_rev);
+
+ spin_lock_init(&mhu->doorbell_pending_lock);
+
+ /*
+ * For minor version 1 and forward, tx interrupt is provided by
+ * the controller.
+ */
+ if (mhu->minor && adev->irq[0]) {
+ ret = devm_request_threaded_irq(dev, adev->irq[0], NULL,
+ mhuv2_sender_interrupt,
+ IRQF_ONESHOT, "mhuv2-tx", mhu);
+ if (ret) {
+ dev_err(dev, "Failed to request tx IRQ, fallback to polling mode: %d\n",
+ ret);
+ } else {
+ mhu->mbox.txdone_irq = true;
+ mhu->mbox.txdone_poll = false;
+ mhu->irq = adev->irq[0];
+
+ writel_relaxed_bitfield(1, &mhu->send->int_en, chcomb);
+
+ /* Disable all channel interrupts */
+ for (i = 0; i < mhu->windows; i++)
+ writel_relaxed(0x0, &mhu->send->ch_wn[i].int_en);
+
+ goto out;
+ }
+ }
+
+ mhu->mbox.txdone_irq = false;
+ mhu->mbox.txdone_poll = true;
+ mhu->mbox.txpoll_period = 1;
+
+out:
+ /* Wait for receiver to be ready */
+ writel_relaxed(0x1, &mhu->send->access_request);
+ while (!readl_relaxed(&mhu->send->access_ready))
+ continue;
+
+ return 0;
+}
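+
+/*
+ * With a working Tx IRQ the framework is told txdone_irq = true and
+ * completion is signalled from mhuv2_sender_interrupt(); otherwise the
+ * framework polls last_tx_done() every txpoll_period (1 ms here).
+ */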
+
+static int mhuv2_rx_init(struct amba_device *adev, struct mhuv2 *mhu,
+ void __iomem *reg)
+{
+ struct device *dev = mhu->mbox.dev;
+ int ret, i;
+
+ mhu->frame = RECEIVER_FRAME;
+ mhu->mbox.ops = &mhuv2_receiver_ops;
+ mhu->recv = reg;
+
+ mhu->windows = readl_relaxed_bitfield(&mhu->recv->mhu_cfg, num_ch);
+ mhu->minor = readl_relaxed_bitfield(&mhu->recv->aidr, arch_minor_rev);
+
+ mhu->irq = adev->irq[0];
+ if (!mhu->irq) {
+ dev_err(dev, "Missing receiver IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_threaded_irq(dev, mhu->irq, NULL,
+ mhuv2_receiver_interrupt, IRQF_ONESHOT,
+ "mhuv2-rx", mhu);
+ if (ret) {
+ dev_err(dev, "Failed to request rx IRQ\n");
+ return ret;
+ }
+
+ /* Mask all the channel windows */
+ for (i = 0; i < mhu->windows; i++)
+ writel_relaxed(0xFFFFFFFF, &mhu->recv->ch_wn[i].mask_set);
+
+ if (mhu->minor)
+ writel_relaxed_bitfield(1, &mhu->recv->int_en, chcomb);
+
+ return 0;
+}
+
+static int mhuv2_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ const struct device_node *np = dev->of_node;
+ struct mhuv2 *mhu;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ reg = devm_of_iomap(dev, dev->of_node, 0, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ mhu = devm_kzalloc(dev, sizeof(*mhu), GFP_KERNEL);
+ if (!mhu)
+ return -ENOMEM;
+
+ mhu->mbox.dev = dev;
+ mhu->mbox.of_xlate = mhuv2_mbox_of_xlate;
+
+ if (of_device_is_compatible(np, "arm,mhuv2-tx"))
+ ret = mhuv2_tx_init(adev, mhu, reg);
+ else if (of_device_is_compatible(np, "arm,mhuv2-rx"))
+ ret = mhuv2_rx_init(adev, mhu, reg);
+ else
+ dev_err(dev, "Invalid compatible property\n");
+
+ if (ret)
+ return ret;
+
+ /* Channel windows can't be 0 */
+ BUG_ON(!mhu->windows);
+
+ ret = mhuv2_parse_channels(mhu);
+ if (ret)
+ return ret;
+
+ amba_set_drvdata(adev, mhu);
+
+ ret = devm_mbox_controller_register(dev, &mhu->mbox);
+ if (ret)
+ dev_err(dev, "failed to register ARM MHUv2 driver %d\n", ret);
+
+ return ret;
+}
+
+static int mhuv2_remove(struct amba_device *adev)
+{
+ struct mhuv2 *mhu = amba_get_drvdata(adev);
+
+ if (mhu->frame == SENDER_FRAME)
+ writel_relaxed(0x0, &mhu->send->access_request);
+
+ return 0;
+}
+
+static struct amba_id mhuv2_ids[] = {
+ {
+ /* 2.0 */
+ .id = 0xbb0d1,
+ .mask = 0xfffff,
+ },
+ {
+ /* 2.1 */
+ .id = 0xbb076,
+ .mask = 0xfffff,
+ },
+ { 0, 0 },
+};
+MODULE_DEVICE_TABLE(amba, mhuv2_ids);
+
+static struct amba_driver mhuv2_driver = {
+ .drv = {
+ .name = "arm-mhuv2",
+ },
+ .id_table = mhuv2_ids,
+ .probe = mhuv2_probe,
+ .remove = mhuv2_remove,
+};
+module_amba_driver(mhuv2_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ARM MHUv2 Driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
+MODULE_AUTHOR("Tushar Khandelwal <tushar.khandelwal@arm.com>");
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index ef966887aa15..b84e0587937c 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -144,11 +144,11 @@ static irqreturn_t stm32_ipcc_tx_irq(int irq, void *data)
static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
{
- unsigned int chan = (unsigned int)link->con_priv;
+ unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
- dev_dbg(ipcc->controller.dev, "%s: chan:%d\n", __func__, chan);
+ dev_dbg(ipcc->controller.dev, "%s: chan:%lu\n", __func__, chan);
/* set channel n occupied */
stm32_ipcc_set_bits(&ipcc->lock, ipcc->reg_proc + IPCC_XSCR,
@@ -163,7 +163,7 @@ static int stm32_ipcc_send_data(struct mbox_chan *link, void *data)
static int stm32_ipcc_startup(struct mbox_chan *link)
{
- unsigned int chan = (unsigned int)link->con_priv;
+ unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
int ret;
@@ -183,7 +183,7 @@ static int stm32_ipcc_startup(struct mbox_chan *link)
static void stm32_ipcc_shutdown(struct mbox_chan *link)
{
- unsigned int chan = (unsigned int)link->con_priv;
+ unsigned long chan = (unsigned long)link->con_priv;
struct stm32_ipcc *ipcc = container_of(link->mbox, struct stm32_ipcc,
controller);
@@ -206,7 +206,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct stm32_ipcc *ipcc;
struct resource *res;
- unsigned int i;
+ unsigned long i;
int ret;
u32 ip_ver;
static const char * const irq_name[] = {"rx", "tx"};
@@ -257,9 +257,6 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
for (i = 0; i < IPCC_IRQ_NUM; i++) {
ipcc->irqs[i] = platform_get_irq_byname(pdev, irq_name[i]);
if (ipcc->irqs[i] < 0) {
- if (ipcc->irqs[i] != -EPROBE_DEFER)
- dev_err(dev, "no IRQ specified %s\n",
- irq_name[i]);
ret = ipcc->irqs[i];
goto err_clk;
}
@@ -268,7 +265,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
irq_thread[i], IRQF_ONESHOT,
dev_name(dev), ipcc);
if (ret) {
- dev_err(dev, "failed to request irq %d (%d)\n", i, ret);
+ dev_err(dev, "failed to request irq %lu (%d)\n", i, ret);
goto err_clk;
}
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30ba3573626c..b7e2d9666614 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -463,6 +463,15 @@ config DM_MULTIPATH_HST
If unsure, say N.
+config DM_MULTIPATH_IOA
+ tristate "I/O Path Selector based on CPU submission"
+ depends on DM_MULTIPATH
+ help
+ This path selector selects the path based on the CPU the IO is
+ executed on and the CPU-to-path mapping set up at path addition time.
+
+ If unsure, say N.
+
config DM_DELAY
tristate "I/O delaying target"
depends on BLK_DEV_DM
@@ -530,11 +539,22 @@ config DM_VERITY_VERIFY_ROOTHASH_SIG
bool "Verity data device root hash signature verification support"
depends on DM_VERITY
select SYSTEM_DATA_VERIFICATION
- help
+ help
Add ability for dm-verity device to be validated if the
pre-generated tree of cryptographic checksums passed has a pkcs#7
signature file that can validate the roothash of the tree.
+ By default, rely on the builtin trusted keyring.
+
+ If unsure, say N.
+
+config DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING
+ bool "Verity data device root hash signature verification with secondary keyring"
+ depends on DM_VERITY_VERIFY_ROOTHASH_SIG
+ depends on SECONDARY_TRUSTED_KEYRING
+ help
+ Rely on the secondary trusted keyring to verify dm-verity signatures.
+
If unsure, say N.
config DM_VERITY_FEC
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 6d3e234dc46a..ef7ddc27685c 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -7,23 +7,28 @@ dm-mod-y += dm.o dm-table.o dm-target.o dm-linear.o dm-stripe.o \
dm-ioctl.o dm-io.o dm-kcopyd.o dm-sysfs.o dm-stats.o \
dm-rq.o
dm-multipath-y += dm-path-selector.o dm-mpath.o
+dm-historical-service-time-y += dm-ps-historical-service-time.o
+dm-io-affinity-y += dm-ps-io-affinity.o
+dm-queue-length-y += dm-ps-queue-length.o
+dm-round-robin-y += dm-ps-round-robin.o
+dm-service-time-y += dm-ps-service-time.o
dm-snapshot-y += dm-snap.o dm-exception-store.o dm-snap-transient.o \
dm-snap-persistent.o
dm-mirror-y += dm-raid1.o
-dm-log-userspace-y \
- += dm-log-userspace-base.o dm-log-userspace-transfer.o
+dm-log-userspace-y += dm-log-userspace-base.o dm-log-userspace-transfer.o
dm-bio-prison-y += dm-bio-prison-v1.o dm-bio-prison-v2.o
dm-thin-pool-y += dm-thin.o dm-thin-metadata.o
dm-cache-y += dm-cache-target.o dm-cache-metadata.o dm-cache-policy.o \
dm-cache-background-tracker.o
-dm-cache-smq-y += dm-cache-policy-smq.o
+dm-cache-smq-y += dm-cache-policy-smq.o
dm-ebs-y += dm-ebs-target.o
dm-era-y += dm-era-target.o
dm-clone-y += dm-clone-target.o dm-clone-metadata.o
dm-verity-y += dm-verity-target.o
+dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
+
md-mod-y += md.o md-bitmap.o
raid456-y += raid5.o raid5-cache.o raid5-ppl.o
-dm-zoned-y += dm-zoned-target.o dm-zoned-metadata.o dm-zoned-reclaim.o
linear-y += md-linear.o
multipath-y += md-multipath.o
faulty-y += md-faulty.o
@@ -59,14 +64,15 @@ obj-$(CONFIG_DM_MULTIPATH) += dm-multipath.o dm-round-robin.o
obj-$(CONFIG_DM_MULTIPATH_QL) += dm-queue-length.o
obj-$(CONFIG_DM_MULTIPATH_ST) += dm-service-time.o
obj-$(CONFIG_DM_MULTIPATH_HST) += dm-historical-service-time.o
+obj-$(CONFIG_DM_MULTIPATH_IOA) += dm-io-affinity.o
obj-$(CONFIG_DM_SWITCH) += dm-switch.o
obj-$(CONFIG_DM_SNAPSHOT) += dm-snapshot.o
-obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
+obj-$(CONFIG_DM_PERSISTENT_DATA) += persistent-data/
obj-$(CONFIG_DM_MIRROR) += dm-mirror.o dm-log.o dm-region-hash.o
obj-$(CONFIG_DM_LOG_USERSPACE) += dm-log-userspace.o
obj-$(CONFIG_DM_ZERO) += dm-zero.o
-obj-$(CONFIG_DM_RAID) += dm-raid.o
-obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
+obj-$(CONFIG_DM_RAID) += dm-raid.o
+obj-$(CONFIG_DM_THIN_PROVISIONING) += dm-thin-pool.o
obj-$(CONFIG_DM_VERITY) += dm-verity.o
obj-$(CONFIG_DM_CACHE) += dm-cache.o
obj-$(CONFIG_DM_CACHE_SMQ) += dm-cache-smq.o
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6469223f0b77..d636b7b2d070 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -17,7 +17,7 @@ struct feature {
};
static struct feature feature_list[] = {
- {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LARGE_BUCKET,
+ {BCH_FEATURE_INCOMPAT, BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE,
"large_bucket"},
{0, 0, 0 },
};
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index a1653c478041..84fc2c0f0101 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -13,11 +13,15 @@
/* Feature set definition */
/* Incompat feature set */
-#define BCH_FEATURE_INCOMPAT_LARGE_BUCKET 0x0001 /* 32bit bucket size */
+/* 32bit bucket size, obsoleted */
+#define BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET 0x0001
+/* real bucket size is (1 << bucket_size) */
+#define BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE 0x0002
-#define BCH_FEATURE_COMPAT_SUUP 0
-#define BCH_FEATURE_RO_COMPAT_SUUP 0
-#define BCH_FEATURE_INCOMPAT_SUUP BCH_FEATURE_INCOMPAT_LARGE_BUCKET
+#define BCH_FEATURE_COMPAT_SUPP 0
+#define BCH_FEATURE_RO_COMPAT_SUPP 0
+#define BCH_FEATURE_INCOMPAT_SUPP (BCH_FEATURE_INCOMPAT_OBSO_LARGE_BUCKET| \
+ BCH_FEATURE_INCOMPAT_LOG_LARGE_BUCKET_SIZE)
#define BCH_HAS_COMPAT_FEATURE(sb, mask) \
((sb)->feature_compat & (mask))
@@ -77,7 +81,23 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \
~BCH##_FEATURE_INCOMPAT_##flagname; \
}
-BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(obso_large_bucket, OBSO_LARGE_BUCKET);
+BCH_FEATURE_INCOMPAT_FUNCS(large_bucket, LOG_LARGE_BUCKET_SIZE);
+
+static inline bool bch_has_unknown_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_compat & ~BCH_FEATURE_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_ro_compat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_ro_compat & ~BCH_FEATURE_RO_COMPAT_SUPP) != 0);
+}
+
+static inline bool bch_has_unknown_incompat_features(struct cache_sb *sb)
+{
+ return ((sb->feature_incompat & ~BCH_FEATURE_INCOMPAT_SUPP) != 0);
+}
int bch_print_cache_set_feature_compat(struct cache_set *c, char *buf, int size);
int bch_print_cache_set_feature_ro_compat(struct cache_set *c, char *buf, int size);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 0e06d721cd8e..2047a9cccdb5 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -64,9 +64,25 @@ static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s
{
unsigned int bucket_size = le16_to_cpu(s->bucket_size);
- if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
- bch_has_feature_large_bucket(sb))
- bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;
+ if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
+ if (bch_has_feature_large_bucket(sb)) {
+ unsigned int max, order;
+
+ max = sizeof(unsigned int) * BITS_PER_BYTE - 1;
+ order = le16_to_cpu(s->bucket_size);
+ /*
+ * The bcache tool makes sure the overflow won't
+ * happen; an error message here is enough.
+ */
+ if (order > max)
+ pr_err("Bucket size (1 << %u) overflows\n",
+ order);
+ bucket_size = 1 << order;
+ } else if (bch_has_feature_obso_large_bucket(sb)) {
+ bucket_size +=
+ le16_to_cpu(s->obso_bucket_size_hi) << 16;
+ }
+ }
return bucket_size;
}
@@ -228,6 +244,20 @@ static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
sb->feature_compat = le64_to_cpu(s->feature_compat);
sb->feature_incompat = le64_to_cpu(s->feature_incompat);
sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
+
+ /* Check incompatible features */
+ err = "Unsupported compatible feature found";
+ if (bch_has_unknown_compat_features(sb))
+ goto err;
+
+ err = "Unsupported read-only compatible feature found";
+ if (bch_has_unknown_ro_compat_features(sb))
+ goto err;
+
+ err = "Unsupported incompatible feature found";
+ if (bch_has_unknown_incompat_features(sb))
+ goto err;
+
err = read_super_common(sb, bdev, s);
if (err)
goto err;
@@ -1302,6 +1332,12 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
bcache_device_link(&dc->disk, c, "bdev");
atomic_inc(&c->attached_dev_nr);
+ if (bch_has_feature_obso_large_bucket(&(c->cache->sb))) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(dc->disk.disk, 1);
+ }
+
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
@@ -1524,6 +1560,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_link(d, c, "volume");
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb)) {
+ pr_err("The obsoleted large bucket layout is unsupported, set the bcache device into read-only\n");
+ pr_err("Please update to the latest bcache-tools to create the cache device\n");
+ set_disk_ro(d->disk, 1);
+ }
+
return 0;
err:
kobject_put(&d->kobj);
@@ -2083,6 +2125,9 @@ static int run_cache_set(struct cache_set *c)
c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
bcache_write_super(c);
+ if (bch_has_feature_obso_large_bucket(&c->cache->sb))
+ pr_err("Detect obsoleted large bucket layout, all attached bcache device will be read-only\n");
+
list_for_each_entry_safe(dc, t, &uncached_devices, list)
bch_cached_dev_attach(dc, c, NULL);
@@ -2535,8 +2580,6 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
else
err = "device busy";
mutex_unlock(&bch_register_lock);
- if (!IS_ERR(bdev))
- bdput(bdev);
if (attr == &ksysfs_register_quiet)
goto done;
}
@@ -2646,8 +2689,8 @@ static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
}
list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
+ char *pdev_set_uuid = pdev->dc->sb.set_uuid;
list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
- char *pdev_set_uuid = pdev->dc->sb.set_uuid;
char *set_uuid = c->set_uuid;
if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 554e3afc9b68..00a520c03f41 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -404,7 +404,7 @@ STORE(__cached_dev)
if (!env)
return -ENOMEM;
add_uevent_var(env, "DRIVER=bcache");
- add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid),
+ add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
add_uevent_var(env, "CACHED_LABEL=%s", buf);
kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
KOBJ_CHANGE,
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 4bc453f5bbaa..541c45027cc8 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2840,7 +2840,6 @@ static void cache_postsuspend(struct dm_target *ti)
static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
bool dirty, uint32_t hint, bool hint_valid)
{
- int r;
struct cache *cache = context;
if (dirty) {
@@ -2849,11 +2848,7 @@ static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
} else
clear_bit(from_cblock(cblock), cache->dirty_bitset);
- r = policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
- if (r)
- return r;
-
- return 0;
+ return policy_load_mapping(cache->policy, oblock, cblock, dirty, hint, hint_valid);
}
/*
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 392337f16ecf..53791138d78b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1090,16 +1090,16 @@ static const struct crypt_iv_operations crypt_iv_tcw_ops = {
.post = crypt_iv_tcw_post
};
-static struct crypt_iv_operations crypt_iv_random_ops = {
+static const struct crypt_iv_operations crypt_iv_random_ops = {
.generator = crypt_iv_random_gen
};
-static struct crypt_iv_operations crypt_iv_eboiv_ops = {
+static const struct crypt_iv_operations crypt_iv_eboiv_ops = {
.ctr = crypt_iv_eboiv_ctr,
.generator = crypt_iv_eboiv_gen
};
-static struct crypt_iv_operations crypt_iv_elephant_ops = {
+static const struct crypt_iv_operations crypt_iv_elephant_ops = {
.ctr = crypt_iv_elephant_ctr,
.dtr = crypt_iv_elephant_dtr,
.init = crypt_iv_elephant_init,
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index cb85610527c2..55bcfb74f51f 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -86,7 +86,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, int rw, struct bio_vec *bv, struct bv
else
ba = dm_bufio_new(ec->bufio, block, &b);
- if (unlikely(IS_ERR(ba))) {
+ if (IS_ERR(ba)) {
/*
* Carry on with next buffer, if any, to issue all possible
* data but return error.
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index cd0478d44058..5e306bba4375 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1600,6 +1600,7 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para
if (!argc) {
DMWARN("Empty message received.");
+ r = -EINVAL;
goto out_argv;
}
diff --git a/drivers/md/dm-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 186f91e2752c..186f91e2752c 100644
--- a/drivers/md/dm-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
diff --git a/drivers/md/dm-ps-io-affinity.c b/drivers/md/dm-ps-io-affinity.c
new file mode 100644
index 000000000000..077655cd4fae
--- /dev/null
+++ b/drivers/md/dm-ps-io-affinity.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Oracle Corporation
+ *
+ * Module Author: Mike Christie
+ */
+#include "dm-path-selector.h"
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "multipath io-affinity"
+
+struct path_info {
+ struct dm_path *path;
+ cpumask_var_t cpumask;
+ refcount_t refcount;
+ bool failed;
+};
+
+struct selector {
+ struct path_info **path_map;
+ cpumask_var_t path_mask;
+ atomic_t map_misses;
+};
+
+static void ioa_free_path(struct selector *s, unsigned int cpu)
+{
+ struct path_info *pi = s->path_map[cpu];
+
+ if (!pi)
+ return;
+
+ if (refcount_dec_and_test(&pi->refcount)) {
+ cpumask_clear_cpu(cpu, s->path_mask);
+ free_cpumask_var(pi->cpumask);
+ kfree(pi);
+
+ s->path_map[cpu] = NULL;
+ }
+}
+
+static int ioa_add_path(struct path_selector *ps, struct dm_path *path,
+ int argc, char **argv, char **error)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi = NULL;
+ unsigned int cpu;
+ int ret;
+
+ if (argc != 1) {
+ *error = "io-affinity ps: invalid number of arguments";
+ return -EINVAL;
+ }
+
+ pi = kzalloc(sizeof(*pi), GFP_KERNEL);
+ if (!pi) {
+ *error = "io-affinity ps: Error allocating path context";
+ return -ENOMEM;
+ }
+
+ pi->path = path;
+ path->pscontext = pi;
+ refcount_set(&pi->refcount, 1);
+
+ if (!zalloc_cpumask_var(&pi->cpumask, GFP_KERNEL)) {
+ *error = "io-affinity ps: Error allocating cpumask context";
+ ret = -ENOMEM;
+ goto free_pi;
+ }
+
+ ret = cpumask_parse(argv[0], pi->cpumask);
+ if (ret) {
+ *error = "io-affinity ps: invalid cpumask";
+ ret = -EINVAL;
+ goto free_mask;
+ }
+
+ for_each_cpu(cpu, pi->cpumask) {
+ if (cpu >= nr_cpu_ids) {
+ DMWARN_LIMIT("Ignoring mapping for CPU %u. Max CPU is %u",
+ cpu, nr_cpu_ids);
+ break;
+ }
+
+ if (s->path_map[cpu]) {
+ DMWARN("CPU mapping for %u exists. Ignoring.", cpu);
+ continue;
+ }
+
+ cpumask_set_cpu(cpu, s->path_mask);
+ s->path_map[cpu] = pi;
+ refcount_inc(&pi->refcount);
+ }
+
+ if (refcount_dec_and_test(&pi->refcount)) {
+ *error = "io-affinity ps: No new/valid CPU mapping found";
+ ret = -EINVAL;
+ goto free_mask;
+ }
+
+ return 0;
+
+free_mask:
+ free_cpumask_var(pi->cpumask);
+free_pi:
+ kfree(pi);
+ return ret;
+}
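+
+/*
+ * Illustrative path argument: each path takes exactly one selector
+ * argument, a CPU mask in the format accepted by cpumask_parse() above,
+ * e.g. "f" to map the path to CPUs 0-3.
+ */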
+
+static int ioa_create(struct path_selector *ps, unsigned argc, char **argv)
+{
+ struct selector *s;
+
+ s = kmalloc(sizeof(*s), GFP_KERNEL);
+ if (!s)
+ return -ENOMEM;
+
+ s->path_map = kcalloc(nr_cpu_ids, sizeof(struct path_info *),
+ GFP_KERNEL);
+ if (!s->path_map)
+ goto free_selector;
+
+ if (!zalloc_cpumask_var(&s->path_mask, GFP_KERNEL))
+ goto free_map;
+
+ atomic_set(&s->map_misses, 0);
+ ps->context = s;
+ return 0;
+
+free_map:
+ kfree(s->path_map);
+free_selector:
+ kfree(s);
+ return -ENOMEM;
+}
+
+static void ioa_destroy(struct path_selector *ps)
+{
+ struct selector *s = ps->context;
+ unsigned cpu;
+
+ for_each_cpu(cpu, s->path_mask)
+ ioa_free_path(s, cpu);
+
+ free_cpumask_var(s->path_mask);
+ kfree(s->path_map);
+ kfree(s);
+
+ ps->context = NULL;
+}
+
+static int ioa_status(struct path_selector *ps, struct dm_path *path,
+ status_type_t type, char *result, unsigned int maxlen)
+{
+ struct selector *s = ps->context;
+ struct path_info *pi;
+ int sz = 0;
+
+ if (!path) {
+ DMEMIT("0 ");
+ return sz;
+ }
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ DMEMIT("%d ", atomic_read(&s->map_misses));
+ break;
+ case STATUSTYPE_TABLE:
+ pi = path->pscontext;
+ DMEMIT("%*pb ", cpumask_pr_args(pi->cpumask));
+ break;
+ }
+
+ return sz;
+}
+
+static void ioa_fail_path(struct path_selector *ps, struct dm_path *p)
+{
+ struct path_info *pi = p->pscontext;
+
+ pi->failed = true;
+}
+
+static int ioa_reinstate_path(struct path_selector *ps, struct dm_path *p)
+{
+ struct path_info *pi = p->pscontext;
+
+ pi->failed = false;
+ return 0;
+}
+
+static struct dm_path *ioa_select_path(struct path_selector *ps,
+ size_t nr_bytes)
+{
+ unsigned int cpu, node;
+ struct selector *s = ps->context;
+ const struct cpumask *cpumask;
+ struct path_info *pi;
+ int i;
+
+ cpu = get_cpu();
+
+ pi = s->path_map[cpu];
+ if (pi && !pi->failed)
+ goto done;
+
+ /*
+ * Performance is not optimal here: try the local node first, then
+ * fall back to any usable path so the IO doesn't fail outright.
+ */
+ if (!pi)
+ atomic_inc(&s->map_misses);
+
+ node = cpu_to_node(cpu);
+ cpumask = cpumask_of_node(node);
+ for_each_cpu(i, cpumask) {
+ pi = s->path_map[i];
+ if (pi && !pi->failed)
+ goto done;
+ }
+
+ for_each_cpu(i, s->path_mask) {
+ pi = s->path_map[i];
+ if (pi && !pi->failed)
+ goto done;
+ }
+ pi = NULL;
+
+done:
+ put_cpu();
+ return pi ? pi->path : NULL;
+}
+
+static struct path_selector_type ioa_ps = {
+ .name = "io-affinity",
+ .module = THIS_MODULE,
+ .table_args = 1,
+ .info_args = 1,
+ .create = ioa_create,
+ .destroy = ioa_destroy,
+ .status = ioa_status,
+ .add_path = ioa_add_path,
+ .fail_path = ioa_fail_path,
+ .reinstate_path = ioa_reinstate_path,
+ .select_path = ioa_select_path,
+};
+
+static int __init dm_ioa_init(void)
+{
+ int ret = dm_register_path_selector(&ioa_ps);
+
+ if (ret < 0)
+ DMERR("register failed %d", ret);
+ return ret;
+}
+
+static void __exit dm_ioa_exit(void)
+{
+ int ret = dm_unregister_path_selector(&ioa_ps);
+
+ if (ret < 0)
+ DMERR("unregister failed %d", ret);
+}
+
+module_init(dm_ioa_init);
+module_exit(dm_ioa_exit);
+
+MODULE_DESCRIPTION(DM_NAME " multipath path selector that selects paths based on the CPU the IO is being executed on");
+MODULE_AUTHOR("Mike Christie <michael.christie@oracle.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-queue-length.c b/drivers/md/dm-ps-queue-length.c
index 5fd018d18418..5fd018d18418 100644
--- a/drivers/md/dm-queue-length.c
+++ b/drivers/md/dm-ps-queue-length.c
diff --git a/drivers/md/dm-round-robin.c b/drivers/md/dm-ps-round-robin.c
index bdbb7e6e8212..bdbb7e6e8212 100644
--- a/drivers/md/dm-round-robin.c
+++ b/drivers/md/dm-ps-round-robin.c
diff --git a/drivers/md/dm-service-time.c b/drivers/md/dm-ps-service-time.c
index 9cfda665e9eb..9cfda665e9eb 100644
--- a/drivers/md/dm-service-time.c
+++ b/drivers/md/dm-ps-service-time.c
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 151d022b032d..df359d33cda8 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -496,7 +496,7 @@ static void stripe_io_hints(struct dm_target *ti,
static struct target_type stripe_target = {
.name = "striped",
.version = {1, 6, 0},
- .features = DM_TARGET_PASSES_INTEGRITY,
+ .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = stripe_ctr,
.dtr = stripe_dtr,
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index bff4c7fa1cd2..262e2b0fd975 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -550,6 +550,7 @@ static int switch_iterate_devices(struct dm_target *ti,
static struct target_type switch_target = {
.name = "switch",
.version = {1, 1, 0},
+ .features = DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = switch_ctr,
.dtr = switch_dtr,
diff --git a/drivers/md/dm-unstripe.c b/drivers/md/dm-unstripe.c
index e673dacf6418..7357c1bd5863 100644
--- a/drivers/md/dm-unstripe.c
+++ b/drivers/md/dm-unstripe.c
@@ -178,6 +178,7 @@ static void unstripe_io_hints(struct dm_target *ti,
static struct target_type unstripe_target = {
.name = "unstriped",
.version = {1, 1, 0},
+ .features = DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = unstripe_ctr,
.dtr = unstripe_dtr,
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index f74982dcbea0..6b8e5bdd8526 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -538,6 +538,15 @@ static int verity_verify_io(struct dm_verity_io *io)
}
/*
+ * Skip verity work in response to I/O error when the system is shutting down.
+ */
+static inline bool verity_is_system_shutting_down(void)
+{
+ return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
+ || system_state == SYSTEM_RESTART;
+}
+
+/*
* End one "io" structure with a given error.
*/
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
@@ -564,7 +573,8 @@ static void verity_end_io(struct bio *bio)
{
struct dm_verity_io *io = bio->bi_private;
- if (bio->bi_status && !verity_fec_is_enabled(io->v)) {
+ if (bio->bi_status &&
+ (!verity_fec_is_enabled(io->v) || verity_is_system_shutting_down())) {
verity_finish_io(io, bio->bi_status);
return;
}
diff --git a/drivers/md/dm-verity-verify-sig.c b/drivers/md/dm-verity-verify-sig.c
index 614e43db93aa..29385dc470d5 100644
--- a/drivers/md/dm-verity-verify-sig.c
+++ b/drivers/md/dm-verity-verify-sig.c
@@ -119,8 +119,13 @@ int verity_verify_root_hash(const void *root_hash, size_t root_hash_len,
}
ret = verify_pkcs7_signature(root_hash, root_hash_len, sig_data,
- sig_len, NULL, VERIFYING_UNSPECIFIED_SIGNATURE,
- NULL, NULL);
+ sig_len,
+#ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG_SECONDARY_KEYRING
+ VERIFY_USE_SECONDARY_KEYRING,
+#else
+ NULL,
+#endif
+ VERIFYING_UNSPECIFIED_SIGNATURE, NULL, NULL);
return ret;
}
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index b65ca8dcfbdc..faa1dbffc8b4 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -59,6 +59,7 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
static struct target_type zero_target = {
.name = "zero",
.version = {1, 1, 0},
+ .features = DM_TARGET_NOWAIT,
.module = THIS_MODULE,
.ctr = zero_ctr,
.map = zero_map,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5b2f371ec4bb..b3c3c8b4cb42 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1586,7 +1586,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
ci.sector_count = bio_sectors(bio);
while (ci.sector_count && !error) {
error = __split_and_process_non_flush(&ci);
- if (current->bio_list && ci.sector_count && !error) {
+ if (ci.sector_count && !error) {
/*
* Remainder must be passed to submit_bio_noacct()
* so that it gets handled *after* bios already submitted
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index eb15c8c725ca..ec46cff80fdb 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -167,33 +167,6 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (!csi->traits)
return -EINVAL;
- /*
- * On Allwinner SoCs, some high memory bandwidth devices do DMA
- * directly over the memory bus (called MBUS), instead of the
- * system bus. The memory bus has a different addressing scheme
- * without the DRAM starting offset.
- *
- * In some cases this can be described by an interconnect in
- * the device tree. In other cases where the hardware is not
- * fully understood and the interconnect is left out of the
- * device tree, fall back to a default offset.
- */
- if (of_find_property(csi->dev->of_node, "interconnects", NULL)) {
- ret = of_dma_configure(csi->dev, csi->dev->of_node, true);
- if (ret)
- return ret;
- } else {
- /*
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
-#ifdef PHYS_PFN_OFFSET
- ret = dma_direct_set_offset(csi->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
-#endif
- }
-
csi->mdev.dev = csi->dev;
strscpy(csi->mdev.model, "Allwinner Video Capture Device",
sizeof(csi->mdev.model));
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index e69e14379fc6..27935f1e9555 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -881,14 +881,6 @@ static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
return 0;
}
-/*
- * PHYS_OFFSET isn't available on all architectures. In order to
- * accommodate for COMPILE_TEST, let's define it to something dumb.
- */
-#if defined(CONFIG_COMPILE_TEST) && !defined(PHYS_OFFSET)
-#define PHYS_OFFSET 0
-#endif
-
static int sun6i_csi_probe(struct platform_device *pdev)
{
struct sun6i_csi_dev *sdev;
@@ -899,15 +891,6 @@ static int sun6i_csi_probe(struct platform_device *pdev)
return -ENOMEM;
sdev->dev = &pdev->dev;
- /*
- * The DMA bus has the memory mapped at 0.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
- ret = dma_direct_set_offset(sdev->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
ret = sun6i_csi_resource_request(sdev, pdev);
if (ret)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index ba5d07886607..ed863bf5ea80 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -825,10 +825,6 @@ static int deinterlace_probe(struct platform_device *pdev)
return ret;
}
- ret = of_dma_configure(dev->dev, dev->dev->of_node, true);
- if (ret)
- return ret;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->base))
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 00e013b14703..3ea6913df176 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -128,7 +128,7 @@ config OMAP_GPMC_DEBUG
config TI_EMIF_SRAM
tristate "Texas Instruments EMIF SRAM driver"
- depends on SOC_AM33XX || SOC_AM43XX || (ARM && COMPILE_TEST)
+ depends on SOC_AM33XX || SOC_AM43XX || (ARM && CPU_V7 && COMPILE_TEST)
depends on SRAM
help
This driver is for the EMIF module available on Texas Instruments
@@ -191,8 +191,8 @@ config DA8XX_DDRCTL
config PL353_SMC
tristate "ARM PL35X Static Memory Controller(SMC) driver"
default y if ARM
- depends on ARM
- depends on ARM_AMBA || COMPILE_TEST
+ depends on ARM || COMPILE_TEST
+ depends on ARM_AMBA
help
This driver is for the ARM PL351/PL353 Static Memory
Controller (SMC) module.
diff --git a/drivers/memory/jz4780-nemc.c b/drivers/memory/jz4780-nemc.c
index 3ec5cb0fce1e..555f7ac3b7dd 100644
--- a/drivers/memory/jz4780-nemc.c
+++ b/drivers/memory/jz4780-nemc.c
@@ -291,6 +291,8 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
nemc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
/*
* The driver currently only uses the registers up to offset
@@ -304,9 +306,9 @@ static int jz4780_nemc_probe(struct platform_device *pdev)
}
nemc->base = devm_ioremap(dev, res->start, NEMC_REG_LEN);
- if (IS_ERR(nemc->base)) {
+ if (!nemc->base) {
dev_err(dev, "failed to get I/O memory\n");
- return PTR_ERR(nemc->base);
+ return -ENOMEM;
}
writel(0, nemc->base + NEMC_NFCSR);
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index 691e4c344cf8..ac350f8d1e20 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -268,6 +268,10 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
/* IPU0 | IPU1 | CCU */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+};
+
static const struct of_device_id mtk_smi_larb_of_ids[] = {
{
.compatible = "mediatek,mt8167-smi-larb",
@@ -293,6 +297,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = {
.compatible = "mediatek,mt8183-smi-larb",
.data = &mtk_smi_larb_mt8183
},
+ {
+ .compatible = "mediatek,mt8192-smi-larb",
+ .data = &mtk_smi_larb_mt8192
+ },
{}
};
@@ -432,6 +440,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
F_MMU1_LARB(7),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
+ .gen = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(6),
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
{
.compatible = "mediatek,mt8173-smi-common",
@@ -457,6 +472,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = {
.compatible = "mediatek,mt8183-smi-common",
.data = &mtk_smi_common_mt8183,
},
+ {
+ .compatible = "mediatek,mt8192-smi-common",
+ .data = &mtk_smi_common_mt8192,
+ },
{}
};
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index f2a33a1af836..8d36e221def1 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset.h>
@@ -204,18 +203,6 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
}
EXPORT_SYMBOL(rpcif_sw_init);
-void rpcif_enable_rpm(struct rpcif *rpc)
-{
- pm_runtime_enable(rpc->dev);
-}
-EXPORT_SYMBOL(rpcif_enable_rpm);
-
-void rpcif_disable_rpm(struct rpcif *rpc)
-{
- pm_runtime_put_sync(rpc->dev);
-}
-EXPORT_SYMBOL(rpcif_disable_rpm);
-
void rpcif_hw_init(struct rpcif *rpc, bool hyperflash)
{
u32 dummy;
@@ -508,7 +495,8 @@ exit:
return ret;
err_out:
- ret = reset_control_reset(rpc->rstc);
+ if (reset_control_reset(rpc->rstc))
+ dev_err(rpc->dev, "Failed to reset HW\n");
rpcif_hw_init(rpc, rpc->bus_size == 2);
goto exit;
}
@@ -560,9 +548,11 @@ static int rpcif_probe(struct platform_device *pdev)
} else if (of_device_is_compatible(flash, "cfi-flash")) {
name = "rpc-if-hyperflash";
} else {
+ of_node_put(flash);
dev_warn(&pdev->dev, "unknown flash type\n");
return -ENODEV;
}
+ of_node_put(flash);
vdev = platform_device_alloc(name, pdev->id);
if (!vdev)
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index 9f0a96bf9ccc..ca7077a06f4c 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -3,14 +3,17 @@ config TEGRA_MC
bool "NVIDIA Tegra Memory Controller support"
default y
depends on ARCH_TEGRA
+ select INTERCONNECT
help
This driver supports the Memory Controller (MC) hardware found on
NVIDIA Tegra SoCs.
config TEGRA20_EMC
- bool "NVIDIA Tegra20 External Memory Controller driver"
+ tristate "NVIDIA Tegra20 External Memory Controller driver"
default y
- depends on ARCH_TEGRA_2x_SOC
+ depends on TEGRA_MC && ARCH_TEGRA_2x_SOC
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ select PM_DEVFREQ
help
This driver is for the External Memory Controller (EMC) found on
Tegra20 chips. The EMC controls the external DRAM on the board.
@@ -18,9 +21,10 @@ config TEGRA20_EMC
external memory.
config TEGRA30_EMC
- bool "NVIDIA Tegra30 External Memory Controller driver"
+ tristate "NVIDIA Tegra30 External Memory Controller driver"
default y
depends on TEGRA_MC && ARCH_TEGRA_3x_SOC
+ select PM_OPP
help
This driver is for the External Memory Controller (EMC) found on
Tegra30 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index ec8403557ed4..44064de962c2 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -6,6 +6,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -42,6 +43,54 @@ static const struct of_device_id tegra_mc_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tegra_mc_of_match);
+static void tegra_mc_devm_action_put_device(void *data)
+{
+ struct tegra_mc *mc = data;
+
+ put_device(mc->dev);
+}
+
+/**
+ * devm_tegra_memory_controller_get() - get Tegra Memory Controller handle
+ * @dev: device pointer for the consumer device
+ *
+ * This function looks up the Memory Controller node in the consumer's
+ * device-tree and retrieves the Memory Controller handle. The reference
+ * is dropped automatically when @dev is unbound.
+ *
+ * Return: ERR_PTR() on error or a valid pointer to a struct tegra_mc.
+ */
+struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct tegra_mc *mc;
+ int err;
+
+ np = of_parse_phandle(dev->of_node, "nvidia,memory-controller", 0);
+ if (!np)
+ return ERR_PTR(-ENOENT);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-ENODEV);
+
+ mc = platform_get_drvdata(pdev);
+ if (!mc) {
+ put_device(&pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ err = devm_add_action(dev, tegra_mc_devm_action_put_device, mc);
+ if (err) {
+ put_device(mc->dev);
+ return ERR_PTR(err);
+ }
+
+ return mc;
+}
+EXPORT_SYMBOL_GPL(devm_tegra_memory_controller_get);
+
static int tegra_mc_block_dma_common(struct tegra_mc *mc,
const struct tegra_mc_reset *rst)
{
@@ -298,6 +347,7 @@ int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate)
return 0;
}
+EXPORT_SYMBOL_GPL(tegra_mc_write_emem_configuration);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
{
@@ -309,6 +359,7 @@ unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc)
return dram_count;
}
+EXPORT_SYMBOL_GPL(tegra_mc_get_emem_device_count);
static int load_one_timing(struct tegra_mc *mc,
struct tegra_mc_timing *timing,
@@ -591,6 +642,101 @@ static __maybe_unused irqreturn_t tegra20_mc_irq(int irq, void *data)
return IRQ_HANDLED;
}
+/*
+ * The Memory Controller (MC) has a number of Memory Clients that issue
+ * memory bandwidth allocation requests to the MC interconnect provider.
+ * The MC provider aggregates the requests and sends the aggregated request
+ * up to the External Memory Controller (EMC) interconnect provider, which
+ * reconfigures the hardware interface to External Memory (EMEM) in
+ * accordance with the required bandwidth. Each MC interconnect node
+ * represents an individual Memory Client.
+ *
+ * Memory interconnect topology:
+ *
+ * +----+
+ * +--------+ | |
+ * | TEXSRD +--->+ |
+ * +--------+ | |
+ * | | +-----+ +------+
+ * ... | MC +--->+ EMC +--->+ EMEM |
+ * | | +-----+ +------+
+ * +--------+ | |
+ * | DISP.. +--->+ |
+ * +--------+ | |
+ * +----+
+ */
+static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
+{
+ struct icc_node *node;
+ unsigned int i;
+ int err;
+
+ /* older device-trees don't have interconnect properties */
+ if (!device_property_present(mc->dev, "#interconnect-cells") ||
+ !mc->soc->icc_ops)
+ return 0;
+
+ mc->provider.dev = mc->dev;
+ mc->provider.data = &mc->provider;
+ mc->provider.set = mc->soc->icc_ops->set;
+ mc->provider.aggregate = mc->soc->icc_ops->aggregate;
+ mc->provider.xlate_extended = mc->soc->icc_ops->xlate_extended;
+
+ err = icc_provider_add(&mc->provider);
+ if (err)
+ return err;
+
+ /* create Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_MC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "Memory Controller";
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Controller to External Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_EMC);
+ if (err)
+ goto remove_nodes;
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ /* create MC client node */
+ node = icc_node_create(mc->soc->clients[i].id);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = mc->soc->clients[i].name;
+ icc_node_add(node, &mc->provider);
+
+ /* link Memory Client to Memory Controller */
+ err = icc_link_create(node, TEGRA_ICC_MC);
+ if (err)
+ goto remove_nodes;
+ }
+
+ /*
+ * The MC driver is registered too early for the generic driver
+ * sync-state mechanism to work for it. That doesn't really matter,
+ * since sync-state works for the EMC drivers: sync the MC driver
+ * ourselves here and the EMC then completes syncing of the whole
+ * ICC state.
+ */
+ icc_sync_state(mc->dev);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&mc->provider);
+del_provider:
+ icc_provider_del(&mc->provider);
+
+ return err;
+}
+
static int tegra_mc_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -659,10 +805,8 @@ static int tegra_mc_probe(struct platform_device *pdev)
}
mc->irq = platform_get_irq(pdev, 0);
- if (mc->irq < 0) {
- dev_err(&pdev->dev, "interrupt not specified\n");
+ if (mc->irq < 0)
return mc->irq;
- }
WARN(!mc->soc->client_id_mask, "missing client ID mask for this SoC\n");
@@ -681,6 +825,11 @@ static int tegra_mc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to register reset controller: %d\n",
err);
+ err = tegra_mc_interconnect_setup(mc);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to initialize interconnect: %d\n",
+ err);
+
if (IS_ENABLED(CONFIG_TEGRA_IOMMU_SMMU) && mc->soc->smmu) {
mc->smmu = tegra_smmu_probe(&pdev->dev, mc->soc->smmu, mc);
if (IS_ERR(mc->smmu)) {
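
With the MC registered as an interconnect provider, a memory client driver can request bandwidth through the generic ICC API. A minimal sketch, assuming an "interconnects"/"interconnect-names" pair in the client's device-tree node with the conventional "dma-mem" path name:

        struct icc_path *path;
        int err;

        /* resolves to: client -> MC -> EMC -> EMEM */
        path = of_icc_get(dev, "dma-mem");
        if (IS_ERR(path))
                return PTR_ERR(path);

        /* request 500 MB/s average and 1 GB/s peak (units are kB/s) */
        err = icc_set_bw(path, 500000, 1000000);
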
diff --git a/drivers/memory/tegra/mc.h b/drivers/memory/tegra/mc.h
index afa3ba45c9e6..33e40d600592 100644
--- a/drivers/memory/tegra/mc.h
+++ b/drivers/memory/tegra/mc.h
@@ -78,6 +78,20 @@
#define MC_TIMING_UPDATE BIT(0)
+static inline u32 tegra_mc_scale_percents(u64 val, unsigned int percents)
+{
+ val = val * percents;
+ do_div(val, 100);
+
+ return min_t(u64, val, U32_MAX);
+}
+
+static inline struct tegra_mc *
+icc_provider_to_tegra_mc(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_mc, provider);
+}
+
static inline u32 mc_readl(struct tegra_mc *mc, unsigned long offset)
{
return readl_relaxed(mc->regs + offset);
@@ -115,4 +129,12 @@ extern const struct tegra_mc_soc tegra132_mc_soc;
extern const struct tegra_mc_soc tegra210_mc_soc;
#endif
+/*
+ * These IDs are for internal use of Tegra ICC drivers. The ID numbers are
+ * chosen such that they don't conflict with the device-tree ICC node IDs.
+ */
+#define TEGRA_ICC_MC 1000
+#define TEGRA_ICC_EMC 1001
+#define TEGRA_ICC_EMEM 1002
+
#endif /* MEMORY_TEGRA_MC_H */
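
tegra_mc_scale_percents() scales a bandwidth value by a percentage and clamps the result to U32_MAX. A worked example with invented numbers:

        /* 300% head-room on a 100000 kB/s peak request */
        u32 peak_bw = tegra_mc_scale_percents(100000, 300);    /* == 300000 */
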
diff --git a/drivers/memory/tegra/tegra114.c b/drivers/memory/tegra/tegra114.c
index 48ef01c3ff90..ed376ba2d2fe 100644
--- a/drivers/memory/tegra/tegra114.c
+++ b/drivers/memory/tegra/tegra114.c
@@ -15,6 +15,12 @@ static const struct tegra_mc_client tegra114_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
}, {
.id = 0x01,
.name = "display0a",
diff --git a/drivers/memory/tegra/tegra124-emc.c b/drivers/memory/tegra/tegra124-emc.c
index 76ace42a688a..ee8ee39e98ed 100644
--- a/drivers/memory/tegra/tegra124-emc.c
+++ b/drivers/memory/tegra/tegra124-emc.c
@@ -1177,10 +1177,8 @@ static void emc_debugfs_init(struct device *dev, struct tegra_emc *emc)
static int tegra_emc_probe(struct platform_device *pdev)
{
- struct platform_device *mc;
struct device_node *np;
struct tegra_emc *emc;
- struct resource *res;
u32 ram_code;
int err;
@@ -1190,25 +1188,13 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emc->regs = devm_ioremap_resource(&pdev->dev, res);
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller\n");
- return -ENOENT;
- }
-
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
-
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc)
- return -EPROBE_DEFER;
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
ram_code = tegra_read_ram_code();
diff --git a/drivers/memory/tegra/tegra124.c b/drivers/memory/tegra/tegra124.c
index 0cede24479bf..e2389573d3c0 100644
--- a/drivers/memory/tegra/tegra124.c
+++ b/drivers/memory/tegra/tegra124.c
@@ -15,6 +15,12 @@ static const struct tegra_mc_client tegra124_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
}, {
.id = 0x01,
.name = "display0a",
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index 027f46287dbf..686aaf477d8a 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -8,19 +8,27 @@
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
+#include <linux/devfreq.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "mc.h"
+
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
@@ -62,6 +70,11 @@
#define EMC_ODT_READ 0x0b4
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
+#define EMC_STAT_CONTROL 0x160
+#define EMC_STAT_LLMC_CONTROL 0x178
+#define EMC_STAT_PWR_CLOCK_LIMIT 0x198
+#define EMC_STAT_PWR_CLOCKS 0x19c
+#define EMC_STAT_PWR_COUNT 0x1a0
#define EMC_AUTO_CAL_INTERVAL 0x2a8
#define EMC_CFG_2 0x2b8
#define EMC_CFG_DIG_DLL 0x2bc
@@ -88,6 +101,12 @@
#define EMC_DBG_READ_DQM_CTRL BIT(9)
#define EMC_DBG_CFG_PRIORITY BIT(24)
+#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+
+#define EMC_PWR_GATHER_CLEAR (1 << 8)
+#define EMC_PWR_GATHER_DISABLE (2 << 8)
+#define EMC_PWR_GATHER_ENABLE (3 << 8)
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -142,11 +161,26 @@ struct emc_timing {
u32 data[ARRAY_SIZE(emc_timing_registers)];
};
+enum emc_rate_request_type {
+ EMC_RATE_DEVFREQ,
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
+ struct tegra_mc *mc;
+ struct icc_provider provider;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
+ unsigned int dram_bus_width;
struct emc_timing *timings;
unsigned int num_timings;
@@ -156,6 +190,17 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
+
+ struct devfreq_simple_ondemand_data ondemand_data;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -383,6 +428,11 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
u32 value, ram_code;
int err;
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
if (!of_property_read_bool(dev->of_node, "nvidia,use-ram-code"))
return of_node_get(dev->of_node);
@@ -408,7 +458,7 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
static int emc_setup_hw(struct tegra_emc *emc)
{
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 emc_cfg, emc_dbg;
+ u32 emc_cfg, emc_dbg, emc_fbio;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -439,6 +489,15 @@ static int emc_setup_hw(struct tegra_emc *emc)
emc_dbg &= ~EMC_DBG_FORCE_UPDATE;
writel_relaxed(emc_dbg, emc->regs + EMC_DBG);
+ emc_fbio = readl_relaxed(emc->regs + EMC_FBIO_CFG5);
+
+ if (emc_fbio & EMC_FBIO_CFG5_DRAM_WIDTH_X16)
+ emc->dram_bus_width = 16;
+ else
+ emc->dram_bus_width = 32;
+
+ dev_info(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+
return 0;
}
@@ -451,6 +510,9 @@ static long emc_round_rate(unsigned long rate,
struct tegra_emc *emc = arg;
unsigned int i;
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
for (i = 0; i < emc->num_timings; i++) {
@@ -480,6 +542,83 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
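
A worked example of the request merging above, with invented rates: devfreq requests a minimum of 200 MHz, the ICC a minimum of 300 MHz, and debugfs caps the maximum at 400 MHz. The merged window is [300 MHz, 400 MHz] and the EMC is programmed to the lowest rate in it:

        unsigned long min_rate = max3(200000000UL, 300000000UL, 0UL);
        unsigned long max_rate = min(ULONG_MAX, 400000000UL);

        /* min_rate <= max_rate, so dev_pm_opp_set_rate() runs the EMC at 300 MHz */
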
/*
* debugfs interface
*
@@ -563,7 +702,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -593,7 +732,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -650,47 +789,330 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
+{
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * The SRC and DST nodes should have a matching TAG in order for
+ * the tag to be set by default on a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ unsigned int dram_data_bus_width_bytes;
+ int err;
+
+ /*
+ * Tegra20 EMC runs at twice the SDRAM bus clock rate because DDR
+ * data is sampled on both clock edges. This means that the EMC
+ * clock rate equals the peak data rate.
+ */
+ dram_data_bus_width_bytes = emc->dram_bus_width / 8;
+ do_div(rate, dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
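
Worked example of the conversion above with invented numbers: an 800 MB/s request on a 16-bit (2-byte) DDR bus comes out as a 400 MHz EMC clock, which per the comment equals the peak data rate:

        unsigned long long rate = 800000000ULL;   /* max(avg_bw, peak_bw), bytes/s */

        do_div(rate, 2);                          /* divide by bus width in bytes */
        /* rate == 400000000 -> 400 MHz EMC clock */
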
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc;
+ struct icc_node *node;
+ int err;
+
+ emc->mc = devm_tegra_memory_controller_get(emc->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+
+ soc = emc->mc->soc;
+
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
+ }
+
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
+ }
+
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
+
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_process_id);
+ struct opp_table *clk_opp_table, *hw_opp_table;
+ int err;
+
+ clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
+ err = PTR_ERR_OR_ZERO(clk_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ goto put_clk_table;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+put_clk_table:
+ dev_pm_opp_put_clkname(clk_opp_table);
+
+ return err;
+}
+
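
The OPP entries are filtered against a bitmask derived from the SoC process ID: only table entries whose opp-supported-hw value has the matching bit set are enabled. For example (invented process ID):

        /* soc_process_id == 2 -> only OPPs with bit 2 set in their
         * opp-supported-hw property are usable on this chip */
        u32 hw_version = BIT(2);        /* == 0x00000004 */
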
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
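
Note that devm actions are released in LIFO order on unbind, so the clock notifier registered last here is torn down before the round-rate callback is unset, the reverse of the setup order. A minimal sketch with hypothetical callbacks:

        devm_add_action_or_reset(dev, teardown_a, NULL);        /* runs second */
        devm_add_action_or_reset(dev, teardown_b, NULL);        /* runs first */
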
+static int tegra_emc_devfreq_target(struct device *dev, unsigned long *freq,
+ u32 flags)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+ struct dev_pm_opp *opp;
+ unsigned long rate;
+
+ opp = devfreq_recommended_opp(dev, freq, flags);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "failed to find opp for %lu Hz\n", *freq);
+ return PTR_ERR(opp);
+ }
+
+ rate = dev_pm_opp_get_freq(opp);
+ dev_pm_opp_put(opp);
+
+ return emc_set_min_rate(emc, rate, EMC_RATE_DEVFREQ);
+}
+
+static int tegra_emc_devfreq_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct tegra_emc *emc = dev_get_drvdata(dev);
+
+ /* freeze counters */
+ writel_relaxed(EMC_PWR_GATHER_DISABLE, emc->regs + EMC_STAT_CONTROL);
+
+ /*
+ * busy_time: number of clocks during which an EMC request was accepted
+ * total_time: number of clocks during which PWR_GATHER was set to ENABLE
+ */
+ stat->busy_time = readl_relaxed(emc->regs + EMC_STAT_PWR_COUNT);
+ stat->total_time = readl_relaxed(emc->regs + EMC_STAT_PWR_CLOCKS);
+ stat->current_frequency = clk_get_rate(emc->clk);
+
+ /* clear counters and restart */
+ writel_relaxed(EMC_PWR_GATHER_CLEAR, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(EMC_PWR_GATHER_ENABLE, emc->regs + EMC_STAT_CONTROL);
+
+ return 0;
+}
+
+static struct devfreq_dev_profile tegra_emc_devfreq_profile = {
+ .polling_ms = 30,
+ .target = tegra_emc_devfreq_target,
+ .get_dev_status = tegra_emc_devfreq_get_dev_status,
+};
+
+static int tegra_emc_devfreq_init(struct tegra_emc *emc)
+{
+ struct devfreq *devfreq;
+
+ /*
+ * PWR_COUNT is at most 1/2 of PWR_CLOCKS, and thus the up-threshold
+ * should be less than 50. Secondly, multiple active memory clients
+ * may cause over 20% of clock cycles to be lost to stalls from
+ * competing memory accesses. This means that the threshold should
+ * be set below 30 in order to have a properly working governor.
+ */
+ emc->ondemand_data.upthreshold = 20;
+
+ /*
+ * Reset the statistics-gathering state, select global bandwidth as
+ * the statistics collection mode and set the clock-counter
+ * saturation limit to maximum.
+ */
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_CONTROL);
+ writel_relaxed(0x00000000, emc->regs + EMC_STAT_LLMC_CONTROL);
+ writel_relaxed(0xffffffff, emc->regs + EMC_STAT_PWR_CLOCK_LIMIT);
+
+ devfreq = devm_devfreq_add_device(emc->dev, &tegra_emc_devfreq_profile,
+ DEVFREQ_GOV_SIMPLE_ONDEMAND,
+ &emc->ondemand_data);
+ if (IS_ERR(devfreq)) {
+ dev_err(emc->dev, "failed to initialize devfreq: %pe\n", devfreq);
+ return PTR_ERR(devfreq);
+ }
+
+ return 0;
+}
+
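
The simple-ondemand governor ramps the clock up when busy_time * 100 > total_time * upthreshold. With the invented counter snapshot below the load is 25%, which exceeds the 20% threshold, so the governor raises the EMC frequency:

        stat->busy_time  = 7500;        /* from EMC_STAT_PWR_COUNT  */
        stat->total_time = 30000;       /* from EMC_STAT_PWR_CLOCKS */
        /* 7500 * 100 > 30000 * 20 -> switch to a higher frequency */
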
static int tegra_emc_probe(struct platform_device *pdev)
{
struct device_node *np;
struct tegra_emc *emc;
- struct resource *res;
int irq, err;
- /* driver has nothing to do in a case of memory timing absence */
- if (of_get_child_count(pdev->dev.of_node) == 0) {
- dev_info(&pdev->dev,
- "EMC device tree node doesn't have memory timings\n");
- return 0;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
- dev_err(&pdev->dev, "interrupt not specified\n");
dev_err(&pdev->dev, "please update your device tree\n");
return irq;
}
- np = tegra_emc_find_node_by_ram_code(&pdev->dev);
- if (!np)
- return -EINVAL;
-
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
- if (!emc) {
- of_node_put(np);
+ if (!emc)
return -ENOMEM;
- }
+ mutex_init(&emc->rate_lock);
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
+ np = tegra_emc_find_node_by_ram_code(&pdev->dev);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- emc->regs = devm_ioremap_resource(&pdev->dev, res);
+ emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -701,41 +1123,39 @@ static int tegra_emc_probe(struct platform_device *pdev)
err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
- dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n", irq, err);
+ dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
return err;
}
- tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
-
- emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- goto unset_cb;
- }
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
- err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
- err);
- goto unset_cb;
- }
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
+ tegra_emc_devfreq_init(emc);
- return 0;
-
-unset_cb:
- tegra20_clk_set_emc_round_callback(NULL, NULL);
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case
+ * of this driver.
+ */
+ try_module_get(THIS_MODULE);
- return err;
+ return 0;
}
static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra20-emc", },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
.probe = tegra_emc_probe,
@@ -743,11 +1163,11 @@ static struct platform_driver tegra_emc_driver = {
.name = "tegra20-emc",
.of_match_table = tegra_emc_of_match,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int __init tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra20.c b/drivers/memory/tegra/tegra20.c
index a8098bff91d9..29ecf02805a0 100644
--- a/drivers/memory/tegra/tegra20.c
+++ b/drivers/memory/tegra/tegra20.c
@@ -3,6 +3,10 @@
* Copyright (C) 2012 NVIDIA CORPORATION. All rights reserved.
*/
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
#include <dt-bindings/memory/tegra20-mc.h>
#include "mc.h"
@@ -280,6 +284,78 @@ static const struct tegra_mc_reset_ops tegra20_mc_reset_ops = {
.reset_status = tegra20_mc_reset_status,
};
+static int tegra20_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ /*
+ * It should be possible to tune arbitration knobs here, but the
+ * default values are known to work well on all devices. Hence
+ * nothing to do here so far.
+ */
+ return 0;
+}
+
+static int tegra20_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+ * of the client's FIFO buffers. Secondly, we need to take into
+ * account inefficiencies of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 300);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
+
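
Worked example of the aggregation above with invented numbers, for a client whose path was tagged isochronous by the xlate callback (node stands for the client's icc_node):

        u32 agg_avg = 0, agg_peak = 0;

        /* ISO-tagged display client: 50000 kB/s average, 100000 kB/s peak */
        tegra20_mc_icc_aggregate(node, TEGRA_MC_ICC_TAG_ISO, 50000, 100000,
                                 &agg_avg, &agg_peak);
        /* agg_avg == 50000, agg_peak == 300000 (peak scaled by 300%) */
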
+static struct icc_node_data *
+tegra20_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ ndata->node = node;
+
+ /* these clients are isochronous by default */
+ if (strstarts(node->name, "display") ||
+ strstarts(node->name, "vi"))
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ else
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra20_mc_icc_ops = {
+ .xlate_extended = tegra20_mc_of_icc_xlate_extended,
+ .aggregate = tegra20_mc_icc_aggregate,
+ .set = tegra20_mc_icc_set,
+};
+
const struct tegra_mc_soc tegra20_mc_soc = {
.clients = tegra20_mc_clients,
.num_clients = ARRAY_SIZE(tegra20_mc_clients),
@@ -290,4 +366,5 @@ const struct tegra_mc_soc tegra20_mc_soc = {
.reset_ops = &tegra20_mc_reset_ops,
.resets = tegra20_mc_resets,
.num_resets = ARRAY_SIZE(tegra20_mc_resets),
+ .icc_ops = &tegra20_mc_icc_ops,
};
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index cdd663ba4733..5f224796e32e 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1828,7 +1828,6 @@ static int tegra210_emc_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cd;
unsigned long current_rate;
- struct platform_device *mc;
struct tegra210_emc *emc;
struct device_node *np;
unsigned int i;
@@ -1846,35 +1845,19 @@ static int tegra210_emc_probe(struct platform_device *pdev)
spin_lock_init(&emc->lock);
emc->dev = &pdev->dev;
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller\n");
- return -ENOENT;
- }
-
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
-
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc) {
- put_device(&mc->dev);
- return -EPROBE_DEFER;
- }
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
emc->regs = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(emc->regs)) {
- err = PTR_ERR(emc->regs);
- goto put_mc;
- }
+ if (IS_ERR(emc->regs))
+ return PTR_ERR(emc->regs);
for (i = 0; i < 2; i++) {
emc->channel[i] = devm_platform_ioremap_resource(pdev, 1 + i);
- if (IS_ERR(emc->channel[i])) {
- err = PTR_ERR(emc->channel[i]);
- goto put_mc;
- }
+ if (IS_ERR(emc->channel[i]))
+ return PTR_ERR(emc->channel[i]);
}
tegra210_emc_detect(emc);
@@ -1884,7 +1867,7 @@ static int tegra210_emc_probe(struct platform_device *pdev)
err = of_reserved_mem_device_init_by_name(emc->dev, np, "nominal");
if (err < 0) {
dev_err(emc->dev, "failed to get nominal EMC table: %d\n", err);
- goto put_mc;
+ return err;
}
err = of_reserved_mem_device_init_by_name(emc->dev, np, "derated");
@@ -2015,8 +1998,7 @@ detach:
tegra210_clk_emc_detach(emc->clk);
release:
of_reserved_mem_device_release(emc->dev);
-put_mc:
- put_device(emc->mc->dev);
+
return err;
}
@@ -2027,7 +2009,6 @@ static int tegra210_emc_remove(struct platform_device *pdev)
debugfs_remove_recursive(emc->debugfs.root);
tegra210_clk_emc_detach(emc->clk);
of_reserved_mem_device_release(emc->dev);
- put_device(emc->mc->dev);
return 0;
}
diff --git a/drivers/memory/tegra/tegra210.c b/drivers/memory/tegra/tegra210.c
index 7fb8b5438bf4..b3bbc5a05ba1 100644
--- a/drivers/memory/tegra/tegra210.c
+++ b/drivers/memory/tegra/tegra210.c
@@ -24,7 +24,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e8,
.shift = 0,
.mask = 0xff,
- .def = 0xc2,
+ .def = 0x1e,
},
}, {
.id = 0x02,
@@ -38,7 +38,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f4,
.shift = 0,
.mask = 0xff,
- .def = 0xc6,
+ .def = 0x1e,
},
}, {
.id = 0x03,
@@ -52,7 +52,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e8,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x04,
@@ -66,7 +66,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f4,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x05,
@@ -80,7 +80,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2ec,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x06,
@@ -94,7 +94,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f8,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x0e,
@@ -108,7 +108,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2e0,
.shift = 0,
.mask = 0xff,
- .def = 0x13,
+ .def = 0x2e,
},
}, {
.id = 0x0f,
@@ -136,7 +136,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f0,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x11,
@@ -150,7 +150,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2fc,
.shift = 0,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x15,
@@ -380,7 +380,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x350,
.shift = 16,
.mask = 0xff,
- .def = 0x65,
+ .def = 0x80,
},
}, {
.id = 0x44,
@@ -620,7 +620,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x2f0,
.shift = 16,
.mask = 0xff,
- .def = 0x50,
+ .def = 0x1e,
},
}, {
.id = 0x60,
@@ -648,7 +648,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3bc,
.shift = 0,
.mask = 0xff,
- .def = 0x49,
+ .def = 0x5a,
},
}, {
.id = 0x62,
@@ -676,7 +676,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3c4,
.shift = 0,
.mask = 0xff,
- .def = 0x49,
+ .def = 0x5a,
},
}, {
.id = 0x64,
@@ -897,7 +897,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.bit = 1,
},
.la = {
- .reg = 0xb98,
+ .reg = 0x3e0,
.shift = 16,
.mask = 0xff,
.def = 0x80,
@@ -956,7 +956,7 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
.reg = 0x3ec,
.shift = 16,
.mask = 0xff,
- .def = 0xff,
+ .def = 0x80,
},
}, {
.id = 0x86,
@@ -1020,35 +1020,45 @@ static const struct tegra_mc_client tegra210_mc_clients[] = {
};
static const struct tegra_smmu_swgroup tegra210_swgroups[] = {
- { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
- { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
{ .name = "afi", .swgroup = TEGRA_SWGROUP_AFI, .reg = 0x238 },
{ .name = "avpc", .swgroup = TEGRA_SWGROUP_AVPC, .reg = 0x23c },
- { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "dc", .swgroup = TEGRA_SWGROUP_DC, .reg = 0x240 },
+ { .name = "dcb", .swgroup = TEGRA_SWGROUP_DCB, .reg = 0x244 },
{ .name = "hc", .swgroup = TEGRA_SWGROUP_HC, .reg = 0x250 },
+ { .name = "hda", .swgroup = TEGRA_SWGROUP_HDA, .reg = 0x254 },
+ { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
{ .name = "nvenc", .swgroup = TEGRA_SWGROUP_NVENC, .reg = 0x264 },
+ { .name = "nv", .swgroup = TEGRA_SWGROUP_NV, .reg = 0x268 },
+ { .name = "nv2", .swgroup = TEGRA_SWGROUP_NV2, .reg = 0x26c },
{ .name = "ppcs", .swgroup = TEGRA_SWGROUP_PPCS, .reg = 0x270 },
{ .name = "sata", .swgroup = TEGRA_SWGROUP_SATA, .reg = 0x274 },
- { .name = "isp2", .swgroup = TEGRA_SWGROUP_ISP2, .reg = 0x258 },
+ { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
{ .name = "xusb_host", .swgroup = TEGRA_SWGROUP_XUSB_HOST, .reg = 0x288 },
{ .name = "xusb_dev", .swgroup = TEGRA_SWGROUP_XUSB_DEV, .reg = 0x28c },
- { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
- { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
{ .name = "a9avp", .swgroup = TEGRA_SWGROUP_A9AVP, .reg = 0x290 },
- { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
+ { .name = "ppcs1", .swgroup = TEGRA_SWGROUP_PPCS1, .reg = 0x298 },
+ { .name = "dc1", .swgroup = TEGRA_SWGROUP_DC1, .reg = 0xa88 },
{ .name = "sdmmc1a", .swgroup = TEGRA_SWGROUP_SDMMC1A, .reg = 0xa94 },
{ .name = "sdmmc2a", .swgroup = TEGRA_SWGROUP_SDMMC2A, .reg = 0xa98 },
{ .name = "sdmmc3a", .swgroup = TEGRA_SWGROUP_SDMMC3A, .reg = 0xa9c },
{ .name = "sdmmc4a", .swgroup = TEGRA_SWGROUP_SDMMC4A, .reg = 0xaa0 },
- { .name = "vic", .swgroup = TEGRA_SWGROUP_VIC, .reg = 0x284 },
- { .name = "vi", .swgroup = TEGRA_SWGROUP_VI, .reg = 0x280 },
+ { .name = "isp2b", .swgroup = TEGRA_SWGROUP_ISP2B, .reg = 0xaa4 },
+ { .name = "gpu", .swgroup = TEGRA_SWGROUP_GPU, .reg = 0xaac },
+ { .name = "ppcs2", .swgroup = TEGRA_SWGROUP_PPCS2, .reg = 0xab0 },
{ .name = "nvdec", .swgroup = TEGRA_SWGROUP_NVDEC, .reg = 0xab4 },
{ .name = "ape", .swgroup = TEGRA_SWGROUP_APE, .reg = 0xab8 },
- { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
{ .name = "se", .swgroup = TEGRA_SWGROUP_SE, .reg = 0xabc },
+ { .name = "nvjpg", .swgroup = TEGRA_SWGROUP_NVJPG, .reg = 0xac0 },
+ { .name = "hc1", .swgroup = TEGRA_SWGROUP_HC1, .reg = 0xac4 },
+ { .name = "se1", .swgroup = TEGRA_SWGROUP_SE1, .reg = 0xac8 },
{ .name = "axiap", .swgroup = TEGRA_SWGROUP_AXIAP, .reg = 0xacc },
{ .name = "etr", .swgroup = TEGRA_SWGROUP_ETR, .reg = 0xad0 },
{ .name = "tsecb", .swgroup = TEGRA_SWGROUP_TSECB, .reg = 0xad4 },
+ { .name = "tsec1", .swgroup = TEGRA_SWGROUP_TSEC1, .reg = 0xad8 },
+ { .name = "tsecb1", .swgroup = TEGRA_SWGROUP_TSECB1, .reg = 0xadc },
+ { .name = "nvdec1", .swgroup = TEGRA_SWGROUP_NVDEC1, .reg = 0xae0 },
};
static const unsigned int tegra210_group_display[] = {
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 055af0e08a2e..44ac155936aa 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -14,16 +14,21 @@
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/err.h>
+#include <linux/interconnect-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/types.h>
+#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
#include "mc.h"
@@ -323,9 +328,21 @@ struct emc_timing {
bool emc_cfg_dyn_self_ref;
};
+enum emc_rate_request_type {
+ EMC_RATE_DEBUG,
+ EMC_RATE_ICC,
+ EMC_RATE_TYPE_MAX,
+};
+
+struct emc_rate_request {
+ unsigned long min_rate;
+ unsigned long max_rate;
+};
+
struct tegra_emc {
struct device *dev;
struct tegra_mc *mc;
+ struct icc_provider provider;
struct notifier_block clk_nb;
struct clk *clk;
void __iomem *regs;
@@ -352,6 +369,15 @@ struct tegra_emc {
unsigned long min_rate;
unsigned long max_rate;
} debugfs;
+
+ /*
+ * There are multiple sources in the EMC driver which could request
+ * a min/max clock rate; these rates are contained in this array.
+ */
+ struct emc_rate_request requested_rate[EMC_RATE_TYPE_MAX];
+
+ /* protect shared rate-change code path */
+ struct mutex rate_lock;
};
static int emc_seq_update_timing(struct tegra_emc *emc)
@@ -988,6 +1014,11 @@ static struct device_node *emc_find_node_by_ram_code(struct device *dev)
u32 value, ram_code;
int err;
+ if (of_get_child_count(dev->of_node) == 0) {
+ dev_info(dev, "device-tree doesn't have memory timings\n");
+ return NULL;
+ }
+
ram_code = tegra_read_ram_code();
for_each_child_of_node(dev->of_node, np) {
@@ -1057,6 +1088,9 @@ static long emc_round_rate(unsigned long rate,
struct tegra_emc *emc = arg;
unsigned int i;
+ if (!emc->num_timings)
+ return clk_get_rate(emc->clk);
+
min_rate = min(min_rate, emc->timings[emc->num_timings - 1].rate);
for (i = 0; i < emc->num_timings; i++) {
@@ -1086,6 +1120,83 @@ static long emc_round_rate(unsigned long rate,
return timing->rate;
}
+static void tegra_emc_rate_requests_init(struct tegra_emc *emc)
+{
+ unsigned int i;
+
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++) {
+ emc->requested_rate[i].min_rate = 0;
+ emc->requested_rate[i].max_rate = ULONG_MAX;
+ }
+}
+
+static int emc_request_rate(struct tegra_emc *emc,
+ unsigned long new_min_rate,
+ unsigned long new_max_rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = emc->requested_rate;
+ unsigned long min_rate = 0, max_rate = ULONG_MAX;
+ unsigned int i;
+ int err;
+
+ /* select minimum and maximum rates among the requested rates */
+ for (i = 0; i < EMC_RATE_TYPE_MAX; i++, req++) {
+ if (i == type) {
+ min_rate = max(new_min_rate, min_rate);
+ max_rate = min(new_max_rate, max_rate);
+ } else {
+ min_rate = max(req->min_rate, min_rate);
+ max_rate = min(req->max_rate, max_rate);
+ }
+ }
+
+ if (min_rate > max_rate) {
+ dev_err_ratelimited(emc->dev, "%s: type %u: out of range: %lu %lu\n",
+ __func__, type, min_rate, max_rate);
+ return -ERANGE;
+ }
+
+ /*
+ * EMC rate-changes should go via OPP API because it manages voltage
+ * changes.
+ */
+ err = dev_pm_opp_set_rate(emc->dev, min_rate);
+ if (err)
+ return err;
+
+ emc->requested_rate[type].min_rate = new_min_rate;
+ emc->requested_rate[type].max_rate = new_max_rate;
+
+ return 0;
+}
+
+static int emc_set_min_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, rate, req->max_rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
+static int emc_set_max_rate(struct tegra_emc *emc, unsigned long rate,
+ enum emc_rate_request_type type)
+{
+ struct emc_rate_request *req = &emc->requested_rate[type];
+ int ret;
+
+ mutex_lock(&emc->rate_lock);
+ ret = emc_request_rate(emc, req->min_rate, rate, type);
+ mutex_unlock(&emc->rate_lock);
+
+ return ret;
+}
+
/*
* debugfs interface
*
@@ -1169,7 +1280,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_min_rate(emc->clk, rate);
+ err = emc_set_min_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1199,7 +1310,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
if (!tegra_emc_validate_rate(emc, rate))
return -EINVAL;
- err = clk_set_max_rate(emc->clk, rate);
+ err = emc_set_max_rate(emc, rate, EMC_RATE_DEBUG);
if (err < 0)
return err;
@@ -1256,51 +1367,239 @@ static void tegra_emc_debugfs_init(struct tegra_emc *emc)
emc, &tegra_emc_debug_max_rate_fops);
}
-static int tegra_emc_probe(struct platform_device *pdev)
+static inline struct tegra_emc *
+to_tegra_emc_provider(struct icc_provider *provider)
{
- struct platform_device *mc;
- struct device_node *np;
- struct tegra_emc *emc;
+ return container_of(provider, struct tegra_emc, provider);
+}
+
+static struct icc_node_data *
+emc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct icc_provider *provider = data;
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ /* External Memory is the only possible ICC route */
+ list_for_each_entry(node, &provider->nodes, node_list) {
+ if (node->id != TEGRA_ICC_EMEM)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ /*
+ * The SRC and DST nodes should have a matching TAG in order for
+ * the tag to be set by default on a requested path.
+ */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ ndata->node = node;
+
+ return ndata;
+ }
+
+ return ERR_PTR(-EPROBE_DEFER);
+}
+
+static int emc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_emc *emc = to_tegra_emc_provider(dst->provider);
+ unsigned long long peak_bw = icc_units_to_bps(dst->peak_bw);
+ unsigned long long avg_bw = icc_units_to_bps(dst->avg_bw);
+ unsigned long long rate = max(avg_bw, peak_bw);
+ const unsigned int dram_data_bus_width_bytes = 4;
+ const unsigned int ddr = 2;
+ int err;
+
+ /*
+ * Tegra30 EMC runs at the SDRAM bus clock rate. This means that
+ * the EMC clock rate is half the peak data rate because data is
+ * sampled on both EMC clock edges.
+ */
+ do_div(rate, ddr * dram_data_bus_width_bytes);
+ rate = min_t(u64, rate, U32_MAX);
+
+ err = emc_set_min_rate(emc, rate, EMC_RATE_ICC);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_interconnect_init(struct tegra_emc *emc)
+{
+ const struct tegra_mc_soc *soc = emc->mc->soc;
+ struct icc_node *node;
int err;
- if (of_get_child_count(pdev->dev.of_node) == 0) {
- dev_info(&pdev->dev,
- "device-tree node doesn't have memory timings\n");
- return -ENODEV;
+ emc->provider.dev = emc->dev;
+ emc->provider.set = emc_icc_set;
+ emc->provider.data = &emc->provider;
+ emc->provider.aggregate = soc->icc_ops->aggregate;
+ emc->provider.xlate_extended = emc_of_icc_xlate_extended;
+
+ err = icc_provider_add(&emc->provider);
+ if (err)
+ goto err_msg;
+
+ /* create External Memory Controller node */
+ node = icc_node_create(TEGRA_ICC_EMC);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto del_provider;
}
- np = of_parse_phandle(pdev->dev.of_node, "nvidia,memory-controller", 0);
- if (!np) {
- dev_err(&pdev->dev, "could not get memory controller node\n");
- return -ENOENT;
+ node->name = "External Memory Controller";
+ icc_node_add(node, &emc->provider);
+
+ /* link External Memory Controller to External Memory (DRAM) */
+ err = icc_link_create(node, TEGRA_ICC_EMEM);
+ if (err)
+ goto remove_nodes;
+
+ /* create External Memory node */
+ node = icc_node_create(TEGRA_ICC_EMEM);
+ if (IS_ERR(node)) {
+ err = PTR_ERR(node);
+ goto remove_nodes;
}
- mc = of_find_device_by_node(np);
- of_node_put(np);
- if (!mc)
- return -ENOENT;
+ node->name = "External Memory (DRAM)";
+ icc_node_add(node, &emc->provider);
- np = emc_find_node_by_ram_code(&pdev->dev);
- if (!np)
- return -EINVAL;
+ return 0;
+
+remove_nodes:
+ icc_nodes_remove(&emc->provider);
+del_provider:
+ icc_provider_del(&emc->provider);
+err_msg:
+ dev_err(emc->dev, "failed to initialize ICC: %d\n", err);
+
+ return err;
+}
+
+static int tegra_emc_opp_table_init(struct tegra_emc *emc)
+{
+ u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
+ struct opp_table *clk_opp_table, *hw_opp_table;
+ int err;
+
+ clk_opp_table = dev_pm_opp_set_clkname(emc->dev, NULL);
+ err = PTR_ERR_OR_ZERO(clk_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP clk: %d\n", err);
+ return err;
+ }
+
+ hw_opp_table = dev_pm_opp_set_supported_hw(emc->dev, &hw_version, 1);
+ err = PTR_ERR_OR_ZERO(hw_opp_table);
+ if (err) {
+ dev_err(emc->dev, "failed to set OPP supported HW: %d\n", err);
+ goto put_clk_table;
+ }
+
+ err = dev_pm_opp_of_add_table(emc->dev);
+ if (err) {
+ if (err == -ENODEV)
+ dev_err(emc->dev, "OPP table not found, please update your device tree\n");
+ else
+ dev_err(emc->dev, "failed to add OPP table: %d\n", err);
+
+ goto put_hw_table;
+ }
+
+ dev_info(emc->dev, "OPP HW ver. 0x%x, current clock rate %lu MHz\n",
+ hw_version, clk_get_rate(emc->clk) / 1000000);
+
+ /* first dummy rate-set initializes voltage state */
+ err = dev_pm_opp_set_rate(emc->dev, clk_get_rate(emc->clk));
+ if (err) {
+ dev_err(emc->dev, "failed to initialize OPP clock: %d\n", err);
+ goto remove_table;
+ }
+
+ return 0;
+
+remove_table:
+ dev_pm_opp_of_remove_table(emc->dev);
+put_hw_table:
+ dev_pm_opp_put_supported_hw(hw_opp_table);
+put_clk_table:
+ dev_pm_opp_put_clkname(clk_opp_table);
+
+ return err;
+}
+
+static void devm_tegra_emc_unset_callback(void *data)
+{
+ tegra20_clk_set_emc_round_callback(NULL, NULL);
+}
+
+static void devm_tegra_emc_unreg_clk_notifier(void *data)
+{
+ struct tegra_emc *emc = data;
+
+ clk_notifier_unregister(emc->clk, &emc->clk_nb);
+}
+
+static int tegra_emc_init_clk(struct tegra_emc *emc)
+{
+ int err;
+
+ tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
+
+ err = devm_add_action_or_reset(emc->dev, devm_tegra_emc_unset_callback,
+ NULL);
+ if (err)
+ return err;
+
+ emc->clk = devm_clk_get(emc->dev, NULL);
+ if (IS_ERR(emc->clk)) {
+ dev_err(emc->dev, "failed to get EMC clock: %pe\n", emc->clk);
+ return PTR_ERR(emc->clk);
+ }
+
+ err = clk_notifier_register(emc->clk, &emc->clk_nb);
+ if (err) {
+ dev_err(emc->dev, "failed to register clk notifier: %d\n", err);
+ return err;
+ }
+
+ err = devm_add_action_or_reset(emc->dev,
+ devm_tegra_emc_unreg_clk_notifier, emc);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_emc_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct tegra_emc *emc;
+ int err;
emc = devm_kzalloc(&pdev->dev, sizeof(*emc), GFP_KERNEL);
- if (!emc) {
- of_node_put(np);
+ if (!emc)
return -ENOMEM;
- }
- emc->mc = platform_get_drvdata(mc);
- if (!emc->mc)
- return -EPROBE_DEFER;
+ emc->mc = devm_tegra_memory_controller_get(&pdev->dev);
+ if (IS_ERR(emc->mc))
+ return PTR_ERR(emc->mc);
+ mutex_init(&emc->rate_lock);
emc->clk_nb.notifier_call = emc_clk_change_notify;
emc->dev = &pdev->dev;
- err = emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
+ np = emc_find_node_by_ram_code(&pdev->dev);
+ if (np) {
+ err = emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
@@ -1311,10 +1610,9 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
err = platform_get_irq(pdev, 0);
- if (err < 0) {
- dev_err(&pdev->dev, "interrupt not specified: %d\n", err);
+ if (err < 0)
return err;
- }
+
emc->irq = err;
err = devm_request_irq(&pdev->dev, emc->irq, tegra_emc_isr, 0,
@@ -1324,31 +1622,27 @@ static int tegra_emc_probe(struct platform_device *pdev)
return err;
}
- tegra20_clk_set_emc_round_callback(emc_round_rate, emc);
-
- emc->clk = devm_clk_get(&pdev->dev, "emc");
- if (IS_ERR(emc->clk)) {
- err = PTR_ERR(emc->clk);
- dev_err(&pdev->dev, "failed to get emc clock: %d\n", err);
- goto unset_cb;
- }
+ err = tegra_emc_init_clk(emc);
+ if (err)
+ return err;
- err = clk_notifier_register(emc->clk, &emc->clk_nb);
- if (err) {
- dev_err(&pdev->dev, "failed to register clk notifier: %d\n",
- err);
- goto unset_cb;
- }
+ err = tegra_emc_opp_table_init(emc);
+ if (err)
+ return err;
platform_set_drvdata(pdev, emc);
+ tegra_emc_rate_requests_init(emc);
tegra_emc_debugfs_init(emc);
+ tegra_emc_interconnect_init(emc);
- return 0;
-
-unset_cb:
- tegra20_clk_set_emc_round_callback(NULL, NULL);
+ /*
+ * Don't allow the kernel module to be unloaded. Unloading adds some
+ * extra complexity which isn't really worth the effort in the case
+ * of this driver.
+ */
+ try_module_get(THIS_MODULE);
- return err;
+ return 0;
}
static int tegra_emc_suspend(struct device *dev)
@@ -1393,6 +1687,7 @@ static const struct of_device_id tegra_emc_of_match[] = {
{ .compatible = "nvidia,tegra30-emc", },
{},
};
+MODULE_DEVICE_TABLE(of, tegra_emc_of_match);
static struct platform_driver tegra_emc_driver = {
.probe = tegra_emc_probe,
@@ -1401,11 +1696,11 @@ static struct platform_driver tegra_emc_driver = {
.of_match_table = tegra_emc_of_match,
.pm = &tegra_emc_pm_ops,
.suppress_bind_attrs = true,
+ .sync_state = icc_sync_state,
},
};
+module_platform_driver(tegra_emc_driver);
-static int __init tegra_emc_init(void)
-{
- return platform_driver_register(&tegra_emc_driver);
-}
-subsys_initcall(tegra_emc_init);
+MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
+MODULE_DESCRIPTION("NVIDIA Tegra30 EMC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/memory/tegra/tegra30.c b/drivers/memory/tegra/tegra30.c
index fcdd812eed80..ea849003014b 100644
--- a/drivers/memory/tegra/tegra30.c
+++ b/drivers/memory/tegra/tegra30.c
@@ -4,7 +4,8 @@
*/
#include <linux/of.h>
-#include <linux/mm.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
#include <dt-bindings/memory/tegra30-mc.h>
@@ -36,6 +37,13 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.id = 0x00,
.name = "ptcr",
.swgroup = TEGRA_SWGROUP_PTC,
+ .la = {
+ .reg = 0x34c,
+ .shift = 0,
+ .mask = 0xff,
+ .def = 0x0,
+ },
+ .fifo_size = 16 * 2,
}, {
.id = 0x01,
.name = "display0a",
@@ -50,6 +58,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x02,
.name = "display0ab",
@@ -64,6 +73,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x03,
.name = "display0b",
@@ -78,6 +88,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x04,
.name = "display0bb",
@@ -92,6 +103,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x05,
.name = "display0c",
@@ -106,6 +118,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x06,
.name = "display0cb",
@@ -120,6 +133,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x07,
.name = "display1b",
@@ -134,6 +148,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x08,
.name = "display1bb",
@@ -148,6 +163,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x4e,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x09,
.name = "eppup",
@@ -162,6 +178,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x17,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0a,
.name = "g2pr",
@@ -176,6 +193,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x09,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x0b,
.name = "g2sr",
@@ -190,6 +208,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x09,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x0c,
.name = "mpeunifbr",
@@ -204,6 +223,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x50,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0d,
.name = "viruv",
@@ -218,6 +238,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2c,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x0e,
.name = "afir",
@@ -232,6 +253,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x0f,
.name = "avpcarm7r",
@@ -246,6 +268,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x10,
.name = "displayhc",
@@ -260,6 +283,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x11,
.name = "displayhcb",
@@ -274,6 +298,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x12,
.name = "fdcdrd",
@@ -288,6 +313,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x13,
.name = "fdcdrd2",
@@ -302,6 +328,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x14,
.name = "g2dr",
@@ -316,6 +343,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x15,
.name = "hdar",
@@ -330,6 +358,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x16,
.name = "host1xdmar",
@@ -344,6 +373,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x05,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x17,
.name = "host1xr",
@@ -358,6 +388,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x50,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x18,
.name = "idxsrd",
@@ -372,6 +403,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x19,
.name = "idxsrd2",
@@ -386,6 +418,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x1a,
.name = "mpe_ipred",
@@ -400,6 +433,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x80,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x1b,
.name = "mpeamemrd",
@@ -414,6 +448,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x42,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x1c,
.name = "mpecsrd",
@@ -428,6 +463,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x1d,
.name = "ppcsahbdmar",
@@ -442,6 +478,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x1e,
.name = "ppcsahbslvr",
@@ -456,6 +493,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x1f,
.name = "satar",
@@ -470,6 +508,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x33,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x20,
.name = "texsrd",
@@ -484,6 +523,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x21,
.name = "texsrd2",
@@ -498,6 +538,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x22,
.name = "vdebsevr",
@@ -512,6 +553,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x23,
.name = "vdember",
@@ -526,6 +568,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xd0,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x24,
.name = "vdemcer",
@@ -540,6 +583,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2a,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x25,
.name = "vdetper",
@@ -554,6 +598,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x74,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x26,
.name = "mpcorelpr",
@@ -564,6 +609,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 14,
}, {
.id = 0x27,
.name = "mpcorer",
@@ -574,6 +620,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x04,
},
+ .fifo_size = 16 * 14,
}, {
.id = 0x28,
.name = "eppu",
@@ -588,6 +635,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x29,
.name = "eppv",
@@ -602,6 +650,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2a,
.name = "eppy",
@@ -616,6 +665,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x6c,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2b,
.name = "mpeunifbw",
@@ -630,6 +680,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x13,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x2c,
.name = "viwsb",
@@ -644,6 +695,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2d,
.name = "viwu",
@@ -658,6 +710,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xb2,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2e,
.name = "viwv",
@@ -672,6 +725,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xb2,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x2f,
.name = "viwy",
@@ -686,6 +740,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x12,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x30,
.name = "g2dw",
@@ -700,6 +755,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x9,
},
+ .fifo_size = 16 * 128,
}, {
.id = 0x31,
.name = "afiw",
@@ -714,6 +770,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0c,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x32,
.name = "avpcarm7w",
@@ -728,6 +785,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x33,
.name = "fdcdwr",
@@ -742,6 +800,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x34,
.name = "fdcdwr2",
@@ -756,6 +815,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0a,
},
+ .fifo_size = 16 * 48,
}, {
.id = 0x35,
.name = "hdaw",
@@ -770,6 +830,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x36,
.name = "host1xw",
@@ -784,6 +845,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x37,
.name = "ispw",
@@ -798,6 +860,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 64,
}, {
.id = 0x38,
.name = "mpcorelpw",
@@ -808,6 +871,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 24,
}, {
.id = 0x39,
.name = "mpcorew",
@@ -818,6 +882,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x0e,
},
+ .fifo_size = 16 * 24,
}, {
.id = 0x3a,
.name = "mpecswr",
@@ -832,6 +897,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 8,
}, {
.id = 0x3b,
.name = "ppcsahbdmaw",
@@ -846,6 +912,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x10,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x3c,
.name = "ppcsahbslvw",
@@ -860,6 +927,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x06,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x3d,
.name = "sataw",
@@ -874,6 +942,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x33,
},
+ .fifo_size = 16 * 32,
}, {
.id = 0x3e,
.name = "vdebsevw",
@@ -888,6 +957,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 4,
}, {
.id = 0x3f,
.name = "vdedbgw",
@@ -902,6 +972,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0xff,
},
+ .fifo_size = 16 * 16,
}, {
.id = 0x40,
.name = "vdembew",
@@ -916,6 +987,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x42,
},
+ .fifo_size = 16 * 2,
}, {
.id = 0x41,
.name = "vdetpmw",
@@ -930,6 +1002,7 @@ static const struct tegra_mc_client tegra30_mc_clients[] = {
.mask = 0xff,
.def = 0x2a,
},
+ .fifo_size = 16 * 16,
},
};
@@ -1011,6 +1084,175 @@ static const struct tegra_mc_reset tegra30_mc_resets[] = {
TEGRA30_MC_RESET(VI, 0x200, 0x204, 17),
};
+static void tegra30_mc_tune_client_latency(struct tegra_mc *mc,
+ const struct tegra_mc_client *client,
+ unsigned int bandwidth_mbytes_sec)
+{
+ u32 arb_tolerance_compensation_nsec, arb_tolerance_compensation_div;
+ const struct tegra_mc_la *la = &client->la;
+ unsigned int fifo_size = client->fifo_size;
+ u32 arb_nsec, la_ticks, value;
+
+ /* see 18.4.1 Client Configuration in Tegra3 TRM v03p */
+ if (bandwidth_mbytes_sec)
+ arb_nsec = fifo_size * NSEC_PER_USEC / bandwidth_mbytes_sec;
+ else
+ arb_nsec = U32_MAX;
+
+ /*
+	 * Latency allowance should be set with consideration for the module's
+ * latency tolerance and internal buffering capabilities.
+ *
+ * Display memory clients use isochronous transfers and have very low
+	 * tolerance to belated transfers. Hence we need to compensate for
+	 * the memory arbitration imperfections in order to prevent a FIFO
+	 * underflow condition when the memory bus is busy.
+ *
+ * VI clients also need a stronger compensation.
+ */
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_MPCORE:
+ case TEGRA_SWGROUP_PTC:
+ /*
+		 * We always want the lowest latency for these clients,
+		 * hence leave their latency allowance untouched.
+ */
+ return;
+
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 2;
+ break;
+
+ case TEGRA_SWGROUP_VI:
+ arb_tolerance_compensation_nsec = 1050;
+ arb_tolerance_compensation_div = 1;
+ break;
+
+ default:
+ arb_tolerance_compensation_nsec = 150;
+ arb_tolerance_compensation_div = 1;
+ break;
+ }
+
+ if (arb_nsec > arb_tolerance_compensation_nsec)
+ arb_nsec -= arb_tolerance_compensation_nsec;
+ else
+ arb_nsec = 0;
+
+ arb_nsec /= arb_tolerance_compensation_div;
+
+ /*
+ * Latency allowance is a number of ticks a request from a particular
+ * client may wait in the EMEM arbiter before it becomes a high-priority
+ * request.
+ */
+ la_ticks = arb_nsec / mc->tick;
+ la_ticks = min(la_ticks, la->mask);
+
+ value = mc_readl(mc, la->reg);
+ value &= ~(la->mask << la->shift);
+ value |= la_ticks << la->shift;
+ mc_writel(mc, value, la->reg);
+}
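To make the arithmetic above concrete, here is a minimal standalone sketch of the same computation with made-up numbers (the 30 ns tick and the 200 MB/s request are assumptions for illustration; in the driver the values come from mc->tick and the ICC request):

```c
/* Standalone sketch of the latency-allowance math above; not driver code.
 * Example: a texture-read client with a 16 * 64 = 1024 byte FIFO
 * requesting 200 MB/s, default swgroup compensation (150 ns, div 1).
 */
#include <stdio.h>

int main(void)
{
	unsigned int fifo_size = 16 * 64;	/* bytes, as in the table above */
	unsigned int bw_mbytes_sec = 200;	/* requested peak bandwidth */
	unsigned int tick_ns = 30;		/* hypothetical mc->tick */
	unsigned int mask = 0xff;		/* width of the LA field */
	unsigned int arb_nsec, la_ticks;

	/* time to drain the FIFO: bytes * NSEC_PER_USEC / (MB/s) -> ns */
	arb_nsec = fifo_size * 1000 / bw_mbytes_sec;	/* 5120 ns */

	/* subtract the arbiter tolerance compensation */
	arb_nsec = arb_nsec > 150 ? arb_nsec - 150 : 0;	/* 4970 ns */

	/* convert to EMEM arbiter ticks and clamp to the register field */
	la_ticks = arb_nsec / tick_ns;			/* 165 */
	if (la_ticks > mask)
		la_ticks = mask;

	printf("latency allowance: %u ticks\n", la_ticks);
	return 0;
}
```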
+
+static int tegra30_mc_icc_set(struct icc_node *src, struct icc_node *dst)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(src->provider);
+ const struct tegra_mc_client *client = &mc->soc->clients[src->id];
+ u64 peak_bandwidth = icc_units_to_bps(src->peak_bw);
+
+ /*
+ * Skip pre-initialization that is done by icc_node_add(), which sets
+ * bandwidth to maximum for all clients before drivers are loaded.
+ *
+ * This doesn't make sense for us because we don't have drivers for all
+	 * clients and it's okay to keep the configuration left by the
+	 * bootloader during boot, at least for today.
+ */
+ if (src == dst)
+ return 0;
+
+ /* convert bytes/sec to megabytes/sec */
+ do_div(peak_bandwidth, 1000000);
+
+ tegra30_mc_tune_client_latency(mc, client, peak_bandwidth);
+
+ return 0;
+}
+
+static int tegra30_mc_icc_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ /*
+ * ISO clients need to reserve extra bandwidth up-front because
+ * there could be high bandwidth pressure during initial filling
+	 * of the client's FIFO buffers. Secondly, we need to take into
+	 * account imperfections of the memory subsystem.
+ */
+ if (tag & TEGRA_MC_ICC_TAG_ISO)
+ peak_bw = tegra_mc_scale_percents(peak_bw, 400);
+
+ *agg_avg += avg_bw;
+ *agg_peak = max(*agg_peak, peak_bw);
+
+ return 0;
+}
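The factor of 400 passed to tegra_mc_scale_percents() reserves four times the requested peak for ISO clients. A standalone sketch of the aggregation (scale_percents() is a stand-in for the driver helper; all numbers are illustrative):

```c
/* Sketch of the ISO-aware peak aggregation above; not driver code. */
#include <stdio.h>

static unsigned int scale_percents(unsigned int bw, unsigned int percents)
{
	return (unsigned long long)bw * percents / 100;
}

int main(void)
{
	unsigned int agg_avg = 0, agg_peak = 0, iso_peak;

	/* non-ISO client requesting 100 avg / 200 peak */
	agg_avg += 100;
	agg_peak = agg_peak > 200 ? agg_peak : 200;

	/* ISO display client, also 100 avg / 200 peak: reserve 400% */
	agg_avg += 100;
	iso_peak = scale_percents(200, 400);		/* 800 */
	agg_peak = agg_peak > iso_peak ? agg_peak : iso_peak;

	printf("avg=%u peak=%u\n", agg_avg, agg_peak);	/* avg=200 peak=800 */
	return 0;
}
```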
+
+static struct icc_node_data *
+tegra30_mc_of_icc_xlate_extended(struct of_phandle_args *spec, void *data)
+{
+ struct tegra_mc *mc = icc_provider_to_tegra_mc(data);
+ const struct tegra_mc_client *client;
+ unsigned int i, idx = spec->args[0];
+ struct icc_node_data *ndata;
+ struct icc_node *node;
+
+ list_for_each_entry(node, &mc->provider.nodes, node_list) {
+ if (node->id != idx)
+ continue;
+
+ ndata = kzalloc(sizeof(*ndata), GFP_KERNEL);
+ if (!ndata)
+ return ERR_PTR(-ENOMEM);
+
+ client = &mc->soc->clients[idx];
+ ndata->node = node;
+
+ switch (client->swgroup) {
+ case TEGRA_SWGROUP_DC:
+ case TEGRA_SWGROUP_DCB:
+ case TEGRA_SWGROUP_PTC:
+ case TEGRA_SWGROUP_VI:
+ /* these clients are isochronous by default */
+ ndata->tag = TEGRA_MC_ICC_TAG_ISO;
+ break;
+
+ default:
+ ndata->tag = TEGRA_MC_ICC_TAG_DEFAULT;
+ break;
+ }
+
+ return ndata;
+ }
+
+ for (i = 0; i < mc->soc->num_clients; i++) {
+ if (mc->soc->clients[i].id == idx)
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ dev_err(mc->dev, "invalid ICC client ID %u\n", idx);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static const struct tegra_mc_icc_ops tegra30_mc_icc_ops = {
+ .xlate_extended = tegra30_mc_of_icc_xlate_extended,
+	.aggregate = tegra30_mc_icc_aggregate,
+ .set = tegra30_mc_icc_set,
+};
+
const struct tegra_mc_soc tegra30_mc_soc = {
.clients = tegra30_mc_clients,
.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -1025,4 +1267,5 @@ const struct tegra_mc_soc tegra30_mc_soc = {
.reset_ops = &tegra_mc_reset_ops_common,
.resets = tegra30_mc_resets,
.num_resets = ARRAY_SIZE(tegra30_mc_resets),
+ .icc_ops = &tegra30_mc_icc_ops,
};
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 6d1bf7c3ca3b..e43dea89b094 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -1513,26 +1513,14 @@ static int ab8500_interrupts_show(struct seq_file *s, void *p)
{
int line;
- seq_puts(s, "name: number: number of: wake:\n");
+ seq_puts(s, "name: number: irq: number of: wake:\n");
for (line = 0; line < num_interrupt_lines; line++) {
- struct irq_desc *desc = irq_to_desc(line + irq_first);
-
- seq_printf(s, "%3i: %6i %4i",
+ seq_printf(s, "%3i: %4i %6i %4i\n",
line,
+ line + irq_first,
num_interrupts[line],
num_wake_interrupts[line]);
-
- if (desc && desc->name)
- seq_printf(s, "-%-8s", desc->name);
- if (desc && desc->action) {
- struct irqaction *action = desc->action;
-
- seq_printf(s, " %s", action->name);
- while ((action = action->next) != NULL)
- seq_printf(s, ", %s", action->name);
- }
- seq_putc(s, '\n');
}
return 0;
diff --git a/drivers/mfd/menelaus.c b/drivers/mfd/menelaus.c
index b64d3315a5e1..07e0ca2e467c 100644
--- a/drivers/mfd/menelaus.c
+++ b/drivers/mfd/menelaus.c
@@ -1119,7 +1119,7 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
menelaus_write_reg(MENELAUS_RTC_CTRL, m->rtc_control);
}
- err = rtc_register_device(m->rtc);
+ err = devm_rtc_register_device(m->rtc);
if (err) {
if (alarm) {
menelaus_remove_irq_work(MENELAUS_RTCALM_IRQ);
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index beb482310a58..b2b3d2b0f808 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -472,8 +472,11 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cntr = &hdev->aggregated_cs_counters;
cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
- if (!cs)
+ if (!cs) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
return -ENOMEM;
+ }
cs->ctx = ctx;
cs->submitted = false;
@@ -486,6 +489,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs_cmpl = kmalloc(sizeof(*cs_cmpl), GFP_ATOMIC);
if (!cs_cmpl) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_cs;
}
@@ -513,6 +518,8 @@ static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues,
sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC);
if (!cs->jobs_in_queue_cnt) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&cntr->out_of_mem_drop_cnt);
rc = -ENOMEM;
goto free_fence;
}
@@ -562,7 +569,7 @@ void hl_cs_rollback_all(struct hl_device *hdev)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
- /* Make sure we don't have leftovers in the H/W queues mirror list */
+ /* Make sure we don't have leftovers in the CS mirror list */
list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
cs_get(cs);
cs->aborted = true;
@@ -764,11 +771,14 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
static int hl_cs_copy_chunk_array(struct hl_device *hdev,
struct hl_cs_chunk **cs_chunk_array,
- void __user *chunks, u32 num_chunks)
+ void __user *chunks, u32 num_chunks,
+ struct hl_ctx *ctx)
{
u32 size_to_copy;
if (num_chunks > HL_MAX_JOBS_PER_CS) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Number of chunks can NOT be larger than %d\n",
HL_MAX_JOBS_PER_CS);
@@ -777,11 +787,16 @@ static int hl_cs_copy_chunk_array(struct hl_device *hdev,
*cs_chunk_array = kmalloc_array(num_chunks, sizeof(**cs_chunk_array),
GFP_ATOMIC);
- if (!*cs_chunk_array)
+ if (!*cs_chunk_array) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = num_chunks * sizeof(struct hl_cs_chunk);
if (copy_from_user(*cs_chunk_array, chunks, size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev, "Failed to copy cs chunk array from user\n");
kfree(*cs_chunk_array);
return -EFAULT;
@@ -797,6 +812,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
+ struct hl_ctx *ctx = hpriv->ctx;
struct hl_cs_job *job;
struct hl_cs *cs;
struct hl_cb *cb;
@@ -805,7 +821,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ hpriv->ctx);
if (rc)
goto out;
@@ -832,8 +849,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = validate_queue_index(hdev, chunk, &queue_type,
&is_kernel_allocated_cb);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
goto free_cs_object;
}
@@ -841,8 +858,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
cb = get_cb_from_cs_chunk(hdev, &hpriv->cb_mgr, chunk);
if (!cb) {
atomic64_inc(
- &hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ &ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
goto free_cs_object;
}
@@ -856,8 +873,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
- atomic64_inc(
- &hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
atomic64_inc(&cntr->out_of_mem_drop_cnt);
dev_err(hdev->dev, "Failed to allocate a new job\n");
rc = -ENOMEM;
@@ -891,7 +907,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
rc = cs_parser(hpriv, job);
if (rc) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.parsing_drop_cnt);
atomic64_inc(&cntr->parsing_drop_cnt);
dev_err(hdev->dev,
"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
@@ -901,8 +917,8 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
}
if (int_queues_only) {
- atomic64_inc(&hpriv->ctx->cs_counters.parsing_drop_cnt);
- atomic64_inc(&cntr->parsing_drop_cnt);
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Reject CS %d.%llu because only internal queues jobs are present\n",
cs->ctx->asid, cs->sequence);
@@ -1042,7 +1058,7 @@ out:
}
static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
- struct hl_cs_chunk *chunk, u64 *signal_seq)
+ struct hl_cs_chunk *chunk, u64 *signal_seq, struct hl_ctx *ctx)
{
u64 *signal_seq_arr = NULL;
u32 size_to_copy, signal_seq_arr_len;
@@ -1052,6 +1068,8 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
/* currently only one signal seq is supported */
if (signal_seq_arr_len != 1) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Wait for signal CS supports only one signal CS seq\n");
return -EINVAL;
@@ -1060,13 +1078,18 @@ static int cs_ioctl_extract_signal_seq(struct hl_device *hdev,
signal_seq_arr = kmalloc_array(signal_seq_arr_len,
sizeof(*signal_seq_arr),
GFP_ATOMIC);
- if (!signal_seq_arr)
+ if (!signal_seq_arr) {
+ atomic64_inc(&ctx->cs_counters.out_of_mem_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.out_of_mem_drop_cnt);
return -ENOMEM;
+ }
size_to_copy = chunk->num_signal_seq_arr * sizeof(*signal_seq_arr);
if (copy_from_user(signal_seq_arr,
u64_to_user_ptr(chunk->signal_seq_arr),
size_to_copy)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&hdev->aggregated_cs_counters.validation_drop_cnt);
dev_err(hdev->dev,
"Failed to copy signal seq array from user\n");
rc = -EFAULT;
@@ -1153,6 +1176,7 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_compl *sig_waitcs_cmpl;
u32 q_idx, collective_engine_id = 0;
+ struct hl_cs_counters_atomic *cntr;
struct hl_fence *sig_fence = NULL;
struct hl_ctx *ctx = hpriv->ctx;
enum hl_queue_type q_type;
@@ -1160,9 +1184,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
u64 signal_seq;
int rc;
+ cntr = &hdev->aggregated_cs_counters;
*cs_seq = ULLONG_MAX;
- rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks);
+ rc = hl_cs_copy_chunk_array(hdev, &cs_chunk_array, chunks, num_chunks,
+ ctx);
if (rc)
goto out;
@@ -1170,6 +1196,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
chunk = &cs_chunk_array[0];
if (chunk->queue_index >= hdev->asic_prop.max_queues) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
rc = -EINVAL;
@@ -1181,6 +1209,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
q_type = hw_queue_prop->type;
if (!hw_queue_prop->supports_sync_stream) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d does not support sync stream operations\n",
q_idx);
@@ -1190,6 +1220,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (cs_type == CS_TYPE_COLLECTIVE_WAIT) {
if (!(hw_queue_prop->collective_mode == HL_COLLECTIVE_MASTER)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Queue index %d is invalid\n", q_idx);
rc = -EINVAL;
@@ -1200,12 +1232,14 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
}
if (cs_type == CS_TYPE_WAIT || cs_type == CS_TYPE_COLLECTIVE_WAIT) {
- rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq);
+ rc = cs_ioctl_extract_signal_seq(hdev, chunk, &signal_seq, ctx);
if (rc)
goto free_cs_chunk_array;
sig_fence = hl_ctx_get_fence(ctx, signal_seq);
if (IS_ERR(sig_fence)) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"Failed to get signal CS with seq 0x%llx\n",
signal_seq);
@@ -1223,6 +1257,8 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
container_of(sig_fence, struct hl_cs_compl, base_fence);
if (sig_waitcs_cmpl->type != CS_TYPE_SIGNAL) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
dev_err(hdev->dev,
"CS seq 0x%llx is not of a signal CS\n",
signal_seq);
@@ -1270,8 +1306,11 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
else if (cs_type == CS_TYPE_COLLECTIVE_WAIT)
rc = hdev->asic_funcs->collective_wait_create_jobs(hdev, ctx,
cs, q_idx, collective_engine_id);
- else
+ else {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
rc = -EINVAL;
+ }
if (rc)
goto free_cs_object;
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 5871162a8442..1456eabf9601 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -17,12 +17,12 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->disabled)
- status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (atomic_read(&hdev->in_reset))
+ if (atomic_read(&hdev->in_reset))
status = HL_DEVICE_STATUS_IN_RESET;
else if (hdev->needs_reset)
status = HL_DEVICE_STATUS_NEEDS_RESET;
+ else if (hdev->disabled)
+ status = HL_DEVICE_STATUS_MALFUNCTION;
else
status = HL_DEVICE_STATUS_OPERATIONAL;
@@ -1092,6 +1092,7 @@ kill_processes:
GFP_KERNEL);
if (!hdev->kernel_ctx) {
rc = -ENOMEM;
+ hl_mmu_fini(hdev);
goto out_err;
}
@@ -1103,6 +1104,7 @@ kill_processes:
"failed to init kernel ctx in hard reset\n");
kfree(hdev->kernel_ctx);
hdev->kernel_ctx = NULL;
+ hl_mmu_fini(hdev);
goto out_err;
}
}
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 0e1c629e9800..20f77f58edef 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -627,25 +627,38 @@ int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
security_status = RREG32(cpu_security_boot_status_reg);
/* We read security status multiple times during boot:
- * 1. preboot - we check if fw security feature is supported
- * 2. boot cpu - we get boot cpu security status
- * 3. FW application - we get FW application security status
+ * 1. preboot - a. Check whether the security status bits are valid
+ * b. Check whether fw security is enabled
+ * c. Check whether hard reset is done by preboot
+ * 2. boot cpu - a. Fetch boot cpu security status
+ * b. Check whether hard reset is done by boot cpu
+ * 3. FW application - a. Fetch fw application security status
+ * b. Check whether hard reset is done by fw app
*
* Preboot:
* Check security status bit (CPU_BOOT_DEV_STS0_ENABLED), if it is set
* check security enabled bit (CPU_BOOT_DEV_STS0_SECURITY_EN)
*/
if (security_status & CPU_BOOT_DEV_STS0_ENABLED) {
- hdev->asic_prop.fw_security_status_valid = 1;
- prop->fw_security_disabled =
- !(security_status & CPU_BOOT_DEV_STS0_SECURITY_EN);
+ prop->fw_security_status_valid = 1;
+
+ if (security_status & CPU_BOOT_DEV_STS0_SECURITY_EN)
+ prop->fw_security_disabled = false;
+ else
+ prop->fw_security_disabled = true;
+
+ if (security_status & CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
} else {
- hdev->asic_prop.fw_security_status_valid = 0;
+ prop->fw_security_status_valid = 0;
prop->fw_security_disabled = true;
}
+ dev_dbg(hdev->dev, "Firmware preboot hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
dev_info(hdev->dev, "firmware-level security is %s\n",
- prop->fw_security_disabled ? "disabled" : "enabled");
+ prop->fw_security_disabled ? "disabled" : "enabled");
return 0;
}
@@ -655,6 +668,7 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
u32 cpu_security_boot_status_reg, u32 boot_err0_reg,
bool skip_bmc, u32 cpu_timeout, u32 boot_fit_timeout)
{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
u32 status;
int rc;
@@ -723,11 +737,22 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
/* Read U-Boot version now in case we will later fail */
hdev->asic_funcs->read_device_fw_version(hdev, FW_COMP_UBOOT);
+ /* Clear reset status since we need to read it again from boot CPU */
+ prop->hard_reset_done_by_fw = false;
+
/* Read boot_cpu security bits */
- if (hdev->asic_prop.fw_security_status_valid)
- hdev->asic_prop.fw_boot_cpu_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_boot_cpu_security_map =
RREG32(cpu_security_boot_status_reg);
+ if (prop->fw_boot_cpu_security_map &
+ CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
+ prop->hard_reset_done_by_fw = true;
+ }
+
+ dev_dbg(hdev->dev, "Firmware boot CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
+
if (rc) {
detect_cpu_boot_status(hdev, status);
rc = -EIO;
@@ -796,18 +821,21 @@ int hl_fw_init_cpu(struct hl_device *hdev, u32 cpu_boot_status_reg,
goto out;
}
+	/* Clear reset status since we need to read it again from the app */
+ prop->hard_reset_done_by_fw = false;
+
/* Read FW application security bits */
- if (hdev->asic_prop.fw_security_status_valid) {
- hdev->asic_prop.fw_app_security_map =
+ if (prop->fw_security_status_valid) {
+ prop->fw_app_security_map =
RREG32(cpu_security_boot_status_reg);
- if (hdev->asic_prop.fw_app_security_map &
+ if (prop->fw_app_security_map &
CPU_BOOT_DEV_STS0_FW_HARD_RST_EN)
- hdev->asic_prop.hard_reset_done_by_fw = true;
+ prop->hard_reset_done_by_fw = true;
}
- dev_dbg(hdev->dev, "Firmware hard-reset is %s\n",
- hdev->asic_prop.hard_reset_done_by_fw ? "enabled" : "disabled");
+ dev_dbg(hdev->dev, "Firmware application CPU hard-reset is %s\n",
+ prop->hard_reset_done_by_fw ? "enabled" : "disabled");
dev_info(hdev->dev, "Successfully loaded firmware to device\n");
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index 571eda6ef5ab..e0d7f5fbaa5c 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -944,7 +944,7 @@ struct hl_asic_funcs {
u32 (*get_signal_cb_size)(struct hl_device *hdev);
u32 (*get_wait_cb_size)(struct hl_device *hdev);
u32 (*gen_signal_cb)(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
u32 (*gen_wait_cb)(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
void (*reset_sob)(struct hl_device *hdev, void *data);
@@ -1000,6 +1000,7 @@ struct hl_va_range {
* @queue_full_drop_cnt: dropped due to queue full
* @device_in_reset_drop_cnt: dropped due to device in reset
* @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
+ * @validation_drop_cnt: dropped due to error in validation
*/
struct hl_cs_counters_atomic {
atomic64_t out_of_mem_drop_cnt;
@@ -1007,6 +1008,7 @@ struct hl_cs_counters_atomic {
atomic64_t queue_full_drop_cnt;
atomic64_t device_in_reset_drop_cnt;
atomic64_t max_cs_in_flight_drop_cnt;
+ atomic64_t validation_drop_cnt;
};
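The validation_drop_cnt added here is always incremented in pairs, once in the per-context counters and once in the device-aggregated ones, as the command_submission.c hunks above show. A hypothetical helper macro could fold that repetition; the sketch below is standalone with stub types and C11 atomics, not the driver's API:

```c
/* Hypothetical two-counter bump helper; stub types, illustration only. */
#include <stdatomic.h>
#include <stdio.h>

struct cs_counters { atomic_llong validation_drop_cnt; };
struct ctx { struct cs_counters cs_counters; };
struct hdev { struct cs_counters aggregated_cs_counters; };

#define CS_COUNTER_INC(c, h, field)					  \
	do {								  \
		atomic_fetch_add(&(c)->cs_counters.field, 1);		  \
		atomic_fetch_add(&(h)->aggregated_cs_counters.field, 1); \
	} while (0)

int main(void)
{
	struct ctx c = { { 0 } };
	struct hdev h = { { 0 } };

	CS_COUNTER_INC(&c, &h, validation_drop_cnt);
	printf("ctx=%lld agg=%lld\n",
	       (long long)c.cs_counters.validation_drop_cnt,
	       (long long)h.aggregated_cs_counters.validation_drop_cnt);
	return 0;
}
```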
/**
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 6bbb6bca6860..032d114f01ea 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -544,6 +544,7 @@ static struct pci_driver hl_pci_driver = {
.id_table = ids,
.probe = hl_pci_probe,
.remove = hl_pci_remove,
+ .shutdown = hl_pci_remove,
.driver.pm = &hl_pm_ops,
.err_handler = &hl_pci_err_handler,
};
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index 32e6af1db4e3..12efbd9d2e3a 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -335,6 +335,8 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
atomic64_read(&cntr->device_in_reset_drop_cnt);
cs_counters.total_max_cs_in_flight_drop_cnt =
atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
+ cs_counters.total_validation_drop_cnt =
+ atomic64_read(&cntr->validation_drop_cnt);
if (hpriv->ctx) {
cs_counters.ctx_out_of_mem_drop_cnt =
@@ -352,6 +354,9 @@ static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
cs_counters.ctx_max_cs_in_flight_drop_cnt =
atomic64_read(
&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
+ cs_counters.ctx_validation_drop_cnt =
+ atomic64_read(
+ &hpriv->ctx->cs_counters.validation_drop_cnt);
}
return copy_to_user(out, &cs_counters,
@@ -406,7 +411,7 @@ static int total_energy_consumption_info(struct hl_fpriv *hpriv,
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
struct hl_device *hdev = hpriv->hdev;
- struct hl_pll_frequency_info freq_info = {0};
+ struct hl_pll_frequency_info freq_info = { {0} };
u32 max_size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
int rc;
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 7caf868d1585..76217258780a 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -418,8 +418,11 @@ static void init_signal_cs(struct hl_device *hdev,
"generate signal CB, sob_id: %d, sob val: 0x%x, q_idx: %d\n",
cs_cmpl->hw_sob->sob_id, cs_cmpl->sob_val, q_idx);
+	/* We set an EB since we must make sure all operations are done
+ * when sending the signal
+ */
hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
- cs_cmpl->hw_sob->sob_id, 0);
+ cs_cmpl->hw_sob->sob_id, 0, true);
kref_get(&hw_sob->kref);
diff --git a/drivers/misc/habanalabs/common/pci.c b/drivers/misc/habanalabs/common/pci.c
index 923b2606e29f..b4725e6101f6 100644
--- a/drivers/misc/habanalabs/common/pci.c
+++ b/drivers/misc/habanalabs/common/pci.c
@@ -130,10 +130,8 @@ static int hl_pci_elbi_write(struct hl_device *hdev, u64 addr, u32 data)
if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE)
return 0;
- if (val & PCI_CONFIG_ELBI_STS_ERR) {
- dev_err(hdev->dev, "Error writing to ELBI\n");
+ if (val & PCI_CONFIG_ELBI_STS_ERR)
return -EIO;
- }
if (!(val & PCI_CONFIG_ELBI_STS_MASK)) {
dev_err(hdev->dev, "ELBI write didn't finish in time\n");
@@ -160,8 +158,12 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
dbi_offset = addr & 0xFFF;
- rc = hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
+	/* Ignore the result of writing to pcie_aux_dbi_reg_addr as it could
+	 * fail when firmware security is enabled.
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0x00300000);
+
+ rc = hl_pci_elbi_write(hdev, prop->pcie_dbi_base_address + dbi_offset,
data);
if (rc)
@@ -244,9 +246,11 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+	/* Return the DBI window to the default location.
+	 * Ignore the result of writing to pcie_aux_dbi_reg_addr as it could
+	 * fail when firmware security is enabled.
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
if (rc)
dev_err(hdev->dev, "failed to map bar %u to 0x%08llx\n",
@@ -294,9 +298,11 @@ int hl_pci_set_outbound_region(struct hl_device *hdev,
/* Enable */
rc |= hl_pci_iatu_write(hdev, 0x004, 0x80000000);
- /* Return the DBI window to the default location */
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
- rc |= hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr + 4, 0);
+	/* Return the DBI window to the default location.
+	 * Ignore the result of writing to pcie_aux_dbi_reg_addr as it could
+	 * fail when firmware security is enabled.
+ */
+ hl_pci_elbi_write(hdev, prop->pcie_aux_dbi_reg_addr, 0);
return rc;
}
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 1f1926607c5e..8c09e4466af8 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -151,19 +151,6 @@ static const u16 gaudi_packet_sizes[MAX_PACKET_ID] = {
[PACKET_LOAD_AND_EXE] = sizeof(struct packet_load_and_exe)
};
-static const u32 gaudi_pll_base_addresses[GAUDI_PLL_MAX] = {
- [CPU_PLL] = mmPSOC_CPU_PLL_NR,
- [PCI_PLL] = mmPSOC_PCI_PLL_NR,
- [SRAM_PLL] = mmSRAM_W_PLL_NR,
- [HBM_PLL] = mmPSOC_HBM_PLL_NR,
- [NIC_PLL] = mmNIC0_PLL_NR,
- [DMA_PLL] = mmDMA_W_PLL_NR,
- [MESH_PLL] = mmMESH_W_PLL_NR,
- [MME_PLL] = mmPSOC_MME_PLL_NR,
- [TPC_PLL] = mmPSOC_TPC_PLL_NR,
- [IF_PLL] = mmIF_W_PLL_NR
-};
-
static inline bool validate_packet_id(enum packet_id id)
{
switch (id) {
@@ -374,7 +361,7 @@ static int gaudi_cpucp_info_get(struct hl_device *hdev);
static void gaudi_disable_clock_gating(struct hl_device *hdev);
static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid);
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size);
+ u32 size, bool eb);
static u32 gaudi_gen_wait_cb(struct hl_device *hdev,
struct hl_gen_wait_properties *prop);
@@ -667,12 +654,6 @@ static int gaudi_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -685,6 +666,12 @@ static int gaudi_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (gaudi_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
return 0;
pci_fini:
@@ -703,93 +690,60 @@ static int gaudi_early_fini(struct hl_device *hdev)
}
/**
- * gaudi_fetch_pll_frequency - Fetch PLL frequency values
+ * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
*
* @hdev: pointer to hl_device structure
- * @pll_index: index of the pll to fetch frequency from
- * @pll_freq: pointer to store the pll frequency in MHz in each of the available
- * outputs. if a certain output is not available a 0 will be set
*
*/
-static int gaudi_fetch_pll_frequency(struct hl_device *hdev,
- enum gaudi_pll_index pll_index,
- u16 *pll_freq_arr)
+static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
{
- u32 nr = 0, nf = 0, od = 0, pll_clk = 0, div_fctr, div_sel,
- pll_base_addr = gaudi_pll_base_addresses[pll_index];
- u16 freq = 0;
- int i, rc;
-
- if (hdev->asic_prop.fw_security_status_valid &&
- (hdev->asic_prop.fw_app_security_map &
- CPU_BOOT_DEV_STS0_PLL_INFO_EN)) {
- rc = hl_fw_cpucp_pll_info_get(hdev, pll_index, pll_freq_arr);
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
- if (rc)
- return rc;
- } else if (hdev->asic_prop.fw_security_disabled) {
+ if (hdev->asic_prop.fw_security_disabled) {
/* Backward compatibility */
- nr = RREG32(pll_base_addr + PLL_NR_OFFSET);
- nf = RREG32(pll_base_addr + PLL_NF_OFFSET);
- od = RREG32(pll_base_addr + PLL_OD_OFFSET);
-
- for (i = 0; i < HL_PLL_NUM_OUTPUTS; i++) {
- div_fctr = RREG32(pll_base_addr +
- PLL_DIV_FACTOR_0_OFFSET + i * 4);
- div_sel = RREG32(pll_base_addr +
- PLL_DIV_SEL_0_OFFSET + i * 4);
+ div_fctr = RREG32(mmPSOC_CPU_PLL_DIV_FACTOR_2);
+ div_sel = RREG32(mmPSOC_CPU_PLL_DIV_SEL_2);
+ nr = RREG32(mmPSOC_CPU_PLL_NR);
+ nf = RREG32(mmPSOC_CPU_PLL_NF);
+ od = RREG32(mmPSOC_CPU_PLL_OD);
- if (div_sel == DIV_SEL_REF_CLK ||
+ if (div_sel == DIV_SEL_REF_CLK ||
div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- freq = PLL_REF_CLK;
- else
- freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) /
- ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- freq = pll_clk;
- else
- freq = pll_clk / (div_fctr + 1);
- } else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d",
- div_sel);
- }
-
- pll_freq_arr[i] = freq;
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
}
} else {
- dev_err(hdev->dev, "Failed to fetch PLL frequency values\n");
- return -EIO;
- }
+ rc = hl_fw_cpucp_pll_info_get(hdev, CPU_PLL, pll_freq_arr);
- return 0;
-}
-
-/**
- * gaudi_fetch_psoc_frequency - Fetch PSOC frequency values
- *
- * @hdev: pointer to hl_device structure
- *
- */
-static int gaudi_fetch_psoc_frequency(struct hl_device *hdev)
-{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u16 pll_freq[HL_PLL_NUM_OUTPUTS];
- int rc;
+ if (rc)
+ return rc;
- rc = gaudi_fetch_pll_frequency(hdev, CPU_PLL, pll_freq);
- if (rc)
- return rc;
+ freq = pll_freq_arr[2];
+ }
- prop->psoc_timestamp_frequency = pll_freq[2];
- prop->psoc_pci_pll_nr = 0;
- prop->psoc_pci_pll_nf = 0;
- prop->psoc_pci_pll_od = 0;
- prop->psoc_pci_pll_div_factor = 0;
+ prop->psoc_timestamp_frequency = freq;
+ prop->psoc_pci_pll_nr = nr;
+ prop->psoc_pci_pll_nf = nf;
+ prop->psoc_pci_pll_od = od;
+ prop->psoc_pci_pll_div_factor = div_fctr;
return 0;
}
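For all its branches, the PLL readout above reduces to a single formula: pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1)), further divided by (div_fctr + 1) when a divided output is selected. A standalone sketch with assumed register values (the 50 MHz reference clock is an assumption for illustration):

```c
/* Worked example of the PLL output math above; values are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int ref_clk_mhz = 50;	/* assumed PLL reference clock */
	unsigned int nr = 0, nf = 31, od = 1, div_fctr = 1;
	unsigned int pll_clk, freq;

	pll_clk = ref_clk_mhz * (nf + 1) / ((nr + 1) * (od + 1));	/* 800 MHz */
	freq = pll_clk / (div_fctr + 1);	/* DIV_SEL_DIVIDED_PLL: 400 MHz */

	printf("pll=%u MHz, output=%u MHz\n", pll_clk, freq);
	return 0;
}
```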
@@ -884,11 +838,17 @@ static int gaudi_init_tpc_mem(struct hl_device *hdev)
size_t fw_size;
void *cpu_addr;
dma_addr_t dma_handle;
- int rc;
+ int rc, count = 5;
+again:
rc = request_firmware(&fw, GAUDI_TPC_FW_FILE, hdev->dev);
+ if (rc == -EINTR && count-- > 0) {
+ msleep(50);
+ goto again;
+ }
+
if (rc) {
- dev_err(hdev->dev, "Firmware file %s is not found!\n",
+ dev_err(hdev->dev, "Failed to load firmware file %s\n",
GAUDI_TPC_FW_FILE);
goto out;
}
@@ -1110,7 +1070,7 @@ static void gaudi_collective_slave_init_job(struct hl_device *hdev,
prop->collective_sob_id, queue_id);
cb_size += gaudi_gen_signal_cb(hdev, job->user_cb,
- prop->collective_sob_id, cb_size);
+ prop->collective_sob_id, cb_size, false);
}
static void gaudi_collective_wait_init_cs(struct hl_cs *cs)
@@ -2449,8 +2409,6 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_e2e(hdev);
gaudi_init_hbm_cred(hdev);
- hdev->asic_funcs->disable_clock_gating(hdev);
-
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
tpc_id++, tpc_offset += TPC_CFG_OFFSET) {
@@ -3462,6 +3420,9 @@ static void gaudi_set_clock_gating(struct hl_device *hdev)
if (hdev->in_debug)
return;
+ if (!hdev->asic_prop.fw_security_disabled)
+ return;
+
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
enable = !!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i])));
@@ -3513,7 +3474,7 @@ static void gaudi_disable_clock_gating(struct hl_device *hdev)
u32 qman_offset;
int i;
- if (!(gaudi->hw_cap_initialized & HW_CAP_CLK_GATE))
+ if (!hdev->asic_prop.fw_security_disabled)
return;
for (i = 0, qman_offset = 0 ; i < DMA_NUMBER_OF_CHANNELS ; i++) {
@@ -3806,7 +3767,7 @@ static int gaudi_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout)
static void gaudi_pre_hw_init(struct hl_device *hdev)
{
/* Perform read from the device to make sure device is up */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
if (hdev->asic_prop.fw_security_disabled) {
/* Set the access through PCI bars (Linux driver only) as
@@ -3847,6 +3808,13 @@ static int gaudi_hw_init(struct hl_device *hdev)
return rc;
}
+ /* In case the clock gating was enabled in preboot we need to disable
+ * it here before touching the MME/TPC registers.
+	 * There is no need to take the clk gating mutex because when this
+	 * function runs, no other relevant code can run.
+ */
+ hdev->asic_funcs->disable_clock_gating(hdev);
+
/* SRAM scrambler must be initialized after CPU is running from HBM */
gaudi_init_scrambler_sram(hdev);
@@ -3885,7 +3853,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return 0;
@@ -3927,7 +3895,10 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
/* I don't know what is the state of the CPU so make sure it is
* stopped in any means necessary
*/
- WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
+ if (hdev->asic_prop.hard_reset_done_by_fw)
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_RST_DEV);
+ else
+ WREG32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU, KMD_MSG_GOTO_WFE);
WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, GAUDI_EVENT_HALT_MACHINE);
@@ -3971,11 +3942,15 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
WREG32(mmPSOC_GLOBAL_CONF_SW_ALL_RST,
1 << PSOC_GLOBAL_CONF_SW_ALL_RST_IND_SHIFT);
- }
- dev_info(hdev->dev,
- "Issued HARD reset command, going to wait %dms\n",
- reset_timeout_ms);
+ dev_info(hdev->dev,
+ "Issued HARD reset command, going to wait %dms\n",
+ reset_timeout_ms);
+ } else {
+ dev_info(hdev->dev,
+ "Firmware performs HARD reset, going to wait %dms\n",
+ reset_timeout_ms);
+ }
/*
* After hard reset, we can't poll the BTM_FSM register because the PSOC
@@ -7936,7 +7911,7 @@ static u32 gaudi_get_wait_cb_size(struct hl_device *hdev)
}
static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
struct hl_cb *cb = (struct hl_cb *) data;
struct packet_msg_short *pkt;
@@ -7953,7 +7928,7 @@ static u32 gaudi_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OP_MASK, 0); /* write the value */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_BASE_MASK, 3); /* W_S SOB base */
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_OPCODE_MASK, PACKET_MSG_SHORT);
- ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, 1);
+ ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_EB_MASK, eb);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_RB_MASK, 1);
ctl |= FIELD_PREP(GAUDI_PKT_SHORT_CTL_MB_MASK, 1);
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index f2d91f4fcffe..a7ab2d7e57d4 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -105,13 +105,6 @@
#define MME_ACC_OFFSET (mmMME1_ACC_BASE - mmMME0_ACC_BASE)
#define SRAM_BANK_OFFSET (mmSRAM_Y0_X1_RTR_BASE - mmSRAM_Y0_X0_RTR_BASE)
-#define PLL_NR_OFFSET 0
-#define PLL_NF_OFFSET (mmPSOC_CPU_PLL_NF - mmPSOC_CPU_PLL_NR)
-#define PLL_OD_OFFSET (mmPSOC_CPU_PLL_OD - mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_FACTOR_0_OFFSET (mmPSOC_CPU_PLL_DIV_FACTOR_0 - \
- mmPSOC_CPU_PLL_NR)
-#define PLL_DIV_SEL_0_OFFSET (mmPSOC_CPU_PLL_DIV_SEL_0 - mmPSOC_CPU_PLL_NR)
-
#define NUM_OF_SOB_IN_BLOCK \
(((mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_2047 - \
mmSYNC_MNGR_E_N_SYNC_MNGR_OBJS_SOB_OBJ_0) + 4) >> 2)
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
index 2e3612e1ee28..88a09d42e111 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_coresight.c
@@ -9,6 +9,7 @@
#include "../include/gaudi/gaudi_coresight.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
#include "../include/gaudi/gaudi_masks.h"
+#include "../include/gaudi/gaudi_reg_map.h"
#include <uapi/misc/habanalabs.h>
#define SPMU_SECTION_SIZE MME0_ACC_SPMU_MAX_OFFSET
@@ -874,7 +875,7 @@ int gaudi_debug_coresight(struct hl_device *hdev, void *data)
}
/* Perform read from the device to flush all configuration */
- RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG);
+ RREG32(mmHW_STATE);
return rc;
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 3e5eb9e3d7bd..b8b4aa636b7c 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -613,12 +613,6 @@ static int goya_early_init(struct hl_device *hdev)
if (rc)
goto free_queue_props;
- if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
- dev_info(hdev->dev,
- "H/W state is dirty, must reset before initializing\n");
- hdev->asic_funcs->hw_fini(hdev, true);
- }
-
/* Before continuing in the initialization, we need to read the preboot
* version to determine whether we run with a security-enabled firmware
*/
@@ -631,6 +625,12 @@ static int goya_early_init(struct hl_device *hdev)
goto pci_fini;
}
+ if (goya_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
+ dev_info(hdev->dev,
+ "H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->hw_fini(hdev, true);
+ }
+
if (!hdev->pldm) {
val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS);
if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK)
@@ -694,32 +694,47 @@ static void goya_qman0_set_security(struct hl_device *hdev, bool secure)
static void goya_fetch_psoc_frequency(struct hl_device *hdev)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 trace_freq = 0;
- u32 pll_clk = 0;
- u32 div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
- u32 div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
- u32 nr = RREG32(mmPSOC_PCI_PLL_NR);
- u32 nf = RREG32(mmPSOC_PCI_PLL_NF);
- u32 od = RREG32(mmPSOC_PCI_PLL_OD);
-
- if (div_sel == DIV_SEL_REF_CLK || div_sel == DIV_SEL_DIVIDED_REF) {
- if (div_sel == DIV_SEL_REF_CLK)
- trace_freq = PLL_REF_CLK;
- else
- trace_freq = PLL_REF_CLK / (div_fctr + 1);
- } else if (div_sel == DIV_SEL_PLL_CLK ||
- div_sel == DIV_SEL_DIVIDED_PLL) {
- pll_clk = PLL_REF_CLK * (nf + 1) / ((nr + 1) * (od + 1));
- if (div_sel == DIV_SEL_PLL_CLK)
- trace_freq = pll_clk;
- else
- trace_freq = pll_clk / (div_fctr + 1);
+ u32 nr = 0, nf = 0, od = 0, div_fctr = 0, pll_clk, div_sel;
+ u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS], freq;
+ int rc;
+
+ if (hdev->asic_prop.fw_security_disabled) {
+ div_fctr = RREG32(mmPSOC_PCI_PLL_DIV_FACTOR_1);
+ div_sel = RREG32(mmPSOC_PCI_PLL_DIV_SEL_1);
+ nr = RREG32(mmPSOC_PCI_PLL_NR);
+ nf = RREG32(mmPSOC_PCI_PLL_NF);
+ od = RREG32(mmPSOC_PCI_PLL_OD);
+
+ if (div_sel == DIV_SEL_REF_CLK ||
+ div_sel == DIV_SEL_DIVIDED_REF) {
+ if (div_sel == DIV_SEL_REF_CLK)
+ freq = PLL_REF_CLK;
+ else
+ freq = PLL_REF_CLK / (div_fctr + 1);
+ } else if (div_sel == DIV_SEL_PLL_CLK ||
+ div_sel == DIV_SEL_DIVIDED_PLL) {
+ pll_clk = PLL_REF_CLK * (nf + 1) /
+ ((nr + 1) * (od + 1));
+ if (div_sel == DIV_SEL_PLL_CLK)
+ freq = pll_clk;
+ else
+ freq = pll_clk / (div_fctr + 1);
+ } else {
+ dev_warn(hdev->dev,
+ "Received invalid div select value: %d",
+ div_sel);
+ freq = 0;
+ }
} else {
- dev_warn(hdev->dev,
- "Received invalid div select value: %d", div_sel);
+ rc = hl_fw_cpucp_pll_info_get(hdev, PCI_PLL, pll_freq_arr);
+
+ if (rc)
+ return;
+
+ freq = pll_freq_arr[1];
}
- prop->psoc_timestamp_frequency = trace_freq;
+ prop->psoc_timestamp_frequency = freq;
prop->psoc_pci_pll_nr = nr;
prop->psoc_pci_pll_nf = nf;
prop->psoc_pci_pll_od = od;
@@ -5324,7 +5339,7 @@ static u32 goya_get_wait_cb_size(struct hl_device *hdev)
}
static u32 goya_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id,
- u32 size)
+ u32 size, bool eb)
{
return 0;
}
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index e5801ecf0cb2..b637dfd69f6e 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -145,11 +145,15 @@
* implemented. This means that FW will
* perform hard reset procedure on
* receiving the halt-machine event.
- * Initialized in: linux
+ * Initialized in: preboot, u-boot, linux
*
* CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -171,6 +175,7 @@
#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
enum cpu_boot_status {
@@ -204,6 +209,8 @@ enum kmd_msg {
KMD_MSG_GOTO_WFE,
KMD_MSG_FIT_RDY,
KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
};
enum cpu_msg_status {
diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile
index 7727bfd32be9..6b888d04392d 100644
--- a/drivers/misc/lkdtm/Makefile
+++ b/drivers/misc/lkdtm/Makefile
@@ -11,6 +11,7 @@ lkdtm-$(CONFIG_LKDTM) += usercopy.o
lkdtm-$(CONFIG_LKDTM) += stackleak.o
lkdtm-$(CONFIG_LKDTM) += cfi.o
lkdtm-$(CONFIG_LKDTM) += fortify.o
+lkdtm-$(CONFIG_PPC_BOOK3S_64) += powerpc.o
KASAN_SANITIZE_rodata.o := n
KASAN_SANITIZE_stackleak.o := n
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index 3c0a67f072c0..b2aff4d87c01 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -179,6 +179,9 @@ static const struct crashtype crashtypes[] = {
#ifdef CONFIG_X86_32
CRASHTYPE(DOUBLE_FAULT),
#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+ CRASHTYPE(PPC_SLB_MULTIHIT),
+#endif
};
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index 6aa6d6a1a839..5ae48c64df24 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -107,4 +107,7 @@ void lkdtm_CFI_FORWARD_PROTO(void);
/* fortify.c */
void lkdtm_FORTIFIED_STRSCPY(void);
+/* powerpc.c */
+void lkdtm_PPC_SLB_MULTIHIT(void);
+
#endif
diff --git a/drivers/misc/lkdtm/powerpc.c b/drivers/misc/lkdtm/powerpc.c
new file mode 100644
index 000000000000..077c9f9ed8d0
--- /dev/null
+++ b/drivers/misc/lkdtm/powerpc.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "lkdtm.h"
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <asm/mmu.h>
+
+/* Inserts new slb entries */
+static void insert_slb_entry(unsigned long p, int ssize, int page_size)
+{
+ unsigned long flags;
+
+ flags = SLB_VSID_KERNEL | mmu_psize_defs[page_size].sllp;
+ preempt_disable();
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED))
+ : "memory");
+
+ asm volatile("slbmte %0,%1" :
+ : "r" (mk_vsid_data(p, ssize, flags)),
+ "r" (mk_esid_data(p, ssize, SLB_NUM_BOLTED + 1))
+ : "memory");
+ preempt_enable();
+}
+
+/* Inject an SLB multihit on a vmalloc-ed address, i.e. 0xD00... */
+static int inject_vmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = vmalloc(PAGE_SIZE);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_vmalloc_psize);
+ /*
+	 * This triggers an exception. If handled correctly, we must recover
+ * from this error.
+ */
+ p[0] = '!';
+ vfree(p);
+ return 0;
+}
+
+/* Inject an SLB multihit on a kmalloc-ed address, i.e. 0xC00... */
+static int inject_kmalloc_slb_multihit(void)
+{
+ char *p;
+
+ p = kmalloc(2048, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ insert_slb_entry((unsigned long)p, MMU_SEGSIZE_1T, mmu_linear_psize);
+ /*
+	 * This triggers an exception. If handled correctly, we must recover
+ * from this error.
+ */
+ p[0] = '!';
+ kfree(p);
+ return 0;
+}
+
+/*
+ * A few initial SLB entries are bolted. Add a test to inject a
+ * multihit in bolted entry 0.
+ */
+static void insert_dup_slb_entry_0(void)
+{
+ unsigned long test_address = PAGE_OFFSET, *test_ptr;
+ unsigned long esid, vsid;
+ unsigned long i = 0;
+
+ test_ptr = (unsigned long *)test_address;
+ preempt_disable();
+
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+	/* for i != 0 we would need to mask out the old entry number */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | SLB_NUM_BOLTED)
+ : "memory");
+
+ asm volatile("slbmfee %0,%1" : "=r" (esid) : "r" (i));
+ asm volatile("slbmfev %0,%1" : "=r" (vsid) : "r" (i));
+
+	/* for i != 0 we would need to mask out the old entry number */
+ asm volatile("slbmte %0,%1" :
+ : "r" (vsid),
+ "r" (esid | (SLB_NUM_BOLTED + 1))
+ : "memory");
+
+ pr_info("%s accessing test address 0x%lx: 0x%lx\n",
+ __func__, test_address, *test_ptr);
+
+ preempt_enable();
+}
+
+void lkdtm_PPC_SLB_MULTIHIT(void)
+{
+ if (!radix_enabled()) {
+ pr_info("Injecting SLB multihit errors\n");
+ /*
+		 * These need not be separate tests, and they do pretty
+		 * much the same thing. In any case we must recover from
+		 * the errors introduced by these functions; the machine
+		 * would not survive these tests if they were not handled.
+ */
+ inject_vmalloc_slb_multihit();
+ inject_kmalloc_slb_multihit();
+ insert_dup_slb_entry_0();
+ pr_info("Recovered from SLB multihit errors\n");
+ } else {
+		pr_err("XFAIL: This test is for ppc64 with hash MMU only\n");
+ }
+}
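As with the other LKDTM crash types, this test is reached at runtime through LKDTM's debugfs interface, e.g. `echo PPC_SLB_MULTIHIT > /sys/kernel/debug/provoke-crash/DIRECT` on a hash-MMU ppc64 machine; a system that stays up and prints the "Recovered" message indicates the injected multihits were handled.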
diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c
index c21f65a5c762..9eb0d93b01c6 100644
--- a/drivers/misc/ocxl/context.c
+++ b/drivers/misc/ocxl/context.c
@@ -70,6 +70,7 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
{
int rc;
unsigned long pidr = 0;
+ struct pci_dev *dev;
// Locks both status & tidr
mutex_lock(&ctx->status_mutex);
@@ -81,8 +82,9 @@ int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm)
if (mm)
pidr = mm->context.id;
+ dev = to_pci_dev(ctx->afu->fn->dev.parent);
rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr,
- amr, mm, xsl_fault_error, ctx);
+ amr, pci_dev_id(dev), mm, xsl_fault_error, ctx);
if (rc)
goto out;
diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c
index fd73d3bc0eb6..ab039c115381 100644
--- a/drivers/misc/ocxl/link.c
+++ b/drivers/misc/ocxl/link.c
@@ -2,8 +2,10 @@
// Copyright 2017 IBM Corp.
#include <linux/sched/mm.h>
#include <linux/mutex.h>
+#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmu_context.h>
+#include <linux/mmu_notifier.h>
#include <asm/copro.h>
#include <asm/pnv-ocxl.h>
#include <asm/xive.h>
@@ -33,6 +35,7 @@
#define SPA_PE_VALID 0x80000000
+struct ocxl_link;
struct pe_data {
struct mm_struct *mm;
@@ -41,6 +44,8 @@ struct pe_data {
/* opaque pointer to be passed to the above callback */
void *xsl_err_data;
struct rcu_head rcu;
+ struct ocxl_link *link;
+ struct mmu_notifier mmu_notifier;
};
struct spa {
@@ -83,6 +88,8 @@ struct ocxl_link {
int domain;
int bus;
int dev;
+ void __iomem *arva; /* ATSD register virtual address */
+ spinlock_t atsd_lock; /* to serialize shootdowns */
atomic_t irq_available;
struct spa *spa;
void *platform_data;
@@ -388,6 +395,7 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
link->bus = dev->bus->number;
link->dev = PCI_SLOT(dev->devfn);
atomic_set(&link->irq_available, MAX_IRQ_PER_LINK);
+ spin_lock_init(&link->atsd_lock);
rc = alloc_spa(dev, link);
if (rc)
@@ -403,6 +411,13 @@ static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_l
if (rc)
goto err_xsl_irq;
+ /* if link->arva is not defeined, MMIO registers are not used to
+ * generate TLB invalidate. PowerBus snooping is enabled.
+ * Otherwise, PowerBus snooping is disabled. TLB Invalidates are
+ * initiated using MMIO registers.
+ */
+ pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0, &link->arva);
+
*out_link = link;
return 0;
@@ -454,6 +469,11 @@ static void release_xsl(struct kref *ref)
{
struct ocxl_link *link = container_of(ref, struct ocxl_link, ref);
+ if (link->arva) {
+ pnv_ocxl_unmap_lpar(link->arva);
+ link->arva = NULL;
+ }
+
list_del(&link->list);
/* call platform code before releasing data */
pnv_ocxl_spa_release(link->platform_data);
@@ -470,6 +490,27 @@ void ocxl_link_release(struct pci_dev *dev, void *link_handle)
}
EXPORT_SYMBOL_GPL(ocxl_link_release);
+static void invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pe_data *pe_data = container_of(mn, struct pe_data, mmu_notifier);
+ struct ocxl_link *link = pe_data->link;
+ unsigned long addr, pid, page_size = PAGE_SIZE;
+
+ pid = mm->context.id;
+ trace_ocxl_mmu_notifier_range(start, end, pid);
+
+ spin_lock(&link->atsd_lock);
+ for (addr = start; addr < end; addr += page_size)
+ pnv_ocxl_tlb_invalidate(link->arva, pid, addr, page_size);
+ spin_unlock(&link->atsd_lock);
+}
+
+static const struct mmu_notifier_ops ocxl_mmu_notifier_ops = {
+ .invalidate_range = invalidate_range,
+};
+
static u64 calculate_cfg_state(bool kernel)
{
u64 state;
@@ -494,7 +535,7 @@ static u64 calculate_cfg_state(bool kernel)
}
int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
- u64 amr, struct mm_struct *mm,
+ u64 amr, u16 bdf, struct mm_struct *mm,
void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr),
void *xsl_err_data)
{
@@ -526,9 +567,13 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
pe_data->mm = mm;
pe_data->xsl_err_cb = xsl_err_cb;
pe_data->xsl_err_data = xsl_err_data;
+ pe_data->link = link;
+ pe_data->mmu_notifier.ops = &ocxl_mmu_notifier_ops;
memset(pe, 0, sizeof(struct ocxl_process_element));
pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0));
+ pe->pasid = cpu_to_be32(pasid << (31 - 19));
+ pe->bdf = cpu_to_be16(bdf);
pe->lpid = cpu_to_be32(mfspr(SPRN_LPID));
pe->pid = cpu_to_be32(pidr);
pe->tid = cpu_to_be32(tidr);
@@ -540,8 +585,17 @@ int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr,
* by the nest MMU. If we have a kernel context, TLBIs are
* already global.
*/
- if (mm)
+ if (mm) {
mm_context_add_copro(mm);
+ if (link->arva) {
+ /* Use MMIO registers for the TLB Invalidate
+ * operations.
+ */
+ trace_ocxl_init_mmu_notifier(pasid, mm->context.id);
+ mmu_notifier_register(&pe_data->mmu_notifier, mm);
+ }
+ }
+
/*
* Barrier is to make sure PE is visible in the SPA before it
* is used by the device. It also helps with the global TLBI
@@ -672,6 +726,18 @@ int ocxl_link_remove_pe(void *link_handle, int pasid)
WARN(1, "Couldn't find pe data when removing PE\n");
} else {
if (pe_data->mm) {
+ if (link->arva) {
+ trace_ocxl_release_mmu_notifier(pasid,
+ pe_data->mm->context.id);
+ mmu_notifier_unregister(&pe_data->mmu_notifier,
+ pe_data->mm);
+ spin_lock(&link->atsd_lock);
+ pnv_ocxl_tlb_invalidate(link->arva,
+ pe_data->mm->context.id,
+ 0ull,
+ PAGE_SIZE);
+ spin_unlock(&link->atsd_lock);
+ }
mm_context_remove_copro(pe_data->mm);
mmdrop(pe_data->mm);
}
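Both the notifier callback and the remove path above repeat the same guarded sequence: test link->arva, take atsd_lock, invalidate page by page. A hypothetical consolidation, shown only as a sketch under the same locking assumptions (ocxl_atsd_invalidate() does not exist in the patch):

```c
/* Hypothetical refactoring sketch: issue one serialized ATSD shootdown
 * per page when MMIO invalidation is active on this link.
 */
static void ocxl_atsd_invalidate(struct ocxl_link *link, unsigned long pid,
				 unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (!link->arva)	/* PowerBus snooping covers the TLBIs */
		return;

	spin_lock(&link->atsd_lock);
	for (addr = start; addr < end; addr += PAGE_SIZE)
		pnv_ocxl_tlb_invalidate(link->arva, pid, addr, PAGE_SIZE);
	spin_unlock(&link->atsd_lock);
}
```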
diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h
index 0bad0a123af6..10125a22d5a5 100644
--- a/drivers/misc/ocxl/ocxl_internal.h
+++ b/drivers/misc/ocxl/ocxl_internal.h
@@ -84,13 +84,16 @@ struct ocxl_context {
struct ocxl_process_element {
__be64 config_state;
- __be32 reserved1[11];
+ __be32 pasid;
+ __be16 bdf;
+ __be16 reserved1;
+ __be32 reserved2[9];
__be32 lpid;
__be32 tid;
__be32 pid;
- __be32 reserved2[10];
+ __be32 reserved3[10];
__be64 amr;
- __be32 reserved3[3];
+ __be32 reserved4[3];
__be32 software_state;
};
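The reserved fields are re-split so pasid and bdf land at fixed offsets while the process element keeps its 128-byte layout (8 + 4 + 2 + 2 + 36 + 12 + 40 + 8 + 12 + 4 bytes). A compile-time guard one could add, as a sketch assuming natural alignment of the __be fields:

```c
#include <linux/build_bug.h>
#include <linux/stddef.h>

/* Sketch: the re-split reserved fields must not move the SPA layout. */
static_assert(sizeof(struct ocxl_process_element) == 128);
static_assert(offsetof(struct ocxl_process_element, bdf) == 12);
```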
diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h
index 17e21cb2addd..a33a5094ff6c 100644
--- a/drivers/misc/ocxl/trace.h
+++ b/drivers/misc/ocxl/trace.h
@@ -8,6 +8,70 @@
#include <linux/tracepoint.h>
+
+TRACE_EVENT(ocxl_mmu_notifier_range,
+ TP_PROTO(unsigned long start, unsigned long end, unsigned long pidr),
+ TP_ARGS(start, end, pidr),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, start)
+ __field(unsigned long, end)
+ __field(unsigned long, pidr)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->pidr = pidr;
+ ),
+
+ TP_printk("start=0x%lx end=0x%lx pidr=0x%lx",
+ __entry->start,
+ __entry->end,
+ __entry->pidr
+ )
+);
+
+TRACE_EVENT(ocxl_init_mmu_notifier,
+ TP_PROTO(int pasid, unsigned long pidr),
+ TP_ARGS(pasid, pidr),
+
+ TP_STRUCT__entry(
+ __field(int, pasid)
+ __field(unsigned long, pidr)
+ ),
+
+ TP_fast_assign(
+ __entry->pasid = pasid;
+ __entry->pidr = pidr;
+ ),
+
+ TP_printk("pasid=%d, pidr=0x%lx",
+ __entry->pasid,
+ __entry->pidr
+ )
+);
+
+TRACE_EVENT(ocxl_release_mmu_notifier,
+ TP_PROTO(int pasid, unsigned long pidr),
+ TP_ARGS(pasid, pidr),
+
+ TP_STRUCT__entry(
+ __field(int, pasid)
+ __field(unsigned long, pidr)
+ ),
+
+ TP_fast_assign(
+ __entry->pasid = pasid;
+ __entry->pidr = pidr;
+ ),
+
+ TP_printk("pasid=%d, pidr=0x%lx",
+ __entry->pasid,
+ __entry->pidr
+ )
+);
+
DECLARE_EVENT_CLASS(ocxl_context,
TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr),
TP_ARGS(pid, spa, pasid, pidr, tidr),
diff --git a/drivers/misc/pvpanic.c b/drivers/misc/pvpanic.c
index 951b37da5e3c..41cab297d66e 100644
--- a/drivers/misc/pvpanic.c
+++ b/drivers/misc/pvpanic.c
@@ -55,12 +55,23 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_mem_or_io(pdev, 0);
- if (res && resource_type(res) == IORESOURCE_IO)
+ if (!res)
+ return -EINVAL;
+
+ switch (resource_type(res)) {
+ case IORESOURCE_IO:
base = devm_ioport_map(dev, res->start, resource_size(res));
- else
+ if (!base)
+ return -ENOMEM;
+ break;
+ case IORESOURCE_MEM:
base = devm_ioremap_resource(dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+ break;
+ default:
+ return -EINVAL;
+ }
atomic_notifier_chain_register(&panic_notifier_list,
&pvpanic_panic_nb);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 40fa994ad6a8..f399edc82191 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -629,10 +629,8 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
}
- if (ubi->mtd->type == MTD_NORFLASH) {
- ubi_assert(ubi->mtd->writesize == 1);
+ if (ubi->mtd->type == MTD_NORFLASH)
ubi->nor_flash = 1;
- }
ubi->min_io_size = ubi->mtd->writesize;
ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;
@@ -1352,8 +1350,6 @@ static int bytes_str_to_int(const char *str)
fallthrough;
case 'K':
result *= 1024;
- if (endp[1] == 'i' && endp[2] == 'B')
- endp += 2;
case '\0':
break;
default:
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 14d890b00d2c..2f3312c31e51 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -535,7 +535,14 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
return -EROFS;
}
- if (ubi->nor_flash) {
+ /*
+ * If the flash is ECC-ed then we have to erase the ECC block before we
+ * can write to it. But the write is in preparation for an erase in the
+ * first place. This means we cannot zero out EC and VID before the
+ * erase and we just have to hope the flash starts erasing from the
+ * start of the page.
+ */
+ if (ubi->nor_flash && ubi->mtd->writesize == 1) {
err = nor_erase_prepare(ubi, pnum);
if (err)
return err;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 85ebd2b7e446..85de5f96c02b 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -380,7 +380,7 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
goto free_dst;
min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
- BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);
+ BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);
err = skb_cow_head(skb, min_headroom);
if (unlikely(err))
@@ -534,6 +534,7 @@ static void bareudp_setup(struct net_device *dev)
SET_NETDEV_DEVTYPE(dev, &bareudp_type);
dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
dev->features |= NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_LLTX;
dev->features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
@@ -644,11 +645,20 @@ static int bareudp_link_config(struct net_device *dev,
return 0;
}
+static void bareudp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct bareudp_dev *bareudp = netdev_priv(dev);
+
+ list_del(&bareudp->next);
+ unregister_netdevice_queue(dev, head);
+}
+
static int bareudp_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct bareudp_conf conf;
+ LIST_HEAD(list_kill);
int err;
err = bareudp2info(data, &conf, extack);
@@ -661,17 +671,14 @@ static int bareudp_newlink(struct net *net, struct net_device *dev,
err = bareudp_link_config(dev, tb);
if (err)
- return err;
+ goto err_unconfig;
return 0;
-}
-
-static void bareudp_dellink(struct net_device *dev, struct list_head *head)
-{
- struct bareudp_dev *bareudp = netdev_priv(dev);
- list_del(&bareudp->next);
- unregister_netdevice_queue(dev, head);
+err_unconfig:
+ bareudp_dellink(dev, &list_kill);
+ unregister_netdevice_many(&list_kill);
+ return err;
}
static size_t bareudp_get_size(const struct net_device *dev)
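bareudp_newlink() now unwinds through bareudp_dellink() onto a local list_kill and flushes it with unregister_netdevice_many(), which batches the notifier and RCU work for every queued device. The pattern in isolation, as a sketch (teardown_many() and devs are hypothetical; rtnl_lock must be held):

```c
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Sketch: queue several devices, then pay the unregister cost once. */
static void teardown_many(struct net_device **devs, int n)
{
	LIST_HEAD(list_kill);
	int i;

	ASSERT_RTNL();
	for (i = 0; i < n; i++)
		unregister_netdevice_queue(devs[i], &list_kill);
	unregister_netdevice_many(&list_kill);	/* one pass for the batch */
}
```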
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 424970939fd4..1c28eade6bec 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -123,6 +123,7 @@ config CAN_JANZ_ICAN3
config CAN_KVASER_PCIEFD
depends on PCI
tristate "Kvaser PCIe FD cards"
+ select CRC32
help
This is a driver for the Kvaser PCI Express CAN FD family.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 2c9f12401276..da551fd0f502 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1852,8 +1852,6 @@ EXPORT_SYMBOL_GPL(m_can_class_register);
void m_can_class_unregister(struct m_can_classdev *cdev)
{
unregister_candev(cdev->net);
-
- m_can_clk_stop(cdev);
}
EXPORT_SYMBOL_GPL(m_can_class_unregister);
diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
index 24c737c4fc44..970f0e9d19bf 100644
--- a/drivers/net/can/m_can/tcan4x5x.c
+++ b/drivers/net/can/m_can/tcan4x5x.c
@@ -131,30 +131,6 @@ static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
}
-static struct can_bittiming_const tcan4x5x_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 2,
- .tseg1_max = 31,
- .tseg2_min = 2,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
-static struct can_bittiming_const tcan4x5x_data_bittiming_const = {
- .name = DEVICE_NAME,
- .tseg1_min = 1,
- .tseg1_max = 32,
- .tseg2_min = 1,
- .tseg2_max = 16,
- .sjw_max = 16,
- .brp_min = 1,
- .brp_max = 32,
- .brp_inc = 1,
-};
-
static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
int wake_state = 0;
@@ -469,8 +445,6 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
mcan_class->dev = &spi->dev;
mcan_class->ops = &tcan4x5x_ops;
mcan_class->is_peripheral = true;
- mcan_class->bit_timing = &tcan4x5x_bittiming_const;
- mcan_class->data_timing = &tcan4x5x_data_bittiming_const;
mcan_class->net->irq = spi->irq;
spi_set_drvdata(spi, priv);
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 8d36101b78e3..29cabc20109e 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,10 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
- tristate "Renesas R-Car CAN controller"
+ tristate "Renesas R-Car and RZ/G CAN controller"
depends on ARCH_RENESAS || ARM
help
Say Y here if you want to use CAN controller found on Renesas R-Car
- SoCs.
+ or RZ/G SoCs.
To compile this driver as a module, choose M here: the module will
be called rcar_can.
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 77129d5f410b..36235afb0bc6 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -1368,13 +1368,10 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
struct spi_transfer *last_xfer;
- tx_ring->tail += len;
-
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
- */
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1391,6 +1388,8 @@ static int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
if (err)
return err;
+ tx_ring->tail += len;
+
err = mcp251xfd_check_tef_tail(priv);
if (err)
return err;
@@ -1553,10 +1552,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
/* Increment the RX FIFO tail pointer 'len' times in a
* single SPI message.
- */
- ring->tail += len;
-
- /* Note:
+ *
+ * Note:
*
* "cs_change == 1" on the last transfer results in an
* active chip select after the complete SPI
@@ -1572,6 +1569,8 @@ mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
last_xfer->cs_change = 1;
if (err)
return err;
+
+ ring->tail += len;
}
return 0;
diff --git a/drivers/net/dsa/hirschmann/Kconfig b/drivers/net/dsa/hirschmann/Kconfig
index 222dd35e2c9d..e01191107a4b 100644
--- a/drivers/net/dsa/hirschmann/Kconfig
+++ b/drivers/net/dsa/hirschmann/Kconfig
@@ -4,6 +4,7 @@ config NET_DSA_HIRSCHMANN_HELLCREEK
depends on HAS_IOMEM
depends on NET_DSA
depends on PTP_1588_CLOCK
+ depends on LEDS_CLASS
select NET_DSA_TAG_HELLCREEK
help
This driver adds support for Hirschmann Hellcreek TSN switches.
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 09701c17f3f6..662e68a0e7e6 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -92,9 +92,7 @@
GSWIP_MDIO_PHY_FDUP_MASK)
/* GSWIP MII Registers */
-#define GSWIP_MII_CFG0 0x00
-#define GSWIP_MII_CFG1 0x02
-#define GSWIP_MII_CFG5 0x04
+#define GSWIP_MII_CFGp(p) (0x2 * (p))
#define GSWIP_MII_CFG_EN BIT(14)
#define GSWIP_MII_CFG_LDCLKDIS BIT(12)
#define GSWIP_MII_CFG_MODE_MIIP 0x0
@@ -392,17 +390,9 @@ static void gswip_mii_mask(struct gswip_priv *priv, u32 clear, u32 set,
static void gswip_mii_mask_cfg(struct gswip_priv *priv, u32 clear, u32 set,
int port)
{
- switch (port) {
- case 0:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG0);
- break;
- case 1:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG1);
- break;
- case 5:
- gswip_mii_mask(priv, clear, set, GSWIP_MII_CFG5);
- break;
- }
+ /* There's no MII_CFG register for the CPU port */
+ if (!dsa_is_cpu_port(priv->ds, port))
+ gswip_mii_mask(priv, clear, set, GSWIP_MII_CFGp(port));
}
static void gswip_mii_mask_pcdu(struct gswip_priv *priv, u32 clear, u32 set,
@@ -822,9 +812,8 @@ static int gswip_setup(struct dsa_switch *ds)
gswip_mdio_mask(priv, 0xff, 0x09, GSWIP_MDIO_MDC_CFG1);
/* Disable the xMII link */
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 0);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 1);
- gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, 5);
+ for (i = 0; i < priv->hw_info->max_ports; i++)
+ gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, i);
/* enable special tag insertion on cpu port */
gswip_switch_mask(priv, 0, GSWIP_FDMA_PCTRL_STEN,
@@ -1447,11 +1436,12 @@ static void gswip_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, Pause);
phylink_set(mask, Asym_Pause);
- /* With the exclusion of MII and Reverse MII, we support Gigabit,
- * including Half duplex
+ /* With the exclusion of MII, Reverse MII and Reduced MII, we
+ * support Gigabit, including Half duplex
*/
if (state->interface != PHY_INTERFACE_MODE_MII &&
- state->interface != PHY_INTERFACE_MODE_REVMII) {
+ state->interface != PHY_INTERFACE_MODE_REVMII &&
+ state->interface != PHY_INTERFACE_MODE_RMII) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseT_Half);
}
@@ -1541,9 +1531,7 @@ static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
- /* Enable the xMII interface only for the external PHY */
- if (interface != PHY_INTERFACE_MODE_INTERNAL)
- gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
+ gswip_mii_mask_cfg(priv, 0, GSWIP_MII_CFG_EN, port);
}
static void gswip_get_strings(struct dsa_switch *ds, int port, u32 stringset,
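The GSWIP_MII_CFGp(p) macro spaces the per-port MII_CFG registers 2 bytes apart, so port 5 now resolves to offset 0x0a rather than the 0x04 the old GSWIP_MII_CFG5 define pointed at. A compile-time expansion check, as a sketch:

```c
#include <linux/build_bug.h>

#define GSWIP_MII_CFGp(p)	(0x2 * (p))

static_assert(GSWIP_MII_CFGp(0) == 0x00);
static_assert(GSWIP_MII_CFGp(1) == 0x02);
static_assert(GSWIP_MII_CFGp(5) == 0x0a);	/* old GSWIP_MII_CFG5 was 0x04 */
```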
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index e24a99031b80..4d49c5f2b790 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -159,6 +159,8 @@ struct ar9331_sw_priv {
struct dsa_switch ds;
struct dsa_switch_ops ops;
struct irq_domain *irqdomain;
+ u32 irq_mask;
+ struct mutex lock_irq;
struct mii_bus *mbus; /* mdio master */
struct mii_bus *sbus; /* mdio slave */
struct regmap *regmap;
@@ -520,32 +522,44 @@ static irqreturn_t ar9331_sw_irq(int irq, void *data)
static void ar9331_sw_mask_irq(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
- struct regmap *regmap = priv->regmap;
- int ret;
- ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
- AR9331_SW_GINT_PHY_INT, 0);
- if (ret)
- dev_err(priv->dev, "could not mask IRQ\n");
+ priv->irq_mask = 0;
}
static void ar9331_sw_unmask_irq(struct irq_data *d)
{
struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ priv->irq_mask = AR9331_SW_GINT_PHY_INT;
+}
+
+static void ar9331_sw_irq_bus_lock(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&priv->lock_irq);
+}
+
+static void ar9331_sw_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ar9331_sw_priv *priv = irq_data_get_irq_chip_data(d);
struct regmap *regmap = priv->regmap;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_GINT_MASK,
- AR9331_SW_GINT_PHY_INT,
- AR9331_SW_GINT_PHY_INT);
+ AR9331_SW_GINT_PHY_INT, priv->irq_mask);
if (ret)
- dev_err(priv->dev, "could not unmask IRQ\n");
+ dev_err(priv->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&priv->lock_irq);
}
static struct irq_chip ar9331_sw_irq_chip = {
.name = AR9331_SW_NAME,
.irq_mask = ar9331_sw_mask_irq,
.irq_unmask = ar9331_sw_unmask_irq,
+ .irq_bus_lock = ar9331_sw_irq_bus_lock,
+ .irq_bus_sync_unlock = ar9331_sw_irq_bus_sync_unlock,
};
static int ar9331_sw_irq_map(struct irq_domain *domain, unsigned int irq,
@@ -584,6 +598,7 @@ static int ar9331_sw_irq_init(struct ar9331_sw_priv *priv)
return irq ? irq : -EINVAL;
}
+ mutex_init(&priv->lock_irq);
ret = devm_request_threaded_irq(dev, irq, NULL, ar9331_sw_irq,
IRQF_ONESHOT, AR9331_SW_NAME, priv);
if (ret) {
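The ar9331 mask/unmask callbacks above only update a cached irq_mask because they run under the irq descriptor's raw spinlock, where the sleeping regmap I/O is forbidden; the cached value is committed from irq_bus_sync_unlock(), which runs in sleepable context. The slow-bus irqchip pattern in miniature (all names hypothetical, write_hw_mask() standing in for the real regmap write):

```c
#include <linux/irq.h>
#include <linux/mutex.h>

struct slow_irqchip {
	struct mutex lock;	/* bus lock, held across mask changes */
	u32 cached_mask;	/* updated by (un)mask in atomic context */
};

static void write_hw_mask(u32 mask);	/* hypothetical slow-bus write */

static void slow_irq_bus_lock(struct irq_data *d)
{
	struct slow_irqchip *c = irq_data_get_irq_chip_data(d);

	mutex_lock(&c->lock);
}

static void slow_irq_bus_sync_unlock(struct irq_data *d)
{
	struct slow_irqchip *c = irq_data_get_irq_chip_data(d);

	/* sleepable context: safe to perform the real bus access */
	write_hw_mask(c->cached_mask);
	mutex_unlock(&c->lock);
}
```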
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 862ea44beea7..5ed80d9a6b9f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -828,13 +828,13 @@ static int emac_probe(struct platform_device *pdev)
db->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(db->clk)) {
ret = PTR_ERR(db->clk);
- goto out_iounmap;
+ goto out_dispose_mapping;
}
ret = clk_prepare_enable(db->clk);
if (ret) {
dev_err(&pdev->dev, "Error couldn't enable clock (%d)\n", ret);
- goto out_iounmap;
+ goto out_dispose_mapping;
}
ret = sunxi_sram_claim(&pdev->dev);
@@ -893,6 +893,8 @@ out_release_sram:
sunxi_sram_release(&pdev->dev);
out_clk_disable_unprepare:
clk_disable_unprepare(db->clk);
+out_dispose_mapping:
+ irq_dispose_mapping(ndev->irq);
out_iounmap:
iounmap(db->membase);
out:
@@ -911,6 +913,7 @@ static int emac_remove(struct platform_device *pdev)
unregister_netdev(ndev);
sunxi_sram_release(&pdev->dev);
clk_disable_unprepare(db->clk);
+ irq_dispose_mapping(ndev->irq);
iounmap(db->membase);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
index efb33c078a3c..cec2018c84a9 100644
--- a/drivers/net/ethernet/aquantia/Kconfig
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -19,7 +19,6 @@ if NET_VENDOR_AQUANTIA
config AQTION
tristate "aQuantia AQtion(tm) Support"
depends on PCI
- depends on X86_64 || ARM64 || COMPILE_TEST
depends on MACSEC || MACSEC=n
help
This enables the support for the aQuantia AQtion(tm) Ethernet card.
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 0fdd19d99d99..b1ae9eb8f247 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2577,6 +2577,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
NETIF_F_HW_VLAN_CTAG_TX;
dev->hw_features |= dev->features;
dev->vlan_features |= dev->features;
+ dev->max_mtu = UMAC_MAX_MTU_SIZE;
/* Request the WOL interrupt and advertise suspend if available */
priv->wol_irq_disabled = 1;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 4edd6f8e017e..d10e4f85dd11 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6790,8 +6790,10 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
if (!ctx->tqm_fp_rings_count)
ctx->tqm_fp_rings_count = bp->max_q;
+ else if (ctx->tqm_fp_rings_count > BNXT_MAX_TQM_FP_RINGS)
+ ctx->tqm_fp_rings_count = BNXT_MAX_TQM_FP_RINGS;
- tqm_rings = ctx->tqm_fp_rings_count + 1;
+ tqm_rings = ctx->tqm_fp_rings_count + BNXT_MAX_TQM_SP_RINGS;
ctx_pg = kcalloc(tqm_rings, sizeof(*ctx_pg), GFP_KERNEL);
if (!ctx_pg) {
kfree(ctx);
@@ -6925,7 +6927,8 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
pg_dir = &req.tqm_sp_page_dir,
ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
- i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
+ i < BNXT_MAX_TQM_RINGS;
+ i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
if (!(enables & ena))
continue;
@@ -12887,10 +12890,10 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
struct net_device *netdev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(netdev);
int err = 0, off;
- pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
netdev_info(bp->dev, "PCI Slot Reset\n");
@@ -12919,22 +12922,8 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
pci_save_state(pdev);
err = bnxt_hwrm_func_reset(bp);
- if (!err) {
- err = bnxt_hwrm_func_qcaps(bp);
- if (!err && netif_running(netdev))
- err = bnxt_open(netdev);
- }
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
result = PCI_ERS_RESULT_RECOVERED;
- }
- }
-
- if (result != PCI_ERS_RESULT_RECOVERED) {
- if (netif_running(netdev))
- dev_close(netdev);
- pci_disable_device(pdev);
}
rtnl_unlock();
@@ -12952,10 +12941,21 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
static void bnxt_io_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
+ int err;
+ netdev_info(bp->dev, "PCI Slot Resume\n");
rtnl_lock();
- netif_device_attach(netdev);
+ err = bnxt_hwrm_func_qcaps(bp);
+ if (!err && netif_running(netdev))
+ err = bnxt_open(netdev);
+
+ bnxt_ulp_start(bp, err);
+ if (!err) {
+ bnxt_reenable_sriov(bp);
+ netif_device_attach(netdev);
+ }
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 950ea26ae0d2..51996c85547e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1436,6 +1436,11 @@ struct bnxt_ctx_pg_info {
struct bnxt_ctx_pg_info **ctx_pg_tbl;
};
+#define BNXT_MAX_TQM_SP_RINGS 1
+#define BNXT_MAX_TQM_FP_RINGS 8
+#define BNXT_MAX_TQM_RINGS \
+ (BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)
+
struct bnxt_ctx_mem_info {
u32 qp_max_entries;
u16 qp_min_qp1_entries;
@@ -1474,7 +1479,7 @@ struct bnxt_ctx_mem_info {
struct bnxt_ctx_pg_info stat_mem;
struct bnxt_ctx_pg_info mrav_mem;
struct bnxt_ctx_pg_info tim_mem;
- struct bnxt_ctx_pg_info *tqm_mem[9];
+ struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
};
struct bnxt_fw_health {
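With one slow-path ring and at most eight fast-path rings, BNXT_MAX_TQM_RINGS evaluates to 9, matching both the new loop bound in bnxt_hwrm_func_backing_store_cfg() and the old hardcoded tqm_mem[9]. A sketch of the arithmetic as a compile-time check:

```c
#include <linux/build_bug.h>

#define BNXT_MAX_TQM_SP_RINGS	1
#define BNXT_MAX_TQM_FP_RINGS	8
#define BNXT_MAX_TQM_RINGS \
	(BNXT_MAX_TQM_SP_RINGS + BNXT_MAX_TQM_FP_RINGS)

static_assert(BNXT_MAX_TQM_RINGS == 9);	/* old code hardcoded tqm_mem[9] */
```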
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index be85dad2e3bc..fcca023f22e5 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -4069,8 +4069,10 @@ static int bcmgenet_probe(struct platform_device *pdev)
clk_disable_unprepare(priv->clk);
err = register_netdev(dev);
- if (err)
+ if (err) {
+ bcmgenet_mii_exit(dev);
goto err;
+ }
return err;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d5d910916c2e..814a5b10141d 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -467,7 +467,7 @@ static void macb_set_tx_clk(struct macb *bp, int speed)
{
long ferr, rate, rate_rounded;
- if (!bp->tx_clk || !(bp->caps & MACB_CAPS_CLK_HW_CHG))
+ if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
return;
switch (speed) {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index a0e0d8a83681..51dd030b3b36 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -621,7 +621,7 @@ static void chtls_reset_synq(struct listen_ctx *listen_ctx)
while (!skb_queue_empty(&listen_ctx->synq)) {
struct chtls_sock *csk =
- container_of((struct synq *)__skb_dequeue
+ container_of((struct synq *)skb_peek
(&listen_ctx->synq), struct chtls_sock, synq);
struct sock *child = csk->sk;
@@ -1109,6 +1109,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
const struct cpl_pass_accept_req *req,
struct chtls_dev *cdev)
{
+ struct adapter *adap = pci_get_drvdata(cdev->pdev);
struct neighbour *n = NULL;
struct inet_sock *newinet;
const struct iphdr *iph;
@@ -1118,9 +1119,10 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
struct dst_entry *dst;
struct tcp_sock *tp;
struct sock *newsk;
+ bool found = false;
u16 port_id;
int rxq_idx;
- int step;
+ int step, i;
iph = (const struct iphdr *)network_hdr;
newsk = tcp_create_openreq_child(lsk, oreq, cdev->askb);
@@ -1152,7 +1154,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
n = dst_neigh_lookup(dst, &ip6h->saddr);
#endif
}
- if (!n)
+ if (!n || !n->dev)
goto free_sk;
ndev = n->dev;
@@ -1161,6 +1163,13 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
if (is_vlan_dev(ndev))
ndev = vlan_dev_real_dev(ndev);
+ for_each_port(adap, i)
+ if (cdev->ports[i] == ndev)
+ found = true;
+
+ if (!found)
+ goto free_dst;
+
port_id = cxgb4_port_idx(ndev);
csk = chtls_sock_create(cdev);
@@ -1238,6 +1247,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
free_csk:
chtls_sock_release(&csk->kref);
free_dst:
+ neigh_release(n);
dst_release(dst);
free_sk:
inet_csk_prepare_forced_close(newsk);
@@ -1387,7 +1397,7 @@ static void chtls_pass_accept_request(struct sock *sk,
newsk = chtls_recv_sock(sk, oreq, network_hdr, req, cdev);
if (!newsk)
- goto free_oreq;
+ goto reject;
if (chtls_get_module(newsk))
goto reject;
@@ -1403,8 +1413,6 @@ static void chtls_pass_accept_request(struct sock *sk,
kfree_skb(skb);
return;
-free_oreq:
- chtls_reqsk_free(oreq);
reject:
mk_tid_release(reply_skb, 0, tid);
cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
@@ -1589,6 +1597,11 @@ static int chtls_pass_establish(struct chtls_dev *cdev, struct sk_buff *skb)
sk_wake_async(sk, 0, POLL_OUT);
data = lookup_stid(cdev->tids, stid);
+ if (!data) {
+ /* the listening server has been closed */
+ kfree_skb(skb);
+ goto unlock;
+ }
lsk = ((struct listen_ctx *)data)->lsk;
bh_lock_sock(lsk);
@@ -1997,39 +2010,6 @@ static void t4_defer_reply(struct sk_buff *skb, struct chtls_dev *cdev,
spin_unlock_bh(&cdev->deferq.lock);
}
-static void send_abort_rpl(struct sock *sk, struct sk_buff *skb,
- struct chtls_dev *cdev, int status, int queue)
-{
- struct cpl_abort_req_rss *req = cplhdr(skb);
- struct sk_buff *reply_skb;
- struct chtls_sock *csk;
-
- csk = rcu_dereference_sk_user_data(sk);
-
- reply_skb = alloc_skb(sizeof(struct cpl_abort_rpl),
- GFP_KERNEL);
-
- if (!reply_skb) {
- req->status = (queue << 1);
- t4_defer_reply(skb, cdev, send_defer_abort_rpl);
- return;
- }
-
- set_abort_rpl_wr(reply_skb, GET_TID(req), status);
- kfree_skb(skb);
-
- set_wr_txq(reply_skb, CPL_PRIORITY_DATA, queue);
- if (csk_conn_inline(csk)) {
- struct l2t_entry *e = csk->l2t_entry;
-
- if (e && sk->sk_state != TCP_SYN_RECV) {
- cxgb4_l2t_send(csk->egress_dev, reply_skb, e);
- return;
- }
- }
- cxgb4_ofld_send(cdev->lldi->ports[0], reply_skb);
-}
-
static void chtls_send_abort_rpl(struct sock *sk, struct sk_buff *skb,
struct chtls_dev *cdev,
int status, int queue)
@@ -2078,9 +2058,9 @@ static void bl_abort_syn_rcv(struct sock *lsk, struct sk_buff *skb)
queue = csk->txq_idx;
skb->sk = NULL;
+ chtls_send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
+ CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(child, lsk);
- send_abort_rpl(child, skb, BLOG_SKB_CB(skb)->cdev,
- CPL_ABORT_NO_RST, queue);
}
static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
@@ -2110,8 +2090,8 @@ static int abort_syn_rcv(struct sock *sk, struct sk_buff *skb)
if (!sock_owned_by_user(psk)) {
int queue = csk->txq_idx;
+ chtls_send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
do_abort_syn_rcv(sk, psk);
- send_abort_rpl(sk, skb, cdev, CPL_ABORT_NO_RST, queue);
} else {
skb->sk = sk;
BLOG_SKB_CB(skb)->backlog_rcv = bl_abort_syn_rcv;
@@ -2129,9 +2109,6 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
int queue = csk->txq_idx;
if (is_neg_adv(req->status)) {
- if (sk->sk_state == TCP_SYN_RECV)
- chtls_set_tcb_tflag(sk, 0, 0);
-
kfree_skb(skb);
return;
}
@@ -2158,12 +2135,12 @@ static void chtls_abort_req_rss(struct sock *sk, struct sk_buff *skb)
if (sk->sk_state == TCP_SYN_RECV && !abort_syn_rcv(sk, skb))
return;
- chtls_release_resources(sk);
- chtls_conn_done(sk);
}
chtls_send_abort_rpl(sk, skb, BLOG_SKB_CB(skb)->cdev,
rst_status, queue);
+ chtls_release_resources(sk);
+ chtls_conn_done(sk);
}
static void chtls_abort_rpl_rss(struct sock *sk, struct sk_buff *skb)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 0981fe9652e5..3d9b0b161e24 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1211,7 +1211,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free2;
+ goto free3;
}
ret = ethoc_mdio_probe(netdev);
@@ -1243,6 +1243,7 @@ error2:
netif_napi_del(&priv->napi);
error:
mdiobus_unregister(priv->mdio);
+free3:
mdiobus_free(priv->mdio);
free2:
clk_disable_unprepare(priv->clk);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 91cff93dbdae..fb0bcd18ec0c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -878,7 +878,7 @@ static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
swa = (struct dpaa2_eth_swa *)sgt_buf;
swa->type = DPAA2_ETH_SWA_SINGLE;
swa->single.skb = skb;
- swa->sg.sgt_size = sgt_buf_size;
+ swa->single.sgt_size = sgt_buf_size;
/* Separately map the SGT buffer */
sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index c8e5d889bd81..21de56345503 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
};
module_platform_driver(fs_enet_bb_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index 8b51ee142fa3..152f4d83765a 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
};
module_platform_driver(fs_enet_fec_mdio_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index ba8869c3d891..6d853f018d53 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
dev->mtu = 1500;
+ dev->max_mtu = 1518;
ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
struct device_node *np = ofdev->dev.of_node;
unregister_netdev(dev);
- free_netdev(dev);
ucc_geth_memclean(ugeth);
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(ugeth->ug_info->tbi_node);
of_node_put(ugeth->ug_info->phy_node);
+ free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index 1a9bdf66a7d8..11d4bf5dc21f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
u32 tqptr; /* a base pointer to the Tx Queues Memory
Region */
- u8 res2[0x80 - 0x74];
+ u8 res2[0x78 - 0x74];
+ u64 snums_en;
+ u32 l2l3baseptr; /* top byte consists of a few other bit fields */
+
+ u16 mtu[8];
+ u8 res3[0xa8 - 0x94];
+ u32 wrrtablebase; /* top byte is reserved */
+ u8 res4[0xc0 - 0xac];
} __packed;
/* structure representing Extended Filtering Global Parameters in PRAM */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 7165da0ee9aa..a6e3f07caf99 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -415,6 +415,10 @@ static void __lb_other_process(struct hns_nic_ring_data *ring_data,
 /* for multi buffer */
new_skb = skb_copy(skb, GFP_ATOMIC);
dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
skb = new_skb;
check_ok = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index fb5e8842983c..33defa4c180a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -169,7 +169,7 @@ struct hclgevf_mbx_arq_ring {
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
- (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#define hclge_mbx_head_ptr_move_arq(arq) \
- (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_SIZE)
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e6f37f91c489..c242883fea5d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -752,7 +752,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
- if (hdev->hw.mac.phydev) {
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
count += 1;
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
@@ -4537,8 +4538,8 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -4730,6 +4731,8 @@ static void hclge_rss_init_cfg(struct hclge_dev *hdev)
vport[i].rss_tuple_sets.ipv6_udp_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
HCLGE_RSS_INPUT_TUPLE_SCTP;
vport[i].rss_tuple_sets.ipv6_fragment_en =
HCLGE_RSS_INPUT_TUPLE_OTHER;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 50a294dfaff5..ca46bc9110d7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -107,6 +107,8 @@
#define HCLGE_D_IP_BIT BIT(2)
#define HCLGE_S_IP_BIT BIT(3)
#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
#define HCLGE_RSS_TC_SIZE_0 1
#define HCLGE_RSS_TC_SIZE_1 2
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 145757cb70f9..674b3a22e91f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -917,8 +917,8 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
req->ipv4_sctp_en = tuple_sets;
break;
case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
return -EINVAL;
req->ipv6_sctp_en = tuple_sets;
@@ -2502,7 +2502,10 @@ static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
- tuple_sets->ipv6_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 1b183bc35604..f6d817a3edcb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -122,6 +122,8 @@
#define HCLGEVF_D_IP_BIT BIT(2)
#define HCLGEVF_S_IP_BIT BIT(3)
#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
#define HCLGEVF_STATS_TIMER_INTERVAL 36U
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index a2191392ca4f..9778c83150f1 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -955,6 +955,7 @@ static void release_resources(struct ibmvnic_adapter *adapter)
release_rx_pools(adapter);
release_napi(adapter);
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
}
@@ -2171,10 +2172,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
napi_schedule(&adapter->napi[i]);
if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
- adapter->reset_reason == VNIC_RESET_MOBILITY) {
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
- }
+ adapter->reset_reason == VNIC_RESET_MOBILITY)
+ __netdev_notify_peers(netdev);
rc = 0;
@@ -2249,8 +2248,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
goto out;
}
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
- call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
+ __netdev_notify_peers(netdev);
out:
/* restore adapter state if reset failed */
if (rc)
@@ -2344,8 +2342,7 @@ static void __ibmvnic_reset(struct work_struct *work)
set_current_state(TASK_UNINTERRUPTIBLE);
schedule_timeout(60 * HZ);
}
- } else if (!(rwi->reset_reason == VNIC_RESET_FATAL &&
- adapter->from_passive_init)) {
+ } else {
rc = do_reset(adapter, rwi, reset_state);
}
kfree(rwi);
@@ -2984,9 +2981,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
int rc;
if (!scrq) {
- netdev_dbg(adapter->netdev,
- "Invalid scrq reset. irq (%d) or msgs (%p).\n",
- scrq->irq, scrq->msgs);
+ netdev_dbg(adapter->netdev, "Invalid scrq reset.\n");
return -EINVAL;
}
@@ -3876,7 +3871,9 @@ static int send_login(struct ibmvnic_adapter *adapter)
return -1;
}
+ release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
+
client_data_len = vnic_client_data_len(adapter);
buffer_size =
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ba7a0f8f6937..5b2143f4b1f8 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -436,6 +436,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
+#define FLAG2_ENABLE_S0IX_FLOWS BIT(15)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 03215b0aee4b..06442e6bef73 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -23,6 +23,13 @@ struct e1000_stats {
int stat_offset;
};
+static const char e1000e_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define E1000E_PRIV_FLAGS_S0IX_ENABLED BIT(0)
+ "s0ix-enabled",
+};
+
+#define E1000E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(e1000e_priv_flags_strings)
+
#define E1000_STAT(str, m) { \
.stat_string = str, \
.type = E1000_STATS, \
@@ -1776,6 +1783,8 @@ static int e1000e_get_sset_count(struct net_device __always_unused *netdev,
return E1000_TEST_LEN;
case ETH_SS_STATS:
return E1000_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return E1000E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -2097,6 +2106,10 @@ static void e1000_get_strings(struct net_device __always_unused *netdev,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, e1000e_priv_flags_strings,
+ E1000E_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -2305,6 +2318,37 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
+static u32 e1000e_get_priv_flags(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ priv_flags |= E1000E_PRIV_FLAGS_S0IX_ENABLED;
+
+ return priv_flags;
+}
+
+static int e1000e_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags2 = adapter->flags2;
+
+ flags2 &= ~FLAG2_ENABLE_S0IX_FLOWS;
+ if (priv_flags & E1000E_PRIV_FLAGS_S0IX_ENABLED) {
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (hw->mac.type < e1000_pch_cnp)
+ return -EINVAL;
+ flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+ }
+
+ if (flags2 != adapter->flags2)
+ adapter->flags2 = flags2;
+
+ return 0;
+}
+
static const struct ethtool_ops e1000_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
.get_drvinfo = e1000_get_drvinfo,
@@ -2336,6 +2380,8 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.set_eee = e1000e_set_eee,
.get_link_ksettings = e1000_get_link_ksettings,
.set_link_ksettings = e1000_set_link_ksettings,
+ .get_priv_flags = e1000e_get_priv_flags,
+ .set_priv_flags = e1000e_set_priv_flags,
};
void e1000e_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9aa6fad8ed47..6fb46682b058 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1240,6 +1240,9 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
return 0;
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
+ struct e1000_adapter *adapter = hw->adapter;
+ bool firmware_bug = false;
+
if (force) {
/* Request ME un-configure ULP mode in the PHY */
mac_reg = er32(H2ME);
@@ -1248,16 +1251,24 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force)
ew32(H2ME, mac_reg);
}
- /* Poll up to 300msec for ME to clear ULP_CFG_DONE. */
+ /* Poll up to 2.5 seconds for ME to clear ULP_CFG_DONE.
+ * If this takes more than 1 second, show a warning indicating a
+ * firmware bug.
+ */
while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) {
- if (i++ == 30) {
+ if (i++ == 250) {
ret_val = -E1000_ERR_PHY;
goto out;
}
+ if (i > 100 && !firmware_bug)
+ firmware_bug = true;
usleep_range(10000, 11000);
}
- e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
+ if (firmware_bug)
+ e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10);
+ else
+ e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10);
if (force) {
mac_reg = er32(H2ME);
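Each iteration of the poll loop above sleeps roughly 10 ms, so the new bound of 250 iterations gives the ~2.5 s timeout and the i > 100 check marks the ~1 s firmware-bug threshold. Spelled out with hypothetical constant names that do not exist in the driver:

```c
/* Sketch only: the timing behind the magic numbers in the loop above. */
#define ULP_POLL_STEP_MS	10	/* usleep_range(10000, 11000) */
#define ULP_POLL_TIMEOUT_MS	(250 * ULP_POLL_STEP_MS)	/* 2500 ms */
#define ULP_POLL_WARN_MS	(100 * ULP_POLL_STEP_MS)	/* 1000 ms */
```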
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 128ab6898070..e9b82c209c2d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -103,45 +103,6 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
{0, NULL}
};
-struct e1000e_me_supported {
- u16 device_id; /* supported device ID */
-};
-
-static const struct e1000e_me_supported me_supported[] = {
- {E1000_DEV_ID_PCH_LPT_I217_LM},
- {E1000_DEV_ID_PCH_LPTLP_I218_LM},
- {E1000_DEV_ID_PCH_I218_LM2},
- {E1000_DEV_ID_PCH_I218_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM},
- {E1000_DEV_ID_PCH_SPT_I219_LM2},
- {E1000_DEV_ID_PCH_LBG_I219_LM3},
- {E1000_DEV_ID_PCH_SPT_I219_LM4},
- {E1000_DEV_ID_PCH_SPT_I219_LM5},
- {E1000_DEV_ID_PCH_CNP_I219_LM6},
- {E1000_DEV_ID_PCH_CNP_I219_LM7},
- {E1000_DEV_ID_PCH_ICP_I219_LM8},
- {E1000_DEV_ID_PCH_ICP_I219_LM9},
- {E1000_DEV_ID_PCH_CMP_I219_LM10},
- {E1000_DEV_ID_PCH_CMP_I219_LM11},
- {E1000_DEV_ID_PCH_CMP_I219_LM12},
- {E1000_DEV_ID_PCH_TGP_I219_LM13},
- {E1000_DEV_ID_PCH_TGP_I219_LM14},
- {E1000_DEV_ID_PCH_TGP_I219_LM15},
- {0}
-};
-
-static bool e1000e_check_me(u16 device_id)
-{
- struct e1000e_me_supported *id;
-
- for (id = (struct e1000e_me_supported *)me_supported;
- id->device_id; id++)
- if (device_id == id->device_id)
- return true;
-
- return false;
-}
-
/**
* __ew32_prepare - prepare to write to MAC CSR register on certain parts
* @hw: pointer to the HW structure
@@ -6962,7 +6923,6 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
e1000e_flush_lpic(pdev);
@@ -6970,13 +6930,13 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
e1000e_pm_freeze(dev);
rc = __e1000_shutdown(pdev, false);
- if (rc)
+ if (rc) {
e1000e_pm_thaw(dev);
-
- /* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
- e1000e_s0ix_entry_flow(adapter);
+ } else {
+ /* Introduce S0ix implementation */
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
+ e1000e_s0ix_entry_flow(adapter);
+ }
return rc;
}
@@ -6986,12 +6946,10 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev)
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = to_pci_dev(dev);
- struct e1000_hw *hw = &adapter->hw;
int rc;
/* Introduce S0ix implementation */
- if (hw->mac.type >= e1000_pch_cnp &&
- !e1000e_check_me(hw->adapter->pdev->device))
+ if (adapter->flags2 & FLAG2_ENABLE_S0IX_FLOWS)
e1000e_s0ix_exit_flow(adapter);
rc = __e1000_resume(pdev);
@@ -7655,6 +7613,9 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!(adapter->flags & FLAG_HAS_AMT))
e1000e_get_hw_control(adapter);
+ if (hw->mac.type >= e1000_pch_cnp)
+ adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
+
strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d231a2cdd98f..118473dfdcbd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -120,6 +120,7 @@ enum i40e_state_t {
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
__I40E_PF_RESET_REQUESTED,
+ __I40E_PF_RESET_AND_REBUILD_REQUESTED,
__I40E_CORE_RESET_REQUESTED,
__I40E_GLOBAL_RESET_REQUESTED,
__I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
};
#define I40E_PF_RESET_FLAG BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+ BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
/* VSI state flags */
enum i40e_vsi_state_t {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 1337686bd099..1db482d310c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+ bool lock_acquired);
static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
"FW LLDP is disabled\n" :
"FW LLDP is enabled\n");
+ } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+ /* Request a PF Reset
+ *
+ * Resets the PF and reinitializes the PF's VSI.
+ */
+ i40e_prep_for_reset(pf, lock_acquired);
+ i40e_reset_and_rebuild(pf, true, lock_acquired);
+
} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
int v;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 729c4f0d5ac5..21ee56420c3a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs) {
if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
}
ret = i40e_pci_sriov_enable(pdev, num_vfs);
goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (!pci_vfs_assigned(pf->pdev)) {
i40e_free_vfs(pf);
pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
- i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+ i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
} else {
dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
ret = -EINVAL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index bfa84bfb0488..47eb9c584a12 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -220,8 +220,11 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
} while (count);
no_buffers:
- if (rx_ring->next_to_use != ntu)
+ if (rx_ring->next_to_use != ntu) {
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
i40e_release_rx_desc(rx_ring, ntu);
+ }
return ok;
}
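The cleared status qword matters because the Rx clean path decides descriptor completion from the DD bit in that writeback area; a stale bit on a recycled descriptor would read as "done" before hardware ever wrote it. A sketch of the check the clear protects (field names as in the hunk above; the helper name is hypothetical):

```c
/* Sketch: completion test guarded by the status clear above. */
static bool i40e_rx_desc_done(const union i40e_rx_desc *rx_desc)
{
	u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

	return !!(qword & BIT(I40E_RX_DESC_STATUS_DD_SHIFT));
}
```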
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 95543dfd4fe7..0a867d64d467 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
netif_tx_stop_all_queues(netdev);
if (CLIENT_ALLOWED(adapter)) {
err = iavf_lan_add_device(adapter);
- if (err) {
- rtnl_unlock();
+ if (err)
dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
err);
- }
}
dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 39757b4cf8f4..1782146db644 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -446,8 +446,11 @@ bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
}
} while (--count);
- if (rx_ring->next_to_use != ntu)
+ if (rx_ring->next_to_use != ntu) {
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
ice_release_rx_desc(rx_ring, ntu);
+ }
return ret;
}
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index bf48f0ded9c7..925161959b9b 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -219,7 +219,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
spin_unlock_irqrestore(&lp->lock, flags);
- return NETDEV_TX_BUSY;
+ return NETDEV_TX_OK;
}
}
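The korina change follows the ndo_start_xmit contract: NETDEV_TX_BUSY tells the core to requeue the very same skb, so it must never be returned after the driver has already freed it. A minimal sketch of a drop path obeying that rule (korina_drop_oversized() is a hypothetical name):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: once the skb is consumed, only NETDEV_TX_OK is legal. */
static netdev_tx_t korina_drop_oversized(struct sk_buff *skb,
					 struct net_device *dev)
{
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);		/* skb is gone ... */
	return NETDEV_TX_OK;		/* ... so never NETDEV_TX_BUSY */
}
```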
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 563ceac3060f..bc4d8d144401 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4432,7 +4432,7 @@ static int mvneta_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
struct bpf_prog *old_prog;
if (prog && dev->mtu > MVNETA_MAX_RX_BUF_SIZE) {
- NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported on XDP");
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
return -EOPNOTSUPP;
}
@@ -5255,7 +5255,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_port_power_up(pp, pp->phy_interface);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- return err;
+ goto err_netdev;
}
/* Armada3700 network controller does not support per-cpu
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index afdd22827223..4b1808acef58 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
if (port->gop_id == 2)
- val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+ val |= GENCONF_CTRL0_PORT0_RGMII;
else if (port->gop_id == 3)
val |= GENCONF_CTRL0_PORT1_RGMII_MII;
regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -2370,17 +2370,18 @@ static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned int thread;
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
- mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
-
- put_cpu();
+ /* PKT-coalescing registers are per-queue + per-thread */
+ for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
+ mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
+ }
}
static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
@@ -5487,7 +5488,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
struct mvpp2 *priv = port->priv;
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- int queue, err;
+ int queue, err, val;
/* Checks for hardware constraints */
if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5502,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
+ if (mvpp2_is_xlg(port->phy_interface)) {
+ val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+ val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+ val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+ } else {
+ val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+ val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+ writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+ }
+
port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 5692c6087bbb..a30eb90ba3d2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
return -EINVAL;
}
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+ unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+ struct mvpp2_prs_entry pe;
+ unsigned int len;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* For all ports - drop flow control frames */
+ pe.index = MVPP2_PE_FC_DROP;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+ /* Set match on DA */
+ len = ETH_ALEN;
+ while (len--)
+ mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Enable/disable dropping all mac da's */
static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
{
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
/* Create dummy entries for drop all and promiscuous modes */
+ mvpp2_prs_drop_fc(priv);
mvpp2_prs_mac_drop_all_set(priv, 0, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
MVPP2_PRS_RI_L3_PROTO_MASK);
- /* Skip eth_type + 4 bytes of IPv6 header */
- mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+ /* Jump to the DIP of the IPv6 header */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+ MVPP2_MAX_L3_ADDR_SIZE,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
/* Set L3 offset */
mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
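
The new mvpp2_prs_drop_fc() entry keys on the reserved 802.3x PAUSE destination address 01:80:C2:00:00:01 and sets the drop bit for all ports, so pause frames never reach the host. A standalone sketch of the byte-by-byte DA match used in the TCAM setup loop above:

    #include <stdio.h>

    #define ETH_ALEN 6

    /* 802.3x PAUSE frames use this reserved multicast DA. */
    static const unsigned char pause_da[ETH_ALEN] = {
    	0x01, 0x80, 0xC2, 0x00, 0x00, 0x01
    };

    /* Byte-by-byte DA match in the style of the while (len--) loop. */
    static int is_pause_frame(const unsigned char *da)
    {
    	unsigned int len = ETH_ALEN;

    	while (len--)
    		if (da[len] != pause_da[len])
    			return 0;
    	return 1;
    }

    int main(void)
    {
    	unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };

    	printf("drop=%d\n", is_pause_frame(da));
    	return 0;
    }
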
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
index e22f6c85d380..4b68dd374733 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
@@ -129,7 +129,7 @@
#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_UC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 7d0f96290943..1a8f5a039d50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -871,8 +871,10 @@ static int cgx_lmac_init(struct cgx *cgx)
if (!lmac)
return -ENOMEM;
lmac->name = kcalloc(1, sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
- if (!lmac->name)
- return -ENOMEM;
+ if (!lmac->name) {
+ err = -ENOMEM;
+ goto err_lmac_free;
+ }
sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
lmac->lmac_id = i;
lmac->cgx = cgx;
@@ -883,7 +885,7 @@ static int cgx_lmac_init(struct cgx *cgx)
CGX_LMAC_FWI + i * 9),
cgx_fwi_event_handler, 0, lmac->name, lmac);
if (err)
- return err;
+ goto err_irq;
/* Enable interrupt */
cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
@@ -895,6 +897,12 @@ static int cgx_lmac_init(struct cgx *cgx)
}
return cgx_lmac_verify_fwi_version(cgx);
+
+err_irq:
+ kfree(lmac->name);
+err_lmac_free:
+ kfree(lmac);
+ return err;
}
static int cgx_lmac_exit(struct cgx *cgx)
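
The cgx_lmac_init() change converts bare error returns into the usual goto-based unwind so the lmac allocation and its name are not leaked when a later step fails. A compact userspace sketch of the pattern, with a hypothetical failure point standing in for the request_irq() call:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Reverse-order unwind: each allocation gets a label, and failures
     * jump to the label that frees everything allocated so far. */
    static int init_one(void)
    {
    	char *lmac, *name;
    	int err;

    	lmac = malloc(64);
    	if (!lmac)
    		return -1;

    	name = strdup("cgx_fwi_0_0");
    	if (!name) {
    		err = -1;
    		goto err_lmac_free;
    	}

    	if (0 /* hypothetical request_irq() failure */) {
    		err = -1;
    		goto err_name_free;
    	}

    	printf("initialized %s\n", name);
    	free(name);
    	free(lmac);
    	return 0;

    err_name_free:
    	free(name);
    err_lmac_free:
    	free(lmac);
    	return err;
    }

    int main(void)
    {
    	return init_one() ? 1 : 0;
    }
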
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 3f9d0ab6d5ae..bc0e4113370e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -275,7 +275,8 @@ static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
enum npa_af_rvu_health health_reporter)
{
struct rvu_npa_event_ctx *npa_event_context;
- unsigned int intr_val, alloc_dis, free_dis;
+ unsigned int alloc_dis, free_dis;
+ u64 intr_val;
int err;
npa_event_context = ctx;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 74d466796b7c..d5fc72b1a36f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -90,7 +90,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int cq_idx)
{
struct mlx4_en_dev *mdev = priv->mdev;
- int err = 0;
+ int irq, err = 0;
int timestamp_en = 0;
bool assigned_eq = false;
@@ -116,10 +116,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
assigned_eq = true;
}
-
- cq->irq_desc =
- irq_to_desc(mlx4_eq_get_irq(mdev->dev,
- cq->vector));
+ irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
+ cq->aff_mask = irq_get_effective_affinity_mask(irq);
} else {
/* For TX we use the same irq per
ring we assigned for the RX */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7954c1daf2b6..c1c9118a66c9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -958,18 +958,14 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget || !clean_complete) {
- const struct cpumask *aff;
- struct irq_data *idata;
int cpu_curr;
/* in case we got here because of !clean_complete */
done = budget;
cpu_curr = smp_processor_id();
- idata = irq_desc_get_irq_data(cq->irq_desc);
- aff = irq_data_get_affinity_mask(idata);
- if (likely(cpumask_test_cpu(cpu_curr, aff)))
+ if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
return budget;
/* Current cpu is not according to smp_irq_affinity -
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 17f2b1919378..e8ed23190de0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -47,6 +47,7 @@
#endif
#include <linux/cpu_rmap.h>
#include <linux/ptp_clock_kernel.h>
+#include <linux/irq.h>
#include <net/xdp.h>
#include <linux/mlx4/device.h>
@@ -365,7 +366,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
- struct irq_desc *irq_desc;
+ const struct cpumask *aff_mask;
};
struct mlx4_en_port_profile {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a1a81cfeb607..055baf3b6cb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -684,7 +684,7 @@ struct mlx5e_channel {
spinlock_t async_icosq_lock;
/* data path - accessed per napi poll */
- struct irq_desc *irq_desc;
+ const struct cpumask *aff_mask;
struct mlx5e_ch_stats *stats;
/* control */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 351118985a57..2a2bac30daaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -479,7 +479,6 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
c->stats = &priv->port_ptp_stats.ch;
- c->irq_desc = irq_to_desc(irq);
c->lag_port = lag_port;
netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
index 28aa5ae118f4..90c98ea63b7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
@@ -28,7 +28,6 @@ struct mlx5e_port_ptp {
u8 lag_port;
/* data path - accessed per napi poll */
- struct irq_desc *irq_desc;
struct mlx5e_ch_stats *stats;
/* control */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index d29af7b9c695..76177f7c5ec2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
if (!reg_c0)
return true;
+ /* If reg_c0 is not equal to the default flow tag then skb->mark
+ * is not supported and must be reset back to 0.
+ */
+ skb->mark = 0;
+
priv = netdev_priv(skb->dev);
esw = priv->mdev->priv.eswitch;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index e521254d886e..072363e73f1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
u16 zone;
};
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
struct mlx5_fc *counter;
refcount_t refcount;
+ bool is_shared;
};
struct mlx5_ct_entry {
struct rhash_head node;
struct rhash_head tuple_node;
struct rhash_head tuple_nat_node;
- struct mlx5_ct_shared_counter *shared_counter;
+ struct mlx5_ct_counter *counter;
unsigned long cookie;
unsigned long restore_cookie;
struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
}
static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
{
- if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+ if (entry->counter->is_shared &&
+ !refcount_dec_and_test(&entry->counter->refcount))
return;
- mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
- kfree(entry->shared_counter);
+ mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+ kfree(entry->counter);
}
static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
attr->dest_ft = ct_priv->post_ct;
attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
attr->outer_match_level = MLX5_MATCH_L4;
- attr->counter = entry->shared_counter->counter;
+ attr->counter = entry->counter->counter;
attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ err_attr:
return err;
}
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+ struct mlx5_ct_counter *counter;
+ int ret;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ counter->is_shared = false;
+ counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ if (IS_ERR(counter->counter)) {
+ ct_dbg("Failed to create counter for ct entry");
+ ret = PTR_ERR(counter->counter);
+ kfree(counter);
+ return ERR_PTR(ret);
+ }
+
+ return counter;
+}
+
+static struct mlx5_ct_counter *
mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
struct mlx5_ct_tuple rev_tuple = entry->tuple;
- struct mlx5_ct_shared_counter *shared_counter;
- struct mlx5_core_dev *dev = ct_priv->dev;
+ struct mlx5_ct_counter *shared_counter;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
tuples_ht_params);
if (rev_entry) {
- if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+ if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
mutex_unlock(&ct_priv->shared_counter_lock);
- return rev_entry->shared_counter;
+ return rev_entry->counter;
}
}
mutex_unlock(&ct_priv->shared_counter_lock);
- shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
- if (!shared_counter)
- return ERR_PTR(-ENOMEM);
-
- shared_counter->counter = mlx5_fc_create(dev, true);
- if (IS_ERR(shared_counter->counter)) {
- ct_dbg("Failed to create counter for ct entry");
- ret = PTR_ERR(shared_counter->counter);
- kfree(shared_counter);
+ shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+ if (IS_ERR(shared_counter)) {
+ ret = PTR_ERR(shared_counter);
return ERR_PTR(ret);
}
+ shared_counter->is_shared = true;
refcount_set(&shared_counter->refcount, 1);
return shared_counter;
}
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
{
int err;
- entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
- if (IS_ERR(entry->shared_counter)) {
- err = PTR_ERR(entry->shared_counter);
- ct_dbg("Failed to create counter for ct entry");
+ if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+ entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+ else
+ entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+ if (IS_ERR(entry->counter)) {
+ err = PTR_ERR(entry->counter);
return err;
}
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
err_nat:
mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
}
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
tuples_ht_params);
mutex_unlock(&ct_priv->shared_counter_lock);
- mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+ mlx5_tc_ct_counter_put(ct_priv, entry);
}
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
if (!entry)
return -ENOENT;
- mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+ mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
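
The tc_ct rework keeps the shared, refcounted counter only as a fallback: when connection-tracking accounting (nf_ct_acct) is enabled, each entry gets a dedicated counter so per-flow stats stay accurate, and the put path skips the refcount handling for dedicated counters. A minimal model of that lifecycle, assuming nothing beyond the is_shared flag added above:

    #include <stdio.h>
    #include <stdlib.h>

    /* A refcount plus an is_shared flag: a dedicated counter is freed
     * unconditionally, a shared one only on the last put. */
    struct ct_counter {
    	int refcount;
    	int is_shared;
    };

    static struct ct_counter *counter_create(int shared)
    {
    	struct ct_counter *c = calloc(1, sizeof(*c));

    	if (c) {
    		c->is_shared = shared;
    		c->refcount = 1;
    	}
    	return c;
    }

    static void counter_put(struct ct_counter *c)
    {
    	if (c->is_shared && --c->refcount > 0)
    		return;		/* other entries still hold it */
    	printf("destroying %s counter\n",
    	       c->is_shared ? "shared" : "dedicated");
    	free(c);
    }

    int main(void)
    {
    	struct ct_counter *shared = counter_create(1);

    	shared->refcount++;	/* second entry reuses the reverse tuple */
    	counter_put(shared);	/* refcount 2 -> 1, still alive */
    	counter_put(shared);	/* last reference, destroyed */

    	counter_put(counter_create(0));	/* dedicated: freed immediately */
    	return 0;
    }
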
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7943eb30b837..4880f2179273 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
u8 tun_l4_proto;
};
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+ /* SWP offsets are in 2-bytes words */
+ eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+ eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
static inline void
mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
struct mlx5e_swp_spec *swp_spec)
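
mlx5e_eseg_swp_offsets_add_vlan() relies on SWP offsets being counted in 2-byte words, so a 4-byte 802.1Q tag inserted into the inline headers shifts every offset by VLAN_HLEN / 2 = 2 words. A self-contained sketch of that arithmetic; the struct is illustrative, not the real WQE layout:

    #include <stdio.h>

    #define VLAN_HLEN 4	/* 802.1Q tag: TPID + TCI */

    struct swp_offsets {	/* all values in 2-byte words */
    	unsigned char outer_l3, outer_l4, inner_l3, inner_l4;
    };

    /* When a VLAN tag is inserted into the inlined headers, every
     * parser offset moves down by VLAN_HLEN / 2 words. */
    static void swp_offsets_add_vlan(struct swp_offsets *o)
    {
    	o->outer_l3 += VLAN_HLEN / 2;
    	o->outer_l4 += VLAN_HLEN / 2;
    	o->inner_l3 += VLAN_HLEN / 2;
    	o->inner_l4 += VLAN_HLEN / 2;
    }

    int main(void)
    {
    	struct swp_offsets o = { .outer_l3 = 7, .outer_l4 = 17,
    				 .inner_l3 = 32, .inner_l4 = 42 };

    	swp_offsets_add_vlan(&o);
    	printf("outer L3 now at word %u (byte %u)\n",
    	       o.outer_l3, o.outer_l3 * 2);
    	return 0;
    }
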
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index 899b98aca0d3..1fae7fab8297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
}
static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
struct mlx5e_swp_spec swp_spec = {};
unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
}
mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+ if (skb_vlan_tag_present(skb) && ihs)
+ mlx5e_eseg_swp_offsets_add_vlan(eseg);
}
#else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
#ifdef CONFIG_MLX5_EN_IPSEC
if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
#if IS_ENABLED(CONFIG_GENEVE)
if (skb->encapsulation)
- mlx5e_tx_tunnel_accel(skb, eseg);
+ mlx5e_tx_tunnel_accel(skb, eseg, ihs);
#endif
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d9076d543104..2d37742a888c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+ const unsigned long link_modes, u8 autoneg)
+{
+ /* Extended link-mode has no speed limitations. */
+ if (ext)
+ return 0;
+
+ if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+ autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+ __func__);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
mlx5e_port_speed2linkmodes(mdev, speed, !ext);
- if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
- autoneg != AUTONEG_ENABLE) {
- netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
- __func__);
- err = -EINVAL;
+ err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+ if (err)
goto out;
- }
link_modes = link_modes & eproto.cap;
if (!link_modes) {
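
mlx5e_speed_validate() simply factors the 56G/autoneg rule out of the set_link_ksettings path: extended link modes are exempt, while the legacy 56GBASE-R4 mode is only valid with autoneg enabled. A sketch with an illustrative bit position, since the real MLX5E_PROT_MASK() values are not reproduced here:

    #include <stdio.h>

    #define BIT(n) (1UL << (n))
    #define MODE_56GBASE_R4 10	/* illustrative, not the real enum value */

    /* Extended link modes carry no speed restriction; legacy 56G modes
     * require autoneg to be on. */
    static int speed_validate(int ext, unsigned long link_modes, int autoneg)
    {
    	if (ext)
    		return 0;
    	if ((link_modes & BIT(MODE_56GBASE_R4)) && !autoneg)
    		return -1;
    	return 0;
    }

    int main(void)
    {
    	printf("%d\n", speed_validate(0, BIT(MODE_56GBASE_R4), 0)); /* -1 */
    	printf("%d\n", speed_validate(1, BIT(MODE_56GBASE_R4), 0)); /*  0 */
    	return 0;
    }
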
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index fa8149f6eb08..e02e5895703d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
kfree(ft->g);
+ ft->g = NULL;
return -ENOMEM;
}
@@ -1390,6 +1392,7 @@ err_destroy_groups:
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
kvfree(in);
+ kfree(ft->g);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 03831650f655..6a852b4901aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1987,7 +1987,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->num_tc = params->num_tc;
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
- c->irq_desc = irq_to_desc(irq);
+ c->aff_mask = irq_get_effective_affinity_mask(irq);
c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
mlx5_set_port_admin_status(mdev, state);
- if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+ if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+ !MLX5_CAP_GEN(mdev, uplink_follow))
return;
if (state == MLX5_PORT_UP)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e47e2a0059d0..61ed671fe741 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
- struct mlx5_wqe_eth_seg *eseg)
+ struct mlx5_wqe_eth_seg *eseg, u16 ihs)
{
- if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+ if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
return false;
mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
struct mlx5_wqe_eth_seg eseg = {};
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+ attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
/* May update the WQE, but may not post other WQEs. */
mlx5e_accel_tx_finish(sq, wqe, &accel,
(struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
- if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+ if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
return NETDEV_TX_OK;
mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 1ec3d62f026d..a3cfe06d5116 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -40,12 +40,8 @@
static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
{
int current_cpu = smp_processor_id();
- const struct cpumask *aff;
- struct irq_data *idata;
- idata = irq_desc_get_irq_data(c->irq_desc);
- aff = irq_data_get_affinity_mask(idata);
- return cpumask_test_cpu(current_cpu, aff);
+ return cpumask_test_cpu(current_cpu, c->aff_mask);
}
static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
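
Both the mlx4 and mlx5 hunks stop reaching into struct irq_desc on the hot path and instead cache the IRQ's effective affinity mask once at channel setup; each NAPI poll then only tests the current CPU against the cached mask. A rough userspace analogue using the glibc CPU-set API, assuming a Linux host:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    /* Cache the allowed-CPU mask once, so each "poll" only does a cheap
     * membership test with no lookup of interrupt bookkeeping. */
    struct channel {
    	cpu_set_t aff_mask;	/* stand-in for the cached IRQ affinity */
    };

    static int no_affinity_change(const struct channel *c)
    {
    	return CPU_ISSET(sched_getcpu(), &c->aff_mask);
    }

    int main(void)
    {
    	struct channel c;

    	CPU_ZERO(&c.aff_mask);
    	CPU_SET(sched_getcpu(), &c.aff_mask);	/* pretend the IRQ targets us */

    	printf("still on an allowed CPU: %s\n",
    	       no_affinity_change(&c) ? "yes" : "no");
    	return 0;
    }
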
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
index 2b85d4777303..3e19b1721303 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_lgcy.c
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
return 0;
}
- if (!IS_ERR_OR_NULL(vport->egress.acl))
- return 0;
-
- vport->egress.acl = esw_acl_table_create(esw, vport->vport,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- table_size);
- if (IS_ERR(vport->egress.acl)) {
- err = PTR_ERR(vport->egress.acl);
- vport->egress.acl = NULL;
- goto out;
+ if (!vport->egress.acl) {
+ vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ table_size);
+ if (IS_ERR(vport->egress.acl)) {
+ err = PTR_ERR(vport->egress.acl);
+ vport->egress.acl = NULL;
+ goto out;
+ }
+
+ err = esw_acl_egress_lgcy_groups_create(esw, vport);
+ if (err)
+ goto out;
}
- err = esw_acl_egress_lgcy_groups_create(esw, vport);
- if (err)
- goto out;
-
esw_debug(esw->dev,
"vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
vport->vport, vport->info.vlan, vport->info.qos);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index f3d45ef082cd..83a05371e2aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
struct mlx5_core_dev *tmp_dev;
int i, err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
return;
tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
return;
- for (i = 0; i < MLX5_MAX_PORTS; i++) {
- tmp_dev = ldev->pf[i].dev;
- if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
- MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ if (!ldev->pf[i].dev)
break;
- }
if (i >= MLX5_MAX_PORTS)
ldev->flags |= MLX5_LAG_FLAG_READY;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c08315b51fd3..ca6f2fc39ea0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
MLX5_COREDEV_VF : MLX5_COREDEV_PF;
dev->priv.adev_idx = mlx5_adev_idx_alloc();
- if (dev->priv.adev_idx < 0)
- return dev->priv.adev_idx;
+ if (dev->priv.adev_idx < 0) {
+ err = dev->priv.adev_idx;
+ goto adev_init_err;
+ }
err = mlx5_mdev_init(dev, prof_sel);
if (err)
@@ -1403,6 +1405,7 @@ pci_init_err:
mlx5_mdev_uninit(dev);
mdev_init_err:
mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
mlx5_devlink_free(devlink);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 0fc7de4aa572..8e0dddc6383f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -116,7 +116,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
mlx5_core_roce_gid_set(dev, 0, 0, 0,
- NULL, NULL, false, 0, 0);
+ NULL, NULL, false, 0, 1);
}
static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index dbd6c3946aa6..3804310c853a 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -1935,6 +1935,14 @@ static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
length, GFP_ATOMIC | GFP_DMA);
}
+static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
+{
+ /* update the tail once per 8 descriptors */
+ if ((index & 7) == 7)
+ lan743x_csr_write(rx->adapter, RX_TAIL(rx->channel_number),
+ index);
+}
+
static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
struct sk_buff *skb)
{
@@ -1965,6 +1973,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
descriptor->data0 = (RX_DESC_DATA0_OWN_ |
(length & RX_DESC_DATA0_BUF_LENGTH_MASK_));
skb_reserve(buffer_info->skb, RX_HEAD_PADDING);
+ lan743x_rx_update_tail(rx, index);
return 0;
}
@@ -1983,6 +1992,7 @@ static void lan743x_rx_reuse_ring_element(struct lan743x_rx *rx, int index)
descriptor->data0 = (RX_DESC_DATA0_OWN_ |
((buffer_info->buffer_length) &
RX_DESC_DATA0_BUF_LENGTH_MASK_));
+ lan743x_rx_update_tail(rx, index);
}
static void lan743x_rx_release_ring_element(struct lan743x_rx *rx, int index)
@@ -2193,6 +2203,7 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
{
struct lan743x_rx *rx = container_of(napi, struct lan743x_rx, napi);
struct lan743x_adapter *adapter = rx->adapter;
+ int result = RX_PROCESS_RESULT_NOTHING_TO_DO;
u32 rx_tail_flags = 0;
int count;
@@ -2201,27 +2212,19 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
lan743x_csr_write(adapter, DMAC_INT_STS,
DMAC_INT_BIT_RXFRM_(rx->channel_number));
}
- count = 0;
- while (count < weight) {
- int rx_process_result = lan743x_rx_process_packet(rx);
-
- if (rx_process_result == RX_PROCESS_RESULT_PACKET_RECEIVED) {
- count++;
- } else if (rx_process_result ==
- RX_PROCESS_RESULT_NOTHING_TO_DO) {
+ for (count = 0; count < weight; count++) {
+ result = lan743x_rx_process_packet(rx);
+ if (result == RX_PROCESS_RESULT_NOTHING_TO_DO)
break;
- } else if (rx_process_result ==
- RX_PROCESS_RESULT_PACKET_DROPPED) {
- continue;
- }
}
rx->frame_count += count;
- if (count == weight)
- goto done;
+ if (count == weight || result == RX_PROCESS_RESULT_PACKET_RECEIVED)
+ return weight;
if (!napi_complete_done(napi, count))
- goto done;
+ return count;
+ /* re-arm interrupts, must write to rx tail on some chip variants */
if (rx->vector_flags & LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET)
rx_tail_flags |= RX_TAIL_SET_TOP_INT_VEC_EN_;
if (rx->vector_flags & LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET) {
@@ -2231,10 +2234,10 @@ static int lan743x_rx_napi_poll(struct napi_struct *napi, int weight)
INT_BIT_DMA_RX_(rx->channel_number));
}
- /* update RX_TAIL */
- lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
- rx_tail_flags | rx->last_tail);
-done:
+ if (rx_tail_flags)
+ lan743x_csr_write(adapter, RX_TAIL(rx->channel_number),
+ rx_tail_flags | rx->last_tail);
+
return count;
}
@@ -2378,7 +2381,7 @@ static int lan743x_rx_open(struct lan743x_rx *rx)
netif_napi_add(adapter->netdev,
&rx->napi, lan743x_rx_napi_poll,
- rx->ring_size - 1);
+ NAPI_POLL_WEIGHT);
lan743x_csr_write(adapter, DMAC_CMD,
DMAC_CMD_RX_SWR_(rx->channel_number));
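
The rewritten lan743x poll treats the budget uniformly: dropped packets also consume budget, the full weight is returned whenever more work may remain so NAPI keeps polling, and interrupts are re-armed only after napi_complete_done() succeeds. A small sketch of that control flow, with process_packet() as a stand-in for lan743x_rx_process_packet():

    #include <stdio.h>

    enum { NOTHING_TO_DO, PACKET_RECEIVED };

    static int process_packet(int *remaining)
    {
    	if (*remaining == 0)
    		return NOTHING_TO_DO;
    	(*remaining)--;
    	return PACKET_RECEIVED;
    }

    /* Stop early only when the ring is empty; report the full weight
     * whenever more work may be pending so the core schedules us again
     * instead of re-enabling interrupts. */
    static int napi_poll(int weight, int *remaining)
    {
    	int count, result = NOTHING_TO_DO;

    	for (count = 0; count < weight; count++) {
    		result = process_packet(remaining);
    		if (result == NOTHING_TO_DO)
    			break;
    	}
    	if (count == weight || result == PACKET_RECEIVED)
    		return weight;	/* poll again */
    	return count;		/* idle: caller completes and re-arms IRQs */
    }

    int main(void)
    {
    	int backlog = 100, budget = 64;

    	while (napi_poll(budget, &backlog) == budget && backlog)
    		printf("rescheduled, %d left\n", backlog);
    	return 0;
    }
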
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 1e7729421a82..9cf2bc5f4289 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -1267,7 +1267,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
- goto out_put_ports;
+ goto out_ocelot_deinit;
if (ocelot->ptp) {
err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info);
@@ -1282,8 +1282,14 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
register_switchdev_notifier(&ocelot_switchdev_nb);
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
+ of_node_put(ports);
+
dev_info(&pdev->dev, "Ocelot switch probed\n");
+ return 0;
+
+out_ocelot_deinit:
+ ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
return err;
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index 776b7d264dc3..2289e1fe3741 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -506,10 +506,14 @@ static int mac_sonic_platform_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err)
- goto out;
+ goto undo_probe;
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(dev);
@@ -584,12 +588,16 @@ static int mac_sonic_nubus_probe(struct nubus_board *board)
err = register_netdev(ndev);
if (err)
- goto out;
+ goto undo_probe;
nubus_set_drvdata(board, ndev);
return 0;
+undo_probe:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
out:
free_netdev(ndev);
return err;
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index afa166ff7aef..28d9e98db81a 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -229,11 +229,14 @@ int xtsonic_probe(struct platform_device *pdev)
sonic_msg_init(dev);
if ((err = register_netdev(dev)))
- goto out1;
+ goto undo_probe1;
return 0;
-out1:
+undo_probe1:
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
release_region(dev->base_addr, SONIC_MEM_SIZE);
out:
free_netdev(dev);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index bb448c82cdc2..c029950a81e2 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -860,9 +860,6 @@ static void nfp_flower_clean(struct nfp_app *app)
skb_queue_purge(&app_priv->cmsg_skbs_low);
flush_work(&app_priv->cmsg_work);
- flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
- nfp_flower_setup_indr_tc_release);
-
if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
nfp_flower_qos_cleanup(app);
@@ -951,6 +948,9 @@ static int nfp_flower_start(struct nfp_app *app)
static void nfp_flower_stop(struct nfp_app *app)
{
nfp_tunnel_config_stop(app);
+
+ flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
+ nfp_flower_setup_indr_tc_release);
}
static int
diff --git a/drivers/net/ethernet/ni/Kconfig b/drivers/net/ethernet/ni/Kconfig
index 01229190132d..dcfbfa516e67 100644
--- a/drivers/net/ethernet/ni/Kconfig
+++ b/drivers/net/ethernet/ni/Kconfig
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
#
-# National Instuments network device configuration
+# National Instruments network device configuration
#
config NET_VENDOR_NI
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 9156c9825a16..ac4cd5d82e69 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
unsigned int i, j;
unsigned int len;
- len = netdev->mtu + ETH_HLEN;
+ len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
for (i = ionic_q_space_avail(q); i; i--) {
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 4366c7a8de95..6b5ddb07ee83 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -78,6 +78,7 @@ config QED
depends on PCI
select ZLIB_INFLATE
select CRC8
+ select CRC32
select NET_DEVLINK
help
This enables the support for Marvell FastLinQ adapters family.
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index a2494bf85007..ca0ee29a57b5 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1799,6 +1799,11 @@ netdev_features_t qede_features_check(struct sk_buff *skb,
ntohs(udp_hdr(skb)->dest) != gnv_port))
return features & ~(NETIF_F_CSUM_MASK |
NETIF_F_GSO_MASK);
+ } else if (l4_proto == IPPROTO_IPIP) {
+ /* IPIP tunnels are unknown to the device, or at least not supported
+ * natively, so their offloads can't be done trivially; disable them
+ * for such skbs.
+ */

+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 5a7e240fd469..c2faf96fcade 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2492,6 +2492,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_sriov_vf_register_map(ahw);
break;
default:
+ err = -EINVAL;
goto err_out_free_hw_res;
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46d8510b2fe2..a569abe7f5ef 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -2207,7 +2207,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
}
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
@@ -2233,7 +2234,8 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_33:
+ case RTL_GIGA_MAC_VER_25 ... RTL_GIGA_MAC_VER_26:
+ case RTL_GIGA_MAC_VER_32 ... RTL_GIGA_MAC_VER_33:
case RTL_GIGA_MAC_VER_37:
case RTL_GIGA_MAC_VER_39:
case RTL_GIGA_MAC_VER_43:
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index a2e80c89de2d..9a6a519426a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -721,6 +721,8 @@ static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G_ID 0x4bb0
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G_ID 0x4bb1
#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5_ID 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0_ID 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1_ID 0x43a2
#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G_ID 0xa0ac
static const struct pci_device_id intel_eth_pci_id_table[] = {
@@ -735,6 +737,8 @@ static const struct pci_device_id intel_eth_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5_ID, &ehl_pse1_sgmii1g_info) },
{ PCI_DEVICE_DATA(INTEL, TGL_SGMII1G_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0_ID, &tgl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1_ID, &tgl_sgmii1g_info) },
{}
};
MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 459ae715b33d..f184b00f5116 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -135,7 +135,7 @@ static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
struct device *dev = dwmac->dev;
static const struct clk_parent_data mux_parents[] = {
{ .fw_name = "clkin0", },
- { .fw_name = "clkin1", },
+ { .index = -1, },
};
static const struct clk_div_table div_table[] = {
{ .div = 2, .val = 2, },
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 58e0511badba..a5e0eff4a387 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -64,6 +64,7 @@ struct emac_variant {
* @variant: reference to the current board variant
* @regmap: regmap for using the syscon
 * @internal_phy_powered: Is the internal PHY enabled
+ * @use_internal_phy: Is the internal PHY selected for use
* @mux_handle: Internal pointer used by mdio-mux lib
*/
struct sunxi_priv_data {
@@ -74,6 +75,7 @@ struct sunxi_priv_data {
const struct emac_variant *variant;
struct regmap_field *regmap_field;
bool internal_phy_powered;
+ bool use_internal_phy;
void *mux_handle;
};
@@ -539,8 +541,11 @@ static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
.dma_interrupt = sun8i_dwmac_dma_interrupt,
};
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
{
+ struct net_device *ndev = platform_get_drvdata(pdev);
struct sunxi_priv_data *gmac = priv;
int ret;
@@ -554,13 +559,25 @@ static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
ret = clk_prepare_enable(gmac->tx_clk);
if (ret) {
- if (gmac->regulator)
- regulator_disable(gmac->regulator);
dev_err(&pdev->dev, "Could not enable AHB clock\n");
- return ret;
+ goto err_disable_regulator;
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_clk;
}
return 0;
+
+err_disable_clk:
+ clk_disable_unprepare(gmac->tx_clk);
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
}
static void sun8i_dwmac_core_init(struct mac_device_info *hw,
@@ -831,7 +848,6 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
u32 reg, val;
int ret = 0;
- bool need_power_ephy = false;
if (current_child ^ desired_child) {
regmap_field_read(gmac->regmap_field, &reg);
@@ -839,13 +855,12 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
dev_info(priv->device, "Switch mux to internal PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
-
- need_power_ephy = true;
+ gmac->use_internal_phy = true;
break;
case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
dev_info(priv->device, "Switch mux to external PHY");
val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
- need_power_ephy = false;
+ gmac->use_internal_phy = false;
break;
default:
dev_err(priv->device, "Invalid child ID %x\n",
@@ -853,7 +868,7 @@ static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
return -EINVAL;
}
regmap_field_write(gmac->regmap_field, val);
- if (need_power_ephy) {
+ if (gmac->use_internal_phy) {
ret = sun8i_dwmac_power_internal_phy(priv);
if (ret)
return ret;
@@ -883,22 +898,23 @@ static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
return ret;
}
-static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
{
- struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
- struct device_node *node = priv->device->of_node;
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
int ret;
u32 reg, val;
ret = regmap_field_read(gmac->regmap_field, &val);
if (ret) {
- dev_err(priv->device, "Fail to read from regmap field.\n");
+ dev_err(dev, "Fail to read from regmap field.\n");
return ret;
}
reg = gmac->variant->default_syscon_value;
if (reg != val)
- dev_warn(priv->device,
+ dev_warn(dev,
"Current syscon value is not the default %x (expect %x)\n",
val, reg);
@@ -911,9 +927,9 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
/* Force EPHY xtal frequency to 24MHz. */
reg |= H3_EPHY_CLK_SEL;
- ret = of_mdio_parse_addr(priv->device, priv->plat->phy_node);
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
if (ret < 0) {
- dev_err(priv->device, "Could not parse MDIO addr\n");
+ dev_err(dev, "Could not parse MDIO addr\n");
return ret;
}
/* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
@@ -929,17 +945,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "tx-delay must be a multiple of 100\n");
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set tx-delay to %x\n", val);
+ dev_dbg(dev, "set tx-delay to %x\n", val);
if (val <= gmac->variant->tx_delay_max) {
reg &= ~(gmac->variant->tx_delay_max <<
SYSCON_ETXDC_SHIFT);
reg |= (val << SYSCON_ETXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid TX clock delay: %d\n",
+ dev_err(dev, "Invalid TX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -947,17 +963,17 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
if (val % 100) {
- dev_err(priv->device, "rx-delay must be a multiple of 100\n");
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
return -EINVAL;
}
val /= 100;
- dev_dbg(priv->device, "set rx-delay to %x\n", val);
+ dev_dbg(dev, "set rx-delay to %x\n", val);
if (val <= gmac->variant->rx_delay_max) {
reg &= ~(gmac->variant->rx_delay_max <<
SYSCON_ERXDC_SHIFT);
reg |= (val << SYSCON_ERXDC_SHIFT);
} else {
- dev_err(priv->device, "Invalid RX clock delay: %d\n",
+ dev_err(dev, "Invalid RX clock delay: %d\n",
val);
return -EINVAL;
}
@@ -968,7 +984,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
if (gmac->variant->support_rmii)
reg &= ~SYSCON_RMII_EN;
- switch (priv->plat->interface) {
+ switch (plat->interface) {
case PHY_INTERFACE_MODE_MII:
/* default */
break;
@@ -982,8 +998,8 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
break;
default:
- dev_err(priv->device, "Unsupported interface mode: %s",
- phy_modes(priv->plat->interface));
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
return -EINVAL;
}
@@ -1004,17 +1020,10 @@ static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
struct sunxi_priv_data *gmac = priv;
if (gmac->variant->soc_has_internal_phy) {
- /* sun8i_dwmac_exit could be called with mdiomux uninit */
- if (gmac->mux_handle)
- mdio_mux_uninit(gmac->mux_handle);
if (gmac->internal_phy_powered)
sun8i_dwmac_unpower_internal_phy(gmac);
}
- sun8i_dwmac_unset_syscon(gmac);
-
- reset_control_put(gmac->rst_ephy);
-
clk_disable_unprepare(gmac->tx_clk);
if (gmac->regulator)
@@ -1049,16 +1058,11 @@ static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
{
struct mac_device_info *mac;
struct stmmac_priv *priv = ppriv;
- int ret;
mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
if (!mac)
return NULL;
- ret = sun8i_dwmac_set_syscon(priv);
- if (ret)
- return NULL;
-
mac->pcsr = priv->ioaddr;
mac->mac = &sun8i_dwmac_ops;
mac->dma = &sun8i_dwmac_dma_ops;
@@ -1134,10 +1138,6 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
- if (IS_ERR(plat_dat))
- return PTR_ERR(plat_dat);
-
gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
if (!gmac)
return -ENOMEM;
@@ -1201,11 +1201,15 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
ret = of_get_phy_mode(dev->of_node, &interface);
if (ret)
return -EINVAL;
- plat_dat->interface = interface;
+
+ plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
/* platform data specifying hardware features and callbacks.
* hardware features were copied from Allwinner drivers.
*/
+ plat_dat->interface = interface;
plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
plat_dat->tx_coe = 1;
plat_dat->has_sun8i = true;
@@ -1214,9 +1218,13 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
plat_dat->exit = sun8i_dwmac_exit;
plat_dat->setup = sun8i_dwmac_setup;
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
if (ret)
- return ret;
+ goto dwmac_syscon;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -1230,7 +1238,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
if (gmac->variant->soc_has_internal_phy) {
ret = get_ephy_nodes(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
ret = sun8i_dwmac_register_mdio_mux(priv);
if (ret) {
dev_err(&pdev->dev, "Failed to register mux\n");
@@ -1239,15 +1247,42 @@ static int sun8i_dwmac_probe(struct platform_device *pdev)
} else {
ret = sun8i_dwmac_reset(priv);
if (ret)
- goto dwmac_exit;
+ goto dwmac_remove;
}
return ret;
dwmac_mux:
- sun8i_dwmac_unset_syscon(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ stmmac_dvr_remove(&pdev->dev);
dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
stmmac_pltfr_remove(pdev);
-return ret;
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
}
static const struct of_device_id sun8i_dwmac_match[] = {
@@ -1269,7 +1304,7 @@ MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
static struct platform_driver sun8i_dwmac_driver = {
.probe = sun8i_dwmac_probe,
- .remove = stmmac_pltfr_remove,
+ .remove = sun8i_dwmac_remove,
.driver = {
.name = "dwmac-sun8i",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index d1fc7955d422..43222a34cba0 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -599,6 +599,7 @@ void cpts_unregister(struct cpts *cpts)
ptp_clock_unregister(cpts->clock);
cpts->clock = NULL;
+ cpts->phc_index = -1;
cpts_write32(cpts, 0, int_enable);
cpts_write32(cpts, 0, control);
@@ -784,6 +785,7 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs,
cpts->cc.read = cpts_systim_read;
cpts->cc.mask = CLOCKSOURCE_MASK(32);
cpts->info = cpts_info;
+ cpts->phc_index = -1;
if (n_ext_ts)
cpts->info.n_ext_ts = n_ext_ts;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d9a5722f561b..3d1fc8d2ca66 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1791,7 +1791,7 @@ fail_open:
* ps3_gelic_driver_remove - remove a device from the control of this driver
*/
-static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
+static void ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
{
struct gelic_card *card = ps3_system_bus_get_drvdata(dev);
struct net_device *netdev0;
@@ -1840,7 +1840,6 @@ static int ps3_gelic_driver_remove(struct ps3_system_bus_device *dev)
ps3_close_hv_device(dev);
pr_debug("%s: done\n", __func__);
- return 0;
}
static struct ps3_system_bus_driver ps3_gelic_driver = {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d17bbc75f5e7..f32f28311d57 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -2050,11 +2050,11 @@ static void netvsc_link_change(struct work_struct *w)
container_of(w, struct net_device_context, dwork.work);
struct hv_device *device_obj = ndev_ctx->device_ctx;
struct net_device *net = hv_get_drvdata(device_obj);
+ unsigned long flags, next_reconfig, delay;
+ struct netvsc_reconfig *event = NULL;
struct netvsc_device *net_device;
struct rndis_device *rdev;
- struct netvsc_reconfig *event = NULL;
- bool notify = false, reschedule = false;
- unsigned long flags, next_reconfig, delay;
+ bool reschedule = false;
/* if changes are happening, come back later */
if (!rtnl_trylock()) {
@@ -2103,7 +2103,7 @@ static void netvsc_link_change(struct work_struct *w)
netif_carrier_on(net);
netvsc_tx_enable(net_device, net);
} else {
- notify = true;
+ __netdev_notify_peers(net);
}
kfree(event);
break;
@@ -2132,9 +2132,6 @@ static void netvsc_link_change(struct work_struct *w)
rtnl_unlock();
- if (notify)
- netdev_notify_peers(net);
-
/* link_watch only sends one notification with current state per
* second, handle next reconfig event in 2 seconds.
*/
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index c4795249719d..14d9a791924b 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -326,8 +326,8 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
}
/* Issue an event ring command and wait for it to complete */
-static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
- enum gsi_evt_cmd_opcode opcode)
+static void evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
+ enum gsi_evt_cmd_opcode opcode)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct completion *completion = &evt_ring->completion;
@@ -340,7 +340,13 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
* is issued here. Only permit *this* event ring to trigger
* an interrupt, and only enable the event control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);
+
val = BIT(evt_ring_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_EV_CTRL);
@@ -355,19 +361,16 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
opcode, evt_ring_id, evt_ring->state);
-
- return -ETIMEDOUT;
}
/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
/* Get initial event ring state */
evt_ring->state = gsi_evt_ring_state(gsi, evt_ring_id);
@@ -377,14 +380,16 @@ static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return -EINVAL;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
- dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
- evt_ring_id, evt_ring->state);
- ret = -EIO;
- }
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);
- return ret;
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return 0;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
+ evt_ring_id, evt_ring->state);
+
+ return -EIO;
}
/* Reset a GSI event ring in ALLOCATED or ERROR state. */
@@ -392,7 +397,6 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
enum gsi_evt_ring_state state = evt_ring->state;
- int ret;
if (state != GSI_EVT_RING_STATE_ALLOCATED &&
state != GSI_EVT_RING_STATE_ERROR) {
@@ -401,17 +405,20 @@ static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
+ evt_ring_id, evt_ring->state);
}
/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- int ret;
if (evt_ring->state != GSI_EVT_RING_STATE_ALLOCATED) {
dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
@@ -419,10 +426,14 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
return;
}
- ret = evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
- if (!ret && evt_ring->state != GSI_EVT_RING_STATE_NOT_ALLOCATED)
- dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
- evt_ring_id, evt_ring->state);
+ evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);
+
+ /* If successful the event ring state will have changed */
+ if (evt_ring->state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
+ return;
+
+ dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
+ evt_ring_id, evt_ring->state);
}
/* Fetch the current state of a channel from hardware */
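
From this point on, the GSI command helpers stop reporting a timeout error code; success is judged solely by the ring or channel state read back afterwards, since a command can still complete even when the interrupt-based wait times out. A minimal model of this check-state-not-return-code idiom, with illustrative states:

    #include <stdio.h>

    enum ring_state { NOT_ALLOCATED, ALLOCATED };

    static enum ring_state hw_state;	/* stand-in for the state register */

    /* The command issuer no longer returns an error: it fires the
     * command and waits, and the caller decides by re-reading state. */
    static void ring_command(enum ring_state target)
    {
    	hw_state = target;	/* pretend the hardware acted, maybe late */
    }

    static int ring_alloc(void)
    {
    	ring_command(ALLOCATED);

    	/* If successful the state will have changed. */
    	if (hw_state == ALLOCATED)
    		return 0;

    	fprintf(stderr, "ring bad state %d after alloc\n", hw_state);
    	return -1;
    }

    int main(void)
    {
    	return ring_alloc() ? 1 : 0;
    }
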
@@ -438,7 +449,7 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
}
/* Issue a channel command and wait for it to complete */
-static int
+static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
struct completion *completion = &channel->completion;
@@ -453,7 +464,13 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
* issued here. So we only permit *this* channel to trigger
* an interrupt and only enable the channel control IRQ type
* when we expect it to occur.
+ *
+ * There's a small chance that a previous command completed
+ * after the interrupt was disabled, so make sure we have no
+ * pending interrupts before we enable them.
*/
+ iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);
+
val = BIT(channel_id);
iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
gsi_irq_type_enable(gsi, GSI_CH_CTRL);
@@ -467,12 +484,10 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
if (success)
- return 0;
+ return;
dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
opcode, channel_id, gsi_channel_state(channel));
-
- return -ETIMEDOUT;
}
/* Allocate GSI channel in NOT_ALLOCATED state */
@@ -481,7 +496,6 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
/* Get initial channel state */
state = gsi_channel_state(channel);
@@ -491,17 +505,17 @@ static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_ALLOCATE);
+ gsi_channel_command(channel, GSI_CH_ALLOCATE);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED) {
- dev_err(dev, "channel %u bad state %u after alloc\n",
- channel_id, state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_ALLOCATED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after alloc\n",
+ channel_id, state);
+
+ return -EIO;
}
/* Start an ALLOCATED channel */
@@ -509,7 +523,6 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED &&
@@ -519,17 +532,17 @@ static int gsi_channel_start_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_START);
+ gsi_channel_command(channel, GSI_CH_START);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_STARTED) {
- dev_err(dev, "channel %u bad state %u after start\n",
- gsi_channel_id(channel), state);
- ret = -EIO;
- }
+ if (state == GSI_CHANNEL_STATE_STARTED)
+ return 0;
- return ret;
+ dev_err(dev, "channel %u bad state %u after start\n",
+ gsi_channel_id(channel), state);
+
+ return -EIO;
}
/* Stop a GSI channel in STARTED state */
@@ -537,7 +550,6 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
@@ -554,12 +566,12 @@ static int gsi_channel_stop_command(struct gsi_channel *channel)
return -EINVAL;
}
- ret = gsi_channel_command(channel, GSI_CH_STOP);
+ gsi_channel_command(channel, GSI_CH_STOP);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (ret || state == GSI_CHANNEL_STATE_STOPPED)
- return ret;
+ if (state == GSI_CHANNEL_STATE_STOPPED)
+ return 0;
/* We may have to try again if stop is in progress */
if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
@@ -576,7 +588,6 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
{
struct device *dev = channel->gsi->dev;
enum gsi_channel_state state;
- int ret;
msleep(1); /* A short delay is required before a RESET command */
@@ -590,11 +601,11 @@ static void gsi_channel_reset_command(struct gsi_channel *channel)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_RESET);
+ gsi_channel_command(channel, GSI_CH_RESET);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_ALLOCATED)
+ if (state != GSI_CHANNEL_STATE_ALLOCATED)
dev_err(dev, "channel %u bad state %u after reset\n",
gsi_channel_id(channel), state);
}
@@ -605,7 +616,6 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
struct device *dev = gsi->dev;
enum gsi_channel_state state;
- int ret;
state = gsi_channel_state(channel);
if (state != GSI_CHANNEL_STATE_ALLOCATED) {
@@ -614,11 +624,12 @@ static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
return;
}
- ret = gsi_channel_command(channel, GSI_CH_DE_ALLOC);
+ gsi_channel_command(channel, GSI_CH_DE_ALLOC);
- /* Channel state will normally have been updated */
+ /* If successful the channel state will have changed */
state = gsi_channel_state(channel);
- if (!ret && state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
+
+ if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
dev_err(dev, "channel %u bad state %u after dealloc\n",
channel_id, state);
}
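
Note on the gsi.c hunks above: they all apply one conversion — the command helpers become void (a timeout is already logged inside them), and every caller judges success purely from the ring or channel state observed after the command. The gsi_channel_command() hunk additionally clears any stale channel-control interrupt before enabling it, so a completion that raced with the previous disable cannot fire spuriously. A minimal sketch of the resulting caller shape, with illustrative names rather than the driver's API:

    /* Illustrative only: the command helper logs its own timeout, so
     * the caller checks state instead of a return code.
     */
    enum ring_state { RING_NOT_ALLOCATED, RING_ALLOCATED };

    struct ring {
            enum ring_state state;
    };

    static void issue_cmd(struct ring *r, int opcode);  /* logs on timeout */

    static int ring_alloc(struct ring *r)
    {
            if (r->state != RING_NOT_ALLOCATED)
                    return -EINVAL;         /* wrong starting state */

            issue_cmd(r, 0 /* ALLOCATE */); /* no return value to check */

            /* if successful the state will have changed */
            if (r->state == RING_ALLOCATED)
                    return 0;

            return -EIO;    /* command completed but hardware disagrees */
    }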
diff --git a/drivers/net/ipa/ipa_clock.c b/drivers/net/ipa/ipa_clock.c
index 9dcf16f399b7..135c393437f1 100644
--- a/drivers/net/ipa/ipa_clock.c
+++ b/drivers/net/ipa/ipa_clock.c
@@ -115,13 +115,13 @@ static int ipa_interconnect_enable(struct ipa *ipa)
return ret;
data = &clock->interconnect_data[IPA_INTERCONNECT_IMEM];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->imem_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_memory_path_disable;
data = &clock->interconnect_data[IPA_INTERCONNECT_CONFIG];
- ret = icc_set_bw(clock->memory_path, data->average_rate,
+ ret = icc_set_bw(clock->config_path, data->average_rate,
data->peak_rate);
if (ret)
goto err_imem_path_disable;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index fbed05ae7b0f..978ac0981d16 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1365,7 +1365,7 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
int i;
if (it->nr_segs > MAX_SKB_FRAGS + 1)
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(-EMSGSIZE);
local_bh_disable();
skb = napi_get_frags(&tfile->napi);
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 2bac57d5e8d5..5a78848db93f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -1199,7 +1199,10 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
* accordingly. Otherwise, we should check here.
*/
if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
- delayed_ndp_size = ALIGN(ctx->max_ndp_size, ctx->tx_ndp_modulus);
+ delayed_ndp_size = ctx->max_ndp_size +
+ max_t(u32,
+ ctx->tx_ndp_modulus,
+ ctx->tx_modulus + ctx->tx_remainder) - 1;
else
delayed_ndp_size = 0;
@@ -1410,7 +1413,8 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
if (!(dev->driver_info->flags & FLAG_SEND_ZLP) &&
skb_out->len > ctx->min_tx_pkt) {
padding_count = ctx->tx_curr_size - skb_out->len;
- skb_put_zero(skb_out, padding_count);
+ if (!WARN_ON(padding_count > ctx->tx_curr_size))
+ skb_put_zero(skb_out, padding_count);
} else if (skb_out->len < ctx->tx_curr_size &&
(skb_out->len % dev->maxpacket) == 0) {
skb_put_u8(skb_out, 0); /* force short packet */
@@ -1863,9 +1867,6 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
* USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
* sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
*/
- netif_info(dev, link, dev->net,
- "network connection: %sconnected\n",
- !!event->wValue ? "" : "dis");
usbnet_link_change(dev, !!event->wValue, 0);
break;
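
Note on the cdc_ncm hunks: with NDP_TO_END the NDP is appended after all datagrams, at an offset unknown while the frame is being filled, so aligning max_ndp_size up front is not enough — the tail-alignment step can itself insert up to tx_modulus + tx_remainder - 1 bytes. The new expression reserves the NDP plus the largest gap either alignment could produce; the second hunk then guards skb_put_zero() with a WARN_ON so an underflowed padding_count cannot expand the buffer. A standalone arithmetic check with made-up parameter values:

    /* Hypothetical values, only to show the worst-case reservation. */
    #include <stdio.h>

    static unsigned int max_u32(unsigned int a, unsigned int b)
    {
            return a > b ? a : b;
    }

    int main(void)
    {
            unsigned int max_ndp_size = 32;          /* NDP itself */
            unsigned int tx_ndp_modulus = 4;         /* NDP alignment */
            unsigned int tx_modulus = 512, tx_remainder = 0; /* tail align */

            unsigned int reserve = max_ndp_size +
                    max_u32(tx_ndp_modulus, tx_modulus + tx_remainder) - 1;

            printf("reserve %u bytes for the delayed NDP\n", reserve); /* 543 */
            return 0;
    }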
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d166c321ee9b..af19513a9f75 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1013,6 +1013,7 @@ static const struct usb_device_id products[] = {
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0620)}, /* Quectel EM160R-GL */
{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 052975ea0af4..508408fbe78f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2093,14 +2093,16 @@ static int virtnet_set_channels(struct net_device *dev,
get_online_cpus();
err = _virtnet_set_queues(vi, queue_pairs);
- if (!err) {
- netif_set_real_num_tx_queues(dev, queue_pairs);
- netif_set_real_num_rx_queues(dev, queue_pairs);
-
- virtnet_set_affinity(vi);
+ if (err) {
+ put_online_cpus();
+ goto err;
}
+ virtnet_set_affinity(vi);
put_online_cpus();
+ netif_set_real_num_tx_queues(dev, queue_pairs);
+ netif_set_real_num_rx_queues(dev, queue_pairs);
+ err:
return err;
}
@@ -3072,6 +3074,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev_err(&vdev->dev,
"device MTU appears to have changed it is now %d < %d",
mtu, dev->min_mtu);
+ err = -EINVAL;
goto free;
}
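
Note on the virtnet_set_channels() hunk: the reordering reads as a lock-scope fix — netif_set_real_num_{tx,rx}_queues() can apparently reacquire the CPU hotplug read lock on its own (via the XPS reset paths), so it now runs after put_online_cpus() rather than inside the critical section; the second hunk makes the MTU-mismatch path return a real error instead of falling through with err == 0. Shape-only sketch, assuming hypothetical helpers:

    static int set_channels(void)
    {
            int err;

            get_online_cpus();              /* CPU hotplug read lock */
            err = configure_queues();       /* needs CPUs stable */
            if (err) {
                    put_online_cpus();
                    goto out;
            }
            set_irq_affinity();             /* also wants the lock held */
            put_online_cpus();

            update_real_queue_counts();     /* may retake the hotplug lock */
    out:
            return err;
    }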
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 4029fde71a9e..83c9481995dd 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -282,6 +282,7 @@ config SLIC_DS26522
tristate "Slic Maxim ds26522 card support"
depends on SPI
depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
+ select BITREVERSE
help
This module initializes and configures the slic maxim card
in T1 or E1 mode.
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 64f855651336..261b53fc8e04 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -569,6 +569,13 @@ static void ppp_timer(struct timer_list *t)
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
+ /* mod_timer could be called after we entered this function but
+ * before we got the lock.
+ */
+ if (timer_pending(&proto->timer)) {
+ spin_unlock_irqrestore(&ppp->lock, flags);
+ return;
+ }
switch (proto->state) {
case STOPPING:
case REQ_SENT:
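
Note on the ppp_timer() guard: it closes a classic timer race — the handler can start running, then another CPU calls mod_timer() and takes the lock first; by the time the handler acquires the lock, the re-armed timer owns the next expiry and this invocation must do nothing. Generic sketch of the idiom, with illustrative types:

    static void expiry_fn(struct timer_list *t)
    {
            struct state *s = from_timer(s, t, timer);
            unsigned long flags;

            spin_lock_irqsave(&s->lock, flags);
            if (timer_pending(&s->timer)) {
                    /* re-armed between firing and locking: the newer
                     * expiry owns the next run, so bail out now
                     */
                    spin_unlock_irqrestore(&s->lock, flags);
                    return;
            }
            /* ... normal expiry work under the lock ... */
            spin_unlock_irqrestore(&s->lock, flags);
    }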
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index b97c38b9a270..350b7913622c 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_hif_ce_irq_disable(ab);
ret = ath11k_hif_suspend(ab);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 205c0f1a40e9..920e5026a635 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
{
u8 channel_num;
u32 center_freq;
+ struct ieee80211_channel *channel;
rx_status->freq = 0;
rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_status->band = NL80211_BAND_5GHZ;
} else {
spin_lock_bh(&ar->data_lock);
- rx_status->band = ar->rx_channel->band;
- channel_num =
- ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+ channel = ar->rx_channel;
+ if (channel) {
+ rx_status->band = channel->band;
+ channel_num =
+ ieee80211_frequency_to_channel(channel->center_freq);
+ }
spin_unlock_bh(&ar->data_lock);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
rx_desc, sizeof(struct hal_rx_desc));
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 5c175e3e09b2..c1608f64ea95 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ab->hw_params.vdev_start_delay &&
+ !arvif->is_started &&
arvif->vdev_type != WMI_VDEV_TYPE_AP) {
ret = ath11k_start_vdev_delay(ar->hw, vif);
if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
/* for QCA6390 bss peer must be created before vdev_start */
if (ab->hw_params.vdev_start_delay &&
arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
ret = 0;
goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params.vdev_start_delay) {
+ if (ab->hw_params.vdev_start_delay &&
+ (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+ arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
param.vdev_id = arvif->vdev_id;
param.peer_type = WMI_PEER_TYPE_DEFAULT;
param.peer_addr = ar->mac_addr;
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 857647aa57c8..20b415cd96c4 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
return ret;
}
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
return ret;
}
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
return ret;
}
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
- if (!ret) {
+ if (ret) {
ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
return ret;
}
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
pci_disable_device(pci_dev);
}
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+ struct ath11k_base *ab = ab_pci->ab;
+
+ pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ &ab_pci->link_ctl);
+
+ ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+ ab_pci->link_ctl,
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+ u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+ /* disable L0s and L1 */
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+ set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+ if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+ pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+ ab_pci->link_ctl);
+}
+
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, true);
+ /* Disable ASPM during firmware download due to problems switching
+ * to AMSS state.
+ */
+ ath11k_pci_aspm_disable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+ /* restore aspm in case firmware bootup fails */
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_force_wake(ab_pci->ab);
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
+ ath11k_pci_aspm_restore(ab_pci);
+
ath11k_pci_ce_irqs_enable(ab);
ath11k_ce_rx_post_buf(ab);
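
Note on the ath11k ASPM hunks: the dance is save/clear/restore on the Link Control register — read LNKCTL once, clear the two ASPM control bits (L0s and L1) for the duration of firmware download, and write the saved value back either once the device is up or when bootup fails. Condensed sketch using the real pcie_capability_*() accessors, with a module-local copy standing in for ab_pci->link_ctl:

    static u16 saved_lnkctl;        /* stand-in for ab_pci->link_ctl */

    static void aspm_disable(struct pci_dev *pdev)
    {
            pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &saved_lnkctl);
            /* clear ASPM control: both L0s and L1 */
            pcie_capability_write_word(pdev, PCI_EXP_LNKCTL,
                                       saved_lnkctl & ~PCI_EXP_LNKCTL_ASPMC);
    }

    static void aspm_restore(struct pci_dev *pdev)
    {
            pcie_capability_write_word(pdev, PCI_EXP_LNKCTL, saved_lnkctl);
    }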
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 0432a702416b..fe44d0dfce19 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
enum ath11k_pci_flags {
ATH11K_PCI_FLAG_INIT_DONE,
ATH11K_PCI_FLAG_IS_MSI_64,
+ ATH11K_PCI_ASPM_RESTORE,
};
struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
/* enum ath11k_pci_flags */
unsigned long flags;
+ u16 link_ctl;
};
static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index 1866d82678fa..b69e7ebfa930 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
return NULL;
}
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id)
+{
+ struct ath11k_peer *peer;
+
+ spin_lock_bh(&ab->base_lock);
+
+ list_for_each_entry(peer, &ab->peers, list) {
+ if (vdev_id == peer->vdev_id) {
+ spin_unlock_bh(&ab->base_lock);
+ return peer;
+ }
+ }
+ spin_unlock_bh(&ab->base_lock);
+ return NULL;
+}
+
void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
{
struct ath11k_peer *peer;
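
Note on ath11k_peer_find_by_vdev_id(): base_lock is dropped before the pointer is returned, so the result is only trustworthy as an existence hint, not for dereferencing. The new caller in the mac.c hunk uses it exactly that way (condensed here, dropping the vdev-type checks):

    /* boolean use only: the peer-list lock is already released */
    if (ab->hw_params.vdev_start_delay &&
        !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
            /* no bss peer yet: stash the chanctx and defer vdev start */
    }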
diff --git a/drivers/net/wireless/ath/ath11k/peer.h b/drivers/net/wireless/ath/ath11k/peer.h
index bba2e00b6944..8553ed061aea 100644
--- a/drivers/net/wireless/ath/ath11k/peer.h
+++ b/drivers/net/wireless/ath/ath11k/peer.h
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param);
int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+ int vdev_id);
#endif /* _PEER_H_ */
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index f0b5c50974f3..0db623ff4bb9 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
struct qmi_txn txn = {};
int ret = 0, i;
+ bool delayed;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
	 * failure to FW and FW will then request multiple blocks of small
* chunk size memory.
*/
- if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+ if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+ delayed = true;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
ab->qmi.mem_seg_count);
memset(req, 0, sizeof(*req));
} else {
+ delayed = false;
req->mem_seg_len = ab->qmi.mem_seg_count;
for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
}
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+ /* the error response is expected when
+ * target_mem_delayed is true.
+ */
+ if (delayed && resp.resp.error == 0)
+ goto out;
+
ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
int i;
struct target_mem_chunk *chunk;
+ ab->qmi.target_mem_delayed = false;
+
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
chunk = &ab->qmi.target_mem[i];
chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
&chunk->paddr,
GFP_KERNEL);
if (!chunk->vaddr) {
+ if (ab->qmi.mem_seg_count <= 2) {
+ ath11k_dbg(ab, ATH11K_DBG_QMI,
+ "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+ chunk->size,
+ chunk->type);
+ ath11k_qmi_free_target_mem_chunk(ab);
+ ab->qmi.target_mem_delayed = true;
+ return 0;
+ }
ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
chunk->size,
chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
ret);
return;
}
- } else if (msg->mem_seg_len > 2) {
+ } else {
ret = ath11k_qmi_alloc_target_mem_chunk(ab);
if (ret) {
ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 92925c9eac67..7bad374cc23a 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -125,6 +125,7 @@ struct ath11k_qmi {
struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
u32 mem_seg_count;
u32 target_mem_mode;
+ bool target_mem_delayed;
u8 cal_done;
struct target_info target;
struct m3_mem_region m3_mem;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index da4b546b62cb..73869d445c5b 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
len = sizeof(*cmd);
skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
diff --git a/drivers/net/wireless/ath/wil6210/Kconfig b/drivers/net/wireless/ath/wil6210/Kconfig
index 6a95b199bf62..f074e9c31aa2 100644
--- a/drivers/net/wireless/ath/wil6210/Kconfig
+++ b/drivers/net/wireless/ath/wil6210/Kconfig
@@ -2,6 +2,7 @@
config WIL6210
tristate "Wilocity 60g WiFi card wil6210 support"
select WANT_DEV_COREDUMP
+ select CRC32
depends on CFG80211
depends on PCI
default n
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index ed4635bd151a..102a8f14c22d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
.types = BIT(NL80211_IFTYPE_ADHOC)
}, {
.max = 16,
- .types = BIT(NL80211_IFTYPE_AP) |
+ .types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
- BIT(NL80211_IFTYPE_MESH_POINT)
+ | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
}, {
.max = MT7915_MAX_INTERFACES,
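
Note on the if_limits[] hunk: the operator move is preprocessor hygiene — the '|' now lives inside the #ifdef with the term it joins, so building without CONFIG_MAC80211_MESH leaves a complete expression instead of a trailing operator. The same idiom in isolation:

    unsigned long mask = (1UL << 0)
    #ifdef OPTIONAL_FEATURE
                       | (1UL << 1)     /* term and operator vanish together */
    #endif
                       ;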
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 62b5b912818f..0b6facb17ff7 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
- bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
struct mt76_queue_entry entry;
int nframes = 0;
+ bool mcu;
+ if (!q)
+ return 0;
+
+ mcu = q == dev->q_mcu[MT_MCUQ_WM];
while (q->queued > 0) {
if (!q->entry[q->tail].done)
break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
nframes++;
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
- if (mcu)
- goto out;
-
- mt76_txq_schedule(&dev->phy, q->qid);
+ if (!mcu)
+ mt76_txq_schedule(&dev->phy, q->qid);
- if (wake)
- ieee80211_wake_queue(dev->hw, q->qid);
-out:
return nframes;
}
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index dc850109de22..b95d093728b9 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
struct mt76_queue_entry entry;
struct mt76_queue *q;
- bool wake;
int i;
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
q = dev->phy.q_tx[i];
+ if (!q)
+ continue;
while (q->queued > 0) {
if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
mt76_queue_tx_complete(dev, q, &entry);
}
- wake = q->stopped && q->queued < q->ndesc - 8;
- if (wake)
- q->stopped = false;
-
if (!q->queued)
wake_up(&dev->tx_wait);
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
if (dev->drv->tx_status_data &&
!test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
queue_work(dev->wq, &dev->usb.stat_work);
- if (wake)
- ieee80211_wake_queue(dev->hw, i);
}
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index a7259dbc953d..965bd9589045 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
"Firmware callback routine entered!\n");
- complete(&rtlpriv->firmware_loading_complete);
if (!firmware) {
if (rtlpriv->cfg->alt_fw_name) {
err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
}
pr_err("Selected firmware is not available\n");
rtlpriv->max_fw_size = 0;
- return;
+ goto exit;
}
found_alt:
if (firmware->size > rtlpriv->max_fw_size) {
pr_err("Firmware is too big!\n");
release_firmware(firmware);
- return;
+ goto exit;
}
if (!is_wow) {
memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
release_firmware(firmware);
+
+exit:
+ complete(&rtlpriv->firmware_loading_complete);
}
void rtl_fw_cb(const struct firmware *firmware, void *context)
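
Note on the rtlwifi hunks: the fix is pure ordering — complete() used to fire at the top of the async firmware callback, waking the waiter before (or without) the firmware being copied; it now runs last, on every exit path, success or failure alike. Generic shape of the corrected callback, with a hypothetical priv structure and the size check elided:

    static void fw_callback(const struct firmware *fw, void *ctx)
    {
            struct priv *priv = ctx;

            if (!fw)
                    goto exit;      /* failure still wakes the waiter */

            memcpy(priv->fw_buf, fw->data, fw->size);       /* publish first */
            priv->fw_size = fw->size;
            release_firmware(fw);
    exit:
            complete(&priv->firmware_loading_complete);     /* then wake */
    }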
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index 103bf5c92bdc..f042d3eaf8f6 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -23,31 +23,11 @@ static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb)
static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_AGAIN),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_GET_RFREG),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
NCI_PROP_SET_RFREG),
.rsp = s3fwrn5_nci_prop_rsp,
},
{
.opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_GET_RFREG_VER),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_SET_RFREG_VER),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
NCI_PROP_START_RFREG),
.rsp = s3fwrn5_nci_prop_rsp,
},
@@ -61,11 +41,6 @@ static struct nci_driver_ops s3fwrn5_nci_prop_ops[] = {
NCI_PROP_FW_CFG),
.rsp = s3fwrn5_nci_prop_rsp,
},
- {
- .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY,
- NCI_PROP_WR_RESET),
- .rsp = s3fwrn5_nci_prop_rsp,
- },
};
void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n)
diff --git a/drivers/nfc/s3fwrn5/nci.h b/drivers/nfc/s3fwrn5/nci.h
index 23c0b28f247a..a80f0fb082a8 100644
--- a/drivers/nfc/s3fwrn5/nci.h
+++ b/drivers/nfc/s3fwrn5/nci.h
@@ -11,9 +11,6 @@
#include "s3fwrn5.h"
-#define NCI_PROP_AGAIN 0x01
-
-#define NCI_PROP_GET_RFREG 0x21
#define NCI_PROP_SET_RFREG 0x22
struct nci_prop_set_rfreg_cmd {
@@ -25,23 +22,6 @@ struct nci_prop_set_rfreg_rsp {
__u8 status;
};
-#define NCI_PROP_GET_RFREG_VER 0x24
-
-struct nci_prop_get_rfreg_ver_rsp {
- __u8 status;
- __u8 data[8];
-};
-
-#define NCI_PROP_SET_RFREG_VER 0x25
-
-struct nci_prop_set_rfreg_ver_cmd {
- __u8 data[8];
-};
-
-struct nci_prop_set_rfreg_ver_rsp {
- __u8 status;
-};
-
#define NCI_PROP_START_RFREG 0x26
struct nci_prop_start_rfreg_rsp {
@@ -70,8 +50,6 @@ struct nci_prop_fw_cfg_rsp {
__u8 status;
};
-#define NCI_PROP_WR_RESET 0x2f
-
void s3fwrn5_nci_get_prop_ops(struct nci_driver_ops **ops, size_t *n);
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name);
diff --git a/drivers/nfc/s3fwrn5/phy_common.c b/drivers/nfc/s3fwrn5/phy_common.c
index 497b02b30ae7..81318478d5fd 100644
--- a/drivers/nfc/s3fwrn5/phy_common.c
+++ b/drivers/nfc/s3fwrn5/phy_common.c
@@ -20,7 +20,8 @@ void s3fwrn5_phy_set_wake(void *phy_id, bool wake)
mutex_lock(&phy->mutex);
gpio_set_value(phy->gpio_fw_wake, wake);
- msleep(S3FWRN5_EN_WAIT_TIME);
+ if (wake)
+ msleep(S3FWRN5_EN_WAIT_TIME);
mutex_unlock(&phy->mutex);
}
EXPORT_SYMBOL(s3fwrn5_phy_set_wake);
diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c
index d54261f50851..e7a4c2aa8baa 100644
--- a/drivers/ntb/hw/idt/ntb_hw_idt.c
+++ b/drivers/ntb/hw/idt/ntb_hw_idt.c
@@ -2511,7 +2511,7 @@ static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
/* If the top directory is not created then do nothing */
if (IS_ERR_OR_NULL(dbgfs_topdir)) {
dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
- return PTR_ERR(dbgfs_topdir);
+ return PTR_ERR_OR_ZERO(dbgfs_topdir);
}
/* Create the info file node */
@@ -2756,7 +2756,7 @@ static int idt_pci_probe(struct pci_dev *pdev,
/* Allocate the memory for IDT NTB device data */
ndev = idt_create_dev(pdev, id);
- if (IS_ERR_OR_NULL(ndev))
+ if (IS_ERR(ndev))
return PTR_ERR(ndev);
/* Initialize the basic PCI subsystem of the device */
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.h b/drivers/ntb/hw/intel/ntb_hw_gen1.h
index 1b759942d8af..344249fc18d1 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen1.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen1.h
@@ -141,6 +141,7 @@
#define NTB_HWERR_B2BDOORBELL_BIT14 BIT_ULL(2)
#define NTB_HWERR_MSIX_VECTOR32_BAD BIT_ULL(3)
#define NTB_HWERR_BAR_ALIGN BIT_ULL(4)
+#define NTB_HWERR_LTR_BAD BIT_ULL(5)
extern struct intel_b2b_addr xeon_b2b_usd_addr;
extern struct intel_b2b_addr xeon_b2b_dsd_addr;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.c b/drivers/ntb/hw/intel/ntb_hw_gen4.c
index bc4541cbf8c6..fede05151f69 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.c
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.c
@@ -177,8 +177,10 @@ int gen4_init_dev(struct intel_ntb_dev *ndev)
ndev->reg = &gen4_reg;
- if (pdev_is_ICX(pdev))
+ if (pdev_is_ICX(pdev)) {
ndev->hwerr_flags |= NTB_HWERR_BAR_ALIGN;
+ ndev->hwerr_flags |= NTB_HWERR_LTR_BAD;
+ }
ppd1 = ioread32(ndev->self_mmio + GEN4_PPD1_OFFSET);
ndev->ntb.topo = gen4_ppd_topo(ndev, ppd1);
@@ -431,6 +433,25 @@ static int intel_ntb4_link_enable(struct ntb_dev *ntb,
dev_dbg(&ntb->pdev->dev,
"ignoring max_width %d\n", max_width);
+ if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD)) {
+ u32 ltr;
+
+ /* Setup active snoop LTR values */
+ ltr = NTB_LTR_ACTIVE_REQMNT | NTB_LTR_ACTIVE_VAL | NTB_LTR_ACTIVE_LATSCALE;
+ /* Setup active non-snoop values */
+ ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
+ iowrite32(ltr, ndev->self_mmio + GEN4_LTR_ACTIVE_OFFSET);
+
+ /* Setup idle snoop LTR values */
+ ltr = NTB_LTR_IDLE_VAL | NTB_LTR_IDLE_LATSCALE | NTB_LTR_IDLE_REQMNT;
+ /* Setup idle non-snoop values */
+ ltr = (ltr << NTB_LTR_NS_SHIFT) | ltr;
+ iowrite32(ltr, ndev->self_mmio + GEN4_LTR_IDLE_OFFSET);
+
+ /* setup PCIe LTR to active */
+ iowrite8(NTB_LTR_SWSEL_ACTIVE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
+ }
+
ntb_ctl = NTB_CTL_E2I_BAR23_SNOOP | NTB_CTL_I2E_BAR23_SNOOP;
ntb_ctl |= NTB_CTL_E2I_BAR45_SNOOP | NTB_CTL_I2E_BAR45_SNOOP;
iowrite32(ntb_ctl, ndev->self_mmio + ndev->reg->ntb_ctl);
@@ -476,6 +497,10 @@ static int intel_ntb4_link_disable(struct ntb_dev *ntb)
lnkctl |= GEN4_LINK_CTRL_LINK_DISABLE;
iowrite16(lnkctl, ndev->self_mmio + GEN4_LINK_CTRL_OFFSET);
+ /* set LTR to idle */
+ if (!(ndev->hwerr_flags & NTB_HWERR_LTR_BAD))
+ iowrite8(NTB_LTR_SWSEL_IDLE, ndev->self_mmio + GEN4_LTR_SWSEL_OFFSET);
+
ndev->dev_up = 0;
return 0;
diff --git a/drivers/ntb/hw/intel/ntb_hw_gen4.h b/drivers/ntb/hw/intel/ntb_hw_gen4.h
index a868c788de02..3fcd3fdce9ed 100644
--- a/drivers/ntb/hw/intel/ntb_hw_gen4.h
+++ b/drivers/ntb/hw/intel/ntb_hw_gen4.h
@@ -35,6 +35,9 @@
#define GEN4_IM_SPAD_SEM_OFFSET 0x00c0 /* SPAD hw semaphore */
#define GEN4_IM_SPAD_STICKY_OFFSET 0x00c4 /* sticky SPAD */
#define GEN4_IM_DOORBELL_OFFSET 0x0100 /* 0-31 doorbells */
+#define GEN4_LTR_SWSEL_OFFSET 0x30ec
+#define GEN4_LTR_ACTIVE_OFFSET 0x30f0
+#define GEN4_LTR_IDLE_OFFSET 0x30f4
#define GEN4_EM_SPAD_OFFSET 0x8080
/* note, link status is now in MMIO and not config space for NTB */
#define GEN4_LINK_CTRL_OFFSET 0xb050
@@ -80,6 +83,18 @@
#define NTB_SJC_FORCEDETECT 0x000004
+#define NTB_LTR_SWSEL_ACTIVE 0x0
+#define NTB_LTR_SWSEL_IDLE 0x1
+
+#define NTB_LTR_NS_SHIFT 16
+#define NTB_LTR_ACTIVE_VAL 0x0000 /* 0 us */
+#define NTB_LTR_ACTIVE_LATSCALE 0x0800 /* 1us scale */
+#define NTB_LTR_ACTIVE_REQMNT 0x8000 /* snoop req enable */
+
+#define NTB_LTR_IDLE_VAL 0x0258 /* 600 us */
+#define NTB_LTR_IDLE_LATSCALE 0x0800 /* 1us scale */
+#define NTB_LTR_IDLE_REQMNT 0x8000 /* snoop req enable */
+
ssize_t ndev_ntb4_debugfs_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *offp);
int gen4_init_dev(struct intel_ntb_dev *ndev);
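
Note on the new LTR constants: the writes in gen4_init_dev()/link_enable() pack one 32-bit word from two identical 16-bit halves — low half snoop, high half non-snoop, each carrying the requirement bit, latency scale, and value. Using the idle constants above, 0x0258 is 600 decimal at a 1 us scale, i.e. a 600 us idle tolerance. A small illustrative builder:

    static u32 build_idle_ltr(void)
    {
            u32 snoop = 0x8000 | 0x0800 | 0x0258;   /* REQMNT | LATSCALE | 600us */

            return (snoop << 16) | snoop;           /* non-snoop = same policy */
    }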
diff --git a/drivers/ntb/msi.c b/drivers/ntb/msi.c
index 0a5e884a920c..3f05cfbc73af 100644
--- a/drivers/ntb/msi.c
+++ b/drivers/ntb/msi.c
@@ -282,15 +282,13 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
struct ntb_msi_desc *msi_desc)
{
struct msi_desc *entry;
- struct irq_desc *desc;
int ret;
if (!ntb->msi)
return -EINVAL;
for_each_pci_msi_entry(entry, ntb->pdev) {
- desc = irq_to_desc(entry->irq);
- if (desc->action)
+ if (irq_has_action(entry->irq))
continue;
ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 2e258bee7db2..aa53e0b769bd 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -7,7 +7,6 @@
#ifndef _LINUX_BTT_H
#define _LINUX_BTT_H
-#include <linux/badblocks.h>
#include <linux/types.h>
#define BTT_SIG_LEN 16
@@ -197,6 +196,8 @@ struct arena_info {
int log_index[2];
};
+struct badblocks;
+
/**
* struct btt - handle for a BTT instance
* @btt_disk: Pointer to the gendisk for BTT device
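
Note on the btt.h change: it swaps a header include for a forward declaration — the header only ever stores a pointer to struct badblocks, so an incomplete type suffices and the include moves into the one .c file (claim.c) that actually dereferences it. The technique in miniature:

    struct badblocks;               /* incomplete type is enough... */

    struct btt_like {
            struct badblocks *bb;   /* ...for a pointer-only member */
    };
    /* code that touches bb's fields includes <linux/badblocks.h> itself */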
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 5a7c80053c62..030dbde6b088 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -4,6 +4,7 @@
*/
#include <linux/device.h>
#include <linux/sizes.h>
+#include <linux/badblocks.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index c21ba0602029..7de592d7eff4 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -3,7 +3,6 @@
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
#include <linux/libnvdimm.h>
-#include <linux/badblocks.h>
#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/module.h>
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 47a4828b8b31..9251441fd8a3 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -980,6 +980,15 @@ static int __blk_label_update(struct nd_region *nd_region,
}
}
+ /* release slots associated with any invalidated UUIDs */
+ mutex_lock(&nd_mapping->lock);
+ list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list)
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)) {
+ reap_victim(nd_mapping, label_ent);
+ list_move(&label_ent->list, &list);
+ }
+ mutex_unlock(&nd_mapping->lock);
+
/*
* Find the resource associated with the first label in the set
* per the v1.2 namespace specification.
@@ -999,8 +1008,10 @@ static int __blk_label_update(struct nd_region *nd_region,
if (is_old_resource(res, old_res_list, old_num_resources))
continue; /* carry-over */
slot = nd_label_alloc_slot(ndd);
- if (slot == UINT_MAX)
+ if (slot == UINT_MAX) {
+ rc = -ENXIO;
goto abort;
+ }
dev_dbg(ndd->dev, "allocated: %d\n", slot);
nd_label = to_label(ndd, slot);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index ce1b61519441..f320273fc672 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -179,7 +179,7 @@ int nvme_reset_ctrl(struct nvme_ctrl *ctrl)
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
+static int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
{
int ret;
@@ -192,7 +192,6 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
return ret;
}
-EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
@@ -331,7 +330,7 @@ static inline void nvme_end_req(struct request *req)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
- nvme_trace_bio_complete(req, status);
+ nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -578,7 +577,7 @@ struct request *nvme_alloc_request(struct request_queue *q,
}
EXPORT_SYMBOL_GPL(nvme_alloc_request);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
+static struct request *nvme_alloc_request_qid(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid)
{
struct request *req;
@@ -589,7 +588,6 @@ struct request *nvme_alloc_request_qid(struct request_queue *q,
nvme_init_request(req, cmd);
return req;
}
-EXPORT_SYMBOL_GPL(nvme_alloc_request_qid);
static int nvme_toggle_streams(struct nvme_ctrl *ctrl, bool enable)
{
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 38373a0e86ef..5f36cfa8136c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -166,6 +166,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set admin_tag_set;
struct blk_mq_tag_set tag_set;
+ struct work_struct ioerr_work;
struct delayed_work connect_work;
struct kref ref;
@@ -1889,6 +1890,15 @@ __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
}
static void
+nvme_fc_ctrl_ioerr_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ioerr_work);
+
+ nvme_fc_error_recovery(ctrl, "transport detected io error");
+}
+
+static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
@@ -2046,7 +2056,7 @@ done:
check_error:
if (terminate_assoc)
- nvme_fc_error_recovery(ctrl, "transport detected io error");
+ queue_work(nvme_reset_wq, &ctrl->ioerr_work);
}
static int
@@ -3233,6 +3243,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -3449,6 +3460,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3540,6 +3552,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
+ cancel_work_sync(&ctrl->ioerr_work);
cancel_work_sync(&ctrl->ctrl.reset_work);
cancel_delayed_work_sync(&ctrl->connect_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 7e49f61f81df..88a6b97247f5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -610,8 +610,6 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
#define NVME_QID_ANY -1
struct request *nvme_alloc_request(struct request_queue *q,
struct nvme_command *cmd, blk_mq_req_flags_t flags);
-struct request *nvme_alloc_request_qid(struct request_queue *q,
- struct nvme_command *cmd, blk_mq_req_flags_t flags, int qid);
void nvme_cleanup_cmd(struct request *req);
blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
struct nvme_command *cmd);
@@ -630,7 +628,6 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned int fid,
int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
-int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
@@ -675,8 +672,7 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
kblockd_schedule_work(&head->requeue_work);
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
struct nvme_ns *ns = req->q->queuedata;
@@ -731,8 +727,7 @@ static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
{
}
-static inline void nvme_trace_bio_complete(struct request *req,
- blk_status_t status)
+static inline void nvme_trace_bio_complete(struct request *req)
{
}
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index b4385cb0ff60..50d9a20568a2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -967,6 +967,7 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
+ __u16 command_id = READ_ONCE(cqe->command_id);
struct request *req;
/*
@@ -975,17 +976,17 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
* aborts. We don't even bother to allocate a struct request
* for them but rather special case them here.
*/
- if (unlikely(nvme_is_aen_req(nvmeq->qid, cqe->command_id))) {
+ if (unlikely(nvme_is_aen_req(nvmeq->qid, command_id))) {
nvme_complete_async_event(&nvmeq->dev->ctrl,
cqe->status, &cqe->result);
return;
}
- req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id);
+ req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), command_id);
if (unlikely(!req)) {
dev_warn(nvmeq->dev->ctrl.device,
"invalid id %d completed on queue %d\n",
- cqe->command_id, le16_to_cpu(cqe->sq_id));
+ command_id, le16_to_cpu(cqe->sq_id));
return;
}
@@ -3196,7 +3197,8 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
- .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
+ NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
.driver_data = NVME_QUIRK_LIGHTNVM, },
{ PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
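
Note on the command_id hunks: this is a torn-read guard — the CQE lives in memory the controller updates by DMA, so the id is loaded exactly once with READ_ONCE() and the local copy feeds the AEN check, the tag lookup, and the warning. Minimal sketch:

    struct cqe { u16 command_id; }; /* trimmed stand-in for the real CQE */

    static void handle_cqe(struct cqe *cqe)
    {
            u16 command_id = READ_ONCE(cqe->command_id);    /* single load */

            /* every later use reads 'command_id'; refetching from *cqe
             * could observe a different value if the entry is rewritten
             */
    }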
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1ba659927442..979ee31b8dd1 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -262,6 +262,16 @@ static inline void nvme_tcp_advance_req(struct nvme_tcp_request *req,
}
}
+static inline void nvme_tcp_send_all(struct nvme_tcp_queue *queue)
+{
+ int ret;
+
+ /* drain the send queue as much as we can... */
+ do {
+ ret = nvme_tcp_try_send(queue);
+ } while (ret > 0);
+}
+
static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
bool sync, bool last)
{
@@ -279,7 +289,7 @@ static inline void nvme_tcp_queue_request(struct nvme_tcp_request *req,
if (queue->io_cpu == smp_processor_id() &&
sync && empty && mutex_trylock(&queue->send_mutex)) {
queue->more_requests = !last;
- nvme_tcp_try_send(queue);
+ nvme_tcp_send_all(queue);
queue->more_requests = false;
mutex_unlock(&queue->send_mutex);
} else if (last) {
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index 733d9363900e..68213f0a052b 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1501,7 +1501,8 @@ static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int opcode, starting, amount;
+ unsigned int opcode;
+ int starting, amount;
if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
return -EBADRQC;
@@ -1588,8 +1589,8 @@ out_destroy_class:
static void __exit fcloop_exit(void)
{
- struct fcloop_lport *lport;
- struct fcloop_nport *nport;
+ struct fcloop_lport *lport = NULL;
+ struct fcloop_nport *nport = NULL;
struct fcloop_tport *tport;
struct fcloop_rport *rport;
unsigned long flags;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 5c1e7cb7fe0d..bdfc22eb2a10 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1641,6 +1641,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
spin_lock_irqsave(&queue->state_lock, flags);
switch (queue->state) {
case NVMET_RDMA_Q_CONNECTING:
+ while (!list_empty(&queue->rsp_wait_list)) {
+ struct nvmet_rdma_rsp *rsp;
+
+ rsp = list_first_entry(&queue->rsp_wait_list,
+ struct nvmet_rdma_rsp,
+ wait_list);
+ list_del(&rsp->wait_list);
+ nvmet_rdma_put_rsp(rsp);
+ }
+ fallthrough;
case NVMET_RDMA_Q_LIVE:
queue->state = NVMET_RDMA_Q_DISCONNECTING;
disconnect = true;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 4268eb359915..8c905aabacc0 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -1092,7 +1092,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
if (IS_ERR(opp_table->clk)) {
ret = PTR_ERR(opp_table->clk);
if (ret == -EPROBE_DEFER)
- goto err;
+ goto remove_opp_dev;
dev_dbg(dev, "%s: Couldn't find clock: %d\n", __func__, ret);
}
@@ -1101,7 +1101,7 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
ret = dev_pm_opp_of_find_icc_paths(dev, opp_table);
if (ret) {
if (ret == -EPROBE_DEFER)
- goto err;
+ goto put_clk;
dev_warn(dev, "%s: Error finding interconnect paths: %d\n",
__func__, ret);
@@ -1113,6 +1113,11 @@ static struct opp_table *_allocate_opp_table(struct device *dev, int index)
return opp_table;
+put_clk:
+ if (!IS_ERR(opp_table->clk))
+ clk_put(opp_table->clk);
+remove_opp_dev:
+ _remove_opp_dev(opp_dev, opp_table);
err:
kfree(opp_table);
return ERR_PTR(ret);
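
Note on the new remove_opp_dev/put_clk labels: they turn _allocate_opp_table() into the usual layered unwind, where every failure jumps to the label that releases exactly what has been acquired so far, in reverse order. The skeleton of the idiom, with illustrative helpers:

    static int acquire_a(void), acquire_b(void), acquire_c(void);
    static void release_a_res(void), release_b_res(void);

    static int setup(void)
    {
            if (acquire_a())
                    goto err;       /* nothing held yet */
            if (acquire_b())
                    goto release_a;
            if (acquire_c())
                    goto release_b;
            return 0;

    release_b:
            release_b_res();
    release_a:
            release_a_res();
    err:
            return -EINVAL;
    }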
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 516b151e0ef3..8a84c005f32b 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -397,12 +397,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
pp);
ret = dma_set_mask(pci->dev, DMA_BIT_MASK(32));
- if (!ret) {
- dev_warn(pci->dev,
- "Failed to set DMA mask to 32-bit. "
- "Devices with only 32-bit MSI support"
- " may not work properly\n");
- }
+ if (ret)
+ dev_warn(pci->dev, "Failed to set DMA mask to 32-bit. Devices with only 32-bit MSI support may not work properly\n");
pp->msi_data = dma_map_single_attrs(pci->dev, &pp->msi_msg,
sizeof(pp->msi_msg),
diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
index 5597b2a49598..6fa216e52d14 100644
--- a/drivers/pci/controller/dwc/pcie-tegra194.c
+++ b/drivers/pci/controller/dwc/pcie-tegra194.c
@@ -853,12 +853,14 @@ static void config_gen3_gen4_eq_presets(struct tegra_pcie_dw *pcie)
dw_pcie_writel_dbi(pci, GEN3_RELATED_OFF, val);
}
-static void tegra_pcie_prepare_host(struct pcie_port *pp)
+static int tegra_pcie_dw_host_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
u32 val;
+ pp->bridge->ops = &tegra_pci_ops;
+
if (!pcie->pcie_cap_base)
pcie->pcie_cap_base = dw_pcie_find_capability(&pcie->pci,
PCI_CAP_ID_EXP);
@@ -907,10 +909,24 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
dw_pcie_writel_dbi(pci, CFG_TIMER_CTRL_MAX_FUNC_NUM_OFF, val);
}
- dw_pcie_setup_rc(pp);
-
clk_set_rate(pcie->core_clk, GEN4_CORE_CLK_FREQ);
+ return 0;
+}
+
+static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
+{
+ u32 val, offset, speed, tmp;
+ struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
+ struct pcie_port *pp = &pci->pp;
+ bool retry = true;
+
+ if (pcie->mode == DW_PCIE_EP_TYPE) {
+ enable_irq(pcie->pex_rst_irq);
+ return 0;
+ }
+
+retry_link:
/* Assert RST */
val = appl_readl(pcie, APPL_PINMUX);
val &= ~APPL_PINMUX_PEX_RST;
@@ -929,19 +945,10 @@ static void tegra_pcie_prepare_host(struct pcie_port *pp)
appl_writel(pcie, val, APPL_PINMUX);
msleep(100);
-}
-
-static int tegra_pcie_dw_host_init(struct pcie_port *pp)
-{
- struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
- u32 val, tmp, offset, speed;
-
- pp->bridge->ops = &tegra_pci_ops;
-
- tegra_pcie_prepare_host(pp);
if (dw_pcie_wait_for_link(pci)) {
+ if (!retry)
+ return 0;
/*
* There are some endpoints which can't get the link up if
* root port has Data Link Feature (DLF) enabled.
@@ -975,10 +982,11 @@ static int tegra_pcie_dw_host_init(struct pcie_port *pp)
val &= ~PCI_DLF_EXCHANGE_ENABLE;
dw_pcie_writel_dbi(pci, offset, val);
- tegra_pcie_prepare_host(pp);
+ tegra_pcie_dw_host_init(pp);
+ dw_pcie_setup_rc(pp);
- if (dw_pcie_wait_for_link(pci))
- return 0;
+ retry = false;
+ goto retry_link;
}
speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
@@ -998,15 +1006,6 @@ static int tegra_pcie_dw_link_up(struct dw_pcie *pci)
return !!(val & PCI_EXP_LNKSTA_DLLLA);
}
-static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
-{
- struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
-
- enable_irq(pcie->pex_rst_irq);
-
- return 0;
-}
-
static void tegra_pcie_dw_stop_link(struct dw_pcie *pci)
{
struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
@@ -2215,6 +2214,10 @@ static int tegra_pcie_dw_resume_noirq(struct device *dev)
goto fail_host_init;
}
+ ret = tegra_pcie_dw_start_link(&pcie->pci);
+ if (ret < 0)
+ goto fail_host_init;
+
/* Restore MSI interrupt vector */
dw_pcie_writel_dbi(&pcie->pci, PORT_LOGIC_MSI_CTRL_INT_0_EN,
pcie->msi_ctrl_int);
diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
index a2632d02ce8f..c637de3a389b 100644
--- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
+++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c
@@ -306,13 +306,11 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)
static void mobiveil_mask_intx_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct mobiveil_pcie *pcie;
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
struct mobiveil_root_port *rp;
unsigned long flags;
u32 mask, shifted_val;
- pcie = irq_desc_get_chip_data(desc);
rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
@@ -324,13 +322,11 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)
static void mobiveil_unmask_intx_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct mobiveil_pcie *pcie;
+ struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
struct mobiveil_root_port *rp;
unsigned long flags;
u32 shifted_val, mask;
- pcie = irq_desc_get_chip_data(desc);
rp = &pcie->rp;
mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 7f29c2fdcd51..07e36661bbc2 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -374,13 +374,11 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)
static void nwl_mask_leg_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct nwl_pcie *pcie;
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- pcie = irq_desc_get_chip_data(desc);
mask = 1 << (data->hwirq - 1);
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
@@ -390,13 +388,11 @@ static void nwl_mask_leg_irq(struct irq_data *data)
static void nwl_unmask_leg_irq(struct irq_data *data)
{
- struct irq_desc *desc = irq_to_desc(data->irq);
- struct nwl_pcie *pcie;
+ struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
unsigned long flags;
u32 mask;
u32 val;
- pcie = irq_desc_get_chip_data(desc);
mask = 1 << (data->hwirq - 1);
raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
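
Note on the mobiveil and nwl hunks: both retire irq_to_desc() the same way — an irq_chip callback already holds struct irq_data, so the controller's private pointer is one accessor away via irq_data_get_irq_chip_data(), which also works in modules where irq_to_desc() is not exported. The resulting callback shape, with an illustrative driver type:

    static void my_mask_irq(struct irq_data *data)
    {
            struct my_pcie *pcie = irq_data_get_irq_chip_data(data);
            u32 bit = 1U << (data->hwirq - 1);

            /* ... clear 'bit' in pcie's interrupt mask register ... */
    }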
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 82d10b6661c7..d13b8d1a780a 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -244,10 +244,6 @@ config PCMCIA_VRC4171
tristate "NEC VRC4171 Card Controllers support"
depends on CPU_VR41XX && ISA && PCMCIA
-config PCMCIA_VRC4173
- tristate "NEC VRC4173 CARDU support"
- depends on CPU_VR41XX && PCI && PCMCIA
-
config OMAP_CF
tristate "OMAP CompactFlash Controller"
depends on PCMCIA && ARCH_OMAP16XX
@@ -258,6 +254,7 @@ config OMAP_CF
config AT91_CF
tristate "AT91 CompactFlash Controller"
depends on PCI
+ depends on OF
depends on PCMCIA && ARCH_AT91
help
Say Y here to support the CompactFlash controller on AT91 chips.
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 01779c5c45f3..d82c07c4806b 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -30,7 +30,6 @@ obj-$(CONFIG_PCMCIA_SA1100) += sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa1111_cs.o
obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
-obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
obj-$(CONFIG_OMAP_CF) += omap_cf.o
obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 7db0e9c74dfc..6b1edfc890a3 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -13,7 +13,6 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/platform_data/atmel.h>
#include <linux/io.h>
#include <linux/sizes.h>
#include <linux/mfd/syscon.h>
@@ -35,6 +34,17 @@
#define CF_IO_PHYS (1 << 23)
#define CF_MEM_PHYS (0x017ff800)
+struct at91_cf_data {
+ int irq_pin; /* I/O IRQ */
+ int det_pin; /* Card detect */
+ int vcc_pin; /* power switching */
+ int rst_pin; /* card reset */
+ u8 chipselect; /* EBI Chip Select number */
+ u8 flags;
+#define AT91_CF_TRUE_IDE 0x01
+#define AT91_IDE_SWAP_A0_A2 0x02
+};
+
struct regmap *mc;
/*--------------------------------------------------------------------------*/
@@ -209,16 +219,18 @@ static struct pccard_operations at91_cf_ops = {
/*--------------------------------------------------------------------------*/
-#if defined(CONFIG_OF)
static const struct of_device_id at91_cf_dt_ids[] = {
{ .compatible = "atmel,at91rm9200-cf" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, at91_cf_dt_ids);
-static int at91_cf_dt_init(struct platform_device *pdev)
+static int at91_cf_probe(struct platform_device *pdev)
{
- struct at91_cf_data *board;
+ struct at91_cf_socket *cf;
+ struct at91_cf_data *board;
+ struct resource *io;
+ int status;
board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
if (!board)
@@ -229,33 +241,9 @@ static int at91_cf_dt_init(struct platform_device *pdev)
board->vcc_pin = of_get_gpio(pdev->dev.of_node, 2);
board->rst_pin = of_get_gpio(pdev->dev.of_node, 3);
- pdev->dev.platform_data = board;
-
mc = syscon_regmap_lookup_by_compatible("atmel,at91rm9200-sdramc");
-
- return PTR_ERR_OR_ZERO(mc);
-}
-#else
-static int at91_cf_dt_init(struct platform_device *pdev)
-{
- return -ENODEV;
-}
-#endif
-
-static int at91_cf_probe(struct platform_device *pdev)
-{
- struct at91_cf_socket *cf;
- struct at91_cf_data *board = pdev->dev.platform_data;
- struct resource *io;
- int status;
-
- if (!board) {
- status = at91_cf_dt_init(pdev);
- if (status)
- return status;
-
- board = pdev->dev.platform_data;
- }
+ if (IS_ERR(mc))
+ return PTR_ERR(mc);
if (!gpio_is_valid(board->det_pin) || !gpio_is_valid(board->rst_pin))
return -ENODEV;
@@ -399,7 +387,7 @@ static int at91_cf_resume(struct platform_device *pdev)
static struct platform_driver at91_cf_driver = {
.driver = {
.name = "at91_cf",
- .of_match_table = of_match_ptr(at91_cf_dt_ids),
+ .of_match_table = at91_cf_dt_ids,
},
.probe = at91_cf_probe,
.remove = at91_cf_remove,
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index a7c7c7cd2326..a6fbc709913e 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -452,7 +452,7 @@ static int db1x_pcmcia_socket_probe(struct platform_device *pdev)
printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
ret = -ENODEV;
goto out0;
- };
+ }
/*
* gather resources necessary and optional nice-to-haves to
diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c
index 35158cfd9c1a..40a5cffe24a4 100644
--- a/drivers/pcmcia/electra_cf.c
+++ b/drivers/pcmcia/electra_cf.c
@@ -229,6 +229,8 @@ static int electra_cf_probe(struct platform_device *ofdev)
cf->socket.pci_irq = cf->irq;
+ status = -EINVAL;
+
prop = of_get_property(np, "card-detect-gpio", NULL);
if (!prop)
goto fail1;
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index d3ef5534991e..f0b2c2d03469 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -252,11 +252,15 @@ static int __init omap_cf_probe(struct platform_device *pdev)
/* pcmcia layer only remaps "real" memory */
cf->socket.io_offset = (unsigned long)
ioremap(cf->phys_cf + SZ_4K, SZ_2K);
- if (!cf->socket.io_offset)
+ if (!cf->socket.io_offset) {
+ status = -ENOMEM;
goto fail1;
+ }
- if (!request_mem_region(cf->phys_cf, SZ_8K, driver_name))
+ if (!request_mem_region(cf->phys_cf, SZ_8K, driver_name)) {
+ status = -ENXIO;
goto fail1;
+ }
/* NOTE: CF conflicts with MMC1 */
omap_cfg_reg(W11_1610_CF_CD1);
diff --git a/drivers/pcmcia/vrc4173_cardu.c b/drivers/pcmcia/vrc4173_cardu.c
deleted file mode 100644
index 9fb0c3addfd4..000000000000
--- a/drivers/pcmcia/vrc4173_cardu.c
+++ /dev/null
@@ -1,591 +0,0 @@
-/*
- * FILE NAME
- * drivers/pcmcia/vrc4173_cardu.c
- *
- * BRIEF MODULE DESCRIPTION
- * NEC VRC4173 CARDU driver for Socket Services
- * (This device doesn't support CardBus. It supports only 16-bit PC Cards.)
- *
- * Copyright 2002,2003 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-
-#include <asm/io.h>
-
-#include <pcmcia/ss.h>
-
-#include "vrc4173_cardu.h"
-
-MODULE_DESCRIPTION("NEC VRC4173 CARDU driver for Socket Services");
-MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
-MODULE_LICENSE("GPL");
-
-static int vrc4173_cardu_slots;
-
-static vrc4173_socket_t cardu_sockets[CARDU_MAX_SOCKETS];
-
-extern struct socket_info_t *pcmcia_register_socket (int slot,
- struct pccard_operations *vtable,
- int use_bus_pm);
-extern void pcmcia_unregister_socket(struct socket_info_t *s);
-
-static inline uint8_t exca_readb(vrc4173_socket_t *socket, uint16_t offset)
-{
- return readb(socket->base + EXCA_REGS_BASE + offset);
-}
-
-static inline uint16_t exca_readw(vrc4173_socket_t *socket, uint16_t offset)
-{
- uint16_t val;
-
- val = readb(socket->base + EXCA_REGS_BASE + offset);
- val |= (u16)readb(socket->base + EXCA_REGS_BASE + offset + 1) << 8;
-
- return val;
-}
-
-static inline void exca_writeb(vrc4173_socket_t *socket, uint16_t offset, uint8_t val)
-{
- writeb(val, socket->base + EXCA_REGS_BASE + offset);
-}
-
-static inline void exca_writew(vrc4173_socket_t *socket, uint8_t offset, uint16_t val)
-{
- writeb((u8)val, socket->base + EXCA_REGS_BASE + offset);
- writeb((u8)(val >> 8), socket->base + EXCA_REGS_BASE + offset + 1);
-}
-
-static inline uint32_t cardbus_socket_readl(vrc4173_socket_t *socket, u16 offset)
-{
- return readl(socket->base + CARDBUS_SOCKET_REGS_BASE + offset);
-}
-
-static inline void cardbus_socket_writel(vrc4173_socket_t *socket, u16 offset, uint32_t val)
-{
- writel(val, socket->base + CARDBUS_SOCKET_REGS_BASE + offset);
-}
-
-static void cardu_pciregs_init(struct pci_dev *dev)
-{
- u32 syscnt;
- u16 brgcnt;
- u8 devcnt;
-
- pci_write_config_dword(dev, 0x1c, 0x10000000);
- pci_write_config_dword(dev, 0x20, 0x17fff000);
- pci_write_config_dword(dev, 0x2c, 0);
- pci_write_config_dword(dev, 0x30, 0xfffc);
-
- pci_read_config_word(dev, BRGCNT, &brgcnt);
- brgcnt &= ~IREQ_INT;
- pci_write_config_word(dev, BRGCNT, brgcnt);
-
- pci_read_config_dword(dev, SYSCNT, &syscnt);
- syscnt &= ~(BAD_VCC_REQ_DISB|PCPCI_EN|CH_ASSIGN_MASK|SUB_ID_WR_EN|PCI_CLK_RIN);
- syscnt |= (CH_ASSIGN_NODMA|ASYN_INT_MODE);
- pci_write_config_dword(dev, SYSCNT, syscnt);
-
- pci_read_config_byte(dev, DEVCNT, &devcnt);
- devcnt &= ~(ZOOM_VIDEO_EN|SR_PCI_INT_SEL_MASK|PCI_INT_MODE|IRQ_MODE);
- devcnt |= (SR_PCI_INT_SEL_NONE|IFG);
- pci_write_config_byte(dev, DEVCNT, devcnt);
-
- pci_write_config_byte(dev, CHIPCNT, S_PREF_DISB);
-
- pci_write_config_byte(dev, SERRDIS, 0);
-}
-
-static int cardu_init(unsigned int slot)
-{
- vrc4173_socket_t *socket = &cardu_sockets[slot];
-
- cardu_pciregs_init(socket->dev);
-
- /* CARD_SC bits are cleared by reading CARD_SC. */
- exca_writeb(socket, GLO_CNT, 0);
-
- socket->cap.features |= SS_CAP_PCCARD | SS_CAP_PAGE_REGS;
- socket->cap.irq_mask = 0;
- socket->cap.map_size = 0x1000;
- socket->cap.pci_irq = socket->dev->irq;
- socket->events = 0;
- spin_lock_init(socket->event_lock);
-
- /* Enable PC Card status interrupts */
- exca_writeb(socket, CARD_SCI, CARD_DT_EN|RDY_EN|BAT_WAR_EN|BAT_DEAD_EN);
-
- return 0;
-}
-
-static int cardu_register_callback(unsigned int sock,
- void (*handler)(void *, unsigned int),
- void * info)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
-
- socket->handler = handler;
- socket->info = info;
-
- return 0;
-}
-
-static int cardu_inquire_socket(unsigned int sock, socket_cap_t *cap)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
-
- *cap = socket->cap;
-
- return 0;
-}
-
-static int cardu_get_status(unsigned int sock, u_int *value)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint32_t state;
- uint8_t status;
- u_int val = 0;
-
- status = exca_readb(socket, IF_STATUS);
- if (status & CARD_PWR) val |= SS_POWERON;
- if (status & READY) val |= SS_READY;
- if (status & CARD_WP) val |= SS_WRPROT;
- if ((status & (CARD_DETECT1|CARD_DETECT2)) == (CARD_DETECT1|CARD_DETECT2))
- val |= SS_DETECT;
- if (exca_readb(socket, INT_GEN_CNT) & CARD_TYPE_IO) {
- if (status & STSCHG) val |= SS_STSCHG;
- } else {
- status &= BV_DETECT_MASK;
- if (status != BV_DETECT_GOOD) {
- if (status == BV_DETECT_WARN) val |= SS_BATWARN;
- else val |= SS_BATDEAD;
- }
- }
-
- state = cardbus_socket_readl(socket, SKT_PRE_STATE);
- if (state & VOL_3V_CARD_DT) val |= SS_3VCARD;
- if (state & VOL_XV_CARD_DT) val |= SS_XVCARD;
- if (state & CB_CARD_DT) val |= SS_CARDBUS;
- if (!(state &
- (VOL_YV_CARD_DT|VOL_XV_CARD_DT|VOL_3V_CARD_DT|VOL_5V_CARD_DT|CCD20|CCD10)))
- val |= SS_PENDING;
-
- *value = val;
-
- return 0;
-}
-
-static inline uint8_t set_Vcc_value(u_char Vcc)
-{
- switch (Vcc) {
- case 33:
- return VCC_3V;
- case 50:
- return VCC_5V;
- }
-
- return VCC_0V;
-}
-
-static inline uint8_t set_Vpp_value(u_char Vpp)
-{
- switch (Vpp) {
- case 33:
- case 50:
- return VPP_VCC;
- case 120:
- return VPP_12V;
- }
-
- return VPP_0V;
-}
-
-static int cardu_set_socket(unsigned int sock, socket_state_t *state)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint8_t val;
-
- if (((state->Vpp == 33) || (state->Vpp == 50)) && (state->Vpp != state->Vcc))
- return -EINVAL;
-
- val = set_Vcc_value(state->Vcc);
- val |= set_Vpp_value(state->Vpp);
- if (state->flags & SS_OUTPUT_ENA) val |= CARD_OUT_EN;
- exca_writeb(socket, PWR_CNT, val);
-
- val = exca_readb(socket, INT_GEN_CNT) & CARD_REST0;
- if (state->flags & SS_RESET) val &= ~CARD_REST0;
- else val |= CARD_REST0;
- if (state->flags & SS_IOCARD) val |= CARD_TYPE_IO;
- exca_writeb(socket, INT_GEN_CNT, val);
-
- return 0;
-}
-
-static int cardu_get_io_map(unsigned int sock, struct pccard_io_map *io)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint8_t ioctl, window;
- u_char map;
-
- map = io->map;
- if (map > 1)
- return -EINVAL;
-
- io->start = exca_readw(socket, IO_WIN_SA(map));
- io->stop = exca_readw(socket, IO_WIN_EA(map));
-
- ioctl = exca_readb(socket, IO_WIN_CNT);
- window = exca_readb(socket, ADR_WIN_EN);
- io->flags = (window & IO_WIN_EN(map)) ? MAP_ACTIVE : 0;
- if (ioctl & IO_WIN_DATA_AUTOSZ(map))
- io->flags |= MAP_AUTOSZ;
- else if (ioctl & IO_WIN_DATA_16BIT(map))
- io->flags |= MAP_16BIT;
-
- return 0;
-}
-
-static int cardu_set_io_map(unsigned int sock, struct pccard_io_map *io)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint16_t ioctl;
- uint8_t window, enable;
- u_char map;
-
- map = io->map;
- if (map > 1)
- return -EINVAL;
-
- window = exca_readb(socket, ADR_WIN_EN);
- enable = IO_WIN_EN(map);
-
- if (window & enable) {
- window &= ~enable;
- exca_writeb(socket, ADR_WIN_EN, window);
- }
-
- exca_writew(socket, IO_WIN_SA(map), io->start);
- exca_writew(socket, IO_WIN_EA(map), io->stop);
-
- ioctl = exca_readb(socket, IO_WIN_CNT) & ~IO_WIN_CNT_MASK(map);
- if (io->flags & MAP_AUTOSZ) ioctl |= IO_WIN_DATA_AUTOSZ(map);
- else if (io->flags & MAP_16BIT) ioctl |= IO_WIN_DATA_16BIT(map);
- exca_writeb(socket, IO_WIN_CNT, ioctl);
-
- if (io->flags & MAP_ACTIVE)
- exca_writeb(socket, ADR_WIN_EN, window | enable);
-
- return 0;
-}
-
-static int cardu_get_mem_map(unsigned int sock, struct pccard_mem_map *mem)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint32_t start, stop, offset, page;
- uint8_t window;
- u_char map;
-
- map = mem->map;
- if (map > 4)
- return -EINVAL;
-
- window = exca_readb(socket, ADR_WIN_EN);
- mem->flags = (window & MEM_WIN_EN(map)) ? MAP_ACTIVE : 0;
-
- start = exca_readw(socket, MEM_WIN_SA(map));
- mem->flags |= (start & MEM_WIN_DSIZE) ? MAP_16BIT : 0;
- start = (start & 0x0fff) << 12;
-
- stop = exca_readw(socket, MEM_WIN_EA(map));
- stop = ((stop & 0x0fff) << 12) + 0x0fff;
-
- offset = exca_readw(socket, MEM_WIN_OA(map));
- mem->flags |= (offset & MEM_WIN_WP) ? MAP_WRPROT : 0;
- mem->flags |= (offset & MEM_WIN_REGSET) ? MAP_ATTRIB : 0;
- offset = ((offset & 0x3fff) << 12) + start;
- mem->card_start = offset & 0x03ffffff;
-
- page = exca_readb(socket, MEM_WIN_SAU(map)) << 24;
- mem->sys_start = start + page;
- mem->sys_stop = start + page;
-
- return 0;
-}
-
-static int cardu_set_mem_map(unsigned int sock, struct pccard_mem_map *mem)
-{
- vrc4173_socket_t *socket = &cardu_sockets[sock];
- uint16_t value;
- uint8_t window, enable;
- u_long sys_start, sys_stop, card_start;
- u_char map;
-
- map = mem->map;
- sys_start = mem->sys_start;
- sys_stop = mem->sys_stop;
- card_start = mem->card_start;
-
- if (map > 4 || sys_start > sys_stop || ((sys_start ^ sys_stop) >> 24) ||
- (card_start >> 26))
- return -EINVAL;
-
- window = exca_readb(socket, ADR_WIN_EN);
- enable = MEM_WIN_EN(map);
- if (window & enable) {
- window &= ~enable;
- exca_writeb(socket, ADR_WIN_EN, window);
- }
-
- exca_writeb(socket, MEM_WIN_SAU(map), sys_start >> 24);
-
- value = (sys_start >> 12) & 0x0fff;
- if (mem->flags & MAP_16BIT) value |= MEM_WIN_DSIZE;
- exca_writew(socket, MEM_WIN_SA(map), value);
-
- value = (sys_stop >> 12) & 0x0fff;
- exca_writew(socket, MEM_WIN_EA(map), value);
-
- value = ((card_start - sys_start) >> 12) & 0x3fff;
- if (mem->flags & MAP_WRPROT) value |= MEM_WIN_WP;
- if (mem->flags & MAP_ATTRIB) value |= MEM_WIN_REGSET;
- exca_writew(socket, MEM_WIN_OA(map), value);
-
- if (mem->flags & MAP_ACTIVE)
- exca_writeb(socket, ADR_WIN_EN, window | enable);
-
- return 0;
-}
-
-static void cardu_proc_setup(unsigned int sock, struct proc_dir_entry *base)
-{
-}
-
-static struct pccard_operations cardu_operations = {
- .init = cardu_init,
- .register_callback = cardu_register_callback,
- .inquire_socket = cardu_inquire_socket,
- .get_status = cardu_get_status,
- .set_socket = cardu_set_socket,
- .get_io_map = cardu_get_io_map,
- .set_io_map = cardu_set_io_map,
- .get_mem_map = cardu_get_mem_map,
- .set_mem_map = cardu_set_mem_map,
- .proc_setup = cardu_proc_setup,
-};
-
-static void cardu_bh(void *data)
-{
- vrc4173_socket_t *socket = (vrc4173_socket_t *)data;
- uint16_t events;
-
- spin_lock_irq(&socket->event_lock);
- events = socket->events;
- socket->events = 0;
- spin_unlock_irq(&socket->event_lock);
-
- if (socket->handler)
- socket->handler(socket->info, events);
-}
-
-static uint16_t get_events(vrc4173_socket_t *socket)
-{
- uint16_t events = 0;
- uint8_t csc, status;
-
- status = exca_readb(socket, IF_STATUS);
- csc = exca_readb(socket, CARD_SC);
- if ((csc & CARD_DT_CHG) &&
- ((status & (CARD_DETECT1|CARD_DETECT2)) == (CARD_DETECT1|CARD_DETECT2)))
- events |= SS_DETECT;
-
- if ((csc & RDY_CHG) && (status & READY))
- events |= SS_READY;
-
- if (exca_readb(socket, INT_GEN_CNT) & CARD_TYPE_IO) {
- if ((csc & BAT_DEAD_ST_CHG) && (status & STSCHG))
- events |= SS_STSCHG;
- } else {
- if (csc & (BAT_WAR_CHG|BAT_DEAD_ST_CHG)) {
- if ((status & BV_DETECT_MASK) != BV_DETECT_GOOD) {
- if (status == BV_DETECT_WARN) events |= SS_BATWARN;
- else events |= SS_BATDEAD;
- }
- }
- }
-
- return events;
-}
-
-static void cardu_interrupt(int irq, void *dev_id)
-{
- vrc4173_socket_t *socket = (vrc4173_socket_t *)dev_id;
- uint16_t events;
-
- INIT_WORK(&socket->tq_work, cardu_bh, socket);
-
- events = get_events(socket);
- if (events) {
- spin_lock(&socket->event_lock);
- socket->events |= events;
- spin_unlock(&socket->event_lock);
- schedule_work(&socket->tq_work);
- }
-}
-
-static int vrc4173_cardu_probe(struct pci_dev *dev,
- const struct pci_device_id *ent)
-{
- vrc4173_socket_t *socket;
- unsigned long start, len, flags;
- int slot, err, ret;
-
- slot = vrc4173_cardu_slots++;
- socket = &cardu_sockets[slot];
- if (socket->noprobe != 0)
- return -EBUSY;
-
- sprintf(socket->name, "NEC VRC4173 CARDU%1d", slot+1);
-
- if ((err = pci_enable_device(dev)) < 0)
- return err;
-
- start = pci_resource_start(dev, 0);
- if (start == 0) {
- ret = -ENODEV;
- goto disable;
- }
-
- len = pci_resource_len(dev, 0);
- if (len == 0) {
- ret = -ENODEV;
- goto disable;
- }
-
- flags = pci_resource_flags(dev, 0);
- if ((flags & IORESOURCE_MEM) == 0) {
- ret = -EBUSY;
- goto disable;
- }
-
- err = pci_request_regions(dev, socket->name);
- if (err < 0) {
- ret = err;
- goto disable;
- }
-
- socket->base = ioremap(start, len);
- if (socket->base == NULL) {
- ret = -ENODEV;
- goto release;
- }
-
- socket->dev = dev;
-
- socket->pcmcia_socket = pcmcia_register_socket(slot, &cardu_operations, 1);
- if (socket->pcmcia_socket == NULL) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- if (request_irq(dev->irq, cardu_interrupt, IRQF_SHARED, socket->name, socket) < 0) {
- ret = -EBUSY;
- goto unregister;
- }
-
- printk(KERN_INFO "%s at %#08lx, IRQ %d\n", socket->name, start, dev->irq);
-
- return 0;
-
-unregister:
- pcmcia_unregister_socket(socket->pcmcia_socket);
- socket->pcmcia_socket = NULL;
-unmap:
- iounmap(socket->base);
- socket->base = NULL;
-release:
- pci_release_regions(dev);
-disable:
- pci_disable_device(dev);
- return ret;
-}
-
-static int vrc4173_cardu_setup(char *options)
-{
- if (options == NULL || *options == '\0')
- return 1;
-
- if (strncmp(options, "cardu1:", 7) == 0) {
- options += 7;
- if (*options != '\0') {
- if (strncmp(options, "noprobe", 7) == 0) {
- cardu_sockets[CARDU1].noprobe = 1;
- options += 7;
- }
-
- if (*options != ',')
- return 1;
- } else
- return 1;
- }
-
- if (strncmp(options, "cardu2:", 7) == 0) {
- options += 7;
- if ((*options != '\0') && (strncmp(options, "noprobe", 7) == 0))
- cardu_sockets[CARDU2].noprobe = 1;
- }
-
- return 1;
-}
-
-__setup("vrc4173_cardu=", vrc4173_cardu_setup);
-
-static const struct pci_device_id vrc4173_cardu_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_NAPCCARD) },
- {0, }
-};
-
-static struct pci_driver vrc4173_cardu_driver = {
- .name = "NEC VRC4173 CARDU",
- .probe = vrc4173_cardu_probe,
- .id_table = vrc4173_cardu_id_table,
-};
-
-static int vrc4173_cardu_init(void)
-{
- vrc4173_cardu_slots = 0;
-
- return pci_register_driver(&vrc4173_cardu_driver);
-}
-
-static void vrc4173_cardu_exit(void)
-{
- pci_unregister_driver(&vrc4173_cardu_driver);
-}
-
-module_init(vrc4173_cardu_init);
-module_exit(vrc4173_cardu_exit);
-MODULE_DEVICE_TABLE(pci, vrc4173_cardu_id_table);
diff --git a/drivers/pcmcia/vrc4173_cardu.h b/drivers/pcmcia/vrc4173_cardu.h
deleted file mode 100644
index a7d96018ed8d..000000000000
--- a/drivers/pcmcia/vrc4173_cardu.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
- * FILE NAME
- * drivers/pcmcia/vrc4173_cardu.h
- *
- * BRIEF MODULE DESCRIPTION
- * Include file for NEC VRC4173 CARDU.
- *
- * Copyright 2002 Yoichi Yuasa <yuasa@linux-mips.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-#ifndef _VRC4173_CARDU_H
-#define _VRC4173_CARDU_H
-
-#include <linux/pci.h>
-
-#include <pcmcia/ss.h>
-
-#define CARDU_MAX_SOCKETS 2
-#define CARDU1 0
-#define CARDU2 1
-
-/*
- * PCI Configuration Registers
- */
-#define BRGCNT 0x3e
- #define POST_WR_EN 0x0400
- #define MEM1_PREF_EN 0x0200
- #define MEM0_PREF_EN 0x0100
- #define IREQ_INT 0x0080
- #define CARD_RST 0x0040
- #define MABORT_MODE 0x0020
- #define VGA_EN 0x0008
- #define ISA_EN 0x0004
- #define SERR_EN 0x0002
- #define PERR_EN 0x0001
-
-#define SYSCNT 0x80
- #define BAD_VCC_REQ_DISB 0x00200000
- #define PCPCI_EN 0x00080000
- #define CH_ASSIGN_MASK 0x00070000
- #define CH_ASSIGN_NODMA 0x00040000
- #define SUB_ID_WR_EN 0x00000008
- #define ASYN_INT_MODE 0x00000004
- #define PCI_CLK_RIN 0x00000002
-
-#define DEVCNT 0x91
- #define ZOOM_VIDEO_EN 0x40
- #define SR_PCI_INT_SEL_MASK 0x18
- #define SR_PCI_INT_SEL_NONE 0x00
- #define PCI_INT_MODE 0x04
- #define IRQ_MODE 0x02
- #define IFG 0x01
-
-#define CHIPCNT 0x9c
- #define S_PREF_DISB 0x10
-
-#define SERRDIS 0x9f
- #define SERR_DIS_MAB 0x10
- #define SERR_DIS_TAB 0x08
- #define SERR_DIS_DT_PERR 0x04
-
-/*
- * ExCA Registers
- */
-#define EXCA_REGS_BASE 0x800
-#define EXCA_REGS_SIZE 0x800
-
-#define ID_REV 0x000
- #define IF_TYPE_16BIT 0x80
-
-#define IF_STATUS 0x001
- #define CARD_PWR 0x40
- #define READY 0x20
- #define CARD_WP 0x10
- #define CARD_DETECT2 0x08
- #define CARD_DETECT1 0x04
- #define BV_DETECT_MASK 0x03
- #define BV_DETECT_GOOD 0x03 /* Memory card */
- #define BV_DETECT_WARN 0x02
- #define BV_DETECT_BAD1 0x01
- #define BV_DETECT_BAD0 0x00
- #define STSCHG 0x02 /* I/O card */
- #define SPKR 0x01
-
-#define PWR_CNT 0x002
- #define CARD_OUT_EN 0x80
- #define VCC_MASK 0x18
- #define VCC_3V 0x18
- #define VCC_5V 0x10
- #define VCC_0V 0x00
- #define VPP_MASK 0x03
- #define VPP_12V 0x02
- #define VPP_VCC 0x01
- #define VPP_0V 0x00
-
-#define INT_GEN_CNT 0x003
- #define CARD_REST0 0x40
- #define CARD_TYPE_MASK 0x20
- #define CARD_TYPE_IO 0x20
- #define CARD_TYPE_MEM 0x00
-
-#define CARD_SC 0x004
- #define CARD_DT_CHG 0x08
- #define RDY_CHG 0x04
- #define BAT_WAR_CHG 0x02
- #define BAT_DEAD_ST_CHG 0x01
-
-#define CARD_SCI 0x005
- #define CARD_DT_EN 0x08
- #define RDY_EN 0x04
- #define BAT_WAR_EN 0x02
- #define BAT_DEAD_EN 0x01
-
-#define ADR_WIN_EN 0x006
- #define IO_WIN_EN(x) (0x40 << (x))
- #define MEM_WIN_EN(x) (0x01 << (x))
-
-#define IO_WIN_CNT 0x007
- #define IO_WIN_CNT_MASK(x) (0x03 << ((x) << 2))
- #define IO_WIN_DATA_AUTOSZ(x) (0x02 << ((x) << 2))
- #define IO_WIN_DATA_16BIT(x) (0x01 << ((x) << 2))
-
-#define IO_WIN_SA(x) (0x008 + ((x) << 2))
-#define IO_WIN_EA(x) (0x00a + ((x) << 2))
-
-#define MEM_WIN_SA(x) (0x010 + ((x) << 3))
- #define MEM_WIN_DSIZE 0x8000
-
-#define MEM_WIN_EA(x) (0x012 + ((x) << 3))
-
-#define MEM_WIN_OA(x) (0x014 + ((x) << 3))
- #define MEM_WIN_WP 0x8000
- #define MEM_WIN_REGSET 0x4000
-
-#define GEN_CNT 0x016
- #define VS2_STATUS 0x80
- #define VS1_STATUS 0x40
- #define EXCA_REG_RST_EN 0x02
-
-#define GLO_CNT 0x01e
- #define FUN_INT_LEV 0x08
- #define INT_WB_CLR 0x04
- #define CSC_INT_LEV 0x02
-
-#define IO_WIN_OAL(x) (0x036 + ((x) << 1))
-#define IO_WIN_OAH(x) (0x037 + ((x) << 1))
-
-#define MEM_WIN_SAU(x) (0x040 + (x))
-
-#define IO_SETUP_TIM 0x080
-#define IO_CMD_TIM 0x081
-#define IO_HOLD_TIM 0x082
-#define MEM_SETUP_TIM(x) (0x084 + ((x) << 2))
-#define MEM_CMD_TIM(x) (0x085 + ((x) << 2))
-#define MEM_HOLD_TIM(x) (0x086 + ((x) << 2))
- #define TIM_CLOCKS(x) ((x) - 1)
-
-#define MEM_TIM_SEL1 0x08c
-#define MEM_TIM_SEL2 0x08d
- #define MEM_WIN_TIMSEL1(x) (0x03 << (((x) & 3) << 1))
-
-#define MEM_WIN_PWEN 0x091
- #define POSTWEN 0x01
-
-/*
- * CardBus Socket Registers
- */
-#define CARDBUS_SOCKET_REGS_BASE 0x000
-#define CARDBUS_SOCKET_REGS_SIZE 0x800
-
-#define SKT_EV 0x000
- #define POW_CYC_EV 0x00000008
- #define CCD2_EV 0x00000004
- #define CCD1_EV 0x00000002
- #define CSTSCHG_EV 0x00000001
-
-#define SKT_MASK 0x004
- #define POW_CYC_MASK 0x00000008
- #define CCD_MASK 0x00000006
- #define CSC_MASK 0x00000001
-
-#define SKT_PRE_STATE 0x008
-#define SKT_FORCE_EV 0x00c
- #define VOL_3V_SKT 0x20000000
- #define VOL_5V_SKT 0x10000000
- #define CVS_TEST 0x00004000
- #define VOL_YV_CARD_DT 0x00002000
- #define VOL_XV_CARD_DT 0x00001000
- #define VOL_3V_CARD_DT 0x00000800
- #define VOL_5V_CARD_DT 0x00000400
- #define BAD_VCC_REQ 0x00000200
- #define DATA_LOST 0x00000100
- #define NOT_A_CARD 0x00000080
- #define CREADY 0x00000040
- #define CB_CARD_DT 0x00000020
- #define R2_CARD_DT 0x00000010
- #define POW_UP 0x00000008
- #define CCD20 0x00000004
- #define CCD10 0x00000002
- #define CSTSCHG 0x00000001
-
-#define SKT_CNT 0x010
- #define STP_CLK_EN 0x00000080
- #define VCC_CNT_MASK 0x00000070
- #define VCC_CNT_3V 0x00000030
- #define VCC_CNT_5V 0x00000020
- #define VCC_CNT_0V 0x00000000
- #define VPP_CNT_MASK 0x00000007
- #define VPP_CNT_3V 0x00000003
- #define VPP_CNT_5V 0x00000002
- #define VPP_CNT_12V 0x00000001
- #define VPP_CNT_0V 0x00000000
-
-typedef struct vrc4173_socket {
- int noprobe;
- struct pci_dev *dev;
- void *base;
- void (*handler)(void *, unsigned int);
- void *info;
- socket_cap_t cap;
- spinlock_t event_lock;
- uint16_t events;
- struct socket_info_t *pcmcia_socket;
- struct work_struct tq_work;
- char name[20];
-} vrc4173_socket_t;
-
-#endif /* _VRC4173_CARDU_H */
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 657e35a75d84..d4ea10803fd9 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -948,8 +948,8 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
(mode < 0) ? "unknown" : modes[mode]);
} else {
int irq = chip->to_irq(chip, offset);
- struct irq_desc *desc = irq_to_desc(irq);
const int pullidx = pull ? 1 : 0;
+ bool wake;
int val;
static const char * const pulls[] = {
"none ",
@@ -969,8 +969,9 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
* This races with request_irq(), set_irq_type(),
* and set_irq_wake() ... but those are "rare".
*/
- if (irq > 0 && desc && desc->action) {
+ if (irq > 0 && irq_has_action(irq)) {
char *trigger;
+ bool wake;
if (nmk_chip->edge_rising & BIT(offset))
trigger = "edge-rising";
@@ -979,10 +980,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
else
trigger = "edge-undefined";
+ wake = !!(nmk_chip->real_wake & BIT(offset));
+
seq_printf(s, " irq-%d %s%s",
- irq, trigger,
- irqd_is_wakeup_set(&desc->irq_data)
- ? " wakeup" : "");
+ irq, trigger, wake ? " wakeup" : "");
}
}
clk_disable(nmk_chip->clk);
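
The nomadik change belongs to the treewide effort to stop drivers from dereferencing struct irq_desc directly: irq_has_action() answers the only question the debug code had ("is a handler installed?"), and the wakeup flag now comes from the driver's own real_wake bookkeeping instead of irqd_is_wakeup_set(). Roughly:

    #include <linux/irqdesc.h>
    #include <linux/types.h>

    /* Equivalent check without reaching into struct irq_desc. The old form,
     * for comparison:
     *
     *      struct irq_desc *desc = irq_to_desc(irq);
     *      if (irq > 0 && desc && desc->action) { ... }
     */
    static bool example_irq_in_use(unsigned int irq)
    {
            return irq > 0 && irq_has_action(irq);
    }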
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index 0ecee8b8773d..7c92a6e22d75 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -742,12 +742,16 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
* Sensor events need to be parsed by the sensor sub-device.
* Defer them, and don't report the wakeup here.
*/
- if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
- *wake_event = false;
- /* Masked host-events should not count as wake events. */
- else if (host_event &&
- !(host_event & ec_dev->host_event_wake_mask))
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO) {
*wake_event = false;
+ } else if (host_event) {
+ /* rtc_update_irq() already handles wakeup events. */
+ if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_RTC))
+ *wake_event = false;
+ /* Masked host-events should not count as wake events. */
+ if (!(host_event & ec_dev->host_event_wake_mask))
+ *wake_event = false;
+ }
}
return ret;
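
After the restructuring, the two suppression rules apply independently to host events: RTC events are dropped because rtc_update_irq() already reports the wakeup, and anything outside the wake mask is dropped as masked. A condensed sketch of the decision (the RTC event number and EXAMPLE_ names are illustrative):

    #include <linux/bits.h>
    #include <linux/types.h>

    #define EXAMPLE_HOST_EVENT_MASK(ev)     BIT_ULL((ev) - 1)
    #define EXAMPLE_HOST_EVENT_RTC          26      /* illustrative value */

    static bool example_is_wake_event(u64 host_event, u64 wake_mask)
    {
            /* rtc_update_irq() already reports RTC wakeups. */
            if (host_event & EXAMPLE_HOST_EVENT_MASK(EXAMPLE_HOST_EVENT_RTC))
                    return false;
            /* Masked host events must not count as wake events. */
            if (!(host_event & wake_mask))
                    return false;
            return true;
    }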
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 8111ed1fc574..c43868615790 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -7,6 +7,7 @@
*/
#include <linux/acpi.h>
+#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_data/cros_ec_commands.h>
@@ -14,6 +15,7 @@
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/platform_device.h>
#include <linux/usb/pd.h>
+#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec.h>
#include <linux/usb/typec_altmode.h>
#include <linux/usb/typec_dp.h>
@@ -30,6 +32,12 @@ enum {
CROS_EC_ALTMODE_MAX,
};
+/* Container for altmode pointer nodes. */
+struct cros_typec_altmode_node {
+ struct typec_altmode *amode;
+ struct list_head list;
+};
+
/* Per port data. */
struct cros_typec_port {
struct typec_port *port;
@@ -48,6 +56,11 @@ struct cros_typec_port {
/* Port alt modes. */
struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX];
+
+ /* Flag indicating that PD discovery data parsing is completed. */
+ bool disc_done;
+ struct ec_response_typec_discovery *sop_disc;
+ struct list_head partner_mode_list;
};
/* Platform-specific data for the Chrome OS EC Type C controller. */
@@ -60,6 +73,7 @@ struct cros_typec_data {
struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS];
struct notifier_block nb;
struct work_struct port_work;
+ bool typec_cmd_supported;
};
static int cros_typec_parse_port_props(struct typec_capability *cap,
@@ -166,11 +180,25 @@ static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num,
return ret;
}
+static void cros_typec_unregister_altmodes(struct cros_typec_data *typec, int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct cros_typec_altmode_node *node, *tmp;
+
+ list_for_each_entry_safe(node, tmp, &port->partner_mode_list, list) {
+ list_del(&node->list);
+ typec_unregister_altmode(node->amode);
+ devm_kfree(typec->dev, node);
+ }
+}
+
static void cros_typec_remove_partner(struct cros_typec_data *typec,
int port_num)
{
struct cros_typec_port *port = typec->ports[port_num];
+ cros_typec_unregister_altmodes(typec, port_num);
+
port->state.alt = NULL;
port->state.mode = TYPEC_STATE_USB;
port->state.data = NULL;
@@ -181,6 +209,8 @@ static void cros_typec_remove_partner(struct cros_typec_data *typec,
typec_unregister_partner(port->partner);
port->partner = NULL;
+ memset(&port->p_identity, 0, sizeof(port->p_identity));
+ port->disc_done = false;
}
static void cros_unregister_ports(struct cros_typec_data *typec)
@@ -190,7 +220,10 @@ static void cros_unregister_ports(struct cros_typec_data *typec)
for (i = 0; i < typec->num_ports; i++) {
if (!typec->ports[i])
continue;
- cros_typec_remove_partner(typec, i);
+
+ if (typec->ports[i]->partner)
+ cros_typec_remove_partner(typec, i);
+
usb_role_switch_put(typec->ports[i]->role_sw);
typec_switch_put(typec->ports[i]->ori_sw);
typec_mux_put(typec->ports[i]->mux);
@@ -289,6 +322,14 @@ static int cros_typec_init_ports(struct cros_typec_data *typec)
port_num);
cros_typec_register_port_altmodes(typec, port_num);
+
+ cros_port->sop_disc = devm_kzalloc(dev, EC_PROTO2_MAX_RESPONSE_SIZE, GFP_KERNEL);
+ if (!cros_port->sop_disc) {
+ ret = -ENOMEM;
+ goto unregister_ports;
+ }
+
+ INIT_LIST_HEAD(&cros_port->partner_mode_list);
}
return 0;
@@ -329,74 +370,6 @@ static int cros_typec_ec_command(struct cros_typec_data *typec,
return ret;
}
-static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
- int port_num, struct ec_response_usb_pd_control *resp)
-{
- struct typec_port *port = typec->ports[port_num]->port;
- enum typec_orientation polarity;
-
- if (!resp->enabled)
- polarity = TYPEC_ORIENTATION_NONE;
- else if (!resp->polarity)
- polarity = TYPEC_ORIENTATION_NORMAL;
- else
- polarity = TYPEC_ORIENTATION_REVERSE;
-
- typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
- typec_set_orientation(port, polarity);
-}
-
-static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
- int port_num, struct ec_response_usb_pd_control_v1 *resp)
-{
- struct typec_port *port = typec->ports[port_num]->port;
- enum typec_orientation polarity;
- bool pd_en;
- int ret;
-
- if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
- polarity = TYPEC_ORIENTATION_NONE;
- else if (!resp->polarity)
- polarity = TYPEC_ORIENTATION_NORMAL;
- else
- polarity = TYPEC_ORIENTATION_REVERSE;
- typec_set_orientation(port, polarity);
- typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
- TYPEC_HOST : TYPEC_DEVICE);
- typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
- TYPEC_SOURCE : TYPEC_SINK);
- typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
- TYPEC_SOURCE : TYPEC_SINK);
-
- /* Register/remove partners when a connect/disconnect occurs. */
- if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
- if (typec->ports[port_num]->partner)
- return;
-
- pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
- ret = cros_typec_add_partner(typec, port_num, pd_en);
- if (ret)
- dev_warn(typec->dev,
- "Failed to register partner on port: %d\n",
- port_num);
- } else {
- if (!typec->ports[port_num]->partner)
- return;
- cros_typec_remove_partner(typec, port_num);
- }
-}
-
-static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
- struct ec_response_usb_pd_mux_info *resp)
-{
- struct ec_params_usb_pd_mux_info req = {
- .port = port_num,
- };
-
- return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
- sizeof(req), resp, sizeof(*resp));
-}
-
static int cros_typec_usb_safe_state(struct cros_typec_port *port)
{
port->state.mode = TYPEC_STATE_SAFE;
@@ -563,15 +536,210 @@ static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num,
port->state.mode = TYPEC_STATE_USB;
ret = typec_mux_set(port->mux, &port->state);
} else {
- dev_info(typec->dev,
- "Unsupported mode requested, mux flags: %x\n",
- mux_flags);
- ret = -ENOTSUPP;
+ dev_dbg(typec->dev,
+ "Unrecognized mode requested, mux flags: %x\n",
+ mux_flags);
+ }
+
+ return ret;
+}
+
+static void cros_typec_set_port_params_v0(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control *resp)
+{
+ struct typec_port *port = typec->ports[port_num]->port;
+ enum typec_orientation polarity;
+
+ if (!resp->enabled)
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+
+ typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_orientation(port, polarity);
+}
+
+static void cros_typec_set_port_params_v1(struct cros_typec_data *typec,
+ int port_num, struct ec_response_usb_pd_control_v1 *resp)
+{
+ struct typec_port *port = typec->ports[port_num]->port;
+ enum typec_orientation polarity;
+ bool pd_en;
+ int ret;
+
+ if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED))
+ polarity = TYPEC_ORIENTATION_NONE;
+ else if (!resp->polarity)
+ polarity = TYPEC_ORIENTATION_NORMAL;
+ else
+ polarity = TYPEC_ORIENTATION_REVERSE;
+ typec_set_orientation(port, polarity);
+ typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ?
+ TYPEC_HOST : TYPEC_DEVICE);
+ typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ?
+ TYPEC_SOURCE : TYPEC_SINK);
+ typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ?
+ TYPEC_SOURCE : TYPEC_SINK);
+
+ /* Register/remove partners when a connect/disconnect occurs. */
+ if (resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED) {
+ if (typec->ports[port_num]->partner)
+ return;
+
+ pd_en = resp->enabled & PD_CTRL_RESP_ENABLED_PD_CAPABLE;
+ ret = cros_typec_add_partner(typec, port_num, pd_en);
+ if (ret)
+ dev_warn(typec->dev,
+ "Failed to register partner on port: %d\n",
+ port_num);
+ } else {
+ if (!typec->ports[port_num]->partner)
+ return;
+ cros_typec_remove_partner(typec, port_num);
}
+}
+
+static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num,
+ struct ec_response_usb_pd_mux_info *resp)
+{
+ struct ec_params_usb_pd_mux_info req = {
+ .port = port_num,
+ };
+
+ return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req,
+ sizeof(req), resp, sizeof(*resp));
+}
+
+static int cros_typec_register_altmodes(struct cros_typec_data *typec, int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *sop_disc = port->sop_disc;
+ struct cros_typec_altmode_node *node;
+ struct typec_altmode_desc desc;
+ struct typec_altmode *amode;
+ int ret = 0;
+ int i, j;
+
+ for (i = 0; i < sop_disc->svid_count; i++) {
+ for (j = 0; j < sop_disc->svids[i].mode_count; j++) {
+ memset(&desc, 0, sizeof(desc));
+ desc.svid = sop_disc->svids[i].svid;
+ desc.mode = j;
+ desc.vdo = sop_disc->svids[i].mode_vdo[j];
+
+ amode = typec_partner_register_altmode(port->partner, &desc);
+ if (IS_ERR(amode)) {
+ ret = PTR_ERR(amode);
+ goto err_cleanup;
+ }
+
+ /* If no memory is available we should unregister and exit. */
+ node = devm_kzalloc(typec->dev, sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ ret = -ENOMEM;
+ typec_unregister_altmode(amode);
+ goto err_cleanup;
+ }
+
+ node->amode = amode;
+ list_add_tail(&node->list, &port->partner_mode_list);
+ }
+ }
+
+ return 0;
+err_cleanup:
+ cros_typec_unregister_altmodes(typec, port_num);
return ret;
}
+static int cros_typec_handle_sop_disc(struct cros_typec_data *typec, int port_num)
+{
+ struct cros_typec_port *port = typec->ports[port_num];
+ struct ec_response_typec_discovery *sop_disc = port->sop_disc;
+ struct ec_params_typec_discovery req = {
+ .port = port_num,
+ .partner_type = TYPEC_PARTNER_SOP,
+ };
+ int ret = 0;
+ int i;
+
+ if (!port->partner) {
+ dev_err(typec->dev,
+ "SOP Discovery received without partner registered, port: %d\n",
+ port_num);
+ ret = -EINVAL;
+ goto disc_exit;
+ }
+
+ memset(sop_disc, 0, EC_PROTO2_MAX_RESPONSE_SIZE);
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_DISCOVERY, &req, sizeof(req),
+ sop_disc, EC_PROTO2_MAX_RESPONSE_SIZE);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to get SOP discovery data for port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+ /* First, update the PD identity VDOs for the partner. */
+ if (sop_disc->identity_count > 0)
+ port->p_identity.id_header = sop_disc->discovery_vdo[0];
+ if (sop_disc->identity_count > 1)
+ port->p_identity.cert_stat = sop_disc->discovery_vdo[1];
+ if (sop_disc->identity_count > 2)
+ port->p_identity.product = sop_disc->discovery_vdo[2];
+
+ /* Copy the remaining identity VDOs, up to a maximum of 6. */
+ for (i = 3; i < sop_disc->identity_count && i < VDO_MAX_OBJECTS; i++)
+ port->p_identity.vdo[i - 3] = sop_disc->discovery_vdo[i];
+
+ ret = typec_partner_set_identity(port->partner);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to update partner PD identity, port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+ ret = cros_typec_register_altmodes(typec, port_num);
+ if (ret < 0) {
+ dev_err(typec->dev, "Failed to register partner altmodes, port: %d\n", port_num);
+ goto disc_exit;
+ }
+
+disc_exit:
+ return ret;
+}
+
+static void cros_typec_handle_status(struct cros_typec_data *typec, int port_num)
+{
+ struct ec_response_typec_status resp;
+ struct ec_params_typec_status req = {
+ .port = port_num,
+ };
+ int ret;
+
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_TYPEC_STATUS, &req, sizeof(req),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev, "EC_CMD_TYPEC_STATUS failed for port: %d\n", port_num);
+ return;
+ }
+
+ if (typec->ports[port_num]->disc_done)
+ return;
+
+ /* Handle any events appropriately. */
+ if (resp.events & PD_STATUS_EVENT_SOP_DISC_DONE) {
+ ret = cros_typec_handle_sop_disc(typec, port_num);
+ if (ret < 0) {
+ dev_err(typec->dev, "Couldn't parse SOP Disc data, port: %d\n", port_num);
+ return;
+ }
+
+ typec->ports[port_num]->disc_done = true;
+ }
+}
+
static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
{
struct ec_params_usb_pd_control req;
@@ -608,6 +776,9 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num)
cros_typec_set_port_params_v0(typec, port_num,
(struct ec_response_usb_pd_control *) &resp);
+ if (typec->typec_cmd_supported)
+ cros_typec_handle_status(typec, port_num);
+
/* Update the switches if they exist, according to requested state */
ret = cros_typec_get_mux_info(typec, port_num, &mux_resp);
if (ret < 0) {
@@ -656,6 +827,23 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec)
return 0;
}
+/* Check the EC feature flags to see if TYPEC_* commands are supported. */
+static int cros_typec_cmds_supported(struct cros_typec_data *typec)
+{
+ struct ec_response_get_features resp = {};
+ int ret;
+
+ ret = cros_typec_ec_command(typec, 0, EC_CMD_GET_FEATURES, NULL, 0,
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ dev_warn(typec->dev,
+ "Failed to get features, assuming typec commands unsupported.\n");
+ return 0;
+ }
+
+ return resp.flags[EC_FEATURE_TYPEC_CMD / 32] & EC_FEATURE_MASK_1(EC_FEATURE_TYPEC_CMD);
+}
+
static void cros_typec_port_work(struct work_struct *work)
{
struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work);
@@ -715,6 +903,8 @@ static int cros_typec_probe(struct platform_device *pdev)
return ret;
}
+ typec->typec_cmd_supported = !!cros_typec_cmds_supported(typec);
+
ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0,
&resp, sizeof(resp));
if (ret < 0)
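
The feature probe relies on the EC reporting its features as two 32-bit words: dividing the feature number by 32 selects the word, and EC_FEATURE_MASK_1() covers bits 32-63. A sketch of the bit math, using hypothetical EXAMPLE_ macros mirroring the real ones:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define EXAMPLE_FEATURE_MASK_0(bit)     BIT((bit) % 32)     /* features 0..31  */
    #define EXAMPLE_FEATURE_MASK_1(bit)     BIT((bit) - 32)     /* features 32..63 */

    /* flags[] holds two 32-bit words: pick the word with bit / 32, then test. */
    static bool example_ec_has_feature(const u32 flags[2], unsigned int feature)
    {
            if (feature < 32)
                    return flags[0] & EXAMPLE_FEATURE_MASK_0(feature);
            return flags[1] & EXAMPLE_FEATURE_MASK_1(feature);
    }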
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index d55b3727e00e..b22c4fdb2561 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -177,6 +177,13 @@ config POWER_RESET_QNAP
Say Y if you have a QNAP NAS.
+config POWER_RESET_REGULATOR
+ bool "Regulator subsystem power-off driver"
+ depends on OF && REGULATOR
+ help
+ This driver supports turning off your board by disabling a
+ power regulator defined in the devicetree.
+
config POWER_RESET_RESTART
bool "Restart power-off driver"
help
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index c51eceba9ea3..9dc49d3a57ff 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_POWER_RESET_OCELOT_RESET) += ocelot-reset.o
obj-$(CONFIG_POWER_RESET_PIIX4_POWEROFF) += piix4-poweroff.o
obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
+obj-$(CONFIG_POWER_RESET_REGULATOR) += regulator-poweroff.o
obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
obj-$(CONFIG_POWER_RESET_ST) += st-poweroff.o
obj-$(CONFIG_POWER_RESET_VERSATILE) += arm-versatile-reboot.o
diff --git a/drivers/power/reset/ocelot-reset.c b/drivers/power/reset/ocelot-reset.c
index f74e1dbb4ba3..8caa90cb58fc 100644
--- a/drivers/power/reset/ocelot-reset.c
+++ b/drivers/power/reset/ocelot-reset.c
@@ -29,6 +29,8 @@ struct ocelot_reset_context {
struct notifier_block restart_handler;
};
+#define BIT_OFF_INVALID 32
+
#define SOFT_CHIP_RST BIT(0)
#define ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
@@ -50,9 +52,11 @@ static int ocelot_restart_handle(struct notifier_block *this,
ctx->props->vcore_protect, 0);
/* Make the SI back to boot mode */
- regmap_update_bits(ctx->cpu_ctrl, ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL,
- IF_SI_OWNER_MASK << if_si_owner_bit,
- IF_SI_OWNER_SIBM << if_si_owner_bit);
+ if (if_si_owner_bit != BIT_OFF_INVALID)
+ regmap_update_bits(ctx->cpu_ctrl,
+ ICPU_CFG_CPU_SYSTEM_CTRL_GENERAL_CTRL,
+ IF_SI_OWNER_MASK << if_si_owner_bit,
+ IF_SI_OWNER_SIBM << if_si_owner_bit);
pr_emerg("Resetting SoC\n");
@@ -96,6 +100,20 @@ static int ocelot_reset_probe(struct platform_device *pdev)
return err;
}
+static const struct reset_props reset_props_jaguar2 = {
+ .syscon = "mscc,ocelot-cpu-syscon",
+ .protect_reg = 0x20,
+ .vcore_protect = BIT(2),
+ .if_si_owner_bit = 6,
+};
+
+static const struct reset_props reset_props_luton = {
+ .syscon = "mscc,ocelot-cpu-syscon",
+ .protect_reg = 0x20,
+ .vcore_protect = BIT(2),
+ .if_si_owner_bit = BIT_OFF_INVALID, /* n/a */
+};
+
static const struct reset_props reset_props_ocelot = {
.syscon = "mscc,ocelot-cpu-syscon",
.protect_reg = 0x20,
@@ -112,6 +130,12 @@ static const struct reset_props reset_props_sparx5 = {
static const struct of_device_id ocelot_reset_of_match[] = {
{
+ .compatible = "mscc,jaguar2-chip-reset",
+ .data = &reset_props_jaguar2
+ }, {
+ .compatible = "mscc,luton-chip-reset",
+ .data = &reset_props_luton
+ }, {
.compatible = "mscc,ocelot-chip-reset",
.data = &reset_props_ocelot
}, {
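
Since the Luton SoC has no SI owner field, the driver uses an out-of-range bit offset (32, one past the last bit of a 32-bit register) as a sentinel and skips the regmap write, which also avoids an undefined 32-bit shift. Sketch with illustrative mask and owner values:

    #include <linux/bits.h>
    #include <linux/regmap.h>

    #define EXAMPLE_BIT_OFF_INVALID         32              /* sentinel: field absent */
    #define EXAMPLE_IF_SI_OWNER_MASK        GENMASK(1, 0)   /* illustrative */
    #define EXAMPLE_IF_SI_OWNER_SIBM        1               /* illustrative */

    /* Skip the register write entirely on SoCs that lack the field. */
    static void example_si_to_boot_mode(struct regmap *map, u32 reg, u8 owner_bit)
    {
            if (owner_bit == EXAMPLE_BIT_OFF_INVALID)
                    return;

            regmap_update_bits(map, reg,
                               EXAMPLE_IF_SI_OWNER_MASK << owner_bit,
                               EXAMPLE_IF_SI_OWNER_SIBM << owner_bit);
    }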
diff --git a/drivers/power/reset/qnap-poweroff.c b/drivers/power/reset/qnap-poweroff.c
index 52b7dc61d870..0ddf7f25f7b8 100644
--- a/drivers/power/reset/qnap-poweroff.c
+++ b/drivers/power/reset/qnap-poweroff.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/serial_reg.h>
-#include <linux/kallsyms.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/clk.h>
@@ -75,7 +74,6 @@ static int qnap_power_off_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct resource *res;
struct clk *clk;
- char symname[KSYM_NAME_LEN];
const struct of_device_id *match =
of_match_node(qnap_power_off_of_match_table, np);
@@ -104,10 +102,8 @@ static int qnap_power_off_probe(struct platform_device *pdev)
/* Check that nothing else has already setup a handler */
if (pm_power_off) {
- lookup_symbol_name((ulong)pm_power_off, symname);
- dev_err(&pdev->dev,
- "pm_power_off already claimed %p %s",
- pm_power_off, symname);
+ dev_err(&pdev->dev, "pm_power_off already claimed for %ps",
+ pm_power_off);
return -EBUSY;
}
pm_power_off = qnap_power_off;
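
This hunk and the syscon-poweroff one below share a cleanup: printk's %ps specifier symbolizes a function pointer on its own, so the manual lookup_symbol_name() call, its stack buffer, and the kallsyms include can all go. Minimal form:

    #include <linux/device.h>
    #include <linux/pm.h>

    /* %ps resolves a code pointer to its symbol name at print time. */
    static void example_report_claimed(struct device *dev)
    {
            dev_err(dev, "pm_power_off already claimed for %ps", pm_power_off);
    }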
diff --git a/drivers/power/reset/regulator-poweroff.c b/drivers/power/reset/regulator-poweroff.c
new file mode 100644
index 000000000000..f697088e0ad1
--- /dev/null
+++ b/drivers/power/reset/regulator-poweroff.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Force-disables a regulator to power down a device
+ *
+ * Michael Klein <michael@fossekall.de>
+ *
+ * Copyright (C) 2020 Michael Klein
+ *
+ * Based on the gpio-poweroff driver.
+ */
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+
+#define TIMEOUT_MS 3000
+
+/*
+ * Hold the configuration here; there cannot be more than one instance of
+ * the driver, since pm_power_off itself is global.
+ */
+static struct regulator *cpu_regulator;
+
+static void regulator_poweroff_do_poweroff(void)
+{
+ if (cpu_regulator && regulator_is_enabled(cpu_regulator))
+ regulator_force_disable(cpu_regulator);
+
+ /* give it some time */
+ mdelay(TIMEOUT_MS);
+
+ WARN_ON(1);
+}
+
+static int regulator_poweroff_probe(struct platform_device *pdev)
+{
+ /* If a pm_power_off function has already been added, leave it alone */
+ if (pm_power_off != NULL) {
+ dev_err(&pdev->dev,
+ "%s: pm_power_off function already registered\n",
+ __func__);
+ return -EBUSY;
+ }
+
+ cpu_regulator = devm_regulator_get(&pdev->dev, "cpu");
+ if (IS_ERR(cpu_regulator))
+ return PTR_ERR(cpu_regulator);
+
+ pm_power_off = &regulator_poweroff_do_poweroff;
+ return 0;
+}
+
+static int regulator_poweroff_remove(__maybe_unused struct platform_device *pdev)
+{
+ if (pm_power_off == &regulator_poweroff_do_poweroff)
+ pm_power_off = NULL;
+
+ return 0;
+}
+
+static const struct of_device_id of_regulator_poweroff_match[] = {
+ { .compatible = "regulator-poweroff", },
+ {},
+};
+
+static struct platform_driver regulator_poweroff_driver = {
+ .probe = regulator_poweroff_probe,
+ .remove = regulator_poweroff_remove,
+ .driver = {
+ .name = "poweroff-regulator",
+ .of_match_table = of_regulator_poweroff_match,
+ },
+};
+
+module_platform_driver(regulator_poweroff_driver);
+
+MODULE_AUTHOR("Michael Klein <michael@fossekall.de>");
+MODULE_DESCRIPTION("Regulator poweroff driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:poweroff-regulator");
diff --git a/drivers/power/reset/syscon-poweroff.c b/drivers/power/reset/syscon-poweroff.c
index 4d6923b102b6..ed58bdf41e27 100644
--- a/drivers/power/reset/syscon-poweroff.c
+++ b/drivers/power/reset/syscon-poweroff.c
@@ -6,7 +6,6 @@
* Author: Moritz Fischer <moritz.fischer@ettus.com>
*/
-#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/notifier.h>
@@ -34,7 +33,6 @@ static void syscon_poweroff(void)
static int syscon_poweroff_probe(struct platform_device *pdev)
{
- char symname[KSYM_NAME_LEN];
int mask_err, value_err;
map = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "regmap");
@@ -65,10 +63,8 @@ static int syscon_poweroff_probe(struct platform_device *pdev)
}
if (pm_power_off) {
- lookup_symbol_name((ulong)pm_power_off, symname);
- dev_err(&pdev->dev,
- "pm_power_off already claimed %p %s",
- pm_power_off, symname);
+ dev_err(&pdev->dev, "pm_power_off already claimed for %ps",
+ pm_power_off);
return -EBUSY;
}
diff --git a/drivers/power/supply/ab8500_btemp.c b/drivers/power/supply/ab8500_btemp.c
index 909f0242bacb..d20345386b1e 100644
--- a/drivers/power/supply/ab8500_btemp.c
+++ b/drivers/power/supply/ab8500_btemp.c
@@ -936,29 +936,23 @@ static struct ab8500_btemp_interrupts ab8500_btemp_irq[] = {
{"BTEMP_MEDIUM_HIGH", ab8500_btemp_medhigh_handler},
};
-#if defined(CONFIG_PM)
-static int ab8500_btemp_resume(struct platform_device *pdev)
+static int __maybe_unused ab8500_btemp_resume(struct device *dev)
{
- struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ struct ab8500_btemp *di = dev_get_drvdata(dev);
ab8500_btemp_periodic(di, true);
return 0;
}
-static int ab8500_btemp_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused ab8500_btemp_suspend(struct device *dev)
{
- struct ab8500_btemp *di = platform_get_drvdata(pdev);
+ struct ab8500_btemp *di = dev_get_drvdata(dev);
ab8500_btemp_periodic(di, false);
return 0;
}
-#else
-#define ab8500_btemp_suspend NULL
-#define ab8500_btemp_resume NULL
-#endif
static int ab8500_btemp_remove(struct platform_device *pdev)
{
@@ -999,48 +993,45 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct abx500_bm_data *plat = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
+ struct device *dev = &pdev->dev;
struct ab8500_btemp *di;
int irq, i, ret = 0;
u8 val;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_btemp\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
if (!plat) {
- dev_err(&pdev->dev, "no battery management data supplied\n");
+ dev_err(dev, "no battery management data supplied\n");
return -EINVAL;
}
di->bm = plat;
if (np) {
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ ret = ab8500_bm_of_probe(dev, np, di->bm);
if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
+ dev_err(dev, "failed to get battery information\n");
return ret;
}
}
/* get parent data */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
/* Get ADC channels */
- di->btemp_ball = devm_iio_channel_get(&pdev->dev, "btemp_ball");
+ di->btemp_ball = devm_iio_channel_get(dev, "btemp_ball");
if (IS_ERR(di->btemp_ball)) {
- if (PTR_ERR(di->btemp_ball) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get BTEMP BALL ADC channel\n");
- return PTR_ERR(di->btemp_ball);
+ ret = dev_err_probe(dev, PTR_ERR(di->btemp_ball),
+ "failed to get BTEMP BALL ADC channel\n");
+ return ret;
}
- di->bat_ctrl = devm_iio_channel_get(&pdev->dev, "bat_ctrl");
+ di->bat_ctrl = devm_iio_channel_get(dev, "bat_ctrl");
if (IS_ERR(di->bat_ctrl)) {
- if (PTR_ERR(di->bat_ctrl) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get BAT CTRL ADC channel\n");
- return PTR_ERR(di->bat_ctrl);
+ ret = dev_err_probe(dev, PTR_ERR(di->bat_ctrl),
+ "failed to get BAT CTRL ADC channel\n");
+ return ret;
}
di->initialized = false;
@@ -1053,7 +1044,7 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
di->btemp_wq =
alloc_workqueue("ab8500_btemp_wq", WQ_MEM_RECLAIM, 0);
if (di->btemp_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
+ dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
}
@@ -1065,10 +1056,10 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
di->btemp_ranges.btemp_low_limit = BTEMP_THERMAL_LOW_LIMIT;
di->btemp_ranges.btemp_med_limit = BTEMP_THERMAL_MED_LIMIT;
- ret = abx500_get_register_interruptible(di->dev, AB8500_CHARGER,
+ ret = abx500_get_register_interruptible(dev, AB8500_CHARGER,
AB8500_BTEMP_HIGH_TH, &val);
if (ret < 0) {
- dev_err(di->dev, "%s ab8500 read failed\n", __func__);
+ dev_err(dev, "%s ab8500 read failed\n", __func__);
goto free_btemp_wq;
}
switch (val) {
@@ -1088,10 +1079,10 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
}
/* Register BTEMP power supply class */
- di->btemp_psy = power_supply_register(di->dev, &ab8500_btemp_desc,
+ di->btemp_psy = power_supply_register(dev, &ab8500_btemp_desc,
&psy_cfg);
if (IS_ERR(di->btemp_psy)) {
- dev_err(di->dev, "failed to register BTEMP psy\n");
+ dev_err(dev, "failed to register BTEMP psy\n");
ret = PTR_ERR(di->btemp_psy);
goto free_btemp_wq;
}
@@ -1105,15 +1096,15 @@ static int ab8500_btemp_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, ab8500_btemp_irq[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_btemp_irq[i].name, di);
if (ret) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ dev_err(dev, "failed to request %s IRQ %d: %d\n"
, ab8500_btemp_irq[i].name, irq, ret);
goto free_irq;
}
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
ab8500_btemp_irq[i].name, irq, ret);
}
@@ -1138,6 +1129,8 @@ free_btemp_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(ab8500_btemp_pm_ops, ab8500_btemp_suspend, ab8500_btemp_resume);
+
static const struct of_device_id ab8500_btemp_match[] = {
{ .compatible = "stericsson,ab8500-btemp", },
{ },
@@ -1146,11 +1139,10 @@ static const struct of_device_id ab8500_btemp_match[] = {
static struct platform_driver ab8500_btemp_driver = {
.probe = ab8500_btemp_probe,
.remove = ab8500_btemp_remove,
- .suspend = ab8500_btemp_suspend,
- .resume = ab8500_btemp_resume,
.driver = {
.name = "ab8500-btemp",
.of_match_table = ab8500_btemp_match,
+ .pm = &ab8500_btemp_pm_ops,
},
};
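
The ab8500 conversions all follow the same recipe for moving off the legacy platform_driver suspend/resume hooks: take a struct device instead of a platform_device, mark the handlers __maybe_unused so they compile away without CONFIG_PM_SLEEP (replacing the old #ifdef CONFIG_PM block), and attach them through SIMPLE_DEV_PM_OPS. Skeleton of the pattern, probe/remove trimmed:

    #include <linux/device.h>
    #include <linux/platform_device.h>
    #include <linux/pm.h>

    static int __maybe_unused example_suspend(struct device *dev)
    {
            /* quiesce hardware, stop periodic work */
            return 0;
    }

    static int __maybe_unused example_resume(struct device *dev)
    {
            /* restart periodic work */
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

    static struct platform_driver example_driver = {
            .driver = {
                    .name = "example",
                    .pm = &example_pm_ops,
            },
    };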
diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c
index db65be026920..ac77c8882d17 100644
--- a/drivers/power/supply/ab8500_charger.c
+++ b/drivers/power/supply/ab8500_charger.c
@@ -3209,11 +3209,10 @@ static int ab8500_charger_usb_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
}
-#if defined(CONFIG_PM)
-static int ab8500_charger_resume(struct platform_device *pdev)
+static int __maybe_unused ab8500_charger_resume(struct device *dev)
{
int ret;
- struct ab8500_charger *di = platform_get_drvdata(pdev);
+ struct ab8500_charger *di = dev_get_drvdata(dev);
/*
* For ABB revision 1.0 and 1.1 there is a bug in the watchdog
@@ -3247,10 +3246,9 @@ static int ab8500_charger_resume(struct platform_device *pdev)
return 0;
}
-static int ab8500_charger_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused ab8500_charger_suspend(struct device *dev)
{
- struct ab8500_charger *di = platform_get_drvdata(pdev);
+ struct ab8500_charger *di = dev_get_drvdata(dev);
/* Cancel any pending jobs */
cancel_delayed_work(&di->check_hw_failure_work);
@@ -3272,10 +3270,6 @@ static int ab8500_charger_suspend(struct platform_device *pdev,
return 0;
}
-#else
-#define ab8500_charger_suspend NULL
-#define ab8500_charger_resume NULL
-#endif
static struct notifier_block charger_nb = {
.notifier_call = ab8500_external_charger_prepare,
@@ -3354,23 +3348,22 @@ static int ab8500_charger_probe(struct platform_device *pdev)
struct power_supply_config ac_psy_cfg = {}, usb_psy_cfg = {};
struct ab8500_charger *di;
int irq, i, charger_status, ret = 0, ch_stat;
+ struct device *dev = &pdev->dev;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_charger\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
if (!plat) {
- dev_err(&pdev->dev, "no battery management data supplied\n");
+ dev_err(dev, "no battery management data supplied\n");
return -EINVAL;
}
di->bm = plat;
if (np) {
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ ret = ab8500_bm_of_probe(dev, np, di->bm);
if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
+ dev_err(dev, "failed to get battery information\n");
return ret;
}
di->autopower_cfg = of_property_read_bool(np, "autopower_cfg");
@@ -3378,40 +3371,33 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->autopower_cfg = false;
/* get parent data */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
/* Get ADC channels */
- di->adc_main_charger_v = devm_iio_channel_get(&pdev->dev,
- "main_charger_v");
+ di->adc_main_charger_v = devm_iio_channel_get(dev, "main_charger_v");
if (IS_ERR(di->adc_main_charger_v)) {
- if (PTR_ERR(di->adc_main_charger_v) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC main charger voltage\n");
- return PTR_ERR(di->adc_main_charger_v);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_main_charger_v),
+ "failed to get ADC main charger voltage\n");
+ return ret;
}
- di->adc_main_charger_c = devm_iio_channel_get(&pdev->dev,
- "main_charger_c");
+ di->adc_main_charger_c = devm_iio_channel_get(dev, "main_charger_c");
if (IS_ERR(di->adc_main_charger_c)) {
- if (PTR_ERR(di->adc_main_charger_c) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC main charger current\n");
- return PTR_ERR(di->adc_main_charger_c);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_main_charger_c),
+ "failed to get ADC main charger current\n");
+ return ret;
}
- di->adc_vbus_v = devm_iio_channel_get(&pdev->dev, "vbus_v");
+ di->adc_vbus_v = devm_iio_channel_get(dev, "vbus_v");
if (IS_ERR(di->adc_vbus_v)) {
- if (PTR_ERR(di->adc_vbus_v) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC USB charger voltage\n");
- return PTR_ERR(di->adc_vbus_v);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_vbus_v),
+ "failed to get ADC USB charger voltage\n");
+ return ret;
}
- di->adc_usb_charger_c = devm_iio_channel_get(&pdev->dev,
- "usb_charger_c");
+ di->adc_usb_charger_c = devm_iio_channel_get(dev, "usb_charger_c");
if (IS_ERR(di->adc_usb_charger_c)) {
- if (PTR_ERR(di->adc_usb_charger_c) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get ADC USB charger current\n");
- return PTR_ERR(di->adc_usb_charger_c);
+ ret = dev_err_probe(dev, PTR_ERR(di->adc_usb_charger_c),
+ "failed to get ADC USB charger current\n");
+ return ret;
}
/* initialize lock */
@@ -3467,7 +3453,7 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->charger_wq = alloc_ordered_workqueue("ab8500_charger_wq",
WQ_MEM_RECLAIM);
if (di->charger_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
+ dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
}
@@ -3526,10 +3512,10 @@ static int ab8500_charger_probe(struct platform_device *pdev)
* is a charger connected to avoid erroneous BTEMP_HIGH/LOW
* interrupts during charging
*/
- di->regu = devm_regulator_get(di->dev, "vddadc");
+ di->regu = devm_regulator_get(dev, "vddadc");
if (IS_ERR(di->regu)) {
ret = PTR_ERR(di->regu);
- dev_err(di->dev, "failed to get vddadc regulator\n");
+ dev_err(dev, "failed to get vddadc regulator\n");
goto free_charger_wq;
}
@@ -3537,17 +3523,17 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Initialize OVV, and other registers */
ret = ab8500_charger_init_hw_registers(di);
if (ret) {
- dev_err(di->dev, "failed to initialize ABB registers\n");
+ dev_err(dev, "failed to initialize ABB registers\n");
goto free_charger_wq;
}
/* Register AC charger class */
if (di->ac_chg.enabled) {
- di->ac_chg.psy = power_supply_register(di->dev,
+ di->ac_chg.psy = power_supply_register(dev,
&ab8500_ac_chg_desc,
&ac_psy_cfg);
if (IS_ERR(di->ac_chg.psy)) {
- dev_err(di->dev, "failed to register AC charger\n");
+ dev_err(dev, "failed to register AC charger\n");
ret = PTR_ERR(di->ac_chg.psy);
goto free_charger_wq;
}
@@ -3555,11 +3541,11 @@ static int ab8500_charger_probe(struct platform_device *pdev)
/* Register USB charger class */
if (di->usb_chg.enabled) {
- di->usb_chg.psy = power_supply_register(di->dev,
+ di->usb_chg.psy = power_supply_register(dev,
&ab8500_usb_chg_desc,
&usb_psy_cfg);
if (IS_ERR(di->usb_chg.psy)) {
- dev_err(di->dev, "failed to register USB charger\n");
+ dev_err(dev, "failed to register USB charger\n");
ret = PTR_ERR(di->usb_chg.psy);
goto free_ac;
}
@@ -3567,14 +3553,14 @@ static int ab8500_charger_probe(struct platform_device *pdev)
di->usb_phy = usb_get_phy(USB_PHY_TYPE_USB2);
if (IS_ERR_OR_NULL(di->usb_phy)) {
- dev_err(di->dev, "failed to get usb transceiver\n");
+ dev_err(dev, "failed to get usb transceiver\n");
ret = -EINVAL;
goto free_usb;
}
di->nb.notifier_call = ab8500_charger_usb_notifier_call;
ret = usb_register_notifier(di->usb_phy, &di->nb);
if (ret) {
- dev_err(di->dev, "failed to register usb notifier\n");
+ dev_err(dev, "failed to register usb notifier\n");
goto put_usb_phy;
}
@@ -3603,15 +3589,15 @@ static int ab8500_charger_probe(struct platform_device *pdev)
}
ret = request_threaded_irq(irq, NULL, ab8500_charger_irq[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
ab8500_charger_irq[i].name, di);
if (ret != 0) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n"
+ dev_err(dev, "failed to request %s IRQ %d: %d\n"
, ab8500_charger_irq[i].name, irq, ret);
goto free_irq;
}
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
ab8500_charger_irq[i].name, irq, ret);
}
@@ -3659,6 +3645,8 @@ free_charger_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(ab8500_charger_pm_ops, ab8500_charger_suspend, ab8500_charger_resume);
+
static const struct of_device_id ab8500_charger_match[] = {
{ .compatible = "stericsson,ab8500-charger", },
{ },
@@ -3667,11 +3655,10 @@ static const struct of_device_id ab8500_charger_match[] = {
static struct platform_driver ab8500_charger_driver = {
.probe = ab8500_charger_probe,
.remove = ab8500_charger_remove,
- .suspend = ab8500_charger_suspend,
- .resume = ab8500_charger_resume,
.driver = {
.name = "ab8500-charger",
.of_match_table = ab8500_charger_match,
+ .pm = &ab8500_charger_pm_ops,
},
};
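The driver-structure hunk above is the standard migration from the legacy platform_driver .suspend/.resume hooks (which take a platform_device and a pm_message_t) to dev_pm_ops. A minimal sketch of the resulting shape, with every "foo" name hypothetical; __maybe_unused keeps the callbacks warning-free when CONFIG_PM_SLEEP is off, since SIMPLE_DEV_PM_OPS only references them in that configuration:

	static int __maybe_unused foo_suspend(struct device *dev)
	{
		struct foo *di = dev_get_drvdata(dev);

		flush_work(&di->work);		/* quiesce before sleep */
		return 0;
	}

	static int __maybe_unused foo_resume(struct device *dev)
	{
		struct foo *di = dev_get_drvdata(dev);

		schedule_work(&di->work);	/* re-kick the state machine */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name	= "foo",
			.pm	= &foo_pm_ops,	/* replaces top-level .suspend/.resume */
		},
	};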
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c
index 592a73d4dde6..3873e4857e3d 100644
--- a/drivers/power/supply/ab8500_fg.c
+++ b/drivers/power/supply/ab8500_fg.c
@@ -2942,10 +2942,9 @@ static void ab8500_fg_sysfs_psy_remove_attrs(struct ab8500_fg *di)
/* Exposure to the sysfs interface <<END>> */
-#if defined(CONFIG_PM)
-static int ab8500_fg_resume(struct platform_device *pdev)
+static int __maybe_unused ab8500_fg_resume(struct device *dev)
{
- struct ab8500_fg *di = platform_get_drvdata(pdev);
+ struct ab8500_fg *di = dev_get_drvdata(dev);
/*
* Change state if we're not charging. If we're charging we will wake
@@ -2959,10 +2958,9 @@ static int ab8500_fg_resume(struct platform_device *pdev)
return 0;
}
-static int ab8500_fg_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused ab8500_fg_suspend(struct device *dev)
{
- struct ab8500_fg *di = platform_get_drvdata(pdev);
+ struct ab8500_fg *di = dev_get_drvdata(dev);
flush_delayed_work(&di->fg_periodic_work);
flush_work(&di->fg_work);
@@ -2980,10 +2978,6 @@ static int ab8500_fg_suspend(struct platform_device *pdev,
return 0;
}
-#else
-#define ab8500_fg_suspend NULL
-#define ab8500_fg_resume NULL
-#endif
static int ab8500_fg_remove(struct platform_device *pdev)
{
@@ -3007,14 +3001,11 @@ static int ab8500_fg_remove(struct platform_device *pdev)
}
/* ab8500 fg driver interrupts and their respective isr */
-static struct ab8500_fg_interrupts ab8500_fg_irq_th[] = {
+static struct ab8500_fg_interrupts ab8500_fg_irq[] = {
{"NCONV_ACCU", ab8500_fg_cc_convend_handler},
{"BATT_OVV", ab8500_fg_batt_ovv_handler},
{"LOW_BAT_F", ab8500_fg_lowbatf_handler},
{"CC_INT_CALIB", ab8500_fg_cc_int_calib_handler},
-};
-
-static struct ab8500_fg_interrupts ab8500_fg_irq_bh[] = {
{"CCEOC", ab8500_fg_cc_data_end_handler},
};
@@ -3037,26 +3028,25 @@ static int ab8500_fg_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct abx500_bm_data *plat = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
+ struct device *dev = &pdev->dev;
struct ab8500_fg *di;
int i, irq;
int ret = 0;
- di = devm_kzalloc(&pdev->dev, sizeof(*di), GFP_KERNEL);
- if (!di) {
- dev_err(&pdev->dev, "%s no mem for ab8500_fg\n", __func__);
+ di = devm_kzalloc(dev, sizeof(*di), GFP_KERNEL);
+ if (!di)
return -ENOMEM;
- }
if (!plat) {
- dev_err(&pdev->dev, "no battery management data supplied\n");
+ dev_err(dev, "no battery management data supplied\n");
return -EINVAL;
}
di->bm = plat;
if (np) {
- ret = ab8500_bm_of_probe(&pdev->dev, np, di->bm);
+ ret = ab8500_bm_of_probe(dev, np, di->bm);
if (ret) {
- dev_err(&pdev->dev, "failed to get battery information\n");
+ dev_err(dev, "failed to get battery information\n");
return ret;
}
}
@@ -3064,15 +3054,14 @@ static int ab8500_fg_probe(struct platform_device *pdev)
mutex_init(&di->cc_lock);
/* get parent data */
- di->dev = &pdev->dev;
+ di->dev = dev;
di->parent = dev_get_drvdata(pdev->dev.parent);
- di->main_bat_v = devm_iio_channel_get(&pdev->dev, "main_bat_v");
+ di->main_bat_v = devm_iio_channel_get(dev, "main_bat_v");
if (IS_ERR(di->main_bat_v)) {
- if (PTR_ERR(di->main_bat_v) == -ENODEV)
- return -EPROBE_DEFER;
- dev_err(&pdev->dev, "failed to get main battery ADC channel\n");
- return PTR_ERR(di->main_bat_v);
+ ret = dev_err_probe(dev, PTR_ERR(di->main_bat_v),
+ "failed to get main battery ADC channel\n");
+ return ret;
}
psy_cfg.supplied_to = supply_interface;
@@ -3094,7 +3083,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Create a work queue for running the FG algorithm */
di->fg_wq = alloc_ordered_workqueue("ab8500_fg_wq", WQ_MEM_RECLAIM);
if (di->fg_wq == NULL) {
- dev_err(di->dev, "failed to create work queue\n");
+ dev_err(dev, "failed to create work queue\n");
return -ENOMEM;
}
@@ -3129,7 +3118,7 @@ static int ab8500_fg_probe(struct platform_device *pdev)
/* Initialize OVV, and other registers */
ret = ab8500_fg_init_hw_registers(di);
if (ret) {
- dev_err(di->dev, "failed to initialize registers\n");
+ dev_err(dev, "failed to initialize registers\n");
goto free_inst_curr_wq;
}
@@ -3138,9 +3127,9 @@ static int ab8500_fg_probe(struct platform_device *pdev)
di->flags.batt_id_received = false;
/* Register FG power supply class */
- di->fg_psy = power_supply_register(di->dev, &ab8500_fg_desc, &psy_cfg);
+ di->fg_psy = power_supply_register(dev, &ab8500_fg_desc, &psy_cfg);
if (IS_ERR(di->fg_psy)) {
- dev_err(di->dev, "failed to register FG psy\n");
+ dev_err(dev, "failed to register FG psy\n");
ret = PTR_ERR(di->fg_psy);
goto free_inst_curr_wq;
}
@@ -3156,45 +3145,26 @@ static int ab8500_fg_probe(struct platform_device *pdev)
init_completion(&di->ab8500_fg_complete);
/* Register primary interrupt handlers */
- for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq_th); i++) {
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) {
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
if (irq < 0) {
ret = irq;
- goto free_irq_th;
+ goto free_irq;
}
- ret = request_irq(irq, ab8500_fg_irq_th[i].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND,
- ab8500_fg_irq_th[i].name, di);
+ ret = request_threaded_irq(irq, NULL, ab8500_fg_irq[i].isr,
+ IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ ab8500_fg_irq[i].name, di);
if (ret != 0) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
- ab8500_fg_irq_th[i].name, irq, ret);
- goto free_irq_th;
+ dev_err(dev, "failed to request %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
+ goto free_irq;
}
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
- ab8500_fg_irq_th[i].name, irq, ret);
+ dev_dbg(dev, "Requested %s IRQ %d: %d\n",
+ ab8500_fg_irq[i].name, irq, ret);
}
- /* Register threaded interrupt handler */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- if (irq < 0) {
- ret = irq;
- goto free_irq_th;
- }
-
- ret = request_threaded_irq(irq, NULL, ab8500_fg_irq_bh[0].isr,
- IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
- ab8500_fg_irq_bh[0].name, di);
-
- if (ret != 0) {
- dev_err(di->dev, "failed to request %s IRQ %d: %d\n",
- ab8500_fg_irq_bh[0].name, irq, ret);
- goto free_irq_th;
- }
- dev_dbg(di->dev, "Requested %s IRQ %d: %d\n",
- ab8500_fg_irq_bh[0].name, irq, ret);
-
di->irq = platform_get_irq_byname(pdev, "CCEOC");
disable_irq(di->irq);
di->nbr_cceoc_irq_cnt = 0;
@@ -3203,13 +3173,13 @@ static int ab8500_fg_probe(struct platform_device *pdev)
ret = ab8500_fg_sysfs_init(di);
if (ret) {
- dev_err(di->dev, "failed to create sysfs entry\n");
+ dev_err(dev, "failed to create sysfs entry\n");
goto free_irq;
}
ret = ab8500_fg_sysfs_psy_create_attrs(di);
if (ret) {
- dev_err(di->dev, "failed to create FG psy\n");
+ dev_err(dev, "failed to create FG psy\n");
ab8500_fg_sysfs_exit(di);
goto free_irq;
}
@@ -3230,12 +3200,9 @@ static int ab8500_fg_probe(struct platform_device *pdev)
free_irq:
/* We also have to free all registered irqs */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_bh[0].name);
- free_irq(irq, di);
-free_irq_th:
while (--i >= 0) {
/* Last assignment of i from primary interrupt handlers */
- irq = platform_get_irq_byname(pdev, ab8500_fg_irq_th[i].name);
+ irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name);
free_irq(irq, di);
}
@@ -3245,6 +3212,8 @@ free_inst_curr_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(ab8500_fg_pm_ops, ab8500_fg_suspend, ab8500_fg_resume);
+
static const struct of_device_id ab8500_fg_match[] = {
{ .compatible = "stericsson,ab8500-fg", },
{ },
@@ -3253,11 +3222,10 @@ static const struct of_device_id ab8500_fg_match[] = {
static struct platform_driver ab8500_fg_driver = {
.probe = ab8500_fg_probe,
.remove = ab8500_fg_remove,
- .suspend = ab8500_fg_suspend,
- .resume = ab8500_fg_resume,
.driver = {
.name = "ab8500-fg",
.of_match_table = ab8500_fg_match,
+ .pm = &ab8500_fg_pm_ops,
},
};
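The fuel-gauge hunks also merge the former primary-handler table and the lone threaded handler into one table requested uniformly via request_threaded_irq(). With a NULL hard handler the IRQ core insists on IRQF_ONESHOT, which keeps the line masked until the thread finishes; that is the same flag the charger hunk earlier adds. Sketch, with the IRQ name and handler hypothetical:

	irq = platform_get_irq_byname(pdev, "SOME_IRQ");
	if (irq < 0)
		return irq;

	/* NULL primary handler: IRQF_ONESHOT is mandatory so the line
	 * stays disabled until foo_threaded_isr() has completed */
	ret = request_threaded_irq(irq, NULL, foo_threaded_isr,
				   IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_ONESHOT,
				   "foo-irq", di);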
diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c
index 175c4f3d7955..a9d84d845f24 100644
--- a/drivers/power/supply/abx500_chargalg.c
+++ b/drivers/power/supply/abx500_chargalg.c
@@ -1913,10 +1913,9 @@ static int abx500_chargalg_sysfs_init(struct abx500_chargalg *di)
}
/* Exposure to the sysfs interface <<END>> */
-#if defined(CONFIG_PM)
-static int abx500_chargalg_resume(struct platform_device *pdev)
+static int __maybe_unused abx500_chargalg_resume(struct device *dev)
{
- struct abx500_chargalg *di = platform_get_drvdata(pdev);
+ struct abx500_chargalg *di = dev_get_drvdata(dev);
/* Kick charger watchdog if charging (any charger online) */
if (di->chg_info.online_chg)
@@ -1931,10 +1930,9 @@ static int abx500_chargalg_resume(struct platform_device *pdev)
return 0;
}
-static int abx500_chargalg_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int __maybe_unused abx500_chargalg_suspend(struct device *dev)
{
- struct abx500_chargalg *di = platform_get_drvdata(pdev);
+ struct abx500_chargalg *di = dev_get_drvdata(dev);
if (di->chg_info.online_chg)
cancel_delayed_work_sync(&di->chargalg_wd_work);
@@ -1943,10 +1941,6 @@ static int abx500_chargalg_suspend(struct platform_device *pdev,
return 0;
}
-#else
-#define abx500_chargalg_suspend NULL
-#define abx500_chargalg_resume NULL
-#endif
static int abx500_chargalg_remove(struct platform_device *pdev)
{
@@ -2080,6 +2074,8 @@ free_chargalg_wq:
return ret;
}
+static SIMPLE_DEV_PM_OPS(abx500_chargalg_pm_ops, abx500_chargalg_suspend, abx500_chargalg_resume);
+
static const struct of_device_id ab8500_chargalg_match[] = {
{ .compatible = "stericsson,ab8500-chargalg", },
{ },
@@ -2088,11 +2084,10 @@ static const struct of_device_id ab8500_chargalg_match[] = {
static struct platform_driver abx500_chargalg_driver = {
.probe = abx500_chargalg_probe,
.remove = abx500_chargalg_remove,
- .suspend = abx500_chargalg_suspend,
- .resume = abx500_chargalg_resume,
.driver = {
.name = "ab8500-chargalg",
.of_match_table = ab8500_chargalg_match,
+ .pm = &abx500_chargalg_pm_ops,
},
};
diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c
index 0eaa86c52874..70b28b699a80 100644
--- a/drivers/power/supply/axp20x_usb_power.c
+++ b/drivers/power/supply/axp20x_usb_power.c
@@ -92,7 +92,7 @@ static irqreturn_t axp20x_usb_power_irq(int irq, void *devid)
power_supply_changed(power->supply);
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+ mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
return IRQ_HANDLED;
}
@@ -117,7 +117,7 @@ static void axp20x_usb_power_poll_vbus(struct work_struct *work)
out:
if (axp20x_usb_vbus_needs_polling(power))
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+ mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
}
static int axp20x_get_current_max(struct axp20x_usb_power *power, int *val)
@@ -397,7 +397,7 @@ static int axp20x_usb_power_prop_writeable(struct power_supply *psy,
struct axp20x_usb_power *power = power_supply_get_drvdata(psy);
/*
- * The VBUS path select flag works differently on on AXP288 and newer:
+ * The VBUS path select flag works differently on AXP288 and newer:
* - On AXP20x and AXP22x, the flag enables VBUS (ignoring N_VBUSEN).
* - On AXP288 and AXP8xx, the flag disables VBUS (ignoring N_VBUSEN).
* We only expose the control on variants where it can be used to force
@@ -525,7 +525,7 @@ static int axp20x_usb_power_resume(struct device *dev)
while (i < power->num_irqs)
enable_irq(power->irqs[i++]);
- mod_delayed_work(system_wq, &power->vbus_detect, DEBOUNCE_TIME);
+ mod_delayed_work(system_power_efficient_wq, &power->vbus_detect, DEBOUNCE_TIME);
return 0;
}
@@ -647,7 +647,7 @@ static int axp20x_usb_power_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&power->vbus_detect, axp20x_usb_power_poll_vbus);
if (axp20x_usb_vbus_needs_polling(power))
- queue_delayed_work(system_wq, &power->vbus_detect, 0);
+ queue_delayed_work(system_power_efficient_wq, &power->vbus_detect, 0);
return 0;
}
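Every VBUS debounce scheduling in this file now targets system_power_efficient_wq. When workqueue power efficiency is enabled (CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y or the workqueue.power_efficient boot parameter) that queue is unbound, so the scheduler may run the work on an already-awake CPU instead of waking the queuing one; otherwise it behaves exactly like system_wq. The swap is call-for-call:

	/* identical call shape; only the target workqueue differs */
	mod_delayed_work(system_power_efficient_wq,
			 &power->vbus_detect, DEBOUNCE_TIME);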
diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c
index 9d981b76c1e7..a4df1ea92386 100644
--- a/drivers/power/supply/axp288_charger.c
+++ b/drivers/power/supply/axp288_charger.c
@@ -548,14 +548,15 @@ out:
/*
* The HP Pavilion x2 10 series comes in a number of variants:
- * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D"
- * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E"
- * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4"
+ * Bay Trail SoC + AXP288 PMIC, Micro-USB, DMI_BOARD_NAME: "8021"
+ * Bay Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "815D"
+ * Cherry Trail SoC + AXP288 PMIC, Type-C, DMI_BOARD_NAME: "813E"
+ * Cherry Trail SoC + TI PMIC, Type-C, DMI_BOARD_NAME: "827C" or "82F4"
*
- * The variants with the AXP288 PMIC are all kinds of special:
+ * The variants with the AXP288 + Type-C connector are all kinds of special:
*
- * 1. All variants use a Type-C connector which the AXP288 does not support, so
- * when using a Type-C charger it is not recognized. Unlike most AXP288 devices,
+ * 1. They use a Type-C connector which the AXP288 does not support, so when
+ * using a Type-C charger it is not recognized. Unlike most AXP288 devices,
* this model actually has mostly working ACPI AC / Battery code, the ACPI code
* "solves" this by simply setting the input_current_limit to 3A.
* There are still some issues with the ACPI code, so we use this native driver,
@@ -578,12 +579,17 @@ out:
*/
static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = {
{
- /*
- * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry
- * Trail model has "HP", so we only match on product_name.
- */
.matches = {
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "815D"),
+ },
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "HP"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "813E"),
},
},
{} /* Terminating entry */
diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c
index d14186525e1e..4841e14a5bfb 100644
--- a/drivers/power/supply/bq24190_charger.c
+++ b/drivers/power/supply/bq24190_charger.c
@@ -16,7 +16,6 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/workqueue.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/extcon-provider.h>
@@ -448,8 +447,10 @@ static ssize_t bq24190_sysfs_show(struct device *dev,
return -EINVAL;
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
ret = bq24190_read_mask(bdi, info->reg, info->mask, info->shift, &v);
if (ret)
@@ -1077,8 +1078,10 @@ static int bq24190_charger_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_CHARGE_TYPE:
@@ -1149,8 +1152,10 @@ static int bq24190_charger_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
@@ -1410,8 +1415,10 @@ static int bq24190_battery_get_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
@@ -1456,8 +1463,10 @@ static int bq24190_battery_set_property(struct power_supply *psy,
dev_dbg(bdi->dev, "prop: %d\n", psp);
ret = pm_runtime_get_sync(bdi->dev);
- if (ret < 0)
+ if (ret < 0) {
+ pm_runtime_put_noidle(bdi->dev);
return ret;
+ }
switch (psp) {
case POWER_SUPPLY_PROP_ONLINE:
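The repeated bq24190 change fixes a usage-count leak: pm_runtime_get_sync() increments the device's runtime-PM count even when the resume it triggers fails, so a bare return strands the count and the chip can never runtime-suspend again. The balanced pattern applied at each call site:

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the count was bumped even though resume failed: drop it */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... talk to the hardware ... */
	pm_runtime_put(dev);

Newer kernels also provide pm_runtime_resume_and_get(), which performs the put_noidle on failure internally.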
diff --git a/drivers/power/supply/bq24735-charger.c b/drivers/power/supply/bq24735-charger.c
index 6931e1d826f5..ab2f4bf8f603 100644
--- a/drivers/power/supply/bq24735-charger.c
+++ b/drivers/power/supply/bq24735-charger.c
@@ -18,7 +18,6 @@
*/
#include <linux/err.h>
-#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/interrupt.h>
diff --git a/drivers/power/supply/bq25890_charger.c b/drivers/power/supply/bq25890_charger.c
index 34c21c51bac1..945c3257ca93 100644
--- a/drivers/power/supply/bq25890_charger.c
+++ b/drivers/power/supply/bq25890_charger.c
@@ -299,7 +299,7 @@ static const union {
/* TODO: BQ25896 has max ICHG 3008 mA */
[TBL_ICHG] = { .rt = {0, 5056000, 64000} }, /* uA */
[TBL_ITERM] = { .rt = {64000, 1024000, 64000} }, /* uA */
- [TBL_IILIM] = { .rt = {50000, 3200000, 50000} }, /* uA */
+ [TBL_IILIM] = { .rt = {100000, 3250000, 50000} }, /* uA */
[TBL_VREG] = { .rt = {3840000, 4608000, 16000} }, /* uV */
[TBL_BOOSTV] = { .rt = {4550000, 5510000, 64000} }, /* uV */
[TBL_SYSVMIN] = { .rt = {3000000, 3700000, 100000} }, /* uV */
diff --git a/drivers/power/supply/collie_battery.c b/drivers/power/supply/collie_battery.c
index cbd588e9e233..7fb9b549f2de 100644
--- a/drivers/power/supply/collie_battery.c
+++ b/drivers/power/supply/collie_battery.c
@@ -12,7 +12,9 @@
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/gpio/consumer.h>
#include <linux/mfd/ucb1x00.h>
#include <asm/mach/sharpsl_param.h>
@@ -31,18 +33,18 @@ struct collie_bat {
struct mutex work_lock; /* protects data */
bool (*is_present)(struct collie_bat *bat);
- int gpio_full;
- int gpio_charge_on;
+ struct gpio_desc *gpio_full;
+ struct gpio_desc *gpio_charge_on;
int technology;
- int gpio_bat;
+ struct gpio_desc *gpio_bat;
int adc_bat;
int adc_bat_divider;
int bat_max;
int bat_min;
- int gpio_temp;
+ struct gpio_desc *gpio_temp;
int adc_temp;
int adc_temp_divider;
};
@@ -53,15 +55,15 @@ static unsigned long collie_read_bat(struct collie_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_bat < 0 || bat->adc_bat < 0)
+ if (!bat->gpio_bat || bat->adc_bat < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_bat, 1);
+ gpiod_set_value(bat->gpio_bat, 1);
msleep(5);
ucb1x00_adc_enable(ucb);
value = ucb1x00_adc_read(ucb, bat->adc_bat, UCB_SYNC);
ucb1x00_adc_disable(ucb);
- gpio_set_value(bat->gpio_bat, 0);
+ gpiod_set_value(bat->gpio_bat, 0);
mutex_unlock(&bat_lock);
value = value * 1000000 / bat->adc_bat_divider;
@@ -71,16 +73,16 @@ static unsigned long collie_read_bat(struct collie_bat *bat)
static unsigned long collie_read_temp(struct collie_bat *bat)
{
unsigned long value = 0;
- if (bat->gpio_temp < 0 || bat->adc_temp < 0)
+ if (!bat->gpio_temp || bat->adc_temp < 0)
return 0;
mutex_lock(&bat_lock);
- gpio_set_value(bat->gpio_temp, 1);
+ gpiod_set_value(bat->gpio_temp, 1);
msleep(5);
ucb1x00_adc_enable(ucb);
value = ucb1x00_adc_read(ucb, bat->adc_temp, UCB_SYNC);
ucb1x00_adc_disable(ucb);
- gpio_set_value(bat->gpio_temp, 0);
+ gpiod_set_value(bat->gpio_temp, 0);
mutex_unlock(&bat_lock);
value = value * 10000 / bat->adc_temp_divider;
@@ -162,23 +164,23 @@ static void collie_bat_update(struct collie_bat *bat)
bat->full_chrg = -1;
} else if (power_supply_am_i_supplied(psy)) {
if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
- gpio_set_value(bat->gpio_charge_on, 1);
+ gpiod_set_value(bat->gpio_charge_on, 1);
mdelay(15);
}
- if (gpio_get_value(bat->gpio_full)) {
+ if (gpiod_get_value(bat->gpio_full)) {
if (old == POWER_SUPPLY_STATUS_CHARGING ||
bat->full_chrg == -1)
bat->full_chrg = collie_read_bat(bat);
- gpio_set_value(bat->gpio_charge_on, 0);
+ gpiod_set_value(bat->gpio_charge_on, 0);
bat->status = POWER_SUPPLY_STATUS_FULL;
} else {
- gpio_set_value(bat->gpio_charge_on, 1);
+ gpiod_set_value(bat->gpio_charge_on, 1);
bat->status = POWER_SUPPLY_STATUS_CHARGING;
}
} else {
- gpio_set_value(bat->gpio_charge_on, 0);
+ gpiod_set_value(bat->gpio_charge_on, 0);
bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
}
@@ -230,18 +232,18 @@ static struct collie_bat collie_bat_main = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = COLLIE_GPIO_CO,
- .gpio_charge_on = COLLIE_GPIO_CHARGE_ON,
+ .gpio_full = NULL,
+ .gpio_charge_on = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,
- .gpio_bat = COLLIE_GPIO_MBAT_ON,
+ .gpio_bat = NULL,
.adc_bat = UCB_ADC_INP_AD1,
.adc_bat_divider = 155,
.bat_max = 4310000,
.bat_min = 1551 * 1000000 / 414,
- .gpio_temp = COLLIE_GPIO_TMP_ON,
+ .gpio_temp = NULL,
.adc_temp = UCB_ADC_INP_AD0,
.adc_temp_divider = 10000,
};
@@ -260,30 +262,24 @@ static struct collie_bat collie_bat_bu = {
.full_chrg = -1,
.psy = NULL,
- .gpio_full = -1,
- .gpio_charge_on = -1,
+ .gpio_full = NULL,
+ .gpio_charge_on = NULL,
.technology = POWER_SUPPLY_TECHNOLOGY_LiMn,
- .gpio_bat = COLLIE_GPIO_BBAT_ON,
+ .gpio_bat = NULL,
.adc_bat = UCB_ADC_INP_AD1,
.adc_bat_divider = 155,
.bat_max = 3000000,
.bat_min = 1900000,
- .gpio_temp = -1,
+ .gpio_temp = NULL,
.adc_temp = -1,
.adc_temp_divider = -1,
};
-static struct gpio collie_batt_gpios[] = {
- { COLLIE_GPIO_CO, GPIOF_IN, "main battery full" },
- { COLLIE_GPIO_MAIN_BAT_LOW, GPIOF_IN, "main battery low" },
- { COLLIE_GPIO_CHARGE_ON, GPIOF_OUT_INIT_LOW, "main charge on" },
- { COLLIE_GPIO_MBAT_ON, GPIOF_OUT_INIT_LOW, "main battery" },
- { COLLIE_GPIO_TMP_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
- { COLLIE_GPIO_BBAT_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
-};
+/* Obtained but unused GPIO */
+static struct gpio_desc *collie_mbat_low;
#ifdef CONFIG_PM
static int wakeup_enabled;
@@ -295,7 +291,7 @@ static int collie_bat_suspend(struct ucb1x00_dev *dev)
if (device_may_wakeup(&dev->ucb->dev) &&
collie_bat_main.status == POWER_SUPPLY_STATUS_CHARGING)
- wakeup_enabled = !enable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
+ wakeup_enabled = !enable_irq_wake(gpiod_to_irq(collie_bat_main.gpio_full));
else
wakeup_enabled = 0;
@@ -305,7 +301,7 @@ static int collie_bat_suspend(struct ucb1x00_dev *dev)
static int collie_bat_resume(struct ucb1x00_dev *dev)
{
if (wakeup_enabled)
- disable_irq_wake(gpio_to_irq(COLLIE_GPIO_CO));
+ disable_irq_wake(gpiod_to_irq(collie_bat_main.gpio_full));
/* things may have changed while we were away */
schedule_work(&bat_work);
@@ -320,16 +316,71 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
{
int ret;
struct power_supply_config psy_main_cfg = {}, psy_bu_cfg = {};
+ struct gpio_chip *gc = &dev->ucb->gpio;
if (!machine_is_collie())
return -ENODEV;
ucb = dev->ucb;
- ret = gpio_request_array(collie_batt_gpios,
- ARRAY_SIZE(collie_batt_gpios));
- if (ret)
- return ret;
+ /* Obtain all the main battery GPIOs */
+ collie_bat_main.gpio_full = gpiod_get(&dev->ucb->dev,
+ "main battery full",
+ GPIOD_IN);
+ if (IS_ERR(collie_bat_main.gpio_full))
+ return PTR_ERR(collie_bat_main.gpio_full);
+
+ collie_mbat_low = gpiod_get(&dev->ucb->dev,
+ "main battery low",
+ GPIOD_IN);
+ if (IS_ERR(collie_mbat_low)) {
+ ret = PTR_ERR(collie_mbat_low);
+ goto err_put_gpio_full;
+ }
+
+ collie_bat_main.gpio_charge_on = gpiod_get(&dev->ucb->dev,
+ "main charge on",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_main.gpio_charge_on)) {
+ ret = PTR_ERR(collie_bat_main.gpio_charge_on);
+ goto err_put_mbat_low;
+ }
+
+ /* COLLIE_GPIO_MBAT_ON = GPIO 7 on the UCB (TC35143) */
+ collie_bat_main.gpio_bat = gpiochip_request_own_desc(gc,
+ 7,
+ "main battery",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_main.gpio_bat)) {
+ ret = PTR_ERR(collie_bat_main.gpio_bat);
+ goto err_put_gpio_charge_on;
+ }
+
+ /* COLLIE_GPIO_TMP_ON = GPIO 9 on the UCB (TC35143) */
+ collie_bat_main.gpio_temp = gpiochip_request_own_desc(gc,
+ 9,
+ "main battery temp",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_main.gpio_temp)) {
+ ret = PTR_ERR(collie_bat_main.gpio_temp);
+ goto err_free_gpio_bat;
+ }
+
+ /*
+ * Obtain the backup battery COLLIE_GPIO_BBAT_ON which is
+ * GPIO 8 on the UCB (TC35143)
+ */
+ collie_bat_bu.gpio_bat = gpiochip_request_own_desc(gc,
+ 8,
+ "backup battery",
+ GPIO_ACTIVE_HIGH,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(collie_bat_bu.gpio_bat)) {
+ ret = PTR_ERR(collie_bat_bu.gpio_bat);
+ goto err_free_gpio_temp;
+ }
mutex_init(&collie_bat_main.work_lock);
@@ -370,27 +421,43 @@ err_irq:
err_psy_reg_bu:
power_supply_unregister(collie_bat_main.psy);
err_psy_reg_main:
-
/* see comment in collie_bat_remove */
cancel_work_sync(&bat_work);
- gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
+ gpiochip_free_own_desc(collie_bat_bu.gpio_bat);
+err_free_gpio_temp:
+ gpiochip_free_own_desc(collie_bat_main.gpio_temp);
+err_free_gpio_bat:
+ gpiochip_free_own_desc(collie_bat_main.gpio_bat);
+err_put_gpio_charge_on:
+ gpiod_put(collie_bat_main.gpio_charge_on);
+err_put_mbat_low:
+ gpiod_put(collie_mbat_low);
+err_put_gpio_full:
+ gpiod_put(collie_bat_main.gpio_full);
+
return ret;
}
static void collie_bat_remove(struct ucb1x00_dev *dev)
{
free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main);
-
power_supply_unregister(collie_bat_bu.psy);
power_supply_unregister(collie_bat_main.psy);
+ /* These are obtained from the machine */
+ gpiod_put(collie_bat_main.gpio_full);
+ gpiod_put(collie_mbat_low);
+ gpiod_put(collie_bat_main.gpio_charge_on);
+ /* These are directly from the UCB so let's free them */
+ gpiochip_free_own_desc(collie_bat_main.gpio_bat);
+ gpiochip_free_own_desc(collie_bat_main.gpio_temp);
+ gpiochip_free_own_desc(collie_bat_bu.gpio_bat);
/*
* Now cancel the bat_work. We won't get any more schedules,
* since all sources (isr and external_power_changed) are
* unregistered now.
*/
cancel_work_sync(&bat_work);
- gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
}
static struct ucb1x00_driver collie_bat_driver = {
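The collie conversion shows the two descriptor-acquisition paths side by side: board-routed lines are looked up through machine descriptor tables with gpiod_get()/gpiod_put(), while lines living on the driver's own chip (the UCB/TC35143 GPIOs here) are claimed directly with gpiochip_request_own_desc() and released with gpiochip_free_own_desc(). A condensed sketch with error handling elided:

	/* machine-provided line, matched by its lookup-table label */
	bat->gpio_charge_on = gpiod_get(dev, "main charge on", GPIOD_OUT_LOW);

	/* line 7 on the driver's own gpio_chip, claimed directly */
	bat->gpio_bat = gpiochip_request_own_desc(gc, 7, "main battery",
						  GPIO_ACTIVE_HIGH,
						  GPIOD_OUT_LOW);

	gpiod_set_value(bat->gpio_bat, 1);	/* logical value, polarity-aware */
	/* ... */
	gpiochip_free_own_desc(bat->gpio_bat);
	gpiod_put(bat->gpio_charge_on);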
diff --git a/drivers/power/supply/generic-adc-battery.c b/drivers/power/supply/generic-adc-battery.c
index caa829738ef7..0032069fbc2b 100644
--- a/drivers/power/supply/generic-adc-battery.c
+++ b/drivers/power/supply/generic-adc-battery.c
@@ -12,7 +12,7 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -52,6 +52,7 @@ struct gab {
int level;
int status;
bool cable_plugged;
+ struct gpio_desc *charge_finished;
};
static struct gab *to_generic_bat(struct power_supply *psy)
@@ -91,13 +92,9 @@ static const enum power_supply_property gab_dyn_props[] = {
static bool gab_charge_finished(struct gab *adc_bat)
{
- struct gab_platform_data *pdata = adc_bat->pdata;
- bool ret = gpio_get_value(pdata->gpio_charge_finished);
- bool inv = pdata->gpio_inverted;
-
- if (!gpio_is_valid(pdata->gpio_charge_finished))
+ if (!adc_bat->charge_finished)
return false;
- return ret ^ inv;
+ return gpiod_get_value(adc_bat->charge_finished);
}
static int gab_get_status(struct gab *adc_bat)
@@ -327,18 +324,17 @@ static int gab_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&adc_bat->bat_work, gab_work);
- if (gpio_is_valid(pdata->gpio_charge_finished)) {
+ adc_bat->charge_finished = devm_gpiod_get_optional(&pdev->dev,
+ "charged", GPIOD_IN);
+ if (adc_bat->charge_finished) {
int irq;
- ret = gpio_request(pdata->gpio_charge_finished, "charged");
- if (ret)
- goto gpio_req_fail;
- irq = gpio_to_irq(pdata->gpio_charge_finished);
+ irq = gpiod_to_irq(adc_bat->charge_finished);
ret = request_any_context_irq(irq, gab_charged,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"battery charged", adc_bat);
if (ret < 0)
- goto err_gpio;
+ goto gpio_req_fail;
}
platform_set_drvdata(pdev, adc_bat);
@@ -348,8 +344,6 @@ static int gab_probe(struct platform_device *pdev)
msecs_to_jiffies(0));
return 0;
-err_gpio:
- gpio_free(pdata->gpio_charge_finished);
gpio_req_fail:
power_supply_unregister(adc_bat->psy);
err_reg_fail:
@@ -367,14 +361,11 @@ static int gab_remove(struct platform_device *pdev)
{
int chan;
struct gab *adc_bat = platform_get_drvdata(pdev);
- struct gab_platform_data *pdata = adc_bat->pdata;
power_supply_unregister(adc_bat->psy);
- if (gpio_is_valid(pdata->gpio_charge_finished)) {
- free_irq(gpio_to_irq(pdata->gpio_charge_finished), adc_bat);
- gpio_free(pdata->gpio_charge_finished);
- }
+ if (adc_bat->charge_finished)
+ free_irq(gpiod_to_irq(adc_bat->charge_finished), adc_bat);
for (chan = 0; chan < ARRAY_SIZE(gab_chan_name); chan++) {
if (adc_bat->channel[chan])
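devm_gpiod_get_optional() has three distinct outcomes, which is what lets the gpio_is_valid()/gpio_inverted bookkeeping disappear: NULL when the line simply is not described (feature absent), an ERR_PTR on genuine failure (including -EPROBE_DEFER), and a live descriptor otherwise, with any active-low polarity folded into gpiod_get_value(). Fully-checked usage looks like:

	desc = devm_gpiod_get_optional(&pdev->dev, "charged", GPIOD_IN);
	if (IS_ERR(desc))
		return PTR_ERR(desc);	/* real error, possibly -EPROBE_DEFER */
	if (desc) {
		/* line exists: hook up the end-of-charge interrupt */
		irq = gpiod_to_irq(desc);
	}

Note that the probe above tests only for non-NULL; the s3c_adc_battery hunk later in this series screens IS_ERR() first, which is the more defensive form.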
diff --git a/drivers/power/supply/max17042_battery.c b/drivers/power/supply/max17042_battery.c
index f284547913d6..79d4b5988360 100644
--- a/drivers/power/supply/max17042_battery.c
+++ b/drivers/power/supply/max17042_battery.c
@@ -78,6 +78,7 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TEMP_ALERT_MIN,
POWER_SUPPLY_PROP_TEMP_ALERT_MAX,
@@ -85,9 +86,10 @@ static enum power_supply_property max17042_battery_props[] = {
POWER_SUPPLY_PROP_TEMP_MAX,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_SCOPE,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ // these two have to be at the end of the list
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CURRENT_AVG,
- POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
};
static int max17042_get_temperature(struct max17042_chip *chip, int *temp)
@@ -353,7 +355,8 @@ static int max17042_get_property(struct power_supply *psy,
if (ret < 0)
return ret;
- val->intval = data * 1000 / 2;
+ data64 = sign_extend64(data, 15) * 5000000ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
break;
case POWER_SUPPLY_PROP_TEMP:
ret = max17042_get_temperature(chip, &val->intval);
@@ -394,8 +397,8 @@ static int max17042_get_property(struct power_supply *psy,
if (ret < 0)
return ret;
- val->intval = sign_extend32(data, 15);
- val->intval *= 1562500 / chip->pdata->r_sns;
+ data64 = sign_extend64(data, 15) * 1562500ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
} else {
return -EINVAL;
}
@@ -406,12 +409,20 @@ static int max17042_get_property(struct power_supply *psy,
if (ret < 0)
return ret;
- val->intval = sign_extend32(data, 15);
- val->intval *= 1562500 / chip->pdata->r_sns;
+ data64 = sign_extend64(data, 15) * 1562500ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
} else {
return -EINVAL;
}
break;
+ case POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT:
+ ret = regmap_read(map, MAX17042_ICHGTerm, &data);
+ if (ret < 0)
+ return ret;
+
+ data64 = data * 1562500ll;
+ val->intval = div_s64(data64, chip->pdata->r_sns);
+ break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
ret = regmap_read(map, MAX17042_TTE, &data);
if (ret < 0)
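The 64-bit rework matters for both precision and range. The current registers hold a signed 16-bit value whose LSB is 1.5625 µV across the sense resistor; with r_sns in microohms, raw * 1562500 / r_sns yields microamps (picovolts per microohm). The old code divided first, so 1562500 / r_sns truncated (156 instead of 156.25 for a 10 mΩ shunt), while multiplying first overflows 32 bits (32767 * 1562500 ≈ 5.1e10), hence sign_extend64() plus div_s64():

	s64 data64;

	/* raw (signed 16-bit) * 1.5625 uV, then / r_sns (uOhm) -> uA */
	data64 = sign_extend64(data, 15) * 1562500ll;
	val->intval = div_s64(data64, chip->pdata->r_sns);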
diff --git a/drivers/power/supply/max8997_charger.c b/drivers/power/supply/max8997_charger.c
index f5e84cd47924..1947af25879a 100644
--- a/drivers/power/supply/max8997_charger.c
+++ b/drivers/power/supply/max8997_charger.c
@@ -13,6 +13,20 @@
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
+/* MAX8997_REG_STATUS4 */
+#define DCINOK_SHIFT 1
+#define DCINOK_MASK (1 << DCINOK_SHIFT)
+#define DETBAT_SHIFT 2
+#define DETBAT_MASK (1 << DETBAT_SHIFT)
+
+/* MAX8997_REG_MBCCTRL1 */
+#define TFCH_SHIFT 4
+#define TFCH_MASK (7 << TFCH_SHIFT)
+
+/* MAX8997_REG_MBCCTRL5 */
+#define ITOPOFF_SHIFT 0
+#define ITOPOFF_MASK (0xF << ITOPOFF_SHIFT)
+
struct charger_data {
struct device *dev;
struct max8997_dev *iodev;
@@ -20,7 +34,7 @@ struct charger_data {
};
static enum power_supply_property max8997_battery_props[] = {
- POWER_SUPPLY_PROP_STATUS, /* "FULL" or "NOT FULL" only. */
+ POWER_SUPPLY_PROP_STATUS, /* "FULL", "CHARGING" or "DISCHARGING". */
POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */
POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */
};
@@ -43,6 +57,10 @@ static int max8997_battery_get_property(struct power_supply *psy,
return ret;
if ((reg & (1 << 0)) == 0x1)
val->intval = POWER_SUPPLY_STATUS_FULL;
+ else if ((reg & DCINOK_MASK))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
break;
case POWER_SUPPLY_PROP_PRESENT:
@@ -50,7 +68,7 @@ static int max8997_battery_get_property(struct power_supply *psy,
ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
if (ret)
return ret;
- if ((reg & (1 << 2)) == 0x0)
+ if ((reg & DETBAT_MASK) == 0x0)
val->intval = 1;
break;
@@ -59,8 +77,7 @@ static int max8997_battery_get_property(struct power_supply *psy,
ret = max8997_read_reg(i2c, MAX8997_REG_STATUS4, &reg);
if (ret)
return ret;
- /* DCINOK */
- if (reg & (1 << 1))
+ if (reg & DCINOK_MASK)
val->intval = 1;
break;
@@ -84,11 +101,14 @@ static int max8997_battery_probe(struct platform_device *pdev)
int ret = 0;
struct charger_data *charger;
struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
- struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct i2c_client *i2c = iodev->i2c;
+ struct max8997_platform_data *pdata = iodev->pdata;
struct power_supply_config psy_cfg = {};
- if (!pdata)
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied.\n");
return -EINVAL;
+ }
if (pdata->eoc_mA) {
int val = (pdata->eoc_mA - 50) / 10;
@@ -97,30 +117,29 @@ static int max8997_battery_probe(struct platform_device *pdev)
if (val > 0xf)
val = 0xf;
- ret = max8997_update_reg(iodev->i2c,
- MAX8997_REG_MBCCTRL5, val, 0xf);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL5,
+ val << ITOPOFF_SHIFT, ITOPOFF_MASK);
if (ret < 0) {
dev_err(&pdev->dev, "Cannot use i2c bus.\n");
return ret;
}
}
-
switch (pdata->timeout) {
case 5:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x2 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x2 << TFCH_SHIFT, TFCH_MASK);
break;
case 6:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x3 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x3 << TFCH_SHIFT, TFCH_MASK);
break;
case 7:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x4 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x4 << TFCH_SHIFT, TFCH_MASK);
break;
case 0:
- ret = max8997_update_reg(iodev->i2c, MAX8997_REG_MBCCTRL1,
- 0x7 << 4, 0x7 << 4);
+ ret = max8997_update_reg(i2c, MAX8997_REG_MBCCTRL1,
+ 0x7 << TFCH_SHIFT, TFCH_MASK);
break;
default:
dev_err(&pdev->dev, "incorrect timeout value (%d)\n",
@@ -138,7 +157,6 @@ static int max8997_battery_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, charger);
-
charger->dev = &pdev->dev;
charger->iodev = iodev;
@@ -168,18 +186,7 @@ static struct platform_driver max8997_battery_driver = {
.probe = max8997_battery_probe,
.id_table = max8997_battery_id,
};
-
-static int __init max8997_battery_init(void)
-{
- return platform_driver_register(&max8997_battery_driver);
-}
-subsys_initcall(max8997_battery_init);
-
-static void __exit max8997_battery_cleanup(void)
-{
- platform_driver_unregister(&max8997_battery_driver);
-}
-module_exit(max8997_battery_cleanup);
+module_platform_driver(max8997_battery_driver);
MODULE_DESCRIPTION("MAXIM 8997/8966 battery control driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
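module_platform_driver() generates exactly the register/unregister pair it replaces; the one behavioural nuance is that this driver previously registered at subsys_initcall() time, whereas the macro uses the ordinary module/device initcall level. Roughly, the macro expands to:

	static int __init max8997_battery_driver_init(void)
	{
		return platform_driver_register(&max8997_battery_driver);
	}
	module_init(max8997_battery_driver_init);

	static void __exit max8997_battery_driver_exit(void)
	{
		platform_driver_unregister(&max8997_battery_driver);
	}
	module_exit(max8997_battery_driver_exit);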
diff --git a/drivers/power/supply/pm2301_charger.c b/drivers/power/supply/pm2301_charger.c
index 2df6a2459d1f..ac06ecf7fc9c 100644
--- a/drivers/power/supply/pm2301_charger.c
+++ b/drivers/power/supply/pm2301_charger.c
@@ -455,7 +455,6 @@ static int pm2_int_reg4(void *pm2_data, int val)
static int pm2_int_reg5(void *pm2_data, int val)
{
struct pm2xxx_charger *pm2 = pm2_data;
- int ret = 0;
if (val & (PM2XXX_INT6_ITVPWR2DROP | PM2XXX_INT6_ITVPWR1DROP)) {
dev_dbg(pm2->dev, "VMPWR drop to VBAT level\n");
@@ -468,7 +467,7 @@ static int pm2_int_reg5(void *pm2_data, int val)
dev_dbg(pm2->dev, "Falling/Rising edge on WPWR1/2\n");
}
- return ret;
+ return 0;
}
static irqreturn_t pm2xxx_irq_int(int irq, void *data)
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index a616b9d8f43c..92dd63171193 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -402,7 +402,7 @@ void power_supply_init_attrs(struct device_type *dev_type)
struct device_attribute *attr;
if (!power_supply_attrs[i].prop_name) {
- pr_warn("%s: Property %d skipped because is is missing from power_supply_attrs\n",
+ pr_warn("%s: Property %d skipped because it is missing from power_supply_attrs\n",
__func__, i);
sprintf(power_supply_attrs[i].attr_name, "_err_%d", i);
} else {
diff --git a/drivers/power/supply/s3c_adc_battery.c b/drivers/power/supply/s3c_adc_battery.c
index 60b7f41ab063..a2addc24ee8b 100644
--- a/drivers/power/supply/s3c_adc_battery.c
+++ b/drivers/power/supply/s3c_adc_battery.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/leds.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/err.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
@@ -31,6 +31,7 @@ struct s3c_adc_bat {
struct power_supply *psy;
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata;
+ struct gpio_desc *charge_finished;
int volt_value;
int cur_value;
unsigned int timestamp;
@@ -132,9 +133,7 @@ static int calc_full_volt(int volt_val, int cur_val, int impedance)
static int charge_finished(struct s3c_adc_bat *bat)
{
- return bat->pdata->gpio_inverted ?
- !gpio_get_value(bat->pdata->gpio_charge_finished) :
- gpio_get_value(bat->pdata->gpio_charge_finished);
+ return gpiod_get_value(bat->charge_finished);
}
static int s3c_adc_bat_get_property(struct power_supply *psy,
@@ -169,7 +168,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
}
if (bat->cable_plugged &&
- ((bat->pdata->gpio_charge_finished < 0) ||
+ (!bat->charge_finished ||
!charge_finished(bat))) {
lut = bat->pdata->lut_acin;
lut_size = bat->pdata->lut_acin_cnt;
@@ -206,7 +205,7 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_STATUS:
- if (bat->pdata->gpio_charge_finished < 0)
+ if (!bat->charge_finished)
val->intval = bat->level == 100000 ?
POWER_SUPPLY_STATUS_FULL : bat->status;
else
@@ -265,7 +264,7 @@ static void s3c_adc_bat_work(struct work_struct *work)
bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
}
} else {
- if ((bat->pdata->gpio_charge_finished >= 0) && is_plugged) {
+ if (bat->charge_finished && is_plugged) {
is_charged = charge_finished(&main_bat);
if (is_charged) {
if (bat->pdata->disable_charger)
@@ -294,6 +293,7 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
struct s3c_adc_client *client;
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
struct power_supply_config psy_cfg = {};
+ struct gpio_desc *gpiod;
int ret;
client = s3c_adc_register(pdev, NULL, NULL, 0);
@@ -304,8 +304,17 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, client);
+ gpiod = devm_gpiod_get_optional(&pdev->dev, "charge-status", GPIOD_IN);
+ if (IS_ERR(gpiod)) {
+ /* Could be probe deferral etc */
+ ret = PTR_ERR(gpiod);
+ dev_err(&pdev->dev, "no GPIO %d\n", ret);
+ return ret;
+ }
+
main_bat.client = client;
main_bat.pdata = pdata;
+ main_bat.charge_finished = gpiod;
main_bat.volt_value = -1;
main_bat.cur_value = -1;
main_bat.cable_plugged = 0;
@@ -323,6 +332,7 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
backup_bat.client = client;
backup_bat.pdata = pdev->dev.platform_data;
+ backup_bat.charge_finished = gpiod;
backup_bat.volt_value = -1;
backup_bat.psy = power_supply_register(&pdev->dev,
&backup_bat_desc,
@@ -335,12 +345,8 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&bat_work, s3c_adc_bat_work);
- if (pdata->gpio_charge_finished >= 0) {
- ret = gpio_request(pdata->gpio_charge_finished, "charged");
- if (ret)
- goto err_gpio;
-
- ret = request_irq(gpio_to_irq(pdata->gpio_charge_finished),
+ if (gpiod) {
+ ret = request_irq(gpiod_to_irq(gpiod),
s3c_adc_bat_charged,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
"battery charged", NULL);
@@ -364,12 +370,9 @@ static int s3c_adc_bat_probe(struct platform_device *pdev)
return 0;
err_platform:
- if (pdata->gpio_charge_finished >= 0)
- free_irq(gpio_to_irq(pdata->gpio_charge_finished), NULL);
+ if (gpiod)
+ free_irq(gpiod_to_irq(gpiod), NULL);
err_irq:
- if (pdata->gpio_charge_finished >= 0)
- gpio_free(pdata->gpio_charge_finished);
-err_gpio:
if (pdata->backup_volt_mult)
power_supply_unregister(backup_bat.psy);
err_reg_backup:
@@ -389,10 +392,8 @@ static int s3c_adc_bat_remove(struct platform_device *pdev)
s3c_adc_release(client);
- if (pdata->gpio_charge_finished >= 0) {
- free_irq(gpio_to_irq(pdata->gpio_charge_finished), NULL);
- gpio_free(pdata->gpio_charge_finished);
- }
+ if (main_bat.charge_finished)
+ free_irq(gpiod_to_irq(main_bat.charge_finished), NULL);
cancel_delayed_work(&bat_work);
@@ -408,12 +409,12 @@ static int s3c_adc_bat_suspend(struct platform_device *pdev,
{
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
- if (pdata->gpio_charge_finished >= 0) {
+ if (main_bat.charge_finished) {
if (device_may_wakeup(&pdev->dev))
enable_irq_wake(
- gpio_to_irq(pdata->gpio_charge_finished));
+ gpiod_to_irq(main_bat.charge_finished));
else {
- disable_irq(gpio_to_irq(pdata->gpio_charge_finished));
+ disable_irq(gpiod_to_irq(main_bat.charge_finished));
main_bat.pdata->disable_charger();
}
}
@@ -425,12 +426,12 @@ static int s3c_adc_bat_resume(struct platform_device *pdev)
{
struct s3c_adc_bat_pdata *pdata = pdev->dev.platform_data;
- if (pdata->gpio_charge_finished >= 0) {
+ if (main_bat.charge_finished) {
if (device_may_wakeup(&pdev->dev))
disable_irq_wake(
- gpio_to_irq(pdata->gpio_charge_finished));
+ gpiod_to_irq(main_bat.charge_finished));
else
- enable_irq(gpio_to_irq(pdata->gpio_charge_finished));
+ enable_irq(gpiod_to_irq(main_bat.charge_finished));
}
/* Schedule timer to check current status */
diff --git a/drivers/power/supply/wm831x_power.c b/drivers/power/supply/wm831x_power.c
index 18b33f14dfee..4cd2dd870039 100644
--- a/drivers/power/supply/wm831x_power.c
+++ b/drivers/power/supply/wm831x_power.c
@@ -668,7 +668,6 @@ static int wm831x_power_probe(struct platform_device *pdev)
fallthrough;
case -EPROBE_DEFER:
goto err_bat_irq;
- break;
}
return ret;
diff --git a/drivers/ps3/ps3-lpm.c b/drivers/ps3/ps3-lpm.c
index e54aa2d82f50..65512b6cc6fd 100644
--- a/drivers/ps3/ps3-lpm.c
+++ b/drivers/ps3/ps3-lpm.c
@@ -1196,7 +1196,7 @@ static int ps3_lpm_probe(struct ps3_system_bus_device *dev)
return 0;
}
-static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
+static void ps3_lpm_remove(struct ps3_system_bus_device *dev)
{
dev_dbg(&dev->core, " -> %s:%u:\n", __func__, __LINE__);
@@ -1206,7 +1206,6 @@ static int ps3_lpm_remove(struct ps3_system_bus_device *dev)
lpm_priv = NULL;
dev_info(&dev->core, " <- %s:%u:\n", __func__, __LINE__);
- return 0;
}
static struct ps3_system_bus_driver ps3_lpm_driver = {
diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c
index 4ed131eaff51..e34ae6a442c7 100644
--- a/drivers/ps3/ps3-vuart.c
+++ b/drivers/ps3/ps3-vuart.c
@@ -1102,7 +1102,7 @@ static int ps3_vuart_cleanup(struct ps3_system_bus_device *dev)
* device can no longer be used.
*/
-static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
+static void ps3_vuart_remove(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_priv *priv = to_port_priv(dev);
struct ps3_vuart_port_driver *drv;
@@ -1118,7 +1118,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
+ return;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
@@ -1141,7 +1141,6 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
}
/**
@@ -1154,7 +1153,7 @@ static int ps3_vuart_remove(struct ps3_system_bus_device *dev)
* sequence.
*/
-static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
+static void ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
{
struct ps3_vuart_port_driver *drv;
@@ -1169,7 +1168,7 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, "%s:%d: no driver bound\n", __func__,
__LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
+ return;
}
drv = ps3_system_bus_dev_to_vuart_drv(dev);
@@ -1193,7 +1192,6 @@ static int ps3_vuart_shutdown(struct ps3_system_bus_device *dev)
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
mutex_unlock(&vuart_bus_priv.probe_mutex);
- return 0;
}
static int __init ps3_vuart_bus_init(void)
diff --git a/drivers/ps3/ps3stor_lib.c b/drivers/ps3/ps3stor_lib.c
index 333ba83006e4..a12a1ad9b5fe 100644
--- a/drivers/ps3/ps3stor_lib.c
+++ b/drivers/ps3/ps3stor_lib.c
@@ -189,7 +189,7 @@ int ps3stor_setup(struct ps3_storage_device *dev, irq_handler_t handler)
dev->bounce_lpar = ps3_mm_phys_to_lpar(__pa(dev->bounce_buf));
dev->bounce_dma = dma_map_single(&dev->sbd.core, dev->bounce_buf,
dev->bounce_size, DMA_BIDIRECTIONAL);
- if (!dev->bounce_dma) {
+ if (dma_mapping_error(&dev->sbd.core, dev->bounce_dma)) {
dev_err(&dev->sbd.core, "%s:%u: map DMA region failed\n",
__func__, __LINE__);
error = -ENODEV;
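The ps3stor fix replaces a check that was wrong on two counts: dma_map_single() signals failure through a special error cookie rather than by returning 0, and a zero bus address can be a perfectly valid mapping on some platforms. dma_mapping_error() is the only portable test:

	dma_addr_t addr;

	addr = dma_map_single(dev, buf, size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;	/* addr is not a usable bus address */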
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 476d7c7fe70a..f2edef0df40f 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -64,6 +64,7 @@ config DP83640_PHY
depends on NETWORK_PHY_TIMESTAMPING
depends on PHYLIB
depends on PTP_1588_CLOCK
+ select CRC32
help
Supports the DP83640 PHYTER with IEEE 1588 features.
@@ -78,6 +79,7 @@ config DP83640_PHY
config PTP_1588_CLOCK_INES
tristate "ZHAW InES PTP time stamping IP core"
depends on NETWORK_PHY_TIMESTAMPING
+ depends on HAS_IOMEM
depends on PHYLIB
depends on PTP_1588_CLOCK
help
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index 63be5362fd3a..0937e1c047ac 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -53,8 +53,8 @@ config PWM_AB8500
config PWM_ATMEL
tristate "Atmel PWM support"
- depends on OF
depends on ARCH_AT91 || COMPILE_TEST
+ depends on HAS_IOMEM && OF
help
Generic PWM framework driver for Atmel SoC.
@@ -75,7 +75,8 @@ config PWM_ATMEL_HLCDC_PWM
config PWM_ATMEL_TCB
tristate "Atmel TC Block PWM support"
- depends on ATMEL_TCLIB && OF
+ depends on OF
+ select REGMAP_MMIO
help
Generic PWM framework driver for Atmel Timer Counter Block.
@@ -88,7 +89,7 @@ config PWM_ATMEL_TCB
config PWM_BCM_IPROC
tristate "iProc PWM support"
depends on ARCH_BCM_IPROC || COMPILE_TEST
- depends on COMMON_CLK
+ depends on COMMON_CLK && HAS_IOMEM
default ARCH_BCM_IPROC
help
Generic PWM framework driver for Broadcom iProc PWM block. This
@@ -111,6 +112,7 @@ config PWM_BCM_KONA
config PWM_BCM2835
tristate "BCM2835 PWM support"
depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM framework driver for BCM2835 controller (Raspberry Pi)
@@ -120,6 +122,7 @@ config PWM_BCM2835
config PWM_BERLIN
tristate "Marvell Berlin PWM support"
depends on ARCH_BERLIN || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM framework driver for Marvell Berlin SoCs.
@@ -129,6 +132,7 @@ config PWM_BERLIN
config PWM_BRCMSTB
tristate "Broadcom STB PWM support"
depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for the Broadcom Set-top-Box
SoCs (BCM7xxx).
@@ -160,9 +164,19 @@ config PWM_CROS_EC
PWM driver for exposing a PWM attached to the ChromeOS Embedded
Controller.
+config PWM_DWC
+ tristate "DesignWare PWM Controller"
+ depends on PCI
+ help
+ PWM driver for Synopsys DWC PWM Controller attached to a PCI bus.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-dwc.
+
config PWM_EP93XX
tristate "Cirrus Logic EP93xx PWM support"
depends on ARCH_EP93XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for Cirrus Logic EP93xx.
@@ -184,6 +198,7 @@ config PWM_FSL_FTM
config PWM_HIBVT
tristate "HiSilicon BVT PWM support"
depends on ARCH_HISI || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for HiSilicon BVT SoCs.
@@ -206,6 +221,7 @@ config PWM_IMG
config PWM_IMX1
tristate "i.MX1 PWM support"
depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for i.MX1 and i.MX21
@@ -215,6 +231,7 @@ config PWM_IMX1
config PWM_IMX27
tristate "i.MX27 PWM support"
depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for i.MX27 and later i.MX SoCs.
@@ -232,6 +249,17 @@ config PWM_IMX_TPM
To compile this driver as a module, choose M here: the module
will be called pwm-imx-tpm.
+config PWM_INTEL_LGM
+ tristate "Intel LGM PWM support"
+ depends on HAS_IOMEM
+ depends on (OF && X86) || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ Generic PWM fan controller driver for LGM SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-intel-lgm.
+
config PWM_IQS620A
tristate "Azoteq IQS620A PWM support"
depends on MFD_IQS62X || COMPILE_TEST
@@ -254,6 +282,15 @@ config PWM_JZ4740
To compile this driver as a module, choose M here: the module
will be called pwm-jz4740.
+config PWM_KEEMBAY
+ tristate "Intel Keem Bay PWM driver"
+ depends on ARCH_KEEMBAY || (ARM64 && COMPILE_TEST)
+ help
+ The platform driver for Intel Keem Bay PWM controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pwm-keembay.
+
config PWM_LP3943
tristate "TI/National Semiconductor LP3943 PWM support"
depends on MFD_LP3943
@@ -267,6 +304,7 @@ config PWM_LP3943
config PWM_LPC18XX_SCT
tristate "LPC18xx/43xx PWM/SCT support"
depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for NXP LPC18xx PWM/SCT which
supports 16 channels.
@@ -279,6 +317,7 @@ config PWM_LPC18XX_SCT
config PWM_LPC32XX
tristate "LPC32XX PWM support"
depends on ARCH_LPC32XX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for LPC32XX. The LPC32XX SOC has two
PWM controllers.
@@ -287,11 +326,13 @@ config PWM_LPC32XX
will be called pwm-lpc32xx.
config PWM_LPSS
+ depends on HAS_IOMEM
tristate
config PWM_LPSS_PCI
tristate "Intel LPSS PWM PCI driver"
- depends on X86 && PCI
+ depends on X86 || COMPILE_TEST
+ depends on HAS_IOMEM && PCI
select PWM_LPSS
help
The PCI driver for Intel Low Power Subsystem PWM controller.
@@ -301,7 +342,8 @@ config PWM_LPSS_PCI
config PWM_LPSS_PLATFORM
tristate "Intel LPSS PWM platform driver"
- depends on X86 && ACPI
+ depends on (X86 && ACPI) || COMPILE_TEST
+ depends on HAS_IOMEM
select PWM_LPSS
help
The platform driver for Intel Low Power Subsystem PWM controller.
@@ -312,7 +354,7 @@ config PWM_LPSS_PLATFORM
config PWM_MESON
tristate "Amlogic Meson PWM driver"
depends on ARCH_MESON || COMPILE_TEST
- depends on COMMON_CLK
+ depends on COMMON_CLK && HAS_IOMEM
help
The platform driver for Amlogic Meson PWM controller.
@@ -333,6 +375,7 @@ config PWM_MTK_DISP
config PWM_MEDIATEK
tristate "MediaTek PWM support"
depends on ARCH_MEDIATEK || RALINK || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for Mediatek ARM SoC.
@@ -341,8 +384,8 @@ config PWM_MEDIATEK
config PWM_MXS
tristate "Freescale MXS PWM support"
- depends on OF
depends on ARCH_MXS || COMPILE_TEST
+ depends on HAS_IOMEM && OF
select STMP_DEVICE
help
Generic PWM framework driver for Freescale MXS.
@@ -373,6 +416,7 @@ config PWM_PCA9685
config PWM_PXA
tristate "PXA PWM support"
depends on ARCH_PXA || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for PXA.
@@ -404,6 +448,7 @@ config PWM_RENESAS_TPU
config PWM_ROCKCHIP
tristate "Rockchip PWM support"
depends on ARCH_ROCKCHIP || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for the PWM controller found on
Rockchip SoCs.
@@ -411,6 +456,7 @@ config PWM_ROCKCHIP
config PWM_SAMSUNG
tristate "Samsung PWM support"
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for Samsung.
@@ -420,7 +466,7 @@ config PWM_SAMSUNG
config PWM_SIFIVE
tristate "SiFive PWM support"
depends on OF
- depends on COMMON_CLK
+ depends on COMMON_CLK && HAS_IOMEM
depends on RISCV || COMPILE_TEST
help
Generic PWM framework driver for SiFive SoCs.
@@ -441,7 +487,7 @@ config PWM_SL28CPLD
config PWM_SPEAR
tristate "STMicroelectronics SPEAr PWM support"
depends on PLAT_SPEAR || COMPILE_TEST
- depends on OF
+ depends on HAS_IOMEM && OF
help
Generic PWM framework driver for the PWM controller on ST
SPEAr SoCs.
@@ -463,7 +509,7 @@ config PWM_SPRD
config PWM_STI
tristate "STiH4xx PWM support"
depends on ARCH_STI || COMPILE_TEST
- depends on OF
+ depends on HAS_IOMEM && OF
help
Generic PWM framework driver for STiH4xx SoCs.
@@ -509,6 +555,7 @@ config PWM_SUN4I
config PWM_TEGRA
tristate "NVIDIA Tegra PWM support"
depends on ARCH_TEGRA || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for the PWFM controller found on NVIDIA
Tegra SoCs.
@@ -519,6 +566,7 @@ config PWM_TEGRA
config PWM_TIECAP
tristate "ECAP PWM support"
depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM driver support for the ECAP APWM controller found on TI SOCs
@@ -528,6 +576,7 @@ config PWM_TIECAP
config PWM_TIEHRPWM
tristate "EHRPWM PWM support"
depends on ARCH_OMAP2PLUS || ARCH_DAVINCI_DA8XX || ARCH_K3 || COMPILE_TEST
+ depends on HAS_IOMEM
help
PWM driver support for the EHRPWM controller found on TI SOCs
@@ -555,6 +604,7 @@ config PWM_TWL_LED
config PWM_VT8500
tristate "vt8500 PWM support"
depends on ARCH_VT8500 || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for vt8500.
@@ -564,6 +614,7 @@ config PWM_VT8500
config PWM_ZX
tristate "ZTE ZX PWM support"
depends on ARCH_ZX || COMPILE_TEST
+ depends on HAS_IOMEM
help
Generic PWM framework driver for ZTE ZX family SoCs.
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index cbdcd55d69ee..18b89d7fd092 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -13,6 +13,7 @@ obj-$(CONFIG_PWM_BRCMSTB) += pwm-brcmstb.o
obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o
obj-$(CONFIG_PWM_CRC) += pwm-crc.o
obj-$(CONFIG_PWM_CROS_EC) += pwm-cros-ec.o
+obj-$(CONFIG_PWM_DWC) += pwm-dwc.o
obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o
obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o
obj-$(CONFIG_PWM_HIBVT) += pwm-hibvt.o
@@ -20,8 +21,10 @@ obj-$(CONFIG_PWM_IMG) += pwm-img.o
obj-$(CONFIG_PWM_IMX1) += pwm-imx1.o
obj-$(CONFIG_PWM_IMX27) += pwm-imx27.o
obj-$(CONFIG_PWM_IMX_TPM) += pwm-imx-tpm.o
+obj-$(CONFIG_PWM_INTEL_LGM) += pwm-intel-lgm.o
obj-$(CONFIG_PWM_IQS620A) += pwm-iqs620a.o
obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
+obj-$(CONFIG_PWM_KEEMBAY) += pwm-keembay.o
obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
obj-$(CONFIG_PWM_LPC18XX_SCT) += pwm-lpc18xx-sct.o
obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 1f16f5365d3c..a8eff4b3ee36 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -1338,7 +1338,7 @@ DEFINE_SEQ_ATTRIBUTE(pwm_debugfs);
static int __init pwm_debugfs_init(void)
{
- debugfs_create_file("pwm", S_IFREG | S_IRUGO, NULL, NULL,
+ debugfs_create_file("pwm", S_IFREG | 0444, NULL, NULL,
&pwm_debugfs_fops);
return 0;
diff --git a/drivers/pwm/pwm-ab8500.c b/drivers/pwm/pwm-ab8500.c
index fdf3964db4a6..58c6c0f5b0ec 100644
--- a/drivers/pwm/pwm-ab8500.c
+++ b/drivers/pwm/pwm-ab8500.c
@@ -101,12 +101,12 @@ static int ab8500_pwm_probe(struct platform_device *pdev)
ab8500->chip.dev = &pdev->dev;
ab8500->chip.ops = &ab8500_pwm_ops;
- ab8500->chip.base = pdev->id;
+ ab8500->chip.base = -1;
ab8500->chip.npwm = 1;
err = pwmchip_add(&ab8500->chip);
if (err < 0)
- return err;
+ return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n");
dev_dbg(&pdev->dev, "pwm probe successful\n");
platform_set_drvdata(pdev, ab8500);
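dev_err_probe(), used above and throughout this series, returns the error it is given, logs with dev_err() for real errors, and demotes the message for -EPROBE_DEFER while recording the deferral reason. A hedged sketch of the open-coded pattern it replaces (the helper name is illustrative):

static int example_add_chip(struct device *dev, struct pwm_chip *chip)
{
	int err = pwmchip_add(chip);

	if (err < 0) {
		/* dev_err_probe() folds this log-then-return idiom into one call */
		if (err != -EPROBE_DEFER)
			dev_err(dev, "Failed to add pwm chip: %d\n", err);
		return err;
	}

	return 0;
}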
diff --git a/drivers/pwm/pwm-atmel-tcb.c b/drivers/pwm/pwm-atmel-tcb.c
index 85c53701958c..5ccc3e7420e9 100644
--- a/drivers/pwm/pwm-atmel-tcb.c
+++ b/drivers/pwm/pwm-atmel-tcb.c
@@ -16,13 +16,16 @@
#include <linux/err.h>
#include <linux/ioport.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <soc/at91/atmel_tcb.h>
-#define NPWM 6
+#define NPWM 2
#define ATMEL_TC_ACMR_MASK (ATMEL_TC_ACPA | ATMEL_TC_ACPC | \
ATMEL_TC_AEEVT | ATMEL_TC_ASWTRG)
@@ -48,11 +51,18 @@ struct atmel_tcb_channel {
struct atmel_tcb_pwm_chip {
struct pwm_chip chip;
spinlock_t lock;
- struct atmel_tc *tc;
+ u8 channel;
+ u8 width;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct clk *gclk;
+ struct clk *slow_clk;
struct atmel_tcb_pwm_device *pwms[NPWM];
- struct atmel_tcb_channel bkup[NPWM / 2];
+ struct atmel_tcb_channel bkup;
};
+static const u8 atmel_tcb_divisors[] = { 2, 8, 32, 128, 0, };
+
static inline struct atmel_tcb_pwm_chip *to_tcb_chip(struct pwm_chip *chip)
{
return container_of(chip, struct atmel_tcb_pwm_chip, chip);
@@ -74,10 +84,6 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm;
- struct atmel_tc *tc = tcbpwmc->tc;
- void __iomem *regs = tc->regs;
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
unsigned cmr;
int ret;
@@ -85,7 +91,7 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
if (!tcbpwm)
return -ENOMEM;
- ret = clk_prepare_enable(tc->clk[group]);
+ ret = clk_prepare_enable(tcbpwmc->clk);
if (ret) {
devm_kfree(chip->dev, tcbpwm);
return ret;
@@ -98,28 +104,31 @@ static int atmel_tcb_pwm_request(struct pwm_chip *chip,
tcbpwm->div = 0;
spin_lock(&tcbpwmc->lock);
- cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/*
* Get init config from Timer Counter registers if
* Timer Counter is already configured as a PWM generator.
*/
if (cmr & ATMEL_TC_WAVE) {
- if (index == 0)
- tcbpwm->duty =
- __raw_readl(regs + ATMEL_TC_REG(group, RA));
+ if (pwm->hwpwm == 0)
+ regmap_read(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RA),
+ &tcbpwm->duty);
else
- tcbpwm->duty =
- __raw_readl(regs + ATMEL_TC_REG(group, RB));
+ regmap_read(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RB),
+ &tcbpwm->duty);
tcbpwm->div = cmr & ATMEL_TC_TCCLKS;
- tcbpwm->period = __raw_readl(regs + ATMEL_TC_REG(group, RC));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, RC),
+ &tcbpwm->period);
cmr &= (ATMEL_TC_TCCLKS | ATMEL_TC_ACMR_MASK |
ATMEL_TC_BCMR_MASK);
} else
cmr = 0;
cmr |= ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO | ATMEL_TC_EEVT_XC0;
- __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
spin_unlock(&tcbpwmc->lock);
tcbpwmc->pwms[pwm->hwpwm] = tcbpwm;
@@ -131,9 +140,8 @@ static void atmel_tcb_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- struct atmel_tc *tc = tcbpwmc->tc;
- clk_disable_unprepare(tc->clk[pwm->hwpwm / 2]);
+ clk_disable_unprepare(tcbpwmc->clk);
tcbpwmc->pwms[pwm->hwpwm] = NULL;
devm_kfree(chip->dev, tcbpwm);
}
@@ -142,10 +150,6 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- struct atmel_tc *tc = tcbpwmc->tc;
- void __iomem *regs = tc->regs;
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
unsigned cmr;
enum pwm_polarity polarity = tcbpwm->polarity;
@@ -161,10 +165,10 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
polarity = !polarity;
spin_lock(&tcbpwmc->lock);
- cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
- if (index == 0) {
+ if (pwm->hwpwm == 0) {
cmr &= ~ATMEL_TC_ACMR_MASK;
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ASWTRG_CLEAR;
@@ -178,20 +182,22 @@ static void atmel_tcb_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
cmr |= ATMEL_TC_BSWTRG_SET;
}
- __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
/*
* Use software trigger to apply the new setting.
* If both PWM devices in this group are disabled we stop the clock.
*/
if (!(cmr & (ATMEL_TC_ACPC | ATMEL_TC_BCPC))) {
- __raw_writel(ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS,
- regs + ATMEL_TC_REG(group, CCR));
- tcbpwmc->bkup[group].enabled = 1;
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG | ATMEL_TC_CLKDIS);
+ tcbpwmc->bkup.enabled = 1;
} else {
- __raw_writel(ATMEL_TC_SWTRG, regs +
- ATMEL_TC_REG(group, CCR));
- tcbpwmc->bkup[group].enabled = 0;
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG);
+ tcbpwmc->bkup.enabled = 0;
}
spin_unlock(&tcbpwmc->lock);
@@ -201,10 +207,6 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- struct atmel_tc *tc = tcbpwmc->tc;
- void __iomem *regs = tc->regs;
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
u32 cmr;
enum pwm_polarity polarity = tcbpwm->polarity;
@@ -220,12 +222,12 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
polarity = !polarity;
spin_lock(&tcbpwmc->lock);
- cmr = __raw_readl(regs + ATMEL_TC_REG(group, CMR));
+ regmap_read(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), &cmr);
/* flush old setting and set the new one */
cmr &= ~ATMEL_TC_TCCLKS;
- if (index == 0) {
+ if (pwm->hwpwm == 0) {
cmr &= ~ATMEL_TC_ACMR_MASK;
/* Set CMR flags according to given polarity */
@@ -248,7 +250,7 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
* this config till next config call.
*/
if (tcbpwm->duty != tcbpwm->period && tcbpwm->duty > 0) {
- if (index == 0) {
+ if (pwm->hwpwm == 0) {
if (polarity == PWM_POLARITY_INVERSED)
cmr |= ATMEL_TC_ACPA_SET | ATMEL_TC_ACPC_CLEAR;
else
@@ -263,19 +265,24 @@ static int atmel_tcb_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
cmr |= (tcbpwm->div & ATMEL_TC_TCCLKS);
- __raw_writel(cmr, regs + ATMEL_TC_REG(group, CMR));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CMR), cmr);
- if (index == 0)
- __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RA));
+ if (pwm->hwpwm == 0)
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RA),
+ tcbpwm->duty);
else
- __raw_writel(tcbpwm->duty, regs + ATMEL_TC_REG(group, RB));
+ regmap_write(tcbpwmc->regmap,
+ ATMEL_TC_REG(tcbpwmc->channel, RB),
+ tcbpwm->duty);
- __raw_writel(tcbpwm->period, regs + ATMEL_TC_REG(group, RC));
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, RC),
+ tcbpwm->period);
/* Use software trigger to apply the new setting */
- __raw_writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
- regs + ATMEL_TC_REG(group, CCR));
- tcbpwmc->bkup[group].enabled = 1;
+ regmap_write(tcbpwmc->regmap, ATMEL_TC_REG(tcbpwmc->channel, CCR),
+ ATMEL_TC_SWTRG | ATMEL_TC_CLKEN);
+ tcbpwmc->bkup.enabled = 1;
spin_unlock(&tcbpwmc->lock);
return 0;
}
@@ -285,29 +292,29 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
{
struct atmel_tcb_pwm_chip *tcbpwmc = to_tcb_chip(chip);
struct atmel_tcb_pwm_device *tcbpwm = pwm_get_chip_data(pwm);
- unsigned group = pwm->hwpwm / 2;
- unsigned index = pwm->hwpwm % 2;
struct atmel_tcb_pwm_device *atcbpwm = NULL;
- struct atmel_tc *tc = tcbpwmc->tc;
- int i;
+ int i = 0;
int slowclk = 0;
unsigned period;
unsigned duty;
- unsigned rate = clk_get_rate(tc->clk[group]);
+ unsigned rate = clk_get_rate(tcbpwmc->clk);
unsigned long long min;
unsigned long long max;
/*
* Find best clk divisor:
* the smallest divisor which can fulfill the period_ns requirements.
+ * If there is a gclk, the first divisor is actually the gclk selector
*/
- for (i = 0; i < 5; ++i) {
- if (atmel_tc_divisors[i] == 0) {
+ if (tcbpwmc->gclk)
+ i = 1;
+ for (; i < ARRAY_SIZE(atmel_tcb_divisors); ++i) {
+ if (atmel_tcb_divisors[i] == 0) {
slowclk = i;
continue;
}
- min = div_u64((u64)NSEC_PER_SEC * atmel_tc_divisors[i], rate);
- max = min << tc->tcb_config->counter_width;
+ min = div_u64((u64)NSEC_PER_SEC * atmel_tcb_divisors[i], rate);
+ max = min << tcbpwmc->width;
if (max >= period_ns)
break;
}
@@ -316,11 +323,11 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
* If none of the divisor are small enough to represent period_ns
* take slow clock (32KHz).
*/
- if (i == 5) {
+ if (i == ARRAY_SIZE(atmel_tcb_divisors)) {
i = slowclk;
- rate = clk_get_rate(tc->slow_clk);
+ rate = clk_get_rate(tcbpwmc->slow_clk);
min = div_u64(NSEC_PER_SEC, rate);
- max = min << tc->tcb_config->counter_width;
+ max = min << tcbpwmc->width;
/* If period is too big return ERANGE error */
if (max < period_ns)
@@ -330,17 +337,13 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
duty = div_u64(duty_ns, min);
period = div_u64(period_ns, min);
- if (index == 0)
- atcbpwm = tcbpwmc->pwms[pwm->hwpwm + 1];
+ if (pwm->hwpwm == 0)
+ atcbpwm = tcbpwmc->pwms[1];
else
- atcbpwm = tcbpwmc->pwms[pwm->hwpwm - 1];
+ atcbpwm = tcbpwmc->pwms[0];
/*
- * PWM devices provided by TCB driver are grouped by 2:
- * - group 0: PWM 0 & 1
- * - group 1: PWM 2 & 3
- * - group 2: PWM 4 & 5
- *
+ * PWM devices provided by the TCB driver are grouped by 2.
* PWM devices in a given group must be configured with the
* same period_ns.
*
@@ -376,32 +379,75 @@ static const struct pwm_ops atmel_tcb_pwm_ops = {
.owner = THIS_MODULE,
};
+static struct atmel_tcb_config tcb_rm9200_config = {
+ .counter_width = 16,
+};
+
+static struct atmel_tcb_config tcb_sam9x5_config = {
+ .counter_width = 32,
+};
+
+static struct atmel_tcb_config tcb_sama5d2_config = {
+ .counter_width = 32,
+ .has_gclk = 1,
+};
+
+static const struct of_device_id atmel_tcb_of_match[] = {
+ { .compatible = "atmel,at91rm9200-tcb", .data = &tcb_rm9200_config, },
+ { .compatible = "atmel,at91sam9x5-tcb", .data = &tcb_sam9x5_config, },
+ { .compatible = "atmel,sama5d2-tcb", .data = &tcb_sama5d2_config, },
+ { /* sentinel */ }
+};
+
static int atmel_tcb_pwm_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct atmel_tcb_pwm_chip *tcbpwm;
+ const struct atmel_tcb_config *config;
struct device_node *np = pdev->dev.of_node;
- struct atmel_tc *tc;
+ struct regmap *regmap;
+ struct clk *clk, *gclk = NULL;
+ struct clk *slow_clk;
+ char clk_name[] = "t0_clk";
int err;
- int tcblock;
+ int channel;
- err = of_property_read_u32(np, "tc-block", &tcblock);
+ err = of_property_read_u32(np, "reg", &channel);
if (err < 0) {
dev_err(&pdev->dev,
- "failed to get Timer Counter Block number from device tree (error: %d)\n",
+ "failed to get Timer Counter Block channel from device tree (error: %d)\n",
err);
return err;
}
- tc = atmel_tc_alloc(tcblock);
- if (tc == NULL) {
- dev_err(&pdev->dev, "failed to allocate Timer Counter Block\n");
- return -ENOMEM;
+ regmap = syscon_node_to_regmap(np->parent);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ slow_clk = of_clk_get_by_name(np->parent, "slow_clk");
+ if (IS_ERR(slow_clk))
+ return PTR_ERR(slow_clk);
+
+ clk_name[1] += channel;
+ clk = of_clk_get_by_name(np->parent, clk_name);
+ if (IS_ERR(clk))
+ clk = of_clk_get_by_name(np->parent, "t0_clk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ match = of_match_node(atmel_tcb_of_match, np->parent);
+ config = match->data;
+
+ if (config->has_gclk) {
+ gclk = of_clk_get_by_name(np->parent, "gclk");
+ if (IS_ERR(gclk))
+ return PTR_ERR(gclk);
}
tcbpwm = devm_kzalloc(&pdev->dev, sizeof(*tcbpwm), GFP_KERNEL);
if (tcbpwm == NULL) {
err = -ENOMEM;
- goto err_free_tc;
+ goto err_slow_clk;
}
tcbpwm->chip.dev = &pdev->dev;
@@ -410,11 +456,16 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
tcbpwm->chip.of_pwm_n_cells = 3;
tcbpwm->chip.base = -1;
tcbpwm->chip.npwm = NPWM;
- tcbpwm->tc = tc;
-
- err = clk_prepare_enable(tc->slow_clk);
+ tcbpwm->channel = channel;
+ tcbpwm->regmap = regmap;
+ tcbpwm->clk = clk;
+ tcbpwm->gclk = gclk;
+ tcbpwm->slow_clk = slow_clk;
+ tcbpwm->width = config->counter_width;
+
+ err = clk_prepare_enable(slow_clk);
if (err)
- goto err_free_tc;
+ goto err_slow_clk;
spin_lock_init(&tcbpwm->lock);
@@ -427,10 +478,10 @@ static int atmel_tcb_pwm_probe(struct platform_device *pdev)
return 0;
err_disable_clk:
- clk_disable_unprepare(tcbpwm->tc->slow_clk);
+ clk_disable_unprepare(tcbpwm->slow_clk);
-err_free_tc:
- atmel_tc_free(tc);
+err_slow_clk:
+ clk_put(slow_clk);
return err;
}
@@ -440,14 +491,14 @@ static int atmel_tcb_pwm_remove(struct platform_device *pdev)
struct atmel_tcb_pwm_chip *tcbpwm = platform_get_drvdata(pdev);
int err;
- clk_disable_unprepare(tcbpwm->tc->slow_clk);
+ clk_disable_unprepare(tcbpwm->slow_clk);
+ clk_put(tcbpwm->slow_clk);
+ clk_put(tcbpwm->clk);
err = pwmchip_remove(&tcbpwm->chip);
if (err < 0)
return err;
- atmel_tc_free(tcbpwm->tc);
-
return 0;
}
@@ -461,38 +512,33 @@ MODULE_DEVICE_TABLE(of, atmel_tcb_pwm_dt_ids);
static int atmel_tcb_pwm_suspend(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
- void __iomem *base = tcbpwm->tc->regs;
- int i;
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup;
+ unsigned int channel = tcbpwm->channel;
- for (i = 0; i < (NPWM / 2); i++) {
- struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), &chan->cmr);
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), &chan->ra);
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), &chan->rb);
+ regmap_read(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), &chan->rc);
- chan->cmr = readl(base + ATMEL_TC_REG(i, CMR));
- chan->ra = readl(base + ATMEL_TC_REG(i, RA));
- chan->rb = readl(base + ATMEL_TC_REG(i, RB));
- chan->rc = readl(base + ATMEL_TC_REG(i, RC));
- }
return 0;
}
static int atmel_tcb_pwm_resume(struct device *dev)
{
struct atmel_tcb_pwm_chip *tcbpwm = dev_get_drvdata(dev);
- void __iomem *base = tcbpwm->tc->regs;
- int i;
-
- for (i = 0; i < (NPWM / 2); i++) {
- struct atmel_tcb_channel *chan = &tcbpwm->bkup[i];
-
- writel(chan->cmr, base + ATMEL_TC_REG(i, CMR));
- writel(chan->ra, base + ATMEL_TC_REG(i, RA));
- writel(chan->rb, base + ATMEL_TC_REG(i, RB));
- writel(chan->rc, base + ATMEL_TC_REG(i, RC));
- if (chan->enabled) {
- writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
- base + ATMEL_TC_REG(i, CCR));
- }
- }
+ struct atmel_tcb_channel *chan = &tcbpwm->bkup;
+ unsigned int channel = tcbpwm->channel;
+
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, CMR), chan->cmr);
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RA), chan->ra);
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RB), chan->rb);
+ regmap_write(tcbpwm->regmap, ATMEL_TC_REG(channel, RC), chan->rc);
+
+ if (chan->enabled)
+ regmap_write(tcbpwm->regmap,
+ ATMEL_TC_REG(channel, CCR),
+ ATMEL_TC_CLKEN | ATMEL_TC_SWTRG);
+
return 0;
}
#endif
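With the new binding, the chip drives a single TC channel through the syscon regmap of the parent TC block instead of the removed atmel_tc helpers. A condensed sketch of that lookup pattern as used above (function name hypothetical, error handling trimmed, includes assumed from the file above):

static int example_tcb_setup(struct device_node *np, unsigned int channel)
{
	struct regmap *regmap;
	u32 cmr;

	/* The PWM node is a child of the TC block; the syscon lives on the parent. */
	regmap = syscon_node_to_regmap(np->parent);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Register offsets still come from ATMEL_TC_REG(channel, reg). */
	regmap_read(regmap, ATMEL_TC_REG(channel, CMR), &cmr);
	regmap_write(regmap, ATMEL_TC_REG(channel, CMR), cmr | ATMEL_TC_WAVE);

	return 0;
}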
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 6161e7e3e9ac..5813339b597b 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -401,7 +401,6 @@ MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids);
static int atmel_pwm_probe(struct platform_device *pdev)
{
struct atmel_pwm_chip *atmel_pwm;
- struct resource *res;
int ret;
atmel_pwm = devm_kzalloc(&pdev->dev, sizeof(*atmel_pwm), GFP_KERNEL);
@@ -412,8 +411,7 @@ static int atmel_pwm_probe(struct platform_device *pdev)
atmel_pwm->data = of_device_get_match_data(&pdev->dev);
atmel_pwm->updated_pwms = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- atmel_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(atmel_pwm->base))
return PTR_ERR(atmel_pwm->base);
diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c
index 79b1e58e946d..f4853c4a2d75 100644
--- a/drivers/pwm/pwm-bcm-iproc.c
+++ b/drivers/pwm/pwm-bcm-iproc.c
@@ -197,7 +197,6 @@ static const struct pwm_ops iproc_pwm_ops = {
static int iproc_pwmc_probe(struct platform_device *pdev)
{
struct iproc_pwmc *ip;
- struct resource *res;
unsigned int i;
u32 value;
int ret;
@@ -215,8 +214,7 @@ static int iproc_pwmc_probe(struct platform_device *pdev)
ip->chip.of_xlate = of_pwm_xlate_with_flags;
ip->chip.of_pwm_n_cells = 3;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ip->base = devm_ioremap_resource(&pdev->dev, res);
+ ip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ip->base))
return PTR_ERR(ip->base);
diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c
index 16c5898b934a..578b3621c97e 100644
--- a/drivers/pwm/pwm-bcm-kona.c
+++ b/drivers/pwm/pwm-bcm-kona.c
@@ -259,7 +259,6 @@ static const struct pwm_ops kona_pwm_ops = {
static int kona_pwmc_probe(struct platform_device *pdev)
{
struct kona_pwmc *kp;
- struct resource *res;
unsigned int chan;
unsigned int value = 0;
int ret = 0;
@@ -277,8 +276,7 @@ static int kona_pwmc_probe(struct platform_device *pdev)
kp->chip.of_xlate = of_pwm_xlate_with_flags;
kp->chip.of_pwm_n_cells = 3;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- kp->base = devm_ioremap_resource(&pdev->dev, res);
+ kp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(kp->base))
return PTR_ERR(kp->base);
diff --git a/drivers/pwm/pwm-bcm2835.c b/drivers/pwm/pwm-bcm2835.c
index 6841dcfe27fc..6ff5f04b3e07 100644
--- a/drivers/pwm/pwm-bcm2835.c
+++ b/drivers/pwm/pwm-bcm2835.c
@@ -58,13 +58,15 @@ static void bcm2835_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
writel(value, pc->base + PWM_CONTROL);
}
-static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
- int duty_ns, int period_ns)
+static int bcm2835_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
{
+
struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
unsigned long rate = clk_get_rate(pc->clk);
+ unsigned long long period;
unsigned long scaler;
- u32 period;
+ u32 val;
if (!rate) {
dev_err(pc->dev, "failed to get clock rate\n");
@@ -72,54 +74,34 @@ static int bcm2835_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
}
scaler = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate);
- period = DIV_ROUND_CLOSEST(period_ns, scaler);
+ /* set period */
+ period = DIV_ROUND_CLOSEST_ULL(state->period, scaler);
- if (period < PERIOD_MIN)
+ /* don't accept a period that is too small or has been truncated */
+ if ((period < PERIOD_MIN) || (period > U32_MAX))
return -EINVAL;
- writel(DIV_ROUND_CLOSEST(duty_ns, scaler),
- pc->base + DUTY(pwm->hwpwm));
writel(period, pc->base + PERIOD(pwm->hwpwm));
- return 0;
-}
-
-static int bcm2835_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- u32 value;
-
- value = readl(pc->base + PWM_CONTROL);
- value |= PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm);
- writel(value, pc->base + PWM_CONTROL);
+ /* set duty cycle */
+ val = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, scaler);
+ writel(val, pc->base + DUTY(pwm->hwpwm));
- return 0;
-}
+ /* set polarity */
+ val = readl(pc->base + PWM_CONTROL);
-static void bcm2835_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
-{
- struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- u32 value;
-
- value = readl(pc->base + PWM_CONTROL);
- value &= ~(PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm));
- writel(value, pc->base + PWM_CONTROL);
-}
-
-static int bcm2835_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
- enum pwm_polarity polarity)
-{
- struct bcm2835_pwm *pc = to_bcm2835_pwm(chip);
- u32 value;
-
- value = readl(pc->base + PWM_CONTROL);
+ if (state->polarity == PWM_POLARITY_NORMAL)
+ val &= ~(PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ else
+ val |= PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm);
- if (polarity == PWM_POLARITY_NORMAL)
- value &= ~(PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm));
+ /* enable/disable */
+ if (state->enabled)
+ val |= PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm);
else
- value |= PWM_POLARITY << PWM_CONTROL_SHIFT(pwm->hwpwm);
+ val &= ~(PWM_ENABLE << PWM_CONTROL_SHIFT(pwm->hwpwm));
- writel(value, pc->base + PWM_CONTROL);
+ writel(val, pc->base + PWM_CONTROL);
return 0;
}
@@ -127,17 +109,13 @@ static int bcm2835_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
static const struct pwm_ops bcm2835_pwm_ops = {
.request = bcm2835_pwm_request,
.free = bcm2835_pwm_free,
- .config = bcm2835_pwm_config,
- .enable = bcm2835_pwm_enable,
- .disable = bcm2835_pwm_disable,
- .set_polarity = bcm2835_set_polarity,
+ .apply = bcm2835_pwm_apply,
.owner = THIS_MODULE,
};
static int bcm2835_pwm_probe(struct platform_device *pdev)
{
struct bcm2835_pwm *pc;
- struct resource *res;
int ret;
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
@@ -146,8 +124,7 @@ static int bcm2835_pwm_probe(struct platform_device *pdev)
pc->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->base = devm_ioremap_resource(&pdev->dev, res);
+ pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
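The bcm2835 conversion collapses the legacy .config/.enable/.disable/.set_polarity callbacks into a single atomic .apply() that receives the complete requested state in one call. A skeleton of the pattern; the hw_write_*() helpers and EXAMPLE_PERIOD_MIN are hypothetical stand-ins for the register accesses above:

static int example_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
			     const struct pwm_state *state)
{
	/* Reject settings the hardware cannot provide instead of silently rounding. */
	if (state->period < EXAMPLE_PERIOD_MIN)
		return -EINVAL;

	hw_write_period(chip, pwm->hwpwm, state->period);
	hw_write_duty(chip, pwm->hwpwm, state->duty_cycle);
	hw_write_polarity(chip, pwm->hwpwm, state->polarity);
	hw_write_enable(chip, pwm->hwpwm, state->enabled);

	return 0;
}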
diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c
index b91c477cc84b..fe405289e582 100644
--- a/drivers/pwm/pwm-berlin.c
+++ b/drivers/pwm/pwm-berlin.c
@@ -186,15 +186,13 @@ MODULE_DEVICE_TABLE(of, berlin_pwm_match);
static int berlin_pwm_probe(struct platform_device *pdev)
{
struct berlin_pwm_chip *pwm;
- struct resource *res;
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
if (!pwm)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
index fea612c45f20..8b66f9d2f589 100644
--- a/drivers/pwm/pwm-brcmstb.c
+++ b/drivers/pwm/pwm-brcmstb.c
@@ -234,7 +234,6 @@ MODULE_DEVICE_TABLE(of, brcmstb_pwm_of_match);
static int brcmstb_pwm_probe(struct platform_device *pdev)
{
struct brcmstb_pwm *p;
- struct resource *res;
int ret;
p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
@@ -262,8 +261,7 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
p->chip.base = -1;
p->chip.npwm = 2;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- p->base = devm_ioremap_resource(&pdev->dev, res);
+ p->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(p->base)) {
ret = PTR_ERR(p->base);
goto out_clk;
diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c
index ba9500aca078..cb1af86873ee 100644
--- a/drivers/pwm/pwm-clps711x.c
+++ b/drivers/pwm/pwm-clps711x.c
@@ -113,14 +113,12 @@ static struct pwm_device *clps711x_pwm_xlate(struct pwm_chip *chip,
static int clps711x_pwm_probe(struct platform_device *pdev)
{
struct clps711x_chip *priv;
- struct resource *res;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->pmpcon = devm_ioremap_resource(&pdev->dev, res);
+ priv->pmpcon = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->pmpcon))
return PTR_ERR(priv->pmpcon);
diff --git a/drivers/pwm/pwm-crc.c b/drivers/pwm/pwm-crc.c
index ecfdfac0c2d9..1e2276808b7a 100644
--- a/drivers/pwm/pwm-crc.c
+++ b/drivers/pwm/pwm-crc.c
@@ -64,7 +64,7 @@ static int crc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
}
if (state->polarity != PWM_POLARITY_NORMAL)
- return -EOPNOTSUPP;
+ return -EINVAL;
if (pwm_is_enabled(pwm) && !state->enabled) {
err = regmap_write(crc_pwm->regmap, BACKLIGHT_EN, 0);
diff --git a/drivers/pwm/pwm-dwc.c b/drivers/pwm/pwm-dwc.c
new file mode 100644
index 000000000000..f6c98e0d57c2
--- /dev/null
+++ b/drivers/pwm/pwm-dwc.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DesignWare PWM Controller driver
+ *
+ * Copyright (C) 2018-2020 Intel Corporation
+ *
+ * Author: Felipe Balbi (Intel)
+ * Author: Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ * Author: Raymond Tan <raymond.tan@intel.com>
+ *
+ * Limitations:
+ * - The hardware cannot generate a 0 % or 100 % duty cycle. Both high and low
+ * periods are one or more input clock periods long.
+ */
+
+#include <linux/bitops.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pm_runtime.h>
+#include <linux/pwm.h>
+
+#define DWC_TIM_LD_CNT(n) ((n) * 0x14)
+#define DWC_TIM_LD_CNT2(n) (((n) * 4) + 0xb0)
+#define DWC_TIM_CUR_VAL(n) (((n) * 0x14) + 0x04)
+#define DWC_TIM_CTRL(n) (((n) * 0x14) + 0x08)
+#define DWC_TIM_EOI(n) (((n) * 0x14) + 0x0c)
+#define DWC_TIM_INT_STS(n) (((n) * 0x14) + 0x10)
+
+#define DWC_TIMERS_INT_STS 0xa0
+#define DWC_TIMERS_EOI 0xa4
+#define DWC_TIMERS_RAW_INT_STS 0xa8
+#define DWC_TIMERS_COMP_VERSION 0xac
+
+#define DWC_TIMERS_TOTAL 8
+#define DWC_CLK_PERIOD_NS 10
+
+/* Timer Control Register */
+#define DWC_TIM_CTRL_EN BIT(0)
+#define DWC_TIM_CTRL_MODE BIT(1)
+#define DWC_TIM_CTRL_MODE_FREE (0 << 1)
+#define DWC_TIM_CTRL_MODE_USER (1 << 1)
+#define DWC_TIM_CTRL_INT_MASK BIT(2)
+#define DWC_TIM_CTRL_PWM BIT(3)
+
+struct dwc_pwm_ctx {
+ u32 cnt;
+ u32 cnt2;
+ u32 ctrl;
+};
+
+struct dwc_pwm {
+ struct pwm_chip chip;
+ void __iomem *base;
+ struct dwc_pwm_ctx ctx[DWC_TIMERS_TOTAL];
+};
+#define to_dwc_pwm(p) (container_of((p), struct dwc_pwm, chip))
+
+static inline u32 dwc_pwm_readl(struct dwc_pwm *dwc, u32 offset)
+{
+ return readl(dwc->base + offset);
+}
+
+static inline void dwc_pwm_writel(struct dwc_pwm *dwc, u32 value, u32 offset)
+{
+ writel(value, dwc->base + offset);
+}
+
+static void __dwc_pwm_set_enable(struct dwc_pwm *dwc, int pwm, int enabled)
+{
+ u32 reg;
+
+ reg = dwc_pwm_readl(dwc, DWC_TIM_CTRL(pwm));
+
+ if (enabled)
+ reg |= DWC_TIM_CTRL_EN;
+ else
+ reg &= ~DWC_TIM_CTRL_EN;
+
+ dwc_pwm_writel(dwc, reg, DWC_TIM_CTRL(pwm));
+}
+
+static int __dwc_pwm_configure_timer(struct dwc_pwm *dwc,
+ struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ u64 tmp;
+ u32 ctrl;
+ u32 high;
+ u32 low;
+
+ /*
+ * Calculate the widths of the low and high periods in terms of input
+ * clock periods and check that the results are within the HW limits,
+ * between 1 and 2^32 periods.
+ */
+ tmp = DIV_ROUND_CLOSEST_ULL(state->duty_cycle, DWC_CLK_PERIOD_NS);
+ if (tmp < 1 || tmp > (1ULL << 32))
+ return -ERANGE;
+ low = tmp - 1;
+
+ tmp = DIV_ROUND_CLOSEST_ULL(state->period - state->duty_cycle,
+ DWC_CLK_PERIOD_NS);
+ if (tmp < 1 || tmp > (1ULL << 32))
+ return -ERANGE;
+ high = tmp - 1;
+
+ /*
+ * The specification says the timer usage flow is to disable the timer,
+ * then program it, followed by an enable. It also says the Load Count
+ * is loaded into the timer after it is enabled - either after a disable
+ * or a reset. Based on measurements it also happens without a disable
+ * whenever the Load Count is updated. But follow the specification.
+ */
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+
+ /*
+ * Write the Load Count and Load Count 2 registers. The former defines
+ * the width of the low period and the latter the width of the high
+ * period, in terms of input clock periods:
+ * Width = ((Count + 1) * input clock period).
+ */
+ dwc_pwm_writel(dwc, low, DWC_TIM_LD_CNT(pwm->hwpwm));
+ dwc_pwm_writel(dwc, high, DWC_TIM_LD_CNT2(pwm->hwpwm));
+
+ /*
+ * Set user-defined mode: the timer reloads from the Load Count
+ * registers when it counts down to 0.
+ * Set PWM mode: the output toggles, and the widths of the low and
+ * high periods are set by the Load Count registers.
+ */
+ ctrl = DWC_TIM_CTRL_MODE_USER | DWC_TIM_CTRL_PWM;
+ dwc_pwm_writel(dwc, ctrl, DWC_TIM_CTRL(pwm->hwpwm));
+
+ /*
+ * Enable the timer. The output starts with the low period.
+ */
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, state->enabled);
+
+ return 0;
+}
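+/*
+ * Worked example (editor's sketch): the input clock is fixed at 100 MHz
+ * (DWC_CLK_PERIOD_NS = 10), so a 1 ms period with a 30 % duty cycle
+ * gives duty_cycle = 300000 ns -> tmp = 30000 -> low = 29999, and
+ * period - duty_cycle = 700000 ns -> tmp = 70000 -> high = 69999.
+ * Because the output is inverted, the duty cycle programs the low-time
+ * register (Load Count) and the remainder the high time (Load Count 2).
+ */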
+
+static int dwc_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+
+ if (state->polarity != PWM_POLARITY_INVERSED)
+ return -EINVAL;
+
+ if (state->enabled) {
+ if (!pwm->state.enabled)
+ pm_runtime_get_sync(chip->dev);
+ return __dwc_pwm_configure_timer(dwc, pwm, state);
+ } else {
+ if (pwm->state.enabled) {
+ __dwc_pwm_set_enable(dwc, pwm->hwpwm, false);
+ pm_runtime_put_sync(chip->dev);
+ }
+ }
+
+ return 0;
+}
+
+static void dwc_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct dwc_pwm *dwc = to_dwc_pwm(chip);
+ u64 duty, period;
+
+ pm_runtime_get_sync(chip->dev);
+
+ state->enabled = !!(dwc_pwm_readl(dwc,
+ DWC_TIM_CTRL(pwm->hwpwm)) & DWC_TIM_CTRL_EN);
+
+ duty = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(pwm->hwpwm));
+ duty += 1;
+ duty *= DWC_CLK_PERIOD_NS;
+ state->duty_cycle = duty;
+
+ period = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(pwm->hwpwm));
+ period += 1;
+ period *= DWC_CLK_PERIOD_NS;
+ period += duty;
+ state->period = period;
+
+ state->polarity = PWM_POLARITY_INVERSED;
+
+ pm_runtime_put_sync(chip->dev);
+}
+
+static const struct pwm_ops dwc_pwm_ops = {
+ .apply = dwc_pwm_apply,
+ .get_state = dwc_pwm_get_state,
+ .owner = THIS_MODULE,
+};
+
+static int dwc_pwm_probe(struct pci_dev *pci, const struct pci_device_id *id)
+{
+ struct device *dev = &pci->dev;
+ struct dwc_pwm *dwc;
+ int ret;
+
+ dwc = devm_kzalloc(&pci->dev, sizeof(*dwc), GFP_KERNEL);
+ if (!dwc)
+ return -ENOMEM;
+
+ ret = pcim_enable_device(pci);
+ if (ret) {
+ dev_err(&pci->dev,
+ "Failed to enable device (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ pci_set_master(pci);
+
+ ret = pcim_iomap_regions(pci, BIT(0), pci_name(pci));
+ if (ret) {
+ dev_err(&pci->dev,
+ "Failed to iomap PCI BAR (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ dwc->base = pcim_iomap_table(pci)[0];
+ if (!dwc->base) {
+ dev_err(&pci->dev, "Base address missing\n");
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pci, dwc);
+
+ dwc->chip.dev = dev;
+ dwc->chip.ops = &dwc_pwm_ops;
+ dwc->chip.npwm = DWC_TIMERS_TOTAL;
+ dwc->chip.base = -1;
+
+ ret = pwmchip_add(&dwc->chip);
+ if (ret)
+ return ret;
+
+ pm_runtime_put(dev);
+ pm_runtime_allow(dev);
+
+ return 0;
+}
+
+static void dwc_pwm_remove(struct pci_dev *pci)
+{
+ struct dwc_pwm *dwc = pci_get_drvdata(pci);
+
+ pm_runtime_forbid(&pci->dev);
+ pm_runtime_get_noresume(&pci->dev);
+
+ pwmchip_remove(&dwc->chip);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc_pwm_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ if (dwc->chip.pwms[i].state.enabled) {
+ dev_err(dev, "PWM %u in use by consumer (%s)\n",
+ i, dwc->chip.pwms[i].label);
+ return -EBUSY;
+ }
+ dwc->ctx[i].cnt = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT(i));
+ dwc->ctx[i].cnt2 = dwc_pwm_readl(dwc, DWC_TIM_LD_CNT2(i));
+ dwc->ctx[i].ctrl = dwc_pwm_readl(dwc, DWC_TIM_CTRL(i));
+ }
+
+ return 0;
+}
+
+static int dwc_pwm_resume(struct device *dev)
+{
+ struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
+ struct dwc_pwm *dwc = pci_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < DWC_TIMERS_TOTAL; i++) {
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt, DWC_TIM_LD_CNT(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].cnt2, DWC_TIM_LD_CNT2(i));
+ dwc_pwm_writel(dwc, dwc->ctx[i].ctrl, DWC_TIM_CTRL(i));
+ }
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dwc_pwm_pm_ops, dwc_pwm_suspend, dwc_pwm_resume);
+
+static const struct pci_device_id dwc_pwm_id_table[] = {
+ { PCI_VDEVICE(INTEL, 0x4bb7) }, /* Elkhart Lake */
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(pci, dwc_pwm_id_table);
+
+static struct pci_driver dwc_pwm_driver = {
+ .name = "pwm-dwc",
+ .probe = dwc_pwm_probe,
+ .remove = dwc_pwm_remove,
+ .id_table = dwc_pwm_id_table,
+ .driver = {
+ .pm = &dwc_pwm_pm_ops,
+ },
+};
+
+module_pci_driver(dwc_pwm_driver);
+
+MODULE_AUTHOR("Felipe Balbi (Intel)");
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@linux.intel.com>");
+MODULE_AUTHOR("Raymond Tan <raymond.tan@intel.com>");
+MODULE_DESCRIPTION("DesignWare PWM Controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c
index 4bab73073ad7..c9fc6f223640 100644
--- a/drivers/pwm/pwm-ep93xx.c
+++ b/drivers/pwm/pwm-ep93xx.c
@@ -169,15 +169,13 @@ static const struct pwm_ops ep93xx_pwm_ops = {
static int ep93xx_pwm_probe(struct platform_device *pdev)
{
struct ep93xx_pwm *ep93xx_pwm;
- struct resource *res;
int ret;
ep93xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_pwm), GFP_KERNEL);
if (!ep93xx_pwm)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ep93xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ ep93xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ep93xx_pwm->base))
return PTR_ERR(ep93xx_pwm->base);
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index 59272a920479..2a6801226aba 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -399,7 +399,6 @@ static const struct regmap_config fsl_pwm_regmap_config = {
static int fsl_pwm_probe(struct platform_device *pdev)
{
struct fsl_pwm_chip *fpc;
- struct resource *res;
void __iomem *base;
int ret;
@@ -412,8 +411,7 @@ static int fsl_pwm_probe(struct platform_device *pdev)
fpc->soc = of_device_get_match_data(&pdev->dev);
fpc->chip.dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c
index ad205fdad372..a1900d0a872e 100644
--- a/drivers/pwm/pwm-hibvt.c
+++ b/drivers/pwm/pwm-hibvt.c
@@ -190,9 +190,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
const struct hibvt_pwm_soc *soc =
of_device_get_match_data(&pdev->dev);
struct hibvt_pwm_chip *pwm_chip;
- struct resource *res;
- int ret;
- int i;
+ int ret, i;
pwm_chip = devm_kzalloc(&pdev->dev, sizeof(*pwm_chip), GFP_KERNEL);
if (pwm_chip == NULL)
@@ -213,8 +211,7 @@ static int hibvt_pwm_probe(struct platform_device *pdev)
pwm_chip->chip.of_pwm_n_cells = 3;
pwm_chip->soc = soc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm_chip->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm_chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm_chip->base))
return PTR_ERR(pwm_chip->base);
diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c
index a34d95ed70b2..6faf5b5a5584 100644
--- a/drivers/pwm/pwm-img.c
+++ b/drivers/pwm/pwm-img.c
@@ -240,7 +240,6 @@ static int img_pwm_probe(struct platform_device *pdev)
int ret;
u64 val;
unsigned long clk_rate;
- struct resource *res;
struct img_pwm_chip *pwm;
const struct of_device_id *of_dev_id;
@@ -250,8 +249,7 @@ static int img_pwm_probe(struct platform_device *pdev)
pwm->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c
index fcdf6befb838..aaf629bd8c35 100644
--- a/drivers/pwm/pwm-imx-tpm.c
+++ b/drivers/pwm/pwm-imx-tpm.c
@@ -350,13 +350,9 @@ static int pwm_imx_tpm_probe(struct platform_device *pdev)
return PTR_ERR(tpm->base);
tpm->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tpm->clk)) {
- ret = PTR_ERR(tpm->clk);
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get PWM clock: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(tpm->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(tpm->clk),
+ "failed to get PWM clock\n");
ret = clk_prepare_enable(tpm->clk);
if (ret) {
diff --git a/drivers/pwm/pwm-imx1.c b/drivers/pwm/pwm-imx1.c
index f8b2c2e001a7..727e0d3e249e 100644
--- a/drivers/pwm/pwm-imx1.c
+++ b/drivers/pwm/pwm-imx1.c
@@ -136,7 +136,6 @@ MODULE_DEVICE_TABLE(of, pwm_imx1_dt_ids);
static int pwm_imx1_probe(struct platform_device *pdev)
{
struct pwm_imx1_chip *imx;
- struct resource *r;
imx = devm_kzalloc(&pdev->dev, sizeof(*imx), GFP_KERNEL);
if (!imx)
@@ -145,31 +144,21 @@ static int pwm_imx1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(imx->clk_ipg)) {
- dev_err(&pdev->dev, "getting ipg clock failed with %ld\n",
- PTR_ERR(imx->clk_ipg));
- return PTR_ERR(imx->clk_ipg);
- }
+ if (IS_ERR(imx->clk_ipg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
+ "getting ipg clock failed\n");
imx->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(imx->clk_per)) {
- int ret = PTR_ERR(imx->clk_per);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get peripheral clock: %d\n",
- ret);
-
- return ret;
- }
+ if (IS_ERR(imx->clk_per))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
+ "failed to get peripheral clock\n");
imx->chip.ops = &pwm_imx1_ops;
imx->chip.dev = &pdev->dev;
imx->chip.base = -1;
imx->chip.npwm = 1;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ imx->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(imx->mmio_base))
return PTR_ERR(imx->mmio_base);
diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c
index c50d453552bd..18055326a2f3 100644
--- a/drivers/pwm/pwm-imx27.c
+++ b/drivers/pwm/pwm-imx27.c
@@ -235,8 +235,9 @@ static int pwm_imx27_apply(struct pwm_chip *chip, struct pwm_device *pwm,
period_cycles /= prescale;
c = clkrate * state->duty_cycle;
- do_div(c, NSEC_PER_SEC * prescale);
+ do_div(c, NSEC_PER_SEC);
duty_cycles = c;
+ duty_cycles /= prescale;
/*
* according to imx pwm RM, the real period value should be PERIOD
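The duty-cycle hunk above fixes a divisor overflow: do_div() truncates its divisor to 32 bits, so NSEC_PER_SEC * prescale wraps once prescale reaches 5. A worked example of the wrap:

/*
 * Sketch of the overflow fixed above (do_div() takes a u32 divisor):
 *   NSEC_PER_SEC * 8 = 8000000000, truncated to u32 -> 3705032704.
 * Dividing by NSEC_PER_SEC first and by prescale afterwards keeps both
 * divisors in range.
 */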
@@ -315,27 +316,14 @@ static int pwm_imx27_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, imx);
imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(imx->clk_ipg)) {
- int ret = PTR_ERR(imx->clk_ipg);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "getting ipg clock failed with %d\n",
- ret);
- return ret;
- }
+ if (IS_ERR(imx->clk_ipg))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_ipg),
+ "getting ipg clock failed\n");
imx->clk_per = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(imx->clk_per)) {
- int ret = PTR_ERR(imx->clk_per);
-
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev,
- "failed to get peripheral clock: %d\n",
- ret);
-
- return ret;
- }
+ if (IS_ERR(imx->clk_per))
+ return dev_err_probe(&pdev->dev, PTR_ERR(imx->clk_per),
+ "failed to get peripheral clock\n");
imx->chip.ops = &pwm_imx27_ops;
imx->chip.dev = &pdev->dev;
diff --git a/drivers/pwm/pwm-intel-lgm.c b/drivers/pwm/pwm-intel-lgm.c
new file mode 100644
index 000000000000..e9e54dda07aa
--- /dev/null
+++ b/drivers/pwm/pwm-intel-lgm.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Intel Corporation.
+ *
+ * Limitations:
+ * - The hardware supports a fixed period and configures 2-wire mode only.
+ * - Supports normal polarity only; the polarity cannot be changed.
+ * - When the PWM is disabled, the output becomes 0 (inactive), and the
+ * currently running period is not tracked.
+ * - When the duty cycle is changed, the PWM output may be a mix of the
+ * previous and new settings for the first period. From the second
+ * period on, the output is based on the new setting.
+ * - It is a dedicated PWM fan controller. There are no other consumers for
+ * this PWM controller.
+ */
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+
+#define LGM_PWM_FAN_CON0 0x0
+#define LGM_PWM_FAN_EN_EN BIT(0)
+#define LGM_PWM_FAN_EN_DIS 0x0
+#define LGM_PWM_FAN_EN_MSK BIT(0)
+#define LGM_PWM_FAN_MODE_2WIRE 0x0
+#define LGM_PWM_FAN_MODE_MSK BIT(1)
+#define LGM_PWM_FAN_DC_MSK GENMASK(23, 16)
+
+#define LGM_PWM_FAN_CON1 0x4
+#define LGM_PWM_FAN_MAX_RPM_MSK GENMASK(15, 0)
+
+#define LGM_PWM_MAX_RPM (BIT(16) - 1)
+#define LGM_PWM_DEFAULT_RPM 4000
+#define LGM_PWM_MAX_DUTY_CYCLE (BIT(8) - 1)
+
+#define LGM_PWM_DC_BITS 8
+
+#define LGM_PWM_PERIOD_2WIRE_NS (40 * NSEC_PER_MSEC)
+
+struct lgm_pwm_chip {
+ struct pwm_chip chip;
+ struct regmap *regmap;
+ u32 period;
+};
+
+static inline struct lgm_pwm_chip *to_lgm_pwm_chip(struct pwm_chip *chip)
+{
+ return container_of(chip, struct lgm_pwm_chip, chip);
+}
+
+static int lgm_pwm_enable(struct pwm_chip *chip, bool enable)
+{
+ struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ struct regmap *regmap = pc->regmap;
+
+ return regmap_update_bits(regmap, LGM_PWM_FAN_CON0, LGM_PWM_FAN_EN_MSK,
+ enable ? LGM_PWM_FAN_EN_EN : LGM_PWM_FAN_EN_DIS);
+}
+
+static int lgm_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ u32 duty_cycle, val;
+ int ret;
+
+ /* The hardware only supports normal polarity and fixed period. */
+ if (state->polarity != PWM_POLARITY_NORMAL || state->period < pc->period)
+ return -EINVAL;
+
+ if (!state->enabled)
+ return lgm_pwm_enable(chip, 0);
+
+ duty_cycle = min_t(u64, state->duty_cycle, pc->period);
+ val = DIV_ROUND_DOWN_ULL((u64)duty_cycle * LGM_PWM_MAX_DUTY_CYCLE, pc->period);
+
+ ret = regmap_update_bits(pc->regmap, LGM_PWM_FAN_CON0, LGM_PWM_FAN_DC_MSK,
+ FIELD_PREP(LGM_PWM_FAN_DC_MSK, val));
+ if (ret)
+ return ret;
+
+ return lgm_pwm_enable(chip, 1);
+}
+
+static void lgm_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct lgm_pwm_chip *pc = to_lgm_pwm_chip(chip);
+ u32 duty, val;
+
+ state->enabled = regmap_test_bits(pc->regmap, LGM_PWM_FAN_CON0,
+ LGM_PWM_FAN_EN_EN);
+ state->polarity = PWM_POLARITY_NORMAL;
+ state->period = pc->period; /* fixed period */
+
+ regmap_read(pc->regmap, LGM_PWM_FAN_CON0, &val);
+ duty = FIELD_GET(LGM_PWM_FAN_DC_MSK, val);
+ state->duty_cycle = DIV_ROUND_UP(duty * pc->period, LGM_PWM_MAX_DUTY_CYCLE);
+}
+
+static const struct pwm_ops lgm_pwm_ops = {
+ .get_state = lgm_pwm_get_state,
+ .apply = lgm_pwm_apply,
+ .owner = THIS_MODULE,
+};
+
+static void lgm_pwm_init(struct lgm_pwm_chip *pc)
+{
+ struct regmap *regmap = pc->regmap;
+ u32 con0_val;
+
+ con0_val = FIELD_PREP(LGM_PWM_FAN_MODE_MSK, LGM_PWM_FAN_MODE_2WIRE);
+ pc->period = LGM_PWM_PERIOD_2WIRE_NS;
+ regmap_update_bits(regmap, LGM_PWM_FAN_CON1, LGM_PWM_FAN_MAX_RPM_MSK,
+ LGM_PWM_DEFAULT_RPM);
+ regmap_update_bits(regmap, LGM_PWM_FAN_CON0, LGM_PWM_FAN_MODE_MSK,
+ con0_val);
+}
+
+static const struct regmap_config lgm_pwm_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+};
+
+static void lgm_clk_release(void *data)
+{
+ struct clk *clk = data;
+
+ clk_disable_unprepare(clk);
+}
+
+static int lgm_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, lgm_clk_release, clk);
+}
+
+static void lgm_reset_control_release(void *data)
+{
+ struct reset_control *rst = data;
+
+ reset_control_assert(rst);
+}
+
+static int lgm_reset_control_deassert(struct device *dev, struct reset_control *rst)
+{
+ int ret;
+
+ ret = reset_control_deassert(rst);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, lgm_reset_control_release, rst);
+}
+
+static int lgm_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct reset_control *rst;
+ struct lgm_pwm_chip *pc;
+ void __iomem *io_base;
+ struct clk *clk;
+ int ret;
+
+ pc = devm_kzalloc(dev, sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pc);
+
+ io_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(io_base))
+ return PTR_ERR(io_base);
+
+ pc->regmap = devm_regmap_init_mmio(dev, io_base, &lgm_pwm_regmap_config);
+ if (IS_ERR(pc->regmap))
+ return dev_err_probe(dev, PTR_ERR(pc->regmap),
+ "failed to init register map\n");
+
+ clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "failed to get clock\n");
+
+ ret = lgm_clk_enable(dev, clk);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to enable clock\n");
+
+ rst = devm_reset_control_get_exclusive(dev, NULL);
+ if (IS_ERR(rst))
+ return dev_err_probe(dev, PTR_ERR(rst),
+ "failed to get reset control\n");
+
+ ret = lgm_reset_control_deassert(dev, rst);
+ if (ret)
+ return dev_err_probe(dev, ret, "cannot deassert reset control\n");
+
+ pc->chip.dev = dev;
+ pc->chip.ops = &lgm_pwm_ops;
+ pc->chip.npwm = 1;
+ pc->chip.base = -1;
+
+ lgm_pwm_init(pc);
+
+ ret = pwmchip_add(&pc->chip);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to add PWM chip\n");
+
+ return 0;
+}
+
+static int lgm_pwm_remove(struct platform_device *pdev)
+{
+ struct lgm_pwm_chip *pc = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&pc->chip);
+}
+
+static const struct of_device_id lgm_pwm_of_match[] = {
+ { .compatible = "intel,lgm-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lgm_pwm_of_match);
+
+static struct platform_driver lgm_pwm_driver = {
+ .driver = {
+ .name = "intel-pwm",
+ .of_match_table = lgm_pwm_of_match,
+ },
+ .probe = lgm_pwm_probe,
+ .remove = lgm_pwm_remove,
+};
+module_platform_driver(lgm_pwm_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c
index 7d33e3646436..5ede8255926e 100644
--- a/drivers/pwm/pwm-iqs620a.c
+++ b/drivers/pwm/pwm-iqs620a.c
@@ -50,7 +50,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
int ret;
if (state->polarity != PWM_POLARITY_NORMAL)
- return -ENOTSUPP;
+ return -EINVAL;
if (state->period < IQS620_PWM_PERIOD_NS)
return -EINVAL;
diff --git a/drivers/pwm/pwm-keembay.c b/drivers/pwm/pwm-keembay.c
new file mode 100644
index 000000000000..cdfdef66ff8e
--- /dev/null
+++ b/drivers/pwm/pwm-keembay.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Intel Keem Bay PWM driver
+ *
+ * Copyright (C) 2020 Intel Corporation
+ * Authors: Lai Poey Seng <poey.seng.lai@intel.com>
+ * Vineetha G. Jaya Kumaran <vineetha.g.jaya.kumaran@intel.com>
+ *
+ * Limitations:
+ * - Upon disabling a channel, the currently running
+ * period will not be completed. However, upon
+ * reconfiguration of the duty cycle/period, the
+ * currently running period will be completed first.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+#include <linux/regmap.h>
+
+#define KMB_TOTAL_PWM_CHANNELS 6
+#define KMB_PWM_COUNT_MAX U16_MAX
+#define KMB_PWM_EN_BIT BIT(31)
+
+/* Mask */
+#define KMB_PWM_HIGH_MASK GENMASK(31, 16)
+#define KMB_PWM_LOW_MASK GENMASK(15, 0)
+#define KMB_PWM_LEADIN_MASK GENMASK(30, 0)
+
+/* PWM Register offset */
+#define KMB_PWM_LEADIN_OFFSET(ch) (0x00 + 4 * (ch))
+#define KMB_PWM_HIGHLOW_OFFSET(ch) (0x20 + 4 * (ch))
+
+struct keembay_pwm {
+ struct pwm_chip chip;
+ struct device *dev;
+ struct clk *clk;
+ void __iomem *base;
+};
+
+static inline struct keembay_pwm *to_keembay_pwm_dev(struct pwm_chip *chip)
+{
+ return container_of(chip, struct keembay_pwm, chip);
+}
+
+static void keembay_clk_unprepare(void *data)
+{
+ clk_disable_unprepare(data);
+}
+
+static int keembay_clk_enable(struct device *dev, struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dev, keembay_clk_unprepare, clk);
+}
+
+/*
+ * With gcc 10, CONFIG_CC_OPTIMIZE_FOR_SIZE and plain "inline" instead of
+ * "__always_inline", this fails to compile because the compiler cannot
+ * prove for all valid masks (e.g. KMB_PWM_LEADIN_MASK) that they are ok.
+ */
+static __always_inline void keembay_pwm_update_bits(struct keembay_pwm *priv, u32 mask,
+ u32 val, u32 offset)
+{
+ u32 buff = readl(priv->base + offset);
+
+ buff = u32_replace_bits(buff, val, mask);
+ writel(buff, priv->base + offset);
+}
+
+static void keembay_pwm_enable(struct keembay_pwm *priv, int ch)
+{
+ keembay_pwm_update_bits(priv, KMB_PWM_EN_BIT, 1,
+ KMB_PWM_LEADIN_OFFSET(ch));
+}
+
+static void keembay_pwm_disable(struct keembay_pwm *priv, int ch)
+{
+ keembay_pwm_update_bits(priv, KMB_PWM_EN_BIT, 0,
+ KMB_PWM_LEADIN_OFFSET(ch));
+}
+
+static void keembay_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
+ struct pwm_state *state)
+{
+ struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
+ unsigned long long high, low;
+ unsigned long clk_rate;
+ u32 highlow;
+
+ clk_rate = clk_get_rate(priv->clk);
+
+ /* Read channel enabled status */
+ highlow = readl(priv->base + KMB_PWM_LEADIN_OFFSET(pwm->hwpwm));
+ if (highlow & KMB_PWM_EN_BIT)
+ state->enabled = true;
+ else
+ state->enabled = false;
+
+ /* Read period and duty cycle */
+ highlow = readl(priv->base + KMB_PWM_HIGHLOW_OFFSET(pwm->hwpwm));
+ low = FIELD_GET(KMB_PWM_LOW_MASK, highlow) * NSEC_PER_SEC;
+ high = FIELD_GET(KMB_PWM_HIGH_MASK, highlow) * NSEC_PER_SEC;
+ state->duty_cycle = DIV_ROUND_UP_ULL(high, clk_rate);
+ state->period = DIV_ROUND_UP_ULL(high + low, clk_rate);
+ state->polarity = PWM_POLARITY_NORMAL;
+}
+
+static int keembay_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
+ const struct pwm_state *state)
+{
+ struct keembay_pwm *priv = to_keembay_pwm_dev(chip);
+ struct pwm_state current_state;
+ unsigned long long div;
+ unsigned long clk_rate;
+ u32 pwm_count = 0;
+ u16 high, low;
+
+ if (state->polarity != PWM_POLARITY_NORMAL)
+ return -EINVAL;
+
+ /*
+ * Configure the PWM repeat count as infinite in bits (15:0) and the
+ * lead-in low time as 0 in bits (30:16), both in terms of clock cycles.
+ */
+ keembay_pwm_update_bits(priv, KMB_PWM_LEADIN_MASK, 0,
+ KMB_PWM_LEADIN_OFFSET(pwm->hwpwm));
+
+ keembay_pwm_get_state(chip, pwm, &current_state);
+
+ if (!state->enabled) {
+ if (current_state.enabled)
+ keembay_pwm_disable(priv, pwm->hwpwm);
+ return 0;
+ }
+
+ /*
+ * The upper 16 bits and lower 16 bits of the KMB_PWM_HIGHLOW_OFFSET
+ * register contain the high and low times of the waveform, respectively.
+ * All the values are in terms of clock cycles.
+ */
+
+ clk_rate = clk_get_rate(priv->clk);
+ div = clk_rate * state->duty_cycle;
+ div = DIV_ROUND_DOWN_ULL(div, NSEC_PER_SEC);
+ if (div > KMB_PWM_COUNT_MAX)
+ return -ERANGE;
+
+ high = div;
+ div = clk_rate * state->period;
+ div = DIV_ROUND_DOWN_ULL(div, NSEC_PER_SEC);
+ div = div - high;
+ if (div > KMB_PWM_COUNT_MAX)
+ return -ERANGE;
+
+ low = div;
+
+ pwm_count = FIELD_PREP(KMB_PWM_HIGH_MASK, high) |
+ FIELD_PREP(KMB_PWM_LOW_MASK, low);
+
+ writel(pwm_count, priv->base + KMB_PWM_HIGHLOW_OFFSET(pwm->hwpwm));
+
+ if (state->enabled && !current_state.enabled)
+ keembay_pwm_enable(priv, pwm->hwpwm);
+
+ return 0;
+}
+
+static const struct pwm_ops keembay_pwm_ops = {
+ .owner = THIS_MODULE,
+ .apply = keembay_pwm_apply,
+ .get_state = keembay_pwm_get_state,
+};
+
+static int keembay_pwm_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct keembay_pwm *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(dev, PTR_ERR(priv->clk), "Failed to get clock\n");
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = keembay_clk_enable(dev, priv->clk);
+ if (ret)
+ return ret;
+
+ priv->chip.base = -1;
+ priv->chip.dev = dev;
+ priv->chip.ops = &keembay_pwm_ops;
+ priv->chip.npwm = KMB_TOTAL_PWM_CHANNELS;
+
+ ret = pwmchip_add(&priv->chip);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to add PWM chip\n");
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int keembay_pwm_remove(struct platform_device *pdev)
+{
+ struct keembay_pwm *priv = platform_get_drvdata(pdev);
+
+ return pwmchip_remove(&priv->chip);
+}
+
+static const struct of_device_id keembay_pwm_of_match[] = {
+ { .compatible = "intel,keembay-pwm" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, keembay_pwm_of_match);
+
+static struct platform_driver keembay_pwm_driver = {
+ .probe = keembay_pwm_probe,
+ .remove = keembay_pwm_remove,
+ .driver = {
+ .name = "pwm-keembay",
+ .of_match_table = keembay_pwm_of_match,
+ },
+};
+module_platform_driver(keembay_pwm_driver);
+
+MODULE_ALIAS("platform:pwm-keembay");
+MODULE_DESCRIPTION("Intel Keem Bay PWM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 7551253ada32..bf3f14fb5f24 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -275,6 +275,7 @@ static int lp3943_pwm_probe(struct platform_device *pdev)
lp3943_pwm->chip.dev = &pdev->dev;
lp3943_pwm->chip.ops = &lp3943_pwm_ops;
lp3943_pwm->chip.npwm = LP3943_NUM_PWMS;
+ lp3943_pwm->chip.base = -1;
platform_set_drvdata(pdev, lp3943_pwm);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
index 5ff11145c1a3..dc5133bec3e7 100644
--- a/drivers/pwm/pwm-lpc18xx-sct.c
+++ b/drivers/pwm/pwm-lpc18xx-sct.c
@@ -325,7 +325,6 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
{
struct lpc18xx_pwm_chip *lpc18xx_pwm;
struct pwm_device *pwm;
- struct resource *res;
int ret, i;
u64 val;
@@ -336,8 +335,7 @@ static int lpc18xx_pwm_probe(struct platform_device *pdev)
lpc18xx_pwm->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ lpc18xx_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc18xx_pwm->base))
return PTR_ERR(lpc18xx_pwm->base);
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index 710d9a207d2b..6b4090436c06 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -98,7 +98,6 @@ static const struct pwm_ops lpc32xx_pwm_ops = {
static int lpc32xx_pwm_probe(struct platform_device *pdev)
{
struct lpc32xx_pwm_chip *lpc32xx;
- struct resource *res;
int ret;
u32 val;
@@ -106,8 +105,7 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
if (!lpc32xx)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lpc32xx->base = devm_ioremap_resource(&pdev->dev, res);
+ lpc32xx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lpc32xx->base))
return PTR_ERR(lpc32xx->base);
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
index c6502cf7a7af..986786be1e49 100644
--- a/drivers/pwm/pwm-lpss-platform.c
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -58,7 +58,25 @@ static int pwm_lpss_probe_platform(struct platform_device *pdev)
platform_set_drvdata(pdev, lpwm);
- dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_PREPARE);
+ /*
+ * On Cherry Trail devices the GFX0._PS0 AML checks if the controller
+ * is on and, if it is not, turns it on and restores what it believes
+ * is the correct state of the PWM controller.
+ * Because of this we must disallow direct-complete, which would keep
+ * the controller (runtime-)suspended across resume, to avoid 2 issues:
+ * 1. The controller getting turned on without the linux-pm code
+ * knowing about it. On devices where the controller is unused,
+ * this causes it to stay on during the next suspend, causing high
+ * battery drain (because S0i3 is not reached).
+ * 2. The state-restoring code unexpectedly messing with the controller.
+ *
+ * Leaving the controller runtime-suspended (skipping runtime-resume +
+ * normal-suspend) during suspend is fine.
+ */
+ if (info->other_devices_aml_touches_pwm_regs)
+ dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NO_DIRECT_COMPLETE|
+ DPM_FLAG_SMART_SUSPEND);
+
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
@@ -73,24 +91,6 @@ static int pwm_lpss_remove_platform(struct platform_device *pdev)
return pwm_lpss_remove(lpwm);
}
-static int pwm_lpss_prepare(struct device *dev)
-{
- struct pwm_lpss_chip *lpwm = dev_get_drvdata(dev);
-
- /*
- * If other device's AML code touches the PWM regs on suspend/resume
- * force runtime-resume the PWM controller to allow this.
- */
- if (lpwm->info->other_devices_aml_touches_pwm_regs)
- return 0; /* Force runtime-resume */
-
- return 1; /* If runtime-suspended leave as is */
-}
-
-static const struct dev_pm_ops pwm_lpss_platform_pm_ops = {
- .prepare = pwm_lpss_prepare,
-};
-
static const struct acpi_device_id pwm_lpss_acpi_match[] = {
{ "80860F09", (unsigned long)&pwm_lpss_byt_info },
{ "80862288", (unsigned long)&pwm_lpss_bsw_info },
@@ -104,7 +104,6 @@ static struct platform_driver pwm_lpss_driver_platform = {
.driver = {
.name = "pwm-lpss",
.acpi_match_table = pwm_lpss_acpi_match,
- .pm = &pwm_lpss_platform_pm_ops,
},
.probe = pwm_lpss_probe_platform,
.remove = pwm_lpss_remove_platform,
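
With the .prepare callback gone, the same policy is expressed declaratively:
DPM_FLAG_NO_DIRECT_COMPLETE forbids the direct-complete optimization for this
device, and DPM_FLAG_SMART_SUSPEND lets the PM core leave an already
runtime-suspended device alone during system suspend. For reference, a hedged
sketch of the .prepare semantics the removed code relied on (the policy shown
is hypothetical):

	/*
	 * Sketch: a positive return from .prepare opts a runtime-suspended
	 * device into direct-complete (its suspend/resume callbacks are
	 * skipped); 0 forces the normal suspend path.
	 */
	static int example_prepare(struct device *dev)
	{
		return device_may_wakeup(dev) ? 0 : 1; /* hypothetical */
	}

	static const struct dev_pm_ops example_pm_ops = {
		.prepare = example_prepare,
	};
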
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 3444c56b4bed..939de93c157b 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -76,7 +76,12 @@ static int pwm_lpss_wait_for_update(struct pwm_device *pwm)
static inline int pwm_lpss_is_updating(struct pwm_device *pwm)
{
- return (pwm_lpss_read(pwm) & PWM_SW_UPDATE) ? -EBUSY : 0;
+ if (pwm_lpss_read(pwm) & PWM_SW_UPDATE) {
+ dev_err(pwm->chip->dev, "PWM_SW_UPDATE is still set, skipping update\n");
+ return -EBUSY;
+ }
+
+ return 0;
}
static void pwm_lpss_prepare(struct pwm_lpss_chip *lpwm, struct pwm_device *pwm,
diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c
index ab001ce55178..fcfc3b147e5f 100644
--- a/drivers/pwm/pwm-mediatek.c
+++ b/drivers/pwm/pwm-mediatek.c
@@ -30,12 +30,14 @@
#define PWM45DWIDTH_FIXUP 0x30
#define PWMTHRES 0x30
#define PWM45THRES_FIXUP 0x34
+#define PWM_CK_26M_SEL 0x210
#define PWM_CLK_DIV_MAX 7
struct pwm_mediatek_of_data {
unsigned int num_pwms;
bool pwm45_fixup;
+ bool has_ck_26m_sel;
};
/**
@@ -132,6 +134,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm,
if (ret < 0)
return ret;
+ /* Make sure we use the bus clock and not the 26MHz clock */
+ if (pc->soc->has_ck_26m_sel)
+ writel(0, pc->regs + PWM_CK_26M_SEL);
+
	/* Use picosecond resolution for higher accuracy */
resolution = (u64)NSEC_PER_SEC * 1000;
do_div(resolution, clk_get_rate(pc->clk_pwms[pwm->hwpwm]));
@@ -208,7 +214,6 @@ static const struct pwm_ops pwm_mediatek_ops = {
static int pwm_mediatek_probe(struct platform_device *pdev)
{
struct pwm_mediatek_chip *pc;
- struct resource *res;
unsigned int i;
int ret;
@@ -218,8 +223,7 @@ static int pwm_mediatek_probe(struct platform_device *pdev)
pc->soc = of_device_get_match_data(&pdev->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->regs = devm_ioremap_resource(&pdev->dev, res);
+ pc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->regs))
return PTR_ERR(pc->regs);
@@ -281,31 +285,43 @@ static int pwm_mediatek_remove(struct platform_device *pdev)
static const struct pwm_mediatek_of_data mt2712_pwm_data = {
.num_pwms = 8,
.pwm45_fixup = false,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7622_pwm_data = {
.num_pwms = 6,
.pwm45_fixup = false,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7623_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = true,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7628_pwm_data = {
.num_pwms = 4,
.pwm45_fixup = true,
+ .has_ck_26m_sel = false,
};
static const struct pwm_mediatek_of_data mt7629_pwm_data = {
.num_pwms = 1,
.pwm45_fixup = false,
+ .has_ck_26m_sel = false,
+};
+
+static const struct pwm_mediatek_of_data mt8183_pwm_data = {
+ .num_pwms = 4,
+ .pwm45_fixup = false,
+ .has_ck_26m_sel = true,
};
static const struct pwm_mediatek_of_data mt8516_pwm_data = {
.num_pwms = 5,
.pwm45_fixup = false,
+ .has_ck_26m_sel = true,
};
static const struct of_device_id pwm_mediatek_of_match[] = {
@@ -314,6 +330,7 @@ static const struct of_device_id pwm_mediatek_of_match[] = {
{ .compatible = "mediatek,mt7623-pwm", .data = &mt7623_pwm_data },
{ .compatible = "mediatek,mt7628-pwm", .data = &mt7628_pwm_data },
{ .compatible = "mediatek,mt7629-pwm", .data = &mt7629_pwm_data },
+ { .compatible = "mediatek,mt8183-pwm", .data = &mt8183_pwm_data },
{ .compatible = "mediatek,mt8516-pwm", .data = &mt8516_pwm_data },
{ },
};
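
The new has_ck_26m_sel flag follows the usual per-SoC match-data pattern: each
compatible points at a const descriptor, fetched once at probe time. A hedged
sketch with hypothetical names:

	struct soc_data {
		unsigned int num_pwms;
		bool has_ck_26m_sel;	/* SoC has the PWM_CK_26M_SEL register */
	};

	static int sketch_probe(struct platform_device *pdev)
	{
		const struct soc_data *soc;
		void __iomem *regs;

		regs = devm_platform_ioremap_resource(pdev, 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);

		/* .data pointer of the matched of_device_id entry */
		soc = of_device_get_match_data(&pdev->dev);
		if (soc->has_ck_26m_sel)
			writel(0, regs + 0x210);	/* pick the bus clock */

		return 0;
	}
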
diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
index bd0d7336b898..a3ce9789412a 100644
--- a/drivers/pwm/pwm-meson.c
+++ b/drivers/pwm/pwm-meson.c
@@ -537,15 +537,13 @@ static int meson_pwm_init_channels(struct meson_pwm *meson)
static int meson_pwm_probe(struct platform_device *pdev)
{
struct meson_pwm *meson;
- struct resource *regs;
int err;
meson = devm_kzalloc(&pdev->dev, sizeof(*meson), GFP_KERNEL);
if (!meson)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson->base = devm_ioremap_resource(&pdev->dev, regs);
+ meson->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson->base))
return PTR_ERR(meson->base);
diff --git a/drivers/pwm/pwm-mtk-disp.c b/drivers/pwm/pwm-mtk-disp.c
index 83b8be0209b7..87c6b4bc5d43 100644
--- a/drivers/pwm/pwm-mtk-disp.c
+++ b/drivers/pwm/pwm-mtk-disp.c
@@ -172,7 +172,6 @@ static const struct pwm_ops mtk_disp_pwm_ops = {
static int mtk_disp_pwm_probe(struct platform_device *pdev)
{
struct mtk_disp_pwm *mdp;
- struct resource *r;
int ret;
mdp = devm_kzalloc(&pdev->dev, sizeof(*mdp), GFP_KERNEL);
@@ -181,8 +180,7 @@ static int mtk_disp_pwm_probe(struct platform_device *pdev)
mdp->data = of_device_get_match_data(&pdev->dev);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdp->base = devm_ioremap_resource(&pdev->dev, r);
+ mdp->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdp->base))
return PTR_ERR(mdp->base);
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index a2a0912c2dcd..d06cf60e6575 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -166,7 +166,6 @@ static int pwm_probe(struct platform_device *pdev)
{
const struct platform_device_id *id = platform_get_device_id(pdev);
struct pxa_pwm_chip *pwm;
- struct resource *r;
int ret = 0;
if (IS_ENABLED(CONFIG_OF) && id == NULL)
@@ -193,8 +192,7 @@ static int pwm_probe(struct platform_device *pdev)
pwm->chip.of_pwm_n_cells = 1;
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pwm->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->mmio_base))
return PTR_ERR(pwm->mmio_base);
diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
index 7ab9eb6616d9..002ab79a7ec2 100644
--- a/drivers/pwm/pwm-rcar.c
+++ b/drivers/pwm/pwm-rcar.c
@@ -168,7 +168,7 @@ static int rcar_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
/* This HW/driver only supports normal polarity */
if (state->polarity != PWM_POLARITY_NORMAL)
- return -ENOTSUPP;
+ return -EINVAL;
if (!state->enabled) {
rcar_pwm_disable(rp);
@@ -204,15 +204,13 @@ static const struct pwm_ops rcar_pwm_ops = {
static int rcar_pwm_probe(struct platform_device *pdev)
{
struct rcar_pwm_chip *rcar_pwm;
- struct resource *res;
int ret;
rcar_pwm = devm_kzalloc(&pdev->dev, sizeof(*rcar_pwm), GFP_KERNEL);
if (rcar_pwm == NULL)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcar_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ rcar_pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcar_pwm->base))
return PTR_ERR(rcar_pwm->base);
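
The rcar change reflects the convention that ->apply() should reject states
the hardware cannot express with -EINVAL; -ENOTSUPP is a kernel-internal code
that should not leak to userspace through the PWM sysfs interface. A hedged
sketch of the check:

	static int sketch_apply(struct pwm_chip *chip, struct pwm_device *pwm,
				const struct pwm_state *state)
	{
		/* Only normal polarity is wired up in this hardware. */
		if (state->polarity != PWM_POLARITY_NORMAL)
			return -EINVAL;

		/* ... program period, duty cycle, enable ... */
		return 0;
	}
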
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 81ad5a551455..d02b24b77cdf 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -383,7 +383,6 @@ static const struct pwm_ops tpu_pwm_ops = {
static int tpu_probe(struct platform_device *pdev)
{
struct tpu_device *tpu;
- struct resource *res;
int ret;
tpu = devm_kzalloc(&pdev->dev, sizeof(*tpu), GFP_KERNEL);
@@ -394,8 +393,7 @@ static int tpu_probe(struct platform_device *pdev)
tpu->pdev = pdev;
/* Map memory, get clock and pin control. */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tpu->base = devm_ioremap_resource(&pdev->dev, res);
+ tpu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tpu->base))
return PTR_ERR(tpu->base);
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index 77c23a2c6d71..389a5e140412 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -287,7 +287,6 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct rockchip_pwm_chip *pc;
- struct resource *r;
u32 enable_conf, ctrl;
int ret, count;
@@ -299,8 +298,7 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->base = devm_ioremap_resource(&pdev->dev, r);
+ pc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->base))
return PTR_ERR(pc->base);
diff --git a/drivers/pwm/pwm-samsung.c b/drivers/pwm/pwm-samsung.c
index 87a886f7dc2f..645d0066ff0a 100644
--- a/drivers/pwm/pwm-samsung.c
+++ b/drivers/pwm/pwm-samsung.c
@@ -510,7 +510,6 @@ static int pwm_samsung_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct samsung_pwm_chip *chip;
- struct resource *res;
unsigned int chan;
int ret;
@@ -541,8 +540,7 @@ static int pwm_samsung_probe(struct platform_device *pdev)
sizeof(chip->variant));
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, res);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c
index 2485fbaaead2..2a7cd2deaeea 100644
--- a/drivers/pwm/pwm-sifive.c
+++ b/drivers/pwm/pwm-sifive.c
@@ -232,7 +232,6 @@ static int pwm_sifive_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct pwm_sifive_ddata *ddata;
struct pwm_chip *chip;
- struct resource *res;
int ret;
ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
@@ -248,8 +247,7 @@ static int pwm_sifive_probe(struct platform_device *pdev)
chip->base = -1;
chip->npwm = 4;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ddata->regs = devm_ioremap_resource(dev, res);
+ ddata->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ddata->regs))
return PTR_ERR(ddata->regs);
diff --git a/drivers/pwm/pwm-sl28cpld.c b/drivers/pwm/pwm-sl28cpld.c
index b4c651fc749c..0b01ec25e2f0 100644
--- a/drivers/pwm/pwm-sl28cpld.c
+++ b/drivers/pwm/pwm-sl28cpld.c
@@ -232,6 +232,8 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
chip->base = -1;
chip->npwm = 1;
+ platform_set_drvdata(pdev, priv);
+
ret = pwmchip_add(&priv->pwm_chip);
if (ret) {
dev_err(&pdev->dev, "failed to add PWM chip (%pe)",
@@ -239,8 +241,6 @@ static int sl28cpld_pwm_probe(struct platform_device *pdev)
return ret;
}
- platform_set_drvdata(pdev, priv);
-
return 0;
}
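
Moving platform_set_drvdata() ahead of pwmchip_add() closes a small window:
once the chip is registered its callbacks can run, and anything in them that
calls dev_get_drvdata() would have seen NULL. A hedged sketch of the safe
ordering:

	/* Publish driver data before the chip becomes externally visible. */
	platform_set_drvdata(pdev, priv);

	ret = pwmchip_add(&priv->pwm_chip);	/* callbacks may fire from here */
	if (ret)
		return ret;
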
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index 6c6b44fd3f43..f63b54aae1b4 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -174,7 +174,6 @@ static int spear_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spear_pwm_chip *pc;
- struct resource *r;
int ret;
u32 val;
@@ -182,8 +181,7 @@ static int spear_pwm_probe(struct platform_device *pdev)
if (!pc)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c
index 1508616d794c..99c70e07858d 100644
--- a/drivers/pwm/pwm-sti.c
+++ b/drivers/pwm/pwm-sti.c
@@ -505,7 +505,6 @@ static int sti_pwm_probe_dt(struct sti_pwm_chip *pc)
if (IS_ERR(pc->prescale_high))
return PTR_ERR(pc->prescale_high);
-
pc->pwm_out_en = devm_regmap_field_alloc(dev, pc->regmap,
reg_fields[PWM_OUT_EN]);
if (IS_ERR(pc->pwm_out_en))
@@ -540,7 +539,6 @@ static int sti_pwm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sti_pwm_compat_data *cdata;
struct sti_pwm_chip *pc;
- struct resource *res;
unsigned int i;
int irq, ret;
@@ -552,9 +550,7 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (!cdata)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- pc->mmio = devm_ioremap_resource(dev, res);
+ pc->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio))
return PTR_ERR(pc->mmio);
@@ -593,38 +589,34 @@ static int sti_pwm_probe(struct platform_device *pdev)
if (ret)
return ret;
- if (!cdata->pwm_num_devs)
- goto skip_pwm;
-
- pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
- if (IS_ERR(pc->pwm_clk)) {
- dev_err(dev, "failed to get PWM clock\n");
- return PTR_ERR(pc->pwm_clk);
- }
+ if (cdata->pwm_num_devs) {
+ pc->pwm_clk = of_clk_get_by_name(dev->of_node, "pwm");
+ if (IS_ERR(pc->pwm_clk)) {
+ dev_err(dev, "failed to get PWM clock\n");
+ return PTR_ERR(pc->pwm_clk);
+ }
- ret = clk_prepare(pc->pwm_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
+ ret = clk_prepare(pc->pwm_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
}
-skip_pwm:
- if (!cdata->cpt_num_devs)
- goto skip_cpt;
-
- pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
- if (IS_ERR(pc->cpt_clk)) {
- dev_err(dev, "failed to get PWM capture clock\n");
- return PTR_ERR(pc->cpt_clk);
- }
+ if (cdata->cpt_num_devs) {
+ pc->cpt_clk = of_clk_get_by_name(dev->of_node, "capture");
+ if (IS_ERR(pc->cpt_clk)) {
+ dev_err(dev, "failed to get PWM capture clock\n");
+ return PTR_ERR(pc->cpt_clk);
+ }
- ret = clk_prepare(pc->cpt_clk);
- if (ret) {
- dev_err(dev, "failed to prepare clock\n");
- return ret;
+ ret = clk_prepare(pc->cpt_clk);
+ if (ret) {
+ dev_err(dev, "failed to prepare clock\n");
+ return ret;
+ }
}
-skip_cpt:
pc->chip.dev = dev;
pc->chip.ops = &sti_pwm_ops;
pc->chip.base = -1;
diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c
index 38a4c5c1317b..ce5c4fc8da6f 100644
--- a/drivers/pwm/pwm-sun4i.c
+++ b/drivers/pwm/pwm-sun4i.c
@@ -294,12 +294,8 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm,
ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- if (state->enabled) {
+ if (state->enabled)
ctrl |= BIT_CH(PWM_EN, pwm->hwpwm);
- } else {
- ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm);
- ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm);
- }
sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG);
@@ -395,7 +391,6 @@ MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids);
static int sun4i_pwm_probe(struct platform_device *pdev)
{
struct sun4i_pwm_chip *pwm;
- struct resource *res;
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
@@ -406,8 +401,7 @@ static int sun4i_pwm_probe(struct platform_device *pdev)
if (!pwm->data)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->base = devm_ioremap_resource(&pdev->dev, res);
+ pwm->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->base))
return PTR_ERR(pwm->base);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 1daf591025c0..55bc63d5a0ae 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -237,7 +237,6 @@ static const struct pwm_ops tegra_pwm_ops = {
static int tegra_pwm_probe(struct platform_device *pdev)
{
struct tegra_pwm_chip *pwm;
- struct resource *r;
int ret;
pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
@@ -247,8 +246,7 @@ static int tegra_pwm_probe(struct platform_device *pdev)
pwm->soc = of_device_get_match_data(&pdev->dev);
pwm->dev = &pdev->dev;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pwm->regs = devm_ioremap_resource(&pdev->dev, r);
+ pwm->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pwm->regs))
return PTR_ERR(pwm->regs);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 683804c7d26c..2a8949014bb1 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -196,7 +196,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ecap_pwm_chip *pc;
- struct resource *r;
struct clk *clk;
int ret;
@@ -230,8 +229,7 @@ static int ecap_pwm_probe(struct platform_device *pdev)
pc->chip.base = -1;
pc->chip.npwm = 1;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 0846917ff2d2..a7fb224d6535 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -421,7 +421,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ehrpwm_pwm_chip *pc;
- struct resource *r;
struct clk *clk;
int ret;
@@ -437,10 +436,8 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
}
}
- if (IS_ERR(clk)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- return PTR_ERR(clk);
- }
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk), "Failed to get fck\n");
pc->clk_rate = clk_get_rate(clk);
if (!pc->clk_rate) {
@@ -455,17 +452,14 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
pc->chip.base = -1;
pc->chip.npwm = NUM_PWM_CHANNEL;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+ pc->mmio_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
/* Acquire tbclk for Time Base EHRPWM submodule */
pc->tbclk = devm_clk_get(&pdev->dev, "tbclk");
- if (IS_ERR(pc->tbclk)) {
- dev_err(&pdev->dev, "Failed to get tbclk\n");
- return PTR_ERR(pc->tbclk);
- }
+ if (IS_ERR(pc->tbclk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(pc->tbclk), "Failed to get tbclk\n");
ret = clk_prepare(pc->tbclk);
if (ret < 0) {
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 11d45e56a923..6e36851a22bb 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -193,7 +193,6 @@ MODULE_DEVICE_TABLE(of, vt8500_pwm_dt_ids);
static int vt8500_pwm_probe(struct platform_device *pdev)
{
struct vt8500_chip *chip;
- struct resource *r;
struct device_node *np = pdev->dev.of_node;
int ret;
@@ -219,8 +218,7 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
return PTR_ERR(chip->clk);
}
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- chip->base = devm_ioremap_resource(&pdev->dev, r);
+ chip->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->base))
return PTR_ERR(chip->base);
diff --git a/drivers/pwm/pwm-zx.c b/drivers/pwm/pwm-zx.c
index e2c21cc34a96..34e91195ce98 100644
--- a/drivers/pwm/pwm-zx.c
+++ b/drivers/pwm/pwm-zx.c
@@ -196,7 +196,6 @@ static const struct pwm_ops zx_pwm_ops = {
static int zx_pwm_probe(struct platform_device *pdev)
{
struct zx_pwm_chip *zpc;
- struct resource *res;
unsigned int i;
int ret;
@@ -204,8 +203,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
if (!zpc)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- zpc->base = devm_ioremap_resource(&pdev->dev, res);
+ zpc->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(zpc->base))
return PTR_ERR(zpc->base);
@@ -238,6 +236,7 @@ static int zx_pwm_probe(struct platform_device *pdev)
ret = pwmchip_add(&zpc->chip);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
+ clk_disable_unprepare(zpc->pclk);
return ret;
}
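
The zx fix applies the standard unwind rule: a clock enabled by hand (devres
only unwinds devm_* acquisitions) must be disabled on every later error path.
A hedged sketch:

	ret = clk_prepare_enable(pclk);
	if (ret)
		return ret;

	ret = pwmchip_add(chip);
	if (ret) {
		clk_disable_unprepare(pclk);	/* undo the manual enable */
		return ret;
	}
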
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 53fa84f4d1e1..5abdd29fb9f3 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -881,6 +881,7 @@ config REGULATOR_QCOM_RPM
config REGULATOR_QCOM_RPMH
tristate "Qualcomm Technologies, Inc. RPMh regulator driver"
depends on QCOM_RPMH || (QCOM_RPMH=n && COMPILE_TEST)
+ depends on QCOM_COMMAND_DB || (QCOM_COMMAND_DB=n && COMPILE_TEST)
help
This driver supports control of PMIC regulators via the RPMh hardware
block found on Qualcomm Technologies Inc. SoCs. RPMh regulator
diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
index e6d5d98c3cea..9309765d0450 100644
--- a/drivers/regulator/bd718x7-regulator.c
+++ b/drivers/regulator/bd718x7-regulator.c
@@ -15,6 +15,36 @@
#include <linux/regulator/of_regulator.h>
#include <linux/slab.h>
+/* Typical regulator startup times as per data sheet in uS */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME 440
+#define BD71847_LDO2_STARTUP_TIME 370
+#define BD71847_LDO3_STARTUP_TIME 310
+#define BD71847_LDO4_STARTUP_TIME 400
+#define BD71847_LDO5_STARTUP_TIME 530
+#define BD71847_LDO6_STARTUP_TIME 400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME 440
+#define BD71837_LDO2_STARTUP_TIME 370
+#define BD71837_LDO3_STARTUP_TIME 310
+#define BD71837_LDO4_STARTUP_TIME 400
+#define BD71837_LDO5_STARTUP_TIME 310
+#define BD71837_LDO6_STARTUP_TIME 400
+#define BD71837_LDO7_STARTUP_TIME 530
+
/*
* BD718(37/47/50) have two "enable control modes". ON/OFF can either be
* controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_buck3_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
.linear_range_selectors = bd71847_buck4_volt_range_sel,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71847_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.linear_range_selectors = bd71847_ldo5_volt_range_sel,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71847_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK1_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK1_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD718XX_REG_BUCK2_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK2_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK3_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK3_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = DVS_BUCK_RUN_MASK,
.enable_reg = BD71837_REG_BUCK4_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK4_STARTUP_TIME,
.owner = THIS_MODULE,
.of_parse_cb = buck_set_hw_dvs_levels,
},
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd71837_buck5_volt_range_sel,
.enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_BUCK6_MASK,
.enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
.enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
.enable_mask = BD718XX_BUCK_EN,
+ .enable_time = BD71837_BUCK8_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.linear_range_selectors = bd718xx_ldo1_volt_range_sel,
.enable_reg = BD718XX_REG_LDO1_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO1_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.n_voltages = ARRAY_SIZE(ldo_2_volts),
.enable_reg = BD718XX_REG_LDO2_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO2_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO3_MASK,
.enable_reg = BD718XX_REG_LDO3_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO3_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO4_MASK,
.enable_reg = BD718XX_REG_LDO4_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO4_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO5_MASK,
.enable_reg = BD718XX_REG_LDO5_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO5_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD718XX_LDO6_MASK,
.enable_reg = BD718XX_REG_LDO6_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO6_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
.vsel_mask = BD71837_LDO7_MASK,
.enable_reg = BD71837_REG_LDO7_VOLT,
.enable_mask = BD718XX_LDO_EN,
+ .enable_time = BD71837_LDO7_STARTUP_TIME,
.owner = THIS_MODULE,
},
.init = {
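
The regulator core uses .enable_time (in microseconds) as the delay between
setting the enable bit and treating the rail as usable, so filling it in from
the datasheet startup figures keeps consumers from seeing an unsettled rail.
A hedged sketch with illustrative values, not taken from any datasheet:

	static const struct regulator_desc sketch_desc = {
		.name        = "sketch-buck",
		.enable_reg  = 0x10,		/* hypothetical register */
		.enable_mask = BIT(0),
		.enable_time = 160,		/* startup time in us */
		.owner       = THIS_MODULE,
	};
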
diff --git a/drivers/regulator/pf8x00-regulator.c b/drivers/regulator/pf8x00-regulator.c
index 308c27fa6ea8..af9918cd27aa 100644
--- a/drivers/regulator/pf8x00-regulator.c
+++ b/drivers/regulator/pf8x00-regulator.c
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
}
static const struct of_device_id pf8x00_dt_ids[] = {
- { .compatible = "nxp,pf8x00",},
+ { .compatible = "nxp,pf8100",},
+ { .compatible = "nxp,pf8121a",},
+ { .compatible = "nxp,pf8200",},
{ }
};
MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
static const struct i2c_device_id pf8x00_i2c_id[] = {
- { "pf8x00", 0 },
+ { "pf8100", 0 },
+ { "pf8121a", 0 },
+ { "pf8200", 0 },
{},
};
MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index fe030ec4b7db..c395a8dda6f7 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -726,7 +726,7 @@ static const struct rpmh_vreg_hw_data pmic5_ftsmps510 = {
static const struct rpmh_vreg_hw_data pmic5_hfsmps515 = {
.regulator_type = VRM,
.ops = &rpmh_regulator_vrm_ops,
- .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 1600),
+ .voltage_range = REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000),
.n_voltages = 5,
.pmic_mode_map = pmic_mode_map_pmic5_smps,
.of_map_mode = rpmh_regulator_pmic4_smps_of_map_mode,
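
The one-character fix corrects the hfsmps515 step from 1.6 mV to 16 mV. With
REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) the selectable
voltages are min_uV + sel * step_uV, so the corrected range works out to:

	/*
	 * Worked example for REGULATOR_LINEAR_RANGE(2800000, 0, 4, 16000):
	 *   V(sel) = 2800000 + sel * 16000 uV, sel = 0..4
	 *   => 2.800, 2.816, 2.832, 2.848, 2.864 V
	 * The old 1600 uV step would have topped out at 2.8064 V.
	 */
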
diff --git a/drivers/remoteproc/wkup_m3_rproc.c b/drivers/remoteproc/wkup_m3_rproc.c
index b9349d684258..92d387dfc03b 100644
--- a/drivers/remoteproc/wkup_m3_rproc.c
+++ b/drivers/remoteproc/wkup_m3_rproc.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc.h>
+#include <linux/reset.h>
#include <linux/platform_data/wkup_m3.h>
@@ -43,11 +44,13 @@ struct wkup_m3_mem {
* @rproc: rproc handle
* @pdev: pointer to platform device
* @mem: WkupM3 memory information
+ * @rsts: reset control
*/
struct wkup_m3_rproc {
struct rproc *rproc;
struct platform_device *pdev;
struct wkup_m3_mem mem[WKUPM3_MEM_MAX];
+ struct reset_control *rsts;
};
static int wkup_m3_rproc_start(struct rproc *rproc)
@@ -56,13 +59,16 @@ static int wkup_m3_rproc_start(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->deassert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_deassert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->deassert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to reset wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static int wkup_m3_rproc_stop(struct rproc *rproc)
@@ -71,13 +77,16 @@ static int wkup_m3_rproc_stop(struct rproc *rproc)
struct platform_device *pdev = wkupm3->pdev;
struct device *dev = &pdev->dev;
struct wkup_m3_platform_data *pdata = dev_get_platdata(dev);
+ int error = 0;
- if (pdata->assert_reset(pdev, pdata->reset_name)) {
+ error = reset_control_assert(wkupm3->rsts);
+
+ if (!wkupm3->rsts && pdata->assert_reset(pdev, pdata->reset_name)) {
dev_err(dev, "Unable to assert reset of wkup_m3!\n");
- return -ENODEV;
+ error = -ENODEV;
}
- return 0;
+ return error;
}
static void *wkup_m3_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
@@ -132,12 +141,6 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
int ret;
int i;
- if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
- pdata->reset_name)) {
- dev_err(dev, "Platform data missing!\n");
- return -ENODEV;
- }
-
ret = of_property_read_string(dev->of_node, "ti,pm-firmware",
&fw_name);
if (ret) {
@@ -165,6 +168,18 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
wkupm3->rproc = rproc;
wkupm3->pdev = pdev;
+ wkupm3->rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
+ if (IS_ERR(wkupm3->rsts))
+ return PTR_ERR(wkupm3->rsts);
+ if (!wkupm3->rsts) {
+ if (!(pdata && pdata->deassert_reset && pdata->assert_reset &&
+ pdata->reset_name)) {
+ dev_err(dev, "Platform data missing!\n");
+ ret = -ENODEV;
+ goto err_put_rproc;
+ }
+ }
+
for (i = 0; i < ARRAY_SIZE(mem_names); i++) {
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
mem_names[i]);
@@ -173,7 +188,7 @@ static int wkup_m3_rproc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "devm_ioremap_resource failed for resource %d\n",
i);
ret = PTR_ERR(wkupm3->mem[i].cpu_addr);
- goto err;
+ goto err_put_rproc;
}
wkupm3->mem[i].bus_addr = res->start;
wkupm3->mem[i].size = resource_size(res);
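
devm_reset_control_get_optional_shared() returns NULL, not an error, when the
device node carries no matching resets entry; that is what lets the probe
above keep the legacy platform-data callbacks as a fallback, required only
when wkupm3->rsts is NULL. A hedged sketch of the pattern:

	rsts = devm_reset_control_get_optional_shared(dev, "rstctrl");
	if (IS_ERR(rsts))
		return PTR_ERR(rsts);	/* real error, e.g. -EPROBE_DEFER */
	if (!rsts)
		use_legacy_pdata = true;	/* no reset line described in DT */
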
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index dceec715e745..71ab75a46491 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -102,7 +102,8 @@ config RESET_LPC18XX
This enables the reset controller driver for NXP LPC18xx/43xx SoCs.
config RESET_MESON
- bool "Meson Reset Driver" if COMPILE_TEST
+ tristate "Meson Reset Driver"
+ depends on ARCH_MESON || COMPILE_TEST
default ARCH_MESON
help
This enables the reset driver for Amlogic Meson SoCs.
diff --git a/drivers/reset/core.c b/drivers/reset/core.c
index a2df88e90011..34e89aa0fb5e 100644
--- a/drivers/reset/core.c
+++ b/drivers/reset/core.c
@@ -208,6 +208,39 @@ static int reset_control_array_reset(struct reset_control_array *resets)
return 0;
}
+static int reset_control_array_rearm(struct reset_control_array *resets)
+{
+ struct reset_control *rstc;
+ int i;
+
+ for (i = 0; i < resets->num_rstcs; i++) {
+ rstc = resets->rstc[i];
+
+ if (!rstc)
+ continue;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+ } else {
+ if (!rstc->acquired)
+ return -EPERM;
+ }
+ }
+
+ for (i = 0; i < resets->num_rstcs; i++) {
+ rstc = resets->rstc[i];
+
+ if (rstc && rstc->shared)
+ WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
+ }
+
+ return 0;
+}
+
static int reset_control_array_assert(struct reset_control_array *resets)
{
int ret, i;
@@ -326,6 +359,46 @@ int reset_control_reset(struct reset_control *rstc)
EXPORT_SYMBOL_GPL(reset_control_reset);
/**
+ * reset_control_rearm - allow a shared reset line to be re-triggered
+ * @rstc: reset controller
+ *
+ * On a shared reset line the actual reset pulse is only triggered once for
+ * the lifetime of the reset_control instance, unless this call is used to
+ * rearm it.
+ *
+ * Calls to this function must be balanced with calls to reset_control_reset;
+ * a warning is emitted if triggered_count ever dips below 0.
+ *
+ * Consumers must not use reset_control_(de)assert on shared reset lines when
+ * reset_control_reset or reset_control_rearm have been used.
+ *
+ * If rstc is NULL the function will just return 0.
+ */
+int reset_control_rearm(struct reset_control *rstc)
+{
+ if (!rstc)
+ return 0;
+
+ if (WARN_ON(IS_ERR(rstc)))
+ return -EINVAL;
+
+ if (reset_control_is_array(rstc))
+ return reset_control_array_rearm(rstc_to_array(rstc));
+
+ if (rstc->shared) {
+ if (WARN_ON(atomic_read(&rstc->deassert_count) != 0))
+ return -EINVAL;
+
+ WARN_ON(atomic_dec_return(&rstc->triggered_count) < 0);
+ } else {
+ if (!rstc->acquired)
+ return -EPERM;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(reset_control_rearm);
+
+/**
* reset_control_assert - asserts the reset line
* @rstc: reset controller
*
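
A hedged usage sketch for the new API: a consumer that has fired a shared
reset with reset_control_reset() can drop its vote again with
reset_control_rearm(), allowing the pulse to be re-triggered for the next
user of the line.

	ret = reset_control_reset(rstc);	/* fires at most once while held */
	if (ret)
		return ret;

	/* ... use the device, then on teardown ... */

	ret = reset_control_rearm(rstc);	/* balance the trigger count */
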
diff --git a/drivers/reset/reset-meson.c b/drivers/reset/reset-meson.c
index 94d7ba88d7d2..c9bc325ad65a 100644
--- a/drivers/reset/reset-meson.c
+++ b/drivers/reset/reset-meson.c
@@ -9,6 +9,7 @@
#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
@@ -104,6 +105,7 @@ static const struct of_device_id meson_reset_dt_ids[] = {
{ .compatible = "amlogic,meson-a1-reset", .data = &meson_a1_param},
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, meson_reset_dt_ids);
static int meson_reset_probe(struct platform_device *pdev)
{
@@ -142,4 +144,8 @@ static struct platform_driver meson_reset_driver = {
.of_match_table = meson_reset_dt_ids,
},
};
-builtin_platform_driver(meson_reset_driver);
+module_platform_driver(meson_reset_driver);
+
+MODULE_DESCRIPTION("Amlogic Meson Reset Controller driver");
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_LICENSE("Dual BSD/GPL");
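
Switching from builtin_platform_driver() to module_platform_driver(), together
with the MODULE_DEVICE_TABLE() added above, is what makes the tristate Kconfig
option work: the driver gains module init/exit boilerplate and the alias
modprobe needs to autoload it from the DT compatible. Roughly what the macro
expands to (see include/linux/platform_device.h for the real definition):

	static int __init meson_reset_driver_init(void)
	{
		return platform_driver_register(&meson_reset_driver);
	}
	module_init(meson_reset_driver_init);

	static void __exit meson_reset_driver_exit(void)
	{
		platform_driver_unregister(&meson_reset_driver);
	}
	module_exit(meson_reset_driver_exit);
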
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index bdd984296196..2a72f861f798 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -44,7 +44,7 @@ static int a10_reset_init(struct device_node *np)
data->membase = ioremap(res.start, size);
if (!data->membase) {
ret = -ENOMEM;
- goto err_alloc;
+ goto release_region;
}
if (of_property_read_u32(np, "altr,modrst-offset", &reg_offset))
@@ -59,7 +59,14 @@ static int a10_reset_init(struct device_node *np)
data->rcdev.of_node = np;
data->status_active_low = true;
- return reset_controller_register(&data->rcdev);
+ ret = reset_controller_register(&data->rcdev);
+ if (ret)
+ pr_err("unable to register device\n");
+
+ return ret;
+
+release_region:
+ release_mem_region(res.start, size);
err_alloc:
kfree(data);
diff --git a/drivers/reset/reset-ti-syscon.c b/drivers/reset/reset-ti-syscon.c
index ef97c4dbbb4e..218370faf37b 100644
--- a/drivers/reset/reset-ti-syscon.c
+++ b/drivers/reset/reset-ti-syscon.c
@@ -89,7 +89,7 @@ static int ti_syscon_reset_assert(struct reset_controller_dev *rcdev,
mask = BIT(control->assert_bit);
value = (control->flags & ASSERT_SET) ? mask : 0x0;
- return regmap_update_bits(data->regmap, control->assert_offset, mask, value);
+ return regmap_write_bits(data->regmap, control->assert_offset, mask, value);
}
/**
@@ -120,7 +120,7 @@ static int ti_syscon_reset_deassert(struct reset_controller_dev *rcdev,
mask = BIT(control->deassert_bit);
value = (control->flags & DEASSERT_SET) ? mask : 0x0;
- return regmap_update_bits(data->regmap, control->deassert_offset, mask, value);
+ return regmap_write_bits(data->regmap, control->deassert_offset, mask, value);
}
/**
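
regmap_update_bits() reads the register first and skips the write when the
masked bits already hold the requested value; regmap_write_bits() performs the
write unconditionally. That matters here because reset registers can read back
status rather than the last value written, so the read-and-skip optimization
could silently drop a required assert or deassert. A hedged sketch:

	mask  = BIT(control->assert_bit);
	value = (control->flags & ASSERT_SET) ? mask : 0x0;

	/* Always write: the readback may not reflect the programmed value. */
	ret = regmap_write_bits(data->regmap, control->assert_offset,
				mask, value);
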
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 65ad9d0b47ab..6123f9f4fbc9 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -13,7 +13,7 @@ config RTC_MC146818_LIB
menuconfig RTC_CLASS
bool "Real Time Clock"
default n
- depends on !S390 && !UML
+ depends on !S390
select RTC_LIB
help
Generic RTC class support. If you say yes here, you will
@@ -817,15 +817,6 @@ config RTC_DRV_RX4581
This driver can also be built as a module. If so the module
will be called rtc-rx4581.
-config RTC_DRV_RX6110
- tristate "Epson RX-6110"
- select REGMAP_SPI
- help
- If you say yes here you will get support for the Epson RX-6610.
-
- This driver can also be built as a module. If so the module
- will be called rtc-rx6110.
-
config RTC_DRV_RS5C348
tristate "Ricoh RS5C348A/B"
help
@@ -936,6 +927,17 @@ config RTC_DRV_RV3029_HWMON
Say Y here if you want to expose temperature sensor data on
rtc-rv3029.
+config RTC_DRV_RX6110
+ tristate "Epson RX-6110"
+ depends on RTC_I2C_AND_SPI
+ select REGMAP_SPI if SPI_MASTER
+ select REGMAP_I2C if I2C
+ help
+ If you say yes here you will get support for the Epson RX-6110.
+
+ This driver can also be built as a module. If so the module
+ will be called rtc-rx6110.
+
comment "Platform RTC drivers"
# this 'CMOS' RTC driver is arch dependent because it requires
@@ -1017,6 +1019,7 @@ config RTC_DRV_DS1553
config RTC_DRV_DS1685_FAMILY
tristate "Dallas/Maxim DS1685 Family"
+ depends on HAS_IOMEM
help
If you say yes here you get support for the Dallas/Maxim DS1685
family of real time chips. This family includes the DS1685/DS1687,
@@ -1150,6 +1153,7 @@ config RTC_DRV_STK17TA8
config RTC_DRV_M48T86
tristate "ST M48T86/Dallas DS12887"
+ depends on HAS_IOMEM
help
If you say Y here you will get support for the
ST M48T86 and Dallas DS12887 RTC chips.
@@ -1752,7 +1756,9 @@ config RTC_DRV_LOONGSON1
config RTC_DRV_MXC
tristate "Freescale MXC Real Time Clock"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
help
If you say yes here you get support for the Freescale MXC
RTC module.
@@ -1762,7 +1768,9 @@ config RTC_DRV_MXC
config RTC_DRV_MXC_V2
tristate "Freescale MXC Real Time Clock for i.MX53"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on HAS_IOMEM
+ depends on OF
help
If you say yes here you get support for the Freescale MXC
SRTC module in i.MX53 processor.
@@ -1935,7 +1943,6 @@ config RTC_DRV_HID_SENSOR_TIME
config RTC_DRV_GOLDFISH
tristate "Goldfish Real Time Clock"
depends on OF && HAS_IOMEM
- depends on GOLDFISH || COMPILE_TEST
help
Say yes to enable RTC driver for the Goldfish based virtual platform.
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5855aa2eef62..7e470fbd5e4d 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -28,6 +28,7 @@ static void rtc_device_release(struct device *dev)
struct rtc_device *rtc = to_rtc_device(dev);
ida_simple_remove(&rtc_ida, rtc->id);
+ mutex_destroy(&rtc->ops_lock);
kfree(rtc);
}
@@ -326,8 +327,10 @@ static void rtc_device_get_offset(struct rtc_device *rtc)
*
* @rtc: the RTC class device to destroy
*/
-static void rtc_device_unregister(struct rtc_device *rtc)
+static void devm_rtc_unregister_device(void *data)
{
+ struct rtc_device *rtc = data;
+
mutex_lock(&rtc->ops_lock);
/*
* Remove innards of this RTC, then disable it, before
@@ -337,60 +340,43 @@ static void rtc_device_unregister(struct rtc_device *rtc)
cdev_device_del(&rtc->char_dev, &rtc->dev);
rtc->ops = NULL;
mutex_unlock(&rtc->ops_lock);
- put_device(&rtc->dev);
}
-static void devm_rtc_release_device(struct device *dev, void *res)
+static void devm_rtc_release_device(void *res)
{
- struct rtc_device *rtc = *(struct rtc_device **)res;
+ struct rtc_device *rtc = res;
- rtc_nvmem_unregister(rtc);
-
- if (rtc->registered)
- rtc_device_unregister(rtc);
- else
- put_device(&rtc->dev);
+ put_device(&rtc->dev);
}
struct rtc_device *devm_rtc_allocate_device(struct device *dev)
{
- struct rtc_device **ptr, *rtc;
+ struct rtc_device *rtc;
int id, err;
id = rtc_device_get_id(dev);
if (id < 0)
return ERR_PTR(id);
- ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL);
- if (!ptr) {
- err = -ENOMEM;
- goto exit_ida;
- }
-
rtc = rtc_allocate_device();
if (!rtc) {
- err = -ENOMEM;
- goto exit_devres;
+ ida_simple_remove(&rtc_ida, id);
+ return ERR_PTR(-ENOMEM);
}
- *ptr = rtc;
- devres_add(dev, ptr);
-
rtc->id = id;
rtc->dev.parent = dev;
dev_set_name(&rtc->dev, "rtc%d", id);
- return rtc;
+ err = devm_add_action_or_reset(dev, devm_rtc_release_device, rtc);
+ if (err)
+ return ERR_PTR(err);
-exit_devres:
- devres_free(ptr);
-exit_ida:
- ida_simple_remove(&rtc_ida, id);
- return ERR_PTR(err);
+ return rtc;
}
EXPORT_SYMBOL_GPL(devm_rtc_allocate_device);
-int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
+int __devm_rtc_register_device(struct module *owner, struct rtc_device *rtc)
{
struct rtc_wkalrm alrm;
int err;
@@ -420,7 +406,6 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
rtc_proc_add_device(rtc);
- rtc->registered = true;
dev_info(rtc->dev.parent, "registered as %s\n",
dev_name(&rtc->dev));
@@ -429,9 +414,10 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc)
rtc_hctosys(rtc);
#endif
- return 0;
+ return devm_add_action_or_reset(rtc->dev.parent,
+ devm_rtc_unregister_device, rtc);
}
-EXPORT_SYMBOL_GPL(__rtc_register_device);
+EXPORT_SYMBOL_GPL(__devm_rtc_register_device);
/**
* devm_rtc_device_register - resource managed rtc_device_register()
@@ -461,7 +447,7 @@ struct rtc_device *devm_rtc_device_register(struct device *dev,
rtc->ops = ops;
- err = __rtc_register_device(owner, rtc);
+ err = __devm_rtc_register_device(owner, rtc);
if (err)
return ERR_PTR(err);
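
The class.c rework trades the hand-rolled devres wrapper for
devm_add_action_or_reset(), which registers a release callback to run at
driver detach and, if registration itself fails, runs the callback immediately
so nothing leaks. A hedged sketch of the pattern with hypothetical names:

	static void sketch_release(void *data)
	{
		put_device(data);	/* drop the reference taken below */
	}

	/* obj holds a device reference from a hypothetical sketch_alloc() */
	err = devm_add_action_or_reset(dev, sketch_release, obj);
	if (err)
		return err;	/* sketch_release() has already run */
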
diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c
index 4312096c7738..07ede21cee34 100644
--- a/drivers/rtc/nvmem.c
+++ b/drivers/rtc/nvmem.c
@@ -9,99 +9,22 @@
#include <linux/types.h>
#include <linux/nvmem-consumer.h>
#include <linux/rtc.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-/*
- * Deprecated ABI compatibility, this should be removed at some point
- */
-
-static const char nvram_warning[] = "Deprecated ABI, please use nvmem";
-
-static ssize_t
-rtc_nvram_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- dev_warn_once(kobj_to_dev(kobj), nvram_warning);
-
- return nvmem_device_read(attr->private, off, count, buf);
-}
-
-static ssize_t
-rtc_nvram_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off, size_t count)
-{
- dev_warn_once(kobj_to_dev(kobj), nvram_warning);
-
- return nvmem_device_write(attr->private, off, count, buf);
-}
-
-static int rtc_nvram_register(struct rtc_device *rtc,
- struct nvmem_device *nvmem, size_t size)
-{
- int err;
-
- rtc->nvram = kzalloc(sizeof(*rtc->nvram), GFP_KERNEL);
- if (!rtc->nvram)
- return -ENOMEM;
-
- rtc->nvram->attr.name = "nvram";
- rtc->nvram->attr.mode = 0644;
- rtc->nvram->private = nvmem;
-
- sysfs_bin_attr_init(rtc->nvram);
-
- rtc->nvram->read = rtc_nvram_read;
- rtc->nvram->write = rtc_nvram_write;
- rtc->nvram->size = size;
-
- err = sysfs_create_bin_file(&rtc->dev.parent->kobj,
- rtc->nvram);
- if (err) {
- kfree(rtc->nvram);
- rtc->nvram = NULL;
- }
-
- return err;
-}
-
-static void rtc_nvram_unregister(struct rtc_device *rtc)
-{
- sysfs_remove_bin_file(&rtc->dev.parent->kobj, rtc->nvram);
- kfree(rtc->nvram);
- rtc->nvram = NULL;
-}
-
-/*
- * New ABI, uses nvmem
- */
-int rtc_nvmem_register(struct rtc_device *rtc,
+int devm_rtc_nvmem_register(struct rtc_device *rtc,
struct nvmem_config *nvmem_config)
{
+ struct device *dev = rtc->dev.parent;
struct nvmem_device *nvmem;
if (!nvmem_config)
return -ENODEV;
- nvmem_config->dev = rtc->dev.parent;
+ nvmem_config->dev = dev;
nvmem_config->owner = rtc->owner;
- nvmem = devm_nvmem_register(rtc->dev.parent, nvmem_config);
+ nvmem = devm_nvmem_register(dev, nvmem_config);
if (IS_ERR(nvmem))
- return PTR_ERR(nvmem);
-
- /* Register the old ABI */
- if (rtc->nvram_old_abi)
- rtc_nvram_register(rtc, nvmem, nvmem_config->size);
+ dev_err(dev, "failed to register nvmem device for RTC\n");
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtc_nvmem_register);
-
-void rtc_nvmem_unregister(struct rtc_device *rtc)
-{
- /* unregister the old ABI */
- if (rtc->nvram)
- rtc_nvram_unregister(rtc);
+ return PTR_ERR_OR_ZERO(nvmem);
}
+EXPORT_SYMBOL_GPL(devm_rtc_nvmem_register);
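
With the deprecated sysfs "nvram" ABI removed, RTC drivers expose their
non-volatile storage purely through nvmem. A hedged usage sketch; the
callbacks and geometry are hypothetical:

	static struct nvmem_config sketch_nvmem_cfg = {
		.name      = "sketch_nvram",
		.word_size = 1,
		.stride    = 1,
		.size      = 64,
		.reg_read  = sketch_nvram_read,		/* hypothetical */
		.reg_write = sketch_nvram_write,	/* hypothetical */
	};

	ret = devm_rtc_nvmem_register(rtc, &sketch_nvmem_cfg);
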
diff --git a/drivers/rtc/rtc-88pm80x.c b/drivers/rtc/rtc-88pm80x.c
index 75779e8501a3..6a3f44cf6ebe 100644
--- a/drivers/rtc/rtc-88pm80x.c
+++ b/drivers/rtc/rtc-88pm80x.c
@@ -294,7 +294,7 @@ static int pm80x_rtc_probe(struct platform_device *pdev)
info->rtc_dev->ops = &pm80x_rtc_ops;
info->rtc_dev->range_max = U32_MAX;
- ret = rtc_register_device(info->rtc_dev);
+ ret = devm_rtc_register_device(info->rtc_dev);
if (ret)
goto out_rtc;
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index c90457d001e9..2c809a1a445e 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -307,7 +307,7 @@ static int pm860x_rtc_probe(struct platform_device *pdev)
info->rtc_dev->ops = &pm860x_rtc_ops;
info->rtc_dev->range_max = U32_MAX;
- ret = rtc_register_device(info->rtc_dev);
+ ret = devm_rtc_register_device(info->rtc_dev);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c
index 2370ac0cdb5f..6e3e320dc727 100644
--- a/drivers/rtc/rtc-ab-b5ze-s3.c
+++ b/drivers/rtc/rtc-ab-b5ze-s3.c
@@ -892,7 +892,7 @@ static int abb5zes3_probe(struct i2c_client *client,
}
}
- ret = rtc_register_device(data->rtc);
+ ret = devm_rtc_register_device(data->rtc);
err:
if (ret && data->irq)
diff --git a/drivers/rtc/rtc-ab-eoz9.c b/drivers/rtc/rtc-ab-eoz9.c
index d690985caa4c..b20d8f26dcdb 100644
--- a/drivers/rtc/rtc-ab-eoz9.c
+++ b/drivers/rtc/rtc-ab-eoz9.c
@@ -420,7 +420,7 @@ static int abeoz9_probe(struct i2c_client *client,
data->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
data->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(data->rtc);
+ ret = devm_rtc_register_device(data->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-ab3100.c b/drivers/rtc/rtc-ab3100.c
index 2ed6def90975..e4fd961e8bf6 100644
--- a/drivers/rtc/rtc-ab3100.c
+++ b/drivers/rtc/rtc-ab3100.c
@@ -238,7 +238,7 @@ static int __init ab3100_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver ab3100_rtc_driver = {
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index 3d60f3283f11..b40048871295 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -404,7 +404,7 @@ static int ab8500_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static int ab8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
index 803725b3a02c..6733bb0df674 100644
--- a/drivers/rtc/rtc-abx80x.c
+++ b/drivers/rtc/rtc-abx80x.c
@@ -851,7 +851,7 @@ static int abx80x_probe(struct i2c_client *client,
return err;
}
- return rtc_register_device(priv->rtc);
+ return devm_rtc_register_device(priv->rtc);
}
static const struct i2c_device_id abx80x_id[] = {
diff --git a/drivers/rtc/rtc-ac100.c b/drivers/rtc/rtc-ac100.c
index 29223931aba7..1ddbef99e38f 100644
--- a/drivers/rtc/rtc-ac100.c
+++ b/drivers/rtc/rtc-ac100.c
@@ -610,7 +610,7 @@ static int ac100_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- return rtc_register_device(chip->rtc);
+ return devm_rtc_register_device(chip->rtc);
}
static int ac100_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 94d7c22fc4f3..807a79c07f08 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -556,7 +556,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_max = U32_MAX;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-aspeed.c b/drivers/rtc/rtc-aspeed.c
index eacdd0637cce..a93352ed3aec 100644
--- a/drivers/rtc/rtc-aspeed.c
+++ b/drivers/rtc/rtc-aspeed.c
@@ -104,7 +104,7 @@ static int aspeed_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id aspeed_rtc_match[] = {
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index 5e811e04cb21..fe396d27ebb7 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -36,6 +36,10 @@
#define AT91_RTC_UPDCAL BIT(1) /* Update Request Calendar Register */
#define AT91_RTC_MR 0x04 /* Mode Register */
+#define AT91_RTC_HRMOD BIT(0) /* 12/24 hour mode */
+#define AT91_RTC_NEGPPM BIT(4) /* Negative PPM correction */
+#define AT91_RTC_CORRECTION GENMASK(14, 8) /* Slow clock correction */
+#define AT91_RTC_HIGHPPM BIT(15) /* High PPM correction */
#define AT91_RTC_TIMR 0x08 /* Time Register */
#define AT91_RTC_SEC GENMASK(6, 0) /* Current Second */
@@ -77,6 +81,9 @@
#define AT91_RTC_NVTIMALR BIT(2) /* Non valid Time Alarm */
#define AT91_RTC_NVCALALR BIT(3) /* Non valid Calendar Alarm */
+#define AT91_RTC_CORR_DIVIDEND 3906000
+#define AT91_RTC_CORR_LOW_RATIO 20
+
#define at91_rtc_read(field) \
readl_relaxed(at91_rtc_regs + field)
#define at91_rtc_write(field, val) \
@@ -84,6 +91,7 @@
struct at91_rtc_config {
bool use_shadow_imr;
+ bool has_correction;
};
static const struct at91_rtc_config *at91_rtc_config;
@@ -293,6 +301,75 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
return 0;
}
+static int at91_rtc_readoffset(struct device *dev, long *offset)
+{
+ u32 mr = at91_rtc_read(AT91_RTC_MR);
+ long val = FIELD_GET(AT91_RTC_CORRECTION, mr);
+
+ if (!val) {
+ *offset = 0;
+ return 0;
+ }
+
+ val++;
+
+ if (!(mr & AT91_RTC_NEGPPM))
+ val = -val;
+
+ if (!(mr & AT91_RTC_HIGHPPM))
+ val *= AT91_RTC_CORR_LOW_RATIO;
+
+ *offset = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, val);
+
+ return 0;
+}
+
+static int at91_rtc_setoffset(struct device *dev, long offset)
+{
+ long corr;
+ u32 mr;
+
+ if (offset > AT91_RTC_CORR_DIVIDEND / 2)
+ return -ERANGE;
+ if (offset < -AT91_RTC_CORR_DIVIDEND / 2)
+ return -ERANGE;
+
+ mr = at91_rtc_read(AT91_RTC_MR);
+ mr &= ~(AT91_RTC_NEGPPM | AT91_RTC_CORRECTION | AT91_RTC_HIGHPPM);
+
+ if (offset > 0)
+ mr |= AT91_RTC_NEGPPM;
+ else
+ offset = -offset;
+
+	/* Offsets below 764 ppb cannot be corrected; disable correction */
+ if (offset < 764) {
+ at91_rtc_write(AT91_RTC_MR, mr & ~AT91_RTC_NEGPPM);
+
+ return 0;
+ }
+
+	/*
+	 * 29208 ppb is the cutoff between the low and high ranges: beyond it,
+	 * low-range values are never more accurate than high-range ones.
+	 */
+ if (offset < 29208) {
+ corr = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, offset * AT91_RTC_CORR_LOW_RATIO);
+ } else {
+ corr = DIV_ROUND_CLOSEST(AT91_RTC_CORR_DIVIDEND, offset);
+ mr |= AT91_RTC_HIGHPPM;
+ }
+
+ if (corr > 128)
+ corr = 128;
+
+ mr |= FIELD_PREP(AT91_RTC_CORRECTION, corr - 1);
+
+ at91_rtc_write(AT91_RTC_MR, mr);
+
+ return 0;
+}
+
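
A worked example for the correction math above: a requested offset of
10000 ppb falls in the low range (764 <= offset < 29208), so

	/*
	 * corr = DIV_ROUND_CLOSEST(3906000, 10000 * 20) = 20
	 * and AT91_RTC_CORRECTION is programmed with corr - 1 = 19.
	 * Reading back: val = 19 + 1 = 20, scaled by the low ratio 20 -> 400,
	 * offset = DIV_ROUND_CLOSEST(3906000, 400) = 9765 ppb,
	 * the closest correction the divider can express.
	 */
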
/*
* IRQ handler for the RTC
*/
@@ -343,6 +420,10 @@ static const struct at91_rtc_config at91sam9x5_config = {
.use_shadow_imr = true,
};
+static const struct at91_rtc_config sama5d4_config = {
+ .has_correction = true,
+};
+
static const struct of_device_id at91_rtc_dt_ids[] = {
{
.compatible = "atmel,at91rm9200-rtc",
@@ -352,10 +433,13 @@ static const struct of_device_id at91_rtc_dt_ids[] = {
.data = &at91sam9x5_config,
}, {
.compatible = "atmel,sama5d4-rtc",
- .data = &at91rm9200_config,
+ .data = &sama5d4_config,
}, {
.compatible = "atmel,sama5d2-rtc",
- .data = &at91rm9200_config,
+ .data = &sama5d4_config,
+ }, {
+ .compatible = "microchip,sam9x60-rtc",
+ .data = &sama5d4_config,
}, {
/* sentinel */
}
@@ -370,6 +454,16 @@ static const struct rtc_class_ops at91_rtc_ops = {
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
+static const struct rtc_class_ops sama5d4_rtc_ops = {
+ .read_time = at91_rtc_readtime,
+ .set_time = at91_rtc_settime,
+ .read_alarm = at91_rtc_readalarm,
+ .set_alarm = at91_rtc_setalarm,
+ .alarm_irq_enable = at91_rtc_alarm_irq_enable,
+ .set_offset = at91_rtc_setoffset,
+ .read_offset = at91_rtc_readoffset,
+};
+
/*
* Initialize and install RTC driver
*/
@@ -416,7 +510,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
}
at91_rtc_write(AT91_RTC_CR, 0);
- at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
+ at91_rtc_write(AT91_RTC_MR, at91_rtc_read(AT91_RTC_MR) & ~AT91_RTC_HRMOD);
/* Disable all interrupts */
at91_rtc_write_idr(AT91_RTC_ACKUPD | AT91_RTC_ALARM |
@@ -437,10 +531,14 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
if (!device_can_wakeup(&pdev->dev))
device_init_wakeup(&pdev->dev, 1);
- rtc->ops = &at91_rtc_ops;
+ if (at91_rtc_config->has_correction)
+ rtc->ops = &sama5d4_rtc_ops;
+ else
+ rtc->ops = &at91_rtc_ops;
+
rtc->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
goto err_clk;
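The rtc_register_device() -> devm_rtc_register_device() conversions repeated throughout the rest of this series all follow one pattern: registration becomes device-managed, so error and remove paths no longer need to unregister by hand. A minimal probe sketch of the managed pattern (the foo_* names are hypothetical):

static int foo_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &foo_rtc_ops;  /* hypothetical ops table */
	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	rtc->range_max = RTC_TIMESTAMP_END_2099;

	/* managed: unregistered automatically when the device is unbound */
	return devm_rtc_register_device(rtc);
}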
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index e39e89867d29..2216be429ab7 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -431,7 +431,7 @@ static int at91_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "%s: SET TIME!\n",
dev_name(&rtc->rtcdev->dev));
- return rtc_register_device(rtc->rtcdev);
+ return devm_rtc_register_device(rtc->rtcdev);
err_clk:
clk_disable_unprepare(rtc->sclk);
diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c
index 791bebcb6f47..e6428b27b5d4 100644
--- a/drivers/rtc/rtc-au1xxx.c
+++ b/drivers/rtc/rtc-au1xxx.c
@@ -104,7 +104,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtcdev);
- return rtc_register_device(rtcdev);
+ return devm_rtc_register_device(rtcdev);
}
static struct platform_driver au1xrtc_driver = {
diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c
index 4492b770422c..17cb67f5bf6e 100644
--- a/drivers/rtc/rtc-bd70528.c
+++ b/drivers/rtc/rtc-bd70528.c
@@ -604,7 +604,7 @@ static int bd70528_probe(struct platform_device *pdev)
}
}
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static const struct platform_device_id bd718x7_rtc_id[] = {
diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c
index 4fee57c51280..0366e2ff04ae 100644
--- a/drivers/rtc/rtc-brcmstb-waketimer.c
+++ b/drivers/rtc/rtc-brcmstb-waketimer.c
@@ -252,7 +252,7 @@ static int brcmstb_waketmr_probe(struct platform_device *pdev)
timer->rtc->ops = &brcmstb_waketmr_ops;
timer->rtc->range_max = U32_MAX;
- ret = rtc_register_device(timer->rtc);
+ ret = devm_rtc_register_device(timer->rtc);
if (ret)
goto err_notifier;
@@ -264,8 +264,7 @@ err_notifier:
unregister_reboot_notifier(&timer->reboot_notifier);
err_clk:
- if (timer->clk)
- clk_disable_unprepare(timer->clk);
+ clk_disable_unprepare(timer->clk);
return ret;
}
diff --git a/drivers/rtc/rtc-cadence.c b/drivers/rtc/rtc-cadence.c
index 595d5d252850..1edf7f16d73a 100644
--- a/drivers/rtc/rtc-cadence.c
+++ b/drivers/rtc/rtc-cadence.c
@@ -336,7 +336,7 @@ static int cdns_rtc_probe(struct platform_device *pdev)
writel(0, crtc->regs + CDNS_RTC_HMR);
writel(CDNS_RTC_KRTCR_KRTC, crtc->regs + CDNS_RTC_KRTCR);
- ret = rtc_register_device(crtc->rtc_dev);
+ ret = devm_rtc_register_device(crtc->rtc_dev);
if (ret)
goto err_disable_wakeup;
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index c5bcd2adc9fe..51e80bc70d42 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -863,8 +863,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
cmos_rtc.rtc->ops = &cmos_rtc_ops_no_alarm;
}
- cmos_rtc.rtc->nvram_old_abi = true;
- retval = rtc_register_device(cmos_rtc.rtc);
+ retval = devm_rtc_register_device(cmos_rtc.rtc);
if (retval)
goto cleanup2;
@@ -873,8 +872,7 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
/* export at least the first block of NVRAM */
nvmem_cfg.size = address_space - NVRAM_OFFSET;
- if (rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg))
- dev_err(dev, "nvmem registration failed\n");
+ devm_rtc_nvmem_register(cmos_rtc.rtc, &nvmem_cfg);
dev_info(dev, "%s%s, %d bytes nvram%s\n",
!is_valid_irq(rtc_irq) ? "no alarms" :
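The nvmem conversions follow the same scheme: rtc_nvmem_register() becomes devm_rtc_nvmem_register(), the nvram_old_abi assignments go away (the legacy nvram sysfs ABI is apparently being retired along with the rename), and error prints are dropped where the nvmem area is optional. A hedged sketch of the managed registration, with hypothetical foo_* names; reg_read/reg_write return 0 on success:

static struct nvmem_config foo_nvmem_cfg = {
	.name      = "foo_nvram",
	.word_size = 1,
	.stride    = 1,
	.size      = 64,              /* bytes of battery-backed RAM */
	.reg_read  = foo_nvram_read,  /* int foo_nvram_read(void *priv,
					 unsigned int off, void *val,
					 size_t bytes) */
	.reg_write = foo_nvram_write,
};

	/* in probe, after devm_rtc_register_device() succeeded: */
	foo_nvmem_cfg.priv = priv;
	devm_rtc_nvmem_register(rtc, &foo_nvmem_cfg);  /* optional: result ignored */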
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index da59917c9ee8..168ced87d93a 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -203,7 +203,7 @@ static int __init coh901331_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtap);
- ret = rtc_register_device(rtap->rtc);
+ ret = devm_rtc_register_device(rtap->rtc);
if (ret)
goto out_no_rtc;
diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c
index 800667d73a6f..afc8fcba8f88 100644
--- a/drivers/rtc/rtc-cpcap.c
+++ b/drivers/rtc/rtc-cpcap.c
@@ -269,7 +269,8 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
rtc->alarm_irq = platform_get_irq(pdev, 0);
err = devm_request_threaded_irq(dev, rtc->alarm_irq, NULL,
- cpcap_rtc_alarm_irq, IRQF_TRIGGER_NONE,
+ cpcap_rtc_alarm_irq,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"rtc_alarm", rtc);
if (err) {
dev_err(dev, "Could not request alarm irq: %d\n", err);
@@ -285,7 +286,8 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
*/
rtc->update_irq = platform_get_irq(pdev, 1);
err = devm_request_threaded_irq(dev, rtc->update_irq, NULL,
- cpcap_rtc_update_irq, IRQF_TRIGGER_NONE,
+ cpcap_rtc_update_irq,
+ IRQF_TRIGGER_NONE | IRQF_ONESHOT,
"rtc_1hz", rtc);
if (err) {
dev_err(dev, "Could not request update irq: %d\n", err);
@@ -299,7 +301,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev)
/* ignore error and continue without wakeup support */
}
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id cpcap_rtc_of_match[] = {
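The IRQF_ONESHOT additions above are not cosmetic: both requests pass a NULL primary handler, and for non-nested interrupt lines the genirq core rejects a threaded request with handler == NULL unless IRQF_ONESHOT is set (the request fails with -EINVAL), because the line must stay masked until the thread has run. The pattern, with hypothetical names:

	/* NULL hard-IRQ handler => the core requires IRQF_ONESHOT */
	err = devm_request_threaded_irq(dev, irq,
					NULL,            /* no primary handler */
					foo_irq_thread,  /* hypothetical thread fn */
					IRQF_TRIGGER_NONE | IRQF_ONESHOT,
					"foo_alarm", priv);
	if (err) {
		dev_err(dev, "could not request alarm irq: %d\n", err);
		return err;
	}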
diff --git a/drivers/rtc/rtc-cros-ec.c b/drivers/rtc/rtc-cros-ec.c
index f7343c289cab..70626793ca69 100644
--- a/drivers/rtc/rtc-cros-ec.c
+++ b/drivers/rtc/rtc-cros-ec.c
@@ -350,7 +350,7 @@ static int cros_ec_rtc_probe(struct platform_device *pdev)
cros_ec_rtc->rtc->ops = &cros_ec_rtc_ops;
cros_ec_rtc->rtc->range_max = U32_MAX;
- ret = rtc_register_device(cros_ec_rtc->rtc);
+ ret = devm_rtc_register_device(cros_ec_rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 58de10da37b1..9ca99bd35702 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -304,7 +304,7 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->rtc->range_max = RTC_TIMESTAMP_END_2063;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 046b1d4c3dae..d4b72a9fa2ba 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -243,7 +243,7 @@ static int da9063_rtc_read_time(struct device *dev, struct rtc_time *tm)
al_secs = rtc_tm_to_time64(&rtc->alarm_time);
/* handle the rtc synchronisation delay */
- if (rtc->rtc_sync == true && al_secs - tm_secs == 1)
+ if (rtc->rtc_sync && al_secs - tm_secs == 1)
memcpy(tm, &rtc->alarm_time, sizeof(struct rtc_time));
else
rtc->rtc_sync = false;
@@ -494,7 +494,7 @@ static int da9063_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static struct platform_driver da9063_rtc_driver = {
diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c
index 73f87a17cdf3..6bef0f2353da 100644
--- a/drivers/rtc/rtc-davinci.c
+++ b/drivers/rtc/rtc-davinci.c
@@ -484,7 +484,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
- return rtc_register_device(davinci_rtc->rtc);
+ return devm_rtc_register_device(davinci_rtc->rtc);
}
static int __exit davinci_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-digicolor.c b/drivers/rtc/rtc-digicolor.c
index 200d85b01e8b..4fdfa5b6feb2 100644
--- a/drivers/rtc/rtc-digicolor.c
+++ b/drivers/rtc/rtc-digicolor.c
@@ -202,7 +202,7 @@ static int __init dc_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &dc_rtc_ops;
rtc->rtc_dev->range_max = U32_MAX;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static const struct of_device_id dc_dt_ids[] = {
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index cd947a20843b..94fb16ac3e0f 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -132,7 +132,7 @@ static int dm355evm_rtc_probe(struct platform_device *pdev)
rtc->ops = &dm355evm_rtc_ops;
rtc->range_max = U32_MAX;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
/*
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index a3d790889eea..8c2ab29c3d91 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -694,12 +694,11 @@ static int ds1305_probe(struct spi_device *spi)
ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099;
ds1305_nvmem_cfg.priv = ds1305;
- ds1305->rtc->nvram_old_abi = true;
- status = rtc_register_device(ds1305->rtc);
+ status = devm_rtc_register_device(ds1305->rtc);
if (status)
return status;
- rtc_nvmem_register(ds1305->rtc, &ds1305_nvmem_cfg);
+ devm_rtc_nvmem_register(ds1305->rtc, &ds1305_nvmem_cfg);
/* Maybe set up alarm IRQ; be ready to handle it triggering right
* away. NOTE that we don't share this. The signal is active low,
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 9f5f54ca039d..183cf7c01364 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -8,12 +8,12 @@
* Copyright (C) 2012 Bertrand Achard (nvram access fixes)
*/
-#include <linux/acpi.h>
#include <linux/bcd.h>
#include <linux/i2c.h>
#include <linux/init.h>
+#include <linux/mod_devicetable.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/rtc/ds1307.h>
#include <linux/rtc.h>
#include <linux/slab.h>
@@ -31,6 +31,7 @@
* That's a natural job for a factory or repair bench.
*/
enum ds_type {
+ unknown_ds_type, /* always first and 0 */
ds_1307,
ds_1308,
ds_1337,
@@ -1090,7 +1091,6 @@ static const struct i2c_device_id ds1307_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ds1307_id);
-#ifdef CONFIG_OF
static const struct of_device_id ds1307_of_match[] = {
{
.compatible = "dallas,ds1307",
@@ -1167,32 +1167,6 @@ static const struct of_device_id ds1307_of_match[] = {
{ }
};
MODULE_DEVICE_TABLE(of, ds1307_of_match);
-#endif
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id ds1307_acpi_ids[] = {
- { .id = "DS1307", .driver_data = ds_1307 },
- { .id = "DS1308", .driver_data = ds_1308 },
- { .id = "DS1337", .driver_data = ds_1337 },
- { .id = "DS1338", .driver_data = ds_1338 },
- { .id = "DS1339", .driver_data = ds_1339 },
- { .id = "DS1388", .driver_data = ds_1388 },
- { .id = "DS1340", .driver_data = ds_1340 },
- { .id = "DS1341", .driver_data = ds_1341 },
- { .id = "DS3231", .driver_data = ds_3231 },
- { .id = "M41T0", .driver_data = m41t0 },
- { .id = "M41T00", .driver_data = m41t00 },
- { .id = "M41T11", .driver_data = m41t11 },
- { .id = "MCP7940X", .driver_data = mcp794xx },
- { .id = "MCP7941X", .driver_data = mcp794xx },
- { .id = "PT7C4338", .driver_data = ds_1307 },
- { .id = "RX8025", .driver_data = rx_8025 },
- { .id = "ISL12057", .driver_data = ds_1337 },
- { .id = "RX8130", .driver_data = rx_8130 },
- { }
-};
-MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids);
-#endif
/*
* The ds1337 and ds1339 both have two alarms, but we only use the first
@@ -1626,13 +1600,16 @@ static const struct clk_ops ds3231_clk_32khz_ops = {
.recalc_rate = ds3231_clk_32khz_recalc_rate,
};
+static const char *ds3231_clks_names[] = {
+ [DS3231_CLK_SQW] = "ds3231_clk_sqw",
+ [DS3231_CLK_32KHZ] = "ds3231_clk_32khz",
+};
+
static struct clk_init_data ds3231_clks_init[] = {
[DS3231_CLK_SQW] = {
- .name = "ds3231_clk_sqw",
.ops = &ds3231_clk_sqw_ops,
},
[DS3231_CLK_32KHZ] = {
- .name = "ds3231_clk_32khz",
.ops = &ds3231_clk_32khz_ops,
},
};
@@ -1653,6 +1630,11 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
if (!onecell->clks)
return -ENOMEM;
+ /* Optional override of the clock names */
+ device_property_read_string_array(ds1307->dev, "clock-output-names",
+ ds3231_clks_names,
+ ARRAY_SIZE(ds3231_clks_names));
+
for (i = 0; i < ARRAY_SIZE(ds3231_clks_init); i++) {
struct clk_init_data init = ds3231_clks_init[i];
@@ -1663,9 +1645,7 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
if (i == DS3231_CLK_SQW && test_bit(HAS_ALARM, &ds1307->flags))
continue;
- /* optional override of the clockname */
- of_property_read_string_index(node, "clock-output-names", i,
- &init.name);
+ init.name = ds3231_clks_names[i];
ds1307->clks[i].init = &init;
onecell->clks[i] = devm_clk_register(ds1307->dev,
@@ -1674,10 +1654,8 @@ static int ds3231_clks_register(struct ds1307 *ds1307)
return PTR_ERR(onecell->clks[i]);
}
- if (!node)
- return 0;
-
- of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
+ if (node)
+ of_clk_add_provider(node, of_clk_src_onecell_get, onecell);
return 0;
}
@@ -1761,6 +1739,7 @@ static int ds1307_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct ds1307 *ds1307;
+ const void *match;
int err = -ENODEV;
int tmp;
const struct chip_desc *chip;
@@ -1786,22 +1765,15 @@ static int ds1307_probe(struct i2c_client *client,
i2c_set_clientdata(client, ds1307);
- if (client->dev.of_node) {
- ds1307->type = (enum ds_type)
- of_device_get_match_data(&client->dev);
+ match = device_get_match_data(&client->dev);
+ if (match) {
+ ds1307->type = (enum ds_type)match;
chip = &chips[ds1307->type];
} else if (id) {
chip = &chips[id->driver_data];
ds1307->type = id->driver_data;
} else {
- const struct acpi_device_id *acpi_id;
-
- acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids),
- ds1307->dev);
- if (!acpi_id)
- return -ENODEV;
- chip = &chips[acpi_id->driver_data];
- ds1307->type = acpi_id->driver_data;
+ return -ENODEV;
}
want_irq = client->irq > 0 && chip->alarm;
@@ -1819,7 +1791,6 @@ static int ds1307_probe(struct i2c_client *client,
trickle_charger_setup);
}
-#ifdef CONFIG_OF
/*
* For devices with no IRQ directly connected to the SoC, the RTC chip
* can be forced as a wakeup source by stating that explicitly in
@@ -1828,10 +1799,8 @@ static int ds1307_probe(struct i2c_client *client,
* This will guarantee the 'wakealarm' sysfs entry is available on the device,
* if supported by the RTC.
*/
- if (chip->alarm && of_property_read_bool(client->dev.of_node,
- "wakeup-source"))
+ if (chip->alarm && device_property_read_bool(&client->dev, "wakeup-source"))
ds1307_can_wakeup_device = true;
-#endif
switch (ds1307->type) {
case ds_1337:
@@ -2032,7 +2001,7 @@ static int ds1307_probe(struct i2c_client *client,
if (err)
return err;
- err = rtc_register_device(ds1307->rtc);
+ err = devm_rtc_register_device(ds1307->rtc);
if (err)
return err;
@@ -2047,8 +2016,7 @@ static int ds1307_probe(struct i2c_client *client,
.priv = ds1307,
};
- ds1307->rtc->nvram_old_abi = true;
- rtc_nvmem_register(ds1307->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(ds1307->rtc, &nvmem_cfg);
}
ds1307_hwmon_register(ds1307);
@@ -2064,8 +2032,7 @@ exit:
static struct i2c_driver ds1307_driver = {
.driver = {
.name = "rtc-ds1307",
- .of_match_table = of_match_ptr(ds1307_of_match),
- .acpi_match_table = ACPI_PTR(ds1307_acpi_ids),
+ .of_match_table = ds1307_of_match,
},
.probe = ds1307_probe,
.id_table = ds1307_id,
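With the ACPI id table dropped and the OF table no longer hidden behind CONFIG_OF, the match logic above reduces to device_get_match_data(), which resolves driver data from either an OF or an ACPI firmware node. The resulting shape, sketched with hypothetical foo_* names:

static int foo_probe(struct i2c_client *client,
		     const struct i2c_device_id *id)
{
	const void *match = device_get_match_data(&client->dev);
	enum foo_type type;

	if (match)
		type = (uintptr_t)match;  /* OF or ACPI fwnode match */
	else if (id)
		type = id->driver_data;   /* plain I2C id-table match */
	else
		return -ENODEV;
	/* ... */
	return 0;
}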
diff --git a/drivers/rtc/rtc-ds1343.c b/drivers/rtc/rtc-ds1343.c
index ba143423875b..f14ed6c96437 100644
--- a/drivers/rtc/rtc-ds1343.c
+++ b/drivers/rtc/rtc-ds1343.c
@@ -399,7 +399,6 @@ static int ds1343_probe(struct spi_device *spi)
if (IS_ERR(priv->rtc))
return PTR_ERR(priv->rtc);
- priv->rtc->nvram_old_abi = true;
priv->rtc->ops = &ds1343_rtc_ops;
priv->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
priv->rtc->range_max = RTC_TIMESTAMP_END_2099;
@@ -409,12 +408,12 @@ static int ds1343_probe(struct spi_device *spi)
dev_err(&spi->dev,
"unable to create sysfs entries for rtc ds1343\n");
- res = rtc_register_device(priv->rtc);
+ res = devm_rtc_register_device(priv->rtc);
if (res)
return res;
nvmem_cfg.priv = priv;
- rtc_nvmem_register(priv->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(priv->rtc, &nvmem_cfg);
priv->irq = spi->irq;
diff --git a/drivers/rtc/rtc-ds1347.c b/drivers/rtc/rtc-ds1347.c
index 7025cf3fb9f8..157bf5209ac4 100644
--- a/drivers/rtc/rtc-ds1347.c
+++ b/drivers/rtc/rtc-ds1347.c
@@ -166,7 +166,7 @@ static int ds1347_probe(struct spi_device *spi)
rtc->range_min = RTC_TIMESTAMP_BEGIN_0000;
rtc->range_max = RTC_TIMESTAMP_END_9999;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct spi_driver ds1347_driver = {
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 177d870bda0d..fab79921a712 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -508,7 +508,7 @@ static int ds1374_probe(struct i2c_client *client,
ds1374->rtc->ops = &ds1374_rtc_ops;
ds1374->rtc->range_max = U32_MAX;
- ret = rtc_register_device(ds1374->rtc);
+ ret = devm_rtc_register_device(ds1374->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index a63872c4c76d..bda884333082 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -466,13 +466,11 @@ static int ds1511_rtc_probe(struct platform_device *pdev)
pdata->rtc->ops = &ds1511_rtc_ops;
- pdata->rtc->nvram_old_abi = true;
-
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret)
return ret;
- rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
+ devm_rtc_nvmem_register(pdata->rtc, &ds1511_nvmem_cfg);
/*
* if the platform has an interrupt in mind for this device,
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index cdf5e05b9489..dbff5b621ef5 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -294,9 +294,8 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
return PTR_ERR(pdata->rtc);
pdata->rtc->ops = &ds1553_rtc_ops;
- pdata->rtc->nvram_old_abi = true;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret)
return ret;
@@ -310,8 +309,7 @@ static int ds1553_rtc_probe(struct platform_device *pdev)
}
}
- if (rtc_nvmem_register(pdata->rtc, &nvmem_cfg))
- dev_err(&pdev->dev, "unable to register nvmem\n");
+ devm_rtc_nvmem_register(pdata->rtc, &nvmem_cfg);
return 0;
}
diff --git a/drivers/rtc/rtc-ds1672.c b/drivers/rtc/rtc-ds1672.c
index 9da84df9f152..630493759d15 100644
--- a/drivers/rtc/rtc-ds1672.c
+++ b/drivers/rtc/rtc-ds1672.c
@@ -124,7 +124,7 @@ static int ds1672_probe(struct i2c_client *client,
rtc->ops = &ds1672_rtc_ops;
rtc->range_max = U32_MAX;
- err = rtc_register_device(rtc);
+ err = devm_rtc_register_device(rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
index dfbd7b88b2b9..d69c807af29b 100644
--- a/drivers/rtc/rtc-ds1685.c
+++ b/drivers/rtc/rtc-ds1685.c
@@ -1316,13 +1316,12 @@ ds1685_rtc_probe(struct platform_device *pdev)
if (ret)
return ret;
- rtc_dev->nvram_old_abi = true;
nvmem_cfg.priv = rtc;
- ret = rtc_nvmem_register(rtc_dev, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(rtc_dev, &nvmem_cfg);
if (ret)
return ret;
- return rtc_register_device(rtc_dev);
+ return devm_rtc_register_device(rtc_dev);
}
/**
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index 2b949f0dbaa9..13d45c697da6 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -190,14 +190,12 @@ static int ds1742_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc);
rtc->ops = &ds1742_rtc_ops;
- rtc->nvram_old_abi = true;
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
- if (rtc_nvmem_register(rtc, &nvmem_cfg))
- dev_err(&pdev->dev, "Unable to register nvmem\n");
+ devm_rtc_nvmem_register(rtc, &nvmem_cfg);
return 0;
}
diff --git a/drivers/rtc/rtc-ds2404.c b/drivers/rtc/rtc-ds2404.c
index 9df0c44512b8..0480f592307e 100644
--- a/drivers/rtc/rtc-ds2404.c
+++ b/drivers/rtc/rtc-ds2404.c
@@ -234,7 +234,7 @@ static int rtc_probe(struct platform_device *pdev)
chip->rtc->ops = &ds2404_rtc_ops;
chip->rtc->range_max = U32_MAX;
- retval = rtc_register_device(chip->rtc);
+ retval = devm_rtc_register_device(chip->rtc);
if (retval)
return retval;
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 69c37ab64352..16b89035d135 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -518,7 +518,7 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq,
if (IS_ERR(ds3232->rtc))
return PTR_ERR(ds3232->rtc);
- ret = rtc_nvmem_register(ds3232->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(ds3232->rtc, &nvmem_cfg);
if(ret)
return ret;
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 8ec9ea1ca72e..acae7f16808f 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -33,7 +33,7 @@ struct ep93xx_rtc {
static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
unsigned short *delete)
{
- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
unsigned long comp;
comp = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_SWCOMP);
@@ -51,7 +51,7 @@ static int ep93xx_rtc_get_swcomp(struct device *dev, unsigned short *preload,
static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
unsigned long time;
time = readl(ep93xx_rtc->mmio_base + EP93XX_RTC_DATA);
@@ -62,7 +62,7 @@ static int ep93xx_rtc_read_time(struct device *dev, struct rtc_time *tm)
static int ep93xx_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
- struct ep93xx_rtc *ep93xx_rtc = dev_get_platdata(dev);
+ struct ep93xx_rtc *ep93xx_rtc = dev_get_drvdata(dev);
unsigned long secs = rtc_tm_to_time64(tm);
writel(secs + 1, ep93xx_rtc->mmio_base + EP93XX_RTC_LOAD);
@@ -145,7 +145,7 @@ static int ep93xx_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(ep93xx_rtc->rtc);
+ return devm_rtc_register_device(ep93xx_rtc->rtc);
}
static struct platform_driver ep93xx_rtc_driver = {
diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c
index 48d3b38ea348..57cc09d0a806 100644
--- a/drivers/rtc/rtc-fsl-ftm-alarm.c
+++ b/drivers/rtc/rtc-fsl-ftm-alarm.c
@@ -290,7 +290,7 @@ static int ftm_rtc_probe(struct platform_device *pdev)
if (ret)
dev_err(&pdev->dev, "failed to enable irq wake\n");
- ret = rtc_register_device(rtc->rtc_dev);
+ ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret) {
dev_err(&pdev->dev, "can't register rtc device\n");
return ret;
diff --git a/drivers/rtc/rtc-ftrtc010.c b/drivers/rtc/rtc-ftrtc010.c
index 0919f7dc94a3..ad3add5db4c8 100644
--- a/drivers/rtc/rtc-ftrtc010.c
+++ b/drivers/rtc/rtc-ftrtc010.c
@@ -176,7 +176,7 @@ static int ftrtc010_rtc_probe(struct platform_device *pdev)
if (unlikely(ret))
return ret;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
static int ftrtc010_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
index 6349d2cd3680..7ab95d052644 100644
--- a/drivers/rtc/rtc-goldfish.c
+++ b/drivers/rtc/rtc-goldfish.c
@@ -194,7 +194,7 @@ static int goldfish_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(rtcdrv->rtc);
+ return devm_rtc_register_device(rtcdrv->rtc);
}
static const struct of_device_id goldfish_rtc_of_match[] = {
diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
index 0fb79c4afb46..24e0095be058 100644
--- a/drivers/rtc/rtc-hym8563.c
+++ b/drivers/rtc/rtc-hym8563.c
@@ -527,8 +527,6 @@ static int hym8563_probe(struct i2c_client *client,
hym8563->client = client;
i2c_set_clientdata(client, hym8563);
- device_set_wakeup_capable(&client->dev, true);
-
ret = hym8563_init_device(client);
if (ret) {
dev_err(&client->dev, "could not init device, %d\n", ret);
@@ -547,6 +545,11 @@ static int hym8563_probe(struct i2c_client *client,
}
}
+ if (client->irq > 0 ||
+ device_property_read_bool(&client->dev, "wakeup-source")) {
+ device_init_wakeup(&client->dev, true);
+ }
+
/* check state of calendar information */
ret = i2c_smbus_read_byte_data(client, HYM8563_SEC);
if (ret < 0)
diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c
index a5f59e6f862e..cc9fbab49999 100644
--- a/drivers/rtc/rtc-imx-sc.c
+++ b/drivers/rtc/rtc-imx-sc.c
@@ -166,7 +166,7 @@ static int imx_sc_rtc_probe(struct platform_device *pdev)
imx_sc_rtc->range_min = 0;
imx_sc_rtc->range_max = U32_MAX;
- ret = rtc_register_device(imx_sc_rtc);
+ ret = devm_rtc_register_device(imx_sc_rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index 8d141d8a5490..c2692da74e09 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -814,7 +814,7 @@ static int __init dryice_rtc_probe(struct platform_device *pdev)
imxdi->rtc->ops = &dryice_rtc_ops;
imxdi->rtc->range_max = U32_MAX;
- rc = rtc_register_device(imxdi->rtc);
+ rc = devm_rtc_register_device(imxdi->rtc);
if (rc)
goto err;
diff --git a/drivers/rtc/rtc-isl12026.c b/drivers/rtc/rtc-isl12026.c
index 5b6b17fb6d62..1fc6627d854d 100644
--- a/drivers/rtc/rtc-isl12026.c
+++ b/drivers/rtc/rtc-isl12026.c
@@ -465,11 +465,11 @@ static int isl12026_probe_new(struct i2c_client *client)
priv->rtc->ops = &isl12026_rtc_ops;
nvm_cfg.priv = priv;
- ret = rtc_nvmem_register(priv->rtc, &nvm_cfg);
+ ret = devm_rtc_nvmem_register(priv->rtc, &nvm_cfg);
if (ret)
return ret;
- return rtc_register_device(priv->rtc);
+ return devm_rtc_register_device(priv->rtc);
}
static int isl12026_remove(struct i2c_client *client)
diff --git a/drivers/rtc/rtc-isl1208.c b/drivers/rtc/rtc-isl1208.c
index ebb691fa48a6..563a6d9c9fcf 100644
--- a/drivers/rtc/rtc-isl1208.c
+++ b/drivers/rtc/rtc-isl1208.c
@@ -890,11 +890,11 @@ isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (rc)
return rc;
- rc = rtc_nvmem_register(isl1208->rtc, &isl1208->nvmem_config);
+ rc = devm_rtc_nvmem_register(isl1208->rtc, &isl1208->nvmem_config);
if (rc)
return rc;
- return rtc_register_device(isl1208->rtc);
+ return devm_rtc_register_device(isl1208->rtc);
}
static struct i2c_driver isl1208_driver = {
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 9607e6b6e0b3..6e51df72fd65 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -375,7 +375,7 @@ static int jz4740_rtc_probe(struct platform_device *pdev)
/* Each 1 Hz pulse should happen after (rate) ticks */
jz4740_rtc_reg_write(rtc, JZ_REG_RTC_REGULATOR, rate - 1);
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 15d8abda81fe..76ad7031a13d 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -239,7 +239,7 @@ static int lpc32xx_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &lpc32xx_rtc_ops;
rtc->rtc->range_max = U32_MAX;
- err = rtc_register_device(rtc->rtc);
+ err = devm_rtc_register_device(rtc->rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-ls1x.c b/drivers/rtc/rtc-ls1x.c
index 8bd34056fea0..5af26dc5c2a3 100644
--- a/drivers/rtc/rtc-ls1x.c
+++ b/drivers/rtc/rtc-ls1x.c
@@ -176,7 +176,7 @@ static int ls1x_rtc_probe(struct platform_device *pdev)
rtcdev->range_min = RTC_TIMESTAMP_BEGIN_1900;
rtcdev->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(rtcdev);
+ return devm_rtc_register_device(rtcdev);
}
static struct platform_driver ls1x_rtc_driver = {
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 8a89bc52b0d4..160dcf68e64e 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -977,7 +977,7 @@ static int m41t80_probe(struct i2c_client *client,
m41t80_sqw_register_clk(m41t80_data);
#endif
- rc = rtc_register_device(m41t80_data->rtc);
+ rc = devm_rtc_register_device(m41t80_data->rtc);
if (rc)
return rc;
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 67e218758a8b..5f5898d3b055 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -463,15 +463,14 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
if (IS_ERR(m48t59->rtc))
return PTR_ERR(m48t59->rtc);
- m48t59->rtc->nvram_old_abi = true;
m48t59->rtc->ops = ops;
nvmem_cfg.size = pdata->offset;
- ret = rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(m48t59->rtc, &nvmem_cfg);
if (ret)
return ret;
- ret = rtc_register_device(m48t59->rtc);
+ ret = devm_rtc_register_device(m48t59->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index 75a0e73071d8..481c9525b1dd 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -254,13 +254,12 @@ static int m48t86_rtc_probe(struct platform_device *pdev)
return PTR_ERR(info->rtc);
info->rtc->ops = &m48t86_rtc_ops;
- info->rtc->nvram_old_abi = true;
- err = rtc_register_device(info->rtc);
+ err = devm_rtc_register_device(info->rtc);
if (err)
return err;
- rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
+ devm_rtc_nvmem_register(info->rtc, &m48t86_nvmem_cfg);
/* read battery status */
reg = m48t86_readb(&pdev->dev, M48T86_D);
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index d6802e6191cb..d4234e78497e 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -307,7 +307,7 @@ static int __init mc13xxx_rtc_probe(struct platform_device *pdev)
mc13xxx_unlock(mc13xxx);
- ret = rtc_register_device(priv->rtc);
+ ret = devm_rtc_register_device(priv->rtc);
if (ret) {
mc13xxx_lock(mc13xxx);
goto err_irq_request;
diff --git a/drivers/rtc/rtc-meson-vrtc.c b/drivers/rtc/rtc-meson-vrtc.c
index e6bd0808a092..1463c8621561 100644
--- a/drivers/rtc/rtc-meson-vrtc.c
+++ b/drivers/rtc/rtc-meson-vrtc.c
@@ -83,7 +83,7 @@ static int meson_vrtc_probe(struct platform_device *pdev)
return PTR_ERR(vrtc->rtc);
vrtc->rtc->ops = &meson_vrtc_ops;
- return rtc_register_device(vrtc->rtc);
+ return devm_rtc_register_device(vrtc->rtc);
}
static int __maybe_unused meson_vrtc_suspend(struct device *dev)
diff --git a/drivers/rtc/rtc-meson.c b/drivers/rtc/rtc-meson.c
index 47ebcf834cc2..8642c06565ea 100644
--- a/drivers/rtc/rtc-meson.c
+++ b/drivers/rtc/rtc-meson.c
@@ -365,11 +365,11 @@ static int meson_rtc_probe(struct platform_device *pdev)
}
meson_rtc_nvmem_config.priv = rtc;
- ret = rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
+ ret = devm_rtc_nvmem_register(rtc->rtc, &meson_rtc_nvmem_config);
if (ret)
goto out_disable_vdd;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto out_disable_vdd;
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index 5c2ce71aa044..bb2ea9bc56f2 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -371,7 +371,7 @@ static int mpc5121_rtc_probe(struct platform_device *op)
rtc->rtc->range_max = U32_MAX;
}
- err = rtc_register_device(rtc->rtc);
+ err = devm_rtc_register_device(rtc->rtc);
if (err)
goto out_dispose2;
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 17bf5394e1e5..421b3b6071b6 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -361,7 +361,7 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
}
}
- retval = rtc_register_device(mrst_rtc.rtc);
+ retval = devm_rtc_register_device(mrst_rtc.rtc);
if (retval)
goto cleanup0;
diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c
index d5f691c8a035..cd92a9788351 100644
--- a/drivers/rtc/rtc-mt2712.c
+++ b/drivers/rtc/rtc-mt2712.c
@@ -352,7 +352,7 @@ static int mt2712_rtc_probe(struct platform_device *pdev)
mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127;
- return rtc_register_device(mt2712_rtc->rtc);
+ return devm_rtc_register_device(mt2712_rtc->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-mt6397.c b/drivers/rtc/rtc-mt6397.c
index 1894aded4c85..6655035e5164 100644
--- a/drivers/rtc/rtc-mt6397.c
+++ b/drivers/rtc/rtc-mt6397.c
@@ -301,7 +301,7 @@ static int mtk_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->ops = &mtk_rtc_ops;
- return rtc_register_device(rtc->rtc_dev);
+ return devm_rtc_register_device(rtc->rtc_dev);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index d5f190e578e4..f8e2ecea1d8d 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -278,7 +278,7 @@ static int __init mv_rtc_probe(struct platform_device *pdev)
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (!ret)
return 0;
out:
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index a8cfbde048f4..65b29b0fa548 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -70,27 +70,12 @@ struct rtc_plat_data {
enum imx_rtc_type devtype;
};
-static const struct platform_device_id imx_rtc_devtype[] = {
- {
- .name = "imx1-rtc",
- .driver_data = IMX1_RTC,
- }, {
- .name = "imx21-rtc",
- .driver_data = IMX21_RTC,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(platform, imx_rtc_devtype);
-
-#ifdef CONFIG_OF
static const struct of_device_id imx_rtc_dt_ids[] = {
{ .compatible = "fsl,imx1-rtc", .data = (const void *)IMX1_RTC },
{ .compatible = "fsl,imx21-rtc", .data = (const void *)IMX21_RTC },
{}
};
MODULE_DEVICE_TABLE(of, imx_rtc_dt_ids);
-#endif
static inline int is_imx1_rtc(struct rtc_plat_data *data)
{
@@ -322,17 +307,12 @@ static int mxc_rtc_probe(struct platform_device *pdev)
u32 reg;
unsigned long rate;
int ret;
- const struct of_device_id *of_id;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return -ENOMEM;
- of_id = of_match_device(imx_rtc_dt_ids, &pdev->dev);
- if (of_id)
- pdata->devtype = (enum imx_rtc_type)of_id->data;
- else
- pdata->devtype = pdev->id_entry->driver_data;
+ pdata->devtype = (enum imx_rtc_type)of_device_get_match_data(&pdev->dev);
pdata->ioaddr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pdata->ioaddr))
@@ -428,7 +408,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to enable irq wake\n");
}
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
return ret;
}
@@ -438,7 +418,6 @@ static struct platform_driver mxc_rtc_driver = {
.name = "mxc_rtc",
.of_match_table = of_match_ptr(imx_rtc_dt_ids),
},
- .id_table = imx_rtc_devtype,
.probe = mxc_rtc_probe,
};
diff --git a/drivers/rtc/rtc-mxc_v2.c b/drivers/rtc/rtc-mxc_v2.c
index 91534560fe2a..0d73f6f0cf9e 100644
--- a/drivers/rtc/rtc-mxc_v2.c
+++ b/drivers/rtc/rtc-mxc_v2.c
@@ -354,7 +354,7 @@ static int mxc_rtc_probe(struct platform_device *pdev)
return ret;
}
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret < 0)
clk_unprepare(pdata->clk);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index c20fc7937dfa..dc7db2477f88 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -879,18 +879,18 @@ static int omap_rtc_probe(struct platform_device *pdev)
/* Support ext_wakeup pinconf */
rtc_pinctrl_desc.name = dev_name(&pdev->dev);
- rtc->pctldev = pinctrl_register(&rtc_pinctrl_desc, &pdev->dev, rtc);
+ rtc->pctldev = devm_pinctrl_register(&pdev->dev, &rtc_pinctrl_desc, rtc);
if (IS_ERR(rtc->pctldev)) {
dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
ret = PTR_ERR(rtc->pctldev);
goto err;
}
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
- goto err_deregister_pinctrl;
+ goto err;
- rtc_nvmem_register(rtc->rtc, &omap_rtc_nvmem_config);
+ devm_rtc_nvmem_register(rtc->rtc, &omap_rtc_nvmem_config);
if (rtc->is_pmic_controller) {
if (!pm_power_off) {
@@ -901,8 +901,6 @@ static int omap_rtc_probe(struct platform_device *pdev)
return 0;
-err_deregister_pinctrl:
- pinctrl_unregister(rtc->pctldev);
err:
clk_disable_unprepare(rtc->clk);
device_init_wakeup(&pdev->dev, false);
@@ -945,9 +943,6 @@ static int omap_rtc_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- /* Remove ext_wakeup pinconf */
- pinctrl_unregister(rtc->pctldev);
-
return 0;
}
diff --git a/drivers/rtc/rtc-pcap.c b/drivers/rtc/rtc-pcap.c
index 178bfb1dea21..8c7a98a5452c 100644
--- a/drivers/rtc/rtc-pcap.c
+++ b/drivers/rtc/rtc-pcap.c
@@ -163,7 +163,7 @@ static int __init pcap_rtc_probe(struct platform_device *pdev)
if (err)
return err;
- return rtc_register_device(pcap_rtc->rtc);
+ return devm_rtc_register_device(pcap_rtc->rtc);
}
static int __exit pcap_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index c3691fa4210e..534ffc91eec1 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -434,7 +434,7 @@ static int pcf2123_probe(struct spi_device *spi)
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->set_start_time = true;
- ret = rtc_register_device(rtc);
+ ret = devm_rtc_register_device(rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c
index 07a5630ec841..39a7b5116aa4 100644
--- a/drivers/rtc/rtc-pcf2127.c
+++ b/drivers/rtc/rtc-pcf2127.c
@@ -243,10 +243,8 @@ static int pcf2127_nvmem_read(void *priv, unsigned int offset,
if (ret)
return ret;
- ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD,
- val, bytes);
-
- return ret ?: bytes;
+ return regmap_bulk_read(pcf2127->regmap, PCF2127_REG_RAM_RD_CMD,
+ val, bytes);
}
static int pcf2127_nvmem_write(void *priv, unsigned int offset,
@@ -261,10 +259,8 @@ static int pcf2127_nvmem_write(void *priv, unsigned int offset,
if (ret)
return ret;
- ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD,
- val, bytes);
-
- return ret ?: bytes;
+ return regmap_bulk_write(pcf2127->regmap, PCF2127_REG_RAM_WRT_CMD,
+ val, bytes);
}
/* watchdog driver */
@@ -335,6 +331,37 @@ static const struct watchdog_ops pcf2127_watchdog_ops = {
.set_timeout = pcf2127_wdt_set_timeout,
};
+static int pcf2127_watchdog_init(struct device *dev, struct pcf2127 *pcf2127)
+{
+ u32 wdd_timeout;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_WATCHDOG) ||
+ !device_property_read_bool(dev, "reset-source"))
+ return 0;
+
+ pcf2127->wdd.parent = dev;
+ pcf2127->wdd.info = &pcf2127_wdt_info;
+ pcf2127->wdd.ops = &pcf2127_watchdog_ops;
+ pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
+ pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
+ pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
+ pcf2127->wdd.min_hw_heartbeat_ms = 500;
+ pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
+
+ watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
+
+ /* Check whether the bootloader already started the watchdog timer */
+ ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
+ if (ret)
+ return ret;
+
+ if (wdd_timeout)
+ set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
+
+ return devm_watchdog_register_device(dev, &pcf2127->wdd);
+}
+
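Factoring the watchdog setup into pcf2127_watchdog_init() also replaces the old #ifdef CONFIG_WATCHDOG block with an IS_ENABLED() check, so the code is always compiled and type-checked while the optimizer drops it when watchdog support is off. The general idiom:

	if (!IS_ENABLED(CONFIG_WATCHDOG))  /* compile-time constant */
		return 0;                  /* code below is elided when off */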
/* Alarm */
static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
@@ -536,7 +563,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
int alarm_irq, const char *name, bool has_nvmem)
{
struct pcf2127 *pcf2127;
- u32 wdd_timeout;
int ret = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -575,17 +601,6 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops;
}
- pcf2127->wdd.parent = dev;
- pcf2127->wdd.info = &pcf2127_wdt_info;
- pcf2127->wdd.ops = &pcf2127_watchdog_ops;
- pcf2127->wdd.min_timeout = PCF2127_WD_VAL_MIN;
- pcf2127->wdd.max_timeout = PCF2127_WD_VAL_MAX;
- pcf2127->wdd.timeout = PCF2127_WD_VAL_DEFAULT;
- pcf2127->wdd.min_hw_heartbeat_ms = 500;
- pcf2127->wdd.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
-
- watchdog_set_drvdata(&pcf2127->wdd, pcf2127);
-
if (has_nvmem) {
struct nvmem_config nvmem_cfg = {
.priv = pcf2127,
@@ -594,7 +609,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
.size = 512,
};
- ret = rtc_nvmem_register(pcf2127->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(pcf2127->rtc, &nvmem_cfg);
}
/*
@@ -615,19 +630,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- /* Test if watchdog timer is started by bootloader */
- ret = regmap_read(pcf2127->regmap, PCF2127_REG_WD_VAL, &wdd_timeout);
- if (ret)
- return ret;
-
- if (wdd_timeout)
- set_bit(WDOG_HW_RUNNING, &pcf2127->wdd.status);
-
-#ifdef CONFIG_WATCHDOG
- ret = devm_watchdog_register_device(dev, &pcf2127->wdd);
- if (ret)
- return ret;
-#endif /* CONFIG_WATCHDOG */
+ pcf2127_watchdog_init(dev, pcf2127);
/*
* Disable battery low/switch-over timestamp and interrupts.
@@ -680,7 +683,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap,
return ret;
}
- return rtc_register_device(pcf2127->rtc);
+ return devm_rtc_register_device(pcf2127->rtc);
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c
index f8b99cb72959..e19cf2adbc35 100644
--- a/drivers/rtc/rtc-pcf85063.c
+++ b/drivers/rtc/rtc-pcf85063.c
@@ -607,14 +607,14 @@ static int pcf85063_probe(struct i2c_client *client)
}
nvmem_cfg.priv = pcf85063->regmap;
- rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg);
#ifdef CONFIG_COMMON_CLK
/* register clk in common clk framework */
pcf85063_clkout_register_clk(pcf85063);
#endif
- return rtc_register_device(pcf85063->rtc);
+ return devm_rtc_register_device(pcf85063->rtc);
}
#ifdef CONFIG_OF
diff --git a/drivers/rtc/rtc-pcf8523.c b/drivers/rtc/rtc-pcf8523.c
index 57d351dfe272..5e1e7b2a8c9a 100644
--- a/drivers/rtc/rtc-pcf8523.c
+++ b/drivers/rtc/rtc-pcf8523.c
@@ -12,18 +12,18 @@
#define DRIVER_NAME "rtc-pcf8523"
#define REG_CONTROL1 0x00
-#define REG_CONTROL1_CAP_SEL (1 << 7)
-#define REG_CONTROL1_STOP (1 << 5)
+#define REG_CONTROL1_CAP_SEL BIT(7)
+#define REG_CONTROL1_STOP BIT(5)
#define REG_CONTROL3 0x02
-#define REG_CONTROL3_PM_BLD (1 << 7) /* battery low detection disabled */
-#define REG_CONTROL3_PM_VDD (1 << 6) /* switch-over disabled */
-#define REG_CONTROL3_PM_DSM (1 << 5) /* direct switching mode */
+#define REG_CONTROL3_PM_BLD BIT(7) /* battery low detection disabled */
+#define REG_CONTROL3_PM_VDD BIT(6) /* switch-over disabled */
+#define REG_CONTROL3_PM_DSM BIT(5) /* direct switching mode */
#define REG_CONTROL3_PM_MASK 0xe0
-#define REG_CONTROL3_BLF (1 << 2) /* battery low bit, read-only */
+#define REG_CONTROL3_BLF BIT(2) /* battery low bit, read-only */
#define REG_SECONDS 0x03
-#define REG_SECONDS_OS (1 << 7)
+#define REG_SECONDS_OS BIT(7)
#define REG_MINUTES 0x04
#define REG_HOURS 0x05
@@ -226,17 +226,6 @@ static int pcf8523_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 regs[8];
int err;
- /*
- * The hardware can only store values between 0 and 99 in it's YEAR
- * register (with 99 overflowing to 0 on increment).
- * After 2100-02-28 we could start interpreting the year to be in the
- * interval [2100, 2199], but there is no path to switch in a smooth way
- * because the chip handles YEAR=0x00 (and the out-of-spec
- * YEAR=0xa0) as a leap year, but 2100 isn't.
- */
- if (tm->tm_year < 100 || tm->tm_year >= 200)
- return -EINVAL;
-
err = pcf8523_stop_rtc(client);
if (err < 0)
return err;
@@ -356,12 +345,15 @@ static int pcf8523_probe(struct i2c_client *client,
if (err < 0)
return err;
- rtc = devm_rtc_device_register(&client->dev, DRIVER_NAME,
- &pcf8523_rtc_ops, THIS_MODULE);
+ rtc = devm_rtc_allocate_device(&client->dev);
if (IS_ERR(rtc))
return PTR_ERR(rtc);
- return 0;
+ rtc->ops = &pcf8523_rtc_ops;
+ rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
+ rtc->range_max = RTC_TIMESTAMP_END_2099;
+
+ return devm_rtc_register_device(rtc);
}
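The year-range check deleted from pcf8523_rtc_set_time() is not lost: with range_min/range_max filled in at probe time, the RTC core rejects out-of-range times before the driver's set_time callback runs. Roughly (a sketch of the core-side validation, not driver code):

	/* approximately what the rtc core checks in rtc_set_time() */
	time64_t secs = rtc_tm_to_time64(tm);

	if (secs < rtc->range_min || secs > rtc->range_max)
		return -ERANGE;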
static const struct i2c_device_id pcf8523_id[] = {
diff --git a/drivers/rtc/rtc-pcf85363.c b/drivers/rtc/rtc-pcf85363.c
index 3450d615974d..a574c8d15a5c 100644
--- a/drivers/rtc/rtc-pcf85363.c
+++ b/drivers/rtc/rtc-pcf85363.c
@@ -418,11 +418,11 @@ static int pcf85363_probe(struct i2c_client *client,
pcf85363->rtc->ops = &rtc_ops_alarm;
}
- ret = rtc_register_device(pcf85363->rtc);
+ ret = devm_rtc_register_device(pcf85363->rtc);
for (i = 0; i < config->num_nvram; i++) {
nvmem_cfg[i].priv = pcf85363;
- rtc_nvmem_register(pcf85363->rtc, &nvmem_cfg[i]);
+ devm_rtc_nvmem_register(pcf85363->rtc, &nvmem_cfg[i]);
}
return ret;
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 2dc30eafa639..de3e6c355f2e 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -582,7 +582,7 @@ static int pcf8563_probe(struct i2c_client *client,
}
}
- err = rtc_register_device(pcf8563->rtc);
+ err = devm_rtc_register_device(pcf8563->rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-pic32.c b/drivers/rtc/rtc-pic32.c
index 2b6946744654..7fb9145c43bd 100644
--- a/drivers/rtc/rtc-pic32.c
+++ b/drivers/rtc/rtc-pic32.c
@@ -338,7 +338,7 @@ static int pic32_rtc_probe(struct platform_device *pdev)
pdata->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
pdata->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret)
goto err_nortc;
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index ebe03eba8f5f..5a880516f3e8 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -121,7 +121,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
if (ret)
goto err_irq;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto err_reg;
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index c6b89273feba..224bbf096262 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -361,14 +361,16 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
device_init_wakeup(&adev->dev, true);
ldata->rtc = devm_rtc_allocate_device(&adev->dev);
- if (IS_ERR(ldata->rtc))
- return PTR_ERR(ldata->rtc);
+ if (IS_ERR(ldata->rtc)) {
+ ret = PTR_ERR(ldata->rtc);
+ goto out;
+ }
ldata->rtc->ops = ops;
ldata->rtc->range_min = vendor->range_min;
ldata->rtc->range_max = vendor->range_max;
- ret = rtc_register_device(ldata->rtc);
+ ret = devm_rtc_register_device(ldata->rtc);
if (ret)
goto out;
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index b45ee2cb2c04..0d9dd6faabba 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -508,7 +508,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
return rc;
}
- return rtc_register_device(rtc_dd->rtc);
+ return devm_rtc_register_device(rtc_dd->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-ps3.c b/drivers/rtc/rtc-ps3.c
index f0336d691e6c..6b098734c715 100644
--- a/drivers/rtc/rtc-ps3.c
+++ b/drivers/rtc/rtc-ps3.c
@@ -56,7 +56,7 @@ static int __init ps3_rtc_probe(struct platform_device *dev)
platform_set_drvdata(dev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver ps3_rtc_driver = {
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 7ceb968f0e44..60a3c3d7499b 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -127,7 +127,7 @@ static int r9701_probe(struct spi_device *spi)
rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rtc->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct spi_driver r9701_driver = {
diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c
index dd1a20977478..e73102a39f1b 100644
--- a/drivers/rtc/rtc-rc5t619.c
+++ b/drivers/rtc/rtc-rc5t619.c
@@ -426,7 +426,7 @@ static int rc5t619_rtc_probe(struct platform_device *pdev)
dev_warn(&pdev->dev, "rc5t619 interrupt is disabled\n");
}
- return rtc_register_device(rtc->rtc);
+ return devm_rtc_register_device(rtc->rtc);
}
static struct platform_driver rc5t619_rtc_driver = {
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index c0334c602e88..e920da8c08da 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -447,7 +447,7 @@ static int rk808_rtc_probe(struct platform_device *pdev)
return ret;
}
- return rtc_register_device(rk808_rtc->rtc);
+ return devm_rtc_register_device(rk808_rtc->rtc);
}
static struct platform_driver rk808_rtc_driver = {
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 8776eadbdd3a..44afa6d996e7 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -251,16 +251,15 @@ static int __init rp5c01_rtc_probe(struct platform_device *dev)
return PTR_ERR(rtc);
rtc->ops = &rp5c01_rtc_ops;
- rtc->nvram_old_abi = true;
priv->rtc = rtc;
nvmem_cfg.priv = priv;
- error = rtc_nvmem_register(rtc, &nvmem_cfg);
+ error = devm_rtc_nvmem_register(rtc, &nvmem_cfg);
if (error)
return error;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver rp5c01_rtc_driver = {
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 47c13678449e..fec633f80789 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -197,7 +197,7 @@ static int rs5c348_probe(struct spi_device *spi)
rtc->ops = &rs5c348_rtc_ops;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct spi_driver rs5c348_driver = {
diff --git a/drivers/rtc/rtc-rv3028.c b/drivers/rtc/rtc-rv3028.c
index fa226f0fe67d..979407a51c7a 100644
--- a/drivers/rtc/rtc-rv3028.c
+++ b/drivers/rtc/rtc-rv3028.c
@@ -886,14 +886,14 @@ static int rv3028_probe(struct i2c_client *client)
rv3028->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3028->rtc->range_max = RTC_TIMESTAMP_END_2099;
rv3028->rtc->ops = &rv3028_rtc_ops;
- ret = rtc_register_device(rv3028->rtc);
+ ret = devm_rtc_register_device(rv3028->rtc);
if (ret)
return ret;
nvmem_cfg.priv = rv3028->regmap;
- rtc_nvmem_register(rv3028->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(rv3028->rtc, &nvmem_cfg);
eeprom_cfg.priv = rv3028;
- rtc_nvmem_register(rv3028->rtc, &eeprom_cfg);
+ devm_rtc_nvmem_register(rv3028->rtc, &eeprom_cfg);
rv3028->rtc->max_user_freq = 1;
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c
index 62718231731b..dc1bda62095e 100644
--- a/drivers/rtc/rtc-rv3029c2.c
+++ b/drivers/rtc/rtc-rv3029c2.c
@@ -750,12 +750,12 @@ static int rv3029_probe(struct device *dev, struct regmap *regmap, int irq,
rv3029->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3029->rtc->range_max = RTC_TIMESTAMP_END_2079;
- rc = rtc_register_device(rv3029->rtc);
+ rc = devm_rtc_register_device(rv3029->rtc);
if (rc)
return rc;
nvmem_cfg.priv = rv3029->regmap;
- rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(rv3029->rtc, &nvmem_cfg);
return 0;
}
diff --git a/drivers/rtc/rtc-rv3032.c b/drivers/rtc/rtc-rv3032.c
index 3e67f71f4261..c9bcea727757 100644
--- a/drivers/rtc/rtc-rv3032.c
+++ b/drivers/rtc/rtc-rv3032.c
@@ -885,14 +885,14 @@ static int rv3032_probe(struct i2c_client *client)
rv3032->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv3032->rtc->range_max = RTC_TIMESTAMP_END_2099;
rv3032->rtc->ops = &rv3032_rtc_ops;
- ret = rtc_register_device(rv3032->rtc);
+ ret = devm_rtc_register_device(rv3032->rtc);
if (ret)
return ret;
- nvmem_cfg.priv = rv3032;
- rtc_nvmem_register(rv3032->rtc, &nvmem_cfg);
+ nvmem_cfg.priv = rv3032->regmap;
+ devm_rtc_nvmem_register(rv3032->rtc, &nvmem_cfg);
eeprom_cfg.priv = rv3032;
- rtc_nvmem_register(rv3032->rtc, &eeprom_cfg);
+ devm_rtc_nvmem_register(rv3032->rtc, &eeprom_cfg);
rv3032->rtc->max_user_freq = 1;
diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c
index c6d8e3425688..d4ea6db51b26 100644
--- a/drivers/rtc/rtc-rv8803.c
+++ b/drivers/rtc/rtc-rv8803.c
@@ -585,14 +585,13 @@ static int rv8803_probe(struct i2c_client *client,
}
rv8803->rtc->ops = &rv8803_rtc_ops;
- rv8803->rtc->nvram_old_abi = true;
rv8803->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rv8803->rtc->range_max = RTC_TIMESTAMP_END_2099;
- err = rtc_register_device(rv8803->rtc);
+ err = devm_rtc_register_device(rv8803->rtc);
if (err)
return err;
- rtc_nvmem_register(rv8803->rtc, &nvmem_cfg);
+ devm_rtc_nvmem_register(rv8803->rtc, &nvmem_cfg);
rv8803->rtc->max_user_freq = 1;
diff --git a/drivers/rtc/rtc-rx6110.c b/drivers/rtc/rtc-rx6110.c
index 3a9eb7043f01..a7b671a21022 100644
--- a/drivers/rtc/rtc-rx6110.c
+++ b/drivers/rtc/rtc-rx6110.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>
+#include <linux/i2c.h>
/* RX-6110 Register definitions */
#define RX6110_REG_SEC 0x10
@@ -310,6 +311,27 @@ static const struct rtc_class_ops rx6110_rtc_ops = {
.set_time = rx6110_set_time,
};
+static int rx6110_probe(struct rx6110_data *rx6110, struct device *dev)
+{
+ int err;
+
+ rx6110->rtc = devm_rtc_device_register(dev,
+ RX6110_DRIVER_NAME,
+ &rx6110_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rx6110->rtc))
+ return PTR_ERR(rx6110->rtc);
+
+ err = rx6110_init(rx6110);
+ if (err)
+ return err;
+
+ rx6110->rtc->max_user_freq = 1;
+
+ return 0;
+}
+
+#ifdef CONFIG_SPI_MASTER
static struct regmap_config regmap_spi_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -318,13 +340,12 @@ static struct regmap_config regmap_spi_config = {
};
/**
- * rx6110_probe - initialize rtc driver
+ * rx6110_spi_probe - initialize rtc driver
* @spi: pointer to spi device
*/
-static int rx6110_probe(struct spi_device *spi)
+static int rx6110_spi_probe(struct spi_device *spi)
{
struct rx6110_data *rx6110;
- int err;
if ((spi->bits_per_word && spi->bits_per_word != 8) ||
(spi->max_speed_hz > 2000000) ||
@@ -346,27 +367,14 @@ static int rx6110_probe(struct spi_device *spi)
spi_set_drvdata(spi, rx6110);
- rx6110->rtc = devm_rtc_device_register(&spi->dev,
- RX6110_DRIVER_NAME,
- &rx6110_rtc_ops, THIS_MODULE);
-
- if (IS_ERR(rx6110->rtc))
- return PTR_ERR(rx6110->rtc);
-
- err = rx6110_init(rx6110);
- if (err)
- return err;
-
- rx6110->rtc->max_user_freq = 1;
-
- return 0;
+ return rx6110_probe(rx6110, &spi->dev);
}
-static const struct spi_device_id rx6110_id[] = {
+static const struct spi_device_id rx6110_spi_id[] = {
{ "rx6110", 0 },
{ }
};
-MODULE_DEVICE_TABLE(spi, rx6110_id);
+MODULE_DEVICE_TABLE(spi, rx6110_spi_id);
static const struct of_device_id rx6110_spi_of_match[] = {
{ .compatible = "epson,rx6110" },
@@ -374,16 +382,127 @@ static const struct of_device_id rx6110_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, rx6110_spi_of_match);
-static struct spi_driver rx6110_driver = {
+static struct spi_driver rx6110_spi_driver = {
.driver = {
.name = RX6110_DRIVER_NAME,
.of_match_table = of_match_ptr(rx6110_spi_of_match),
},
- .probe = rx6110_probe,
- .id_table = rx6110_id,
+ .probe = rx6110_spi_probe,
+ .id_table = rx6110_spi_id,
+};
+
+static int rx6110_spi_register(void)
+{
+ return spi_register_driver(&rx6110_spi_driver);
+}
+
+static void rx6110_spi_unregister(void)
+{
+ spi_unregister_driver(&rx6110_spi_driver);
+}
+#else
+static int rx6110_spi_register(void)
+{
+ return 0;
+}
+
+static void rx6110_spi_unregister(void)
+{
+}
+#endif /* CONFIG_SPI_MASTER */
+
+#ifdef CONFIG_I2C
+static struct regmap_config regmap_i2c_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = RX6110_REG_IRQ,
+ .read_flag_mask = 0x80,
};
-module_spi_driver(rx6110_driver);
+static int rx6110_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct rx6110_data *rx6110;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA
+ | I2C_FUNC_SMBUS_I2C_BLOCK)) {
+ dev_err(&adapter->dev,
+ "doesn't support required functionality\n");
+ return -EIO;
+ }
+
+ rx6110 = devm_kzalloc(&client->dev, sizeof(*rx6110), GFP_KERNEL);
+ if (!rx6110)
+ return -ENOMEM;
+
+ rx6110->regmap = devm_regmap_init_i2c(client, &regmap_i2c_config);
+ if (IS_ERR(rx6110->regmap)) {
+ dev_err(&client->dev, "regmap init failed for rtc rx6110\n");
+ return PTR_ERR(rx6110->regmap);
+ }
+
+ i2c_set_clientdata(client, rx6110);
+
+ return rx6110_probe(rx6110, &client->dev);
+}
+
+static const struct i2c_device_id rx6110_i2c_id[] = {
+ { "rx6110", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, rx6110_i2c_id);
+
+static struct i2c_driver rx6110_i2c_driver = {
+ .driver = {
+ .name = RX6110_DRIVER_NAME,
+ },
+ .probe = rx6110_i2c_probe,
+ .id_table = rx6110_i2c_id,
+};
+
+static int rx6110_i2c_register(void)
+{
+ return i2c_add_driver(&rx6110_i2c_driver);
+}
+
+static void rx6110_i2c_unregister(void)
+{
+ i2c_del_driver(&rx6110_i2c_driver);
+}
+#else
+static int rx6110_i2c_register(void)
+{
+ return 0;
+}
+
+static void rx6110_i2c_unregister(void)
+{
+}
+#endif /* CONFIG_I2C */
+
+static int __init rx6110_module_init(void)
+{
+ int ret;
+
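+ /* Register the SPI driver first and unwind it if I2C registration fails. */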
+ ret = rx6110_spi_register();
+ if (ret)
+ return ret;
+
+ ret = rx6110_i2c_register();
+ if (ret)
+ rx6110_spi_unregister();
+
+ return ret;
+}
+module_init(rx6110_module_init);
+
+static void __exit rx6110_module_exit(void)
+{
+ rx6110_spi_unregister();
+ rx6110_i2c_unregister();
+}
+module_exit(rx6110_module_exit);
MODULE_AUTHOR("Val Krutov <val.krutov@erd.epson.com>");
MODULE_DESCRIPTION("RX-6110 SA RTC driver");
diff --git a/drivers/rtc/rtc-rx8010.c b/drivers/rtc/rtc-rx8010.c
index dca41a2a39b2..8340ab47a059 100644
--- a/drivers/rtc/rtc-rx8010.c
+++ b/drivers/rtc/rtc-rx8010.c
@@ -419,7 +419,7 @@ static int rx8010_probe(struct i2c_client *client)
rx8010->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
rx8010->rtc->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(rx8010->rtc);
+ return devm_rtc_register_device(rx8010->rtc);
}
static struct i2c_driver rx8010_driver = {
diff --git a/drivers/rtc/rtc-rx8581.c b/drivers/rtc/rtc-rx8581.c
index 490f70f57636..de109139529b 100644
--- a/drivers/rtc/rtc-rx8581.c
+++ b/drivers/rtc/rtc-rx8581.c
@@ -298,11 +298,11 @@ static int rx8581_probe(struct i2c_client *client,
rx8581->rtc->start_secs = 0;
rx8581->rtc->set_start_time = true;
- ret = rtc_register_device(rx8581->rtc);
+ ret = devm_rtc_register_device(rx8581->rtc);
for (i = 0; i < config->num_nvram; i++) {
nvmem_cfg[i].priv = rx8581;
- rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]);
+ devm_rtc_nvmem_register(rx8581->rtc, &nvmem_cfg[i]);
}
return ret;
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index 03672a246356..ea15d0392bb9 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -497,7 +497,7 @@ static int s35390a_probe(struct i2c_client *client,
if (status1 & S35390A_FLAG_INT2)
rtc_update_irq(s35390a->rtc, 1, RTC_AF);
- return rtc_register_device(s35390a->rtc);
+ return devm_rtc_register_device(s35390a->rtc);
}
static struct i2c_driver s35390a_driver = {
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 24a41909f049..fab326ba9cec 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -42,26 +42,15 @@ struct s3c_rtc {
const struct s3c_rtc_data *data;
int irq_alarm;
- int irq_tick;
-
- spinlock_t pie_lock;
spinlock_t alarm_lock;
- int ticnt_save;
- int ticnt_en_save;
bool wake_en;
};
struct s3c_rtc_data {
- int max_user_freq;
bool needs_src_clk;
void (*irq_handler) (struct s3c_rtc *info, int mask);
- void (*set_freq) (struct s3c_rtc *info, int freq);
- void (*enable_tick) (struct s3c_rtc *info, struct seq_file *seq);
- void (*select_tick_clk) (struct s3c_rtc *info);
- void (*save_tick_cnt) (struct s3c_rtc *info);
- void (*restore_tick_cnt) (struct s3c_rtc *info);
void (*enable) (struct s3c_rtc *info);
void (*disable) (struct s3c_rtc *info);
};
@@ -91,17 +80,7 @@ static void s3c_rtc_disable_clk(struct s3c_rtc *info)
clk_disable(info->rtc_clk);
}
-/* IRQ Handlers */
-static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
-{
- struct s3c_rtc *info = (struct s3c_rtc *)id;
-
- if (info->data->irq_handler)
- info->data->irq_handler(info, S3C2410_INTP_TIC);
-
- return IRQ_HANDLED;
-}
-
+/* IRQ Handler */
static irqreturn_t s3c_rtc_alarmirq(int irq, void *id)
{
struct s3c_rtc *info = (struct s3c_rtc *)id;
@@ -148,28 +127,6 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
return ret;
}
-/* Set RTC frequency */
-static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- int ret;
-
- if (!is_power_of_2(freq))
- return -EINVAL;
-
- ret = s3c_rtc_enable_clk(info);
- if (ret)
- return ret;
- spin_lock_irq(&info->pie_lock);
-
- if (info->data->set_freq)
- info->data->set_freq(info, freq);
-
- spin_unlock_irq(&info->pie_lock);
- s3c_rtc_disable_clk(info);
-
- return 0;
-}
-
/* Time read/write */
static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
@@ -348,29 +305,11 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
return 0;
}
-static int s3c_rtc_proc(struct device *dev, struct seq_file *seq)
-{
- struct s3c_rtc *info = dev_get_drvdata(dev);
- int ret;
-
- ret = s3c_rtc_enable_clk(info);
- if (ret)
- return ret;
-
- if (info->data->enable_tick)
- info->data->enable_tick(info, seq);
-
- s3c_rtc_disable_clk(info);
-
- return 0;
-}
-
static const struct rtc_class_ops s3c_rtcops = {
.read_time = s3c_rtc_gettime,
.set_time = s3c_rtc_settime,
.read_alarm = s3c_rtc_getalarm,
.set_alarm = s3c_rtc_setalarm,
- .proc = s3c_rtc_proc,
.alarm_irq_enable = s3c_rtc_setaie,
};
@@ -450,18 +389,12 @@ static int s3c_rtc_probe(struct platform_device *pdev)
if (!info)
return -ENOMEM;
- /* find the IRQs */
- info->irq_tick = platform_get_irq(pdev, 1);
- if (info->irq_tick < 0)
- return info->irq_tick;
-
info->dev = &pdev->dev;
info->data = of_device_get_match_data(&pdev->dev);
if (!info->data) {
dev_err(&pdev->dev, "failed getting s3c_rtc_data\n");
return -EINVAL;
}
- spin_lock_init(&info->pie_lock);
spin_lock_init(&info->alarm_lock);
platform_set_drvdata(pdev, info);
@@ -470,8 +403,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
if (info->irq_alarm < 0)
return info->irq_alarm;
- dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n",
- info->irq_tick, info->irq_alarm);
+ dev_dbg(&pdev->dev, "s3c2410_rtc: alarm irq %d\n", info->irq_alarm);
/* get the memory region */
info->base = devm_platform_ioremap_resource(pdev, 0);
@@ -503,6 +435,10 @@ static int s3c_rtc_probe(struct platform_device *pdev)
goto err_src_clk;
}
+ /* disable RTC enable bits potentially set by the bootloader */
+ if (info->data->disable)
+ info->data->disable(info);
+
/* check to see if everything is setup correctly */
if (info->data->enable)
info->data->enable(info);
@@ -542,18 +478,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- ret = devm_request_irq(&pdev->dev, info->irq_tick, s3c_rtc_tickirq,
- 0, "s3c2410-rtc tick", info);
- if (ret) {
- dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq_tick, ret);
- goto err_nortc;
- }
-
- if (info->data->select_tick_clk)
- info->data->select_tick_clk(info);
-
- s3c_rtc_setfreq(info, 1);
-
s3c_rtc_disable_clk(info);
return 0;
@@ -581,10 +505,6 @@ static int s3c_rtc_suspend(struct device *dev)
if (ret)
return ret;
- /* save TICNT for anyone using periodic interrupts */
- if (info->data->save_tick_cnt)
- info->data->save_tick_cnt(info);
-
if (info->data->disable)
info->data->disable(info);
@@ -605,9 +525,6 @@ static int s3c_rtc_resume(struct device *dev)
if (info->data->enable)
info->data->enable(info);
- if (info->data->restore_tick_cnt)
- info->data->restore_tick_cnt(info);
-
s3c_rtc_disable_clk(info);
if (device_may_wakeup(dev) && info->wake_en) {
@@ -631,162 +548,27 @@ static void s3c6410_rtc_irq(struct s3c_rtc *info, int mask)
writeb(mask, info->base + S3C2410_INTP);
}
-static void s3c2410_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- unsigned int tmp = 0;
- int val;
-
- tmp = readb(info->base + S3C2410_TICNT);
- tmp &= S3C2410_TICNT_ENABLE;
-
- val = (info->rtc->max_user_freq / freq) - 1;
- tmp |= val;
-
- writel(tmp, info->base + S3C2410_TICNT);
-}
-
-static void s3c2416_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- unsigned int tmp = 0;
- int val;
-
- tmp = readb(info->base + S3C2410_TICNT);
- tmp &= S3C2410_TICNT_ENABLE;
-
- val = (info->rtc->max_user_freq / freq) - 1;
-
- tmp |= S3C2443_TICNT_PART(val);
- writel(S3C2443_TICNT1_PART(val), info->base + S3C2443_TICNT1);
-
- writel(S3C2416_TICNT2_PART(val), info->base + S3C2416_TICNT2);
-
- writel(tmp, info->base + S3C2410_TICNT);
-}
-
-static void s3c2443_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- unsigned int tmp = 0;
- int val;
-
- tmp = readb(info->base + S3C2410_TICNT);
- tmp &= S3C2410_TICNT_ENABLE;
-
- val = (info->rtc->max_user_freq / freq) - 1;
-
- tmp |= S3C2443_TICNT_PART(val);
- writel(S3C2443_TICNT1_PART(val), info->base + S3C2443_TICNT1);
-
- writel(tmp, info->base + S3C2410_TICNT);
-}
-
-static void s3c6410_rtc_setfreq(struct s3c_rtc *info, int freq)
-{
- int val;
-
- val = (info->rtc->max_user_freq / freq) - 1;
- writel(val, info->base + S3C2410_TICNT);
-}
-
-static void s3c24xx_rtc_enable_tick(struct s3c_rtc *info, struct seq_file *seq)
-{
- unsigned int ticnt;
-
- ticnt = readb(info->base + S3C2410_TICNT);
- ticnt &= S3C2410_TICNT_ENABLE;
-
- seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
-}
-
-static void s3c2416_rtc_select_tick_clk(struct s3c_rtc *info)
-{
- unsigned int con;
-
- con = readw(info->base + S3C2410_RTCCON);
- con |= S3C2443_RTCCON_TICSEL;
- writew(con, info->base + S3C2410_RTCCON);
-}
-
-static void s3c6410_rtc_enable_tick(struct s3c_rtc *info, struct seq_file *seq)
-{
- unsigned int ticnt;
-
- ticnt = readw(info->base + S3C2410_RTCCON);
- ticnt &= S3C64XX_RTCCON_TICEN;
-
- seq_printf(seq, "periodic_IRQ\t: %s\n", ticnt ? "yes" : "no");
-}
-
-static void s3c24xx_rtc_save_tick_cnt(struct s3c_rtc *info)
-{
- info->ticnt_save = readb(info->base + S3C2410_TICNT);
-}
-
-static void s3c24xx_rtc_restore_tick_cnt(struct s3c_rtc *info)
-{
- writeb(info->ticnt_save, info->base + S3C2410_TICNT);
-}
-
-static void s3c6410_rtc_save_tick_cnt(struct s3c_rtc *info)
-{
- info->ticnt_en_save = readw(info->base + S3C2410_RTCCON);
- info->ticnt_en_save &= S3C64XX_RTCCON_TICEN;
- info->ticnt_save = readl(info->base + S3C2410_TICNT);
-}
-
-static void s3c6410_rtc_restore_tick_cnt(struct s3c_rtc *info)
-{
- unsigned int con;
-
- writel(info->ticnt_save, info->base + S3C2410_TICNT);
- if (info->ticnt_en_save) {
- con = readw(info->base + S3C2410_RTCCON);
- writew(con | info->ticnt_en_save, info->base + S3C2410_RTCCON);
- }
-}
-
static struct s3c_rtc_data const s3c2410_rtc_data = {
- .max_user_freq = 128,
.irq_handler = s3c24xx_rtc_irq,
- .set_freq = s3c2410_rtc_setfreq,
- .enable_tick = s3c24xx_rtc_enable_tick,
- .save_tick_cnt = s3c24xx_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c24xx_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
static struct s3c_rtc_data const s3c2416_rtc_data = {
- .max_user_freq = 32768,
.irq_handler = s3c24xx_rtc_irq,
- .set_freq = s3c2416_rtc_setfreq,
- .enable_tick = s3c24xx_rtc_enable_tick,
- .select_tick_clk = s3c2416_rtc_select_tick_clk,
- .save_tick_cnt = s3c24xx_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c24xx_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
static struct s3c_rtc_data const s3c2443_rtc_data = {
- .max_user_freq = 32768,
.irq_handler = s3c24xx_rtc_irq,
- .set_freq = s3c2443_rtc_setfreq,
- .enable_tick = s3c24xx_rtc_enable_tick,
- .select_tick_clk = s3c2416_rtc_select_tick_clk,
- .save_tick_cnt = s3c24xx_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c24xx_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c24xx_rtc_disable,
};
static struct s3c_rtc_data const s3c6410_rtc_data = {
- .max_user_freq = 32768,
.needs_src_clk = true,
.irq_handler = s3c6410_rtc_irq,
- .set_freq = s3c6410_rtc_setfreq,
- .enable_tick = s3c6410_rtc_enable_tick,
- .save_tick_cnt = s3c6410_rtc_save_tick_cnt,
- .restore_tick_cnt = s3c6410_rtc_restore_tick_cnt,
.enable = s3c24xx_rtc_enable,
.disable = s3c6410_rtc_disable,
};
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 9ccc97cf5e09..1250887e4382 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -205,7 +205,7 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
info->rtc->max_user_freq = RTC_FREQ;
info->rtc->range_max = U32_MAX;
- ret = rtc_register_device(info->rtc);
+ ret = devm_rtc_register_device(info->rtc);
if (ret) {
clk_disable_unprepare(info->clk);
return ret;
diff --git a/drivers/rtc/rtc-sc27xx.c b/drivers/rtc/rtc-sc27xx.c
index 36810dd40cd3..187aa955b79c 100644
--- a/drivers/rtc/rtc-sc27xx.c
+++ b/drivers/rtc/rtc-sc27xx.c
@@ -299,33 +299,6 @@ static int sprd_rtc_set_secs(struct sprd_rtc *rtc, enum sprd_rtc_reg_types type,
sts_mask);
}
-static int sprd_rtc_read_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
-{
- struct sprd_rtc *rtc = dev_get_drvdata(dev);
- time64_t secs;
- u32 val;
- int ret;
-
- ret = sprd_rtc_get_secs(rtc, SPRD_RTC_AUX_ALARM, &secs);
- if (ret)
- return ret;
-
- rtc_time64_to_tm(secs, &alrm->time);
-
- ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_EN, &val);
- if (ret)
- return ret;
-
- alrm->enabled = !!(val & SPRD_RTC_AUXALM_EN);
-
- ret = regmap_read(rtc->regmap, rtc->base + SPRD_RTC_INT_RAW_STS, &val);
- if (ret)
- return ret;
-
- alrm->pending = !!(val & SPRD_RTC_AUXALM_EN);
- return 0;
-}
-
static int sprd_rtc_set_aux_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
struct sprd_rtc *rtc = dev_get_drvdata(dev);
@@ -415,16 +388,9 @@ static int sprd_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u32 val;
/*
- * Before RTC device is registered, it will check to see if there is an
- * alarm already set in RTC hardware, and we always read the normal
- * alarm at this time.
- *
- * Or if aie_timer is enabled, we should get the normal alarm time.
- * Otherwise we should get auxiliary alarm time.
+ * The RTC core checks to see if there is an alarm already set in RTC
+ * hardware, and we always read the normal alarm at this time.
*/
- if (rtc->rtc && rtc->rtc->registered && rtc->rtc->aie_timer.enabled == 0)
- return sprd_rtc_read_aux_alarm(dev, alrm);
-
ret = sprd_rtc_get_secs(rtc, SPRD_RTC_ALARM, &secs);
if (ret)
return ret;
@@ -563,7 +529,7 @@ static int sprd_rtc_check_power_down(struct sprd_rtc *rtc)
* means the RTC has been powered down, so the RTC time values are
* invalid.
*/
- rtc->valid = val == SPRD_RTC_POWER_RESET_VALUE ? false : true;
+ rtc->valid = val != SPRD_RTC_POWER_RESET_VALUE;
return 0;
}
@@ -652,7 +618,7 @@ static int sprd_rtc_probe(struct platform_device *pdev)
rtc->rtc->ops = &sprd_rtc_ops;
rtc->rtc->range_min = 0;
rtc->rtc->range_max = 5662310399LL;
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret) {
device_init_wakeup(&pdev->dev, 0);
return ret;
diff --git a/drivers/rtc/rtc-sd3078.c b/drivers/rtc/rtc-sd3078.c
index a7aa943c1183..f6bee69ba017 100644
--- a/drivers/rtc/rtc-sd3078.c
+++ b/drivers/rtc/rtc-sd3078.c
@@ -192,7 +192,7 @@ static int sd3078_probe(struct i2c_client *client,
sd3078->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
sd3078->rtc->range_max = RTC_TIMESTAMP_END_2099;
- ret = rtc_register_device(sd3078->rtc);
+ ret = devm_rtc_register_device(sd3078->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 9167b48014a1..cd146b574143 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -607,7 +607,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_max = mktime64(2098, 12, 31, 23, 59, 59);
}
- ret = rtc_register_device(rtc->rtc_dev);
+ ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret)
goto err_unmap;
diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c
index abf19435dbad..03a6cca23201 100644
--- a/drivers/rtc/rtc-sirfsoc.c
+++ b/drivers/rtc/rtc-sirfsoc.c
@@ -356,7 +356,7 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev)
return err;
}
- return rtc_register_device(rtcdrv->rtc);
+ return devm_rtc_register_device(rtcdrv->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c
index 0263d996b8a8..bd929b0e7d7d 100644
--- a/drivers/rtc/rtc-snvs.c
+++ b/drivers/rtc/rtc-snvs.c
@@ -151,17 +151,14 @@ static int snvs_rtc_read_time(struct device *dev, struct rtc_time *tm)
unsigned long time;
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
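+ /* The clk API treats a NULL clk as a no-op, so the NULL checks can go. */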
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
time = rtc_read_lp_counter(data);
rtc_time64_to_tm(time, tm);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return 0;
}
@@ -172,11 +169,9 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
unsigned long time = rtc_tm_to_time64(tm);
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
/* Disable RTC first */
ret = snvs_rtc_enable(data, false);
@@ -190,8 +185,7 @@ static int snvs_rtc_set_time(struct device *dev, struct rtc_time *tm)
/* Enable RTC again */
ret = snvs_rtc_enable(data, true);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return ret;
}
@@ -202,11 +196,9 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
u32 lptar, lpsr;
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
regmap_read(data->regmap, data->offset + SNVS_LPTAR, &lptar);
rtc_time64_to_tm(lptar, &alrm->time);
@@ -214,8 +206,7 @@ static int snvs_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
alrm->pending = (lpsr & SNVS_LPSR_LPTA) ? 1 : 0;
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return 0;
}
@@ -225,11 +216,9 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
struct snvs_rtc_data *data = dev_get_drvdata(dev);
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR,
(SNVS_LPCR_LPTA_EN | SNVS_LPCR_LPWUI_EN),
@@ -237,8 +226,7 @@ static int snvs_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
ret = rtc_write_sync_lp(data);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return ret;
}
@@ -249,11 +237,9 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned long time = rtc_tm_to_time64(&alrm->time);
int ret;
- if (data->clk) {
- ret = clk_enable(data->clk);
- if (ret)
- return ret;
- }
+ ret = clk_enable(data->clk);
+ if (ret)
+ return ret;
regmap_update_bits(data->regmap, data->offset + SNVS_LPCR, SNVS_LPCR_LPTA_EN, 0);
ret = rtc_write_sync_lp(data);
@@ -264,8 +250,7 @@ static int snvs_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
/* Clear alarm interrupt status bit */
regmap_write(data->regmap, data->offset + SNVS_LPSR, SNVS_LPSR_LPTA);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return snvs_rtc_alarm_irq_enable(dev, alrm->enabled);
}
@@ -285,8 +270,7 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
u32 lpsr;
u32 events = 0;
- if (data->clk)
- clk_enable(data->clk);
+ clk_enable(data->clk);
regmap_read(data->regmap, data->offset + SNVS_LPSR, &lpsr);
@@ -302,8 +286,7 @@ static irqreturn_t snvs_rtc_irq_handler(int irq, void *dev_id)
/* clear interrupt status */
regmap_write(data->regmap, data->offset + SNVS_LPSR, lpsr);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return events ? IRQ_HANDLED : IRQ_NONE;
}
@@ -316,8 +299,7 @@ static const struct regmap_config snvs_rtc_config = {
static void snvs_rtc_action(void *data)
{
- if (data)
- clk_disable_unprepare(data);
+ clk_disable_unprepare(data);
}
static int snvs_rtc_probe(struct platform_device *pdev)
@@ -405,15 +387,14 @@ static int snvs_rtc_probe(struct platform_device *pdev)
data->rtc->ops = &snvs_rtc_ops;
data->rtc->range_max = U32_MAX;
- return rtc_register_device(data->rtc);
+ return devm_rtc_register_device(data->rtc);
}
static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev)
{
struct snvs_rtc_data *data = dev_get_drvdata(dev);
- if (data->clk)
- clk_disable(data->clk);
+ clk_disable(data->clk);
return 0;
}
diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c
index 0c65448b85ee..bdb20f63254e 100644
--- a/drivers/rtc/rtc-st-lpc.c
+++ b/drivers/rtc/rtc-st-lpc.c
@@ -250,7 +250,7 @@ static int st_rtc_probe(struct platform_device *pdev)
rtc->rtc_dev->range_max = U64_MAX;
do_div(rtc->rtc_dev->range_max, rtc->clkrate);
- ret = rtc_register_device(rtc->rtc_dev);
+ ret = devm_rtc_register_device(rtc->rtc_dev);
if (ret) {
clk_disable_unprepare(rtc->clk);
return ret;
diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c
index 37a26279e107..fbd1ed41cbf1 100644
--- a/drivers/rtc/rtc-starfire.c
+++ b/drivers/rtc/rtc-starfire.c
@@ -48,7 +48,7 @@ static int __init starfire_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver starfire_rtc_driver = {
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index 01a45044f468..7cb6be1b7815 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -311,14 +311,13 @@ static int stk17ta8_rtc_probe(struct platform_device *pdev)
return PTR_ERR(pdata->rtc);
pdata->rtc->ops = &stk17ta8_rtc_ops;
- pdata->rtc->nvram_old_abi = true;
nvmem_cfg.priv = pdata;
- ret = rtc_nvmem_register(pdata->rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(pdata->rtc, &nvmem_cfg);
if (ret)
return ret;
- return rtc_register_device(pdata->rtc);
+ return devm_rtc_register_device(pdata->rtc);
}
/* work with hotplug and coldplug */
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 0a969af80af7..40c0f7ed36e0 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -366,7 +366,7 @@ static int stmp3xxx_rtc_probe(struct platform_device *pdev)
rtc_data->rtc->ops = &stmp3xxx_rtc_ops;
rtc_data->rtc->range_max = U32_MAX;
- err = rtc_register_device(rtc_data->rtc);
+ err = devm_rtc_register_device(rtc_data->rtc);
if (err)
return err;
diff --git a/drivers/rtc/rtc-sun4v.c b/drivers/rtc/rtc-sun4v.c
index 036463dfa103..a86e27de8c06 100644
--- a/drivers/rtc/rtc-sun4v.c
+++ b/drivers/rtc/rtc-sun4v.c
@@ -86,7 +86,7 @@ static int __init sun4v_rtc_probe(struct platform_device *pdev)
rtc->range_max = U64_MAX;
platform_set_drvdata(pdev, rtc);
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver sun4v_rtc_driver = {
diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c
index e2b8b150bcb4..adec1b14a8de 100644
--- a/drivers/rtc/rtc-sun6i.c
+++ b/drivers/rtc/rtc-sun6i.c
@@ -272,7 +272,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
300000000);
if (IS_ERR(rtc->int_osc)) {
pr_crit("Couldn't register the internal oscillator\n");
- return;
+ goto err;
}
parents[0] = clk_hw_get_name(rtc->int_osc);
@@ -290,7 +290,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
rtc->losc = clk_register(NULL, &rtc->hw);
if (IS_ERR(rtc->losc)) {
pr_crit("Couldn't register the LOSC clock\n");
- return;
+ goto err_register;
}
of_property_read_string_index(node, "clock-output-names", 1,
@@ -301,7 +301,7 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
&rtc->lock);
if (IS_ERR(rtc->ext_losc)) {
pr_crit("Couldn't register the LOSC external gate\n");
- return;
+ goto err_register;
}
clk_data->num = 2;
@@ -314,6 +314,8 @@ static void __init sun6i_rtc_clk_init(struct device_node *node,
of_clk_add_hw_provider(node, of_clk_hw_onecell_get, clk_data);
return;
+err_register:
+ clk_hw_unregister_fixed_rate(rtc->int_osc);
err:
kfree(clk_data);
}
@@ -724,7 +726,7 @@ static int sun6i_rtc_probe(struct platform_device *pdev)
chip->rtc->ops = &sun6i_rtc_ops;
chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */
- ret = rtc_register_device(chip->rtc);
+ ret = devm_rtc_register_device(chip->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-sunxi.c b/drivers/rtc/rtc-sunxi.c
index f5d7f44550ce..5d019e3a835a 100644
--- a/drivers/rtc/rtc-sunxi.c
+++ b/drivers/rtc/rtc-sunxi.c
@@ -470,7 +470,7 @@ static int sunxi_rtc_probe(struct platform_device *pdev)
chip->rtc->ops = &sunxi_rtc_ops;
- return rtc_register_device(chip->rtc);
+ return devm_rtc_register_device(chip->rtc);
}
static struct platform_driver sunxi_rtc_driver = {
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 7fbb1741692f..8925015cc698 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -329,7 +329,7 @@ static int tegra_rtc_probe(struct platform_device *pdev)
goto disable_clk;
}
- ret = rtc_register_device(info->rtc);
+ ret = devm_rtc_register_device(info->rtc);
if (ret)
goto disable_clk;
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 74b3a0603b73..7e0d8fb26465 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,7 +50,6 @@ static int test_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (expires > U32_MAX)
expires = U32_MAX;
- pr_err("ABE: %s +%d %s\n", __FILE__, __LINE__, __func__);
rtd->alarm.expires = expires;
if (alrm->enabled)
@@ -139,7 +138,7 @@ static int test_probe(struct platform_device *plat_dev)
timer_setup(&rtd->alarm, test_rtc_alarm_handler, 0);
rtd->alarm.expires = 0;
- return rtc_register_device(rtd->rtc);
+ return devm_rtc_register_device(rtd->rtc);
}
static struct platform_driver test_driver = {
diff --git a/drivers/rtc/rtc-tps6586x.c b/drivers/rtc/rtc-tps6586x.c
index e39af2d67051..a980337c3065 100644
--- a/drivers/rtc/rtc-tps6586x.c
+++ b/drivers/rtc/rtc-tps6586x.c
@@ -280,7 +280,7 @@ static int tps6586x_rtc_probe(struct platform_device *pdev)
goto fail_rtc_register;
}
- ret = rtc_register_device(rtc->rtc);
+ ret = devm_rtc_register_device(rtc->rtc);
if (ret)
goto fail_rtc_register;
diff --git a/drivers/rtc/rtc-tps65910.c b/drivers/rtc/rtc-tps65910.c
index e3840386f430..2d87b62826a8 100644
--- a/drivers/rtc/rtc-tps65910.c
+++ b/drivers/rtc/rtc-tps65910.c
@@ -434,7 +434,7 @@ static int tps65910_rtc_probe(struct platform_device *pdev)
tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;
- return rtc_register_device(tps_rtc->rtc);
+ return devm_rtc_register_device(tps_rtc->rtc);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/rtc/rtc-tx4939.c b/drivers/rtc/rtc-tx4939.c
index 715b82981279..c3309db5448d 100644
--- a/drivers/rtc/rtc-tx4939.c
+++ b/drivers/rtc/rtc-tx4939.c
@@ -266,17 +266,16 @@ static int __init tx4939_rtc_probe(struct platform_device *pdev)
return PTR_ERR(rtc);
rtc->ops = &tx4939_rtc_ops;
- rtc->nvram_old_abi = true;
rtc->range_max = U32_MAX;
pdata->rtc = rtc;
nvmem_cfg.priv = pdata;
- ret = rtc_nvmem_register(rtc, &nvmem_cfg);
+ ret = devm_rtc_nvmem_register(rtc, &nvmem_cfg);
if (ret)
return ret;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static int __exit tx4939_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c3671043ace7..5a9f9ad86d32 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -335,7 +335,7 @@ static int rtc_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Real Time Clock of NEC VR4100 series\n");
- retval = rtc_register_device(rtc);
+ retval = devm_rtc_register_device(rtc);
if (retval)
goto err_iounmap_all;
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index e2588625025f..197b649cd629 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -232,7 +232,7 @@ static int vt8500_rtc_probe(struct platform_device *pdev)
return ret;
}
- return rtc_register_device(vt8500_rtc->rtc);
+ return devm_rtc_register_device(vt8500_rtc->rtc);
}
static int vt8500_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-wilco-ec.c b/drivers/rtc/rtc-wilco-ec.c
index ff46066a68a4..2a205a646452 100644
--- a/drivers/rtc/rtc-wilco-ec.c
+++ b/drivers/rtc/rtc-wilco-ec.c
@@ -176,7 +176,7 @@ static int wilco_ec_rtc_probe(struct platform_device *pdev)
rtc->range_max = RTC_TIMESTAMP_END_2099;
rtc->owner = THIS_MODULE;
- return rtc_register_device(rtc);
+ return devm_rtc_register_device(rtc);
}
static struct platform_driver wilco_ec_rtc_driver = {
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index ccef887d2690..640833e21057 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -429,7 +429,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
wm831x_rtc->rtc->ops = &wm831x_rtc_ops;
wm831x_rtc->rtc->range_max = U32_MAX;
- ret = rtc_register_device(wm831x_rtc->rtc);
+ ret = devm_rtc_register_device(wm831x_rtc->rtc);
if (ret)
return ret;
diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
index 96db441f92b3..cf68a9b1c9eb 100644
--- a/drivers/rtc/rtc-xgene.c
+++ b/drivers/rtc/rtc-xgene.c
@@ -185,7 +185,7 @@ static int xgene_rtc_probe(struct platform_device *pdev)
pdata->rtc->ops = &xgene_rtc_ops;
pdata->rtc->range_max = U32_MAX;
- ret = rtc_register_device(pdata->rtc);
+ ret = devm_rtc_register_device(pdata->rtc);
if (ret) {
clk_disable_unprepare(pdata->clk);
return ret;
diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c
index 4b1077e2f826..f440bb52be92 100644
--- a/drivers/rtc/rtc-zynqmp.c
+++ b/drivers/rtc/rtc-zynqmp.c
@@ -264,7 +264,7 @@ static int xlnx_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
- return rtc_register_device(xrtcdev->rtc);
+ return devm_rtc_register_device(xrtcdev->rtc);
}
static int xlnx_rtc_remove(struct platform_device *pdev)
diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c
index 950fac0d41ff..8a957d31a1a4 100644
--- a/drivers/rtc/sysfs.c
+++ b/drivers/rtc/sysfs.c
@@ -317,8 +317,6 @@ int rtc_add_groups(struct rtc_device *rtc, const struct attribute_group **grps)
size_t old_cnt = 0, add_cnt = 0, new_cnt;
const struct attribute_group **groups, **old;
- if (rtc->registered)
- return -EINVAL;
if (!grps)
return -EINVAL;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 99f86612f775..dc78a523a69f 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -256,7 +256,6 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
return;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&lcu->lock, flags);
- list_del_init(&device->alias_list);
/* make sure that the workers don't use this device */
if (device == lcu->suc_data.device) {
spin_unlock_irqrestore(&lcu->lock, flags);
@@ -283,6 +282,7 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
spin_lock_irqsave(&aliastree.lock, flags);
spin_lock(&lcu->lock);
+ list_del_init(&device->alias_list);
if (list_empty(&lcu->grouplist) &&
list_empty(&lcu->active_devices) &&
list_empty(&lcu->inactive_devices)) {
@@ -462,11 +462,19 @@ static int read_unit_address_configuration(struct dasd_device *device,
spin_unlock_irqrestore(&lcu->lock, flags);
rc = dasd_sleep_on(cqr);
- if (rc && !suborder_not_supported(cqr)) {
+ if (!rc)
+ goto out;
+
+ if (suborder_not_supported(cqr)) {
+ /* suborder not supported or device unusable for IO */
+ rc = -EOPNOTSUPP;
+ } else {
+ /* IO failed but should be retried */
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
+out:
dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
@@ -503,6 +511,14 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
return rc;
spin_lock_irqsave(&lcu->lock, flags);
+ /*
+ * If another update is needed, skip the remaining handling: the
+ * data might already be outdated. In particular, do not add the
+ * device to an LCU with a pending update.
+ */
+ if (lcu->flags & NEED_UAC_UPDATE)
+ goto out;
lcu->pav = NO_PAV;
for (i = 0; i < MAX_DEVICES_PER_LCU; ++i) {
switch (lcu->uac->unit[i].ua_type) {
@@ -521,6 +537,7 @@ static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
alias_list) {
_add_device_to_lcu(lcu, device, refdev);
}
+out:
spin_unlock_irqrestore(&lcu->lock, flags);
return 0;
}
@@ -625,6 +642,7 @@ int dasd_alias_add_device(struct dasd_device *device)
}
if (lcu->flags & UPDATE_PENDING) {
list_move(&device->alias_list, &lcu->active_devices);
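+ /* The device left its PAV group, so clear the stale pointer. */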
+ private->pavgroup = NULL;
_schedule_lcu_update(lcu, device);
}
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 36583dc8406c..4b0a7cbb2096 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -1681,7 +1681,7 @@ void ccw_device_wait_idle(struct ccw_device *cdev)
cio_tsch(sch);
if (sch->schib.scsw.cmd.actl == 0)
break;
- udelay_simple(100);
+ udelay(100);
}
}
#endif
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 8b3ed5b45277..68106be4ba7a 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -394,6 +394,7 @@ static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
switch (info->index) {
case VFIO_CCW_IO_IRQ_INDEX:
case VFIO_CCW_CRW_IRQ_INDEX:
+ case VFIO_CCW_REQ_IRQ_INDEX:
info->count = 1;
info->flags = VFIO_IRQ_INFO_EVENTFD;
break;
@@ -424,6 +425,9 @@ static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
case VFIO_CCW_CRW_IRQ_INDEX:
ctx = &private->crw_trigger;
break;
+ case VFIO_CCW_REQ_IRQ_INDEX:
+ ctx = &private->req_trigger;
+ break;
default:
return -EINVAL;
}
@@ -607,6 +611,27 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
}
}
+/* Request removal of the device */
+static void vfio_ccw_mdev_request(struct mdev_device *mdev, unsigned int count)
+{
+ struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev));
+
+ if (!private)
+ return;
+
+ if (private->req_trigger) {
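+ /* Print the notice only for every tenth request to avoid log spam. */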
+ if (!(count % 10))
+ dev_notice_ratelimited(mdev_dev(private->mdev),
+ "Relaying device request to user (#%u)\n",
+ count);
+
+ eventfd_signal(private->req_trigger, 1);
+ } else if (count == 0) {
+ dev_notice(mdev_dev(private->mdev),
+ "No device request channel registered, blocked until released by user\n");
+ }
+}
+
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.owner = THIS_MODULE,
.supported_type_groups = mdev_type_groups,
@@ -617,6 +642,7 @@ static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.read = vfio_ccw_mdev_read,
.write = vfio_ccw_mdev_write,
.ioctl = vfio_ccw_mdev_ioctl,
+ .request = vfio_ccw_mdev_request,
};
int vfio_ccw_mdev_reg(struct subchannel *sch)
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index 8723156b29ea..b2c762eb42b9 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -84,7 +84,10 @@ struct vfio_ccw_crw {
* @irb: irb info received from interrupt
* @scsw: scsw info
* @io_trigger: eventfd ctx for signaling userspace I/O results
+ * @crw_trigger: eventfd ctx for signaling userspace CRW information
+ * @req_trigger: eventfd ctx for signaling userspace to return device
* @io_work: work for deferral process of I/O handling
+ * @crw_work: work for deferral process of CRW handling
*/
struct vfio_ccw_private {
struct subchannel *sch;
@@ -108,6 +111,7 @@ struct vfio_ccw_private {
struct eventfd_ctx *io_trigger;
struct eventfd_ctx *crw_trigger;
+ struct eventfd_ctx *req_trigger;
struct work_struct io_work;
struct work_struct crw_work;
} __aligned(8);
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c
index 226a5612e855..62ceeb7fc125 100644
--- a/drivers/s390/crypto/zcrypt_cex2a.c
+++ b/drivers/s390/crypto/zcrypt_cex2a.c
@@ -175,7 +175,7 @@ static int zcrypt_cex2a_queue_probe(struct ap_device *ap_dev)
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX2A_CLEANUP_TIME,
+ aq->request_timeout = CEX2A_CLEANUP_TIME;
aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) {
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index f5195bca1d85..f4a6d3744241 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -631,7 +631,7 @@ static int zcrypt_cex4_queue_probe(struct ap_device *ap_dev)
atomic_set(&zq->load, 0);
ap_queue_init_state(aq);
ap_queue_init_reply(aq, &zq->reply);
- aq->request_timeout = CEX4_CLEANUP_TIME,
+ aq->request_timeout = CEX4_CLEANUP_TIME;
aq->private = zq;
rc = zcrypt_queue_register(zq);
if (rc) {
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6f5ddc3eab8c..28f637042d44 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -1079,7 +1079,8 @@ struct qeth_card *qeth_get_card_by_busid(char *bus_id);
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask);
int qeth_threads_running(struct qeth_card *, unsigned long);
-int qeth_set_offline(struct qeth_card *card, bool resetting);
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f4b60294a969..cf18d87da41e 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5507,12 +5507,12 @@ out:
return rc;
}
-static int qeth_set_online(struct qeth_card *card)
+static int qeth_set_online(struct qeth_card *card,
+ const struct qeth_discipline *disc)
{
bool carrier_ok;
int rc;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 2, "setonlin");
@@ -5529,7 +5529,7 @@ static int qeth_set_online(struct qeth_card *card)
/* no need for locking / error handling at this early stage: */
qeth_set_real_num_tx_queues(card, qeth_tx_actual_queues(card));
- rc = card->discipline->set_online(card, carrier_ok);
+ rc = disc->set_online(card, carrier_ok);
if (rc)
goto err_online;
@@ -5537,7 +5537,6 @@ static int qeth_set_online(struct qeth_card *card)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
err_online:
@@ -5552,15 +5551,14 @@ err_hardsetup:
qdio_free(CARD_DDEV(card));
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return rc;
}
-int qeth_set_offline(struct qeth_card *card, bool resetting)
+int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc,
+ bool resetting)
{
int rc, rc2, rc3;
- mutex_lock(&card->discipline_mutex);
mutex_lock(&card->conf_mutex);
QETH_CARD_TEXT(card, 3, "setoffl");
@@ -5581,7 +5579,7 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
cancel_work_sync(&card->rx_mode_work);
- card->discipline->set_offline(card);
+ disc->set_offline(card);
qeth_qdio_clear_card(card, 0);
qeth_drain_output_queues(card);
@@ -5602,16 +5600,19 @@ int qeth_set_offline(struct qeth_card *card, bool resetting)
kobject_uevent(&card->gdev->dev.kobj, KOBJ_CHANGE);
mutex_unlock(&card->conf_mutex);
- mutex_unlock(&card->discipline_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(qeth_set_offline);
static int qeth_do_reset(void *data)
{
+ const struct qeth_discipline *disc;
struct qeth_card *card = data;
int rc;
+ /* Lock-free, other users will block until we are done. */
+ disc = card->discipline;
+
QETH_CARD_TEXT(card, 2, "recover1");
if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD))
return 0;
@@ -5619,8 +5620,8 @@ static int qeth_do_reset(void *data)
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_offline(card, true);
- rc = qeth_set_online(card);
+ qeth_set_offline(card, disc, true);
+ rc = qeth_set_online(card, disc);
if (!rc) {
dev_info(&card->gdev->dev,
"Device successfully recovered!\n");
@@ -6584,6 +6585,7 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
break;
default:
card->info.layer_enforced = true;
+ /* It's so early that we don't need the discipline_mutex yet. */
rc = qeth_core_load_discipline(card, enforced_disc);
if (rc)
goto err_load;
@@ -6616,10 +6618,12 @@ static void qeth_core_remove_device(struct ccwgroup_device *gdev)
QETH_CARD_TEXT(card, 2, "removedv");
+ mutex_lock(&card->discipline_mutex);
if (card->discipline) {
card->discipline->remove(gdev);
qeth_core_free_discipline(card);
}
+ mutex_unlock(&card->discipline_mutex);
qeth_free_qdio_queues(card);
@@ -6634,6 +6638,7 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
int rc = 0;
enum qeth_discipline_id def_discipline;
+ mutex_lock(&card->discipline_mutex);
if (!card->discipline) {
def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
QETH_DISCIPLINE_LAYER2;
@@ -6647,16 +6652,23 @@ static int qeth_core_set_online(struct ccwgroup_device *gdev)
}
}
- rc = qeth_set_online(card);
+ rc = qeth_set_online(card, card->discipline);
+
err:
+ mutex_unlock(&card->discipline_mutex);
return rc;
}
static int qeth_core_set_offline(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- return qeth_set_offline(card, false);
+ mutex_lock(&card->discipline_mutex);
+ rc = qeth_set_offline(card, card->discipline, false);
+ mutex_unlock(&card->discipline_mutex);
+
+ return rc;
}
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 4ed0fb0705a5..4254caf1d9b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -2208,7 +2208,7 @@ static void qeth_l2_remove_device(struct ccwgroup_device *gdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (gdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index d138ac432d01..4c2cae7ae9a7 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1813,7 +1813,7 @@ static netdev_features_t qeth_l3_osa_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
- if (qeth_get_ip_version(skb) != 4)
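+ /* vlan_get_protocol() looks past a VLAN tag to the encapsulated protocol. */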
+ if (vlan_get_protocol(skb) != htons(ETH_P_IP))
features &= ~NETIF_F_HW_VLAN_CTAG_TX;
return qeth_features_check(skb, dev, features);
}
@@ -1971,7 +1971,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
if (cgdev->state == CCWGROUP_ONLINE)
- qeth_set_offline(card, false);
+ qeth_set_offline(card, card->discipline, false);
cancel_work_sync(&card->close_dev_work);
if (card->dev->reg_state == NETREG_REGISTERED)
diff --git a/drivers/scsi/cxgbi/cxgb4i/Kconfig b/drivers/scsi/cxgbi/cxgb4i/Kconfig
index b206e266b4e7..8b0deece9758 100644
--- a/drivers/scsi/cxgbi/cxgb4i/Kconfig
+++ b/drivers/scsi/cxgbi/cxgb4i/Kconfig
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
depends on PCI && INET && (IPV6 || IPV6=n)
depends on THERMAL || !THERMAL
depends on ETHERNET
+ depends on TLS || TLS=n
select NET_VENDOR_CHELSIO
select CHELSIO_T4
select CHELSIO_LIB
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index e4e0d767b98e..244fc27215dc 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -329,6 +329,7 @@ static int start_context(struct ocxlflash_context *ctx)
struct ocxl_hw_afu *afu = ctx->hw_afu;
struct ocxl_afu_config *acfg = &afu->acfg;
void *link_token = afu->link_token;
+ struct pci_dev *pdev = afu->pdev;
struct device *dev = afu->dev;
bool master = ctx->master;
struct mm_struct *mm;
@@ -360,8 +361,9 @@ static int start_context(struct ocxlflash_context *ctx)
mm = current->mm;
}
- rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0, mm,
- ocxlflash_xsl_fault, ctx);
+ rc = ocxl_link_add_pe(link_token, ctx->pe, pid, 0, 0,
+ pci_dev_id(pdev), mm, ocxlflash_xsl_fault,
+ ctx);
if (unlikely(rc)) {
dev_err(dev, "%s: ocxl_link_add_pe failed rc=%d\n",
__func__, rc);
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index 2b28dd405600..e821dd32dd28 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include <linux/dmapool.h>
#include <linux/iopoll.h>
+#include <linux/irq.h>
#include <linux/lcm.h>
#include <linux/libata.h>
#include <linux/mfd/syscon.h>
@@ -294,6 +295,7 @@ enum {
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
+ int (*interrupt_preinit)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
struct hisi_sas_device *device);
int (*slot_index_alloc)(struct hisi_hba *hisi_hba,
@@ -393,6 +395,8 @@ struct hisi_hba {
u32 refclk_frequency_mhz;
u8 sas_addr[SAS_ADDR_SIZE];
+ int *irq_map; /* v2 hw */
+
int n_phy;
spinlock_t lock;
struct semaphore sem;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index b6d4419c32f2..cf0bfac920a8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2614,6 +2614,13 @@ err_out:
return NULL;
}
+static int hisi_sas_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ if (hisi_hba->hw->interrupt_preinit)
+ return hisi_hba->hw->interrupt_preinit(hisi_hba);
+ return 0;
+}
+
int hisi_sas_probe(struct platform_device *pdev,
const struct hisi_sas_hw *hw)
{
@@ -2671,6 +2678,10 @@ int hisi_sas_probe(struct platform_device *pdev,
sha->sas_port[i] = &hisi_hba->port[i].sas_port;
}
+ rc = hisi_sas_interrupt_preinit(hisi_hba);
+ if (rc)
+ goto err_out_ha;
+
rc = scsi_add_host(shost, &pdev->dev);
if (rc)
goto err_out_ha;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index b57177b52fac..9adfdefef9ca 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -3302,6 +3302,28 @@ static irq_handler_t fatal_interrupts[HISI_SAS_FATAL_INT_NR] = {
fatal_axi_int_v2_hw
};
+#define CQ0_IRQ_INDEX (96)
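+/* Vectors 0..95 carry phy, SATA and fatal interrupts; completion queues start here. */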
+
+static int hisi_sas_v2_interrupt_preinit(struct hisi_hba *hisi_hba)
+{
+ struct platform_device *pdev = hisi_hba->platform_dev;
+ struct Scsi_Host *shost = hisi_hba->shost;
+ struct irq_affinity desc = {
+ .pre_vectors = CQ0_IRQ_INDEX,
+ .post_vectors = 16,
+ };
+ int resv = desc.pre_vectors + desc.post_vectors, minvec = resv + 1, nvec;
+
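+ /* Spread affinity only over the CQ vectors between the reserved ranges. */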
+ nvec = devm_platform_get_irqs_affinity(pdev, &desc, minvec, 128,
+ &hisi_hba->irq_map);
+ if (nvec < 0)
+ return nvec;
+
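+ /* Expose one blk-mq hardware queue per completion-queue vector granted. */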
+ shost->nr_hw_queues = hisi_hba->cq_nvecs = nvec - resv;
+
+ return 0;
+}
+
/*
* There is a limitation in the hip06 chipset that we need
* to map in all mbigen interrupts, even if they are not used.
@@ -3310,14 +3332,11 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
{
struct platform_device *pdev = hisi_hba->platform_dev;
struct device *dev = &pdev->dev;
- int irq, rc = 0, irq_map[128];
+ int irq, rc = 0;
int i, phy_no, fatal_no, queue_no;
- for (i = 0; i < 128; i++)
- irq_map[i] = platform_get_irq(pdev, i);
-
for (i = 0; i < HISI_SAS_PHY_INT_NR; i++) {
- irq = irq_map[i + 1]; /* Phy up/down is irq1 */
+ irq = hisi_hba->irq_map[i + 1]; /* Phy up/down is irq1 */
rc = devm_request_irq(dev, irq, phy_interrupts[i], 0,
DRV_NAME " phy", hisi_hba);
if (rc) {
@@ -3331,7 +3350,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
- irq = irq_map[phy_no + 72];
+ irq = hisi_hba->irq_map[phy_no + 72];
rc = devm_request_irq(dev, irq, sata_int_v2_hw, 0,
DRV_NAME " sata", phy);
if (rc) {
@@ -3343,7 +3362,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
for (fatal_no = 0; fatal_no < HISI_SAS_FATAL_INT_NR; fatal_no++) {
- irq = irq_map[fatal_no + 81];
+ irq = hisi_hba->irq_map[fatal_no + 81];
rc = devm_request_irq(dev, irq, fatal_interrupts[fatal_no], 0,
DRV_NAME " fatal", hisi_hba);
if (rc) {
@@ -3354,24 +3373,22 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba)
}
}
- for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) {
+ for (queue_no = 0; queue_no < hisi_hba->cq_nvecs; queue_no++) {
struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no];
- cq->irq_no = irq_map[queue_no + 96];
+ cq->irq_no = hisi_hba->irq_map[queue_no + 96];
rc = devm_request_threaded_irq(dev, cq->irq_no,
cq_interrupt_v2_hw,
cq_thread_v2_hw, IRQF_ONESHOT,
DRV_NAME " cq", cq);
if (rc) {
dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
- irq, rc);
+ cq->irq_no, rc);
rc = -ENOENT;
goto err_out;
}
+ cq->irq_mask = irq_get_affinity_mask(cq->irq_no);
}
-
- hisi_hba->cq_nvecs = hisi_hba->queue_count;
-
err_out:
return rc;
}
@@ -3529,6 +3546,26 @@ static struct device_attribute *host_attrs_v2_hw[] = {
NULL
};
+static int map_queues_v2_hw(struct Scsi_Host *shost)
+{
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
+ const struct cpumask *mask;
+ unsigned int queue, cpu;
+
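+ /* Build the CPU-to-queue map from each CQ vector's IRQ affinity mask. */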
+ for (queue = 0; queue < qmap->nr_queues; queue++) {
+ mask = irq_get_affinity_mask(hisi_hba->irq_map[96 + queue]);
+ if (!mask)
+ continue;
+
+ for_each_cpu(cpu, mask)
+ qmap->mq_map[cpu] = qmap->queue_offset + queue;
+ }
+
+ return 0;
+}
+
static struct scsi_host_template sht_v2_hw = {
.name = DRV_NAME,
.proc_name = DRV_NAME,
@@ -3553,10 +3590,13 @@ static struct scsi_host_template sht_v2_hw = {
#endif
.shost_attrs = host_attrs_v2_hw,
.host_reset = hisi_sas_host_reset,
+ .map_queues = map_queues_v2_hw,
+ .host_tagset = 1,
};
static const struct hisi_sas_hw hisi_sas_v2_hw = {
.hw_init = hisi_sas_v2_init,
+ .interrupt_preinit = hisi_sas_v2_interrupt_preinit,
.setup_itct = setup_itct_v2_hw,
.slot_index_alloc = slot_index_alloc_quirk_v2_hw,
.alloc_dev = alloc_dev_quirk_v2_hw,
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 6e4bf05c6d77..af192096a82b 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -37,6 +37,7 @@
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/irq_poll.h>
+#include <linux/blk-mq-pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
@@ -113,6 +114,10 @@ unsigned int enable_sdev_max_qd;
module_param(enable_sdev_max_qd, int, 0444);
MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0");
+int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable, "Shared host tagset enable/disable Default: enable(1)");
+
MODULE_LICENSE("GPL");
MODULE_VERSION(MEGASAS_VERSION);
MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com");
@@ -3119,6 +3124,19 @@ megasas_bios_param(struct scsi_device *sdev, struct block_device *bdev,
return 0;
}
+static int megasas_map_queues(struct Scsi_Host *shost)
+{
+ struct megasas_instance *instance;
+
+ instance = (struct megasas_instance *)shost->hostdata;
+
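+ /* With a single hardware queue the default blk-mq mapping suffices. */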
+ if (shost->nr_hw_queues == 1)
+ return 0;
+
+ return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+ instance->pdev, instance->low_latency_index_start);
+}
+
static void megasas_aen_polling(struct work_struct *work);
/**
@@ -3427,6 +3445,7 @@ static struct scsi_host_template megasas_template = {
.eh_timed_out = megasas_reset_timer,
.shost_attrs = megaraid_host_attrs,
.bios_param = megasas_bios_param,
+ .map_queues = megasas_map_queues,
.change_queue_depth = scsi_change_queue_depth,
.max_segment_size = 0xffffffff,
};
@@ -6808,6 +6827,26 @@ static int megasas_io_attach(struct megasas_instance *instance)
host->max_lun = MEGASAS_MAX_LUN;
host->max_cmd_len = 16;
+ /* Use the shared host tagset only for fusion adapters when managed
+ * interrupts are in use (the SMP affinity enabled case). kdump runs
+ * with a single MSI-x vector, so the shared host tagset is disabled
+ * there as well.
+ */
+
+ host->host_tagset = 0;
+ host->nr_hw_queues = 1;
+
+ if ((instance->adapter_type != MFI_SERIES) &&
+ (instance->msix_vectors > instance->low_latency_index_start) &&
+ host_tagset_enable &&
+ instance->smp_affinity_enable) {
+ host->host_tagset = 1;
+ host->nr_hw_queues = instance->msix_vectors -
+ instance->low_latency_index_start;
+ }
+
+ dev_info(&instance->pdev->dev,
+ "Max firmware commands: %d shared with nr_hw_queues = %d\n",
+ instance->max_fw_cmds, host->nr_hw_queues);
/*
* Notify the mid-layer about the new controller
*/
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index b0c01cf0428f..fd607287608e 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -359,24 +359,29 @@ megasas_get_msix_index(struct megasas_instance *instance,
{
int sdev_busy;
- /* nr_hw_queue = 1 for MegaRAID */
- struct blk_mq_hw_ctx *hctx =
- scmd->device->request_queue->queue_hw_ctx[0];
-
- sdev_busy = atomic_read(&hctx->nr_active);
+ /* TBD: if the SCSI midlayer removes device_busy in the future, the
+ * driver should track this counter in an internal structure.
+ */
+ sdev_busy = atomic_read(&scmd->device->device_busy);
if (instance->perf_mode == MR_BALANCED_PERF_MODE &&
- sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH))
+ sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) {
cmd->request_desc->SCSIIO.MSIxIndex =
mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) /
MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start);
- else if (instance->msix_load_balance)
+ } else if (instance->msix_load_balance) {
cmd->request_desc->SCSIIO.MSIxIndex =
(mega_mod64(atomic64_add_return(1, &instance->total_io_count),
instance->msix_vectors));
- else
+ } else if (instance->host->nr_hw_queues > 1) {
+ u32 tag = blk_mq_unique_tag(scmd->request);
+
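+ /* Route the I/O to the MSI-x vector backing its blk-mq hardware queue. */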
+ cmd->request_desc->SCSIIO.MSIxIndex = blk_mq_unique_tag_to_hwq(tag) +
+ instance->low_latency_index_start;
+ } else {
cmd->request_desc->SCSIIO.MSIxIndex =
instance->reply_map[raw_smp_processor_id()];
+ }
}
/**
@@ -956,9 +961,6 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance)
if (megasas_alloc_cmdlist_fusion(instance))
goto fail_exit;
- dev_info(&instance->pdev->dev, "Configured max firmware commands: %d\n",
- instance->max_fw_cmds);
-
/* The first 256 bytes (SMID 0) is not used. Don't add to the cmd list */
io_req_base = fusion->io_request_frames + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
io_req_base_phys = fusion->io_request_frames_phys + MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE;
@@ -1102,8 +1104,9 @@ megasas_ioc_init_fusion(struct megasas_instance *instance)
MR_HIGH_IOPS_QUEUE_COUNT) && cur_intr_coalescing)
instance->perf_mode = MR_BALANCED_PERF_MODE;
- dev_info(&instance->pdev->dev, "Performance mode :%s\n",
- MEGASAS_PERF_MODE_2STR(instance->perf_mode));
+ dev_info(&instance->pdev->dev, "Performance mode: %s (latency index = %d)\n",
+ MEGASAS_PERF_MODE_2STR(instance->perf_mode),
+ instance->low_latency_index_start);
instance->fw_sync_cache_support = (scratch_pad_1 &
MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 969baf4cd3f5..6e23dc3209fe 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -5034,7 +5034,7 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
static void
_base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
{
- u16 trigger_flags;
+ int trigger_flags;
/*
* Default setting of master trigger.
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index f75c0b5cd587..ccb5771f1cb7 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -402,7 +402,7 @@ fail_free_bounce:
return error;
}
-static int ps3rom_remove(struct ps3_system_bus_device *_dev)
+static void ps3rom_remove(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
struct Scsi_Host *host = ps3_system_bus_get_drvdata(&dev->sbd);
@@ -412,7 +412,6 @@ static int ps3rom_remove(struct ps3_system_bus_device *_dev)
scsi_host_put(host);
ps3_system_bus_set_drvdata(&dev->sbd, NULL);
kfree(dev->bounce_buf);
- return 0;
}
static struct ps3_system_bus_driver ps3rom = {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4848ae3c7b56..b3f14f05340a 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
req = blk_get_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
- REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+ REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+ rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
if (IS_ERR(req))
return ret;
rq = scsi_req(req);
@@ -1206,6 +1207,8 @@ static blk_status_t
scsi_device_state_check(struct scsi_device *sdev, struct request *req)
{
switch (sdev->sdev_state) {
+ case SDEV_CREATED:
+ return BLK_STS_OK;
case SDEV_OFFLINE:
case SDEV_TRANSPORT_OFFLINE:
/*
@@ -1232,18 +1235,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
return BLK_STS_RESOURCE;
case SDEV_QUIESCE:
/*
- * If the devices is blocked we defer normal commands.
+ * If the device is blocked we only accept power management
+ * commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
return BLK_STS_RESOURCE;
return BLK_STS_OK;
default:
/*
* For any other not fully online state we only allow
- * special commands. In particular any user initiated
- * command is not allowed.
+ * power management commands.
*/
- if (req && !(req->rq_flags & RQF_PREEMPT))
+ if (req && !(req->rq_flags & RQF_PM))
return BLK_STS_IOERR;
return BLK_STS_OK;
}
@@ -2516,15 +2519,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
/**
- * scsi_device_quiesce - Block user issued commands.
+ * scsi_device_quiesce - Block all commands except power management.
* @sdev: scsi device to quiesce.
*
* This works by trying to transition to the SDEV_QUIESCE state
* (which must be a legal transition). When the device is in this
- * state, only special requests will be accepted, all others will
- * be deferred. Since special requests may also be requeued requests,
- * a successful return doesn't guarantee the device will be
- * totally quiescent.
+ * state, only power management requests will be accepted, all others will
+ * be deferred.
*
* Must be called with user context, may sleep.
*
@@ -2586,12 +2587,12 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
+ if (sdev->sdev_state == SDEV_QUIESCE)
+ scsi_device_set_state(sdev, SDEV_RUNNING);
if (sdev->quiesced_by) {
sdev->quiesced_by = NULL;
blk_clear_pm_only(sdev->request_queue);
}
- if (sdev->sdev_state == SDEV_QUIESCE)
- scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
}
EXPORT_SYMBOL(scsi_device_resume);
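
The scsi_lib.c hunks above retire the RQF_PREEMPT convention: while a device sits in SDEV_QUIESCE, scsi_device_state_check() now only passes requests carrying RQF_PM, and __scsi_execute() maps that flag to BLK_MQ_REQ_PM at allocation time. A minimal sketch of a caller that must reach a quiesced device; the command bytes, timeout, and retry count are illustrative assumptions, not taken from the patch:

#include <linux/blkdev.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_proto.h>

/*
 * Sketch: issue a START STOP UNIT command to a possibly quiesced
 * device. RQF_PM makes __scsi_execute() allocate the request with
 * BLK_MQ_REQ_PM, so it is not deferred by the SDEV_QUIESCE check.
 */
static int example_start_unit(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { START_STOP, 0, 0, 0, 1 /* start */, 0 };
	struct scsi_sense_hdr sshdr;

	return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			    10 * HZ, /* retries */ 3, 0, RQF_PM, NULL);
}
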
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index f3d5b1bbd5aa..c37dd15d16d2 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
sshdr = &sshdr_tmp;
for(i = 0; i < DV_RETRIES; i++) {
+ /*
+ * The purpose of the RQF_PM flag below is to bypass the
+ * SDEV_QUIESCE state.
+ */
result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
sshdr, DV_TIMEOUT, /* retries */ 1,
REQ_FAILFAST_DEV |
REQ_FAILFAST_TRANSPORT |
REQ_FAILFAST_DRIVER,
- 0, NULL);
+ RQF_PM, NULL);
if (driver_byte(result) != DRIVER_SENSE ||
sshdr->sense_key != UNIT_ATTENTION)
break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
*/
lock_system_sleep();
+ if (scsi_autopm_get_device(sdev))
+ goto unlock_system_sleep;
+
if (unlikely(spi_dv_in_progress(starget)))
- goto unlock;
+ goto put_autopm;
if (unlikely(scsi_device_get(sdev)))
- goto unlock;
+ goto put_autopm;
spi_dv_in_progress(starget) = 1;
buffer = kzalloc(len, GFP_KERNEL);
if (unlikely(!buffer))
- goto out_put;
+ goto put_sdev;
/* We need to verify that the actual device will quiesce; the
* later target quiesce is just a nice to have */
if (unlikely(scsi_device_quiesce(sdev)))
- goto out_free;
+ goto free_buffer;
scsi_target_quiesce(starget);
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
spi_initial_dv(starget) = 1;
- out_free:
+free_buffer:
kfree(buffer);
- out_put:
+
+put_sdev:
spi_dv_in_progress(starget) = 0;
scsi_device_put(sdev);
-unlock:
+put_autopm:
+ scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
unlock_system_sleep();
}
EXPORT_SYMBOL(spi_dv_device);
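
The relabeled unwind path above follows the usual kernel convention of naming each error label after the cleanup it performs, so the release order can be checked against the acquisition order at a glance. A generic sketch of the pattern, with hypothetical acquire/undo helpers standing in for the real resources:

#include <linux/device.h>

/* Hypothetical helpers; each acquire has a matching undo. */
int acquire_a(struct device *dev);
int acquire_b(struct device *dev);
int acquire_c(struct device *dev);
void undo_a(struct device *dev);
void undo_b(struct device *dev);

static int example_setup(struct device *dev)
{
	int ret;

	ret = acquire_a(dev);
	if (ret)
		return ret;

	ret = acquire_b(dev);
	if (ret)
		goto release_a;		/* each label names what it undoes */

	ret = acquire_c(dev);
	if (ret)
		goto release_b;

	return 0;

release_b:
	undo_b(dev);
release_a:
	undo_a(dev);
	return ret;
}
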
diff --git a/drivers/scsi/ufs/ufs-mediatek-trace.h b/drivers/scsi/ufs/ufs-mediatek-trace.h
index fd6f84c1b4e2..895e82ea6ece 100644
--- a/drivers/scsi/ufs/ufs-mediatek-trace.h
+++ b/drivers/scsi/ufs/ufs-mediatek-trace.h
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
#define TRACE_INCLUDE_FILE ufs-mediatek-trace
#include <trace/define_trace.h>
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 3522458db3bb..80618af7c872 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -70,6 +70,13 @@ static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
}
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -514,6 +521,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
host->caps |= UFS_MTK_CAP_DISABLE_AH8;
+ if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+ host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
dev_info(hba->dev, "caps: 0x%x", host->caps);
}
@@ -1003,6 +1013,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+ if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+ (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+ hba->vreg_info.vcc->always_on = true;
+ /*
+ * VCC will be kept always-on thus we don't
+		 * VCC will be kept always-on, thus we don't
+ */
+ hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+ UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+ }
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 93d35097dfb0..3f0d3bb769e8 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -81,6 +81,7 @@ enum ufs_mtk_host_caps {
UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
UFS_MTK_CAP_VA09_PWR_CTRL = 1 << 1,
UFS_MTK_CAP_DISABLE_AH8 = 1 << 2,
+ UFS_MTK_CAP_BROKEN_VCC = 1 << 3,
};
struct ufs_mtk_crypt_cfg {
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index d593edb48767..14dfda735adf 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -330,7 +330,6 @@ enum {
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
};
-#define POWER_DESC_MAX_SIZE 0x62
#define POWER_DESC_MAX_ACTV_ICC_LVLS 16
/* Attribute bActiveICCLevel parameter bit masks definitions */
@@ -513,6 +512,7 @@ struct ufs_query_res {
struct ufs_vreg {
struct regulator *reg;
const char *name;
+ bool always_on;
bool enabled;
int min_uV;
int max_uV;
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index df3a564c3e33..fadd566025b8 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
{
struct intel_host *host;
+ hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
intel_ltr_hide(hba->dev);
}
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+ /*
+ * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
+ * address registers must be restored because the restore kernel can
+ * have used different addresses.
+ */
+ ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+ REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+ ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_L);
+ ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+ REG_UTP_TASK_REQ_LIST_BASE_H);
+
+ if (ufshcd_is_link_hibern8(hba)) {
+ int ret = ufshcd_uic_hibern8_exit(hba);
+
+ if (!ret) {
+ ufshcd_set_link_active(hba);
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
+ /*
+ * Force reset and restore. Any other actions can lead
+ * to an unrecoverable state.
+ */
+ ufshcd_set_link_off(hba);
+ }
+ }
+
+ return 0;
+}
+
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.init = ufs_intel_common_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.init = ufs_intel_ehl_init,
.exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
+ .resume = ufs_intel_resume,
};
#ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
{
return ufshcd_system_resume(dev_get_drvdata(dev));
}
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ int spm_lvl = hba->spm_lvl;
+ int ret;
+
+ /*
+ * For poweroff we need to set the UFS device to PowerDown mode.
+ * Force spm_lvl to ensure that.
+ */
+ hba->spm_lvl = 5;
+ ret = ufshcd_system_suspend(hba);
+ hba->spm_lvl = spm_lvl;
+ return ret;
+}
+
#endif /* !CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
- ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .freeze = ufshcd_pci_suspend,
+ .thaw = ufshcd_pci_resume,
+ .poweroff = ufshcd_pci_poweroff,
+ .restore = ufshcd_pci_resume,
+#endif
SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
ufshcd_pci_runtime_resume,
ufshcd_pci_runtime_idle)
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 9902b7e3aa4a..82ad31781bc9 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -225,6 +225,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
@@ -580,6 +581,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
hba->pwr_info.hs_rate);
}
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+ int err;
+
+ err = ufshcd_vops_device_reset(hba);
+
+ if (!err) {
+ ufshcd_set_ufs_dev_active(hba);
+ if (ufshcd_is_wb_allowed(hba)) {
+ hba->wb_enabled = false;
+ hba->wb_buf_flush_enabled = false;
+ }
+ }
+ if (err != -EOPNOTSUPP)
+ ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
{
if (!us)
@@ -3665,7 +3683,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
if (ret)
dev_err(hba->dev,
- "dme-reset: error code %d\n", ret);
+ "dme-enable: error code %d\n", ret);
return ret;
}
@@ -3964,7 +3982,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ret = ufshcd_host_reset_and_restore(hba);
@@ -6930,7 +6948,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Establish the link again and restore the device */
err = ufshcd_probe_hba(hba, false);
-
+ if (!err)
+ ufshcd_clear_ua_wluns(hba);
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -6968,7 +6987,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
do {
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
@@ -8045,7 +8064,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
{
int ret = 0;
- if (!vreg || !vreg->enabled)
+ if (!vreg || !vreg->enabled || vreg->always_on)
goto out;
ret = regulator_disable(vreg->reg);
@@ -8414,13 +8433,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
* handling context.
*/
hba->host->eh_noresume = 1;
- if (hba->wlun_dev_clr_ua) {
- ret = ufshcd_send_request_sense(hba, sdp);
- if (ret)
- goto out;
- /* Unit attention condition is cleared now */
- hba->wlun_dev_clr_ua = false;
- }
+ ufshcd_clear_ua_wluns(hba);
cmd[4] = pwr_mode << 4;
@@ -8441,7 +8454,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
if (!ret)
hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
scsi_device_put(sdp);
hba->host->eh_noresume = 0;
return ret;
@@ -8747,7 +8760,7 @@ set_link_active:
* further below.
*/
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
WARN_ON(!ufshcd_is_link_off(hba));
}
if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
@@ -8757,7 +8770,7 @@ set_link_active:
set_dev_active:
/* Can also get here needing to exit DeepSleep */
if (ufshcd_is_ufs_dev_deepsleep(hba)) {
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_host_reset_and_restore(hba);
}
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
@@ -9353,7 +9366,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
}
/* Reset the attached device */
- ufshcd_vops_device_reset(hba);
+ ufshcd_device_reset(hba);
ufshcd_init_crypto(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index f8c2467dc014..aa9ea3552323 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -1218,16 +1218,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
hba->vops->dbg_register_dump(hba);
}
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
{
- if (hba->vops && hba->vops->device_reset) {
- int err = hba->vops->device_reset(hba);
-
- if (!err)
- ufshcd_set_ufs_dev_active(hba);
- if (err != -EOPNOTSUPP)
- ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
- }
+ if (hba->vops && hba->vops->device_reset)
+ return hba->vops->device_reset(hba);
+
+ return -EOPNOTSUPP;
}
static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
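
With ufshcd_vops_device_reset() reduced to a thin dispatcher above, the common post-reset bookkeeping (marking the device active, clearing the WriteBooster state, logging the event) now lives in ufshcd_device_reset() in ufshcd.c, so a vendor ->device_reset() op only has to toggle its reset mechanism and report a result. A hedged sketch of such an op under the new contract; the host structure, GPIO field, and pulse widths are hypothetical:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/gpio/consumer.h>
#include "ufshcd.h"

/* Hypothetical variant-host state; not from the patch. */
struct example_host {
	struct gpio_desc *reset_gpio;
};

/*
 * Return 0 on success, -EOPNOTSUPP when no reset line exists (which
 * suppresses the event-history entry), or another negative error.
 */
static int example_device_reset(struct ufs_hba *hba)
{
	struct example_host *host = ufshcd_get_variant(hba);

	if (!host->reset_gpio)
		return -EOPNOTSUPP;

	gpiod_set_value_cansleep(host->reset_gpio, 1);
	usleep_range(10, 15);
	gpiod_set_value_cansleep(host->reset_gpio, 0);
	usleep_range(10, 15);

	return 0;
}
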
diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig
index 425ab6f7e375..d097d070f579 100644
--- a/drivers/soc/Kconfig
+++ b/drivers/soc/Kconfig
@@ -9,6 +9,7 @@ source "drivers/soc/bcm/Kconfig"
source "drivers/soc/fsl/Kconfig"
source "drivers/soc/imx/Kconfig"
source "drivers/soc/ixp4xx/Kconfig"
+source "drivers/soc/litex/Kconfig"
source "drivers/soc/mediatek/Kconfig"
source "drivers/soc/qcom/Kconfig"
source "drivers/soc/renesas/Kconfig"
diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile
index 36452bed86ef..699b758d28e4 100644
--- a/drivers/soc/Makefile
+++ b/drivers/soc/Makefile
@@ -4,7 +4,7 @@
#
obj-$(CONFIG_ARCH_ACTIONS) += actions/
-obj-$(CONFIG_SOC_ASPEED) += aspeed/
+obj-y += aspeed/
obj-$(CONFIG_ARCH_AT91) += atmel/
obj-y += bcm/
obj-$(CONFIG_ARCH_DOVE) += dove/
@@ -14,6 +14,7 @@ obj-$(CONFIG_ARCH_GEMINI) += gemini/
obj-y += imx/
obj-$(CONFIG_ARCH_IXP4XX) += ixp4xx/
obj-$(CONFIG_SOC_XWAY) += lantiq/
+obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex/
obj-y += mediatek/
obj-y += amlogic/
obj-y += qcom/
diff --git a/drivers/soc/amlogic/Kconfig b/drivers/soc/amlogic/Kconfig
index 321c5e26a268..174a9b011461 100644
--- a/drivers/soc/amlogic/Kconfig
+++ b/drivers/soc/amlogic/Kconfig
@@ -9,7 +9,7 @@ config MESON_CANVAS
Say yes to support the canvas IP for Amlogic SoCs.
config MESON_CLK_MEASURE
- bool "Amlogic Meson SoC Clock Measure driver"
+ tristate "Amlogic Meson SoC Clock Measure driver"
depends on ARCH_MESON || COMPILE_TEST
default ARCH_MESON
select REGMAP_MMIO
@@ -19,7 +19,7 @@ config MESON_CLK_MEASURE
config MESON_GX_SOCINFO
bool "Amlogic Meson GX SoC Information driver"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on (ARM64 && ARCH_MESON) || COMPILE_TEST
default ARCH_MESON
select SOC_BUS
help
@@ -27,7 +27,7 @@ config MESON_GX_SOCINFO
information about the type, package and version.
config MESON_GX_PM_DOMAINS
- bool "Amlogic Meson GX Power Domains driver"
+ tristate "Amlogic Meson GX Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
@@ -38,7 +38,7 @@ config MESON_GX_PM_DOMAINS
Generic Power Domains.
config MESON_EE_PM_DOMAINS
- bool "Amlogic Meson Everything-Else Power Domains driver"
+ tristate "Amlogic Meson Everything-Else Power Domains driver"
depends on ARCH_MESON || COMPILE_TEST
depends on PM && OF
default ARCH_MESON
@@ -49,7 +49,7 @@ config MESON_EE_PM_DOMAINS
Generic Power Domains.
config MESON_SECURE_PM_DOMAINS
- bool "Amlogic Meson Secure Power Domains driver"
+ tristate "Amlogic Meson Secure Power Domains driver"
depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM
depends on PM && OF
depends on HAVE_ARM_SMCCC
@@ -63,7 +63,7 @@ config MESON_SECURE_PM_DOMAINS
config MESON_MX_SOCINFO
bool "Amlogic Meson MX SoC Information driver"
- depends on ARCH_MESON || COMPILE_TEST
+ depends on (ARM && ARCH_MESON) || COMPILE_TEST
default ARCH_MESON
select SOC_BUS
help
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index c655f5f92b12..d0329ad170d1 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -72,8 +72,10 @@ struct meson_canvas *meson_canvas_get(struct device *dev)
* current state, this driver probe cannot return -EPROBE_DEFER
*/
canvas = dev_get_drvdata(&canvas_pdev->dev);
- if (!canvas)
+ if (!canvas) {
+ put_device(&canvas_pdev->dev);
return ERR_PTR(-EINVAL);
+ }
return canvas;
}
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 0fa47d77577d..e1957476a006 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -10,6 +10,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/regmap.h>
+#include <linux/module.h>
static DEFINE_MUTEX(measure_lock);
@@ -681,6 +682,7 @@ static const struct of_device_id meson_msr_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_msr_match_table);
static struct platform_driver meson_msr_driver = {
.probe = meson_msr_probe,
@@ -689,4 +691,5 @@ static struct platform_driver meson_msr_driver = {
.of_match_table = meson_msr_match_table,
},
};
-builtin_platform_driver(meson_msr_driver);
+module_platform_driver(meson_msr_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-ee-pwrc.c b/drivers/soc/amlogic/meson-ee-pwrc.c
index 5164a4dc2352..50bf5d2b828b 100644
--- a/drivers/soc/amlogic/meson-ee-pwrc.c
+++ b/drivers/soc/amlogic/meson-ee-pwrc.c
@@ -14,6 +14,7 @@
#include <linux/reset-controller.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/module.h>
#include <dt-bindings/power/meson8-power.h>
#include <dt-bindings/power/meson-axg-power.h>
#include <dt-bindings/power/meson-g12a-power.h>
@@ -412,8 +413,7 @@ static int meson_ee_pwrc_init_domain(struct platform_device *pdev,
dev_warn(&pdev->dev, "Invalid resets count %d for domain %s\n",
count, dom->desc.name);
- dom->rstc = devm_reset_control_array_get(&pdev->dev, false,
- false);
+ dom->rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(dom->rstc))
return PTR_ERR(dom->rstc);
}
@@ -602,6 +602,7 @@ static const struct of_device_id meson_ee_pwrc_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_ee_pwrc_match_table);
static struct platform_driver meson_ee_pwrc_driver = {
.probe = meson_ee_pwrc_probe,
@@ -611,4 +612,5 @@ static struct platform_driver meson_ee_pwrc_driver = {
.of_match_table = meson_ee_pwrc_match_table,
},
};
-builtin_platform_driver(meson_ee_pwrc_driver);
+module_platform_driver(meson_ee_pwrc_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
index 21b4bc811c00..b4615b288625 100644
--- a/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
+++ b/drivers/soc/amlogic/meson-gx-pwrc-vpu.c
@@ -14,6 +14,7 @@
#include <linux/of_device.h>
#include <linux/reset.h>
#include <linux/clk.h>
+#include <linux/module.h>
/* AO Offsets */
@@ -303,7 +304,7 @@ static int meson_gx_pwrc_vpu_probe(struct platform_device *pdev)
return PTR_ERR(regmap_hhi);
}
- rstc = devm_reset_control_array_get(&pdev->dev, false, false);
+ rstc = devm_reset_control_array_get_exclusive(&pdev->dev);
if (IS_ERR(rstc)) {
if (PTR_ERR(rstc) != -EPROBE_DEFER)
dev_err(&pdev->dev, "failed to get reset lines\n");
@@ -364,6 +365,7 @@ static const struct of_device_id meson_gx_pwrc_vpu_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_gx_pwrc_vpu_match_table);
static struct platform_driver meson_gx_pwrc_vpu_driver = {
.probe = meson_gx_pwrc_vpu_probe,
@@ -373,4 +375,5 @@ static struct platform_driver meson_gx_pwrc_vpu_driver = {
.of_match_table = meson_gx_pwrc_vpu_match_table,
},
};
-builtin_platform_driver(meson_gx_pwrc_vpu_driver);
+module_platform_driver(meson_gx_pwrc_vpu_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c
index 5fb29a475879..59bd195fa9c9 100644
--- a/drivers/soc/amlogic/meson-secure-pwrc.c
+++ b/drivers/soc/amlogic/meson-secure-pwrc.c
@@ -13,6 +13,7 @@
#include <dt-bindings/power/meson-a1-power.h>
#include <linux/arm-smccc.h>
#include <linux/firmware/meson/meson_sm.h>
+#include <linux/module.h>
#define PWRC_ON 1
#define PWRC_OFF 0
@@ -193,6 +194,7 @@ static const struct of_device_id meson_secure_pwrc_match_table[] = {
},
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, meson_secure_pwrc_match_table);
static struct platform_driver meson_secure_pwrc_driver = {
.probe = meson_secure_pwrc_probe,
@@ -201,4 +203,5 @@ static struct platform_driver meson_secure_pwrc_driver = {
.of_match_table = meson_secure_pwrc_match_table,
},
};
-builtin_platform_driver(meson_secure_pwrc_driver);
+module_platform_driver(meson_secure_pwrc_driver);
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index c95fa30f1a76..243ca196e6ad 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -1,32 +1,47 @@
# SPDX-License-Identifier: GPL-2.0-only
-menu "Aspeed SoC drivers"
-config SOC_ASPEED
- def_bool y
- depends on ARCH_ASPEED || COMPILE_TEST
+if ARCH_ASPEED || COMPILE_TEST
+
+menu "ASPEED SoC drivers"
config ASPEED_LPC_CTRL
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST LPC to BMC bridge control"
+ tristate "ASPEED LPC firmware cycle control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
help
- Control Aspeed ast2400/2500 HOST LPC to BMC mappings through
- ioctl()s, the driver also provides a read/write interface to a BMC ram
- region where the host LPC read/write region can be buffered.
+ Control LPC firmware cycle mappings through ioctl()s. The driver
+ also provides a read/write interface to a BMC ram region where the
+ host LPC read/write region can be buffered.
config ASPEED_LPC_SNOOP
- tristate "Aspeed ast2500 HOST LPC snoop support"
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
+ tristate "ASPEED LPC snoop support"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
help
Provides a driver to control the LPC snoop interface which
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
config ASPEED_P2A_CTRL
- depends on SOC_ASPEED && REGMAP && MFD_SYSCON
- tristate "Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC bridge control"
+ tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Control ASPEED P2A VGA MMIO to BMC mappings through ioctl()s. The
+ driver also provides an interface for userspace mappings to a
+ pre-defined region.
+
+config ASPEED_SOCINFO
+	bool "ASPEED SoC Information driver"
+	default ARCH_ASPEED
+	select SOC_BUS
help
- Control Aspeed ast2400/2500 HOST P2A VGA MMIO to BMC mappings through
- ioctl()s, the driver also provides an interface for userspace mappings to
- a pre-defined region.
+ Say yes to support decoding of ASPEED BMC information.
endmenu
+
+endif
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index b64be47f2b1f..fcab7192e1a4 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -2,3 +2,4 @@
obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
diff --git a/drivers/soc/aspeed/aspeed-lpc-ctrl.c b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
index 01ed21e8bfee..439bcd6b8c4a 100644
--- a/drivers/soc/aspeed/aspeed-lpc-ctrl.c
+++ b/drivers/soc/aspeed/aspeed-lpc-ctrl.c
@@ -4,6 +4,7 @@
*/
#include <linux/clk.h>
+#include <linux/log2.h>
#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
@@ -21,6 +22,9 @@
#define HICR5_ENL2H BIT(8)
#define HICR5_ENFWH BIT(10)
+#define HICR6 0x4
+#define SW_FWH2AHB BIT(17)
+
#define HICR7 0x8
#define HICR8 0xc
@@ -30,8 +34,9 @@ struct aspeed_lpc_ctrl {
struct clk *clk;
phys_addr_t mem_base;
resource_size_t mem_size;
- u32 pnor_size;
- u32 pnor_base;
+ u32 pnor_size;
+ u32 pnor_base;
+ bool fwh2ahb;
};
static struct aspeed_lpc_ctrl *file_aspeed_lpc_ctrl(struct file *file)
@@ -177,6 +182,16 @@ static long aspeed_lpc_ctrl_ioctl(struct file *file, unsigned int cmd,
return rc;
/*
+ * Switch to FWH2AHB mode, AST2600 only.
+ *
+ * The other bits in this register are interrupt status bits
+ * that are cleared by writing 1. As we don't want to clear
+ * them, set only the bit of interest.
+ */
+ if (lpc_ctrl->fwh2ahb)
+ regmap_write(lpc_ctrl->regmap, HICR6, SW_FWH2AHB);
+
+ /*
* Enable LPC FHW cycles. This is required for the host to
* access the regions specified.
*/
@@ -241,6 +256,18 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
lpc_ctrl->mem_size = resource_size(&resm);
lpc_ctrl->mem_base = resm.start;
+
+ if (!is_power_of_2(lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory size must be a power of 2, got %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
+
+ if (!IS_ALIGNED(lpc_ctrl->mem_base, lpc_ctrl->mem_size)) {
+ dev_err(dev, "Reserved memory must be naturally aligned for size %u\n",
+ (unsigned int)lpc_ctrl->mem_size);
+ return -EINVAL;
+ }
}
lpc_ctrl->regmap = syscon_node_to_regmap(
@@ -261,6 +288,9 @@ static int aspeed_lpc_ctrl_probe(struct platform_device *pdev)
return rc;
}
+ if (of_device_is_compatible(dev->of_node, "aspeed,ast2600-lpc-ctrl"))
+ lpc_ctrl->fwh2ahb = true;
+
lpc_ctrl->miscdev.minor = MISC_DYNAMIC_MINOR;
lpc_ctrl->miscdev.name = DEVICE_NAME;
lpc_ctrl->miscdev.fops = &aspeed_lpc_ctrl_fops;
@@ -291,6 +321,7 @@ static int aspeed_lpc_ctrl_remove(struct platform_device *pdev)
static const struct of_device_id aspeed_lpc_ctrl_match[] = {
{ .compatible = "aspeed,ast2400-lpc-ctrl" },
{ .compatible = "aspeed,ast2500-lpc-ctrl" },
+ { .compatible = "aspeed,ast2600-lpc-ctrl" },
{ },
};
@@ -308,4 +339,4 @@ module_platform_driver(aspeed_lpc_ctrl_driver);
MODULE_DEVICE_TABLE(of, aspeed_lpc_ctrl_match);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Bur <cyrilbur@gmail.com>");
-MODULE_DESCRIPTION("Control for aspeed 2400/2500 LPC HOST to BMC mappings");
+MODULE_DESCRIPTION("Control for ASPEED LPC HOST to BMC mappings");
diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c
index f3d8d53ab84d..682ba0eb4eba 100644
--- a/drivers/soc/aspeed/aspeed-lpc-snoop.c
+++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c
@@ -325,6 +325,8 @@ static const struct of_device_id aspeed_lpc_snoop_match[] = {
.data = &ast2400_model_data },
{ .compatible = "aspeed,ast2500-lpc-snoop",
.data = &ast2500_model_data },
+ { .compatible = "aspeed,ast2600-lpc-snoop",
+ .data = &ast2500_model_data },
{ },
};
diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c
new file mode 100644
index 000000000000..773930e0cb10
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-socinfo.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright 2019 IBM Corp. */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+static struct {
+ const char *name;
+ const u32 id;
+} const rev_table[] = {
+ /* AST2400 */
+ { "AST2400", 0x02000303 },
+ { "AST1400", 0x02010103 },
+ { "AST1250", 0x02010303 },
+ /* AST2500 */
+ { "AST2500", 0x04000303 },
+ { "AST2510", 0x04000103 },
+ { "AST2520", 0x04000203 },
+ { "AST2530", 0x04000403 },
+ /* AST2600 */
+ { "AST2600", 0x05000303 },
+ { "AST2620", 0x05010203 },
+};
+
+static const char *siliconid_to_name(u32 siliconid)
+{
+ unsigned int id = siliconid & 0xff00ffff;
+ unsigned int i;
+
+ for (i = 0 ; i < ARRAY_SIZE(rev_table) ; ++i) {
+ if (rev_table[i].id == id)
+ return rev_table[i].name;
+ }
+
+ return "Unknown";
+}
+
+static const char *siliconid_to_rev(u32 siliconid)
+{
+ unsigned int rev = (siliconid >> 16) & 0xff;
+
+ switch (rev) {
+ case 0:
+ return "A0";
+ case 1:
+ return "A1";
+ case 3:
+ return "A2";
+ }
+
+ return "??";
+}
+
+static int __init aspeed_socinfo_init(void)
+{
+ struct soc_device_attribute *attrs;
+ struct soc_device *soc_dev;
+ struct device_node *np;
+ void __iomem *reg;
+ bool has_chipid = false;
+ u32 siliconid;
+ u32 chipid[2];
+ const char *machine = NULL;
+
+ np = of_find_compatible_node(NULL, NULL, "aspeed,silicon-id");
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+
+ reg = of_iomap(np, 0);
+ if (!reg) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ siliconid = readl(reg);
+ iounmap(reg);
+
+ /* This is optional, the ast2400 does not have it */
+ reg = of_iomap(np, 1);
+ if (reg) {
+ has_chipid = true;
+ chipid[0] = readl(reg);
+ chipid[1] = readl(reg + 4);
+ iounmap(reg);
+ }
+ of_node_put(np);
+
+ attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
+ if (!attrs)
+ return -ENODEV;
+
+ /*
+ * Machine: Romulus BMC
+ * Family: AST2500
+ * Revision: A1
+ * SoC ID: raw silicon revision id
+ * Serial Number: 64-bit chipid
+ */
+
+ np = of_find_node_by_path("/");
+ of_property_read_string(np, "model", &machine);
+ if (machine)
+ attrs->machine = kstrdup(machine, GFP_KERNEL);
+ of_node_put(np);
+
+ attrs->family = siliconid_to_name(siliconid);
+ attrs->revision = siliconid_to_rev(siliconid);
+ attrs->soc_id = kasprintf(GFP_KERNEL, "%08x", siliconid);
+
+ if (has_chipid)
+ attrs->serial_number = kasprintf(GFP_KERNEL, "%08x%08x",
+ chipid[1], chipid[0]);
+
+ soc_dev = soc_device_register(attrs);
+ if (IS_ERR(soc_dev)) {
+ kfree(attrs->soc_id);
+ kfree(attrs->serial_number);
+ kfree(attrs);
+ return PTR_ERR(soc_dev);
+ }
+
+ pr_info("ASPEED %s rev %s (%s)\n",
+ attrs->family,
+ attrs->revision,
+ attrs->soc_id);
+
+ return 0;
+}
+early_initcall(aspeed_socinfo_init);
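
To make the decode helpers above concrete, take a hypothetical raw silicon id of 0x04010303: siliconid_to_name() masks it with 0xff00ffff to strip the revision byte, yielding 0x04000303, which matches the "AST2500" entry in rev_table; siliconid_to_rev() extracts (0x04010303 >> 16) & 0xff = 0x01, which maps to "A1". The registered SoC device would therefore report family "AST2500", revision "A1", and soc_id "04010303".
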
diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c
index 55a1f57a4d8c..c4472b68b7c2 100644
--- a/drivers/soc/atmel/soc.c
+++ b/drivers/soc/atmel/soc.c
@@ -69,6 +69,12 @@ static const struct at91_soc __initconst socs[] = {
#endif
#ifdef CONFIG_SOC_SAM9X60
AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_EXID_MATCH, "sam9x60", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D5M_EXID_MATCH,
+ "sam9x60 64MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D1G_EXID_MATCH,
+ "sam9x60 128MiB DDR2 SiP", "sam9x60"),
+ AT91_SOC(SAM9X60_CIDR_MATCH, SAM9X60_D6K_EXID_MATCH,
+ "sam9x60 8MiB SDRAM SiP", "sam9x60"),
#endif
#ifdef CONFIG_SOC_SAMA5
AT91_SOC(SAMA5D2_CIDR_MATCH, SAMA5D21CU_EXID_MATCH,
diff --git a/drivers/soc/atmel/soc.h b/drivers/soc/atmel/soc.h
index ee652e4841a5..5849846a69d6 100644
--- a/drivers/soc/atmel/soc.h
+++ b/drivers/soc/atmel/soc.h
@@ -60,6 +60,9 @@ at91_soc_init(const struct at91_soc *socs);
#define AT91SAM9CN11_EXID_MATCH 0x00000009
#define SAM9X60_EXID_MATCH 0x00000000
+#define SAM9X60_D5M_EXID_MATCH 0x00000001
+#define SAM9X60_D1G_EXID_MATCH 0x00000010
+#define SAM9X60_D6K_EXID_MATCH 0x00000011
#define AT91SAM9XE128_CIDR_MATCH 0x329973a0
#define AT91SAM9XE256_CIDR_MATCH 0x329a93a0
diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
index b1062334e608..a673fdffe216 100644
--- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c
+++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c
@@ -111,6 +111,8 @@ enum bsp_initiate_command {
static struct brcmstb_pm_control ctrl;
+noinline int brcmstb_pm_s3_finish(void);
+
static int (*brcmstb_pm_do_s2_sram)(void __iomem *aon_ctrl_base,
void __iomem *ddr_phy_pll_status);
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 659b4a570d5b..f13da4d7d1c5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -424,7 +424,7 @@ int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p)
/**
* qbman_swp_interrupt_set_inhibit() - write interrupt mask register
* @p: the given software portal object
- * @mask: The mask to set in SWP_IIR register
+ * @inhibit: whether to inhibit the IRQs
*/
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit)
{
@@ -510,7 +510,7 @@ enum qb_enqueue_commands {
#define QB_ENQUEUE_CMD_TARGET_TYPE_SHIFT 4
#define QB_ENQUEUE_CMD_DCA_EN_SHIFT 7
-/**
+/*
* qbman_eq_desc_clear() - Clear the contents of a descriptor to
* default/starting state.
*/
@@ -522,7 +522,7 @@ void qbman_eq_desc_clear(struct qbman_eq_desc *d)
/**
* qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp
* @d: the enqueue descriptor.
- * @response_success: 1 = enqueue with response always; 0 = enqueue with
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
* rejections returned on a FQ.
*/
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
@@ -932,7 +932,7 @@ int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
/**
* qbman_swp_push_get() - Get the push dequeue setup
- * @p: the software portal object
+ * @s: the software portal object
* @channel_idx: the channel index to query
* @enabled: returned boolean to show whether the push dequeue is enabled
* for the given channel
@@ -947,7 +947,7 @@ void qbman_swp_push_get(struct qbman_swp *s, u8 channel_idx, int *enabled)
/**
* qbman_swp_push_set() - Enable or disable push dequeue
- * @p: the software portal object
+ * @s: the software portal object
* @channel_idx: the channel index (0 to 15)
* @enable: enable or disable push dequeue
*/
@@ -1046,6 +1046,7 @@ void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes)
/**
* qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
* @fqid: the frame queue index of the given FQ
*/
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
@@ -1057,6 +1058,7 @@ void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid)
/**
* qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues
+ * @d: the pull dequeue descriptor to be set
* @wqid: composed of channel id and wqid within the channel
* @dct: the dequeue command type
*/
@@ -1071,6 +1073,7 @@ void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
/**
* qbman_pull_desc_set_channel() - Set channelid from which the dequeue command
* dequeues
+ * @d: the pull dequeue descriptor to be set
* @chid: the channel id to be dequeued
* @dct: the dequeue command type
*/
@@ -1398,6 +1401,7 @@ int qbman_result_has_new_result(struct qbman_swp *s, const struct dpaa2_dq *dq)
/**
* qbman_release_desc_clear() - Clear the contents of a descriptor to
* default/starting state.
+ * @d: the pull dequeue descriptor to be cleared
*/
void qbman_release_desc_clear(struct qbman_release_desc *d)
{
@@ -1407,6 +1411,8 @@ void qbman_release_desc_clear(struct qbman_release_desc *d)
/**
* qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to
+ * @d: the pull dequeue descriptor to be set
+ * @bpid: the bpid value to be set
*/
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
{
@@ -1416,6 +1422,8 @@ void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid)
/**
* qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI
* interrupt source should be asserted after the release command is completed.
+ * @d: the pull dequeue descriptor to be set
+ * @enable: enable (1) or disable (0) value
*/
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable)
{
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 101def7dc73d..a1b9be1d105a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -2622,7 +2622,7 @@ int qman_shutdown_fq(u32 fqid)
union qm_mc_command *mcc;
union qm_mc_result *mcr;
int orl_empty, drain = 0, ret = 0;
- u32 channel, wq, res;
+ u32 channel, res;
u8 state;
p = get_affine_portal();
@@ -2655,7 +2655,7 @@ int qman_shutdown_fq(u32 fqid)
DPAA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
/* Need to store these since the MCR gets reused */
channel = qm_fqd_get_chan(&mcr->queryfq.fqd);
- wq = qm_fqd_get_wq(&mcr->queryfq.fqd);
+ qm_fqd_get_wq(&mcr->queryfq.fqd);
if (channel < qm_channel_pool1) {
channel_portal = get_portal_for_channel(channel);
@@ -2697,7 +2697,6 @@ int qman_shutdown_fq(u32 fqid)
* to dequeue from the channel the FQ is scheduled on
*/
int found_fqrn = 0;
- u16 dequeue_wq = 0;
/* Flag that we need to drain FQ */
drain = 1;
@@ -2705,11 +2704,8 @@ int qman_shutdown_fq(u32 fqid)
if (channel >= qm_channel_pool1 &&
channel < qm_channel_pool1 + 15) {
/* Pool channel, enable the bit in the portal */
- dequeue_wq = (channel -
- qm_channel_pool1 + 1)<<4 | wq;
} else if (channel < qm_channel_pool1) {
/* Dedicated channel */
- dequeue_wq = wq;
} else {
dev_err(dev, "Can't recover FQ 0x%x, ch: 0x%x",
fqid, channel);
diff --git a/drivers/soc/fsl/qe/qe_common.c b/drivers/soc/fsl/qe/qe_common.c
index 75075591f630..497a7e0fd027 100644
--- a/drivers/soc/fsl/qe/qe_common.c
+++ b/drivers/soc/fsl/qe/qe_common.c
@@ -231,7 +231,7 @@ EXPORT_SYMBOL(cpm_muram_offset);
/**
* cpm_muram_dma - turn a muram virtual address into a DMA address
- * @offset: virtual address from cpm_muram_addr() to convert
+ * @addr: virtual address from cpm_muram_addr() to convert
*/
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index a093dbe6d2cb..4ace28cab314 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -2,7 +2,7 @@
//
// rcpm.c - Freescale QorIQ RCPM driver
//
-// Copyright 2019 NXP
+// Copyright 2019-2020 NXP
//
// Author: Ran Wang <ran.wang_1@nxp.com>
@@ -22,6 +22,28 @@ struct rcpm {
bool little_endian;
};
+#define SCFG_SPARECR8 0x051c
+
+static void copy_ippdexpcr1_setting(u32 val)
+{
+ struct device_node *np;
+ void __iomem *regs;
+ u32 reg_val;
+
+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-scfg");
+ if (!np)
+ return;
+
+ regs = of_iomap(np, 0);
+ if (!regs)
+ return;
+
+ reg_val = ioread32be(regs + SCFG_SPARECR8);
+ iowrite32be(val | reg_val, regs + SCFG_SPARECR8);
+
+ iounmap(regs);
+}
+
/**
* rcpm_pm_prepare - performs device-level tasks associated with power
* management, such as programming related to the wakeup source control.
@@ -90,6 +112,17 @@ static int rcpm_pm_prepare(struct device *dev)
tmp |= ioread32be(address);
iowrite32be(tmp, address);
}
+	/*
+	 * Workaround for erratum A-008646 on the LS1021A SoC:
+	 * the IPPDEXPCR1 register is buggy, and reading the
+	 * RCPM_IPPDEXPCR1 configuration register always returns
+	 * zero. So save ippdexpcr1's value to the SCFG_SPARECR8
+	 * register, and read the value of ippdexpcr1 back from
+	 * SCFG_SPARECR8.
+	 */
+ if (dev_of_node(dev) && (i == 1))
+ if (of_device_is_compatible(np, "fsl,ls1021a-rcpm"))
+ copy_ippdexpcr1_setting(tmp);
}
return 0;
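
Note that the hunk above only implements the save side of the A-008646 workaround: rcpm_pm_prepare() mirrors the value into SCFG_SPARECR8 precisely because IPPDEXPCR1 itself reads back as zero. A hedged sketch of the read-back counterpart a consumer would need; the helper name is hypothetical, while the register offset comes from the patch:

#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#define SCFG_SPARECR8	0x051c	/* from the patch above */

/* Hypothetical read-back counterpart to copy_ippdexpcr1_setting(). */
static u32 read_ippdexpcr1_setting(void)
{
	struct device_node *np;
	void __iomem *regs;
	u32 val;

	np = of_find_compatible_node(NULL, NULL, "fsl,ls1021a-scfg");
	if (!np)
		return 0;

	regs = of_iomap(np, 0);
	of_node_put(np);
	if (!regs)
		return 0;

	/* SCFG_SPARECR8 holds the authoritative IPPDEXPCR1 contents */
	val = ioread32be(regs + SCFG_SPARECR8);
	iounmap(regs);

	return val;
}
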
diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig
new file mode 100644
index 000000000000..7c6b009b6f6c
--- /dev/null
+++ b/drivers/soc/litex/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menu "Enable LiteX SoC Builder specific drivers"
+
+config LITEX
+ bool
+
+config LITEX_SOC_CONTROLLER
+ tristate "Enable LiteX SoC Controller driver"
+ depends on OF || COMPILE_TEST
+ select LITEX
+ help
+	  This option enables the SoC Controller driver, which verifies
+	  LiteX CSR access and provides common litex_get_reg/litex_set_reg
+ accessors.
+ All drivers that use functions from litex.h must depend on
+ LITEX.
+
+endmenu
diff --git a/drivers/soc/litex/Makefile b/drivers/soc/litex/Makefile
new file mode 100644
index 000000000000..98ff7325b1c0
--- /dev/null
+++ b/drivers/soc/litex/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_LITEX_SOC_CONTROLLER) += litex_soc_ctrl.o
diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c
new file mode 100644
index 000000000000..1217cafdfd4d
--- /dev/null
+++ b/drivers/soc/litex/litex_soc_ctrl.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteX SoC Controller Driver
+ *
+ * Copyright (C) 2020 Antmicro <www.antmicro.com>
+ *
+ */
+
+#include <linux/litex.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+
+/*
+ * LiteX SoC Generator, depending on the configuration, can split a single
+ * logical CSR (Control&Status Register) into a series of consecutive physical
+ * registers.
+ *
+ * For example, in the configuration with 8-bit CSR Bus, 32-bit aligned (the
+ * default one for 32-bit CPUs) a 32-bit logical CSR will be generated as four
+ * 32-bit physical registers, each one containing one byte of meaningful data.
+ *
+ * For details see: https://github.com/enjoy-digital/litex/wiki/CSR-Bus
+ *
+ * The purpose of `litex_set_reg`/`litex_get_reg` is to implement the logic
+ * of writing to/reading from the LiteX CSR in a single place that can be
+ * then reused by all LiteX drivers.
+ */
+
+/**
+ * litex_set_reg() - Writes the value to the LiteX CSR (Control&Status Register)
+ * @reg: Address of the CSR
+ * @reg_size: The width of the CSR expressed in the number of bytes
+ * @val: Value to be written to the CSR
+ *
+ * In the currently supported LiteX configuration (8-bit CSR Bus, 32-bit aligned),
+ * a 32-bit LiteX CSR is generated as 4 consecutive 32-bit physical registers,
+ * each one containing one byte of meaningful data.
+ *
+ * This function splits a single possibly multi-byte write into a series of
+ * single-byte writes with a proper offset.
+ */
+void litex_set_reg(void __iomem *reg, unsigned long reg_size,
+ unsigned long val)
+{
+ unsigned long shifted_data, shift, i;
+
+ for (i = 0; i < reg_size; ++i) {
+ shift = ((reg_size - i - 1) * LITEX_SUBREG_SIZE_BIT);
+ shifted_data = val >> shift;
+
+ WRITE_LITEX_SUBREGISTER(shifted_data, reg, i);
+ }
+}
+EXPORT_SYMBOL_GPL(litex_set_reg);
+
+/**
+ * litex_get_reg() - Reads the value of the LiteX CSR (Control&Status Register)
+ * @reg: Address of the CSR
+ * @reg_size: The width of the CSR expressed in the number of bytes
+ *
+ * Return: Value read from the CSR
+ *
+ * In the currently supported LiteX configuration (8-bit CSR Bus, 32-bit aligned),
+ * a 32-bit LiteX CSR is generated as 4 consecutive 32-bit physical registers,
+ * each one containing one byte of meaningful data.
+ *
+ * This function generates a series of single-byte reads with a proper offset
+ * and joins their results into a single multi-byte value.
+ */
+unsigned long litex_get_reg(void __iomem *reg, unsigned long reg_size)
+{
+ unsigned long shifted_data, shift, i;
+ unsigned long result = 0;
+
+ for (i = 0; i < reg_size; ++i) {
+ shifted_data = READ_LITEX_SUBREGISTER(reg, i);
+
+ shift = ((reg_size - i - 1) * LITEX_SUBREG_SIZE_BIT);
+ result |= (shifted_data << shift);
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(litex_get_reg);
+
+#define SCRATCH_REG_OFF 0x04
+#define SCRATCH_REG_VALUE 0x12345678
+#define SCRATCH_TEST_VALUE 0xdeadbeef
+
+/*
+ * Check LiteX CSR read/write access
+ *
+ * This function reads and writes a scratch register in order to verify if CSR
+ * access works.
+ *
+ * In case any problems are detected, the driver should panic.
+ *
+ * Access to the LiteX CSR is, by design, done in CPU native endianness.
+ * The driver should not dynamically configure access functions when
+ * the endianness mismatch is detected. Such situation indicates problems in
+ * the soft SoC design and should be solved at the LiteX generator level,
+ * not in the software.
+ */
+static int litex_check_csr_access(void __iomem *reg_addr)
+{
+ unsigned long reg;
+
+ reg = litex_read32(reg_addr + SCRATCH_REG_OFF);
+
+ if (reg != SCRATCH_REG_VALUE) {
+ panic("Scratch register read error - the system is probably broken! Expected: 0x%x but got: 0x%lx",
+ SCRATCH_REG_VALUE, reg);
+ return -EINVAL;
+ }
+
+ litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_TEST_VALUE);
+ reg = litex_read32(reg_addr + SCRATCH_REG_OFF);
+
+ if (reg != SCRATCH_TEST_VALUE) {
+ panic("Scratch register write error - the system is probably broken! Expected: 0x%x but got: 0x%lx",
+ SCRATCH_TEST_VALUE, reg);
+ return -EINVAL;
+ }
+
+ /* restore original value of the SCRATCH register */
+ litex_write32(reg_addr + SCRATCH_REG_OFF, SCRATCH_REG_VALUE);
+
+ pr_info("LiteX SoC Controller driver initialized");
+
+ return 0;
+}
+
+struct litex_soc_ctrl_device {
+ void __iomem *base;
+};
+
+static const struct of_device_id litex_soc_ctrl_of_match[] = {
+ {.compatible = "litex,soc-controller"},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match);
+
+static int litex_soc_ctrl_probe(struct platform_device *pdev)
+{
+ struct litex_soc_ctrl_device *soc_ctrl_dev;
+
+ soc_ctrl_dev = devm_kzalloc(&pdev->dev, sizeof(*soc_ctrl_dev), GFP_KERNEL);
+ if (!soc_ctrl_dev)
+ return -ENOMEM;
+
+ soc_ctrl_dev->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(soc_ctrl_dev->base))
+ return PTR_ERR(soc_ctrl_dev->base);
+
+ return litex_check_csr_access(soc_ctrl_dev->base);
+}
+
+static struct platform_driver litex_soc_ctrl_driver = {
+ .driver = {
+ .name = "litex-soc-controller",
+ .of_match_table = of_match_ptr(litex_soc_ctrl_of_match)
+ },
+ .probe = litex_soc_ctrl_probe,
+};
+
+module_platform_driver(litex_soc_ctrl_driver);
+MODULE_DESCRIPTION("LiteX SoC Controller driver");
+MODULE_AUTHOR("Antmicro <www.antmicro.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig
index 59a56cd790ec..fdd8bc08569e 100644
--- a/drivers/soc/mediatek/Kconfig
+++ b/drivers/soc/mediatek/Kconfig
@@ -17,6 +17,15 @@ config MTK_CMDQ
time limitation, such as updating display configuration during the
vblank.
+config MTK_DEVAPC
+ tristate "Mediatek Device APC Support"
+ help
+	  Say yes here to enable support for the MediaTek Device APC driver.
+	  This driver is mainly used to handle violations raised when the
+	  Device APC catches an unexpected transaction.
+ The violation information is logged for further analysis or
+ countermeasures.
+
config MTK_INFRACFG
bool "MediaTek INFRACFG Support"
select REGMAP
@@ -44,9 +53,22 @@ config MTK_SCPSYS
Say yes here to add support for the MediaTek SCPSYS power domain
driver.
+config MTK_SCPSYS_PM_DOMAINS
+ bool "MediaTek SCPSYS generic power domain"
+ default ARCH_MEDIATEK
+ depends on PM
+ select PM_GENERIC_DOMAINS
+ select REGMAP
+ help
+ Say y here to enable power domain support.
+	  In order to meet high-performance and low-power requirements, the
+	  System Control Processor System (SCPSYS) handles several power
+	  management tasks in the system.
+
config MTK_MMSYS
bool "MediaTek MMSYS Support"
default ARCH_MEDIATEK
+ depends on HAS_IOMEM
help
Say yes here to add support for the MediaTek Multimedia
Subsystem (MMSYS).
diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile
index 01f9f873634a..b6908db534c2 100644
--- a/drivers/soc/mediatek/Makefile
+++ b/drivers/soc/mediatek/Makefile
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
+obj-$(CONFIG_MTK_DEVAPC) += mtk-devapc.o
obj-$(CONFIG_MTK_INFRACFG) += mtk-infracfg.o
obj-$(CONFIG_MTK_PMIC_WRAP) += mtk-pmic-wrap.o
obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o
+obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o
obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o
diff --git a/drivers/soc/mediatek/mt8173-pm-domains.h b/drivers/soc/mediatek/mt8173-pm-domains.h
new file mode 100644
index 000000000000..3e8ee5dabb43
--- /dev/null
+++ b/drivers/soc/mediatek/mt8173-pm-domains.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8173_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8173_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8173-power.h>
+
+/*
+ * MT8173 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8173[] = {
+ [MT8173_POWER_DOMAIN_VDEC] = {
+ .sta_mask = PWR_STATUS_VDEC,
+ .ctl_offs = SPM_VDE_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8173_POWER_DOMAIN_VENC] = {
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = SPM_VEN_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = SPM_ISP_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT8173_POWER_DOMAIN_MM] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = SPM_DIS_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 |
+ MT8173_TOP_AXI_PROT_EN_MM_M1),
+ },
+ },
+ [MT8173_POWER_DOMAIN_VENC_LT] = {
+ .sta_mask = PWR_STATUS_VENC_LT,
+ .ctl_offs = SPM_VEN2_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = SPM_AUDIO_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8173_POWER_DOMAIN_USB] = {
+ .sta_mask = PWR_STATUS_USB,
+ .ctl_offs = SPM_USB_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .caps = MTK_SCPD_ACTIVE_WAKEUP,
+ },
+ [MT8173_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = SPM_MFG_ASYNC_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT8173_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = SPM_MFG_2D_PWR_CON,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ },
+ [MT8173_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = SPM_MFG_PWR_CON,
+ .sram_pdn_bits = GENMASK(13, 8),
+ .sram_pdn_ack_bits = GENMASK(21, 16),
+ .bp_infracfg = {
+ BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S |
+ MT8173_TOP_AXI_PROT_EN_MFG_M0 |
+ MT8173_TOP_AXI_PROT_EN_MFG_M1 |
+ MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT),
+ },
+ },
+};
+
+static const struct scpsys_soc_data mt8173_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8173,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8173),
+ .pwr_sta_offs = SPM_PWR_STATUS,
+ .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND,
+};
+
+#endif /* __SOC_MEDIATEK_MT8173_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h
new file mode 100644
index 000000000000..8d996c5d2682
--- /dev/null
+++ b/drivers/soc/mediatek/mt8183-pm-domains.h
@@ -0,0 +1,221 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8183_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8183_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8183-power.h>
+
+/*
+ * MT8183 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = {
+ [MT8183_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = PWR_STATUS_AUDIO,
+ .ctl_offs = 0x0314,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ },
+ [MT8183_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = 0x032c,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_CONN, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ },
+ [MT8183_POWER_DOMAIN_MFG_ASYNC] = {
+ .sta_mask = PWR_STATUS_MFG_ASYNC,
+ .ctl_offs = 0x0334,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ },
+ [MT8183_POWER_DOMAIN_MFG] = {
+ .sta_mask = PWR_STATUS_MFG,
+ .ctl_offs = 0x0338,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_CORE0] = {
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x034c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_CORE1] = {
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x0310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8183_POWER_DOMAIN_MFG_2D] = {
+ .sta_mask = PWR_STATUS_MFG_2D,
+ .ctl_offs = 0x0348,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_1_MFG, MT8183_TOP_AXI_PROT_EN_1_SET,
+ MT8183_TOP_AXI_PROT_EN_1_CLR, MT8183_TOP_AXI_PROT_EN_STA1_1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MFG, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ },
+ [MT8183_POWER_DOMAIN_DISP] = {
+ .sta_mask = PWR_STATUS_DISP,
+ .ctl_offs = 0x030c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_1_DISP, MT8183_TOP_AXI_PROT_EN_1_SET,
+ MT8183_TOP_AXI_PROT_EN_1_CLR, MT8183_TOP_AXI_PROT_EN_STA1_1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_DISP, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_DISP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_CAM] = {
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x0344,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_CAM, MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR, MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_CAM, MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR, MT8183_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR_IGN(MT8183_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_CAM,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_ISP] = {
+ .sta_mask = PWR_STATUS_ISP,
+ .ctl_offs = 0x0308,
+ .sram_pdn_bits = GENMASK(9, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_ISP,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR_IGN(MT8183_TOP_AXI_PROT_EN_MM_ISP_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_ISP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VDEC] = {
+ .sta_mask = BIT(31),
+ .ctl_offs = 0x0300,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VDEC,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VENC] = {
+ .sta_mask = PWR_STATUS_VENC,
+ .ctl_offs = 0x0304,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(15, 12),
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VENC,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VPU_TOP] = {
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x0324,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_VPU_TOP,
+ MT8183_TOP_AXI_PROT_EN_SET,
+ MT8183_TOP_AXI_PROT_EN_CLR,
+ MT8183_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MM_VPU_TOP_2ND,
+ MT8183_TOP_AXI_PROT_EN_MM_SET,
+ MT8183_TOP_AXI_PROT_EN_MM_CLR,
+ MT8183_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ .bp_smi = {
+ BUS_PROT_WR(MT8183_SMI_COMMON_SMI_CLAMP_VPU_TOP,
+ MT8183_SMI_COMMON_CLAMP_EN_SET,
+ MT8183_SMI_COMMON_CLAMP_EN_CLR,
+ MT8183_SMI_COMMON_CLAMP_EN),
+ },
+ },
+ [MT8183_POWER_DOMAIN_VPU_CORE0] = {
+ .sta_mask = BIT(27),
+ .ctl_offs = 0x33c,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE0_2ND,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO,
+ },
+ [MT8183_POWER_DOMAIN_VPU_CORE1] = {
+ .sta_mask = BIT(28),
+ .ctl_offs = 0x0340,
+ .sram_pdn_bits = GENMASK(11, 8),
+ .sram_pdn_ack_bits = GENMASK(13, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ BUS_PROT_WR(MT8183_TOP_AXI_PROT_EN_MCU_VPU_CORE1_2ND,
+ MT8183_TOP_AXI_PROT_EN_MCU_SET,
+ MT8183_TOP_AXI_PROT_EN_MCU_CLR,
+ MT8183_TOP_AXI_PROT_EN_MCU_STA1),
+ },
+ .caps = MTK_SCPD_SRAM_ISO,
+ },
+};
+
+static const struct scpsys_soc_data mt8183_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8183,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8183),
+ .pwr_sta_offs = 0x0180,
+	.pwr_sta2nd_offs = 0x0184,
+};
+
+#endif /* __SOC_MEDIATEK_MT8183_PM_DOMAINS_H */
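The sram_pdn_bits/sram_pdn_ack_bits pairs in the table above drive the SRAM power-down handshake implemented by the generic driver added later in this series (mtk-pm-domains.c). A minimal sketch of the power-down side, assuming the same regmap helpers (headers and error paths trimmed; the GENMASK values in the comments are the VENC entry above):

static int sram_pdn_sketch(struct regmap *base,
			   const struct scpsys_domain_data *data)
{
	unsigned int val;

	/* assert the SRAM_PDN control bits, e.g. GENMASK(11, 8) for VENC */
	regmap_set_bits(base, data->ctl_offs, data->sram_pdn_bits);

	/* poll until every ack bit, e.g. GENMASK(15, 12), reads back set */
	return regmap_read_poll_timeout(base, data->ctl_offs, val,
					(val & data->sram_pdn_ack_bits) ==
					data->sram_pdn_ack_bits,
					10, USEC_PER_SEC);
}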
diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h
new file mode 100644
index 000000000000..0fdf6dc6231f
--- /dev/null
+++ b/drivers/soc/mediatek/mt8192-pm-domains.h
@@ -0,0 +1,292 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8192_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MT8192_PM_DOMAINS_H
+
+#include "mtk-pm-domains.h"
+#include <dt-bindings/power/mt8192-power.h>
+
+/*
+ * MT8192 power domain support
+ */
+
+static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = {
+ [MT8192_POWER_DOMAIN_AUDIO] = {
+ .sta_mask = BIT(21),
+ .ctl_offs = 0x0354,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_AUDIO,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_CONN] = {
+ .sta_mask = PWR_STATUS_CONN,
+ .ctl_offs = 0x0304,
+ .sram_pdn_bits = 0,
+ .sram_pdn_ack_bits = 0,
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_CONN,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_CONN_2ND,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_CONN,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ },
+ .caps = MTK_SCPD_KEEP_DEFAULT_OFF,
+ },
+ [MT8192_POWER_DOMAIN_MFG0] = {
+ .sta_mask = BIT(2),
+ .ctl_offs = 0x0308,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG1] = {
+ .sta_mask = BIT(3),
+ .ctl_offs = 0x030c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_MFG1,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_MFG1,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MFG1,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_MFG1_2ND,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_MFG2] = {
+ .sta_mask = BIT(4),
+ .ctl_offs = 0x0310,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG3] = {
+ .sta_mask = BIT(5),
+ .ctl_offs = 0x0314,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG4] = {
+ .sta_mask = BIT(6),
+ .ctl_offs = 0x0318,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG5] = {
+ .sta_mask = BIT(7),
+ .ctl_offs = 0x031c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_MFG6] = {
+ .sta_mask = BIT(8),
+ .ctl_offs = 0x0320,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_DISP] = {
+ .sta_mask = BIT(20),
+ .ctl_offs = 0x0350,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR_IGN(MT8192_TOP_AXI_PROT_EN_MM_DISP,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR_IGN(MT8192_TOP_AXI_PROT_EN_MM_2_DISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_DISP,
+ MT8192_TOP_AXI_PROT_EN_SET,
+ MT8192_TOP_AXI_PROT_EN_CLR,
+ MT8192_TOP_AXI_PROT_EN_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_DISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_DISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_IPE] = {
+ .sta_mask = BIT(14),
+ .ctl_offs = 0x0338,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_IPE,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_IPE_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_ISP] = {
+ .sta_mask = BIT(12),
+ .ctl_offs = 0x0330,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_ISP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_ISP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_ISP2] = {
+ .sta_mask = BIT(13),
+ .ctl_offs = 0x0334,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_ISP2,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_ISP2_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_MDP] = {
+ .sta_mask = BIT(19),
+ .ctl_offs = 0x034c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_MDP,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_2_MDP_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_2_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_2_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VENC] = {
+ .sta_mask = BIT(17),
+ .ctl_offs = 0x0344,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VENC,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VENC_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VDEC] = {
+ .sta_mask = BIT(15),
+ .ctl_offs = 0x033c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VDEC,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_VDEC_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_VDEC2] = {
+ .sta_mask = BIT(16),
+ .ctl_offs = 0x0340,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM] = {
+ .sta_mask = BIT(23),
+ .ctl_offs = 0x035c,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ .bp_infracfg = {
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_CAM,
+ MT8192_TOP_AXI_PROT_EN_2_SET,
+ MT8192_TOP_AXI_PROT_EN_2_CLR,
+ MT8192_TOP_AXI_PROT_EN_2_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_CAM,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_1_CAM,
+ MT8192_TOP_AXI_PROT_EN_1_SET,
+ MT8192_TOP_AXI_PROT_EN_1_CLR,
+ MT8192_TOP_AXI_PROT_EN_1_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_MM_CAM_2ND,
+ MT8192_TOP_AXI_PROT_EN_MM_SET,
+ MT8192_TOP_AXI_PROT_EN_MM_CLR,
+ MT8192_TOP_AXI_PROT_EN_MM_STA1),
+ BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_VDNR_CAM,
+ MT8192_TOP_AXI_PROT_EN_VDNR_SET,
+ MT8192_TOP_AXI_PROT_EN_VDNR_CLR,
+ MT8192_TOP_AXI_PROT_EN_VDNR_STA1),
+ },
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWA] = {
+ .sta_mask = BIT(24),
+ .ctl_offs = 0x0360,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWB] = {
+ .sta_mask = BIT(25),
+ .ctl_offs = 0x0364,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+ [MT8192_POWER_DOMAIN_CAM_RAWC] = {
+ .sta_mask = BIT(26),
+ .ctl_offs = 0x0368,
+ .sram_pdn_bits = GENMASK(8, 8),
+ .sram_pdn_ack_bits = GENMASK(12, 12),
+ },
+};
+
+static const struct scpsys_soc_data mt8192_scpsys_data = {
+ .domains_data = scpsys_domain_data_mt8192,
+ .num_domains = ARRAY_SIZE(scpsys_domain_data_mt8192),
+ .pwr_sta_offs = 0x016c,
+ .pwr_sta2nd_offs = 0x0170,
+};
+
+#endif /* __SOC_MEDIATEK_MT8192_PM_DOMAINS_H */
diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
index 505651b0d715..280d3bd9f675 100644
--- a/drivers/soc/mediatek/mtk-cmdq-helper.c
+++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
@@ -70,14 +70,7 @@ int cmdq_dev_get_client_reg(struct device *dev,
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);
-static void cmdq_client_timeout(struct timer_list *t)
-{
- struct cmdq_client *client = from_timer(client, t, timer);
-
- dev_err(client->client.dev, "cmdq timeout!\n");
-}
-
-struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
+struct cmdq_client *cmdq_mbox_create(struct device *dev, int index)
{
struct cmdq_client *client;
@@ -85,12 +78,6 @@ struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
if (!client)
return (struct cmdq_client *)-ENOMEM;
- client->timeout_ms = timeout;
- if (timeout != CMDQ_NO_TIMEOUT) {
- spin_lock_init(&client->lock);
- timer_setup(&client->timer, cmdq_client_timeout, 0);
- }
- client->pkt_cnt = 0;
client->client.dev = dev;
client->client.tx_block = false;
client->client.knows_txdone = true;
@@ -112,11 +99,6 @@ EXPORT_SYMBOL(cmdq_mbox_create);
void cmdq_mbox_destroy(struct cmdq_client *client)
{
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- spin_lock(&client->lock);
- del_timer_sync(&client->timer);
- spin_unlock(&client->lock);
- }
mbox_free_channel(client->chan);
kfree(client);
}
@@ -449,18 +431,6 @@ static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
struct cmdq_task_cb *cb = &pkt->cb;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- unsigned long flags = 0;
-
- spin_lock_irqsave(&client->lock, flags);
- if (--client->pkt_cnt == 0)
- del_timer(&client->timer);
- else
- mod_timer(&client->timer, jiffies +
- msecs_to_jiffies(client->timeout_ms));
- spin_unlock_irqrestore(&client->lock, flags);
- }
-
dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
pkt->cmd_buf_size, DMA_TO_DEVICE);
if (cb->cb) {
@@ -473,7 +443,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
void *data)
{
int err;
- unsigned long flags = 0;
struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
pkt->cb.cb = cb;
@@ -484,14 +453,6 @@ int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
pkt->cmd_buf_size, DMA_TO_DEVICE);
- if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
- spin_lock_irqsave(&client->lock, flags);
- if (client->pkt_cnt++ == 0)
- mod_timer(&client->timer, jiffies +
- msecs_to_jiffies(client->timeout_ms));
- spin_unlock_irqrestore(&client->lock, flags);
- }
-
err = mbox_send_message(client->chan, pkt);
if (err < 0)
return err;
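With the driver-internal timeout timer gone, cmdq_mbox_create() loses its timeout argument; callers simply drop the third parameter. A sketch of the converted call site (the real users, e.g. under drivers/gpu/drm/mediatek, are presumably updated in the same series):

	struct cmdq_client *cl;

	/* before: cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT); */
	cl = cmdq_mbox_create(dev, 0);
	if (IS_ERR(cl))
		return PTR_ERR(cl);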
diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c
new file mode 100644
index 000000000000..f1cea041dc5a
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-devapc.c
@@ -0,0 +1,308 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 MediaTek Inc.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#define VIO_MOD_TO_REG_IND(m) ((m) / 32)
+#define VIO_MOD_TO_REG_OFF(m) ((m) % 32)
+
+struct mtk_devapc_vio_dbgs {
+ union {
+ u32 vio_dbg0;
+ struct {
+ u32 mstid:16;
+ u32 dmnid:6;
+ u32 vio_w:1;
+ u32 vio_r:1;
+ u32 addr_h:4;
+ u32 resv:4;
+ } dbg0_bits;
+ };
+
+ u32 vio_dbg1;
+};
+
+struct mtk_devapc_data {
+	/* number of violation indexes */
+ u32 vio_idx_num;
+
+ /* reg offset */
+ u32 vio_mask_offset;
+ u32 vio_sta_offset;
+ u32 vio_dbg0_offset;
+ u32 vio_dbg1_offset;
+ u32 apc_con_offset;
+ u32 vio_shift_sta_offset;
+ u32 vio_shift_sel_offset;
+ u32 vio_shift_con_offset;
+};
+
+struct mtk_devapc_context {
+ struct device *dev;
+ void __iomem *infra_base;
+ struct clk *infra_clk;
+ const struct mtk_devapc_data *data;
+};
+
+static void clear_vio_status(struct mtk_devapc_context *ctx)
+{
+ void __iomem *reg;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->vio_sta_offset;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(GENMASK(31, 0), reg + 4 * i);
+
+ writel(GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1, 0),
+ reg + 4 * i);
+}
+
+static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask)
+{
+ void __iomem *reg;
+ u32 val;
+ int i;
+
+ reg = ctx->infra_base + ctx->data->vio_mask_offset;
+
+ if (mask)
+ val = GENMASK(31, 0);
+ else
+ val = 0;
+
+ for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++)
+ writel(val, reg + 4 * i);
+
+ val = readl(reg + 4 * i);
+ if (mask)
+ val |= GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+ else
+ val &= ~GENMASK(VIO_MOD_TO_REG_OFF(ctx->data->vio_idx_num) - 1,
+ 0);
+
+ writel(val, reg + 4 * i);
+}
+
+#define PHY_DEVAPC_TIMEOUT 0x10000
+
+/*
+ * devapc_sync_vio_dbg - do the "shift" mechanism to get the full violation
+ *                       information.
+ *
+ * The shift mechanism depends on the devapc hardware design: MediaTek
+ * devapc groups multiple slaves together, and when a violation is
+ * triggered the violation information is latched inside the devapc
+ * hardware. The driver has to run the shift mechanism to sync the full
+ * violation information into the VIO_DBGs registers.
+ */
+static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ void __iomem *pd_vio_shift_sta_reg;
+ void __iomem *pd_vio_shift_sel_reg;
+ void __iomem *pd_vio_shift_con_reg;
+ int min_shift_group;
+ int ret;
+ u32 val;
+
+ pd_vio_shift_sta_reg = ctx->infra_base +
+ ctx->data->vio_shift_sta_offset;
+ pd_vio_shift_sel_reg = ctx->infra_base +
+ ctx->data->vio_shift_sel_offset;
+ pd_vio_shift_con_reg = ctx->infra_base +
+ ctx->data->vio_shift_con_offset;
+
+ /* Find the minimum shift group which has violation */
+ val = readl(pd_vio_shift_sta_reg);
+ if (!val)
+ return false;
+
+ min_shift_group = __ffs(val);
+
+ /* Assign the group to sync */
+ writel(0x1 << min_shift_group, pd_vio_shift_sel_reg);
+
+ /* Start syncing */
+ writel(0x1, pd_vio_shift_con_reg);
+
+ ret = readl_poll_timeout(pd_vio_shift_con_reg, val, val == 0x3, 0,
+ PHY_DEVAPC_TIMEOUT);
+ if (ret) {
+ dev_err(ctx->dev, "%s: Shift violation info failed\n", __func__);
+ return false;
+ }
+
+ /* Stop syncing */
+ writel(0x0, pd_vio_shift_con_reg);
+
+ /* Write clear */
+ writel(0x1 << min_shift_group, pd_vio_shift_sta_reg);
+
+ return true;
+}
+
+/*
+ * devapc_extract_vio_dbg - extract the full violation information after the
+ *                          shift mechanism has completed.
+ */
+static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx)
+{
+ struct mtk_devapc_vio_dbgs vio_dbgs;
+ void __iomem *vio_dbg0_reg;
+ void __iomem *vio_dbg1_reg;
+
+ vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset;
+ vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset;
+
+ vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg);
+ vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg);
+
+ /* Print violation information */
+ if (vio_dbgs.dbg0_bits.vio_w)
+ dev_info(ctx->dev, "Write Violation\n");
+ else if (vio_dbgs.dbg0_bits.vio_r)
+ dev_info(ctx->dev, "Read Violation\n");
+
+ dev_info(ctx->dev, "Bus ID:0x%x, Dom ID:0x%x, Vio Addr:0x%x\n",
+ vio_dbgs.dbg0_bits.mstid, vio_dbgs.dbg0_bits.dmnid,
+ vio_dbgs.vio_dbg1);
+}
+
+/*
+ * devapc_violation_irq - the devapc Interrupt Service Routine (ISR) dumps the
+ *                        violation information, including which master
+ *                        violated which slave.
+ */
+static irqreturn_t devapc_violation_irq(int irq_number, void *data)
+{
+ struct mtk_devapc_context *ctx = data;
+
+ while (devapc_sync_vio_dbg(ctx))
+ devapc_extract_vio_dbg(ctx);
+
+ clear_vio_status(ctx);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * start_devapc - unmask the module irqs to start receiving devapc violations.
+ */
+static void start_devapc(struct mtk_devapc_context *ctx)
+{
+ writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset);
+
+ mask_module_irq(ctx, false);
+}
+
+/*
+ * stop_devapc - mask the module irqs to stop the service.
+ */
+static void stop_devapc(struct mtk_devapc_context *ctx)
+{
+ mask_module_irq(ctx, true);
+
+ writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset);
+}
+
+static const struct mtk_devapc_data devapc_mt6779 = {
+ .vio_idx_num = 511,
+ .vio_mask_offset = 0x0,
+ .vio_sta_offset = 0x400,
+ .vio_dbg0_offset = 0x900,
+ .vio_dbg1_offset = 0x904,
+ .apc_con_offset = 0xF00,
+ .vio_shift_sta_offset = 0xF10,
+ .vio_shift_sel_offset = 0xF14,
+ .vio_shift_con_offset = 0xF20,
+};
+
+static const struct of_device_id mtk_devapc_dt_match[] = {
+ {
+ .compatible = "mediatek,mt6779-devapc",
+ .data = &devapc_mt6779,
+ }, {
+ },
+};
+
+static int mtk_devapc_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct mtk_devapc_context *ctx;
+ u32 devapc_irq;
+ int ret;
+
+	if (!node)
+ return -ENODEV;
+
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->data = of_device_get_match_data(&pdev->dev);
+ ctx->dev = &pdev->dev;
+
+ ctx->infra_base = of_iomap(node, 0);
+ if (!ctx->infra_base)
+ return -EINVAL;
+
+ devapc_irq = irq_of_parse_and_map(node, 0);
+ if (!devapc_irq)
+ return -EINVAL;
+
+ ctx->infra_clk = devm_clk_get(&pdev->dev, "devapc-infra-clock");
+ if (IS_ERR(ctx->infra_clk))
+ return -EINVAL;
+
+ if (clk_prepare_enable(ctx->infra_clk))
+ return -EINVAL;
+
+ ret = devm_request_irq(&pdev->dev, devapc_irq, devapc_violation_irq,
+ IRQF_TRIGGER_NONE, "devapc", ctx);
+ if (ret) {
+ clk_disable_unprepare(ctx->infra_clk);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, ctx);
+
+ start_devapc(ctx);
+
+ return 0;
+}
+
+static int mtk_devapc_remove(struct platform_device *pdev)
+{
+ struct mtk_devapc_context *ctx = platform_get_drvdata(pdev);
+
+ stop_devapc(ctx);
+
+ clk_disable_unprepare(ctx->infra_clk);
+
+ return 0;
+}
+
+static struct platform_driver mtk_devapc_driver = {
+ .probe = mtk_devapc_probe,
+ .remove = mtk_devapc_remove,
+ .driver = {
+ .name = "mtk-devapc",
+ .of_match_table = mtk_devapc_dt_match,
+ },
+};
+
+module_platform_driver(mtk_devapc_driver);
+
+MODULE_DESCRIPTION("Mediatek Device APC Driver");
+MODULE_AUTHOR("Neal Liu <neal.liu@mediatek.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/mediatek/mtk-infracfg.c b/drivers/soc/mediatek/mtk-infracfg.c
index 4a123796aad3..0590b68e0d78 100644
--- a/drivers/soc/mediatek/mtk-infracfg.c
+++ b/drivers/soc/mediatek/mtk-infracfg.c
@@ -12,11 +12,6 @@
#define MTK_POLL_DELAY_US 10
#define MTK_POLL_TIMEOUT (jiffies_to_usecs(HZ))
-#define INFRA_TOPAXI_PROTECTEN 0x0220
-#define INFRA_TOPAXI_PROTECTSTA1 0x0228
-#define INFRA_TOPAXI_PROTECTEN_SET 0x0260
-#define INFRA_TOPAXI_PROTECTEN_CLR 0x0264
-
/**
* mtk_infracfg_set_bus_protection - enable bus protection
* @infracfg: The infracfg regmap
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index a55f25511173..18f93979e14a 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -5,13 +5,11 @@
*/
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
-#include "../../gpu/drm/mediatek/mtk_drm_ddp.h"
-#include "../../gpu/drm/mediatek/mtk_drm_ddp_comp.h"
-
#define DISP_REG_CONFIG_DISP_OVL0_MOUT_EN 0x040
#define DISP_REG_CONFIG_DISP_OVL1_MOUT_EN 0x044
#define DISP_REG_CONFIG_DISP_OD_MOUT_EN 0x048
@@ -308,15 +306,12 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
struct platform_device *clks;
struct platform_device *drm;
void __iomem *config_regs;
- struct resource *mem;
int ret;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- config_regs = devm_ioremap_resource(dev, mem);
+ config_regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(config_regs)) {
ret = PTR_ERR(config_regs);
- dev_err(dev, "Failed to ioremap mmsys-config resource: %d\n",
- ret);
+ dev_err(dev, "Failed to ioremap mmsys registers: %d\n", ret);
return ret;
}
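devm_platform_ioremap_resource() is a straight combination of the two calls it replaces, so the conversion is behavior-preserving:

	/* what devm_platform_ioremap_resource(pdev, 0) does internally: */
	struct resource *mem;
	void __iomem *config_regs;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	config_regs = devm_ioremap_resource(&pdev->dev, mem);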
diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c
new file mode 100644
index 000000000000..fb70cb3b07b3
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pm-domains.c
@@ -0,0 +1,614 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Collabora Ltd.
+ */
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of_clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regmap.h>
+#include <linux/soc/mediatek/infracfg.h>
+
+#include "mt8173-pm-domains.h"
+#include "mt8183-pm-domains.h"
+#include "mt8192-pm-domains.h"
+
+#define MTK_POLL_DELAY_US 10
+#define MTK_POLL_TIMEOUT USEC_PER_SEC
+
+#define PWR_RST_B_BIT BIT(0)
+#define PWR_ISO_BIT BIT(1)
+#define PWR_ON_BIT BIT(2)
+#define PWR_ON_2ND_BIT BIT(3)
+#define PWR_CLK_DIS_BIT BIT(4)
+#define PWR_SRAM_CLKISO_BIT BIT(5)
+#define PWR_SRAM_ISOINT_B_BIT BIT(6)
+
+struct scpsys_domain {
+ struct generic_pm_domain genpd;
+ const struct scpsys_domain_data *data;
+ struct scpsys *scpsys;
+ int num_clks;
+ struct clk_bulk_data *clks;
+ int num_subsys_clks;
+ struct clk_bulk_data *subsys_clks;
+ struct regmap *infracfg;
+ struct regmap *smi;
+};
+
+struct scpsys {
+ struct device *dev;
+ struct regmap *base;
+ const struct scpsys_soc_data *soc_data;
+ struct genpd_onecell_data pd_data;
+ struct generic_pm_domain *domains[];
+};
+
+#define to_scpsys_domain(gpd) container_of(gpd, struct scpsys_domain, genpd)
+
+static bool scpsys_domain_is_on(struct scpsys_domain *pd)
+{
+ struct scpsys *scpsys = pd->scpsys;
+ u32 status, status2;
+
+ regmap_read(scpsys->base, scpsys->soc_data->pwr_sta_offs, &status);
+ status &= pd->data->sta_mask;
+
+ regmap_read(scpsys->base, scpsys->soc_data->pwr_sta2nd_offs, &status2);
+ status2 &= pd->data->sta_mask;
+
+ /* A domain is on when both status bits are set. */
+ return status && status2;
+}
+
+static int scpsys_sram_enable(struct scpsys_domain *pd)
+{
+ u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned int tmp;
+ int ret;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+
+	/* Wait until SRAM_PDN_ACK is fully cleared */
+ ret = regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
+ (tmp & pdn_ack) == 0, MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
+ udelay(1);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
+ }
+
+ return 0;
+}
+
+static int scpsys_sram_disable(struct scpsys_domain *pd)
+{
+ u32 pdn_ack = pd->data->sram_pdn_ack_bits;
+ struct scpsys *scpsys = pd->scpsys;
+ unsigned int tmp;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_SRAM_ISO)) {
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_CLKISO_BIT);
+ udelay(1);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_SRAM_ISOINT_B_BIT);
+ }
+
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, pd->data->sram_pdn_bits);
+
+	/* Wait until all SRAM_PDN_ACK bits are set */
+ return regmap_read_poll_timeout(scpsys->base, pd->data->ctl_offs, tmp,
+ (tmp & pdn_ack) == pdn_ack, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+}
+
+static int _scpsys_bus_protect_enable(const struct scpsys_bus_prot_data *bpd, struct regmap *regmap)
+{
+ int i, ret;
+
+ for (i = 0; i < SPM_MAX_BUS_PROT_DATA; i++) {
+ u32 val, mask = bpd[i].bus_prot_mask;
+
+ if (!mask)
+ break;
+
+ if (bpd[i].bus_prot_reg_update)
+ regmap_set_bits(regmap, bpd[i].bus_prot_set, mask);
+ else
+ regmap_write(regmap, bpd[i].bus_prot_set, mask);
+
+ ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
+ val, (val & mask) == mask,
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_bus_protect_enable(struct scpsys_domain *pd)
+{
+ int ret;
+
+ ret = _scpsys_bus_protect_enable(pd->data->bp_infracfg, pd->infracfg);
+ if (ret)
+ return ret;
+
+ return _scpsys_bus_protect_enable(pd->data->bp_smi, pd->smi);
+}
+
+static int _scpsys_bus_protect_disable(const struct scpsys_bus_prot_data *bpd,
+ struct regmap *regmap)
+{
+ int i, ret;
+
+ for (i = SPM_MAX_BUS_PROT_DATA - 1; i >= 0; i--) {
+ u32 val, mask = bpd[i].bus_prot_mask;
+
+ if (!mask)
+ continue;
+
+ if (bpd[i].bus_prot_reg_update)
+ regmap_clear_bits(regmap, bpd[i].bus_prot_clr, mask);
+ else
+ regmap_write(regmap, bpd[i].bus_prot_clr, mask);
+
+ if (bpd[i].ignore_clr_ack)
+ continue;
+
+ ret = regmap_read_poll_timeout(regmap, bpd[i].bus_prot_sta,
+ val, !(val & mask),
+ MTK_POLL_DELAY_US, MTK_POLL_TIMEOUT);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scpsys_bus_protect_disable(struct scpsys_domain *pd)
+{
+ int ret;
+
+ ret = _scpsys_bus_protect_disable(pd->data->bp_smi, pd->smi);
+ if (ret)
+ return ret;
+
+ return _scpsys_bus_protect_disable(pd->data->bp_infracfg, pd->infracfg);
+}
+
+static int scpsys_power_on(struct generic_pm_domain *genpd)
+{
+ struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ ret = clk_bulk_enable(pd->num_clks, pd->clks);
+ if (ret)
+ return ret;
+
+ /* subsys power on */
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+
+ /* wait until PWR_ACK = 1 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ goto err_pwr_ack;
+
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+
+ ret = clk_bulk_enable(pd->num_subsys_clks, pd->subsys_clks);
+ if (ret)
+ goto err_pwr_ack;
+
+ ret = scpsys_sram_enable(pd);
+ if (ret < 0)
+ goto err_disable_subsys_clks;
+
+ ret = scpsys_bus_protect_disable(pd);
+ if (ret < 0)
+ goto err_disable_sram;
+
+ return 0;
+
+err_disable_sram:
+ scpsys_sram_disable(pd);
+err_disable_subsys_clks:
+ clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+err_pwr_ack:
+ clk_bulk_disable(pd->num_clks, pd->clks);
+ return ret;
+}
+
+static int scpsys_power_off(struct generic_pm_domain *genpd)
+{
+ struct scpsys_domain *pd = container_of(genpd, struct scpsys_domain, genpd);
+ struct scpsys *scpsys = pd->scpsys;
+ bool tmp;
+ int ret;
+
+ ret = scpsys_bus_protect_enable(pd);
+ if (ret < 0)
+ return ret;
+
+ ret = scpsys_sram_disable(pd);
+ if (ret < 0)
+ return ret;
+
+ clk_bulk_disable(pd->num_subsys_clks, pd->subsys_clks);
+
+ /* subsys power off */
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_RST_B_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_ISO_BIT);
+ regmap_set_bits(scpsys->base, pd->data->ctl_offs, PWR_CLK_DIS_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_2ND_BIT);
+ regmap_clear_bits(scpsys->base, pd->data->ctl_offs, PWR_ON_BIT);
+
+ /* wait until PWR_ACK = 0 */
+ ret = readx_poll_timeout(scpsys_domain_is_on, pd, tmp, !tmp, MTK_POLL_DELAY_US,
+ MTK_POLL_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ clk_bulk_disable(pd->num_clks, pd->clks);
+
+ return 0;
+}
+
+static struct
+generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_node *node)
+{
+ const struct scpsys_domain_data *domain_data;
+ struct scpsys_domain *pd;
+ struct property *prop;
+ const char *clk_name;
+ int i, ret, num_clks;
+ struct clk *clk;
+ int clk_ind = 0;
+ u32 id;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret) {
+ dev_err(scpsys->dev, "%pOF: failed to retrieve domain id from reg: %d\n",
+ node, ret);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (id >= scpsys->soc_data->num_domains) {
+ dev_err(scpsys->dev, "%pOF: invalid domain id %d\n", node, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ domain_data = &scpsys->soc_data->domains_data[id];
+ if (domain_data->sta_mask == 0) {
+ dev_err(scpsys->dev, "%pOF: undefined domain id %d\n", node, id);
+ return ERR_PTR(-EINVAL);
+ }
+
+ pd = devm_kzalloc(scpsys->dev, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ pd->data = domain_data;
+ pd->scpsys = scpsys;
+
+ pd->infracfg = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,infracfg");
+ if (IS_ERR(pd->infracfg))
+ return ERR_CAST(pd->infracfg);
+
+ pd->smi = syscon_regmap_lookup_by_phandle_optional(node, "mediatek,smi");
+ if (IS_ERR(pd->smi))
+ return ERR_CAST(pd->smi);
+
+ num_clks = of_clk_get_parent_count(node);
+ if (num_clks > 0) {
+		/* Count basic clks and subsys clks (clock-names containing a '-') */
+ of_property_for_each_string(node, "clock-names", prop, clk_name) {
+ char *subsys;
+
+ subsys = strchr(clk_name, '-');
+ if (subsys)
+ pd->num_subsys_clks++;
+ else
+ pd->num_clks++;
+ }
+
+ pd->clks = devm_kcalloc(scpsys->dev, pd->num_clks, sizeof(*pd->clks), GFP_KERNEL);
+ if (!pd->clks)
+ return ERR_PTR(-ENOMEM);
+
+ pd->subsys_clks = devm_kcalloc(scpsys->dev, pd->num_subsys_clks,
+ sizeof(*pd->subsys_clks), GFP_KERNEL);
+ if (!pd->subsys_clks)
+ return ERR_PTR(-ENOMEM);
+
+ }
+
+ for (i = 0; i < pd->num_clks; i++) {
+ clk = of_clk_get(node, i);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err_probe(scpsys->dev, ret,
+ "%pOF: failed to get clk at index %d: %d\n", node, i, ret);
+ goto err_put_clocks;
+ }
+
+ pd->clks[clk_ind++].clk = clk;
+ }
+
+ for (i = 0; i < pd->num_subsys_clks; i++) {
+ clk = of_clk_get(node, i + clk_ind);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err_probe(scpsys->dev, ret,
+ "%pOF: failed to get clk at index %d: %d\n", node,
+ i + clk_ind, ret);
+ goto err_put_subsys_clocks;
+ }
+
+ pd->subsys_clks[i].clk = clk;
+ }
+
+ ret = clk_bulk_prepare(pd->num_clks, pd->clks);
+ if (ret)
+ goto err_put_subsys_clocks;
+
+ ret = clk_bulk_prepare(pd->num_subsys_clks, pd->subsys_clks);
+ if (ret)
+ goto err_unprepare_clocks;
+
+ /*
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
+ */
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF)) {
+ if (scpsys_domain_is_on(pd))
+ dev_warn(scpsys->dev,
+ "%pOF: A default off power domain has been ON\n", node);
+ } else {
+ ret = scpsys_power_on(&pd->genpd);
+ if (ret < 0) {
+ dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret);
+ goto err_unprepare_clocks;
+ }
+ }
+
+ if (scpsys->domains[id]) {
+ ret = -EINVAL;
+ dev_err(scpsys->dev,
+ "power domain with id %d already exists, check your device-tree\n", id);
+ goto err_unprepare_subsys_clocks;
+ }
+
+ pd->genpd.name = node->name;
+ pd->genpd.power_off = scpsys_power_off;
+ pd->genpd.power_on = scpsys_power_on;
+
+ if (MTK_SCPD_CAPS(pd, MTK_SCPD_KEEP_DEFAULT_OFF))
+ pm_genpd_init(&pd->genpd, NULL, true);
+ else
+ pm_genpd_init(&pd->genpd, NULL, false);
+
+ scpsys->domains[id] = &pd->genpd;
+
+ return scpsys->pd_data.domains[id];
+
+err_unprepare_subsys_clocks:
+ clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+err_unprepare_clocks:
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+err_put_subsys_clocks:
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+err_put_clocks:
+ clk_bulk_put(pd->num_clks, pd->clks);
+ return ERR_PTR(ret);
+}
+
+static int scpsys_add_subdomain(struct scpsys *scpsys, struct device_node *parent)
+{
+ struct generic_pm_domain *child_pd, *parent_pd;
+ struct device_node *child;
+ int ret;
+
+ for_each_child_of_node(parent, child) {
+ u32 id;
+
+ ret = of_property_read_u32(parent, "reg", &id);
+ if (ret) {
+ dev_err(scpsys->dev, "%pOF: failed to get parent domain id\n", child);
+ goto err_put_node;
+ }
+
+ if (!scpsys->pd_data.domains[id]) {
+ ret = -EINVAL;
+ dev_err(scpsys->dev, "power domain with id %d does not exist\n", id);
+ goto err_put_node;
+ }
+
+ parent_pd = scpsys->pd_data.domains[id];
+
+ child_pd = scpsys_add_one_domain(scpsys, child);
+ if (IS_ERR(child_pd)) {
+ ret = PTR_ERR(child_pd);
+ dev_err(scpsys->dev, "%pOF: failed to get child domain id\n", child);
+ goto err_put_node;
+ }
+
+ ret = pm_genpd_add_subdomain(parent_pd, child_pd);
+ if (ret) {
+ dev_err(scpsys->dev, "failed to add %s subdomain to parent %s\n",
+ child_pd->name, parent_pd->name);
+ goto err_put_node;
+ } else {
+ dev_dbg(scpsys->dev, "%s add subdomain: %s\n", parent_pd->name,
+ child_pd->name);
+ }
+
+ /* recursive call to add all subdomains */
+ ret = scpsys_add_subdomain(scpsys, child);
+ if (ret)
+ goto err_put_node;
+ }
+
+ return 0;
+
+err_put_node:
+ of_node_put(child);
+ return ret;
+}
+
+static void scpsys_remove_one_domain(struct scpsys_domain *pd)
+{
+ int ret;
+
+ if (scpsys_domain_is_on(pd))
+ scpsys_power_off(&pd->genpd);
+
+ /*
+ * We're in the error cleanup already, so we only complain,
+ * but won't emit another error on top of the original one.
+ */
+ ret = pm_genpd_remove(&pd->genpd);
+ if (ret < 0)
+ dev_err(pd->scpsys->dev,
+ "failed to remove domain '%s' : %d - state may be inconsistent\n",
+ pd->genpd.name, ret);
+
+ clk_bulk_unprepare(pd->num_clks, pd->clks);
+ clk_bulk_put(pd->num_clks, pd->clks);
+
+ clk_bulk_unprepare(pd->num_subsys_clks, pd->subsys_clks);
+ clk_bulk_put(pd->num_subsys_clks, pd->subsys_clks);
+}
+
+static void scpsys_domain_cleanup(struct scpsys *scpsys)
+{
+ struct generic_pm_domain *genpd;
+ struct scpsys_domain *pd;
+ int i;
+
+ for (i = scpsys->pd_data.num_domains - 1; i >= 0; i--) {
+ genpd = scpsys->pd_data.domains[i];
+ if (genpd) {
+ pd = to_scpsys_domain(genpd);
+ scpsys_remove_one_domain(pd);
+ }
+ }
+}
+
+static const struct of_device_id scpsys_of_match[] = {
+ {
+ .compatible = "mediatek,mt8173-power-controller",
+ .data = &mt8173_scpsys_data,
+ },
+ {
+ .compatible = "mediatek,mt8183-power-controller",
+ .data = &mt8183_scpsys_data,
+ },
+ {
+ .compatible = "mediatek,mt8192-power-controller",
+ .data = &mt8192_scpsys_data,
+ },
+ { }
+};
+
+static int scpsys_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ const struct scpsys_soc_data *soc;
+ struct device_node *node;
+ struct device *parent;
+ struct scpsys *scpsys;
+ int ret;
+
+ soc = of_device_get_match_data(&pdev->dev);
+ if (!soc) {
+ dev_err(&pdev->dev, "no power controller data\n");
+ return -EINVAL;
+ }
+
+ scpsys = devm_kzalloc(dev, struct_size(scpsys, domains, soc->num_domains), GFP_KERNEL);
+ if (!scpsys)
+ return -ENOMEM;
+
+ scpsys->dev = dev;
+ scpsys->soc_data = soc;
+
+ scpsys->pd_data.domains = scpsys->domains;
+ scpsys->pd_data.num_domains = soc->num_domains;
+
+ parent = dev->parent;
+ if (!parent) {
+ dev_err(dev, "no parent for syscon devices\n");
+ return -ENODEV;
+ }
+
+ scpsys->base = syscon_node_to_regmap(parent->of_node);
+ if (IS_ERR(scpsys->base)) {
+ dev_err(dev, "no regmap available\n");
+ return PTR_ERR(scpsys->base);
+ }
+
+ ret = -ENODEV;
+ for_each_available_child_of_node(np, node) {
+ struct generic_pm_domain *domain;
+
+ domain = scpsys_add_one_domain(scpsys, node);
+ if (IS_ERR(domain)) {
+ ret = PTR_ERR(domain);
+ of_node_put(node);
+ goto err_cleanup_domains;
+ }
+
+ ret = scpsys_add_subdomain(scpsys, node);
+ if (ret) {
+ of_node_put(node);
+ goto err_cleanup_domains;
+ }
+ }
+
+ if (ret) {
+ dev_dbg(dev, "no power domains present\n");
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_onecell(np, &scpsys->pd_data);
+ if (ret) {
+ dev_err(dev, "failed to add provider: %d\n", ret);
+ goto err_cleanup_domains;
+ }
+
+ return 0;
+
+err_cleanup_domains:
+ scpsys_domain_cleanup(scpsys);
+ return ret;
+}
+
+static struct platform_driver scpsys_pm_domain_driver = {
+ .probe = scpsys_probe,
+ .driver = {
+ .name = "mtk-power-controller",
+ .suppress_bind_attrs = true,
+ .of_match_table = scpsys_of_match,
+ },
+};
+builtin_platform_driver(scpsys_pm_domain_driver);
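Once of_genpd_add_provider_onecell() has registered the provider, consumer drivers need nothing scpsys-specific: the genpd core attaches the domain referenced by the device's power-domains phandle, and runtime PM drives scpsys_power_on()/scpsys_power_off(). A hypothetical consumer probe (sketch, assuming a single power domain per device):

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	pm_runtime_enable(&pdev->dev);

	/* resumes the device, powering on the attached scpsys domain */
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	return 0;
}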
diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h
new file mode 100644
index 000000000000..a2f4d8f97e05
--- /dev/null
+++ b/drivers/soc/mediatek/mtk-pm-domains.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MTK_PM_DOMAINS_H
+#define __SOC_MEDIATEK_MTK_PM_DOMAINS_H
+
+#define MTK_SCPD_ACTIVE_WAKEUP BIT(0)
+#define MTK_SCPD_FWAIT_SRAM BIT(1)
+#define MTK_SCPD_SRAM_ISO BIT(2)
+#define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3)
+#define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x))
+
+#define SPM_VDE_PWR_CON 0x0210
+#define SPM_MFG_PWR_CON 0x0214
+#define SPM_VEN_PWR_CON 0x0230
+#define SPM_ISP_PWR_CON 0x0238
+#define SPM_DIS_PWR_CON 0x023c
+#define SPM_VEN2_PWR_CON 0x0298
+#define SPM_AUDIO_PWR_CON 0x029c
+#define SPM_MFG_2D_PWR_CON 0x02c0
+#define SPM_MFG_ASYNC_PWR_CON 0x02c4
+#define SPM_USB_PWR_CON 0x02cc
+
+#define SPM_PWR_STATUS 0x060c
+#define SPM_PWR_STATUS_2ND 0x0610
+
+#define PWR_STATUS_CONN BIT(1)
+#define PWR_STATUS_DISP BIT(3)
+#define PWR_STATUS_MFG BIT(4)
+#define PWR_STATUS_ISP BIT(5)
+#define PWR_STATUS_VDEC BIT(7)
+#define PWR_STATUS_VENC_LT BIT(20)
+#define PWR_STATUS_VENC BIT(21)
+#define PWR_STATUS_MFG_2D BIT(22)
+#define PWR_STATUS_MFG_ASYNC BIT(23)
+#define PWR_STATUS_AUDIO BIT(24)
+#define PWR_STATUS_USB BIT(25)
+
+#define SPM_MAX_BUS_PROT_DATA 5
+
+#define _BUS_PROT(_mask, _set, _clr, _sta, _update, _ignore) { \
+ .bus_prot_mask = (_mask), \
+ .bus_prot_set = _set, \
+ .bus_prot_clr = _clr, \
+ .bus_prot_sta = _sta, \
+ .bus_prot_reg_update = _update, \
+ .ignore_clr_ack = _ignore, \
+ }
+
+#define BUS_PROT_WR(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, false, false)
+
+#define BUS_PROT_WR_IGN(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, false, true)
+
+#define BUS_PROT_UPDATE(_mask, _set, _clr, _sta) \
+ _BUS_PROT(_mask, _set, _clr, _sta, true, false)
+
+#define BUS_PROT_UPDATE_TOPAXI(_mask) \
+ BUS_PROT_UPDATE(_mask, \
+ INFRA_TOPAXI_PROTECTEN, \
+ INFRA_TOPAXI_PROTECTEN_CLR, \
+ INFRA_TOPAXI_PROTECTSTA1)
+
+struct scpsys_bus_prot_data {
+ u32 bus_prot_mask;
+ u32 bus_prot_set;
+ u32 bus_prot_clr;
+ u32 bus_prot_sta;
+ bool bus_prot_reg_update;
+ bool ignore_clr_ack;
+};
+
+#define MAX_SUBSYS_CLKS 10
+
+/**
+ * struct scpsys_domain_data - scp domain data for power on/off flow
+ * @sta_mask: The mask for power on/off status bit.
+ * @ctl_offs: The offset for main power control register.
+ * @sram_pdn_bits: The mask for sram power control bits.
+ * @sram_pdn_ack_bits: The mask for sram power control acked bits.
+ * @caps: The capability flags (MTK_SCPD_*).
+ * @bp_infracfg: The bus protection data for the infracfg subsystem.
+ * @bp_smi: The bus protection data for the smi subsystem.
+ */
+struct scpsys_domain_data {
+ u32 sta_mask;
+ int ctl_offs;
+ u32 sram_pdn_bits;
+ u32 sram_pdn_ack_bits;
+ u8 caps;
+ const struct scpsys_bus_prot_data bp_infracfg[SPM_MAX_BUS_PROT_DATA];
+ const struct scpsys_bus_prot_data bp_smi[SPM_MAX_BUS_PROT_DATA];
+};
+
+struct scpsys_soc_data {
+ const struct scpsys_domain_data *domains_data;
+ int num_domains;
+ int pwr_sta_offs;
+ int pwr_sta2nd_offs;
+};
+
+#endif /* __SOC_MEDIATEK_MTK_PM_DOMAINS_H */
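Expanding one table entry makes the macro layering concrete. BUS_PROT_WR() selects the write-based SET/CLR registers and requires an ack, so an entry such as the AUDIO one in mt8192-pm-domains.h expands to:

	/* BUS_PROT_WR(MT8192_TOP_AXI_PROT_EN_2_AUDIO, ...) becomes: */
	{
		.bus_prot_mask = MT8192_TOP_AXI_PROT_EN_2_AUDIO,
		.bus_prot_set = MT8192_TOP_AXI_PROT_EN_2_SET,
		.bus_prot_clr = MT8192_TOP_AXI_PROT_EN_2_CLR,
		.bus_prot_sta = MT8192_TOP_AXI_PROT_EN_2_STA1,
		.bus_prot_reg_update = false,	/* write SET/CLR, no read-modify-write */
		.ignore_clr_ack = false,	/* poll STA1 until the clear is acked */
	}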
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index f669d3754627..ca75b14931ec 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -524,6 +524,7 @@ static void mtk_register_power_domains(struct platform_device *pdev,
for (i = 0; i < num; i++) {
struct scp_domain *scpd = &scp->domains[i];
struct generic_pm_domain *genpd = &scpd->genpd;
+ bool on;
/*
* Initially turn on all domains to make the domains usable
@@ -531,9 +532,9 @@ static void mtk_register_power_domains(struct platform_device *pdev,
* software. The unused domains will be switched off during
* late_init time.
*/
- genpd->power_on(genpd);
+ on = !WARN_ON(genpd->power_on(genpd) < 0);
- pm_genpd_init(genpd, NULL, false);
+ pm_genpd_init(genpd, NULL, !on);
}
/*
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6a3b69b43ad5..79b568f82a1c 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -17,7 +17,7 @@ config QCOM_AOSS_QMP
Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
config QCOM_COMMAND_DB
- bool "Qualcomm Command DB"
+ tristate "Qualcomm Command DB"
depends on ARCH_QCOM || COMPILE_TEST
depends on OF_RESERVED_MEM
help
@@ -108,8 +108,9 @@ config QCOM_RMTFS_MEM
Say y here if you intend to boot the modem remoteproc.
config QCOM_RPMH
- bool "Qualcomm RPM-Hardened (RPMH) Communication"
+ tristate "Qualcomm RPM-Hardened (RPMH) Communication"
depends on ARCH_QCOM || COMPILE_TEST
+ depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)
help
Support for communication with the hardened-RPM blocks in
Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
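The "depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)" line looks like a tautology but is the standard Kconfig idiom for an optional dependency: it is always satisfiable, yet it evaluates to m when QCOM_COMMAND_DB=m, capping QCOM_RPMH at m as well. That way the newly tristate command DB can never end up as a module underneath a built-in RPMH user.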
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index fc5610603b17..dd872017f345 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,8 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
@@ -340,12 +341,14 @@ static const struct of_device_id cmd_db_match_table[] = {
{ .compatible = "qcom,cmd-db" },
{ }
};
+MODULE_DEVICE_TABLE(of, cmd_db_match_table);
static struct platform_driver cmd_db_dev_driver = {
.probe = cmd_db_dev_probe,
.driver = {
.name = "cmd-db",
.of_match_table = cmd_db_match_table,
+ .suppress_bind_attrs = true,
},
};
@@ -354,3 +357,6 @@ static int __init cmd_db_device_init(void)
return platform_driver_register(&cmd_db_dev_driver);
}
arch_initcall(cmd_db_device_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c
index c20cb92077c0..7886af4fd726 100644
--- a/drivers/soc/qcom/kryo-l2-accessors.c
+++ b/drivers/soc/qcom/kryo-l2-accessors.c
@@ -16,7 +16,7 @@ static DEFINE_RAW_SPINLOCK(l2_access_lock);
/**
* kryo_l2_set_indirect_reg() - write value to an L2 register
* @reg: Address of L2 register.
- * @value: Value to be written to register.
+ * @val: Value to be written to register.
*
* Use architecturally required barriers for ordering between system register
* accesses, and system registers with respect to device memory
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 70fbe70c6213..16b421608e9c 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -45,10 +45,13 @@
#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + SZ_8 * n)
#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + SZ_8 * n)
+#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x21f00
+#define LLCC_TRP_PCB_ACT 0x21f04
+
#define BANK_OFFSET_STRIDE 0x80000
/**
- * llcc_slice_config - Data associated with the llcc slice
+ * struct llcc_slice_config - Data associated with the llcc slice
* @usecase_id: Unique id for the client's use case
* @slice_id: llcc slice id for each client
* @max_cap: The maximum capacity of the cache slice provided in KB
@@ -89,6 +92,7 @@ struct llcc_slice_config {
struct qcom_llcc_config {
const struct llcc_slice_config *sct_data;
int size;
+ bool need_llcc_cfg;
};
static const struct llcc_slice_config sc7180_data[] = {
@@ -119,14 +123,45 @@ static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
};
+static const struct llcc_slice_config sm8150_data[] = {
+ { LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
+ { LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_VIDSC1, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDIO, 6, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPGRW, 7, 3072, 1, 0, 0xFF, 0xF00, 0, 0, 0, 1, 0 },
+ { LLCC_MDM, 8, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODHW, 9, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_CMPT, 10, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+	{ LLCC_GPUHTW, 11, 512, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 2560, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MMUHWT, 13, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1 },
+ { LLCC_CMPTDMA, 15, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_DISP, 16, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 20, 1024, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMHPFX, 21, 1024, 0, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_AUDHW, 22, 1024, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_WLHW, 24, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 256, 1, 1, 0xF, 0x0, 0, 0, 0, 1, 0 },
+ { LLCC_APTCM, 30, 256, 3, 1, 0x0, 0x1, 1, 0, 0, 1, 0 },
+ { LLCC_WRCACHE, 31, 128, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 0 },
+};
+
static const struct qcom_llcc_config sc7180_cfg = {
.sct_data = sc7180_data,
.size = ARRAY_SIZE(sc7180_data),
+ .need_llcc_cfg = true,
};
static const struct qcom_llcc_config sdm845_cfg = {
.sct_data = sdm845_data,
.size = ARRAY_SIZE(sdm845_data),
+ .need_llcc_cfg = false,
+};
+
+static const struct qcom_llcc_config sm8150_cfg = {
+ .sct_data = sm8150_data,
+ .size = ARRAY_SIZE(sm8150_data),
};
static struct llcc_drv_data *drv_data = (void *) -EPROBE_DEFER;
@@ -318,62 +353,91 @@ size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
}
EXPORT_SYMBOL_GPL(llcc_get_slice_size);
-static int qcom_llcc_cfg_program(struct platform_device *pdev)
+static int _qcom_llcc_cfg_program(const struct llcc_slice_config *config,
+ const struct qcom_llcc_config *cfg)
{
- int i;
+ int ret;
u32 attr1_cfg;
u32 attr0_cfg;
u32 attr1_val;
u32 attr0_val;
u32 max_cap_cacheline;
+ struct llcc_slice_desc desc;
+
+ attr1_val = config->cache_mode;
+ attr1_val |= config->probe_target_ways << ATTR1_PROBE_TARGET_WAYS_SHIFT;
+ attr1_val |= config->fixed_size << ATTR1_FIXED_SIZE_SHIFT;
+ attr1_val |= config->priority << ATTR1_PRIORITY_SHIFT;
+
+ max_cap_cacheline = MAX_CAP_TO_BYTES(config->max_cap);
+
+ /*
+ * LLCC instances can vary for each target.
+ * The SW writes to broadcast register which gets propagated
+ * to each llcc instance (llcc0,.. llccN).
+ * Since the size of the memory is divided equally amongst the
+ * llcc instances, we need to configure the max cap accordingly.
+ */
+ max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
+ max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+ attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
+
+ attr1_cfg = LLCC_TRP_ATTR1_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr1_cfg, attr1_val);
+ if (ret)
+ return ret;
+
+ attr0_val = config->res_ways & ATTR0_RES_WAYS_MASK;
+ attr0_val |= config->bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
+
+ attr0_cfg = LLCC_TRP_ATTR0_CFGn(config->slice_id);
+
+ ret = regmap_write(drv_data->bcast_regmap, attr0_cfg, attr0_val);
+ if (ret)
+ return ret;
+
+ if (cfg->need_llcc_cfg) {
+ u32 disable_cap_alloc, retain_pc;
+
+ disable_cap_alloc = config->dis_cap_alloc << config->slice_id;
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_SCID_DIS_CAP_ALLOC, disable_cap_alloc);
+ if (ret)
+ return ret;
+
+ retain_pc = config->retain_on_pc << config->slice_id;
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_PCB_ACT, retain_pc);
+ if (ret)
+ return ret;
+ }
+
+ if (config->activate_on_init) {
+ desc.slice_id = config->slice_id;
+ ret = llcc_slice_activate(&desc);
+ }
+
+ return ret;
+}
+
+static int qcom_llcc_cfg_program(struct platform_device *pdev,
+ const struct qcom_llcc_config *cfg)
+{
+ int i;
u32 sz;
int ret = 0;
const struct llcc_slice_config *llcc_table;
- struct llcc_slice_desc desc;
sz = drv_data->cfg_size;
llcc_table = drv_data->cfg;
for (i = 0; i < sz; i++) {
- attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
- attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
-
- attr1_val = llcc_table[i].cache_mode;
- attr1_val |= llcc_table[i].probe_target_ways <<
- ATTR1_PROBE_TARGET_WAYS_SHIFT;
- attr1_val |= llcc_table[i].fixed_size <<
- ATTR1_FIXED_SIZE_SHIFT;
- attr1_val |= llcc_table[i].priority <<
- ATTR1_PRIORITY_SHIFT;
-
- max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
-
- /* LLCC instances can vary for each target.
- * The SW writes to broadcast register which gets propagated
- * to each llcc instace (llcc0,.. llccN).
- * Since the size of the memory is divided equally amongst the
- * llcc instances, we need to configure the max cap accordingly.
- */
- max_cap_cacheline = max_cap_cacheline / drv_data->num_banks;
- max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
- attr1_val |= max_cap_cacheline << ATTR1_MAX_CAP_SHIFT;
-
- attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
- attr0_val |= llcc_table[i].bonus_ways << ATTR0_BONUS_WAYS_SHIFT;
-
- ret = regmap_write(drv_data->bcast_regmap, attr1_cfg,
- attr1_val);
+ ret = _qcom_llcc_cfg_program(&llcc_table[i], cfg);
if (ret)
return ret;
- ret = regmap_write(drv_data->bcast_regmap, attr0_cfg,
- attr0_val);
- if (ret)
- return ret;
- if (llcc_table[i].activate_on_init) {
- desc.slice_id = llcc_table[i].slice_id;
- ret = llcc_slice_activate(&desc);
- }
}
+
return ret;
}
@@ -472,7 +536,7 @@ static int qcom_llcc_probe(struct platform_device *pdev)
mutex_init(&drv_data->lock);
platform_set_drvdata(pdev, drv_data);
- ret = qcom_llcc_cfg_program(pdev);
+ ret = qcom_llcc_cfg_program(pdev, cfg);
if (ret)
goto err;
@@ -494,6 +558,7 @@ err:
static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ }
};
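The max-capacity programming in _qcom_llcc_cfg_program() splits the slice budget across the LLCC banks before converting to cache lines. Worked through for LLCC_CPUSS on sm8150 (max_cap = 3072), assuming a four-bank LLCC and the 64-byte line implied by CACHE_LINE_SIZE_SHIFT = 6 (neither constant appears in this hunk):

/*
 * MAX_CAP_TO_BYTES(3072)        -> 3072 * SZ_1K = 3145728 bytes
 * / drv_data->num_banks (4)     ->  786432 bytes per bank
 * >> CACHE_LINE_SIZE_SHIFT (6)  ->   12288 cache lines
 *
 * attr1_val |= 12288 << ATTR1_MAX_CAP_SHIFT;
 */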
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 088dc99f77f3..209dcdca923f 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -110,7 +110,7 @@ static void pdr_locator_del_server(struct qmi_handle *qmi,
pdr->locator_addr.sq_port = 0;
}
-static struct qmi_ops pdr_locator_ops = {
+static const struct qmi_ops pdr_locator_ops = {
.new_server = pdr_locator_new_server,
.del_server = pdr_locator_del_server,
};
@@ -238,7 +238,7 @@ static void pdr_notifier_del_server(struct qmi_handle *qmi,
mutex_unlock(&pdr->list_lock);
}
-static struct qmi_ops pdr_notifier_ops = {
+static const struct qmi_ops pdr_notifier_ops = {
.new_server = pdr_notifier_new_server,
.del_server = pdr_notifier_del_server,
};
@@ -343,7 +343,7 @@ static void pdr_indication_cb(struct qmi_handle *qmi,
queue_work(pdr->indack_wq, &pdr->indack_work);
}
-static struct qmi_msg_handler qmi_indication_handler[] = {
+static const struct qmi_msg_handler qmi_indication_handler[] = {
{
.type = QMI_INDICATION,
.msg_id = SERVREG_STATE_UPDATED_IND_ID,
@@ -569,7 +569,7 @@ EXPORT_SYMBOL(pdr_add_lookup);
int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
{
struct servreg_restart_pd_resp resp;
- struct servreg_restart_pd_req req;
+ struct servreg_restart_pd_req req = { 0 };
struct sockaddr_qrtr addr;
struct pdr_service *tmp;
struct qmi_txn txn;
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7649b2057b9a..f42954e2c98e 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -82,10 +82,11 @@
#define NUM_AHB_CLKS 2
/**
- * @struct geni_wrapper - Data structure to represent the QUP Wrapper Core
+ * struct geni_wrapper - Data structure to represent the QUP Wrapper Core
* @dev: Device pointer of the QUP wrapper core
* @base: Base address of this instance of QUP wrapper core
* @ahb_clks: Handle to the primary & secondary AHB clocks
+ * @to_core: Core ICC path
*/
struct geni_wrapper {
struct device *dev;
@@ -237,7 +238,7 @@ static void geni_se_irq_clear(struct geni_se *se)
* geni_se_init() - Initialize the GENI serial engine
* @se: Pointer to the concerned serial engine.
* @rx_wm: Receive watermark, in units of FIFO words.
- * @rx_rfr_wm: Ready-for-receive watermark, in units of FIFO words.
+ * @rx_rfr: Ready-for-receive watermark, in units of FIFO words.
*
* This function is used to initialize the GENI serial engine, configure
* receive watermark and ready-for-receive watermarks.
@@ -732,7 +733,7 @@ void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
{
struct geni_wrapper *wrapper = se->wrapper;
- if (iova && !dma_mapping_error(wrapper->dev, iova))
+ if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_TO_DEVICE);
}
EXPORT_SYMBOL(geni_se_tx_dma_unprep);
@@ -749,7 +750,7 @@ void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len)
{
struct geni_wrapper *wrapper = se->wrapper;
- if (iova && !dma_mapping_error(wrapper->dev, iova))
+ if (!dma_mapping_error(wrapper->dev, iova))
dma_unmap_single(wrapper->dev, iova, len, DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(geni_se_rx_dma_unprep);
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index ed2c687c16b3..b5840d624bc6 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -65,6 +65,7 @@ struct qmp_cooling_device {
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
* @pd_data: genpd data
+ * @cooling_devs: thermal cooling devices
*/
struct qmp {
void __iomem *msgram;
@@ -225,7 +226,6 @@ static bool qmp_message_empty(struct qmp *qmp)
static int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
- size_t tlen;
int ret;
if (WARN_ON(len + sizeof(u32) > qmp->size))
@@ -242,7 +242,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
writel(len, qmp->msgram + qmp->offset);
/* Read back len to confirm data written in message RAM */
- tlen = readl(qmp->msgram + qmp->offset);
+ readl(qmp->msgram + qmp->offset);
qmp_kick(qmp);
time_left = wait_event_interruptible_timeout(qmp->event,
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a297911afe57..37969dcbaf14 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -13,6 +13,7 @@
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
@@ -497,7 +498,7 @@ static void __tcs_buffer_write(struct rsc_drv *drv, int tcs_id, int cmd_id,
write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
- trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
+ trace_rpmh_send_msg(drv, tcs_id, j, msgid, cmd);
}
write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
@@ -1018,6 +1019,7 @@ static const struct of_device_id rpmh_drv_match[] = {
{ .compatible = "qcom,rpmh-rsc", },
{ }
};
+MODULE_DEVICE_TABLE(of, rpmh_drv_match);
static struct platform_driver rpmh_driver = {
.probe = rpmh_rsc_probe,
@@ -1033,3 +1035,6 @@ static int __init rpmh_driver_init(void)
return platform_driver_register(&rpmh_driver);
}
arch_initcall(rpmh_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
+MODULE_LICENSE("GPL v2");
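
MODULE_DEVICE_TABLE() exports the compatible strings so userspace can autoload the driver when a matching device appears, and the description/license tags become mandatory once the driver can be built as a module. A generic skeleton of the pieces involved, using hypothetical names:

    #include <linux/module.h>
    #include <linux/of.h>
    #include <linux/platform_device.h>

    static const struct of_device_id demo_match[] = {
            { .compatible = "vendor,demo" },        /* hypothetical */
            { }
    };
    MODULE_DEVICE_TABLE(of, demo_match);

    static int demo_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver demo_driver = {
            .probe = demo_probe,
            .driver = {
                    .name = "demo",
                    .of_match_table = demo_match,
            },
    };
    module_platform_driver(demo_driver);

    MODULE_DESCRIPTION("Demo platform driver");
    MODULE_LICENSE("GPL v2");
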
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
index b61e183ede69..01765ee9cdfb 100644
--- a/drivers/soc/qcom/rpmh.c
+++ b/drivers/soc/qcom/rpmh.c
@@ -181,8 +181,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
struct cache_req *req;
int i;
- rpm_msg->msg.state = state;
-
/* Cache the request in our store and link the payload */
for (i = 0; i < rpm_msg->msg.num_cmds; i++) {
req = cache_rpm_request(ctrlr, state, &rpm_msg->msg.cmds[i]);
@@ -190,8 +188,6 @@ static int __rpmh_write(const struct device *dev, enum rpmh_state state,
return PTR_ERR(req);
}
- rpm_msg->msg.state = state;
-
if (state == RPMH_ACTIVE_ONLY_STATE) {
WARN_ON(irqs_disabled());
ret = rpmh_rsc_send_data(ctrlr_to_drv(ctrlr), &rpm_msg->msg);
@@ -254,7 +250,7 @@ EXPORT_SYMBOL(rpmh_write_async);
/**
* rpmh_write: Write a set of RPMH commands and block until response
*
- * @rc: The RPMH handle got from rpmh_get_client
+ * @dev: The device making the request
* @state: Active/sleep set
* @cmd: The payload data
* @n: The number of elements in @cmd
@@ -268,11 +264,9 @@ int rpmh_write(const struct device *dev, enum rpmh_state state,
DEFINE_RPMH_MSG_ONSTACK(dev, state, &compl, rpm_msg);
int ret;
- if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
- return -EINVAL;
-
- memcpy(rpm_msg.cmd, cmd, n * sizeof(*cmd));
- rpm_msg.msg.num_cmds = n;
+ ret = __fill_rpmh_msg(&rpm_msg, state, cmd, n);
+ if (ret)
+ return ret;
ret = __rpmh_write(dev, state, &rpm_msg);
if (ret)
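
rpmh_write() now funnels validation and payload copying through the same __fill_rpmh_msg() helper used by the async path, which is also why the duplicated msg.state assignments above could go. Roughly, such a helper does the following (a simplified sketch, not the exact kernel code):

    static int __fill_rpmh_msg(struct rpmh_request *req, enum rpmh_state state,
                               const struct tcs_cmd *cmd, u32 n)
    {
            if (!cmd || !n || n > MAX_RPMH_PAYLOAD)
                    return -EINVAL;

            memcpy(req->cmd, cmd, n * sizeof(*cmd));
            req->msg.state = state;
            req->msg.num_cmds = n;
            return 0;
    }
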
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index e72426221a69..7ce06356d24c 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -24,9 +24,12 @@
* struct rpmhpd - top level RPMh power domain resource data structure
* @dev: rpmh power domain controller device
 * @pd: generic_pm_domain corresponding to the power domain
+ * @parent: generic_pm_domain corresponding to the parent's power domain
* @peer: A peer power domain in case Active only Voting is
* supported
* @active_only: True if it represents an Active only peer
+ * @corner: current corner
+ * @active_corner: current active corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain. max
@@ -132,6 +135,18 @@ static const struct rpmhpd_desc sdm845_desc = {
.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};
+/* SDX55 RPMH powerdomains */
+static struct rpmhpd *sdx55_rpmhpds[] = {
+ [SDX55_MSS] = &sdm845_mss,
+ [SDX55_MX] = &sdm845_mx,
+ [SDX55_CX] = &sdm845_cx,
+};
+
+static const struct rpmhpd_desc sdx55_desc = {
+ .rpmhpds = sdx55_rpmhpds,
+ .num_pds = ARRAY_SIZE(sdx55_rpmhpds),
+};
+
/* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
@@ -205,6 +220,7 @@ static const struct rpmhpd_desc sc7180_desc = {
static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
+ { .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ }
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index f2168e4259b2..85d1207b72d7 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -35,7 +35,7 @@
#define KEY_FLOOR_LEVEL 0x6c6676 /* vfl */
#define KEY_LEVEL 0x6c766c76 /* vlvl */
-#define MAX_8996_RPMPD_STATE 6
+#define MAX_CORNER_RPMPD_STATE 6
#define DEFINE_RPMPD_PAIR(_platform, _name, _active, r_type, r_key, \
r_id) \
@@ -116,6 +116,52 @@ struct rpmpd_desc {
static DEFINE_MUTEX(rpmpd_lock);
+/* msm8939 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8939, vddmd, vddmd_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_VFC(msm8939, vddmd_vfc, SMPA, 1);
+
+DEFINE_RPMPD_PAIR(msm8939, vddcx, vddcx_ao, SMPA, CORNER, 2);
+DEFINE_RPMPD_VFC(msm8939, vddcx_vfc, SMPA, 2);
+
+DEFINE_RPMPD_PAIR(msm8939, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+static struct rpmpd *msm8939_rpmpds[] = {
+ [MSM8939_VDDMDCX] = &msm8939_vddmd,
+ [MSM8939_VDDMDCX_AO] = &msm8939_vddmd_ao,
+ [MSM8939_VDDMDCX_VFC] = &msm8939_vddmd_vfc,
+ [MSM8939_VDDCX] = &msm8939_vddcx,
+ [MSM8939_VDDCX_AO] = &msm8939_vddcx_ao,
+ [MSM8939_VDDCX_VFC] = &msm8939_vddcx_vfc,
+ [MSM8939_VDDMX] = &msm8939_vddmx,
+ [MSM8939_VDDMX_AO] = &msm8939_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8939_desc = {
+ .rpmpds = msm8939_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8939_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
+/* msm8916 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8916, vddcx, vddcx_ao, SMPA, CORNER, 1);
+DEFINE_RPMPD_PAIR(msm8916, vddmx, vddmx_ao, LDOA, CORNER, 3);
+
+DEFINE_RPMPD_VFC(msm8916, vddcx_vfc, SMPA, 1);
+
+static struct rpmpd *msm8916_rpmpds[] = {
+ [MSM8916_VDDCX] = &msm8916_vddcx,
+ [MSM8916_VDDCX_AO] = &msm8916_vddcx_ao,
+ [MSM8916_VDDCX_VFC] = &msm8916_vddcx_vfc,
+ [MSM8916_VDDMX] = &msm8916_vddmx,
+ [MSM8916_VDDMX_AO] = &msm8916_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8916_desc = {
+ .rpmpds = msm8916_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8916_rpmpds),
+ .max_state = MAX_CORNER_RPMPD_STATE,
+};
+
/* msm8976 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
@@ -159,7 +205,7 @@ static struct rpmpd *msm8996_rpmpds[] = {
static const struct rpmpd_desc msm8996_desc = {
.rpmpds = msm8996_rpmpds,
.num_pds = ARRAY_SIZE(msm8996_rpmpds),
- .max_state = MAX_8996_RPMPD_STATE,
+ .max_state = MAX_CORNER_RPMPD_STATE,
};
/* msm8998 RPM Power domains */
@@ -220,11 +266,46 @@ static const struct rpmpd_desc qcs404_desc = {
.max_state = RPM_SMD_LEVEL_BINNING,
};
+/* sdm660 RPM Power domains */
+DEFINE_RPMPD_PAIR(sdm660, vddcx, vddcx_ao, RWCX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddcx_vfl, RWCX, 0);
+
+DEFINE_RPMPD_PAIR(sdm660, vddmx, vddmx_ao, RWMX, LEVEL, 0);
+DEFINE_RPMPD_VFL(sdm660, vddmx_vfl, RWMX, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_ssccx, RWLC, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_ssccx_vfl, RWLC, 0);
+
+DEFINE_RPMPD_LEVEL(sdm660, vdd_sscmx, RWLM, 0);
+DEFINE_RPMPD_VFL(sdm660, vdd_sscmx_vfl, RWLM, 0);
+
+static struct rpmpd *sdm660_rpmpds[] = {
+ [SDM660_VDDCX] = &sdm660_vddcx,
+ [SDM660_VDDCX_AO] = &sdm660_vddcx_ao,
+ [SDM660_VDDCX_VFL] = &sdm660_vddcx_vfl,
+ [SDM660_VDDMX] = &sdm660_vddmx,
+ [SDM660_VDDMX_AO] = &sdm660_vddmx_ao,
+ [SDM660_VDDMX_VFL] = &sdm660_vddmx_vfl,
+ [SDM660_SSCCX] = &sdm660_vdd_ssccx,
+ [SDM660_SSCCX_VFL] = &sdm660_vdd_ssccx_vfl,
+ [SDM660_SSCMX] = &sdm660_vdd_sscmx,
+ [SDM660_SSCMX_VFL] = &sdm660_vdd_sscmx_vfl,
+};
+
+static const struct rpmpd_desc sdm660_desc = {
+ .rpmpds = sdm660_rpmpds,
+ .num_pds = ARRAY_SIZE(sdm660_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
static const struct of_device_id rpmpd_match_table[] = {
+ { .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
+ { .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
{ .compatible = "qcom,msm8998-rpmpd", .data = &msm8998_desc },
{ .compatible = "qcom,qcs404-rpmpd", .data = &qcs404_desc },
+ { .compatible = "qcom,sdm660-rpmpd", .data = &sdm660_desc },
{ }
};
MODULE_DEVICE_TABLE(of, rpmpd_match_table);
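
Each new compatible entry carries its per-SoC descriptor through the .data pointer; a probe path typically recovers it with of_device_get_match_data(). A sketch of that retrieval, with an illustrative probe rather than the file's actual one:

    #include <linux/of_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            const struct rpmpd_desc *desc;

            desc = of_device_get_match_data(&pdev->dev);
            if (!desc)
                    return -EINVAL;

            /* ... register desc->num_pds power domains from desc->rpmpds ... */
            return 0;
    }
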
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 28c19bcb2f20..7251827bac88 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -122,7 +122,7 @@ struct smem_global_entry {
* @free_offset: index of the first unallocated byte in smem
* @available: number of bytes available for allocation
* @reserved: reserved field, must be 0
- * toc: array of references to items
+ * @toc: array of references to items
*/
struct smem_header {
struct smem_proc_comm proc_comm[4];
@@ -255,6 +255,7 @@ struct smem_region {
* processor/host
* @cacheline: list of cacheline sizes for each host
* @item_count: max accepted item number
+ * @socinfo: platform device pointer
* @num_regions: number of @regions
* @regions: list of the memory regions defining the shared memory
*/
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 07183d731d74..2df488333be9 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -112,6 +112,7 @@ struct smp2p_entry {
* struct qcom_smp2p - device driver context
* @dev: device driver handle
* @in: pointer to the inbound smem item
+ * @out: pointer to the outbound smem item
* @smem_items: ids of the two smem items
* @valid_entries: already scanned inbound entries
* @local_pid: processor id of the inbound edge
@@ -318,15 +319,16 @@ static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
struct smp2p_entry *entry = data;
+ unsigned long flags;
u32 orig;
u32 val;
- spin_lock(&entry->lock);
+ spin_lock_irqsave(&entry->lock, flags);
val = orig = readl(entry->value);
val &= ~mask;
val |= value;
writel(val, entry->value);
- spin_unlock(&entry->lock);
+ spin_unlock_irqrestore(&entry->lock, flags);
if (val != orig)
qcom_smp2p_kick(entry->smp2p);
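
smp2p_update_bits() can be reached from callers that already run with interrupts disabled, so the plain spin_lock() is upgraded to the irqsave variant, which saves and restores the local interrupt state and is therefore safe from any context. A minimal sketch of the pattern:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);
    static u32 shared_state;

    /* Safe from process, softirq and hardirq context alike. */
    static void demo_update(u32 mask, u32 value)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_lock, flags);
            shared_state = (shared_state & ~mask) | value;
            spin_unlock_irqrestore(&demo_lock, flags);
    }
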
diff --git a/drivers/soc/qcom/smsm.c b/drivers/soc/qcom/smsm.c
index 70c3c90b997c..1d3d5e3ec2b0 100644
--- a/drivers/soc/qcom/smsm.c
+++ b/drivers/soc/qcom/smsm.c
@@ -130,7 +130,7 @@ struct smsm_host {
/**
* smsm_update_bits() - change bit in outgoing entry and inform subscribers
* @data: smsm context pointer
- * @offset: bit in the entry
+ * @mask: value mask
* @value: new value
*
* Used to set and clear the bits in the outgoing/local entry and inform
@@ -254,10 +254,8 @@ static void smsm_mask_irq(struct irq_data *irqd)
* smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
* @irqd: IRQ handle to be unmasked
*
-
* This subscribes the local CPU to interrupts upon changes to the defined
* status bit. The bit is also marked for cascading.
-
*/
static void smsm_unmask_irq(struct irq_data *irqd)
{
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index b44ede48decc..d21530d24253 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -218,13 +218,19 @@ static const struct soc_id soc_id[] = {
{ 251, "MSM8992" },
{ 253, "APQ8094" },
{ 291, "APQ8096" },
+ { 293, "MSM8953" },
+ { 304, "APQ8053" },
{ 305, "MSM8996SG" },
{ 310, "MSM8996AU" },
{ 311, "APQ8096AU" },
{ 312, "APQ8096SG" },
{ 318, "SDM630" },
{ 321, "SDM845" },
+ { 338, "SDM450" },
{ 341, "SDA845" },
+ { 349, "SDM632" },
+ { 350, "SDA632" },
+ { 351, "SDA450" },
{ 356, "SM8250" },
{ 402, "IPQ6018" },
{ 425, "SC7180" },
diff --git a/drivers/soc/qcom/wcnss_ctrl.c b/drivers/soc/qcom/wcnss_ctrl.c
index e5c68051fb17..32bed249f90e 100644
--- a/drivers/soc/qcom/wcnss_ctrl.c
+++ b/drivers/soc/qcom/wcnss_ctrl.c
@@ -68,9 +68,8 @@ struct wcnss_msg_hdr {
u32 len;
} __packed;
-/**
+/*
* struct wcnss_version_resp - version request response
- * @hdr: common packet wcnss_msg_hdr header
*/
struct wcnss_version_resp {
struct wcnss_msg_hdr hdr;
@@ -108,9 +107,11 @@ struct wcnss_download_nv_resp {
/**
* wcnss_ctrl_smd_callback() - handler from SMD responses
- * @channel: smd channel handle
+ * @rpdev: remote processor message device pointer
* @data: pointer to the incoming data packet
* @count: size of the incoming data packet
+ * @priv: unused
+ * @addr: unused
*
* Handles any incoming packets from the remote WCNSS_CTRL service.
*/
@@ -267,6 +268,7 @@ free_req:
* @wcnss: wcnss handle, retrieved from drvdata
* @name: SMD channel name
* @cb: callback to handle incoming data on the channel
+ * @priv: private data for use in the callback
*/
struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name, rpmsg_rx_cb_t cb, void *priv)
{
diff --git a/drivers/soc/renesas/rmobile-sysc.c b/drivers/soc/renesas/rmobile-sysc.c
index 54b616ad4a62..9046b8c933cb 100644
--- a/drivers/soc/renesas/rmobile-sysc.c
+++ b/drivers/soc/renesas/rmobile-sysc.c
@@ -57,19 +57,19 @@ static int rmobile_pd_power_down(struct generic_pm_domain *genpd)
return ret;
}
- if (__raw_readl(rmobile_pd->base + PSTR) & mask) {
+ if (readl(rmobile_pd->base + PSTR) & mask) {
unsigned int retry_count;
- __raw_writel(mask, rmobile_pd->base + SPDCR);
+ writel(mask, rmobile_pd->base + SPDCR);
for (retry_count = PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(rmobile_pd->base + SPDCR) & mask))
+ if (!(readl(rmobile_pd->base + SPDCR) & mask))
break;
cpu_relax();
}
}
pr_debug("%s: Power off, 0x%08x -> PSTR = 0x%08x\n", genpd->name, mask,
- __raw_readl(rmobile_pd->base + PSTR));
+ readl(rmobile_pd->base + PSTR));
return 0;
}
@@ -80,13 +80,13 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
unsigned int retry_count;
int ret = 0;
- if (__raw_readl(rmobile_pd->base + PSTR) & mask)
+ if (readl(rmobile_pd->base + PSTR) & mask)
return ret;
- __raw_writel(mask, rmobile_pd->base + SWUCR);
+ writel(mask, rmobile_pd->base + SWUCR);
for (retry_count = 2 * PSTR_RETRIES; retry_count; retry_count--) {
- if (!(__raw_readl(rmobile_pd->base + SWUCR) & mask))
+ if (!(readl(rmobile_pd->base + SWUCR) & mask))
break;
if (retry_count > PSTR_RETRIES)
udelay(PSTR_DELAY_US);
@@ -98,7 +98,7 @@ static int __rmobile_pd_power_up(struct rmobile_pm_domain *rmobile_pd)
pr_debug("%s: Power on, 0x%08x -> PSTR = 0x%08x\n",
rmobile_pd->genpd.name, mask,
- __raw_readl(rmobile_pd->base + PSTR));
+ readl(rmobile_pd->base + PSTR));
return ret;
}
@@ -327,6 +327,7 @@ static int __init rmobile_init_pm_domains(void)
pmd = of_get_child_by_name(np, "pm-domains");
if (!pmd) {
+ iounmap(base);
pr_warn("%pOF lacks pm-domains node\n", np);
continue;
}
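
readl()/writel() differ from the __raw_*() accessors in two ways: they convert to/from little-endian and they include the barriers that order MMIO against normal memory accesses, so the conversion above is a correctness cleanup as much as a style one. In sketch form:

    /* __raw_writel(v, addr): plain store, no barrier, native endianness.
     * writel(v, addr):       little-endian store with ordering guarantees.
     * The polling loop below relies on each read being ordered: */
    writel(mask, base + SWUCR);
    while (readl(base + SWUCR) & mask)
            cpu_relax();
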
diff --git a/drivers/soc/rockchip/io-domain.c b/drivers/soc/rockchip/io-domain.c
index eece97f97ef8..cf8182fc3642 100644
--- a/drivers/soc/rockchip/io-domain.c
+++ b/drivers/soc/rockchip/io-domain.c
@@ -53,9 +53,6 @@
struct rockchip_iodomain;
-/**
- * @supplies: voltage settings matching the register bits.
- */
struct rockchip_iodomain_soc_data {
int grf_offset;
const char *supply_names[MAX_SUPPLIES];
@@ -547,6 +544,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev)
if (uV < 0) {
dev_err(iod->dev, "Can't determine voltage: %s\n",
supply_name);
+ ret = uV;
goto unreg_notify;
}
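
Without the added assignment, the error path jumped to unreg_notify with ret still holding its previous (possibly zero) value, so a failed voltage read could let the probe report success. The fix is the usual propagate-the-errno pattern:

    uV = regulator_get_voltage(reg);
    if (uV < 0) {
            ret = uV;       /* keep the errno instead of falling through */
            goto unreg_notify;
    }
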
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 8d4d05086906..1a76eade2ed6 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -20,6 +20,7 @@ static const struct exynos_soc_id {
const char *name;
unsigned int id;
} soc_ids[] = {
+ /* List ordered by SoC name */
{ "EXYNOS3250", 0xE3472000 },
{ "EXYNOS4210", 0x43200000 }, /* EVT0 revision */
{ "EXYNOS4210", 0x43210000 },
@@ -29,10 +30,10 @@ static const struct exynos_soc_id {
{ "EXYNOS5260", 0xE5260000 },
{ "EXYNOS5410", 0xE5410000 },
{ "EXYNOS5420", 0xE5420000 },
+ { "EXYNOS5433", 0xE5433000 },
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
- { "EXYNOS5433", 0xE5433000 },
};
static const char * __init product_id_to_soc_id(unsigned int product_id)
@@ -98,9 +99,9 @@ static int __init exynos_chipid_early_init(void)
goto err;
}
- /* it is too early to use dev_info() here (soc_dev is NULL) */
- pr_info("soc soc0: Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
- soc_dev_attr->soc_id, product_id, revision);
+ dev_info(soc_device_to_device(soc_dev),
+ "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, product_id, revision);
return 0;
@@ -110,4 +111,4 @@ err:
return ret;
}
-early_initcall(exynos_chipid_early_init);
+arch_initcall(exynos_chipid_early_init);
diff --git a/drivers/soc/samsung/exynos-pmu.c b/drivers/soc/samsung/exynos-pmu.c
index 17304fa18429..a18c93a4646c 100644
--- a/drivers/soc/samsung/exynos-pmu.c
+++ b/drivers/soc/samsung/exynos-pmu.c
@@ -8,6 +8,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/mfd/core.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
@@ -97,6 +98,10 @@ static const struct of_device_id exynos_pmu_of_device_ids[] = {
{ /*sentinel*/ },
};
+static const struct mfd_cell exynos_pmu_devs[] = {
+ { .name = "exynos-clkout", },
+};
+
struct regmap *exynos_get_pmu_regmap(void)
{
struct device_node *np = of_find_matching_node(NULL,
@@ -110,6 +115,7 @@ EXPORT_SYMBOL_GPL(exynos_get_pmu_regmap);
static int exynos_pmu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ int ret;
pmu_base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmu_base_addr))
@@ -128,6 +134,11 @@ static int exynos_pmu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pmu_context);
+ ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, exynos_pmu_devs,
+ ARRAY_SIZE(exynos_pmu_devs), NULL, 0, NULL);
+ if (ret)
+ return ret;
+
if (devm_of_platform_populate(dev))
dev_err(dev, "Error populating children, reboot and poweroff might not work properly\n");
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
index 01bb3050d678..ca409a976e34 100644
--- a/drivers/soc/samsung/exynos5422-asv.c
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -383,7 +383,7 @@ static int __asv_offset_voltage(unsigned int index)
return 25000;
default:
return 0;
- };
+ }
}
static void exynos5422_asv_offset_voltage_setup(struct exynos_asv *asv)
diff --git a/drivers/soc/samsung/s3c-pm-check.c b/drivers/soc/samsung/s3c-pm-check.c
index ff3e099fc208..439d5c372512 100644
--- a/drivers/soc/samsung/s3c-pm-check.c
+++ b/drivers/soc/samsung/s3c-pm-check.c
@@ -151,7 +151,7 @@ static inline int in_region(void *ptr, int size, void *what, size_t whatsz)
/**
* s3c_pm_runcheck() - helper to check a resource on restore.
* @res: The resource to check
- * @vak: Pointer to list of CRC32 values to check.
+ * @val: Pointer to list of CRC32 values to check.
*
* Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
* function runs the given memory resource checking it against the stored
diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig
index f10fd6cae13e..1fef0e711056 100644
--- a/drivers/soc/sunxi/Kconfig
+++ b/drivers/soc/sunxi/Kconfig
@@ -2,6 +2,14 @@
#
# Allwinner sunXi SoC drivers
#
+
+config SUNXI_MBUS
+ bool
+ default ARCH_SUNXI
+ help
+ Say y to enable the fixups needed to support the Allwinner
+ MBUS DMA quirks.
+
config SUNXI_SRAM
bool
default ARCH_SUNXI
diff --git a/drivers/soc/sunxi/Makefile b/drivers/soc/sunxi/Makefile
index 7816fbbec387..549159571d4f 100644
--- a/drivers/soc/sunxi/Makefile
+++ b/drivers/soc/sunxi/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SUNXI_MBUS) += sunxi_mbus.o
obj-$(CONFIG_SUNXI_SRAM) += sunxi_sram.o
diff --git a/drivers/soc/sunxi/sunxi_mbus.c b/drivers/soc/sunxi/sunxi_mbus.c
new file mode 100644
index 000000000000..e9925c8487d7
--- /dev/null
+++ b/drivers/soc/sunxi/sunxi_mbus.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2020 Maxime Ripard <maxime@cerno.tech> */
+
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static const char * const sunxi_mbus_devices[] = {
+ /*
+ * The display engine virtual devices are not strictly speaking
+ * connected to the MBUS, but since DRM will perform all the
+ * memory allocations and DMA operations through that device, we
+ * need to have the quirk on those devices too.
+ */
+ "allwinner,sun4i-a10-display-engine",
+ "allwinner,sun5i-a10s-display-engine",
+ "allwinner,sun5i-a13-display-engine",
+ "allwinner,sun6i-a31-display-engine",
+ "allwinner,sun6i-a31s-display-engine",
+ "allwinner,sun7i-a20-display-engine",
+ "allwinner,sun8i-a23-display-engine",
+ "allwinner,sun8i-a33-display-engine",
+ "allwinner,sun8i-a83t-display-engine",
+ "allwinner,sun8i-h3-display-engine",
+ "allwinner,sun8i-r40-display-engine",
+ "allwinner,sun8i-v3s-display-engine",
+ "allwinner,sun9i-a80-display-engine",
+ "allwinner,sun50i-a64-display-engine",
+
+ /*
+ * And now we have the regular devices connected to the MBUS
+ * (that we know of).
+ */
+ "allwinner,sun4i-a10-csi1",
+ "allwinner,sun4i-a10-display-backend",
+ "allwinner,sun4i-a10-display-frontend",
+ "allwinner,sun4i-a10-video-engine",
+ "allwinner,sun5i-a13-display-backend",
+ "allwinner,sun5i-a13-video-engine",
+ "allwinner,sun6i-a31-csi",
+ "allwinner,sun6i-a31-display-backend",
+ "allwinner,sun7i-a20-csi0",
+ "allwinner,sun7i-a20-display-backend",
+ "allwinner,sun7i-a20-display-frontend",
+ "allwinner,sun7i-a20-video-engine",
+ "allwinner,sun8i-a23-display-backend",
+ "allwinner,sun8i-a23-display-frontend",
+ "allwinner,sun8i-a33-display-backend",
+ "allwinner,sun8i-a33-display-frontend",
+ "allwinner,sun8i-a33-video-engine",
+ "allwinner,sun8i-a83t-csi",
+ "allwinner,sun8i-h3-csi",
+ "allwinner,sun8i-h3-video-engine",
+ "allwinner,sun8i-v3s-csi",
+ "allwinner,sun9i-a80-display-backend",
+ "allwinner,sun50i-a64-csi",
+ "allwinner,sun50i-a64-video-engine",
+ "allwinner,sun50i-h5-video-engine",
+ NULL,
+};
+
+static int sunxi_mbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *__dev)
+{
+ struct device *dev = __dev;
+ int ret;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ /*
+ * Only the devices that need a large memory bandwidth do DMA
+ * directly over the memory bus (called MBUS), instead of going
+ * through the regular system bus.
+ */
+ if (!of_device_compatible_match(dev->of_node, sunxi_mbus_devices))
+ return NOTIFY_DONE;
+
+ /*
+ * Devices with an interconnects property have the MBUS
+ * relationship described in their DT and dealt with by
+ * of_dma_configure, so we can just skip them.
+ *
+	 * Older DTs, or SoCs that are not yet fully understood, still
+	 * need this DMA offset to be set here.
+ */
+ if (of_find_property(dev->of_node, "interconnects", NULL))
+ return NOTIFY_DONE;
+
+ ret = dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G);
+ if (ret)
+ dev_err(dev, "Couldn't setup our DMA offset: %d\n", ret);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block sunxi_mbus_nb = {
+ .notifier_call = sunxi_mbus_notifier,
+};
+
+static const char * const sunxi_mbus_platforms[] __initconst = {
+ "allwinner,sun4i-a10",
+ "allwinner,sun5i-a10s",
+ "allwinner,sun5i-a13",
+ "allwinner,sun6i-a31",
+ "allwinner,sun7i-a20",
+ "allwinner,sun8i-a23",
+ "allwinner,sun8i-a33",
+ "allwinner,sun8i-a83t",
+ "allwinner,sun8i-h3",
+ "allwinner,sun8i-r40",
+ "allwinner,sun8i-v3",
+ "allwinner,sun8i-v3s",
+ "allwinner,sun9i-a80",
+ "allwinner,sun50i-a64",
+ "allwinner,sun50i-h5",
+ "nextthing,gr8",
+ NULL,
+};
+
+static int __init sunxi_mbus_init(void)
+{
+ if (!of_device_compatible_match(of_root, sunxi_mbus_platforms))
+ return 0;
+
+ bus_register_notifier(&platform_bus_type, &sunxi_mbus_nb);
+ return 0;
+}
+arch_initcall(sunxi_mbus_init);
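
dma_direct_set_offset(dev, PHYS_OFFSET, 0, SZ_4G) records that these masters see DRAM starting at bus address 0 while the CPU sees it at PHYS_OFFSET. A worked example of the resulting translation, assuming PHYS_OFFSET is 0x40000000 (typical for these SoCs):

    /* bus (DMA) address = CPU physical address - PHYS_OFFSET
     *
     *   phys 0x40000000  ->  dma 0x00000000
     *   phys 0x7fe00000  ->  dma 0x3fe00000
     */
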
diff --git a/drivers/soc/tegra/fuse/speedo-tegra124.c b/drivers/soc/tegra/fuse/speedo-tegra124.c
index bdbf76bb184f..5b1ee28e4272 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra124.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra124.c
@@ -101,8 +101,7 @@ static void __init rev_sku_to_speedo_ids(struct tegra_sku_info *sku_info,
void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
{
- int i, threshold, cpu_speedo_0_value, soc_speedo_0_value;
- int cpu_iddq_value, gpu_iddq_value, soc_iddq_value;
+ int i, threshold, soc_speedo_0_value;
BUILD_BUG_ON(ARRAY_SIZE(cpu_process_speedos) !=
THRESHOLD_INDEX_COUNT);
@@ -111,25 +110,17 @@ void __init tegra124_init_speedo_data(struct tegra_sku_info *sku_info)
BUILD_BUG_ON(ARRAY_SIZE(soc_process_speedos) !=
THRESHOLD_INDEX_COUNT);
- cpu_speedo_0_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
-
- /* GPU Speedo is stored in CPU_SPEEDO_2 */
- sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
-
- soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
-
- cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
- soc_iddq_value = tegra_fuse_read_early(FUSE_SOC_IDDQ);
- gpu_iddq_value = tegra_fuse_read_early(FUSE_GPU_IDDQ);
-
- sku_info->cpu_speedo_value = cpu_speedo_0_value;
-
+ sku_info->cpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_0);
if (sku_info->cpu_speedo_value == 0) {
pr_warn("Tegra Warning: Speedo value not fused.\n");
WARN_ON(1);
return;
}
+ /* GPU Speedo is stored in CPU_SPEEDO_2 */
+ sku_info->gpu_speedo_value = tegra_fuse_read_early(FUSE_CPU_SPEEDO_2);
+ soc_speedo_0_value = tegra_fuse_read_early(FUSE_SOC_SPEEDO_0);
+
rev_sku_to_speedo_ids(sku_info, &threshold);
sku_info->cpu_iddq_value = tegra_fuse_read_early(FUSE_CPU_IDDQ);
diff --git a/drivers/soc/tegra/fuse/speedo-tegra210.c b/drivers/soc/tegra/fuse/speedo-tegra210.c
index 70d3f6e1aa33..695d0b7f9a8a 100644
--- a/drivers/soc/tegra/fuse/speedo-tegra210.c
+++ b/drivers/soc/tegra/fuse/speedo-tegra210.c
@@ -94,7 +94,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
unsigned int i;
for (i = 0; i < num; i++)
- if (value < speedos[num])
+ if (value < speedos[i])
return i;
return -EINVAL;
@@ -102,7 +102,7 @@ static int get_process_id(int value, const u32 *speedos, unsigned int num)
void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
{
- int cpu_speedo[3], soc_speedo[3], cpu_iddq, gpu_iddq, soc_iddq;
+ int cpu_speedo[3], soc_speedo[3];
unsigned int index;
u8 speedo_revision;
@@ -122,10 +122,6 @@ void __init tegra210_init_speedo_data(struct tegra_sku_info *sku_info)
soc_speedo[1] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_1);
soc_speedo[2] = tegra_fuse_read_early(FUSE_SOC_SPEEDO_2);
- cpu_iddq = tegra_fuse_read_early(FUSE_CPU_IDDQ) * 4;
- soc_iddq = tegra_fuse_read_early(FUSE_SOC_IDDQ) * 4;
- gpu_iddq = tegra_fuse_read_early(FUSE_GPU_IDDQ) * 5;
-
/*
* Determine CPU, GPU and SoC speedo values depending on speedo fusing
* revision. Note that GPU speedo value is fused in CPU_SPEEDO_2.
diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig
index f5b82ffa637b..7e2fb1c16af1 100644
--- a/drivers/soc/ti/Kconfig
+++ b/drivers/soc/ti/Kconfig
@@ -1,22 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
-# 64-bit ARM SoCs from TI
-if ARM64
-
-if ARCH_K3
-
-config ARCH_K3_AM6_SOC
- bool "K3 AM6 SoC"
- help
- Enable support for TI's AM6 SoC Family support
-
-config ARCH_K3_J721E_SOC
- bool "K3 J721E SoC"
- help
- Enable support for TI's J721E SoC Family support
-
-endif
-
-endif
#
# TI SOC drivers
diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c
index 1147dc4c1d59..b495b0d5d0fa 100644
--- a/drivers/soc/ti/k3-ringacc.c
+++ b/drivers/soc/ti/k3-ringacc.c
@@ -11,6 +11,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
@@ -21,6 +22,7 @@ static LIST_HEAD(k3_ringacc_list);
static DEFINE_MUTEX(k3_ringacc_list_lock);
#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0)
+#define K3_DMARING_CFG_RING_SIZE_ELCNT_MASK GENMASK(15, 0)
/**
* struct k3_ring_rt_regs - The RA realtime Control/Status Registers region
@@ -43,7 +45,13 @@ struct k3_ring_rt_regs {
u32 hwindx;
};
-#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_RINGACC_RT_REGS_STEP 0x1000
+#define K3_DMARING_RT_REGS_STEP 0x2000
+#define K3_DMARING_RT_REGS_REVERSE_OFS 0x1000
+#define K3_RINGACC_RT_OCC_MASK GENMASK(20, 0)
+#define K3_DMARING_RT_OCC_TDOWN_COMPLETE BIT(31)
+#define K3_DMARING_RT_DB_ENTRY_MASK GENMASK(7, 0)
+#define K3_DMARING_RT_DB_TDOWN_ACK BIT(31)
/**
* struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region
@@ -122,6 +130,7 @@ struct k3_ring_state {
u32 occ;
u32 windex;
u32 rindex;
+ u32 tdown_complete:1;
};
/**
@@ -137,10 +146,13 @@ struct k3_ring_state {
* @elm_size: Size of the ring element
* @mode: Ring mode
* @flags: flags
+ * @state: Ring state
* @ring_id: Ring Id
* @parent: Pointer on struct @k3_ringacc
* @use_count: Use count for shared rings
* @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY)
+ * @dma_dev: device to be used for DMA API (allocation, mapping)
+ * @asel: Address Space Select value for physical addresses
*/
struct k3_ring {
struct k3_ring_rt_regs __iomem *rt;
@@ -155,11 +167,15 @@ struct k3_ring {
u32 flags;
#define K3_RING_FLAG_BUSY BIT(1)
#define K3_RING_FLAG_SHARED BIT(2)
+#define K3_RING_FLAG_REVERSE BIT(3)
struct k3_ring_state state;
u32 ring_id;
struct k3_ringacc *parent;
u32 use_count;
int proxy_id;
+ struct device *dma_dev;
+ u32 asel;
+#define K3_ADDRESS_ASEL_SHIFT 48
};
struct k3_ringacc_ops {
@@ -185,6 +201,7 @@ struct k3_ringacc_ops {
* @tisci_ring_ops: ti-sci rings ops
* @tisci_dev_id: ti-sci device id
* @ops: SoC specific ringacc operation
+ * @dma_rings: indicate DMA ring (dual ring within BCDMA/PKTDMA)
*/
struct k3_ringacc {
struct device *dev;
@@ -207,6 +224,7 @@ struct k3_ringacc {
u32 tisci_dev_id;
const struct k3_ringacc_ops *ops;
+ bool dma_rings;
};
/**
@@ -218,6 +236,21 @@ struct k3_ringacc_soc_data {
unsigned dma_ring_reset_quirk:1;
};
+static int k3_ringacc_ring_read_occ(struct k3_ring *ring)
+{
+ return readl(&ring->rt->occ) & K3_RINGACC_RT_OCC_MASK;
+}
+
+static void k3_ringacc_ring_update_occ(struct k3_ring *ring)
+{
+ u32 val;
+
+ val = readl(&ring->rt->occ);
+
+ ring->state.occ = val & K3_RINGACC_RT_OCC_MASK;
+ ring->state.tdown_complete = !!(val & K3_DMARING_RT_OCC_TDOWN_COMPLETE);
+}
+
static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring)
{
return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES -
@@ -231,12 +264,24 @@ static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx)
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem);
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem);
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem);
static struct k3_ring_ops k3_ring_mode_ring_ops = {
.push_tail = k3_ringacc_ring_push_mem,
.pop_head = k3_ringacc_ring_pop_mem,
};
+static struct k3_ring_ops k3_dmaring_fwd_ops = {
+ .push_tail = k3_ringacc_ring_push_mem,
+ .pop_head = k3_dmaring_fwd_pop,
+};
+
+static struct k3_ring_ops k3_dmaring_reverse_ops = {
+ /* Reverse side of the DMA ring can only be popped by SW */
+ .pop_head = k3_dmaring_reverse_pop,
+};
+
static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem);
static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem);
@@ -339,6 +384,40 @@ error:
}
EXPORT_SYMBOL_GPL(k3_ringacc_request_ring);
+static int k3_dmaring_request_dual_ring(struct k3_ringacc *ringacc, int fwd_id,
+ struct k3_ring **fwd_ring,
+ struct k3_ring **compl_ring)
+{
+ int ret = 0;
+
+ /*
+	 * DMA rings must be requested by ID; the completion ring is the
+	 * reverse side of the forward ring.
+ */
+ if (fwd_id < 0)
+ return -EINVAL;
+
+ mutex_lock(&ringacc->req_lock);
+
+ if (test_bit(fwd_id, ringacc->rings_inuse)) {
+ ret = -EBUSY;
+ goto error;
+ }
+
+ *fwd_ring = &ringacc->rings[fwd_id];
+ *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings];
+ set_bit(fwd_id, ringacc->rings_inuse);
+ ringacc->rings[fwd_id].use_count++;
+ dev_dbg(ringacc->dev, "Giving ring#%d\n", fwd_id);
+
+ mutex_unlock(&ringacc->req_lock);
+ return 0;
+
+error:
+ mutex_unlock(&ringacc->req_lock);
+ return ret;
+}
+
int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
int fwd_id, int compl_id,
struct k3_ring **fwd_ring,
@@ -349,6 +428,10 @@ int k3_ringacc_request_rings_pair(struct k3_ringacc *ringacc,
if (!fwd_ring || !compl_ring)
return -EINVAL;
+ if (ringacc->dma_rings)
+ return k3_dmaring_request_dual_ring(ringacc, fwd_id,
+ fwd_ring, compl_ring);
+
*fwd_ring = k3_ringacc_request_ring(ringacc, fwd_id, 0);
if (!(*fwd_ring))
return -ENODEV;
@@ -365,20 +448,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_request_rings_pair);
static void k3_ringacc_ring_reset_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- ring->size,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID;
+ ring_cfg.count = ring->size;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -398,20 +477,16 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset);
static void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring,
enum k3_ring_mode mode)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_RING_MODE_VALID,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- mode,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_RING_MODE_VALID;
+ ring_cfg.mode = mode;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -426,7 +501,7 @@ void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ)
goto reset;
if (!occ)
- occ = readl(&ring->rt->occ);
+ occ = k3_ringacc_ring_read_occ(ring);
if (occ) {
u32 db_ring_cnt, db_ring_cnt_cur;
@@ -478,20 +553,15 @@ EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma);
static void k3_ringacc_ring_free_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
int ret;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring->ring_id,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n",
ret, ring->ring_id);
@@ -506,6 +576,13 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
ringacc = ring->parent;
+ /*
+	 * DMA rings share their memory and configuration: only the forward
+	 * ring is configured, and the reverse ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags);
if (!test_bit(ring->ring_id, ringacc->rings_inuse))
@@ -521,11 +598,14 @@ int k3_ringacc_ring_free(struct k3_ring *ring)
k3_ringacc_ring_free_sci(ring);
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt, ring->ring_mem_dma);
ring->flags = 0;
ring->ops = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+
if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) {
clear_bit(ring->proxy_id, ringacc->proxy_inuse);
ring->proxy = NULL;
@@ -575,32 +655,115 @@ EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num);
static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring)
{
+ struct ti_sci_msg_rm_ring_cfg ring_cfg = { 0 };
struct k3_ringacc *ringacc = ring->parent;
- u32 ring_idx;
int ret;
if (!ringacc->tisci)
return -EINVAL;
- ring_idx = ring->ring_id;
- ret = ringacc->tisci_ring_ops->config(
- ringacc->tisci,
- TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER,
- ringacc->tisci_dev_id,
- ring_idx,
- lower_32_bits(ring->ring_mem_dma),
- upper_32_bits(ring->ring_mem_dma),
- ring->size,
- ring->mode,
- ring->elm_size,
- 0);
+ ring_cfg.nav_id = ringacc->tisci_dev_id;
+ ring_cfg.index = ring->ring_id;
+ ring_cfg.valid_params = TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER;
+ ring_cfg.addr_lo = lower_32_bits(ring->ring_mem_dma);
+ ring_cfg.addr_hi = upper_32_bits(ring->ring_mem_dma);
+ ring_cfg.count = ring->size;
+ ring_cfg.mode = ring->mode;
+ ring_cfg.size = ring->elm_size;
+ ring_cfg.asel = ring->asel;
+
+ ret = ringacc->tisci_ring_ops->set_cfg(ringacc->tisci, &ring_cfg);
if (ret)
dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n",
- ret, ring_idx);
+ ret, ring->ring_id);
return ret;
}
+static int k3_dmaring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
+{
+ struct k3_ringacc *ringacc;
+ struct k3_ring *reverse_ring;
+ int ret = 0;
+
+ if (cfg->elm_size != K3_RINGACC_RING_ELSIZE_8 ||
+ cfg->mode != K3_RINGACC_RING_MODE_RING ||
+ cfg->size & ~K3_DMARING_CFG_RING_SIZE_ELCNT_MASK)
+ return -EINVAL;
+
+ ringacc = ring->parent;
+
+ /*
+	 * DMA rings share their memory and configuration: only the forward
+	 * ring is configured, and the reverse ring is treated as its slave.
+ */
+ if (ringacc->dma_rings && (ring->flags & K3_RING_FLAG_REVERSE))
+ return 0;
+
+ if (!test_bit(ring->ring_id, ringacc->rings_inuse))
+ return -EINVAL;
+
+ ring->size = cfg->size;
+ ring->elm_size = cfg->elm_size;
+ ring->mode = cfg->mode;
+ ring->asel = cfg->asel;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev) {
+ dev_warn(ringacc->dev, "dma_dev is not provided for ring%d\n",
+ ring->ring_id);
+ ring->dma_dev = ringacc->dev;
+ }
+
+ memset(&ring->state, 0, sizeof(ring->state));
+
+ ring->ops = &k3_dmaring_fwd_ops;
+
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
+ if (!ring->ring_mem_virt) {
+ dev_err(ringacc->dev, "Failed to alloc ring mem\n");
+ ret = -ENOMEM;
+ goto err_free_ops;
+ }
+
+ ret = k3_ringacc_ring_cfg_sci(ring);
+ if (ret)
+ goto err_free_mem;
+
+ ring->flags |= K3_RING_FLAG_BUSY;
+
+ k3_ringacc_ring_dump(ring);
+
+ /* DMA rings: configure reverse ring */
+ reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings];
+ reverse_ring->size = cfg->size;
+ reverse_ring->elm_size = cfg->elm_size;
+ reverse_ring->mode = cfg->mode;
+ reverse_ring->asel = cfg->asel;
+ memset(&reverse_ring->state, 0, sizeof(reverse_ring->state));
+ reverse_ring->ops = &k3_dmaring_reverse_ops;
+
+ reverse_ring->ring_mem_virt = ring->ring_mem_virt;
+ reverse_ring->ring_mem_dma = ring->ring_mem_dma;
+ reverse_ring->flags |= K3_RING_FLAG_BUSY;
+ k3_ringacc_ring_dump(reverse_ring);
+
+ return 0;
+
+err_free_mem:
+ dma_free_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ ring->ring_mem_virt,
+ ring->ring_mem_dma);
+err_free_ops:
+ ring->ops = NULL;
+ ring->proxy = NULL;
+ ring->dma_dev = NULL;
+ ring->asel = 0;
+ return ret;
+}
+
int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
{
struct k3_ringacc *ringacc;
@@ -608,8 +771,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
if (!ring || !cfg)
return -EINVAL;
+
ringacc = ring->parent;
+ if (ringacc->dma_rings)
+ return k3_dmaring_cfg(ring, cfg);
+
if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 ||
cfg->mode >= K3_RINGACC_RING_MODE_INVALID ||
cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK ||
@@ -648,8 +815,12 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
switch (ring->mode) {
case K3_RINGACC_RING_MODE_RING:
ring->ops = &k3_ring_mode_ring_ops;
+ ring->dma_dev = cfg->dma_dev;
+ if (!ring->dma_dev)
+ ring->dma_dev = ringacc->dev;
break;
case K3_RINGACC_RING_MODE_MESSAGE:
+ ring->dma_dev = ringacc->dev;
if (ring->proxy)
ring->ops = &k3_ring_mode_proxy_ops;
else
@@ -661,9 +832,9 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
goto err_free_proxy;
}
- ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev,
- ring->size * (4 << ring->elm_size),
- &ring->ring_mem_dma, GFP_KERNEL);
+ ring->ring_mem_virt = dma_alloc_coherent(ring->dma_dev,
+ ring->size * (4 << ring->elm_size),
+ &ring->ring_mem_dma, GFP_KERNEL);
if (!ring->ring_mem_virt) {
dev_err(ringacc->dev, "Failed to alloc ring mem\n");
ret = -ENOMEM;
@@ -684,12 +855,13 @@ int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg)
return 0;
err_free_mem:
- dma_free_coherent(ringacc->dev,
+ dma_free_coherent(ring->dma_dev,
ring->size * (4 << ring->elm_size),
ring->ring_mem_virt,
ring->ring_mem_dma);
err_free_ops:
ring->ops = NULL;
+ ring->dma_dev = NULL;
err_free_proxy:
ring->proxy = NULL;
return ret;
@@ -711,7 +883,7 @@ u32 k3_ringacc_ring_get_free(struct k3_ring *ring)
return -EINVAL;
if (!ring->state.free)
- ring->state.free = ring->size - readl(&ring->rt->occ);
+ ring->state.free = ring->size - k3_ringacc_ring_read_occ(ring);
return ring->state.free;
}
@@ -722,7 +894,7 @@ u32 k3_ringacc_ring_get_occ(struct k3_ring *ring)
if (!ring || !(ring->flags & K3_RING_FLAG_BUSY))
return -EINVAL;
- return readl(&ring->rt->occ);
+ return k3_ringacc_ring_read_occ(ring);
}
EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ);
@@ -898,6 +1070,72 @@ static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem)
K3_RINGACC_ACCESS_MODE_POP_HEAD);
}
+/*
+ * The element is 48 bits of address + ASEL bits in the ring.
+ * ASEL is used by the DMAs and should be removed for the kernel as it is not
+ * ASEL is used by the DMAs and must be stripped for the kernel, as it is not
+ */
+static void k3_dmaring_remove_asel_from_elem(u64 *elem)
+{
+ *elem &= GENMASK_ULL(K3_ADDRESS_ASEL_SHIFT - 1, 0);
+}
+
+static int k3_dmaring_fwd_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+ u32 elem_idx;
+
+ /*
+	 * DMA rings: the forward ring is always tied to a DMA channel, and
+	 * the HW maintains no state usable for a POP operation, so it is
+	 * unknown how many elements the HW has consumed. To actually do a
+	 * POP, the read pointer has to be recalculated every time.
+ */
+ ring->state.occ = k3_ringacc_ring_read_occ(ring);
+ if (ring->state.windex >= ring->state.occ)
+ elem_idx = ring->state.windex - ring->state.occ;
+ else
+ elem_idx = ring->size - (ring->state.occ - ring->state.windex);
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, elem_idx);
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.occ--;
+ writel(-1, &ring->rt->db);
+
+ dev_dbg(ring->parent->dev, "%s: occ%d Windex%d Rindex%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.windex, elem_idx,
+ elem_ptr);
+ return 0;
+}
+
+static int k3_dmaring_reverse_pop(struct k3_ring *ring, void *elem)
+{
+ void *elem_ptr;
+
+ elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.rindex);
+
+ if (ring->state.occ) {
+ memcpy(elem, elem_ptr, (4 << ring->elm_size));
+ k3_dmaring_remove_asel_from_elem(elem);
+
+ ring->state.rindex = (ring->state.rindex + 1) % ring->size;
+ ring->state.occ--;
+ writel(-1 & K3_DMARING_RT_DB_ENTRY_MASK, &ring->rt->db);
+ } else if (ring->state.tdown_complete) {
+ dma_addr_t *value = elem;
+
+ *value = CPPI5_TDCM_MARKER;
+ writel(K3_DMARING_RT_DB_TDOWN_ACK, &ring->rt->db);
+ ring->state.tdown_complete = false;
+ }
+
+ dev_dbg(ring->parent->dev, "%s: occ%d index%d pos_ptr%px\n",
+ __func__, ring->state.occ, ring->state.rindex, elem_ptr);
+ return 0;
+}
+
static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
{
void *elem_ptr;
@@ -905,6 +1143,11 @@ static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem)
elem_ptr = k3_ringacc_get_elm_addr(ring, ring->state.windex);
memcpy(elem_ptr, elem, (4 << ring->elm_size));
+ if (ring->parent->dma_rings) {
+ u64 *addr = elem_ptr;
+
+ *addr |= ((u64)ring->asel << K3_ADDRESS_ASEL_SHIFT);
+ }
ring->state.windex = (ring->state.windex + 1) % ring->size;
ring->state.free--;
@@ -981,12 +1224,12 @@ int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->state.occ,
ring->state.rindex);
- if (!ring->state.occ)
+ if (!ring->state.occ && !ring->state.tdown_complete)
return -ENODATA;
if (ring->ops && ring->ops->pop_head)
@@ -1004,7 +1247,7 @@ int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem)
return -EINVAL;
if (!ring->state.occ)
- ring->state.occ = k3_ringacc_ring_get_occ(ring);
+ k3_ringacc_ring_update_occ(ring);
dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n",
ring->state.occ, ring->state.rindex);
@@ -1209,6 +1452,68 @@ static const struct of_device_id k3_ringacc_of_match[] = {
{},
};
+struct k3_ringacc *k3_ringacc_dmarings_init(struct platform_device *pdev,
+ struct k3_ringacc_init_data *data)
+{
+ struct device *dev = &pdev->dev;
+ struct k3_ringacc *ringacc;
+ void __iomem *base_rt;
+ struct resource *res;
+ int i;
+
+ ringacc = devm_kzalloc(dev, sizeof(*ringacc), GFP_KERNEL);
+ if (!ringacc)
+ return ERR_PTR(-ENOMEM);
+
+ ringacc->dev = dev;
+ ringacc->dma_rings = true;
+ ringacc->num_rings = data->num_rings;
+ ringacc->tisci = data->tisci;
+ ringacc->tisci_dev_id = data->tisci_dev_id;
+
+ mutex_init(&ringacc->req_lock);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ringrt");
+ base_rt = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base_rt))
+ return ERR_CAST(base_rt);
+
+ ringacc->rings = devm_kzalloc(dev,
+ sizeof(*ringacc->rings) *
+ ringacc->num_rings * 2,
+ GFP_KERNEL);
+ ringacc->rings_inuse = devm_kcalloc(dev,
+ BITS_TO_LONGS(ringacc->num_rings),
+ sizeof(unsigned long), GFP_KERNEL);
+
+ if (!ringacc->rings || !ringacc->rings_inuse)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ringacc->num_rings; i++) {
+ struct k3_ring *ring = &ringacc->rings[i];
+
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+
+ ring = &ringacc->rings[ringacc->num_rings + i];
+ ring->rt = base_rt + K3_DMARING_RT_REGS_STEP * i +
+ K3_DMARING_RT_REGS_REVERSE_OFS;
+ ring->parent = ringacc;
+ ring->ring_id = i;
+ ring->proxy_id = K3_RINGACC_PROXY_NOT_USED;
+ ring->flags = K3_RING_FLAG_REVERSE;
+ }
+
+ ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops;
+
+ dev_info(dev, "Number of rings: %u\n", ringacc->num_rings);
+
+ return ringacc;
+}
+EXPORT_SYMBOL_GPL(k3_ringacc_dmarings_init);
+
static int k3_ringacc_probe(struct platform_device *pdev)
{
const struct ringacc_match_data *match_data;
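
Because the forward DMA ring keeps no software-visible read index, k3_dmaring_fwd_pop() rebuilds it on every pop from the write index and the current occupancy, wrapping at the ring size. A worked example with a 16-element ring:

    /*   windex = 5, occ = 3  ->  elem_idx = 5 - 3        = 2
     *   windex = 1, occ = 4  ->  elem_idx = 16 - (4 - 1) = 13  (wrapped)
     */
    u32 elem_idx = (windex >= occ) ? windex - occ
                                   : size - (occ - windex);
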
diff --git a/drivers/soc/ti/k3-socinfo.c b/drivers/soc/ti/k3-socinfo.c
index bbbc2d2b7091..fd91129de6e5 100644
--- a/drivers/soc/ti/k3-socinfo.c
+++ b/drivers/soc/ti/k3-socinfo.c
@@ -40,6 +40,7 @@ static const struct k3_soc_id {
{ 0xBB5A, "AM65X" },
{ 0xBB64, "J721E" },
{ 0xBB6D, "J7200" },
+ { 0xBB38, "AM64X" }
};
static int
diff --git a/drivers/soc/ti/knav_dma.c b/drivers/soc/ti/knav_dma.c
index 8c863ecb1c60..7b5cb5d48f7d 100644
--- a/drivers/soc/ti/knav_dma.c
+++ b/drivers/soc/ti/knav_dma.c
@@ -500,7 +500,7 @@ EXPORT_SYMBOL_GPL(knav_dma_open_channel);
/**
* knav_dma_close_channel() - Destroy a dma channel
*
- * channel: dma channel handle
+ * @channel: dma channel handle
*
*/
void knav_dma_close_channel(void *channel)
@@ -749,8 +749,9 @@ static int knav_dma_probe(struct platform_device *pdev)
pm_runtime_enable(kdev->dev);
ret = pm_runtime_get_sync(kdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(kdev->dev);
dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
- return ret;
+ goto err_pm_disable;
}
/* Initialise all packet dmas */
@@ -764,7 +765,8 @@ static int knav_dma_probe(struct platform_device *pdev)
if (list_empty(&kdev->list)) {
dev_err(dev, "no valid dma instance\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_sync;
}
debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
@@ -772,6 +774,13 @@ static int knav_dma_probe(struct platform_device *pdev)
device_ready = true;
return ret;
+
+err_put_sync:
+ pm_runtime_put_sync(kdev->dev);
+err_pm_disable:
+ pm_runtime_disable(kdev->dev);
+
+ return ret;
}
static int knav_dma_remove(struct platform_device *pdev)
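
pm_runtime_get_sync() raises the device usage count even when it fails, so a failing probe has to drop that reference with pm_runtime_put_noidle() and undo pm_runtime_enable(); the new labels also release the reference on the no-valid-instance path. The canonical shape of the pattern:

    pm_runtime_enable(dev);
    ret = pm_runtime_get_sync(dev);
    if (ret < 0) {
            pm_runtime_put_noidle(dev);     /* get_sync took a ref even on error */
            pm_runtime_disable(dev);
            return ret;
    }
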
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index a460f201bf8e..2e521f1eda96 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(knav_qmss_device_ready);
/**
 * knav_queue_notify: qmss queue notifier call
*
- * @inst: qmss queue instance like accumulator
+ * @inst: - qmss queue instance like accumulator
*/
void knav_queue_notify(struct knav_queue_inst *inst)
{
@@ -511,10 +511,10 @@ static int knav_queue_flush(struct knav_queue *qh)
/**
* knav_queue_open() - open a hardware queue
- * @name - name to give the queue handle
- * @id - desired queue number if any or specifes the type
+ * @name: - name to give the queue handle
+ * @id: - desired queue number if any or specifies the type
* of queue
- * @flags - the following flags are applicable to queues:
+ * @flags: - the following flags are applicable to queues:
* KNAV_QUEUE_SHARED - allow the queue to be shared. Queues are
* exclusive by default.
* Subsequent attempts to open a shared queue should
@@ -545,7 +545,7 @@ EXPORT_SYMBOL_GPL(knav_queue_open);
/**
* knav_queue_close() - close a hardware queue handle
- * @qh - handle to close
+ * @qhandle: - handle to close
*/
void knav_queue_close(void *qhandle)
{
@@ -572,9 +572,9 @@ EXPORT_SYMBOL_GPL(knav_queue_close);
/**
* knav_queue_device_control() - Perform control operations on a queue
- * @qh - queue handle
- * @cmd - control commands
- * @arg - command argument
+ * @qhandle: - queue handle
+ * @cmd: - control commands
+ * @arg: - command argument
*
* Returns 0 on success, errno otherwise.
*/
@@ -623,10 +623,10 @@ EXPORT_SYMBOL_GPL(knav_queue_device_control);
/**
* knav_queue_push() - push data (or descriptor) to the tail of a queue
- * @qh - hardware queue handle
- * @data - data to push
- * @size - size of data to push
- * @flags - can be used to pass additional information
+ * @qhandle: - hardware queue handle
+ * @dma: - DMA data to push
+ * @size: - size of data to push
+ * @flags: - can be used to pass additional information
*
* Returns 0 on success, errno otherwise.
*/
@@ -646,8 +646,8 @@ EXPORT_SYMBOL_GPL(knav_queue_push);
/**
* knav_queue_pop() - pop data (or descriptor) from the head of a queue
- * @qh - hardware queue handle
- * @size - (optional) size of the data pop'ed.
+ * @qhandle: - hardware queue handle
+ * @size: - (optional) size of the data popped.
*
* Returns a DMA address on success, 0 on failure.
*/
@@ -746,9 +746,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_dma_to_virt);
/**
* knav_pool_create() - Create a pool of descriptors
- * @name - name to give the pool handle
- * @num_desc - numbers of descriptors in the pool
- * @region_id - QMSS region id from which the descriptors are to be
+ * @name: - name to give the pool handle
+ * @num_desc: - number of descriptors in the pool
+ * @region_id: - QMSS region id from which the descriptors are to be
* allocated.
*
* Returns a pool handle on success.
@@ -856,7 +856,7 @@ EXPORT_SYMBOL_GPL(knav_pool_create);
/**
* knav_pool_destroy() - Free a pool of descriptors
- * @pool - pool handle
+ * @ph: - pool handle
*/
void knav_pool_destroy(void *ph)
{
@@ -884,7 +884,7 @@ EXPORT_SYMBOL_GPL(knav_pool_destroy);
/**
* knav_pool_desc_get() - Get a descriptor from the pool
- * @pool - pool handle
+ * @ph: - pool handle
*
* Returns descriptor from the pool.
*/
@@ -905,7 +905,8 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_get);
/**
* knav_pool_desc_put() - return a descriptor to the pool
- * @pool - pool handle
+ * @ph: - pool handle
+ * @desc: - virtual address
*/
void knav_pool_desc_put(void *ph, void *desc)
{
@@ -918,11 +919,11 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_put);
/**
* knav_pool_desc_map() - Map descriptor for DMA transfer
- * @pool - pool handle
- * @desc - address of descriptor to map
- * @size - size of descriptor to map
- * @dma - DMA address return pointer
- * @dma_sz - adjusted return pointer
+ * @ph: - pool handle
+ * @desc: - address of descriptor to map
+ * @size: - size of descriptor to map
+ * @dma: - DMA address return pointer
+ * @dma_sz: - adjusted return pointer
*
* Returns 0 on success, errno otherwise.
*/
@@ -945,9 +946,9 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_map);
/**
* knav_pool_desc_unmap() - Unmap descriptor after DMA transfer
- * @pool - pool handle
- * @dma - DMA address of descriptor to unmap
- * @dma_sz - size of descriptor to unmap
+ * @ph: - pool handle
+ * @dma: - DMA address of descriptor to unmap
+ * @dma_sz: - size of descriptor to unmap
*
* Returns descriptor address on success, Use IS_ERR_OR_NULL() to identify
* error values on return.
@@ -968,7 +969,7 @@ EXPORT_SYMBOL_GPL(knav_pool_desc_unmap);
/**
* knav_pool_count() - Get the number of descriptors in pool.
- * @pool - pool handle
+ * @ph: - pool handle
* Returns number of elements in the pool.
*/
int knav_pool_count(void *ph)
@@ -1307,12 +1308,11 @@ static int knav_setup_queue_pools(struct knav_device *kdev,
struct device_node *queue_pools)
{
struct device_node *type, *range;
- int ret;
for_each_child_of_node(queue_pools, type) {
for_each_child_of_node(type, range) {
- ret = knav_setup_queue_range(kdev, range);
/* return value ignored, we init the rest... */
+ knav_setup_queue_range(kdev, range);
}
}
@@ -1784,6 +1784,7 @@ static int knav_queue_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
dev_err(dev, "Failed to enable QMSS\n");
return ret;
}
@@ -1851,9 +1852,10 @@ static int knav_queue_probe(struct platform_device *pdev)
if (ret)
goto err;
- regions = of_get_child_by_name(node, "descriptor-regions");
+ regions = of_get_child_by_name(node, "descriptor-regions");
if (!regions) {
dev_err(dev, "descriptor-regions not specified\n");
+ ret = -ENODEV;
goto err;
}
ret = knav_queue_setup_regions(kdev, regions);
diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c
index 980b04c38fd9..77f0051358f1 100644
--- a/drivers/soc/ti/omap_prm.c
+++ b/drivers/soc/ti/omap_prm.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -14,6 +15,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>
#include <linux/delay.h>
@@ -41,6 +43,7 @@ struct omap_prm_domain {
u16 pwrstst;
const struct omap_prm_domain_map *cap;
u32 pwrstctrl_saved;
+ unsigned int uses_pm_clk:1;
};
struct omap_rst_map {
@@ -121,6 +124,16 @@ static const struct omap_prm_domain_map omap_prm_onoff_noauto = {
.statechange = 1,
};
+static const struct omap_prm_domain_map omap_prm_alwon = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE),
+};
+
+static const struct omap_prm_domain_map omap_prm_reton = {
+ .usable_modes = BIT(OMAP_PRMD_ON_ACTIVE) | BIT(OMAP_PRMD_RETENTION),
+ .statechange = 1,
+ .logicretstate = 1,
+};
+
static const struct omap_rst_map rst_map_0[] = {
{ .rst = 0, .st = 0 },
{ .rst = -1 },
@@ -140,39 +153,237 @@ static const struct omap_rst_map rst_map_012[] = {
};
static const struct omap_prm_data omap4_prm_data[] = {
- { .name = "tesla", .base = 0x4a306400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "mpu", .base = 0x4a306300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "tesla", .base = 0x4a306400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
{
.name = "abe", .base = 0x4a306500,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_all,
},
- { .name = "core", .base = 0x4a306700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati", .rstmap = rst_map_012 },
- { .name = "ivahd", .base = 0x4a306f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
- { .name = "device", .base = 0x4a307b00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "always_on_core", .base = 0x4a306600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "core", .base = 0x4a306700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ducati",
+ .rstmap = rst_map_012
+ },
+ {
+ .name = "ivahd", .base = 0x4a306f00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
+ },
+ {
+ .name = "cam", .base = 0x4a307000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dss", .base = 0x4a307100,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
+ },
+ {
+ .name = "gfx", .base = 0x4a307200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "l3init", .base = 0x4a307300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "l4per", .base = 0x4a307400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "cefuse", .base = 0x4a307600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "wkup", .base = 0x4a307700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "emu", .base = 0x4a307900,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "device", .base = 0x4a307b00,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
{ },
};
static const struct omap_prm_data omap5_prm_data[] = {
- { .name = "dsp", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "mpu", .base = 0x4ae06300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "dsp", .base = 0x4ae06400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
{
.name = "abe", .base = 0x4ae06500,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_nooff,
},
- { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu", .rstmap = rst_map_012 },
- { .name = "iva", .base = 0x4ae07200, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
- { .name = "device", .base = 0x4ae07c00, .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "coreaon", .base = 0x4ae06600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "core", .base = 0x4ae06700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu",
+ .rstmap = rst_map_012
+ },
+ {
+ .name = "iva", .base = 0x4ae07200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012
+ },
+ {
+ .name = "cam", .base = 0x4ae07300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "dss", .base = 0x4ae07400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact
+ },
+ {
+ .name = "gpu", .base = 0x4ae07500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "l3init", .base = 0x4ae07600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton
+ },
+ {
+ .name = "custefuse", .base = 0x4ae07700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "wkupaon", .base = 0x4ae07800,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon
+ },
+ {
+ .name = "emu", .base = 0x4ae07a00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto
+ },
+ {
+ .name = "device", .base = 0x4ae07c00,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
{ },
};
static const struct omap_prm_data dra7_prm_data[] = {
- { .name = "dsp1", .base = 0x4ae06400, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "ipu", .base = 0x4ae06500, .rstctrl = 0x10, .rstst = 0x14, .clkdm_name = "ipu1", .rstmap = rst_map_012 },
- { .name = "core", .base = 0x4ae06700, .rstctrl = 0x210, .rstst = 0x214, .clkdm_name = "ipu2", .rstmap = rst_map_012 },
- { .name = "iva", .base = 0x4ae06f00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012 },
- { .name = "dsp2", .base = 0x4ae07b00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve1", .base = 0x4ae07b40, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve2", .base = 0x4ae07b80, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve3", .base = 0x4ae07bc0, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
- { .name = "eve4", .base = 0x4ae07c00, .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01 },
+ {
+ .name = "mpu", .base = 0x4ae06300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_reton,
+ },
+ {
+ .name = "dsp1", .base = 0x4ae06400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01,
+ },
+ {
+ .name = "ipu", .base = 0x4ae06500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ .clkdm_name = "ipu1"
+ },
+ {
+ .name = "coreaon", .base = 0x4ae06628,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "core", .base = 0x4ae06700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x210, .rstst = 0x214, .rstmap = rst_map_012,
+ .clkdm_name = "ipu2"
+ },
+ {
+ .name = "iva", .base = 0x4ae06f00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ },
+ {
+ .name = "cam", .base = 0x4ae07000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dss", .base = 0x4ae07100,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "gpu", .base = 0x4ae07200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "l3init", .base = 0x4ae07300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_012,
+ .clkdm_name = "pcie"
+ },
+ {
+ .name = "l4per", .base = 0x4ae07400,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "custefuse", .base = 0x4ae07600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "wkupaon", .base = 0x4ae07724,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "emu", .base = 0x4ae07900,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "dsp2", .base = 0x4ae07b00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve1", .base = 0x4ae07b40,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve2", .base = 0x4ae07b80,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve3", .base = 0x4ae07bc0,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "eve4", .base = 0x4ae07c00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_01
+ },
+ {
+ .name = "rtc", .base = 0x4ae07c60,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "vpe", .base = 0x4ae07c80,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
{ },
};
@@ -187,14 +398,40 @@ static const struct omap_rst_map am3_wkup_rst_map[] = {
};
static const struct omap_prm_data am3_prm_data[] = {
- { .name = "per", .base = 0x44e00c00, .rstctrl = 0x0, .rstmap = am3_per_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp" },
- { .name = "wkup", .base = 0x44e00d00, .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
- { .name = "device", .base = 0x44e00f00, .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "per", .base = 0x44e00c00,
+ .pwrstctrl = 0xc, .pwrstst = 0x8, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x0, .rstmap = am3_per_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL, .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44e00d00,
+ .pwrstctrl = 0x4, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x0, .rstst = 0xc, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "mpu", .base = 0x44e00e00,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
+ .name = "device", .base = 0x44e00f00,
+ .rstctrl = 0x0, .rstst = 0x8, .rstmap = rst_map_01,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "rtc", .base = 0x44e01000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
{
.name = "gfx", .base = 0x44e01100,
.pwrstctrl = 0, .pwrstst = 0x10, .dmap = &omap_prm_noinact,
.rstctrl = 0x4, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
+ {
+ .name = "cefuse", .base = 0x44e01200,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
{ },
};
@@ -211,13 +448,43 @@ static const struct omap_rst_map am4_device_rst_map[] = {
static const struct omap_prm_data am4_prm_data[] = {
{
+ .name = "mpu", .base = 0x44df0300,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ },
+ {
.name = "gfx", .base = 0x44df0400,
.pwrstctrl = 0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
.rstctrl = 0x10, .rstst = 0x14, .rstmap = rst_map_0, .clkdm_name = "gfx_l3",
},
- { .name = "per", .base = 0x44df0800, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map, .clkdm_name = "pruss_ocp" },
- { .name = "wkup", .base = 0x44df2000, .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map, .flags = OMAP_PRM_HAS_NO_CLKDM },
- { .name = "device", .base = 0x44df4000, .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map, .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM },
+ {
+ .name = "rtc", .base = 0x44df0500,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "tamper", .base = 0x44df0600,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ },
+ {
+ .name = "cefuse", .base = 0x44df0700,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_onoff_noauto,
+ },
+ {
+ .name = "per", .base = 0x44df0800,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_noinact,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = am4_per_rst_map,
+ .clkdm_name = "pruss_ocp"
+ },
+ {
+ .name = "wkup", .base = 0x44df2000,
+ .pwrstctrl = 0x0, .pwrstst = 0x4, .dmap = &omap_prm_alwon,
+ .rstctrl = 0x10, .rstst = 0x14, .rstmap = am3_wkup_rst_map,
+ .flags = OMAP_PRM_HAS_NO_CLKDM
+ },
+ {
+ .name = "device", .base = 0x44df4000,
+ .rstctrl = 0x0, .rstst = 0x4, .rstmap = am4_device_rst_map,
+ .flags = OMAP_PRM_HAS_RSTCTRL | OMAP_PRM_HAS_NO_CLKDM
+ },
{ },
};
@@ -325,6 +592,38 @@ static int omap_prm_domain_power_off(struct generic_pm_domain *domain)
return 0;
}
+/*
+ * Note that ti-sysc already manages the module clocks separately so
+ * no need to manage those. Interconnect instances need clocks managed
+ * for simple-pm-bus.
+ */
+static int omap_prm_domain_attach_clock(struct device *dev,
+ struct omap_prm_domain *prmd)
+{
+ struct device_node *np = dev->of_node;
+ int error;
+
+ if (!of_device_is_compatible(np, "simple-pm-bus"))
+ return 0;
+
+ if (!of_property_read_bool(np, "clocks"))
+ return 0;
+
+ error = pm_clk_create(dev);
+ if (error)
+ return error;
+
+ error = of_pm_clk_add_clks(dev);
+ if (error < 0) {
+ pm_clk_destroy(dev);
+ return error;
+ }
+
+ prmd->uses_pm_clk = 1;
+
+ return 0;
+}
+
static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
@@ -349,6 +648,10 @@ static int omap_prm_domain_attach_dev(struct generic_pm_domain *domain,
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
+ ret = omap_prm_domain_attach_clock(dev, prmd);
+ if (ret)
+ return ret;
+
return 0;
}
@@ -356,7 +659,11 @@ static void omap_prm_domain_detach_dev(struct generic_pm_domain *domain,
struct device *dev)
{
struct generic_pm_domain_data *genpd_data;
+ struct omap_prm_domain *prmd;
+ prmd = genpd_to_prm_domain(domain);
+ if (prmd->uses_pm_clk)
+ pm_clk_destroy(dev);
genpd_data = dev_gpd_data(dev);
genpd_data->data = NULL;
}
@@ -393,6 +700,7 @@ static int omap_prm_domain_init(struct device *dev, struct omap_prm *prm)
prmd->pd.power_off = omap_prm_domain_power_off;
prmd->pd.attach_dev = omap_prm_domain_attach_dev;
prmd->pd.detach_dev = omap_prm_domain_detach_dev;
+ prmd->pd.flags = GENPD_FLAG_PM_CLK;
pm_genpd_init(&prmd->pd, NULL, true);
error = of_genpd_add_provider_simple(np, &prmd->pd);
@@ -484,6 +792,10 @@ static int omap_reset_deassert(struct reset_controller_dev *rcdev,
struct ti_prm_platform_data *pdata = dev_get_platdata(reset->dev);
int ret = 0;
+ /* Nothing to do if the reset is already deasserted */
+ if (!omap_reset_status(rcdev, id))
+ return 0;
+
has_rstst = reset->prm->data->rstst ||
(reset->prm->data->flags & OMAP_PRM_HAS_RSTST);
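The attach helper pairs with the GENPD_FLAG_PM_CLK flag set in omap_prm_domain_init() further down: clocks registered through pm_clk are gated by the genpd core on runtime suspend and restored on resume. A hedged sketch of that registration step in isolation (function name is illustrative):

#include <linux/pm_clock.h>

static int example_register_dev_clocks(struct device *dev)
{
    int error;

    error = pm_clk_create(dev);         /* per-device clock list */
    if (error)
        return error;

    error = of_pm_clk_add_clks(dev);    /* add every DT "clocks" entry */
    if (error < 0) {
        pm_clk_destroy(dev);
        return error;
    }

    return 0;    /* a GENPD_FLAG_PM_CLK domain now gates these clocks */
}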
diff --git a/drivers/soc/ti/pm33xx.c b/drivers/soc/ti/pm33xx.c
index d2f5e7001a93..64f3e3105540 100644
--- a/drivers/soc/ti/pm33xx.c
+++ b/drivers/soc/ti/pm33xx.c
@@ -19,6 +19,7 @@
#include <linux/of_address.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/rtc.h>
#include <linux/rtc/rtc-omap.h>
#include <linux/sizes.h>
@@ -135,13 +136,11 @@ static int am33xx_push_sram_idle(void)
static int am33xx_do_sram_idle(u32 wfi_flags)
{
- int ret = 0;
-
if (!m3_ipc || !pm_ops)
return 0;
if (wfi_flags & WFI_FLAG_WAKE_M3)
- ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
+ m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_IDLE);
return pm_ops->cpu_suspend(am33xx_do_wfi_sram, wfi_flags);
}
@@ -555,16 +554,26 @@ static int am33xx_pm_probe(struct platform_device *pdev)
suspend_wfi_flags |= WFI_FLAG_WAKE_M3;
#endif /* CONFIG_SUSPEND */
+ pm_runtime_enable(dev);
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ goto err_pm_runtime_disable;
+ }
+
ret = pm_ops->init(am33xx_do_sram_idle);
if (ret) {
dev_err(dev, "Unable to call core pm init!\n");
ret = -ENODEV;
- goto err_put_wkup_m3_ipc;
+ goto err_pm_runtime_put;
}
return 0;
-err_put_wkup_m3_ipc:
+err_pm_runtime_put:
+ pm_runtime_put_sync(dev);
+err_pm_runtime_disable:
+ pm_runtime_disable(dev);
wkup_m3_ipc_put(m3_ipc);
err_free_sram:
am33xx_pm_free_sram();
@@ -574,6 +583,8 @@ err_free_sram:
static int am33xx_pm_remove(struct platform_device *pdev)
{
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (pm_ops->deinit)
pm_ops->deinit();
suspend_set_ops(NULL);
diff --git a/drivers/soc/ti/pruss.c b/drivers/soc/ti/pruss.c
index cc0b4ad7a3d3..5d6e7132a5c4 100644
--- a/drivers/soc/ti/pruss.c
+++ b/drivers/soc/ti/pruss.c
@@ -126,8 +126,6 @@ static int pruss_clk_init(struct pruss *pruss, struct device_node *cfg_node)
int ret = 0;
data = of_device_get_match_data(dev);
- if (IS_ERR(data))
- return -ENODEV;
clks_np = of_get_child_by_name(cfg_node, "clocks");
if (!clks_np) {
@@ -175,10 +173,6 @@ static int pruss_probe(struct platform_device *pdev)
const char *mem_names[PRUSS_MEM_MAX] = { "dram0", "dram1", "shrdram2" };
data = of_device_get_match_data(&pdev->dev);
- if (IS_ERR(data)) {
- dev_err(dev, "missing private data\n");
- return -ENODEV;
- }
ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
if (ret) {
diff --git a/drivers/soc/ti/ti_sci_inta_msi.c b/drivers/soc/ti/ti_sci_inta_msi.c
index 0eb9462f609e..a1d9c027022a 100644
--- a/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/drivers/soc/ti/ti_sci_inta_msi.c
@@ -89,6 +89,18 @@ static int ti_sci_inta_msi_alloc_descs(struct device *dev,
list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
count++;
}
+ for (i = 0; i < res->desc[set].num_sec; i++) {
+ msi_desc = alloc_msi_entry(dev, 1, NULL);
+ if (!msi_desc) {
+ ti_sci_inta_msi_free_descs(dev);
+ return -ENOMEM;
+ }
+
+ msi_desc->inta.dev_index = res->desc[set].start_sec + i;
+ INIT_LIST_HEAD(&msi_desc->list);
+ list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
+ count++;
+ }
}
return count;
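The added loop duplicates the primary-range loop above it for the resource's secondary range (start_sec/num_sec). A hypothetical factoring of the shared body, not part of the patch, just to show the shape:

#include <linux/msi.h>

static int example_alloc_range(struct device *dev, u16 start, u16 num)
{
    struct msi_desc *msi_desc;
    int i;

    for (i = 0; i < num; i++) {
        msi_desc = alloc_msi_entry(dev, 1, NULL);
        if (!msi_desc)
            return -ENOMEM;    /* caller unwinds earlier entries */

        msi_desc->inta.dev_index = start + i;
        INIT_LIST_HEAD(&msi_desc->list);
        list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
    }

    return num;
}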
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index e9ece45d7a33..c3e2161df732 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -218,6 +218,7 @@ static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
/* Public functions */
/**
* wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @mem_type: memory type value read directly from emif
*
* wkup_m3 must know what memory type is in use to properly suspend
@@ -230,6 +231,7 @@ static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
/**
* wkup_m3_set_resume_address - Pass wkup_m3 resume address
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @addr: Physical address from which resume code should execute
*/
static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
@@ -239,6 +241,7 @@ static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
/**
* wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns code representing the status of a low power mode transition.
* 0 - Successful transition
@@ -260,6 +263,7 @@ static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_prepare_low_power - Request preparation for transition to
* low power state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
* @state: A kernel suspend state to enter, either MEM or STANDBY
*
* Returns 0 if preparation was successful, otherwise returns error code
@@ -315,6 +319,7 @@ static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
/**
* wkup_m3_finish_low_power - Return m3 to reset state
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*
* Returns 0 if reset was successful, otherwise returns error code
*/
@@ -362,8 +367,7 @@ static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
/**
* wkup_m3_set_rtc_only - Set the rtc_only flag
- * @wkup_m3_wakeup: struct wkup_m3_wakeup_src * gets assigned the
- * wakeup src value
+ * @m3_ipc: Pointer to wkup_m3_ipc context
*/
static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
{
diff --git a/drivers/soc/xilinx/Kconfig b/drivers/soc/xilinx/Kconfig
index 646512d7276f..0b1708dae361 100644
--- a/drivers/soc/xilinx/Kconfig
+++ b/drivers/soc/xilinx/Kconfig
@@ -4,6 +4,7 @@ menu "Xilinx SoC drivers"
config XILINX_VCU
tristate "Xilinx VCU logicoreIP Init"
depends on HAS_IOMEM
+ select REGMAP_MMIO
help
Provides the driver to enable and disable the isolation between the
processing system and programmable logic part by using the logicoreIP
diff --git a/drivers/soc/xilinx/xlnx_vcu.c b/drivers/soc/xilinx/xlnx_vcu.c
index a3aa40996f13..14daad4efc58 100644
--- a/drivers/soc/xilinx/xlnx_vcu.c
+++ b/drivers/soc/xilinx/xlnx_vcu.c
@@ -10,39 +10,12 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
-
-/* Address map for different registers implemented in the VCU LogiCORE IP. */
-#define VCU_ECODER_ENABLE 0x00
-#define VCU_DECODER_ENABLE 0x04
-#define VCU_MEMORY_DEPTH 0x08
-#define VCU_ENC_COLOR_DEPTH 0x0c
-#define VCU_ENC_VERTICAL_RANGE 0x10
-#define VCU_ENC_FRAME_SIZE_X 0x14
-#define VCU_ENC_FRAME_SIZE_Y 0x18
-#define VCU_ENC_COLOR_FORMAT 0x1c
-#define VCU_ENC_FPS 0x20
-#define VCU_MCU_CLK 0x24
-#define VCU_CORE_CLK 0x28
-#define VCU_PLL_BYPASS 0x2c
-#define VCU_ENC_CLK 0x30
-#define VCU_PLL_CLK 0x34
-#define VCU_ENC_VIDEO_STANDARD 0x38
-#define VCU_STATUS 0x3c
-#define VCU_AXI_ENC_CLK 0x40
-#define VCU_AXI_DEC_CLK 0x44
-#define VCU_AXI_MCU_CLK 0x48
-#define VCU_DEC_VIDEO_STANDARD 0x4c
-#define VCU_DEC_FRAME_SIZE_X 0x50
-#define VCU_DEC_FRAME_SIZE_Y 0x54
-#define VCU_DEC_FPS 0x58
-#define VCU_BUFFER_B_FRAME 0x5c
-#define VCU_WPP_EN 0x60
-#define VCU_PLL_CLK_DEC 0x64
-#define VCU_GASKET_INIT 0x74
-#define VCU_GASKET_VALUE 0x03
+#include <linux/regmap.h>
/* vcu slcr registers, bitmask and shift */
#define VCU_PLL_CTRL 0x24
@@ -106,11 +79,20 @@ struct xvcu_device {
struct device *dev;
struct clk *pll_ref;
struct clk *aclk;
- void __iomem *logicore_reg_ba;
+ struct regmap *logicore_reg_ba;
void __iomem *vcu_slcr_ba;
u32 coreclk;
};
+static struct regmap_config vcu_settings_regmap_config = {
+ .name = "regmap",
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0xfff,
+ .cache_type = REGCACHE_NONE,
+};
+
/**
* struct xvcu_pll_cfg - Helper data
* @fbdiv: The integer portion of the feedback divider to the PLL
@@ -300,10 +282,12 @@ static int xvcu_set_vcu_pll_info(struct xvcu_device *xvcu)
int ret, i;
const struct xvcu_pll_cfg *found = NULL;
- inte = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK);
- deci = xvcu_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC);
- coreclk = xvcu_read(xvcu->logicore_reg_ba, VCU_CORE_CLK) * MHZ;
- mcuclk = xvcu_read(xvcu->logicore_reg_ba, VCU_MCU_CLK) * MHZ;
+ regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK, &inte);
+ regmap_read(xvcu->logicore_reg_ba, VCU_PLL_CLK_DEC, &deci);
+ regmap_read(xvcu->logicore_reg_ba, VCU_CORE_CLK, &coreclk);
+ coreclk *= MHZ;
+ regmap_read(xvcu->logicore_reg_ba, VCU_MCU_CLK, &mcuclk);
+ mcuclk *= MHZ;
if (!mcuclk || !coreclk) {
dev_err(xvcu->dev, "Invalid mcu and core clock data\n");
return -EINVAL;
@@ -498,6 +482,7 @@ static int xvcu_probe(struct platform_device *pdev)
{
struct resource *res;
struct xvcu_device *xvcu;
+ void __iomem *regs;
int ret;
xvcu = devm_kzalloc(&pdev->dev, sizeof(*xvcu), GFP_KERNEL);
@@ -518,17 +503,32 @@ static int xvcu_probe(struct platform_device *pdev)
return -ENOMEM;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "logicore");
- if (!res) {
- dev_err(&pdev->dev, "get logicore memory resource failed.\n");
- return -ENODEV;
- }
+ xvcu->logicore_reg_ba =
+ syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_info(&pdev->dev,
+ "could not find xlnx,vcu-settings: trying direct register access\n");
+
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "logicore");
+ if (!res) {
+ dev_err(&pdev->dev, "get logicore memory resource failed.\n");
+ return -ENODEV;
+ }
- xvcu->logicore_reg_ba = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!xvcu->logicore_reg_ba) {
- dev_err(&pdev->dev, "logicore register mapping failed.\n");
- return -ENOMEM;
+ regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!regs) {
+ dev_err(&pdev->dev, "logicore register mapping failed.\n");
+ return -ENOMEM;
+ }
+
+ xvcu->logicore_reg_ba =
+ devm_regmap_init_mmio(&pdev->dev, regs,
+ &vcu_settings_regmap_config);
+ if (IS_ERR(xvcu->logicore_reg_ba)) {
+ dev_err(&pdev->dev, "failed to init regmap\n");
+ return PTR_ERR(xvcu->logicore_reg_ba);
+ }
}
xvcu->aclk = devm_clk_get(&pdev->dev, "aclk");
@@ -560,7 +560,7 @@ static int xvcu_probe(struct platform_device *pdev)
* Bit 0 : Gasket isolation
* Bit 1 : put VCU out of reset
*/
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, VCU_GASKET_VALUE);
/* Do the PLL Settings based on the ref clk,core and mcu clk freq */
ret = xvcu_set_pll(xvcu);
@@ -571,8 +571,6 @@ static int xvcu_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, xvcu);
- dev_info(&pdev->dev, "%s: Probed successfully\n", __func__);
-
return 0;
error_pll_ref:
@@ -599,7 +597,7 @@ static int xvcu_remove(struct platform_device *pdev)
return -ENODEV;
/* Add the Gasket isolation and put the VCU in reset. */
- xvcu_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
+ regmap_write(xvcu->logicore_reg_ba, VCU_GASKET_INIT, 0);
clk_disable_unprepare(xvcu->pll_ref);
clk_disable_unprepare(xvcu->aclk);
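The probe rework prefers a syscon handle shared via the device tree and only falls back to mapping the "logicore" resource privately; either way the rest of the driver talks regmap. A condensed sketch of the lookup-with-fallback shape (the compatible string and names here are illustrative):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config example_cfg = {
    .reg_bits = 32,
    .val_bits = 32,
    .reg_stride = 4,
    .max_register = 0xfff,
};

static struct regmap *example_get_regmap(struct platform_device *pdev,
                                         void __iomem *regs)
{
    struct regmap *map;

    /* prefer a syscon shared with other drivers ... */
    map = syscon_regmap_lookup_by_compatible("vendor,example-settings");
    if (!IS_ERR(map))
        return map;

    /* ... else wrap our own MMIO window in a private regmap */
    return devm_regmap_init_mmio(&pdev->dev, regs, &example_cfg);
}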
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 809bfff3690a..cbc4c28c1541 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -189,24 +189,26 @@ static int altera_spi_txrx(struct spi_master *master,
/* send the first byte */
altera_spi_tx_word(hw);
- } else {
- while (hw->count < hw->len) {
- altera_spi_tx_word(hw);
- for (;;) {
- altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
- if (val & ALTERA_SPI_STATUS_RRDY_MSK)
- break;
+ return 1;
+ }
+
+ while (hw->count < hw->len) {
+ altera_spi_tx_word(hw);
- cpu_relax();
- }
+ for (;;) {
+ altr_spi_readl(hw, ALTERA_SPI_STATUS, &val);
+ if (val & ALTERA_SPI_STATUS_RRDY_MSK)
+ break;
- altera_spi_rx_word(hw);
+ cpu_relax();
}
- spi_finalize_current_transfer(master);
+
+ altera_spi_rx_word(hw);
}
+ spi_finalize_current_transfer(master);
- return t->len;
+ return 0;
}
static irqreturn_t altera_spi_irq(int irq, void *dev)
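The return-value change is the real fix in this hunk: for a transfer_one() style handler, a positive return means the transfer is still in flight and spi_finalize_current_transfer() will be called later (the IRQ path), 0 means it already completed, and the old t->len satisfied neither contract. Sketched with hypothetical helpers:

static int example_transfer_one(struct spi_master *master,
                                struct spi_device *spi,
                                struct spi_transfer *t)
{
    if (example_irq_driven(master)) {    /* hypothetical */
        example_start_hw(t);             /* hypothetical */
        return 1;    /* in flight; the IRQ handler finalizes */
    }

    example_pio_transfer(t);             /* hypothetical */
    spi_finalize_current_transfer(master);
    return 0;        /* done; the core does not wait */
}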
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 512e925d5ea4..881f645661cc 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -83,6 +83,7 @@ struct spi_geni_master {
spinlock_t lock;
int irq;
bool cs_flag;
+ bool abort_failed;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
- if (!time_left)
+ if (!time_left) {
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+ /*
+ * No need for a lock since SPI core has a lock and we never
+ * access this from an interrupt.
+ */
+ mas->abort_failed = true;
+ }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+ struct geni_se *se = &mas->se;
+ u32 m_irq, m_irq_en;
+
+ if (!mas->abort_failed)
+ return false;
+
+ /*
+ * The only known case where a transfer times out and then a cancel
+ * times out then an abort times out is if something is blocking our
+ * interrupt handler from running. Avoid starting any new transfers
+ * until that sorts itself out.
+ */
+ spin_lock_irq(&mas->lock);
+ m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+ m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+ spin_unlock_irq(&mas->lock);
+
+ if (m_irq & m_irq_en) {
+ dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+ m_irq & m_irq_en);
+ return true;
+ }
+
+ /*
+ * If we're here the problem resolved itself so no need to check more
+ * on future transfers.
+ */
+ mas->abort_failed = false;
+
+ return false;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
if (set_flag == mas->cs_flag)
return;
- mas->cs_flag = set_flag;
-
pm_runtime_get_sync(mas->dev);
+
+ if (spi_geni_is_abort_still_pending(mas)) {
+ dev_err(mas->dev, "Can't set chip select\n");
+ goto exit;
+ }
+
spin_lock_irq(&mas->lock);
+ if (mas->cur_xfer) {
+ dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+ spin_unlock_irq(&mas->lock);
+ goto exit;
+ }
+
+ mas->cs_flag = set_flag;
reinit_completion(&mas->cs_done);
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
spin_unlock_irq(&mas->lock);
time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
- if (!time_left)
+ if (!time_left) {
+ dev_warn(mas->dev, "Timeout setting chip select\n");
handle_fifo_timeout(spi, NULL);
+ }
+exit:
pm_runtime_put(mas->dev);
}
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
+ /* Stop the watermark IRQ if nothing to send */
+ if (!mas->cur_xfer) {
+ writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+ return false;
+ }
+
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
+
+ /* Clear out the FIFO and bail if nowhere to put it */
+ if (!mas->cur_xfer) {
+ for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+ readl(se->base + SE_GENI_RX_FIFOn);
+ return;
+ }
+
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 471dedf3d339..6017209c6d2f 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
/* align packet size with data registers access */
if (spi->cur_bpw > 8)
- fthlv -= (fthlv % 2); /* multiple of 2 */
+ fthlv += (fthlv % 2) ? 1 : 0;
else
- fthlv -= (fthlv % 4); /* multiple of 4 */
+ fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
if (!fthlv)
fthlv = 1;
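The threshold fix changes the alignment direction: the old code rounded the FIFO threshold down to a multiple of the register access width, which could drop it below what the transfer needs, while the new code rounds it up. Both branches reduce to one round-up expression; a plain-C illustration:

/* Align the FIFO threshold up to the register access width. */
static unsigned int align_fthlv(unsigned int fthlv, unsigned int bpw)
{
    unsigned int align = (bpw > 8) ? 2 : 4;

    /* old: fthlv -= fthlv % align;  (round down, can reach 0) */
    fthlv += (align - (fthlv % align)) % align;    /* round up */

    return fthlv ? fthlv : 1;
}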
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 51d7c004fbab..720ab34784c1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1108,6 +1108,7 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
{
struct spi_statistics *statm = &ctlr->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
+ u32 speed_hz = xfer->speed_hz;
unsigned long long ms;
if (spi_controller_is_slave(ctlr)) {
@@ -1116,8 +1117,11 @@ static int spi_transfer_wait(struct spi_controller *ctlr,
return -EINTR;
}
} else {
+ if (!speed_hz)
+ speed_hz = 100000;
+
ms = 8LL * 1000LL * xfer->len;
- do_div(ms, xfer->speed_hz);
+ do_div(ms, speed_hz);
ms += ms + 200; /* some tolerance */
if (ms > UINT_MAX)
@@ -3378,8 +3382,9 @@ int spi_setup(struct spi_device *spi)
if (status)
return status;
- if (!spi->max_speed_hz ||
- spi->max_speed_hz > spi->controller->max_speed_hz)
+ if (spi->controller->max_speed_hz &&
+ (!spi->max_speed_hz ||
+ spi->max_speed_hz > spi->controller->max_speed_hz))
spi->max_speed_hz = spi->controller->max_speed_hz;
mutex_lock(&spi->controller->io_mutex);
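Both spi.c hunks harden against a zero rate: spi_transfer_wait() previously fed xfer->speed_hz straight into do_div(), so a transfer with no rate set would divide by zero, and spi_setup() could "clamp" max_speed_hz against a controller limit of 0. The timeout arithmetic, restated as plain C (the kernel uses do_div() here to stay safe on 32-bit builds):

/* Rough upper bound for a transfer's duration, in milliseconds. */
static unsigned long long xfer_timeout_ms(unsigned int len,
                                          unsigned int speed_hz)
{
    unsigned long long ms;

    if (!speed_hz)
        speed_hz = 100000;    /* assume a slow bus, not div-by-0 */

    ms = 8ULL * 1000ULL * len;    /* bits on the wire, scaled to ms */
    ms /= speed_hz;
    ms += ms + 200;               /* double it, plus 200 ms tolerance */

    return ms;
}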
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 10b4be1f3e78..4789d36ddfd3 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -450,9 +450,9 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
vma_set_anonymous(vma);
}
- if (vma->vm_file)
- fput(vma->vm_file);
- vma->vm_file = asma->file;
+ vma_set_file(vma, asma->file);
+ /* XXX: merge this with the get_file() above if possible */
+ fput(asma->file);
out:
mutex_unlock(&ashmem_mutex);
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index d99231c737fb..80d74cce2a01 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2987,7 +2987,9 @@ static int put_compat_cmd(struct comedi32_cmd_struct __user *cmd32,
v32.chanlist_len = cmd->chanlist_len;
v32.data = ptr_to_compat(cmd->data);
v32.data_len = cmd->data_len;
- return copy_to_user(cmd32, &v32, sizeof(v32));
+ if (copy_to_user(cmd32, &v32, sizeof(v32)))
+ return -EFAULT;
+ return 0;
}
/* Handle 32-bit COMEDI_CMD ioctl. */
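The conversion matters because copy_to_user() returns the number of bytes left uncopied, not an errno, so returning it directly hands the ioctl caller a positive value on a partial copy. The standard idiom, wrapped for illustration:

#include <linux/uaccess.h>

static int example_copy_out(void __user *dst, const void *src, size_t n)
{
    /* copy_to_user() returns bytes NOT copied; fold to -EFAULT */
    if (copy_to_user(dst, src, n))
        return -EFAULT;

    return 0;
}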
diff --git a/drivers/staging/hikey9xx/hisi-spmi-controller.c b/drivers/staging/hikey9xx/hisi-spmi-controller.c
index 861aedd5de48..0d42bc65f39b 100644
--- a/drivers/staging/hikey9xx/hisi-spmi-controller.c
+++ b/drivers/staging/hikey9xx/hisi-spmi-controller.c
@@ -278,21 +278,24 @@ static int spmi_controller_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores) {
dev_err(&pdev->dev, "can not get resource!\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_put_controller;
}
spmi_controller->base = devm_ioremap(&pdev->dev, iores->start,
resource_size(iores));
if (!spmi_controller->base) {
dev_err(&pdev->dev, "can not remap base addr!\n");
- return -EADDRNOTAVAIL;
+ ret = -EADDRNOTAVAIL;
+ goto err_put_controller;
}
ret = of_property_read_u32(pdev->dev.of_node, "spmi-channel",
&spmi_controller->channel);
if (ret) {
dev_err(&pdev->dev, "can not get channel\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_controller;
}
platform_set_drvdata(pdev, spmi_controller);
@@ -309,9 +312,15 @@ static int spmi_controller_probe(struct platform_device *pdev)
ctrl->write_cmd = spmi_write_cmd;
ret = spmi_controller_add(ctrl);
- if (ret)
- dev_err(&pdev->dev, "spmi_add_controller failed with error %d!\n", ret);
+ if (ret) {
+ dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret);
+ goto err_put_controller;
+ }
+
+ return 0;
+err_put_controller:
+ spmi_controller_put(ctrl);
return ret;
}
@@ -320,7 +329,7 @@ static int spmi_del_controller(struct platform_device *pdev)
struct spmi_controller *ctrl = platform_get_drvdata(pdev);
spmi_controller_remove(ctrl);
- kfree(ctrl);
+ spmi_controller_put(ctrl);
return 0;
}
diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
index 52b9fb18c87f..b666cb23e5ca 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c
@@ -1062,26 +1062,6 @@ static const struct v4l2_ctrl_config ctrl_select_isp_version = {
.def = 0,
};
-#if 0 /* #ifdef CONFIG_ION */
-/*
- * Control for ISP ion device fd
- *
- * userspace will open ion device and pass the fd to kernel.
- * this fd will be used to map shared fd to buffer.
- */
-/* V4L2_CID_ATOMISP_ION_DEVICE_FD is not defined */
-static const struct v4l2_ctrl_config ctrl_ion_dev_fd = {
- .ops = &ctrl_ops,
- .id = V4L2_CID_ATOMISP_ION_DEVICE_FD,
- .type = V4L2_CTRL_TYPE_INTEGER,
- .name = "Ion Device Fd",
- .min = -1,
- .max = 1024,
- .step = 1,
- .def = ION_FD_UNSET
-};
-#endif
-
static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd,
struct atomisp_video_pipe *pipe, enum v4l2_buf_type buf_type)
{
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index 18d54f9fd715..ddad5d274ee8 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -588,7 +588,6 @@ static const struct cedrus_variant sun50i_h6_cedrus_variant = {
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
- .quirks = CEDRUS_QUIRK_NO_DMA_OFFSET,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index e61c41853ba2..c96077aaef49 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -33,8 +33,6 @@
#define CEDRUS_CAPABILITY_MPEG2_DEC BIT(3)
#define CEDRUS_CAPABILITY_VP8_DEC BIT(4)
-#define CEDRUS_QUIRK_NO_DMA_OFFSET BIT(0)
-
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
CEDRUS_CODEC_H264,
@@ -168,7 +166,6 @@ struct cedrus_dec_ops {
struct cedrus_variant {
unsigned int capabilities;
- unsigned int quirks;
unsigned int mod_rate;
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index 111cb91f8fc2..e2f2ff609c7e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -224,24 +224,6 @@ int cedrus_hw_probe(struct cedrus_dev *dev)
return ret;
}
- /*
- * The VPU is only able to handle bus addresses so we have to subtract
- * the RAM offset from the physical addresses.
- *
- * This information will eventually be obtained from device-tree.
- *
- * XXX(hch): this has no business in a driver and needs to move
- * to the device tree.
- */
-
-#ifdef PHYS_PFN_OFFSET
- if (!(variant->quirks & CEDRUS_QUIRK_NO_DMA_OFFSET)) {
- ret = dma_direct_set_offset(dev->dev, PHYS_OFFSET, 0, SZ_4G);
- if (ret)
- return ret;
- }
-#endif
-
ret = of_reserved_mem_device_init(dev->dev);
if (ret && ret != -ENODEV) {
dev_err(dev->dev, "Failed to reserve memory\n");
diff --git a/drivers/staging/mt7621-dma/mtk-hsdma.c b/drivers/staging/mt7621-dma/mtk-hsdma.c
index d241349214e7..bc4bb4374313 100644
--- a/drivers/staging/mt7621-dma/mtk-hsdma.c
+++ b/drivers/staging/mt7621-dma/mtk-hsdma.c
@@ -712,7 +712,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
ret = dma_async_device_register(dd);
if (ret) {
dev_err(&pdev->dev, "failed to register dma device\n");
- return ret;
+ goto err_uninit_hsdma;
}
ret = of_dma_controller_register(pdev->dev.of_node,
@@ -728,6 +728,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
err_unregister:
dma_async_device_unregister(dd);
+err_uninit_hsdma:
+ mtk_hsdma_uninit(hsdma);
return ret;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 44e15d7fb2f0..66d6f1d06f21 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -46,60 +46,83 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-struct xcopy_dev_search_info {
- const unsigned char *dev_wwn;
- struct se_device *found_dev;
-};
-
+/**
+ * target_xcopy_locate_se_dev_e4_iter - compare XCOPY NAA device identifiers
+ *
+ * @se_dev: device being considered for match
+ * @dev_wwn: XCOPY requested NAA dev_wwn
+ * @return: 1 on match, 0 on no-match
+ */
static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev,
- void *data)
+ const unsigned char *dev_wwn)
{
- struct xcopy_dev_search_info *info = data;
unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (!se_dev->dev_attrib.emulate_3pc)
+ if (!se_dev->dev_attrib.emulate_3pc) {
+ pr_debug("XCOPY: emulate_3pc disabled on se_dev %p\n", se_dev);
return 0;
+ }
memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN);
target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]);
- rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
- if (rc != 0)
- return 0;
-
- info->found_dev = se_dev;
- pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
-
- rc = target_depend_item(&se_dev->dev_group.cg_item);
+ rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN);
if (rc != 0) {
- pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n",
- rc, se_dev);
- return rc;
+ pr_debug("XCOPY: skip non-matching: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, tmp_dev_wwn);
+ return 0;
}
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
- pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n",
- se_dev, &se_dev->dev_group);
return 1;
}
-static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
- struct se_device **found_dev)
+static int target_xcopy_locate_se_dev_e4(struct se_session *sess,
+ const unsigned char *dev_wwn,
+ struct se_device **_found_dev,
+ struct percpu_ref **_found_lun_ref)
{
- struct xcopy_dev_search_info info;
- int ret;
-
- memset(&info, 0, sizeof(info));
- info.dev_wwn = dev_wwn;
-
- ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info);
- if (ret == 1) {
- *found_dev = info.found_dev;
- return 0;
- } else {
- pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
- return -EINVAL;
+ struct se_dev_entry *deve;
+ struct se_node_acl *nacl;
+ struct se_lun *this_lun = NULL;
+ struct se_device *found_dev = NULL;
+
+ /* cmd with NULL sess indicates no associated $FABRIC_MOD */
+ if (!sess)
+ goto err_out;
+
+ pr_debug("XCOPY 0xe4: searching for: %*ph\n",
+ XCOPY_NAA_IEEE_REGEX_LEN, dev_wwn);
+
+ nacl = sess->se_node_acl;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
+ struct se_device *this_dev;
+ int rc;
+
+ this_lun = rcu_dereference(deve->se_lun);
+ this_dev = rcu_dereference_raw(this_lun->lun_se_dev);
+
+ rc = target_xcopy_locate_se_dev_e4_iter(this_dev, dev_wwn);
+ if (rc) {
+ if (percpu_ref_tryget_live(&this_lun->lun_ref))
+ found_dev = this_dev;
+ break;
+ }
}
+ rcu_read_unlock();
+ if (found_dev == NULL)
+ goto err_out;
+
+ pr_debug("lun_ref held for se_dev: %p se_dev->se_dev_group: %p\n",
+ found_dev, &found_dev->dev_group);
+ *_found_dev = found_dev;
+ *_found_lun_ref = &this_lun->lun_ref;
+ return 0;
+err_out:
+ pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n");
+ return -EINVAL;
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
@@ -246,12 +269,16 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
switch (xop->op_origin) {
case XCOL_SOURCE_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
- &xop->dst_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->dst_tid_wwn,
+ &xop->dst_dev,
+ &xop->remote_lun_ref);
break;
case XCOL_DEST_RECV_OP:
- rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
- &xop->src_dev);
+ rc = target_xcopy_locate_se_dev_e4(se_cmd->se_sess,
+ xop->src_tid_wwn,
+ &xop->src_dev,
+ &xop->remote_lun_ref);
break;
default:
pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
@@ -391,18 +418,12 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
{
- struct se_device *remote_dev;
-
if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- remote_dev = xop->dst_dev;
+ pr_debug("putting dst lun_ref for %p\n", xop->dst_dev);
else
- remote_dev = xop->src_dev;
-
- pr_debug("Calling configfs_undepend_item for"
- " remote_dev: %p remote_dev->dev_group: %p\n",
- remote_dev, &remote_dev->dev_group.cg_item);
+ pr_debug("putting src lun_ref for %p\n", xop->src_dev);
- target_undepend_item(&remote_dev->dev_group.cg_item);
+ percpu_ref_put(xop->remote_lun_ref);
}
static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index c56a1bde9417..e5f20005179a 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -27,6 +27,7 @@ struct xcopy_op {
struct se_device *dst_dev;
unsigned char dst_tid_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
unsigned char local_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
+ struct percpu_ref *remote_lun_ref;
sector_t src_lba;
sector_t dst_lba;
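The xcopy rework swaps a configfs dependency for the LUN's percpu_ref: percpu_ref_tryget_live() only succeeds while the ref has not begun dying, which is what makes pinning a device found during the RCU walk safe. The take/put shape in isolation (names illustrative):

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

/* Pin an RCU-looked-up object only if it is still live. */
static bool example_pin(struct percpu_ref *ref)
{
    bool pinned;

    rcu_read_lock();
    pinned = percpu_ref_tryget_live(ref);    /* fails once ref is dying */
    rcu_read_unlock();

    return pinned;    /* balance with percpu_ref_put(ref) */
}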
diff --git a/drivers/tee/optee/device.c b/drivers/tee/optee/device.c
index 7a897d51969f..ec1d24693eba 100644
--- a/drivers/tee/optee/device.c
+++ b/drivers/tee/optee/device.c
@@ -98,7 +98,7 @@ static int __optee_enumerate_devices(u32 func)
return -ENODEV;
/* Open session with device enumeration pseudo TA */
- memcpy(sess_arg.uuid, pta_uuid.b, TEE_IOCTL_UUID_LEN);
+ export_uuid(sess_arg.uuid, &pta_uuid);
sess_arg.clnt_login = TEE_IOCTL_LOGIN_PUBLIC;
sess_arg.num_params = 0;
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 0966551cbaaa..823354a1a91a 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -584,6 +584,7 @@ static int int3400_thermal_remove(struct platform_device *pdev)
static const struct acpi_device_id int3400_thermal_match[] = {
{"INT3400", 0},
{"INTC1040", 0},
+ {"INTC1041", 0},
{}
};
diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
index ec1d58c4ceaa..c3c4c4d34542 100644
--- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c
@@ -284,6 +284,7 @@ static int int3403_remove(struct platform_device *pdev)
static const struct acpi_device_id int3403_device_ids[] = {
{"INT3403", 0},
{"INTC1043", 0},
+ {"INTC1046", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 47a6e42f0d04..e15cd6b5bb99 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -401,6 +401,20 @@ config MIPS_EJTAG_FDC_KGDB_CHAN
help
FDC channel number to use for KGDB.
+config NULL_TTY
+ tristate "NULL TTY driver"
+ help
+ Say Y here if you want a NULL TTY which simply discards messages.
+
+ This is useful to allow userspace applications which expect a console
+ device to work without modifications even when no console is
+ available or desired.
+
+ In order to use this driver, you should redirect the console to this
+ TTY, or boot the kernel with console=ttynull.
+
+ If unsure, say N.
+
config TRACE_ROUTER
tristate "Trace data router for MIPI P1149.7 cJTAG standard"
depends on TRACE_SINK
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
index 3c1c5a9240a7..b3ccae932660 100644
--- a/drivers/tty/Makefile
+++ b/drivers/tty/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_TTY) += tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
tty_buffer.o tty_port.o tty_mutex.o \
tty_ldsem.o tty_baudrate.o tty_jobctrl.o \
- n_null.o ttynull.o
+ n_null.o
obj-$(CONFIG_LEGACY_PTYS) += pty.o
obj-$(CONFIG_UNIX98_PTYS) += pty.o
obj-$(CONFIG_AUDIT) += tty_audit.o
@@ -25,6 +25,7 @@ obj-$(CONFIG_ISI) += isicom.o
obj-$(CONFIG_MOXA_INTELLIO) += moxa.o
obj-$(CONFIG_MOXA_SMARTIO) += mxser.o
obj-$(CONFIG_NOZOMI) += nozomi.o
+obj-$(CONFIG_NULL_TTY) += ttynull.o
obj-$(CONFIG_ROCKETPORT) += rocket.o
obj-$(CONFIG_SYNCLINK_GT) += synclink_gt.o
obj-$(CONFIG_PPC_EPAPR_HV_BYTECHAN) += ehv_bytechan.o
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 12d71d5eb6ca..34a2899e69c0 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1567,6 +1567,38 @@ config SERIAL_MILBEAUT_USIO_CONSOLE
receives all kernel messages and warnings and which allows logins in
single user mode).
+config SERIAL_LITEUART
+ tristate "LiteUART serial port support"
+ depends on HAS_IOMEM
+ depends on OF || COMPILE_TEST
+ depends on LITEX
+ select SERIAL_CORE
+ help
+ This driver is for the FPGA-based LiteUART serial controller from the
+ LiteX SoC builder.
+
+ Say 'Y' or 'M' here if you wish to use the LiteUART serial controller.
+ Otherwise, say 'N'.
+
+config SERIAL_LITEUART_MAX_PORTS
+ int "Maximum number of LiteUART ports"
+ depends on SERIAL_LITEUART
+ default "1"
+ help
+ Set this to the maximum number of serial ports you want the driver
+ to support.
+
+config SERIAL_LITEUART_CONSOLE
+ bool "LiteUART serial port console support"
+ depends on SERIAL_LITEUART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say 'Y' here if you wish to use the FPGA-based LiteUART serial
+ controller from the LiteX SoC builder as the system console
+ (the system console is the device which receives all kernel messages
+ and warnings and which allows logins in single user mode).
+ Otherwise, say 'N'.
+
endmenu
config SERIAL_MCTRL_GPIO
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index af44b231123c..b85d53f9e9ff 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_SERIAL_OWL) += owl-uart.o
obj-$(CONFIG_SERIAL_RDA) += rda-uart.o
obj-$(CONFIG_SERIAL_MILBEAUT_USIO) += milbeaut_usio.o
obj-$(CONFIG_SERIAL_SIFIVE) += sifive.o
+obj-$(CONFIG_SERIAL_LITEUART) += liteuart.o
# GPIOLIB helpers for modem control lines
obj-$(CONFIG_SERIAL_MCTRL_GPIO) += serial_mctrl_gpio.o
diff --git a/drivers/tty/serial/liteuart.c b/drivers/tty/serial/liteuart.c
new file mode 100644
index 000000000000..64842f3539e1
--- /dev/null
+++ b/drivers/tty/serial/liteuart.c
@@ -0,0 +1,404 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LiteUART serial controller (LiteX) Driver
+ *
+ * Copyright (C) 2019-2020 Antmicro <www.antmicro.com>
+ */
+
+#include <linux/console.h>
+#include <linux/litex.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/serial.h>
+#include <linux/serial_core.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <linux/tty_flip.h>
+#include <linux/xarray.h>
+
+/*
+ * CSRs definitions (base address offsets + width)
+ *
+ * The definitions below are true for LiteX SoC configured for 8-bit CSR Bus,
+ * 32-bit aligned.
+ *
+ * Supporting other configurations might require new definitions or a more
+ * generic way of indexing the LiteX CSRs.
+ *
+ * For more details on how CSRs are defined and handled in LiteX, see comments
+ * in the LiteX SoC Driver: drivers/soc/litex/litex_soc_ctrl.c
+ */
+#define OFF_RXTX 0x00
+#define OFF_TXFULL 0x04
+#define OFF_RXEMPTY 0x08
+#define OFF_EV_STATUS 0x0c
+#define OFF_EV_PENDING 0x10
+#define OFF_EV_ENABLE 0x14
+
+/* events */
+#define EV_TX 0x1
+#define EV_RX 0x2
+
+struct liteuart_port {
+ struct uart_port port;
+ struct timer_list timer;
+ u32 id;
+};
+
+#define to_liteuart_port(port) container_of(port, struct liteuart_port, port)
+
+static DEFINE_XARRAY_FLAGS(liteuart_array, XA_FLAGS_ALLOC);
+
+#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+static struct console liteuart_console;
+#endif
+
+static struct uart_driver liteuart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = "liteuart",
+ .dev_name = "ttyLXU",
+ .major = 0,
+ .minor = 0,
+ .nr = CONFIG_SERIAL_LITEUART_MAX_PORTS,
+#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+ .cons = &liteuart_console,
+#endif
+};
+
+static void liteuart_timer(struct timer_list *t)
+{
+ struct liteuart_port *uart = from_timer(uart, t, timer);
+ struct uart_port *port = &uart->port;
+ unsigned char __iomem *membase = port->membase;
+ unsigned int flg = TTY_NORMAL;
+ int ch;
+ unsigned long status;
+
+ while ((status = !litex_read8(membase + OFF_RXEMPTY)) == 1) {
+ ch = litex_read8(membase + OFF_RXTX);
+ port->icount.rx++;
+
+ /* necessary for RXEMPTY to refresh its value */
+ litex_write8(membase + OFF_EV_PENDING, EV_TX | EV_RX);
+
+ /* no overflow bits in status */
+ if (!(uart_handle_sysrq_char(port, ch)))
+ uart_insert_char(port, status, 0, ch, flg);
+
+ tty_flip_buffer_push(&port->state->port);
+ }
+
+ mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
+}
+
+static void liteuart_putchar(struct uart_port *port, int ch)
+{
+ while (litex_read8(port->membase + OFF_TXFULL))
+ cpu_relax();
+
+ litex_write8(port->membase + OFF_RXTX, ch);
+}
+
+static unsigned int liteuart_tx_empty(struct uart_port *port)
+{
+ /* not really tx empty, just checking if tx is not full */
+ if (!litex_read8(port->membase + OFF_TXFULL))
+ return TIOCSER_TEMT;
+
+ return 0;
+}
+
+static void liteuart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /* modem control register is not present in LiteUART */
+}
+
+static unsigned int liteuart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR;
+}
+
+static void liteuart_stop_tx(struct uart_port *port)
+{
+}
+
+static void liteuart_start_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned char ch;
+
+ if (unlikely(port->x_char)) {
+ litex_write8(port->membase + OFF_RXTX, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
+ } else if (!uart_circ_empty(xmit)) {
+ while (xmit->head != xmit->tail) {
+ ch = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ liteuart_putchar(port, ch);
+ }
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
+static void liteuart_stop_rx(struct uart_port *port)
+{
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ /* just delete timer */
+ del_timer(&uart->timer);
+}
+
+static void liteuart_break_ctl(struct uart_port *port, int break_state)
+{
+ /* LiteUART doesn't support sending break signal */
+}
+
+static int liteuart_startup(struct uart_port *port)
+{
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ /* disable events */
+ litex_write8(port->membase + OFF_EV_ENABLE, 0);
+
+ /* prepare timer for polling */
+ timer_setup(&uart->timer, liteuart_timer, 0);
+ mod_timer(&uart->timer, jiffies + uart_poll_timeout(port));
+
+ return 0;
+}
+
+static void liteuart_shutdown(struct uart_port *port)
+{
+}
+
+static void liteuart_set_termios(struct uart_port *port, struct ktermios *new,
+ struct ktermios *old)
+{
+ unsigned int baud;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* update baudrate */
+ baud = uart_get_baud_rate(port, new, old, 0, 460800);
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static const char *liteuart_type(struct uart_port *port)
+{
+ return "liteuart";
+}
+
+static void liteuart_release_port(struct uart_port *port)
+{
+}
+
+static int liteuart_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+static void liteuart_config_port(struct uart_port *port, int flags)
+{
+ /*
+ * Driver core for serial ports forces a non-zero value for port type.
+ * Write an arbitrary value here to accommodate the serial core driver,
+ * as the ID part of the UAPI is redundant.
+ */
+ port->type = 1;
+}
+
+static int liteuart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (port->type != PORT_UNKNOWN && ser->type != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct uart_ops liteuart_ops = {
+ .tx_empty = liteuart_tx_empty,
+ .set_mctrl = liteuart_set_mctrl,
+ .get_mctrl = liteuart_get_mctrl,
+ .stop_tx = liteuart_stop_tx,
+ .start_tx = liteuart_start_tx,
+ .stop_rx = liteuart_stop_rx,
+ .break_ctl = liteuart_break_ctl,
+ .startup = liteuart_startup,
+ .shutdown = liteuart_shutdown,
+ .set_termios = liteuart_set_termios,
+ .type = liteuart_type,
+ .release_port = liteuart_release_port,
+ .request_port = liteuart_request_port,
+ .config_port = liteuart_config_port,
+ .verify_port = liteuart_verify_port,
+};
+
+static int liteuart_probe(struct platform_device *pdev)
+{
+ struct liteuart_port *uart;
+ struct uart_port *port;
+ struct xa_limit limit;
+ int dev_id, ret;
+
+ /* look for aliases; auto-enumerate for free index if not found */
+ dev_id = of_alias_get_id(pdev->dev.of_node, "serial");
+ if (dev_id < 0)
+ limit = XA_LIMIT(0, CONFIG_SERIAL_LITEUART_MAX_PORTS);
+ else
+ limit = XA_LIMIT(dev_id, dev_id);
+
+ uart = devm_kzalloc(&pdev->dev, sizeof(struct liteuart_port), GFP_KERNEL);
+ if (!uart)
+ return -ENOMEM;
+
+ ret = xa_alloc(&liteuart_array, &dev_id, uart, limit, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ uart->id = dev_id;
+ port = &uart->port;
+
+ /* get membase */
+ port->membase = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (!port->membase)
+ return -ENXIO;
+
+ /* values not from device tree */
+ port->dev = &pdev->dev;
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &liteuart_ops;
+ port->regshift = 2;
+ port->fifosize = 16;
+ port->iobase = 1;
+ port->type = PORT_UNKNOWN;
+ port->line = dev_id;
+ spin_lock_init(&port->lock);
+
+ return uart_add_one_port(&liteuart_driver, &uart->port);
+}
+
+static int liteuart_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+ struct liteuart_port *uart = to_liteuart_port(port);
+
+ xa_erase(&liteuart_array, uart->id);
+
+ return 0;
+}
+
+static const struct of_device_id liteuart_of_match[] = {
+ { .compatible = "litex,liteuart" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, liteuart_of_match);
+
+static struct platform_driver liteuart_platform_driver = {
+ .probe = liteuart_probe,
+ .remove = liteuart_remove,
+ .driver = {
+ .name = "liteuart",
+ .of_match_table = liteuart_of_match,
+ },
+};
+
+#ifdef CONFIG_SERIAL_LITEUART_CONSOLE
+
+static void liteuart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct liteuart_port *uart;
+ struct uart_port *port;
+ unsigned long flags;
+
+ uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
+ port = &uart->port;
+
+ spin_lock_irqsave(&port->lock, flags);
+ uart_console_write(port, s, count, liteuart_putchar);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static int liteuart_console_setup(struct console *co, char *options)
+{
+ struct liteuart_port *uart;
+ struct uart_port *port;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ uart = (struct liteuart_port *)xa_load(&liteuart_array, co->index);
+ if (!uart)
+ return -ENODEV;
+
+ port = &uart->port;
+ if (!port->membase)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct console liteuart_console = {
+ .name = "liteuart",
+ .write = liteuart_console_write,
+ .device = uart_console_device,
+ .setup = liteuart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &liteuart_driver,
+};
+
+static int __init liteuart_console_init(void)
+{
+ register_console(&liteuart_console);
+
+ return 0;
+}
+console_initcall(liteuart_console_init);
+#endif /* CONFIG_SERIAL_LITEUART_CONSOLE */
+
+static int __init liteuart_init(void)
+{
+ int res;
+
+ res = uart_register_driver(&liteuart_driver);
+ if (res)
+ return res;
+
+ res = platform_driver_register(&liteuart_platform_driver);
+ if (res) {
+ uart_unregister_driver(&liteuart_driver);
+ return res;
+ }
+
+ return 0;
+}
+
+static void __exit liteuart_exit(void)
+{
+ platform_driver_unregister(&liteuart_platform_driver);
+ uart_unregister_driver(&liteuart_driver);
+}
+
+module_init(liteuart_init);
+module_exit(liteuart_exit);
+
+MODULE_AUTHOR("Antmicro <www.antmicro.com>");
+MODULE_DESCRIPTION("LiteUART serial driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:liteuart");
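
The probe path above resolves a port index in one of two ways: a devicetree "serial" alias pins the xarray slot exactly, while an unaliased port is handed the first free slot in the configured range. A minimal sketch of that selection, assuming the same liteuart_array and Kconfig limit (kernel context, helper name invented):

static int liteuart_pick_index(struct device_node *node, void *entry)
{
	int alias = of_alias_get_id(node, "serial");
	struct xa_limit limit = (alias < 0) ?
		XA_LIMIT(0, CONFIG_SERIAL_LITEUART_MAX_PORTS) :	/* any free slot */
		XA_LIMIT(alias, alias);				/* exactly the alias */
	u32 id;
	int ret;

	ret = xa_alloc(&liteuart_array, &id, entry, limit, GFP_KERNEL);

	return ret ? ret : id;
}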
diff --git a/drivers/tty/ttynull.c b/drivers/tty/ttynull.c
index eced70ec54e1..17f05b7eb6d3 100644
--- a/drivers/tty/ttynull.c
+++ b/drivers/tty/ttynull.c
@@ -2,13 +2,6 @@
/*
* Copyright (C) 2019 Axis Communications AB
*
- * The console is useful for userspace applications which expect a console
- * device to work without modifications even when no console is available
- * or desired.
- *
- * In order to use this driver, you should redirect the console to this
- * TTY, or boot the kernel with console=ttynull.
- *
* Based on ttyprintk.c:
* Copyright (C) 2010 Samo Pogacnik
*/
@@ -66,17 +59,6 @@ static struct console ttynull_console = {
.device = ttynull_device,
};
-void __init register_ttynull_console(void)
-{
- if (!ttynull_driver)
- return;
-
- if (add_preferred_console(ttynull_console.name, 0, NULL))
- return;
-
- register_console(&ttynull_console);
-}
-
static int __init ttynull_init(void)
{
struct tty_driver *driver;
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
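
The split above enforces a driver-core rule: of_find_device_by_node() returns its result with an elevated reference count, so any bail-out taken after a successful lookup must pair it with put_device(). A hedged sketch of the pattern (helper name invented):

static struct device *example_get_bound_dev(struct device_node *np)
{
	struct platform_device *pdev = of_find_device_by_node(np);

	if (!pdev)
		return ERR_PTR(-EPROBE_DEFER);	/* lookup failed, no reference held */

	if (!platform_get_drvdata(pdev)) {	/* found, but not probed yet */
		put_device(&pdev->dev);		/* drop the lookup's reference */
		return ERR_PTR(-EPROBE_DEFER);
	}

	return &pdev->dev;			/* caller now owns the reference */
}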
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..781905745812 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..134dc2005ce9 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
/*
* See the description for usblp_select_alts() below for the usage
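
The macro-to-function conversion above is about DMA safety: buffers handed to usb_control_msg() go to the host controller for DMA, so they must be heap-allocated, never a pointer into the caller's stack. A minimal sketch of the same rule (the vendor request number and helper name are invented for illustration):

static int example_vendor_read_byte(struct usb_device *udev, u8 *out)
{
	u8 *buf = kmalloc(1, GFP_KERNEL);	/* DMA-able; a stack byte is not */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01,
			      USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, 1, 5000);
	if (ret == 1)
		*out = buf[0];

	kfree(buf);

	return ret < 0 ? ret : 0;
}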
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..ad5a0f405a75 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..1b241f937d8f 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
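
The one-liner above reroutes an early failure through the probe function's existing unwind label so clocks enabled earlier are not leaked. The shape of that error path, as a hedged sketch (the clock bulk field names are assumptions, not necessarily the driver's exact ones):

	ret = clk_bulk_prepare_enable(priv->num_clks, priv->clks);
	if (ret)
		return ret;			/* nothing enabled yet, plain return */

	ret = priv->drvdata->setup_regmaps(priv, base);
	if (ret)
		goto err_disable_clks;		/* clocks are on, unwind them */

	/* remaining setup, elided */

err_disable_clks:
	clk_bulk_disable_unprepare(priv->num_clks, priv->clks);
	return ret;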
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..ee44321fee38 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2083,6 +2085,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2145,6 +2148,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
@@ -2319,10 +2324,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
}
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
@@ -2348,13 +2349,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
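
The new constants work out as follows, assuming the 60 MHz ULPI clock the macro encodes: one PHY cycle rounds up to 17 ns, a register access needs at least five cycles, and an extended address or a read turnaround each cost one more, so the loop sleeps at most 7 x 17 = 119 ns per iteration instead of hammering the register. A standalone check of the arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define NSEC_PER_SEC		1000000000L

int main(void)
{
	long base = DIV_ROUND_UP(NSEC_PER_SEC, 60000000L);	/* 17 ns per cycle */
	long ns = 5L * base;					/* minimum access */

	ns += base;	/* extended (>= ULPI_EXT_VENDOR_SPECIFIC) address */
	ns += base;	/* read turnaround */

	printf("cycle=%ld ns, worst-case poll delay=%ld ns\n", base, ns);

	return 0;
}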
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation, allows
grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..1a556a628971 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -392,8 +392,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +427,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
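
Both hunks above apply the standard cure for calling a sleeping function under a spinlock: usb_gadget_deactivate() and usb_gadget_activate() may sleep, so the lock is dropped around the call and re-taken before the protected counter is touched. The cost is a window in which the cdev state can change, which is why the counter update stays conditional on the returned status; a condensed sketch of the deactivate side:

	spin_lock_irqsave(&cdev->lock, flags);
	if (cdev->deactivations == 0) {
		spin_unlock_irqrestore(&cdev->lock, flags);
		status = usb_gadget_deactivate(cdev->gadget);	/* may sleep */
		spin_lock_irqsave(&cdev->lock, flags);		/* state may have changed */
	}
	if (status == 0)
		cdev->deactivations++;
	spin_unlock_irqrestore(&cdev->lock, flags);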
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..36ffb43f9c1a 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
+
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
- return sprintf(page, "%s\n", udc_name ?: "");
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1536,7 +1543,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1583,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..2f1eb2e81d30 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1162,6 +1162,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
/* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
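
A worked check of the bandwidth formula above, with assumed (illustrative) settings of 2 channels, 16-bit samples and 48 kHz: full speed sends one packet per 1 ms frame, and high speed with bInterval = 4 groups eight 125 us microframes back into a 1 ms interval, so both speeds land on the same 192-byte packet, well under the 1023/1024-byte endpoint caps:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Bytes per service interval of an async ISOC audio endpoint. */
static unsigned int ep_bytes(unsigned int chans, unsigned int ssize,
			     unsigned int srate, unsigned int factor,
			     unsigned int bInterval)
{
	return chans * ssize *
	       DIV_ROUND_UP(srate, factor / (1u << (bInterval - 1)));
}

int main(void)
{
	printf("FS: %u bytes (cap 1023)\n", ep_bytes(2, 2, 48000, 1000, 1));
	printf("HS: %u bytes (cap 1024)\n", ep_bytes(2, 2, 48000, 8000, 4));

	return 0;
}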
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..c019f2b0c0af 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -786,7 +787,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +849,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
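
The rename makes net->max_mtu a true payload limit: adding the 14-byte Ethernet header back on, the largest frame the gadget now builds is 15426 bytes, still inside the 16k that hosts were observed to accept, while the frame-length constant used for buffer sizing covers the full frame rather than the bare MTU. The numbers:

#include <stdio.h>

#define ETH_HLEN		14
#define GETHER_MAX_MTU_SIZE	15412

int main(void)
{
	printf("max frame = %d bytes\n", GETHER_MAX_MTU_SIZE + ETH_HLEN);	/* 15426 */

	return 0;
}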
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..6a62bbd01324 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -659,8 +659,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..1a953f44183a 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2281,18 +2293,17 @@ static int dummy_hub_control(
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- set_link_state(dum_hcd);
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index fb52133c3557..98568b046a1a 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -200,7 +200,7 @@ fail_start:
return result;
}
-static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
+static void ps3_ehci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
@@ -227,8 +227,6 @@ static int ps3_ehci_remove(struct ps3_system_bus_device *dev)
ps3_dma_region_free(dev->d_region);
ps3_close_hv_device(dev);
-
- return 0;
}
static int __init ps3_ehci_driver_register(struct ps3_system_bus_driver *drv)
diff --git a/drivers/usb/host/ohci-ps3.c b/drivers/usb/host/ohci-ps3.c
index f77cd6af0ccf..4f5af929c3e4 100644
--- a/drivers/usb/host/ohci-ps3.c
+++ b/drivers/usb/host/ohci-ps3.c
@@ -184,7 +184,7 @@ fail_start:
return result;
}
-static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
+static void ps3_ohci_remove(struct ps3_system_bus_device *dev)
{
unsigned int tmp;
struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev);
@@ -212,8 +212,6 @@ static int ps3_ohci_remove(struct ps3_system_bus_device *dev)
ps3_dma_region_free(dev->d_region);
ps3_close_hv_device(dev);
-
- return 0;
}
static int __init ps3_ohci_driver_register(struct ps3_system_bus_driver *drv)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..e86940571b4c 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4770,19 +4770,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
@@ -4834,19 +4834,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
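
Hoisting the timeout computation above the guard changes what the guard measures: U1/U2 is now refused whenever a periodic endpoint's service interval is no longer than the computed timeout itself, not merely the exit latency, so the link is never told to enter a state it would be woken from on every interval. A toy comparison with invented numbers:

#include <stdio.h>

int main(void)
{
	unsigned long long esit_ns = 125000;	/* hypothetical 125 us interval */
	unsigned long long mel_ns = 100000;	/* hypothetical exit latency */
	unsigned long long timeout_ns = 200000;	/* hypothetical U1 timeout */

	printf("old guard: %s\n", esit_ns <= mel_ns ? "U1 disabled" : "U1 allowed");
	printf("new guard: %s\n", esit_ns <= timeout_ns ? "U1 disabled" : "U1 allowed");

	return 0;
}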
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..e8f06b41a503 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..3fe959104311 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1117,6 +1117,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -2057,6 +2059,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..8f77669f9cf4 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -766,6 +766,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -923,6 +924,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ /* Configure HPD first if HPD and IRQ arrive together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
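
The added check guards a C-level trap as much as a logic one: port_status is a 32-bit field, and shifting a 32-bit integer by 32 or more is undefined behaviour, so an unvalidated wValue from the (untrusted) client must be rejected before it is used as a shift count. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t status = 0xffffffffu;
	unsigned int wValue = 35;	/* hypothetical out-of-range selector */

	if (wValue >= 32) {		/* the guard the hunk introduces */
		puts("rejected: feature selector out of range");
		return 1;
	}

	status &= ~(UINT32_C(1) << wValue);	/* safe only after the check */
	printf("status=%#x\n", status);

	return 0;
}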
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 6caf539091e5..92a6396f8a73 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -9,21 +9,24 @@ menuconfig VDPA
if VDPA
config VDPA_SIM
- tristate "vDPA device simulator"
+ tristate "vDPA device simulator core"
depends on RUNTIME_TESTING_MENU && HAS_DMA
select DMA_OPS
select VHOST_RING
+ help
+ Enable this module to support vDPA device simulators. These devices
+ are used for testing, prototyping and development of vDPA.
+
+config VDPA_SIM_NET
+ tristate "vDPA simulator for networking device"
+ depends on VDPA_SIM
select GENERIC_NET_UTILS
- default n
help
- vDPA networking device simulator which loop TX traffic back
- to RX. This device is used for testing, prototyping and
- development of vDPA.
+ vDPA networking device simulator which loops TX traffic back to RX.
config IFCVF
tristate "Intel IFC VF vDPA driver"
depends on PCI_MSI
- default n
help
This kernel module can drive Intel IFC VF NIC to offload
virtio dataplane traffic to hardware.
@@ -42,7 +45,6 @@ config MLX5_VDPA_NET
tristate "vDPA driver for ConnectX devices"
select MLX5_VDPA
depends on MLX5_CORE
- default n
help
VDPA network driver for ConnectX6 and newer. Provides offloading
of virtio net datapath such that descriptors put on the ring will
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index 8b4028556cb6..fa1af301cf55 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -417,16 +417,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return ret;
}
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (ret) {
- IFCVF_ERR(pdev, "No usable DMA confiugration\n");
- return ret;
- }
-
- ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (ret) {
- IFCVF_ERR(pdev,
- "No usable coherent DMA confiugration\n");
+ IFCVF_ERR(pdev, "No usable DMA configuration\n");
return ret;
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index f1d54814db97..88dde3455bfd 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -479,6 +479,11 @@ static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq)
static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num)
{
mlx5_cq_set_ci(&mvq->cq.mcq);
+
+ /* make sure the CQ consumer update is visible to the hardware before updating
+ * RX doorbell record.
+ */
+ dma_wmb();
rx_post(&mvq->vqqp, num);
if (mvq->event_cb.callback)
mvq->event_cb.callback(mvq->event_cb.private);
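
The dma_wmb() enforces device-visible ordering: the consumer-index store lands in DMA-coherent memory, and on weakly ordered CPUs it could otherwise be reordered after the doorbell write that tells the NIC to go read it. The rule in miniature, as a hedged sketch:

	mlx5_cq_set_ci(&mvq->cq.mcq);	/* CPU store to DMA-coherent memory */
	dma_wmb();			/* order the store before the doorbell */
	rx_post(&mvq->vqqp, num);	/* device may now observe the new CI */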
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index a69ffc991e13..c0825650c055 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -89,7 +89,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (!vdev)
goto err;
- err = ida_simple_get(&vdpa_index_ida, 0, 0, GFP_KERNEL);
+ err = ida_alloc(&vdpa_index_ida, GFP_KERNEL);
if (err < 0)
goto err_ida;
diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile
index b40278f65e04..79d4536d347e 100644
--- a/drivers/vdpa/vdpa_sim/Makefile
+++ b/drivers/vdpa/vdpa_sim/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o
+obj-$(CONFIG_VDPA_SIM_NET) += vdpa_sim_net.o
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6a90fdb9cbfc..b3fcc67bfdf0 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * VDPA networking device simulator.
+ * VDPA device simulator core.
*
* Copyright (c) 2020, Red Hat Inc. All rights reserved.
* Author: Jason Wang <jasowang@redhat.com>
@@ -11,97 +11,32 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>
-#include <linux/wait.h>
-#include <linux/uuid.h>
-#include <linux/iommu.h>
#include <linux/dma-map-ops.h>
-#include <linux/sysfs.h>
-#include <linux/file.h>
-#include <linux/etherdevice.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
-#include <linux/virtio_byteorder.h>
#include <linux/vhost_iotlb.h>
-#include <uapi/linux/virtio_config.h>
-#include <uapi/linux/virtio_net.h>
+
+#include "vdpa_sim.h"
#define DRV_VERSION "0.1"
#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
-#define DRV_DESC "vDPA Device Simulator"
+#define DRV_DESC "vDPA Device Simulator core"
#define DRV_LICENSE "GPL v2"
static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
-static char *macaddr;
-module_param(macaddr, charp, 0);
-MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
-
-struct vdpasim_virtqueue {
- struct vringh vring;
- struct vringh_kiov iov;
- unsigned short head;
- bool ready;
- u64 desc_addr;
- u64 device_addr;
- u64 driver_addr;
- u32 num;
- void *private;
- irqreturn_t (*cb)(void *data);
-};
+static int max_iotlb_entries = 2048;
+module_param(max_iotlb_entries, int, 0444);
+MODULE_PARM_DESC(max_iotlb_entries,
+ "Maximum number of iotlb entries. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
-#define VDPASIM_DEVICE_ID 0x1
#define VDPASIM_VENDOR_ID 0
-#define VDPASIM_VQ_NUM 0x2
-#define VDPASIM_NAME "vdpasim-netdev"
-
-static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) |
- (1ULL << VIRTIO_F_VERSION_1) |
- (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
- (1ULL << VIRTIO_NET_F_MAC);
-
-/* State of each vdpasim device */
-struct vdpasim {
- struct vdpa_device vdpa;
- struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM];
- struct work_struct work;
- /* spinlock to synchronize virtqueue state */
- spinlock_t lock;
- struct virtio_net_config config;
- struct vhost_iotlb *iommu;
- void *buffer;
- u32 status;
- u32 generation;
- u64 features;
- /* spinlock to synchronize iommu table */
- spinlock_t iommu_lock;
-};
-
-/* TODO: cross-endian support */
-static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
-{
- return virtio_legacy_is_little_endian() ||
- (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
-}
-
-static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
-{
- return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
-}
-
-static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
-{
- return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
-}
-
-static struct vdpasim *vdpasim_dev;
static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
@@ -115,20 +50,34 @@ static struct vdpasim *dev_to_sim(struct device *dev)
return vdpa_to_sim(vdpa);
}
+static void vdpasim_vq_notify(struct vringh *vring)
+{
+ struct vdpasim_virtqueue *vq =
+ container_of(vring, struct vdpasim_virtqueue, vring);
+
+ if (!vq->cb)
+ return;
+
+ vq->cb(vq->private);
+}
+
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
- vringh_init_iotlb(&vq->vring, vdpasim_features,
+ vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
VDPASIM_QUEUE_MAX, false,
(struct vring_desc *)(uintptr_t)vq->desc_addr,
(struct vring_avail *)
(uintptr_t)vq->driver_addr,
(struct vring_used *)
(uintptr_t)vq->device_addr);
+
+ vq->vring.notify = vdpasim_vq_notify;
}
-static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
+static void vdpasim_vq_reset(struct vdpasim *vdpasim,
+ struct vdpasim_virtqueue *vq)
{
vq->ready = false;
vq->desc_addr = 0;
@@ -136,16 +85,18 @@ static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq)
vq->device_addr = 0;
vq->cb = NULL;
vq->private = NULL;
- vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX,
- false, NULL, NULL, NULL);
+ vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
+ VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);
+
+ vq->vring.notify = NULL;
}
static void vdpasim_reset(struct vdpasim *vdpasim)
{
int i;
- for (i = 0; i < VDPASIM_VQ_NUM; i++)
- vdpasim_vq_reset(&vdpasim->vqs[i]);
+ for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
+ vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
spin_lock(&vdpasim->iommu_lock);
vhost_iotlb_reset(vdpasim->iommu);
@@ -156,80 +107,6 @@ static void vdpasim_reset(struct vdpasim *vdpasim)
++vdpasim->generation;
}
-static void vdpasim_work(struct work_struct *work)
-{
- struct vdpasim *vdpasim = container_of(work, struct
- vdpasim, work);
- struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
- struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
- ssize_t read, write;
- size_t total_write;
- int pkts = 0;
- int err;
-
- spin_lock(&vdpasim->lock);
-
- if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
- goto out;
-
- if (!txq->ready || !rxq->ready)
- goto out;
-
- while (true) {
- total_write = 0;
- err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL,
- &txq->head, GFP_ATOMIC);
- if (err <= 0)
- break;
-
- err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov,
- &rxq->head, GFP_ATOMIC);
- if (err <= 0) {
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
- break;
- }
-
- while (true) {
- read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov,
- vdpasim->buffer,
- PAGE_SIZE);
- if (read <= 0)
- break;
-
- write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov,
- vdpasim->buffer, read);
- if (write <= 0)
- break;
-
- total_write += write;
- }
-
- /* Make sure data is wrote before advancing index */
- smp_wmb();
-
- vringh_complete_iotlb(&txq->vring, txq->head, 0);
- vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
-
- /* Make sure used is visible before rasing the interrupt. */
- smp_wmb();
-
- local_bh_disable();
- if (txq->cb)
- txq->cb(txq->private);
- if (rxq->cb)
- rxq->cb(rxq->private);
- local_bh_enable();
-
- if (++pkts > 4) {
- schedule_work(&vdpasim->work);
- goto out;
- }
- }
-
-out:
- spin_unlock(&vdpasim->lock);
-}
-
static int dir_to_perm(enum dma_data_direction dir)
{
int perm = -EFAULT;
@@ -342,26 +219,28 @@ static const struct dma_map_ops vdpasim_dma_ops = {
.free = vdpasim_free_coherent,
};
-static const struct vdpa_config_ops vdpasim_net_config_ops;
-static const struct vdpa_config_ops vdpasim_net_batch_config_ops;
+static const struct vdpa_config_ops vdpasim_config_ops;
+static const struct vdpa_config_ops vdpasim_batch_config_ops;
-static struct vdpasim *vdpasim_create(void)
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr)
{
const struct vdpa_config_ops *ops;
struct vdpasim *vdpasim;
struct device *dev;
- int ret = -ENOMEM;
+ int i, ret = -ENOMEM;
if (batch_mapping)
- ops = &vdpasim_net_batch_config_ops;
+ ops = &vdpasim_batch_config_ops;
else
- ops = &vdpasim_net_config_ops;
+ ops = &vdpasim_config_ops;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM);
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
+ dev_attr->nvqs);
if (!vdpasim)
goto err_alloc;
- INIT_WORK(&vdpasim->work, vdpasim_work);
+ vdpasim->dev_attr = *dev_attr;
+ INIT_WORK(&vdpasim->work, dev_attr->work_fn);
spin_lock_init(&vdpasim->lock);
spin_lock_init(&vdpasim->iommu_lock);
@@ -371,31 +250,27 @@ static struct vdpasim *vdpasim_create(void)
goto err_iommu;
set_dma_ops(dev, &vdpasim_dma_ops);
- vdpasim->iommu = vhost_iotlb_alloc(2048, 0);
+ vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
+ if (!vdpasim->config)
+ goto err_iommu;
+
+ vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
+ GFP_KERNEL);
+ if (!vdpasim->vqs)
+ goto err_iommu;
+
+ vdpasim->iommu = vhost_iotlb_alloc(max_iotlb_entries, 0);
if (!vdpasim->iommu)
goto err_iommu;
- vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
if (!vdpasim->buffer)
goto err_iommu;
- if (macaddr) {
- mac_pton(macaddr, vdpasim->config.mac);
- if (!is_valid_ether_addr(vdpasim->config.mac)) {
- ret = -EADDRNOTAVAIL;
- goto err_iommu;
- }
- } else {
- eth_random_addr(vdpasim->config.mac);
- }
-
- vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu);
- vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu);
+ for (i = 0; i < dev_attr->nvqs; i++)
+ vringh_set_iotlb(&vdpasim->vqs[i].vring, vdpasim->iommu);
vdpasim->vdpa.dma_dev = dev;
- ret = vdpa_register_device(&vdpasim->vdpa);
- if (ret)
- goto err_iommu;
return vdpasim;
@@ -404,6 +279,7 @@ err_iommu:
err_alloc:
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(vdpasim_create);
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
u64 desc_area, u64 driver_area,
@@ -498,28 +374,21 @@ static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
static u64 vdpasim_get_features(struct vdpa_device *vdpa)
{
- return vdpasim_features;
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->dev_attr.supported_features;
}
static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- struct virtio_net_config *config = &vdpasim->config;
/* DMA mapping must be done by driver */
if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
return -EINVAL;
- vdpasim->features = features & vdpasim_features;
-
- /* We generally only know whether guest is using the legacy interface
- * here, so generally that's the earliest we can set config fields.
- * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which
- * implies VIRTIO_F_VERSION_1, but let's not try to be clever here.
- */
+ vdpasim->features = features & vdpasim->dev_attr.supported_features;
- config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
- config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
return 0;
}
@@ -536,7 +405,9 @@ static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
- return VDPASIM_DEVICE_ID;
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ return vdpasim->dev_attr.id;
}
static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
@@ -572,14 +443,27 @@ static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- if (offset + len < sizeof(struct virtio_net_config))
- memcpy(buf, (u8 *)&vdpasim->config + offset, len);
+ if (offset + len > vdpasim->dev_attr.config_size)
+ return;
+
+ if (vdpasim->dev_attr.get_config)
+ vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);
+
+ memcpy(buf, vdpasim->config + offset, len);
}
static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
const void *buf, unsigned int len)
{
- /* No writable config supportted by vdpasim */
+ struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
+
+ if (offset + len > vdpasim->dev_attr.config_size)
+ return;
+
+ memcpy(vdpasim->config + offset, buf, len);
+
+ if (vdpasim->dev_attr.set_config)
+ vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}
static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
@@ -656,12 +540,14 @@ static void vdpasim_free(struct vdpa_device *vdpa)
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
cancel_work_sync(&vdpasim->work);
- kfree(vdpasim->buffer);
+ kvfree(vdpasim->buffer);
if (vdpasim->iommu)
vhost_iotlb_free(vdpasim->iommu);
+ kfree(vdpasim->vqs);
+ kfree(vdpasim->config);
}
-static const struct vdpa_config_ops vdpasim_net_config_ops = {
+static const struct vdpa_config_ops vdpasim_config_ops = {
.set_vq_address = vdpasim_set_vq_address,
.set_vq_num = vdpasim_set_vq_num,
.kick_vq = vdpasim_kick_vq,
@@ -688,7 +574,7 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.free = vdpasim_free,
};
-static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
+static const struct vdpa_config_ops vdpasim_batch_config_ops = {
.set_vq_address = vdpasim_set_vq_address,
.set_vq_num = vdpasim_set_vq_num,
.kick_vq = vdpasim_kick_vq,
@@ -714,26 +600,6 @@ static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
.free = vdpasim_free,
};
-static int __init vdpasim_dev_init(void)
-{
- vdpasim_dev = vdpasim_create();
-
- if (!IS_ERR(vdpasim_dev))
- return 0;
-
- return PTR_ERR(vdpasim_dev);
-}
-
-static void __exit vdpasim_dev_exit(void)
-{
- struct vdpa_device *vdpa = &vdpasim_dev->vdpa;
-
- vdpa_unregister_device(vdpa);
-}
-
-module_init(vdpasim_dev_init)
-module_exit(vdpasim_dev_exit)
-
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
new file mode 100644
index 000000000000..b02142293d5b
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ */
+
+#ifndef _VDPA_SIM_H
+#define _VDPA_SIM_H
+
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <linux/virtio_byteorder.h>
+#include <linux/vhost_iotlb.h>
+#include <uapi/linux/virtio_config.h>
+
+#define VDPASIM_FEATURES ((1ULL << VIRTIO_F_ANY_LAYOUT) | \
+ (1ULL << VIRTIO_F_VERSION_1) | \
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM))
+
+struct vdpasim;
+
+struct vdpasim_virtqueue {
+ struct vringh vring;
+ struct vringh_kiov in_iov;
+ struct vringh_kiov out_iov;
+ unsigned short head;
+ bool ready;
+ u64 desc_addr;
+ u64 device_addr;
+ u64 driver_addr;
+ u32 num;
+ void *private;
+ irqreturn_t (*cb)(void *data);
+};
+
+struct vdpasim_dev_attr {
+ u64 supported_features;
+ size_t config_size;
+ size_t buffer_size;
+ int nvqs;
+ u32 id;
+
+ work_func_t work_fn;
+ void (*get_config)(struct vdpasim *vdpasim, void *config);
+ void (*set_config)(struct vdpasim *vdpasim, const void *config);
+};
+
+/* State of each vdpasim device */
+struct vdpasim {
+ struct vdpa_device vdpa;
+ struct vdpasim_virtqueue *vqs;
+ struct work_struct work;
+ struct vdpasim_dev_attr dev_attr;
+ /* spinlock to synchronize virtqueue state */
+ spinlock_t lock;
+ /* virtio config according to device type */
+ void *config;
+ struct vhost_iotlb *iommu;
+ void *buffer;
+ u32 status;
+ u32 generation;
+ u64 features;
+ /* spinlock to synchronize iommu table */
+ spinlock_t iommu_lock;
+};
+
+struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *attr);
+
+/* TODO: cross-endian support */
+static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim)
+{
+ return virtio_legacy_is_little_endian() ||
+ (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1));
+}
+
+static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val)
+{
+ return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val)
+{
+ return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline u32 vdpasim32_to_cpu(struct vdpasim *vdpasim, __virtio32 val)
+{
+ return __virtio32_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio32 cpu_to_vdpasim32(struct vdpasim *vdpasim, u32 val)
+{
+ return __cpu_to_virtio32(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline u64 vdpasim64_to_cpu(struct vdpasim *vdpasim, __virtio64 val)
+{
+ return __virtio64_to_cpu(vdpasim_is_little_endian(vdpasim), val);
+}
+
+static inline __virtio64 cpu_to_vdpasim64(struct vdpasim *vdpasim, u64 val)
+{
+ return __cpu_to_virtio64(vdpasim_is_little_endian(vdpasim), val);
+}
+
+#endif
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
new file mode 100644
index 000000000000..c10b6981fdab
--- /dev/null
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * VDPA simulator for networking device.
+ *
+ * Copyright (c) 2020, Red Hat Inc. All rights reserved.
+ * Author: Jason Wang <jasowang@redhat.com>
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/vringh.h>
+#include <linux/vdpa.h>
+#include <uapi/linux/virtio_net.h>
+
+#include "vdpa_sim.h"
+
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>"
+#define DRV_DESC "vDPA Device Simulator for networking device"
+#define DRV_LICENSE "GPL v2"
+
+#define VDPASIM_NET_FEATURES (VDPASIM_FEATURES | \
+ (1ULL << VIRTIO_NET_F_MAC))
+
+#define VDPASIM_NET_VQ_NUM 2
+
+static char *macaddr;
+module_param(macaddr, charp, 0);
+MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
+
+static u8 macaddr_buf[ETH_ALEN];
+
+static struct vdpasim *vdpasim_net_dev;
+
+static void vdpasim_net_work(struct work_struct *work)
+{
+ struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
+ struct vdpasim_virtqueue *txq = &vdpasim->vqs[1];
+ struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0];
+ ssize_t read, write;
+ size_t total_write;
+ int pkts = 0;
+ int err;
+
+ spin_lock(&vdpasim->lock);
+
+ if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto out;
+
+ if (!txq->ready || !rxq->ready)
+ goto out;
+
+ while (true) {
+ total_write = 0;
+ err = vringh_getdesc_iotlb(&txq->vring, &txq->out_iov, NULL,
+ &txq->head, GFP_ATOMIC);
+ if (err <= 0)
+ break;
+
+ err = vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->in_iov,
+ &rxq->head, GFP_ATOMIC);
+ if (err <= 0) {
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ break;
+ }
+
+ while (true) {
+ read = vringh_iov_pull_iotlb(&txq->vring, &txq->out_iov,
+ vdpasim->buffer,
+ PAGE_SIZE);
+ if (read <= 0)
+ break;
+
+ write = vringh_iov_push_iotlb(&rxq->vring, &rxq->in_iov,
+ vdpasim->buffer, read);
+ if (write <= 0)
+ break;
+
+ total_write += write;
+ }
+
+ /* Make sure data is written before advancing index */
+ smp_wmb();
+
+ vringh_complete_iotlb(&txq->vring, txq->head, 0);
+ vringh_complete_iotlb(&rxq->vring, rxq->head, total_write);
+
+ /* Make sure used is visible before raising the interrupt. */
+ smp_wmb();
+
+ local_bh_disable();
+ if (vringh_need_notify_iotlb(&txq->vring) > 0)
+ vringh_notify(&txq->vring);
+ if (vringh_need_notify_iotlb(&rxq->vring) > 0)
+ vringh_notify(&rxq->vring);
+ local_bh_enable();
+
+ if (++pkts > 4) {
+ schedule_work(&vdpasim->work);
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock(&vdpasim->lock);
+}
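
The "if (++pkts > 4)" exit above is a work budget: after a handful of copied buffers the handler requeues itself instead of looping on, so one busy simulator cannot monopolize the shared workqueue. The same pattern in isolation, as a sketch (struct my_ctx, have_work() and do_one_unit() are placeholders, not kernel APIs):

/* Illustrative pattern only: bound the work done per invocation and
 * reschedule the work item so other workqueue users make progress.
 */
static void bounded_work_fn(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);
	int budget = 4;

	while (have_work(ctx)) {
		do_one_unit(ctx);
		if (--budget <= 0) {
			schedule_work(&ctx->work); /* resume later */
			return;
		}
	}
}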
+
+static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
+{
+ struct virtio_net_config *net_config =
+ (struct virtio_net_config *)config;
+
+ net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
+ net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
+ memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
+}
+
+static int __init vdpasim_net_init(void)
+{
+ struct vdpasim_dev_attr dev_attr = {};
+ int ret;
+
+ if (macaddr) {
+ mac_pton(macaddr, macaddr_buf);
+ if (!is_valid_ether_addr(macaddr_buf)) {
+ ret = -EADDRNOTAVAIL;
+ goto out;
+ }
+ } else {
+ eth_random_addr(macaddr_buf);
+ }
+
+ dev_attr.id = VIRTIO_ID_NET;
+ dev_attr.supported_features = VDPASIM_NET_FEATURES;
+ dev_attr.nvqs = VDPASIM_NET_VQ_NUM;
+ dev_attr.config_size = sizeof(struct virtio_net_config);
+ dev_attr.get_config = vdpasim_net_get_config;
+ dev_attr.work_fn = vdpasim_net_work;
+ dev_attr.buffer_size = PAGE_SIZE;
+
+ vdpasim_net_dev = vdpasim_create(&dev_attr);
+ if (IS_ERR(vdpasim_net_dev)) {
+ ret = PTR_ERR(vdpasim_net_dev);
+ goto out;
+ }
+
+ ret = vdpa_register_device(&vdpasim_net_dev->vdpa);
+ if (ret)
+ goto put_dev;
+
+ return 0;
+
+put_dev:
+ put_device(&vdpasim_net_dev->vdpa.dev);
+out:
+ return ret;
+}
+
+static void __exit vdpasim_net_exit(void)
+{
+ struct vdpa_device *vdpa = &vdpasim_net_dev->vdpa;
+
+ vdpa_unregister_device(vdpa);
+}
+
+module_init(vdpasim_net_init);
+module_exit(vdpasim_net_exit);
+
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE(DRV_LICENSE);
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/vfio/mdev/mdev_core.c b/drivers/vfio/mdev/mdev_core.c
index b558d4cfd082..6de97d25a3f8 100644
--- a/drivers/vfio/mdev/mdev_core.c
+++ b/drivers/vfio/mdev/mdev_core.c
@@ -154,6 +154,10 @@ int mdev_register_device(struct device *dev, const struct mdev_parent_ops *ops)
if (!dev)
return -EINVAL;
+ /* Not mandatory, but its absence could be a problem */
+ if (!ops->request)
+ dev_info(dev, "Driver cannot be asked to release device\n");
+
mutex_lock(&parent_list_lock);
/* Check for duplicate */
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index 30964a4e0a28..b52eea128549 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -98,6 +98,18 @@ static int vfio_mdev_mmap(void *device_data, struct vm_area_struct *vma)
return parent->ops->mmap(mdev, vma);
}
+static void vfio_mdev_request(void *device_data, unsigned int count)
+{
+ struct mdev_device *mdev = device_data;
+ struct mdev_parent *parent = mdev->parent;
+
+ if (parent->ops->request)
+ parent->ops->request(mdev, count);
+ else if (count == 0)
+ dev_notice(mdev_dev(mdev),
+ "No mdev vendor driver request callback support, blocked until released by user\n");
+}
+
static const struct vfio_device_ops vfio_mdev_dev_ops = {
.name = "vfio-mdev",
.open = vfio_mdev_open,
@@ -106,6 +118,7 @@ static const struct vfio_device_ops vfio_mdev_dev_ops = {
.read = vfio_mdev_read,
.write = vfio_mdev_write,
.mmap = vfio_mdev_mmap,
+ .request = vfio_mdev_request,
};
static int vfio_mdev_probe(struct device *dev)
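
A vendor driver opts in by filling .request in its mdev_parent_ops. A hedged sketch of such a hookup; my_state and its req_trigger eventfd are hypothetical, loosely modeled on how vfio-pci signals a user-supplied eventfd when the kernel wants the device back:

/* Hypothetical vendor-driver hookup for the new .request callback. */
static void my_mdev_request(struct mdev_device *mdev, unsigned int count)
{
	struct my_state *state = mdev_get_drvdata(mdev);

	/* count grows with every unanswered request; poke the user. */
	if (state->req_trigger)
		eventfd_signal(state->req_trigger, 1);
}

static const struct mdev_parent_ops my_mdev_ops = {
	.owner   = THIS_MODULE,
	/* ... create, remove, open, read, write, ioctl, mmap ... */
	.request = my_mdev_request,
};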
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index e6190173482c..706de3ef94bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -161,8 +161,6 @@ static void vfio_pci_probe_mmaps(struct vfio_pci_device *vdev)
int i;
struct vfio_pci_dummy_resource *dummy_res;
- INIT_LIST_HEAD(&vdev->dummy_resources_list);
-
for (i = 0; i < PCI_STD_NUM_BARS; i++) {
int bar = i + PCI_STD_RESOURCES;
@@ -1635,8 +1633,8 @@ static vm_fault_t vfio_pci_mmap_fault(struct vm_fault *vmf)
mutex_unlock(&vdev->vma_lock);
- if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
ret = VM_FAULT_SIGBUS;
up_out:
@@ -1966,6 +1964,7 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&vdev->igate);
spin_lock_init(&vdev->irqlock);
mutex_init(&vdev->ioeventfds_lock);
+ INIT_LIST_HEAD(&vdev->dummy_resources_list);
INIT_LIST_HEAD(&vdev->ioeventfds_list);
mutex_init(&vdev->vma_lock);
INIT_LIST_HEAD(&vdev->vma_list);
diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c
index 65c61710c0e9..9adcf6a8f888 100644
--- a/drivers/vfio/pci/vfio_pci_nvlink2.c
+++ b/drivers/vfio/pci/vfio_pci_nvlink2.c
@@ -231,7 +231,7 @@ int vfio_pci_nvdia_v100_nvlink2_init(struct vfio_pci_device *vdev)
return -EINVAL;
if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
- return -EINVAL;
+ return -ENODEV;
mem_node = of_find_node_by_phandle(mem_phandle);
if (!mem_node)
@@ -393,7 +393,7 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
int ret;
struct vfio_pci_npu2_data *data;
struct device_node *nvlink_dn;
- u32 nvlink_index = 0;
+ u32 nvlink_index = 0, mem_phandle = 0;
struct pci_dev *npdev = vdev->pdev;
struct device_node *npu_node = pci_device_to_OF_node(npdev);
struct pci_controller *hose = pci_bus_to_host(npdev->bus);
@@ -408,6 +408,9 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev)
if (!pnv_pci_get_gpu_dev(vdev->pdev))
return -ENODEV;
+ if (of_property_read_u32(npu_node, "memory-region", &mem_phandle))
+ return -ENODEV;
+
/*
* NPU2 normally has 8 ATSD registers (for concurrency) and 6 links
* so we can allocate one register per link, using nvlink index as
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2151bc7f87ab..4ad8a35667a7 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -2331,6 +2331,24 @@ int vfio_unregister_notifier(struct device *dev, enum vfio_notify_type type,
}
EXPORT_SYMBOL(vfio_unregister_notifier);
+struct iommu_domain *vfio_group_iommu_domain(struct vfio_group *group)
+{
+ struct vfio_container *container;
+ struct vfio_iommu_driver *driver;
+
+ if (!group)
+ return ERR_PTR(-EINVAL);
+
+ container = group->container;
+ driver = container->iommu_driver;
+ if (likely(driver && driver->ops->group_iommu_domain))
+ return driver->ops->group_iommu_domain(container->iommu_data,
+ group->iommu_group);
+
+ return ERR_PTR(-ENOTTY);
+}
+EXPORT_SYMBOL_GPL(vfio_group_iommu_domain);
+
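vfio_group_iommu_domain() gives other kernel modules a way to look up the IOMMU domain backing a VFIO group, paired with the type1 implementation further down. A hedged caller sketch (my_bind_group() is hypothetical; only the ERR_PTR() contract comes from the code above):

/* Hypothetical in-kernel consumer of the new export. */
static int my_bind_group(struct vfio_group *group)
{
	struct iommu_domain *domain;

	domain = vfio_group_iommu_domain(group);
	if (IS_ERR(domain))
		return PTR_ERR(domain); /* -EINVAL, -ENOTTY or -ENODEV */

	/* ... program the domain, hand it to an accelerator, etc. ... */
	return 0;
}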
/**
* Module/class support
*/
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 67e827638995..0b4dedaa9128 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -2980,6 +2980,29 @@ static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova,
return ret;
}
+static struct iommu_domain *
+vfio_iommu_type1_group_iommu_domain(void *iommu_data,
+ struct iommu_group *iommu_group)
+{
+ struct iommu_domain *domain = ERR_PTR(-ENODEV);
+ struct vfio_iommu *iommu = iommu_data;
+ struct vfio_domain *d;
+
+ if (!iommu || !iommu_group)
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&iommu->lock);
+ list_for_each_entry(d, &iommu->domain_list, next) {
+ if (find_iommu_group(d, iommu_group)) {
+ domain = d->domain;
+ break;
+ }
+ }
+ mutex_unlock(&iommu->lock);
+
+ return domain;
+}
+
static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.name = "vfio-iommu-type1",
.owner = THIS_MODULE,
@@ -2993,6 +3016,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
.register_notifier = vfio_iommu_type1_register_notifier,
.unregister_notifier = vfio_iommu_type1_unregister_notifier,
.dma_rw = vfio_iommu_type1_dma_rw,
+ .group_iommu_domain = vfio_iommu_type1_group_iommu_domain,
};
static int __init vfio_iommu_type1_init(void)
diff --git a/drivers/vfio/virqfd.c b/drivers/vfio/virqfd.c
index 997cb5d0a657..414e98d82b02 100644
--- a/drivers/vfio/virqfd.c
+++ b/drivers/vfio/virqfd.c
@@ -46,6 +46,9 @@ static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void
__poll_t flags = key_to_poll(key);
if (flags & EPOLLIN) {
+ u64 cnt;
+ eventfd_ctx_do_read(virqfd->eventfd, &cnt);
+
/* An event has been signaled, call function */
if ((!virqfd->handler ||
virqfd->handler(virqfd->opaque, virqfd->data)) &&
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 531a00d703cd..c8784dfafdd7 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -863,6 +863,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
size_t len, total_len = 0;
int err;
struct vhost_net_ubuf_ref *ubufs;
+ struct ubuf_info *ubuf;
bool zcopy_used;
int sent_pkts = 0;
@@ -895,9 +896,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
- struct ubuf_info *ubuf;
ubuf = nvq->ubuf_info + nvq->upend_idx;
-
vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
ubuf->callback = vhost_zerocopy_callback;
@@ -927,7 +926,8 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- vhost_net_ubuf_put(ubufs);
+ if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
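
The new check closes a reference-count race: if sendmsg() fails but the zerocopy completion already ran (e.g. the underlying driver transmitted and freed the skb), the callback has set the head's len to VHOST_DMA_DONE_LEN and dropped its ubuf reference, so putting it again would underflow. A schematic of the bad interleaving the check prevents (not driver code):

/*
 * handle_tx_zerocopy()                vhost_zerocopy_callback()
 * --------------------                -------------------------
 * sendmsg() fails after the skb
 * was handed to (and freed by)
 * the underlying device ...
 *                                     heads[ubuf->desc].len =
 *                                             VHOST_DMA_DONE_LEN;
 *                                     vhost_net_ubuf_put(ubufs);
 * zcopy_used && err < 0:
 *         vhost_net_ubuf_put(ubufs);  <-- double put without the check
 */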
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 6ff8a5096691..4ce9f00ae10e 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1643,7 +1643,8 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
if (!vhost_vq_is_setup(vq))
continue;
- if (vhost_scsi_setup_vq_cmds(vq, vq->num))
+ ret = vhost_scsi_setup_vq_cmds(vq, vq->num);
+ if (ret)
goto destroy_vq_cmds;
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 29ed4173f04e..ef688c8c0e0e 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -245,14 +245,10 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
return -EFAULT;
if (vhost_vdpa_config_validate(v, &config))
return -EINVAL;
- buf = kvzalloc(config.len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- if (copy_from_user(buf, c->buf, config.len)) {
- kvfree(buf);
- return -EFAULT;
- }
+ buf = vmemdup_user(c->buf, config.len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
ops->set_config(vdpa, config.off, buf, config.len);
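
vmemdup_user() folds the former kvzalloc() + copy_from_user() + error-path kvfree() sequence into a single call that returns an ERR_PTR() on failure. The pattern in isolation, as a sketch (the foo_* names are hypothetical):

static long foo_set_blob(struct foo_dev *dev, const void __user *ubuf,
			 size_t len)
{
	void *buf;

	buf = vmemdup_user(ubuf, len); /* kvmalloc + copy_from_user */
	if (IS_ERR(buf))
		return PTR_ERR(buf);   /* -ENOMEM or -EFAULT */

	foo_apply_blob(dev, buf, len);
	kvfree(buf);                   /* pairs with vmemdup_user() */
	return 0;
}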
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index a483cec31d5c..5e78fb719602 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -30,7 +30,12 @@
#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
- VHOST_VSOCK_FEATURES = VHOST_FEATURES,
+ VHOST_VSOCK_FEATURES = VHOST_FEATURES |
+ (1ULL << VIRTIO_F_ACCESS_PLATFORM)
+};
+
+enum {
+ VHOST_VSOCK_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};
/* Used to track all the vhost_vsock instances on the system. */
@@ -94,6 +99,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
@@ -449,6 +457,9 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
if (!vhost_vq_get_backend(vq))
goto out;
+ if (!vq_meta_prefetch(vq))
+ goto out;
+
vhost_disable_notify(&vsock->dev, vq);
do {
u32 len;
@@ -766,8 +777,12 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
mutex_lock(&vsock->dev.mutex);
if ((features & (1 << VHOST_F_LOG_ALL)) &&
!vhost_log_access_ok(&vsock->dev)) {
- mutex_unlock(&vsock->dev.mutex);
- return -EFAULT;
+ goto err;
+ }
+
+ if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
+ if (vhost_init_device_iotlb(&vsock->dev, true))
+ goto err;
}
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
@@ -778,6 +793,10 @@ static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
}
mutex_unlock(&vsock->dev.mutex);
return 0;
+
+err:
+ mutex_unlock(&vsock->dev.mutex);
+ return -EFAULT;
}
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
@@ -811,6 +830,18 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
if (copy_from_user(&features, argp, sizeof(features)))
return -EFAULT;
return vhost_vsock_set_features(vsock, features);
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VSOCK_BACKEND_FEATURES;
+ if (copy_to_user(argp, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, argp, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VSOCK_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&vsock->dev, features);
+ return 0;
default:
mutex_lock(&vsock->dev.mutex);
r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
@@ -823,6 +854,34 @@ static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
}
}
+static ssize_t vhost_vsock_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+ int noblock = file->f_flags & O_NONBLOCK;
+
+ return vhost_chr_read_iter(dev, to, noblock);
+}
+
+static ssize_t vhost_vsock_chr_write_iter(struct kiocb *iocb,
+ struct iov_iter *from)
+{
+ struct file *file = iocb->ki_filp;
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_write_iter(dev, from);
+}
+
+static __poll_t vhost_vsock_chr_poll(struct file *file, poll_table *wait)
+{
+ struct vhost_vsock *vsock = file->private_data;
+ struct vhost_dev *dev = &vsock->dev;
+
+ return vhost_chr_poll(file, dev, wait);
+}
+
static const struct file_operations vhost_vsock_fops = {
.owner = THIS_MODULE,
.open = vhost_vsock_dev_open,
@@ -830,6 +889,9 @@ static const struct file_operations vhost_vsock_fops = {
.llseek = noop_llseek,
.unlocked_ioctl = vhost_vsock_dev_ioctl,
.compat_ioctl = compat_ptr_ioctl,
+ .read_iter = vhost_vsock_chr_read_iter,
+ .write_iter = vhost_vsock_chr_write_iter,
+ .poll = vhost_vsock_chr_poll,
};
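
Taken together, the new feature bits, the VHOST_{GET,SET}_BACKEND_FEATURES ioctl cases and the chardev read/write/poll hooks give vhost-vsock the same device-IOTLB plumbing vhost-net already has. A hedged user-space sketch of the negotiation (enable_vsock_iotlb() is illustrative; error handling is trimmed):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_config.h>

/* Hypothetical helper: enable IOTLB v2 messages on a vhost-vsock fd,
 * then negotiate VIRTIO_F_ACCESS_PLATFORM so accesses go via the IOTLB.
 */
static int enable_vsock_iotlb(int vhost_fd)
{
	uint64_t features, backend;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &backend) < 0)
		return -1;
	backend &= 1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2;
	if (ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &backend) < 0)
		return -1;

	if (ioctl(vhost_fd, VHOST_GET_FEATURES, &features) < 0)
		return -1;
	features |= 1ULL << VIRTIO_F_ACCESS_PLATFORM;
	return ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
}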
static struct miscdevice vhost_vsock_misc = {
diff --git a/drivers/video/fbdev/geode/lxfb_ops.c b/drivers/video/fbdev/geode/lxfb_ops.c
index b3a041fce570..32baaf59fcf7 100644
--- a/drivers/video/fbdev/geode/lxfb_ops.c
+++ b/drivers/video/fbdev/geode/lxfb_ops.c
@@ -682,6 +682,7 @@ static void lx_restore_display_ctlr(struct lxfb_par *par)
case DC_DV_CTL:
/* set all ram to dirty */
write_dc(par, i, par->dc[i] | DC_DV_CTL_CLEAR_DV_RAM);
+ break;
case DC_RSVD_1:
case DC_RSVD_2:
diff --git a/drivers/video/fbdev/pm2fb.c b/drivers/video/fbdev/pm2fb.c
index 0642555289e0..27893fa139b0 100644
--- a/drivers/video/fbdev/pm2fb.c
+++ b/drivers/video/fbdev/pm2fb.c
@@ -239,6 +239,7 @@ static u32 to3264(u32 timing, int bpp, int is64)
fallthrough;
case 16:
timing >>= 1;
+ fallthrough;
case 32:
break;
}
diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
index 203c254f8f6c..2fe08b67eda7 100644
--- a/drivers/video/fbdev/ps3fb.c
+++ b/drivers/video/fbdev/ps3fb.c
@@ -1208,7 +1208,7 @@ err:
return retval;
}
-static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
+static void ps3fb_shutdown(struct ps3_system_bus_device *dev)
{
struct fb_info *info = ps3_system_bus_get_drvdata(dev);
u64 xdr_lpar = ps3_mm_phys_to_lpar(__pa(ps3fb_videomemory.address));
@@ -1241,8 +1241,6 @@ static int ps3fb_shutdown(struct ps3_system_bus_device *dev)
lv1_gpu_memory_free(ps3fb.memory_handle);
ps3_close_hv_device(dev);
dev_dbg(&dev->core, " <- %s:%d\n", __func__, __LINE__);
-
- return 0;
}
static struct ps3_system_bus_driver ps3fb_driver = {
diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index 181e2f18beae..9fc9ec4a25f5 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -27,20 +27,74 @@ static bool unplug_online = true;
module_param(unplug_online, bool, 0644);
MODULE_PARM_DESC(unplug_online, "Try to unplug online memory");
-enum virtio_mem_mb_state {
+static bool force_bbm;
+module_param(force_bbm, bool, 0444);
+MODULE_PARM_DESC(force_bbm,
+ "Force Big Block Mode. Default is 0 (auto-selection)");
+
+static unsigned long bbm_block_size;
+module_param(bbm_block_size, ulong, 0444);
+MODULE_PARM_DESC(bbm_block_size,
+ "Big Block size in bytes. Default is 0 (auto-detection).");
+
+static bool bbm_safe_unplug = true;
+module_param(bbm_safe_unplug, bool, 0444);
+MODULE_PARM_DESC(bbm_safe_unplug,
+ "Use a safe unplug mechanism in BBM, avoiding long/endless loops");
+
+/*
+ * virtio-mem currently supports the following modes of operation:
+ *
+ * * Sub Block Mode (SBM): A Linux memory block spans 2..X subblocks (SB). The
+ * size of a subblock is determined based on the device block size, the
+ * pageblock size, and the maximum allocation granularity of the buddy.
+ * Subblocks within a Linux memory block might either be plugged or unplugged.
+ * Memory is added to/removed from Linux MM in Linux memory block granularity.
+ *
+ * * Big Block Mode (BBM): A Big Block (BB) spans 1..X Linux memory blocks.
+ * Memory is added to/removed from Linux MM in Big Block granularity.
+ *
+ * The mode is determined automatically based on the Linux memory block size
+ * and the device block size.
+ *
+ * User space / core MM (auto onlining) is responsible for onlining added
+ * Linux memory blocks - and for selecting a zone. Linux memory blocks are
+ * always onlined separately, and all memory within a Linux memory block is
+ * onlined to the same zone - virtio-mem relies on this behavior.
+ */
+
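Roughly, SBM is chosen whenever a device block fits within a single Linux memory block, and force_bbm overrides that. A simplified sketch of the decision; illustrative only, the real probing code additionally validates bbm_block_size and alignment:

/* Illustration of the mode choice, not the driver's actual init code. */
static bool virtio_mem_wants_sbm(uint64_t device_block_size)
{
	/*
	 * E.g. 2 MiB device blocks vs. 128 MiB Linux memory blocks
	 * -> SBM; 1 GiB device blocks -> BBM with >= 1 GiB big blocks.
	 */
	return !force_bbm &&
	       device_block_size <= memory_block_size_bytes();
}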
+/*
+ * State of a Linux memory block in SBM.
+ */
+enum virtio_mem_sbm_mb_state {
/* Unplugged, not added to Linux. Can be reused later. */
- VIRTIO_MEM_MB_STATE_UNUSED = 0,
+ VIRTIO_MEM_SBM_MB_UNUSED = 0,
/* (Partially) plugged, not added to Linux. Error on add_memory(). */
- VIRTIO_MEM_MB_STATE_PLUGGED,
+ VIRTIO_MEM_SBM_MB_PLUGGED,
/* Fully plugged, fully added to Linux, offline. */
- VIRTIO_MEM_MB_STATE_OFFLINE,
+ VIRTIO_MEM_SBM_MB_OFFLINE,
/* Partially plugged, fully added to Linux, offline. */
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL,
/* Fully plugged, fully added to Linux, online. */
- VIRTIO_MEM_MB_STATE_ONLINE,
+ VIRTIO_MEM_SBM_MB_ONLINE,
/* Partially plugged, fully added to Linux, online. */
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL,
- VIRTIO_MEM_MB_STATE_COUNT
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL,
+ VIRTIO_MEM_SBM_MB_COUNT
+};
+
+/*
+ * State of a Big Block (BB) in BBM, covering 1..X Linux memory blocks.
+ */
+enum virtio_mem_bbm_bb_state {
+ /* Unplugged, not added to Linux. Can be reused later. */
+ VIRTIO_MEM_BBM_BB_UNUSED = 0,
+ /* Plugged, not added to Linux. Error on add_memory(). */
+ VIRTIO_MEM_BBM_BB_PLUGGED,
+ /* Plugged and added to Linux. */
+ VIRTIO_MEM_BBM_BB_ADDED,
+ /* All online parts are fake-offline, ready to remove. */
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE,
+ VIRTIO_MEM_BBM_BB_COUNT
};
struct virtio_mem {
@@ -51,6 +105,7 @@ struct virtio_mem {
/* Workqueue that processes the plug/unplug requests. */
struct work_struct wq;
+ atomic_t wq_active;
atomic_t config_changed;
/* Virtqueue for guest->host requests. */
@@ -70,27 +125,13 @@ struct virtio_mem {
/* The device block size (for communicating with the device). */
uint64_t device_block_size;
- /* The translated node id. NUMA_NO_NODE in case not specified. */
+ /* The determined node id for all memory of the device. */
int nid;
/* Physical start address of the memory region. */
uint64_t addr;
/* Maximum region size in bytes. */
uint64_t region_size;
- /* The subblock size. */
- uint64_t subblock_size;
- /* The number of subblocks per memory block. */
- uint32_t nb_sb_per_mb;
-
- /* Id of the first memory block of this device. */
- unsigned long first_mb_id;
- /* Id of the last memory block of this device. */
- unsigned long last_mb_id;
- /* Id of the last usable memory block of this device. */
- unsigned long last_usable_mb_id;
- /* Id of the next memory bock to prepare when needed. */
- unsigned long next_mb_id;
-
/* The parent resource for all memory added via this device. */
struct resource *parent_resource;
/*
@@ -99,31 +140,79 @@ struct virtio_mem {
*/
const char *resource_name;
- /* Summary of all memory block states. */
- unsigned long nb_mb_state[VIRTIO_MEM_MB_STATE_COUNT];
-#define VIRTIO_MEM_NB_OFFLINE_THRESHOLD 10
-
- /*
- * One byte state per memory block.
- *
- * Allocated via vmalloc(). When preparing new blocks, resized
- * (alloc+copy+free) when needed (crossing pages with the next mb).
- * (when crossing pages).
- *
- * With 128MB memory blocks, we have states for 512GB of memory in one
- * page.
- */
- uint8_t *mb_state;
-
/*
- * $nb_sb_per_mb bit per memory block. Handled similar to mb_state.
- *
- * With 4MB subblocks, we manage 128GB of memory in one page.
+ * We don't want to add too much memory if it's not getting onlined,
+ * to avoid running OOM. Besides this threshold, we always allow at
+ * least two offline blocks at a time (whichever is bigger).
*/
- unsigned long *sb_bitmap;
+#define VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD (1024 * 1024 * 1024)
+ atomic64_t offline_size;
+ uint64_t offline_threshold;
+
+ /* If set, the driver is in SBM, otherwise in BBM. */
+ bool in_sbm;
+
+ union {
+ struct {
+ /* Id of the first memory block of this device. */
+ unsigned long first_mb_id;
+ /* Id of the last usable memory block of this device. */
+ unsigned long last_usable_mb_id;
+ /* Id of the next memory block to prepare when needed. */
+ unsigned long next_mb_id;
+
+ /* The subblock size. */
+ uint64_t sb_size;
+ /* The number of subblocks per Linux memory block. */
+ uint32_t sbs_per_mb;
+
+ /* Summary of all memory block states. */
+ unsigned long mb_count[VIRTIO_MEM_SBM_MB_COUNT];
+
+ /*
+ * One byte state per memory block. Allocated via
+ * vmalloc(). Resized (alloc+copy+free) on demand.
+ *
+ * With 128 MiB memory blocks, we have states for 512
+ * GiB of memory in one 4 KiB page.
+ */
+ uint8_t *mb_states;
+
+ /*
+ * Bitmap: one bit per subblock. Allocated similar to
+ * sbm.mb_states.
+ *
+ * A set bit means the corresponding subblock is
+ * plugged, otherwise it's unplugged.
+ *
+ * With 4 MiB subblocks, we manage 128 GiB of memory
+ * in one 4 KiB page.
+ */
+ unsigned long *sb_states;
+ } sbm;
+
+ struct {
+ /* Id of the first big block of this device. */
+ unsigned long first_bb_id;
+ /* Id of the last usable big block of this device. */
+ unsigned long last_usable_bb_id;
+ /* Id of the next big block to prepare when needed. */
+ unsigned long next_bb_id;
+
+ /* Summary of all big block states. */
+ unsigned long bb_count[VIRTIO_MEM_BBM_BB_COUNT];
+
+ /* One byte state per big block. See sbm.mb_states. */
+ uint8_t *bb_states;
+
+ /* The block size used for plugging/adding/removing. */
+ uint64_t bb_size;
+ } bbm;
+ };
/*
- * Mutex that protects the nb_mb_state, mb_state, and sb_bitmap.
+ * Mutex that protects the sbm.mb_count, sbm.mb_states,
+ * sbm.sb_states, bbm.bb_count, and bbm.bb_states
*
* When this lock is held the pointers can't change, ONLINE and
* OFFLINE blocks can't change the state and no subblocks will get
@@ -160,6 +249,11 @@ static DEFINE_MUTEX(virtio_mem_mutex);
static LIST_HEAD(virtio_mem_devices);
static void virtio_mem_online_page_cb(struct page *page, unsigned int order);
+static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
+ unsigned long nr_pages);
+static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
+ unsigned long nr_pages);
+static void virtio_mem_retry(struct virtio_mem *vm);
/*
* Register a virtio-mem device so it will be considered for the online_page
@@ -213,6 +307,24 @@ static unsigned long virtio_mem_mb_id_to_phys(unsigned long mb_id)
}
/*
+ * Calculate the big block id of a given address.
+ */
+static unsigned long virtio_mem_phys_to_bb_id(struct virtio_mem *vm,
+ uint64_t addr)
+{
+ return addr / vm->bbm.bb_size;
+}
+
+/*
+ * Calculate the physical start address of a given big block id.
+ */
+static uint64_t virtio_mem_bb_id_to_phys(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ return bb_id * vm->bbm.bb_size;
+}
+
+/*
* Calculate the subblock id of a given address.
*/
static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
@@ -221,89 +333,164 @@ static unsigned long virtio_mem_phys_to_sb_id(struct virtio_mem *vm,
const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
const unsigned long mb_addr = virtio_mem_mb_id_to_phys(mb_id);
- return (addr - mb_addr) / vm->subblock_size;
+ return (addr - mb_addr) / vm->sbm.sb_size;
+}
+
+/*
+ * Set the state of a big block, taking care of the state counter.
+ */
+static void virtio_mem_bbm_set_bb_state(struct virtio_mem *vm,
+ unsigned long bb_id,
+ enum virtio_mem_bbm_bb_state state)
+{
+ const unsigned long idx = bb_id - vm->bbm.first_bb_id;
+ enum virtio_mem_bbm_bb_state old_state;
+
+ old_state = vm->bbm.bb_states[idx];
+ vm->bbm.bb_states[idx] = state;
+
+ BUG_ON(vm->bbm.bb_count[old_state] == 0);
+ vm->bbm.bb_count[old_state]--;
+ vm->bbm.bb_count[state]++;
+}
+
+/*
+ * Get the state of a big block.
+ */
+static enum virtio_mem_bbm_bb_state virtio_mem_bbm_get_bb_state(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ return vm->bbm.bb_states[bb_id - vm->bbm.first_bb_id];
+}
+
+/*
+ * Prepare the big block state array for the next big block.
+ */
+static int virtio_mem_bbm_bb_states_prepare_next_bb(struct virtio_mem *vm)
+{
+ unsigned long old_bytes = vm->bbm.next_bb_id - vm->bbm.first_bb_id;
+ unsigned long new_bytes = old_bytes + 1;
+ int old_pages = PFN_UP(old_bytes);
+ int new_pages = PFN_UP(new_bytes);
+ uint8_t *new_array;
+
+ if (vm->bbm.bb_states && old_pages == new_pages)
+ return 0;
+
+ new_array = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_array)
+ return -ENOMEM;
+
+ mutex_lock(&vm->hotplug_mutex);
+ if (vm->bbm.bb_states)
+ memcpy(new_array, vm->bbm.bb_states, old_pages * PAGE_SIZE);
+ vfree(vm->bbm.bb_states);
+ vm->bbm.bb_states = new_array;
+ mutex_unlock(&vm->hotplug_mutex);
+
+ return 0;
}
+#define virtio_mem_bbm_for_each_bb(_vm, _bb_id, _state) \
+ for (_bb_id = _vm->bbm.first_bb_id; \
+ _bb_id < _vm->bbm.next_bb_id && _vm->bbm.bb_count[_state]; \
+ _bb_id++) \
+ if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
+
+#define virtio_mem_bbm_for_each_bb_rev(_vm, _bb_id, _state) \
+ for (_bb_id = _vm->bbm.next_bb_id - 1; \
+ _bb_id >= _vm->bbm.first_bb_id && _vm->bbm.bb_count[_state]; \
+ _bb_id--) \
+ if (virtio_mem_bbm_get_bb_state(_vm, _bb_id) == _state)
+
/*
* Set the state of a memory block, taking care of the state counter.
*/
-static void virtio_mem_mb_set_state(struct virtio_mem *vm, unsigned long mb_id,
- enum virtio_mem_mb_state state)
+static void virtio_mem_sbm_set_mb_state(struct virtio_mem *vm,
+ unsigned long mb_id, uint8_t state)
{
- const unsigned long idx = mb_id - vm->first_mb_id;
- enum virtio_mem_mb_state old_state;
+ const unsigned long idx = mb_id - vm->sbm.first_mb_id;
+ uint8_t old_state;
- old_state = vm->mb_state[idx];
- vm->mb_state[idx] = state;
+ old_state = vm->sbm.mb_states[idx];
+ vm->sbm.mb_states[idx] = state;
- BUG_ON(vm->nb_mb_state[old_state] == 0);
- vm->nb_mb_state[old_state]--;
- vm->nb_mb_state[state]++;
+ BUG_ON(vm->sbm.mb_count[old_state] == 0);
+ vm->sbm.mb_count[old_state]--;
+ vm->sbm.mb_count[state]++;
}
/*
* Get the state of a memory block.
*/
-static enum virtio_mem_mb_state virtio_mem_mb_get_state(struct virtio_mem *vm,
- unsigned long mb_id)
+static uint8_t virtio_mem_sbm_get_mb_state(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- const unsigned long idx = mb_id - vm->first_mb_id;
+ const unsigned long idx = mb_id - vm->sbm.first_mb_id;
- return vm->mb_state[idx];
+ return vm->sbm.mb_states[idx];
}
/*
* Prepare the state array for the next memory block.
*/
-static int virtio_mem_mb_state_prepare_next_mb(struct virtio_mem *vm)
+static int virtio_mem_sbm_mb_states_prepare_next_mb(struct virtio_mem *vm)
{
- unsigned long old_bytes = vm->next_mb_id - vm->first_mb_id + 1;
- unsigned long new_bytes = vm->next_mb_id - vm->first_mb_id + 2;
- int old_pages = PFN_UP(old_bytes);
- int new_pages = PFN_UP(new_bytes);
- uint8_t *new_mb_state;
+ int old_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id);
+ int new_pages = PFN_UP(vm->sbm.next_mb_id - vm->sbm.first_mb_id + 1);
+ uint8_t *new_array;
- if (vm->mb_state && old_pages == new_pages)
+ if (vm->sbm.mb_states && old_pages == new_pages)
return 0;
- new_mb_state = vzalloc(new_pages * PAGE_SIZE);
- if (!new_mb_state)
+ new_array = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_array)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
- if (vm->mb_state)
- memcpy(new_mb_state, vm->mb_state, old_pages * PAGE_SIZE);
- vfree(vm->mb_state);
- vm->mb_state = new_mb_state;
+ if (vm->sbm.mb_states)
+ memcpy(new_array, vm->sbm.mb_states, old_pages * PAGE_SIZE);
+ vfree(vm->sbm.mb_states);
+ vm->sbm.mb_states = new_array;
mutex_unlock(&vm->hotplug_mutex);
return 0;
}
-#define virtio_mem_for_each_mb_state(_vm, _mb_id, _state) \
- for (_mb_id = _vm->first_mb_id; \
- _mb_id < _vm->next_mb_id && _vm->nb_mb_state[_state]; \
+#define virtio_mem_sbm_for_each_mb(_vm, _mb_id, _state) \
+ for (_mb_id = _vm->sbm.first_mb_id; \
+ _mb_id < _vm->sbm.next_mb_id && _vm->sbm.mb_count[_state]; \
_mb_id++) \
- if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+ if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
-#define virtio_mem_for_each_mb_state_rev(_vm, _mb_id, _state) \
- for (_mb_id = _vm->next_mb_id - 1; \
- _mb_id >= _vm->first_mb_id && _vm->nb_mb_state[_state]; \
+#define virtio_mem_sbm_for_each_mb_rev(_vm, _mb_id, _state) \
+ for (_mb_id = _vm->sbm.next_mb_id - 1; \
+ _mb_id >= _vm->sbm.first_mb_id && _vm->sbm.mb_count[_state]; \
_mb_id--) \
- if (virtio_mem_mb_get_state(_vm, _mb_id) == _state)
+ if (virtio_mem_sbm_get_mb_state(_vm, _mb_id) == _state)
+
+/*
+ * Calculate the bit number in the subblock bitmap for the given subblock
+ * inside the given memory block.
+ */
+static int virtio_mem_sbm_sb_state_bit_nr(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id)
+{
+ return (mb_id - vm->sbm.first_mb_id) * vm->sbm.sbs_per_mb + sb_id;
+}
/*
* Mark all selected subblocks plugged.
*
* Will not modify the state of the memory block.
*/
-static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static void virtio_mem_sbm_set_sb_plugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
- __bitmap_set(vm->sb_bitmap, bit, count);
+ __bitmap_set(vm->sbm.sb_states, bit, count);
}
/*
@@ -311,105 +498,114 @@ static void virtio_mem_mb_set_sb_plugged(struct virtio_mem *vm,
*
* Will not modify the state of the memory block.
*/
-static void virtio_mem_mb_set_sb_unplugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static void virtio_mem_sbm_set_sb_unplugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
- __bitmap_clear(vm->sb_bitmap, bit, count);
+ __bitmap_clear(vm->sbm.sb_states, bit, count);
}
/*
* Test if all selected subblocks are plugged.
*/
-static bool virtio_mem_mb_test_sb_plugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static bool virtio_mem_sbm_test_sb_plugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
if (count == 1)
- return test_bit(bit, vm->sb_bitmap);
+ return test_bit(bit, vm->sbm.sb_states);
/* TODO: Helper similar to bitmap_set() */
- return find_next_zero_bit(vm->sb_bitmap, bit + count, bit) >=
+ return find_next_zero_bit(vm->sbm.sb_states, bit + count, bit) >=
bit + count;
}
/*
* Test if all selected subblocks are unplugged.
*/
-static bool virtio_mem_mb_test_sb_unplugged(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static bool virtio_mem_sbm_test_sb_unplugged(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb + sb_id;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, sb_id);
/* TODO: Helper similar to bitmap_set() */
- return find_next_bit(vm->sb_bitmap, bit + count, bit) >= bit + count;
+ return find_next_bit(vm->sbm.sb_states, bit + count, bit) >=
+ bit + count;
}
/*
- * Find the first unplugged subblock. Returns vm->nb_sb_per_mb in case there is
+ * Find the first unplugged subblock. Returns vm->sbm.sbs_per_mb in case there is
* none.
*/
-static int virtio_mem_mb_first_unplugged_sb(struct virtio_mem *vm,
+static int virtio_mem_sbm_first_unplugged_sb(struct virtio_mem *vm,
unsigned long mb_id)
{
- const int bit = (mb_id - vm->first_mb_id) * vm->nb_sb_per_mb;
+ const int bit = virtio_mem_sbm_sb_state_bit_nr(vm, mb_id, 0);
- return find_next_zero_bit(vm->sb_bitmap, bit + vm->nb_sb_per_mb, bit) -
- bit;
+ return find_next_zero_bit(vm->sbm.sb_states,
+ bit + vm->sbm.sbs_per_mb, bit) - bit;
}
/*
* Prepare the subblock bitmap for the next memory block.
*/
-static int virtio_mem_sb_bitmap_prepare_next_mb(struct virtio_mem *vm)
+static int virtio_mem_sbm_sb_states_prepare_next_mb(struct virtio_mem *vm)
{
- const unsigned long old_nb_mb = vm->next_mb_id - vm->first_mb_id;
- const unsigned long old_nb_bits = old_nb_mb * vm->nb_sb_per_mb;
- const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->nb_sb_per_mb;
+ const unsigned long old_nb_mb = vm->sbm.next_mb_id - vm->sbm.first_mb_id;
+ const unsigned long old_nb_bits = old_nb_mb * vm->sbm.sbs_per_mb;
+ const unsigned long new_nb_bits = (old_nb_mb + 1) * vm->sbm.sbs_per_mb;
int old_pages = PFN_UP(BITS_TO_LONGS(old_nb_bits) * sizeof(long));
int new_pages = PFN_UP(BITS_TO_LONGS(new_nb_bits) * sizeof(long));
- unsigned long *new_sb_bitmap, *old_sb_bitmap;
+ unsigned long *new_bitmap, *old_bitmap;
- if (vm->sb_bitmap && old_pages == new_pages)
+ if (vm->sbm.sb_states && old_pages == new_pages)
return 0;
- new_sb_bitmap = vzalloc(new_pages * PAGE_SIZE);
- if (!new_sb_bitmap)
+ new_bitmap = vzalloc(new_pages * PAGE_SIZE);
+ if (!new_bitmap)
return -ENOMEM;
mutex_lock(&vm->hotplug_mutex);
- if (new_sb_bitmap)
- memcpy(new_sb_bitmap, vm->sb_bitmap, old_pages * PAGE_SIZE);
+ if (vm->sbm.sb_states)
+ memcpy(new_bitmap, vm->sbm.sb_states, old_pages * PAGE_SIZE);
- old_sb_bitmap = vm->sb_bitmap;
- vm->sb_bitmap = new_sb_bitmap;
+ old_bitmap = vm->sbm.sb_states;
+ vm->sbm.sb_states = new_bitmap;
mutex_unlock(&vm->hotplug_mutex);
- vfree(old_sb_bitmap);
+ vfree(old_bitmap);
return 0;
}
/*
- * Try to add a memory block to Linux. This will usually only fail
- * if out of memory.
+ * Test if we could add memory without creating too much offline memory -
+ * to avoid running OOM if onlining of the memory is deferred.
+ */
+static bool virtio_mem_could_add_memory(struct virtio_mem *vm, uint64_t size)
+{
+ if (WARN_ON_ONCE(size > vm->offline_threshold))
+ return false;
+
+ return atomic64_read(&vm->offline_size) + size <= vm->offline_threshold;
+}
+
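With the default 1 GiB threshold, the check works out as follows (numbers purely illustrative):

/*
 * offline_threshold = 1 GiB, offline_size currently 768 MiB:
 *   adding a 128 MiB memory block: 768M + 128M <= 1G -> allowed
 *   adding a 512 MiB big block:    768M + 512M >  1G -> deferred
 * A deferred request isn't lost: the MEM_ONLINE notifier retries once
 * offline_size has dropped to half the threshold (unless the workqueue
 * is already active).
 */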
+/*
+ * Try adding memory to Linux. Will usually only fail if out of memory.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
- * Will not modify the state of the memory block.
+ * Will not modify the state of memory blocks in virtio-mem.
*/
-static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
+static int virtio_mem_add_memory(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
{
- const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
- int nid = vm->nid;
-
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(addr);
+ int rc;
/*
* When force-unloading the driver and we still have memory added to
@@ -422,53 +618,155 @@ static int virtio_mem_mb_add(struct virtio_mem *vm, unsigned long mb_id)
return -ENOMEM;
}
- dev_dbg(&vm->vdev->dev, "adding memory block: %lu\n", mb_id);
- return add_memory_driver_managed(nid, addr, memory_block_size_bytes(),
- vm->resource_name,
- MEMHP_MERGE_RESOURCE);
+ dev_dbg(&vm->vdev->dev, "adding memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+ /* Memory might get onlined immediately. */
+ atomic64_add(size, &vm->offline_size);
+ rc = add_memory_driver_managed(vm->nid, addr, size, vm->resource_name,
+ MEMHP_MERGE_RESOURCE);
+ if (rc) {
+ atomic64_sub(size, &vm->offline_size);
+ dev_warn(&vm->vdev->dev, "adding memory failed: %d\n", rc);
+ /*
+ * TODO: Linux MM does not properly clean up yet in all cases
+ * where adding of memory failed - especially on -ENOMEM.
+ */
+ }
+ return rc;
+}
+
+/*
+ * See virtio_mem_add_memory(): Try adding a single Linux memory block.
+ */
+static int virtio_mem_sbm_add_mb(struct virtio_mem *vm, unsigned long mb_id)
+{
+ const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
+ const uint64_t size = memory_block_size_bytes();
+
+ return virtio_mem_add_memory(vm, addr, size);
+}
+
+/*
+ * See virtio_mem_add_memory(): Try adding a big block.
+ */
+static int virtio_mem_bbm_add_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_add_memory(vm, addr, size);
}
/*
- * Try to remove a memory block from Linux. Will only fail if the memory block
- * is not offline.
+ * Try removing memory from Linux. Will only fail if memory blocks aren't
+ * offline.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
- * Will not modify the state of the memory block.
+ * Will not modify the state of memory blocks in virtio-mem.
*/
-static int virtio_mem_mb_remove(struct virtio_mem *vm, unsigned long mb_id)
+static int virtio_mem_remove_memory(struct virtio_mem *vm, uint64_t addr,
+ uint64_t size)
+{
+ int rc;
+
+ dev_dbg(&vm->vdev->dev, "removing memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+ rc = remove_memory(vm->nid, addr, size);
+ if (!rc) {
+ atomic64_sub(size, &vm->offline_size);
+ /*
+ * We might have freed up memory we can now unplug, retry
+ * immediately instead of waiting.
+ */
+ virtio_mem_retry(vm);
+ } else {
+ dev_dbg(&vm->vdev->dev, "removing memory failed: %d\n", rc);
+ }
+ return rc;
+}
+
+/*
+ * See virtio_mem_remove_memory(): Try removing a single Linux memory block.
+ */
+static int virtio_mem_sbm_remove_mb(struct virtio_mem *vm, unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
- int nid = vm->nid;
+ const uint64_t size = memory_block_size_bytes();
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(addr);
+ return virtio_mem_remove_memory(vm, addr, size);
+}
- dev_dbg(&vm->vdev->dev, "removing memory block: %lu\n", mb_id);
- return remove_memory(nid, addr, memory_block_size_bytes());
+/*
+ * See virtio_mem_remove_memory(): Try to remove all Linux memory blocks covered
+ * by the big block.
+ */
+static int virtio_mem_bbm_remove_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_remove_memory(vm, addr, size);
}
/*
- * Try to offline and remove a memory block from Linux.
+ * Try offlining and removing memory from Linux.
*
* Must not be called with the vm->hotplug_mutex held (possible deadlock with
* onlining code).
*
- * Will not modify the state of the memory block.
+ * Will not modify the state of memory blocks in virtio-mem.
*/
-static int virtio_mem_mb_offline_and_remove(struct virtio_mem *vm,
- unsigned long mb_id)
+static int virtio_mem_offline_and_remove_memory(struct virtio_mem *vm,
+ uint64_t addr,
+ uint64_t size)
+{
+ int rc;
+
+ dev_dbg(&vm->vdev->dev,
+ "offlining and removing memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
+ rc = offline_and_remove_memory(vm->nid, addr, size);
+ if (!rc) {
+ atomic64_sub(size, &vm->offline_size);
+ /*
+ * We might have freed up memory we can now unplug, retry
+ * immediately instead of waiting.
+ */
+ virtio_mem_retry(vm);
+ } else {
+ dev_dbg(&vm->vdev->dev,
+ "offlining and removing memory failed: %d\n", rc);
+ }
+ return rc;
+}
+
+/*
+ * See virtio_mem_offline_and_remove_memory(): Try offlining and removing
+ * a single Linux memory block.
+ */
+static int virtio_mem_sbm_offline_and_remove_mb(struct virtio_mem *vm,
+ unsigned long mb_id)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id);
- int nid = vm->nid;
+ const uint64_t size = memory_block_size_bytes();
+
+ return virtio_mem_offline_and_remove_memory(vm, addr, size);
+}
- if (nid == NUMA_NO_NODE)
- nid = memory_add_physaddr_to_nid(addr);
+/*
+ * See virtio_mem_offline_and_remove_memory(): Try to offline and remove
+ * all Linux memory blocks covered by the big block.
+ */
+static int virtio_mem_bbm_offline_and_remove_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
- dev_dbg(&vm->vdev->dev, "offlining and removing memory block: %lu\n",
- mb_id);
- return offline_and_remove_memory(nid, addr, memory_block_size_bytes());
+ return virtio_mem_offline_and_remove_memory(vm, addr, size);
}
/*
@@ -499,31 +797,28 @@ static int virtio_mem_translate_node_id(struct virtio_mem *vm, uint16_t node_id)
* Test if a virtio-mem device overlaps with the given range. Can be called
* from (notifier) callbacks lockless.
*/
-static bool virtio_mem_overlaps_range(struct virtio_mem *vm,
- unsigned long start, unsigned long size)
+static bool virtio_mem_overlaps_range(struct virtio_mem *vm, uint64_t start,
+ uint64_t size)
{
- unsigned long dev_start = virtio_mem_mb_id_to_phys(vm->first_mb_id);
- unsigned long dev_end = virtio_mem_mb_id_to_phys(vm->last_mb_id) +
- memory_block_size_bytes();
-
- return start < dev_end && dev_start < start + size;
+ return start < vm->addr + vm->region_size && vm->addr < start + size;
}
/*
- * Test if a virtio-mem device owns a memory block. Can be called from
+ * Test if a virtio-mem device contains a given range. Can be called from
* (notifier) callbacks lockless.
*/
-static bool virtio_mem_owned_mb(struct virtio_mem *vm, unsigned long mb_id)
+static bool virtio_mem_contains_range(struct virtio_mem *vm, uint64_t start,
+ uint64_t size)
{
- return mb_id >= vm->first_mb_id && mb_id <= vm->last_mb_id;
+ return start >= vm->addr && start + size <= vm->addr + vm->region_size;
}
-static int virtio_mem_notify_going_online(struct virtio_mem *vm,
- unsigned long mb_id)
+static int virtio_mem_sbm_notify_going_online(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- switch (virtio_mem_mb_get_state(vm, mb_id)) {
- case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
- case VIRTIO_MEM_MB_STATE_OFFLINE:
+ switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
+ case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
+ case VIRTIO_MEM_SBM_MB_OFFLINE:
return NOTIFY_OK;
default:
break;
@@ -533,108 +828,100 @@ static int virtio_mem_notify_going_online(struct virtio_mem *vm,
return NOTIFY_BAD;
}
-static void virtio_mem_notify_offline(struct virtio_mem *vm,
- unsigned long mb_id)
+static void virtio_mem_sbm_notify_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- switch (virtio_mem_mb_get_state(vm, mb_id)) {
- case VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL:
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
+ case VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
break;
- case VIRTIO_MEM_MB_STATE_ONLINE:
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE);
+ case VIRTIO_MEM_SBM_MB_ONLINE:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE);
break;
default:
BUG();
break;
}
-
- /*
- * Trigger the workqueue, maybe we can now unplug memory. Also,
- * when we offline and remove a memory block, this will re-trigger
- * us immediately - which is often nice because the removal of
- * the memory block (e.g., memmap) might have freed up memory
- * on other memory blocks we manage.
- */
- virtio_mem_retry(vm);
}
-static void virtio_mem_notify_online(struct virtio_mem *vm, unsigned long mb_id)
+static void virtio_mem_sbm_notify_online(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- unsigned long nb_offline;
-
- switch (virtio_mem_mb_get_state(vm, mb_id)) {
- case VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL:
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+ switch (virtio_mem_sbm_get_mb_state(vm, mb_id)) {
+ case VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
break;
- case VIRTIO_MEM_MB_STATE_OFFLINE:
- virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_ONLINE);
+ case VIRTIO_MEM_SBM_MB_OFFLINE:
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE);
break;
default:
BUG();
break;
}
- nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
-
- /* see if we can add new blocks now that we onlined one block */
- if (nb_offline == VIRTIO_MEM_NB_OFFLINE_THRESHOLD - 1)
- virtio_mem_retry(vm);
}
-static void virtio_mem_notify_going_offline(struct virtio_mem *vm,
- unsigned long mb_id)
+static void virtio_mem_sbm_notify_going_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
- struct page *page;
+ const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
unsigned long pfn;
- int sb_id, i;
+ int sb_id;
- for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
continue;
- /*
- * Drop our reference to the pages so the memory can get
- * offlined and add the unplugged pages to the managed
- * page counters (so offlining code can correctly subtract
- * them again).
- */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
- for (i = 0; i < nr_pages; i++) {
- page = pfn_to_page(pfn + i);
- if (WARN_ON(!page_ref_dec_and_test(page)))
- dump_page(page, "unplugged page referenced");
- }
+ sb_id * vm->sbm.sb_size);
+ virtio_mem_fake_offline_going_offline(pfn, nr_pages);
}
}
-static void virtio_mem_notify_cancel_offline(struct virtio_mem *vm,
- unsigned long mb_id)
+static void virtio_mem_sbm_notify_cancel_offline(struct virtio_mem *vm,
+ unsigned long mb_id)
{
- const unsigned long nr_pages = PFN_DOWN(vm->subblock_size);
+ const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size);
unsigned long pfn;
- int sb_id, i;
+ int sb_id;
- for (sb_id = 0; sb_id < vm->nb_sb_per_mb; sb_id++) {
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ for (sb_id = 0; sb_id < vm->sbm.sbs_per_mb; sb_id++) {
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
continue;
- /*
- * Get the reference we dropped when going offline and
- * subtract the unplugged pages from the managed page
- * counters.
- */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
- for (i = 0; i < nr_pages; i++)
- page_ref_inc(pfn_to_page(pfn + i));
+ sb_id * vm->sbm.sb_size);
+ virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
}
}
+static void virtio_mem_bbm_notify_going_offline(struct virtio_mem *vm,
+ unsigned long bb_id,
+ unsigned long pfn,
+ unsigned long nr_pages)
+{
+ /*
+ * When marked as "fake-offline", all online memory of this device block
+ * is allocated by us. Otherwise, we don't have any memory allocated.
+ */
+ if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
+ return;
+ virtio_mem_fake_offline_going_offline(pfn, nr_pages);
+}
+
+static void virtio_mem_bbm_notify_cancel_offline(struct virtio_mem *vm,
+ unsigned long bb_id,
+ unsigned long pfn,
+ unsigned long nr_pages)
+{
+ if (virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE)
+ return;
+ virtio_mem_fake_offline_cancel_offline(pfn, nr_pages);
+}
+
/*
* This callback will either be called synchronously from add_memory() or
* asynchronously (e.g., triggered via user space). We have to be careful
@@ -648,20 +935,33 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
struct memory_notify *mhp = arg;
const unsigned long start = PFN_PHYS(mhp->start_pfn);
const unsigned long size = PFN_PHYS(mhp->nr_pages);
- const unsigned long mb_id = virtio_mem_phys_to_mb_id(start);
int rc = NOTIFY_OK;
+ unsigned long id;
if (!virtio_mem_overlaps_range(vm, start, size))
return NOTIFY_DONE;
- /*
- * Memory is onlined/offlined in memory block granularity. We cannot
- * cross virtio-mem device boundaries and memory block boundaries. Bail
- * out if this ever changes.
- */
- if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
- !IS_ALIGNED(start, memory_block_size_bytes())))
- return NOTIFY_BAD;
+ if (vm->in_sbm) {
+ id = virtio_mem_phys_to_mb_id(start);
+ /*
+ * In SBM, we add memory in separate memory blocks - we expect
+ * it to be onlined/offlined in the same granularity. Bail out
+ * if this ever changes.
+ */
+ if (WARN_ON_ONCE(size != memory_block_size_bytes() ||
+ !IS_ALIGNED(start, memory_block_size_bytes())))
+ return NOTIFY_BAD;
+ } else {
+ id = virtio_mem_phys_to_bb_id(vm, start);
+ /*
+ * In BBM, we only care about onlining/offlining happening
+ * within a single big block, we don't care about the
+ * actual granularity as we don't track individual Linux
+ * memory blocks.
+ */
+ if (WARN_ON_ONCE(id != virtio_mem_phys_to_bb_id(vm, start + size - 1)))
+ return NOTIFY_BAD;
+ }
/*
* Avoid circular locking lockdep warnings. We lock the mutex
@@ -680,7 +980,12 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
break;
}
vm->hotplug_active = true;
- virtio_mem_notify_going_offline(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_going_offline(vm, id);
+ else
+ virtio_mem_bbm_notify_going_offline(vm, id,
+ mhp->start_pfn,
+ mhp->nr_pages);
break;
case MEM_GOING_ONLINE:
mutex_lock(&vm->hotplug_mutex);
@@ -690,22 +995,51 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
break;
}
vm->hotplug_active = true;
- rc = virtio_mem_notify_going_online(vm, mb_id);
+ if (vm->in_sbm)
+ rc = virtio_mem_sbm_notify_going_online(vm, id);
break;
case MEM_OFFLINE:
- virtio_mem_notify_offline(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_offline(vm, id);
+
+ atomic64_add(size, &vm->offline_size);
+ /*
+ * Trigger the workqueue. Now that we have some offline memory,
+ * maybe we can handle pending unplug requests.
+ */
+ if (!unplug_online)
+ virtio_mem_retry(vm);
+
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_ONLINE:
- virtio_mem_notify_online(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_online(vm, id);
+
+ atomic64_sub(size, &vm->offline_size);
+ /*
+ * Start adding more memory once we onlined half of our
+ * threshold. Don't trigger if it's possibly due to our action
+ * (e.g., us adding memory which gets onlined immediately from
+ * the core).
+ */
+ if (!atomic_read(&vm->wq_active) &&
+ virtio_mem_could_add_memory(vm, vm->offline_threshold / 2))
+ virtio_mem_retry(vm);
+
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
case MEM_CANCEL_OFFLINE:
if (!vm->hotplug_active)
break;
- virtio_mem_notify_cancel_offline(vm, mb_id);
+ if (vm->in_sbm)
+ virtio_mem_sbm_notify_cancel_offline(vm, id);
+ else
+ virtio_mem_bbm_notify_cancel_offline(vm, id,
+ mhp->start_pfn,
+ mhp->nr_pages);
vm->hotplug_active = false;
mutex_unlock(&vm->hotplug_mutex);
break;
@@ -729,7 +1063,7 @@ static int virtio_mem_memory_notifier_cb(struct notifier_block *nb,
* (via generic_online_page()) using PageDirty().
*/
static void virtio_mem_set_fake_offline(unsigned long pfn,
- unsigned int nr_pages, bool onlined)
+ unsigned long nr_pages, bool onlined)
{
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -748,7 +1082,7 @@ static void virtio_mem_set_fake_offline(unsigned long pfn,
* (via generic_online_page()), clear PageDirty().
*/
static void virtio_mem_clear_fake_offline(unsigned long pfn,
- unsigned int nr_pages, bool onlined)
+ unsigned long nr_pages, bool onlined)
{
for (; nr_pages--; pfn++) {
struct page *page = pfn_to_page(pfn);
@@ -763,16 +1097,17 @@ static void virtio_mem_clear_fake_offline(unsigned long pfn,
* Release a range of fake-offline pages to the buddy, effectively
* fake-onlining them.
*/
-static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
+static void virtio_mem_fake_online(unsigned long pfn, unsigned long nr_pages)
{
- const int order = MAX_ORDER - 1;
- int i;
+ const unsigned long max_nr_pages = MAX_ORDER_NR_PAGES;
+ unsigned long i;
/*
- * We are always called with subblock granularity, which is at least
- * aligned to MAX_ORDER - 1.
+ * We are always called at least with MAX_ORDER_NR_PAGES
+ * granularity/alignment (e.g., the way subblocks work). All pages
+ * inside such a block are alike.
*/
- for (i = 0; i < nr_pages; i += 1 << order) {
+ for (i = 0; i < nr_pages; i += max_nr_pages) {
struct page *page = pfn_to_page(pfn + i);
/*
@@ -782,42 +1117,128 @@ static void virtio_mem_fake_online(unsigned long pfn, unsigned int nr_pages)
* alike.
*/
if (PageDirty(page)) {
- virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+ virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
false);
- generic_online_page(page, order);
+ generic_online_page(page, MAX_ORDER - 1);
} else {
- virtio_mem_clear_fake_offline(pfn + i, 1 << order,
+ virtio_mem_clear_fake_offline(pfn + i, max_nr_pages,
true);
- free_contig_range(pfn + i, 1 << order);
- adjust_managed_page_count(page, 1 << order);
+ free_contig_range(pfn + i, max_nr_pages);
+ adjust_managed_page_count(page, max_nr_pages);
}
}
}
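
For orientation, the chunk size used above on a common x86-64 configuration; the exact values depend on MAX_ORDER and PAGE_SIZE:

/*
 * MAX_ORDER = 11, PAGE_SIZE = 4 KiB:
 *   MAX_ORDER_NR_PAGES = 1 << (MAX_ORDER - 1) = 1024 pages = 4 MiB
 * A 4 MiB subblock is therefore fake-onlined in exactly one step,
 * either via generic_online_page() (PageDirty set) or by returning the
 * previously allocated range with free_contig_range().
 */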
+/*
+ * Try to allocate a range, marking pages fake-offline, effectively
+ * fake-offlining them.
+ */
+static int virtio_mem_fake_offline(unsigned long pfn, unsigned long nr_pages)
+{
+ const bool is_movable = zone_idx(page_zone(pfn_to_page(pfn))) ==
+ ZONE_MOVABLE;
+ int rc, retry_count;
+
+ /*
+ * TODO: We want an alloc_contig_range() mode that tries to allocate
+ * harder (e.g., dealing with temporarily pinned pages, PCP), especially
+ * with ZONE_MOVABLE. So for now, retry a couple of times with
+ * ZONE_MOVABLE before giving up - because that zone is supposed to give
+ * some guarantees.
+ */
+ for (retry_count = 0; retry_count < 5; retry_count++) {
+ rc = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_MOVABLE,
+ GFP_KERNEL);
+ if (rc == -ENOMEM)
+ /* whoops, out of memory */
+ return rc;
+ else if (rc && !is_movable)
+ break;
+ else if (rc)
+ continue;
+
+ virtio_mem_set_fake_offline(pfn, nr_pages, true);
+ adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/*
+ * Handle fake-offline pages when memory is going offline - such that the
+ * pages can be skipped by mm-core when offlining.
+ */
+static void virtio_mem_fake_offline_going_offline(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ struct page *page;
+ unsigned long i;
+
+ /*
+ * Drop our reference to the pages so the memory can get offlined
+ * and add the unplugged pages to the managed page counters (so
+ * offlining code can correctly subtract them again).
+ */
+ adjust_managed_page_count(pfn_to_page(pfn), nr_pages);
+ for (i = 0; i < nr_pages; i++) {
+ page = pfn_to_page(pfn + i);
+ if (WARN_ON(!page_ref_dec_and_test(page)))
+ dump_page(page, "fake-offline page referenced");
+ }
+}
+
+/*
+ * Handle fake-offline pages when memory offlining is canceled - to undo
+ * what we did in virtio_mem_fake_offline_going_offline().
+ */
+static void virtio_mem_fake_offline_cancel_offline(unsigned long pfn,
+ unsigned long nr_pages)
+{
+ unsigned long i;
+
+ /*
+ * Get the reference we dropped when going offline and subtract the
+ * unplugged pages from the managed page counters.
+ */
+ adjust_managed_page_count(pfn_to_page(pfn), -nr_pages);
+ for (i = 0; i < nr_pages; i++)
+ page_ref_inc(pfn_to_page(pfn + i));
+}
+
static void virtio_mem_online_page_cb(struct page *page, unsigned int order)
{
const unsigned long addr = page_to_phys(page);
- const unsigned long mb_id = virtio_mem_phys_to_mb_id(addr);
+ unsigned long id, sb_id;
struct virtio_mem *vm;
- int sb_id;
+ bool do_online;
- /*
- * We exploit here that subblocks have at least MAX_ORDER - 1
- * size/alignment and that this callback is is called with such a
- * size/alignment. So we cannot cross subblocks and therefore
- * also not memory blocks.
- */
rcu_read_lock();
list_for_each_entry_rcu(vm, &virtio_mem_devices, next) {
- if (!virtio_mem_owned_mb(vm, mb_id))
+ if (!virtio_mem_contains_range(vm, addr, PFN_PHYS(1 << order)))
continue;
- sb_id = virtio_mem_phys_to_sb_id(vm, addr);
- /*
- * If plugged, online the pages, otherwise, set them fake
- * offline (PageOffline).
- */
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ if (vm->in_sbm) {
+ /*
+ * We exploit here that subblocks have at least
+ * MAX_ORDER_NR_PAGES size/alignment - so we cannot
+ * cross subblocks within one call.
+ */
+ id = virtio_mem_phys_to_mb_id(addr);
+ sb_id = virtio_mem_phys_to_sb_id(vm, addr);
+ do_online = virtio_mem_sbm_test_sb_plugged(vm, id,
+ sb_id, 1);
+ } else {
+ /*
+ * If the whole block is marked fake offline, keep
+ * everything that way.
+ */
+ id = virtio_mem_phys_to_bb_id(vm, addr);
+ do_online = virtio_mem_bbm_get_bb_state(vm, id) !=
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE;
+ }
+ if (do_online)
generic_online_page(page, order);
else
virtio_mem_set_fake_offline(PFN_DOWN(addr), 1 << order,
@@ -870,23 +1291,33 @@ static int virtio_mem_send_plug_request(struct virtio_mem *vm, uint64_t addr,
.u.plug.addr = cpu_to_virtio64(vm->vdev, addr),
.u.plug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
+ int rc = -ENOMEM;
if (atomic_read(&vm->config_changed))
return -EAGAIN;
+ dev_dbg(&vm->vdev->dev, "plugging memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->plugged_size += size;
return 0;
case VIRTIO_MEM_RESP_NACK:
- return -EAGAIN;
+ rc = -EAGAIN;
+ break;
case VIRTIO_MEM_RESP_BUSY:
- return -ETXTBSY;
+ rc = -ETXTBSY;
+ break;
case VIRTIO_MEM_RESP_ERROR:
- return -EINVAL;
+ rc = -EINVAL;
+ break;
default:
- return -ENOMEM;
+ break;
}
+
+ dev_dbg(&vm->vdev->dev, "plugging memory failed: %d\n", rc);
+ return rc;
}
static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
@@ -898,21 +1329,30 @@ static int virtio_mem_send_unplug_request(struct virtio_mem *vm, uint64_t addr,
.u.unplug.addr = cpu_to_virtio64(vm->vdev, addr),
.u.unplug.nb_blocks = cpu_to_virtio16(vm->vdev, nb_vm_blocks),
};
+ int rc = -ENOMEM;
if (atomic_read(&vm->config_changed))
return -EAGAIN;
+ dev_dbg(&vm->vdev->dev, "unplugging memory: 0x%llx - 0x%llx\n", addr,
+ addr + size - 1);
+
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
vm->plugged_size -= size;
return 0;
case VIRTIO_MEM_RESP_BUSY:
- return -ETXTBSY;
+ rc = -ETXTBSY;
+ break;
case VIRTIO_MEM_RESP_ERROR:
- return -EINVAL;
+ rc = -EINVAL;
+ break;
default:
- return -ENOMEM;
+ break;
}
+
+ dev_dbg(&vm->vdev->dev, "unplugging memory failed: %d\n", rc);
+ return rc;
}
static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
@@ -920,6 +1360,9 @@ static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
const struct virtio_mem_req req = {
.type = cpu_to_virtio16(vm->vdev, VIRTIO_MEM_REQ_UNPLUG_ALL),
};
+ int rc = -ENOMEM;
+
+ dev_dbg(&vm->vdev->dev, "unplugging all memory\n");
switch (virtio_mem_send_request(vm, &req)) {
case VIRTIO_MEM_RESP_ACK:
@@ -929,30 +1372,31 @@ static int virtio_mem_send_unplug_all_request(struct virtio_mem *vm)
atomic_set(&vm->config_changed, 1);
return 0;
case VIRTIO_MEM_RESP_BUSY:
- return -ETXTBSY;
+ rc = -ETXTBSY;
+ break;
default:
- return -ENOMEM;
+ break;
}
+
+ dev_dbg(&vm->vdev->dev, "unplugging all memory failed: %d\n", rc);
+ return rc;
}
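+/*
+ * Summary of the response handling in the three request helpers above:
+ * VIRTIO_MEM_RESP_ACK succeeds, NACK (plug only) maps to -EAGAIN, BUSY
+ * to -ETXTBSY, ERROR to -EINVAL and anything unexpected to -ENOMEM.
+ * The workqueue is assumed to retry the -EAGAIN/-ETXTBSY cases via the
+ * retry timer instead of marking the device broken.
+ */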
/*
* Plug selected subblocks. Updates the plugged state, but not the state
* of the memory block.
*/
-static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
- int sb_id, int count)
+static int virtio_mem_sbm_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
+ int sb_id, int count)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size;
- const uint64_t size = count * vm->subblock_size;
+ sb_id * vm->sbm.sb_size;
+ const uint64_t size = count * vm->sbm.sb_size;
int rc;
- dev_dbg(&vm->vdev->dev, "plugging memory block: %lu : %i - %i\n", mb_id,
- sb_id, sb_id + count - 1);
-
rc = virtio_mem_send_plug_request(vm, addr, size);
if (!rc)
- virtio_mem_mb_set_sb_plugged(vm, mb_id, sb_id, count);
+ virtio_mem_sbm_set_sb_plugged(vm, mb_id, sb_id, count);
return rc;
}
@@ -960,24 +1404,47 @@ static int virtio_mem_mb_plug_sb(struct virtio_mem *vm, unsigned long mb_id,
* Unplug selected subblocks. Updates the plugged state, but not the state
* of the memory block.
*/
-static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
- int sb_id, int count)
+static int virtio_mem_sbm_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
+ int sb_id, int count)
{
const uint64_t addr = virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size;
- const uint64_t size = count * vm->subblock_size;
+ sb_id * vm->sbm.sb_size;
+ const uint64_t size = count * vm->sbm.sb_size;
int rc;
- dev_dbg(&vm->vdev->dev, "unplugging memory block: %lu : %i - %i\n",
- mb_id, sb_id, sb_id + count - 1);
-
rc = virtio_mem_send_unplug_request(vm, addr, size);
if (!rc)
- virtio_mem_mb_set_sb_unplugged(vm, mb_id, sb_id, count);
+ virtio_mem_sbm_set_sb_unplugged(vm, mb_id, sb_id, count);
return rc;
}
/*
+ * Request to unplug a big block.
+ *
+ * Will not modify the state of the big block.
+ */
+static int virtio_mem_bbm_unplug_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_send_unplug_request(vm, addr, size);
+}
+
+/*
+ * Request to plug a big block.
+ *
+ * Will not modify the state of the big block.
+ */
+static int virtio_mem_bbm_plug_bb(struct virtio_mem *vm, unsigned long bb_id)
+{
+ const uint64_t addr = virtio_mem_bb_id_to_phys(vm, bb_id);
+ const uint64_t size = vm->bbm.bb_size;
+
+ return virtio_mem_send_plug_request(vm, addr, size);
+}
+
+/*
 * Unplug the desired number of plugged subblocks of an offline or not-added
* memory block. Will fail if any subblock cannot get unplugged (instead of
* skipping it).
@@ -986,29 +1453,29 @@ static int virtio_mem_mb_unplug_sb(struct virtio_mem *vm, unsigned long mb_id,
*
* Note: can fail after some subblocks were unplugged.
*/
-static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
- unsigned long mb_id, uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb)
{
int sb_id, count;
int rc;
- sb_id = vm->nb_sb_per_mb - 1;
+ sb_id = vm->sbm.sbs_per_mb - 1;
while (*nb_sb) {
/* Find the next candidate subblock */
while (sb_id >= 0 &&
- virtio_mem_mb_test_sb_unplugged(vm, mb_id, sb_id, 1))
+ virtio_mem_sbm_test_sb_unplugged(vm, mb_id, sb_id, 1))
sb_id--;
if (sb_id < 0)
break;
/* Try to unplug multiple subblocks at a time */
count = 1;
while (count < *nb_sb && sb_id > 0 &&
- virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
+ virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id - 1, 1)) {
count++;
sb_id--;
}
- rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+ rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
if (rc)
return rc;
*nb_sb -= count;
@@ -1025,63 +1492,50 @@ static int virtio_mem_mb_unplug_any_sb(struct virtio_mem *vm,
*
* Note: can fail after some subblocks were unplugged.
*/
-static int virtio_mem_mb_unplug(struct virtio_mem *vm, unsigned long mb_id)
+static int virtio_mem_sbm_unplug_mb(struct virtio_mem *vm, unsigned long mb_id)
{
- uint64_t nb_sb = vm->nb_sb_per_mb;
+ uint64_t nb_sb = vm->sbm.sbs_per_mb;
- return virtio_mem_mb_unplug_any_sb(vm, mb_id, &nb_sb);
+ return virtio_mem_sbm_unplug_any_sb(vm, mb_id, &nb_sb);
}
/*
* Prepare tracking data for the next memory block.
*/
-static int virtio_mem_prepare_next_mb(struct virtio_mem *vm,
- unsigned long *mb_id)
+static int virtio_mem_sbm_prepare_next_mb(struct virtio_mem *vm,
+ unsigned long *mb_id)
{
int rc;
- if (vm->next_mb_id > vm->last_usable_mb_id)
+ if (vm->sbm.next_mb_id > vm->sbm.last_usable_mb_id)
return -ENOSPC;
/* Resize the state array if required. */
- rc = virtio_mem_mb_state_prepare_next_mb(vm);
+ rc = virtio_mem_sbm_mb_states_prepare_next_mb(vm);
if (rc)
return rc;
/* Resize the subblock bitmap if required. */
- rc = virtio_mem_sb_bitmap_prepare_next_mb(vm);
+ rc = virtio_mem_sbm_sb_states_prepare_next_mb(vm);
if (rc)
return rc;
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_UNUSED]++;
- *mb_id = vm->next_mb_id++;
+ vm->sbm.mb_count[VIRTIO_MEM_SBM_MB_UNUSED]++;
+ *mb_id = vm->sbm.next_mb_id++;
return 0;
}
/*
- * Don't add too many blocks that are not onlined yet to avoid running OOM.
- */
-static bool virtio_mem_too_many_mb_offline(struct virtio_mem *vm)
-{
- unsigned long nb_offline;
-
- nb_offline = vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] +
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL];
- return nb_offline >= VIRTIO_MEM_NB_OFFLINE_THRESHOLD;
-}
-
-/*
* Try to plug the desired number of subblocks and add the memory block
* to Linux.
*
* Will modify the state of the memory block.
*/
-static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
- unsigned long mb_id,
- uint64_t *nb_sb)
+static int virtio_mem_sbm_plug_and_add_mb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb)
{
- const int count = min_t(int, *nb_sb, vm->nb_sb_per_mb);
- int rc, rc2;
+ const int count = min_t(int, *nb_sb, vm->sbm.sbs_per_mb);
+ int rc;
if (WARN_ON_ONCE(!count))
return -EINVAL;
@@ -1090,7 +1544,7 @@ static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
* Plug the requested number of subblocks before adding it to linux,
* so that onlining will directly online all plugged subblocks.
*/
- rc = virtio_mem_mb_plug_sb(vm, mb_id, 0, count);
+ rc = virtio_mem_sbm_plug_sb(vm, mb_id, 0, count);
if (rc)
return rc;
@@ -1098,29 +1552,21 @@ static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
* Mark the block properly offline before adding it to Linux,
* so the memory notifiers will find the block in the right state.
*/
- if (count == vm->nb_sb_per_mb)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE);
+ if (count == vm->sbm.sbs_per_mb)
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE);
else
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
/* Add the memory block to linux - if that fails, try to unplug. */
- rc = virtio_mem_mb_add(vm, mb_id);
+ rc = virtio_mem_sbm_add_mb(vm, mb_id);
if (rc) {
- enum virtio_mem_mb_state new_state = VIRTIO_MEM_MB_STATE_UNUSED;
+ int new_state = VIRTIO_MEM_SBM_MB_UNUSED;
- dev_err(&vm->vdev->dev,
- "adding memory block %lu failed with %d\n", mb_id, rc);
- rc2 = virtio_mem_mb_unplug_sb(vm, mb_id, 0, count);
-
- /*
- * TODO: Linux MM does not properly clean up yet in all cases
- * where adding of memory failed - especially on -ENOMEM.
- */
- if (rc2)
- new_state = VIRTIO_MEM_MB_STATE_PLUGGED;
- virtio_mem_mb_set_state(vm, mb_id, new_state);
+ if (virtio_mem_sbm_unplug_sb(vm, mb_id, 0, count))
+ new_state = VIRTIO_MEM_SBM_MB_PLUGGED;
+ virtio_mem_sbm_set_mb_state(vm, mb_id, new_state);
return rc;
}
@@ -1136,8 +1582,9 @@ static int virtio_mem_mb_plug_and_add(struct virtio_mem *vm,
*
* Note: Can fail after some subblocks were successfully plugged.
*/
-static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
- uint64_t *nb_sb, bool online)
+static int virtio_mem_sbm_plug_any_sb(struct virtio_mem *vm,
+ unsigned long mb_id, uint64_t *nb_sb,
+ bool online)
{
unsigned long pfn, nr_pages;
int sb_id, count;
@@ -1147,17 +1594,16 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
return -EINVAL;
while (*nb_sb) {
- sb_id = virtio_mem_mb_first_unplugged_sb(vm, mb_id);
- if (sb_id >= vm->nb_sb_per_mb)
+ sb_id = virtio_mem_sbm_first_unplugged_sb(vm, mb_id);
+ if (sb_id >= vm->sbm.sbs_per_mb)
break;
count = 1;
while (count < *nb_sb &&
- sb_id + count < vm->nb_sb_per_mb &&
- !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id + count,
- 1))
+ sb_id + count < vm->sbm.sbs_per_mb &&
+ !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id + count, 1))
count++;
- rc = virtio_mem_mb_plug_sb(vm, mb_id, sb_id, count);
+ rc = virtio_mem_sbm_plug_sb(vm, mb_id, sb_id, count);
if (rc)
return rc;
*nb_sb -= count;
@@ -1166,29 +1612,26 @@ static int virtio_mem_mb_plug_any_sb(struct virtio_mem *vm, unsigned long mb_id,
/* fake-online the pages if the memory block is online */
pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- nr_pages = PFN_DOWN(count * vm->subblock_size);
+ sb_id * vm->sbm.sb_size);
+ nr_pages = PFN_DOWN(count * vm->sbm.sb_size);
virtio_mem_fake_online(pfn, nr_pages);
}
- if (virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
if (online)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE);
else
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE);
}
return 0;
}
-/*
- * Try to plug the requested amount of memory.
- */
-static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
+static int virtio_mem_sbm_plug_request(struct virtio_mem *vm, uint64_t diff)
{
- uint64_t nb_sb = diff / vm->subblock_size;
+ uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
int rc;
@@ -1199,18 +1642,18 @@ static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
mutex_lock(&vm->hotplug_mutex);
/* Try to plug subblocks of partially plugged online blocks. */
- virtio_mem_for_each_mb_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
- rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, true);
+ virtio_mem_sbm_for_each_mb(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
+ rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, true);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
}
/* Try to plug subblocks of partially plugged offline blocks. */
- virtio_mem_for_each_mb_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
- rc = virtio_mem_mb_plug_any_sb(vm, mb_id, &nb_sb, false);
+ virtio_mem_sbm_for_each_mb(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
+ rc = virtio_mem_sbm_plug_any_sb(vm, mb_id, &nb_sb, false);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
@@ -1223,11 +1666,11 @@ static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
mutex_unlock(&vm->hotplug_mutex);
/* Try to plug and add unused blocks */
- virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED) {
- if (virtio_mem_too_many_mb_offline(vm))
+ virtio_mem_sbm_for_each_mb(vm, mb_id, VIRTIO_MEM_SBM_MB_UNUSED) {
+ if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
return -ENOSPC;
- rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+ rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
return rc;
cond_resched();
@@ -1235,13 +1678,13 @@ static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
/* Try to prepare, plug and add new blocks */
while (nb_sb) {
- if (virtio_mem_too_many_mb_offline(vm))
+ if (!virtio_mem_could_add_memory(vm, memory_block_size_bytes()))
return -ENOSPC;
- rc = virtio_mem_prepare_next_mb(vm, &mb_id);
+ rc = virtio_mem_sbm_prepare_next_mb(vm, &mb_id);
if (rc)
return rc;
- rc = virtio_mem_mb_plug_and_add(vm, mb_id, &nb_sb);
+ rc = virtio_mem_sbm_plug_and_add_mb(vm, mb_id, &nb_sb);
if (rc)
return rc;
cond_resched();
@@ -1254,6 +1697,112 @@ out_unlock:
}
/*
+ * Plug a big block and add it to Linux.
+ *
+ * Will modify the state of the big block.
+ */
+static int virtio_mem_bbm_plug_and_add_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ int rc;
+
+ if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_UNUSED))
+ return -EINVAL;
+
+ rc = virtio_mem_bbm_plug_bb(vm, bb_id);
+ if (rc)
+ return rc;
+ virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
+
+ rc = virtio_mem_bbm_add_bb(vm, bb_id);
+ if (rc) {
+ if (!virtio_mem_bbm_unplug_bb(vm, bb_id))
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ else
+ /* Retry from the main loop. */
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_PLUGGED);
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Prepare tracking data for the next big block.
+ */
+static int virtio_mem_bbm_prepare_next_bb(struct virtio_mem *vm,
+ unsigned long *bb_id)
+{
+ int rc;
+
+ if (vm->bbm.next_bb_id > vm->bbm.last_usable_bb_id)
+ return -ENOSPC;
+
+ /* Resize the big block state array if required. */
+ rc = virtio_mem_bbm_bb_states_prepare_next_bb(vm);
+ if (rc)
+ return rc;
+
+ vm->bbm.bb_count[VIRTIO_MEM_BBM_BB_UNUSED]++;
+ *bb_id = vm->bbm.next_bb_id;
+ vm->bbm.next_bb_id++;
+ return 0;
+}
+
+static int virtio_mem_bbm_plug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ uint64_t nb_bb = diff / vm->bbm.bb_size;
+ unsigned long bb_id;
+ int rc;
+
+ if (!nb_bb)
+ return 0;
+
+ /* Try to plug and add unused big blocks */
+ virtio_mem_bbm_for_each_bb(vm, bb_id, VIRTIO_MEM_BBM_BB_UNUSED) {
+ if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
+ return -ENOSPC;
+
+ rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ cond_resched();
+ }
+
+ /* Try to prepare, plug and add new big blocks */
+ while (nb_bb) {
+ if (!virtio_mem_could_add_memory(vm, vm->bbm.bb_size))
+ return -ENOSPC;
+
+ rc = virtio_mem_bbm_prepare_next_bb(vm, &bb_id);
+ if (rc)
+ return rc;
+ rc = virtio_mem_bbm_plug_and_add_bb(vm, bb_id);
+ if (!rc)
+ nb_bb--;
+ if (rc)
+ return rc;
+ cond_resched();
+ }
+
+ return 0;
+}
+
+/*
+ * Try to plug the requested amount of memory.
+ */
+static int virtio_mem_plug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ if (vm->in_sbm)
+ return virtio_mem_sbm_plug_request(vm, diff);
+ return virtio_mem_bbm_plug_request(vm, diff);
+}
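+/*
+ * Sketch of the assumed caller in virtio_mem_run_wq(): the difference
+ * between the requested and the plugged size is what gets passed here:
+ *
+ *	if (vm->requested_size > vm->plugged_size)
+ *		rc = virtio_mem_plug_request(vm,
+ *				vm->requested_size - vm->plugged_size);
+ */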
+
+/*
* Unplug the desired number of plugged subblocks of an offline memory block.
* Will fail if any subblock cannot get unplugged (instead of skipping it).
*
@@ -1262,33 +1811,33 @@ out_unlock:
*
* Note: Can fail after some subblocks were successfully unplugged.
*/
-static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
- unsigned long mb_id,
- uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb_offline(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
{
int rc;
- rc = virtio_mem_mb_unplug_any_sb(vm, mb_id, nb_sb);
+ rc = virtio_mem_sbm_unplug_any_sb(vm, mb_id, nb_sb);
/* some subblocks might have been unplugged even on failure */
- if (!virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb))
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL);
+ if (!virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb))
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL);
if (rc)
return rc;
- if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
/*
* Remove the block from Linux - this should never fail.
* Hinder the block from getting onlined by marking it
* unplugged. Temporarily drop the mutex, so
* any pending GOING_ONLINE requests can be serviced/rejected.
*/
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_UNUSED);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
mutex_unlock(&vm->hotplug_mutex);
- rc = virtio_mem_mb_remove(vm, mb_id);
+ rc = virtio_mem_sbm_remove_mb(vm, mb_id);
BUG_ON(rc);
mutex_lock(&vm->hotplug_mutex);
}
@@ -1300,38 +1849,31 @@ static int virtio_mem_mb_unplug_any_sb_offline(struct virtio_mem *vm,
*
* Will modify the state of the memory block.
*/
-static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
- unsigned long mb_id, int sb_id,
- int count)
+static int virtio_mem_sbm_unplug_sb_online(struct virtio_mem *vm,
+ unsigned long mb_id, int sb_id,
+ int count)
{
- const unsigned long nr_pages = PFN_DOWN(vm->subblock_size) * count;
+ const unsigned long nr_pages = PFN_DOWN(vm->sbm.sb_size) * count;
unsigned long start_pfn;
int rc;
start_pfn = PFN_DOWN(virtio_mem_mb_id_to_phys(mb_id) +
- sb_id * vm->subblock_size);
- rc = alloc_contig_range(start_pfn, start_pfn + nr_pages,
- MIGRATE_MOVABLE, GFP_KERNEL);
- if (rc == -ENOMEM)
- /* whoops, out of memory */
- return rc;
- if (rc)
- return -EBUSY;
+ sb_id * vm->sbm.sb_size);
- /* Mark it as fake-offline before unplugging it */
- virtio_mem_set_fake_offline(start_pfn, nr_pages, true);
- adjust_managed_page_count(pfn_to_page(start_pfn), -nr_pages);
+ rc = virtio_mem_fake_offline(start_pfn, nr_pages);
+ if (rc)
+ return rc;
/* Try to unplug the allocated memory */
- rc = virtio_mem_mb_unplug_sb(vm, mb_id, sb_id, count);
+ rc = virtio_mem_sbm_unplug_sb(vm, mb_id, sb_id, count);
if (rc) {
/* Return the memory to the buddy. */
virtio_mem_fake_online(start_pfn, nr_pages);
return rc;
}
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL);
return 0;
}
@@ -1345,34 +1887,34 @@ static int virtio_mem_mb_unplug_sb_online(struct virtio_mem *vm,
* Note: Can fail after some subblocks were successfully unplugged. Can
* return 0 even if subblocks were busy and could not get unplugged.
*/
-static int virtio_mem_mb_unplug_any_sb_online(struct virtio_mem *vm,
- unsigned long mb_id,
- uint64_t *nb_sb)
+static int virtio_mem_sbm_unplug_any_sb_online(struct virtio_mem *vm,
+ unsigned long mb_id,
+ uint64_t *nb_sb)
{
int rc, sb_id;
/* If possible, try to unplug the complete block in one shot. */
- if (*nb_sb >= vm->nb_sb_per_mb &&
- virtio_mem_mb_test_sb_plugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
- rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, 0,
- vm->nb_sb_per_mb);
+ if (*nb_sb >= vm->sbm.sbs_per_mb &&
+ virtio_mem_sbm_test_sb_plugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
+ rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, 0,
+ vm->sbm.sbs_per_mb);
if (!rc) {
- *nb_sb -= vm->nb_sb_per_mb;
+ *nb_sb -= vm->sbm.sbs_per_mb;
goto unplugged;
} else if (rc != -EBUSY)
return rc;
}
/* Fallback to single subblocks. */
- for (sb_id = vm->nb_sb_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
+ for (sb_id = vm->sbm.sbs_per_mb - 1; sb_id >= 0 && *nb_sb; sb_id--) {
/* Find the next candidate subblock */
while (sb_id >= 0 &&
- !virtio_mem_mb_test_sb_plugged(vm, mb_id, sb_id, 1))
+ !virtio_mem_sbm_test_sb_plugged(vm, mb_id, sb_id, 1))
sb_id--;
if (sb_id < 0)
break;
- rc = virtio_mem_mb_unplug_sb_online(vm, mb_id, sb_id, 1);
+ rc = virtio_mem_sbm_unplug_sb_online(vm, mb_id, sb_id, 1);
if (rc == -EBUSY)
continue;
else if (rc)
@@ -1386,24 +1928,21 @@ unplugged:
* remove it. This will usually not fail, as no memory is in use
* anymore - however some other notifiers might NACK the request.
*/
- if (virtio_mem_mb_test_sb_unplugged(vm, mb_id, 0, vm->nb_sb_per_mb)) {
+ if (virtio_mem_sbm_test_sb_unplugged(vm, mb_id, 0, vm->sbm.sbs_per_mb)) {
mutex_unlock(&vm->hotplug_mutex);
- rc = virtio_mem_mb_offline_and_remove(vm, mb_id);
+ rc = virtio_mem_sbm_offline_and_remove_mb(vm, mb_id);
mutex_lock(&vm->hotplug_mutex);
if (!rc)
- virtio_mem_mb_set_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_UNUSED);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
}
return 0;
}
-/*
- * Try to unplug the requested amount of memory.
- */
-static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
+static int virtio_mem_sbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
{
- uint64_t nb_sb = diff / vm->subblock_size;
+ uint64_t nb_sb = diff / vm->sbm.sb_size;
unsigned long mb_id;
int rc;
@@ -1418,20 +1957,17 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
mutex_lock(&vm->hotplug_mutex);
/* Try to unplug subblocks of partially plugged offline blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
- rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
+ rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
}
/* Try to unplug subblocks of plugged offline blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE) {
- rc = virtio_mem_mb_unplug_any_sb_offline(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_OFFLINE) {
+ rc = virtio_mem_sbm_unplug_any_sb_offline(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
cond_resched();
@@ -1443,10 +1979,9 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
}
/* Try to unplug subblocks of partially plugged online blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL) {
- rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_ONLINE_PARTIAL) {
+ rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
mutex_unlock(&vm->hotplug_mutex);
@@ -1455,10 +1990,8 @@ static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
}
/* Try to unplug subblocks of plugged online blocks. */
- virtio_mem_for_each_mb_state_rev(vm, mb_id,
- VIRTIO_MEM_MB_STATE_ONLINE) {
- rc = virtio_mem_mb_unplug_any_sb_online(vm, mb_id,
- &nb_sb);
+ virtio_mem_sbm_for_each_mb_rev(vm, mb_id, VIRTIO_MEM_SBM_MB_ONLINE) {
+ rc = virtio_mem_sbm_unplug_any_sb_online(vm, mb_id, &nb_sb);
if (rc || !nb_sb)
goto out_unlock;
mutex_unlock(&vm->hotplug_mutex);
@@ -1474,19 +2007,211 @@ out_unlock:
}
/*
+ * Try to offline and remove a big block from Linux and unplug it. Will fail
+ * with -EBUSY if some memory is busy and cannot get unplugged.
+ *
+ * Will modify the state of the memory block. Might temporarily drop the
+ * hotplug_mutex.
+ */
+static int virtio_mem_bbm_offline_remove_and_unplug_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
+ const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
+ unsigned long end_pfn = start_pfn + nr_pages;
+ unsigned long pfn;
+ struct page *page;
+ int rc;
+
+ if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_ADDED))
+ return -EINVAL;
+
+ if (bbm_safe_unplug) {
+ /*
+ * Start by fake-offlining all memory. Once we've marked the device
+ * block as fake-offline, all newly onlined memory will
+ * automatically be kept fake-offline. Protect from concurrent
+ * onlining/offlining until we have a consistent state.
+ */
+ mutex_lock(&vm->hotplug_mutex);
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_FAKE_OFFLINE);
+
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+
+ rc = virtio_mem_fake_offline(pfn, PAGES_PER_SECTION);
+ if (rc) {
+ end_pfn = pfn;
+ goto rollback_safe_unplug;
+ }
+ }
+ mutex_unlock(&vm->hotplug_mutex);
+ }
+
+ rc = virtio_mem_bbm_offline_and_remove_bb(vm, bb_id);
+ if (rc) {
+ if (bbm_safe_unplug) {
+ mutex_lock(&vm->hotplug_mutex);
+ goto rollback_safe_unplug;
+ }
+ return rc;
+ }
+
+ rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
+ if (rc)
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_PLUGGED);
+ else
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ return rc;
+
+rollback_safe_unplug:
+ for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
+ page = pfn_to_online_page(pfn);
+ if (!page)
+ continue;
+ virtio_mem_fake_online(pfn, PAGES_PER_SECTION);
+ }
+ virtio_mem_bbm_set_bb_state(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED);
+ mutex_unlock(&vm->hotplug_mutex);
+ return rc;
+}
+
+/*
+ * Try to remove a big block from Linux and unplug it. Will fail with
+ * -EBUSY if some memory is online.
+ *
+ * Will modify the state of the memory block.
+ */
+static int virtio_mem_bbm_remove_and_unplug_bb(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ int rc;
+
+ if (WARN_ON_ONCE(virtio_mem_bbm_get_bb_state(vm, bb_id) !=
+ VIRTIO_MEM_BBM_BB_ADDED))
+ return -EINVAL;
+
+ rc = virtio_mem_bbm_remove_bb(vm, bb_id);
+ if (rc)
+ return -EBUSY;
+
+ rc = virtio_mem_bbm_unplug_bb(vm, bb_id);
+ if (rc)
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_PLUGGED);
+ else
+ virtio_mem_bbm_set_bb_state(vm, bb_id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ return rc;
+}
+
+/*
+ * Test if a big block is completely offline.
+ */
+static bool virtio_mem_bbm_bb_is_offline(struct virtio_mem *vm,
+ unsigned long bb_id)
+{
+ const unsigned long start_pfn = PFN_DOWN(virtio_mem_bb_id_to_phys(vm, bb_id));
+ const unsigned long nr_pages = PFN_DOWN(vm->bbm.bb_size);
+ unsigned long pfn;
+
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages;
+ pfn += PAGES_PER_SECTION) {
+ if (pfn_to_online_page(pfn))
+ return false;
+ }
+
+ return true;
+}
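+/*
+ * Illustrative numbers (assuming x86-64 SPARSEMEM with 128 MiB
+ * sections): for a 2 GiB big block the loop above probes 16
+ * section-aligned pfns. pfn_to_online_page() returns NULL for offline
+ * sections and holes, so a single online section is enough to make the
+ * big block count as not offline.
+ */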
+
+static int virtio_mem_bbm_unplug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ uint64_t nb_bb = diff / vm->bbm.bb_size;
+ uint64_t bb_id;
+ int rc;
+
+ if (!nb_bb)
+ return 0;
+
+ /* Try to unplug completely offline big blocks first. */
+ virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
+ cond_resched();
+ /*
+ * As we're holding no locks, this check is racy as memory
+ * can get onlined in the meantime - but we'll fail gracefully.
+ */
+ if (!virtio_mem_bbm_bb_is_offline(vm, bb_id))
+ continue;
+ rc = virtio_mem_bbm_remove_and_unplug_bb(vm, bb_id);
+ if (rc == -EBUSY)
+ continue;
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ }
+
+ if (!unplug_online)
+ return 0;
+
+ /* Try to unplug any big blocks. */
+ virtio_mem_bbm_for_each_bb_rev(vm, bb_id, VIRTIO_MEM_BBM_BB_ADDED) {
+ cond_resched();
+ rc = virtio_mem_bbm_offline_remove_and_unplug_bb(vm, bb_id);
+ if (rc == -EBUSY)
+ continue;
+ if (!rc)
+ nb_bb--;
+ if (rc || !nb_bb)
+ return rc;
+ }
+
+ return nb_bb ? -EBUSY : 0;
+}
+
+/*
+ * Try to unplug the requested amount of memory.
+ */
+static int virtio_mem_unplug_request(struct virtio_mem *vm, uint64_t diff)
+{
+ if (vm->in_sbm)
+ return virtio_mem_sbm_unplug_request(vm, diff);
+ return virtio_mem_bbm_unplug_request(vm, diff);
+}
+
+/*
* Try to unplug all blocks that couldn't be unplugged before, for example,
* because the hypervisor was busy.
*/
static int virtio_mem_unplug_pending_mb(struct virtio_mem *vm)
{
- unsigned long mb_id;
+ unsigned long id;
int rc;
- virtio_mem_for_each_mb_state(vm, mb_id, VIRTIO_MEM_MB_STATE_PLUGGED) {
- rc = virtio_mem_mb_unplug(vm, mb_id);
+ if (!vm->in_sbm) {
+ virtio_mem_bbm_for_each_bb(vm, id,
+ VIRTIO_MEM_BBM_BB_PLUGGED) {
+ rc = virtio_mem_bbm_unplug_bb(vm, id);
+ if (rc)
+ return rc;
+ virtio_mem_bbm_set_bb_state(vm, id,
+ VIRTIO_MEM_BBM_BB_UNUSED);
+ }
+ return 0;
+ }
+
+ virtio_mem_sbm_for_each_mb(vm, id, VIRTIO_MEM_SBM_MB_PLUGGED) {
+ rc = virtio_mem_sbm_unplug_mb(vm, id);
if (rc)
return rc;
- virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+ virtio_mem_sbm_set_mb_state(vm, id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
}
return 0;
@@ -1511,7 +2236,13 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
usable_region_size, &usable_region_size);
end_addr = vm->addr + usable_region_size;
end_addr = min(end_addr, phys_limit);
- vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1;
+
+ if (vm->in_sbm)
+ vm->sbm.last_usable_mb_id =
+ virtio_mem_phys_to_mb_id(end_addr) - 1;
+ else
+ vm->bbm.last_usable_bb_id =
+ virtio_mem_phys_to_bb_id(vm, end_addr) - 1;
/* see if there is a request to change the size */
virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size,
@@ -1535,6 +2266,7 @@ static void virtio_mem_run_wq(struct work_struct *work)
if (vm->broken)
return;
+ atomic_set(&vm->wq_active, 1);
retry:
rc = 0;
@@ -1595,6 +2327,8 @@ retry:
"unknown error, marking device broken: %d\n", rc);
vm->broken = true;
}
+
+ atomic_set(&vm->wq_active, 0);
}
static enum hrtimer_restart virtio_mem_timer_expired(struct hrtimer *timer)
@@ -1631,6 +2365,7 @@ static int virtio_mem_init_vq(struct virtio_mem *vm)
static int virtio_mem_init(struct virtio_mem *vm)
{
const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ uint64_t sb_size, addr;
uint16_t node_id;
if (!vm->vdev->config->get) {
@@ -1659,15 +2394,9 @@ static int virtio_mem_init(struct virtio_mem *vm)
virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size,
&vm->region_size);
- /*
- * We always hotplug memory in memory block granularity. This way,
- * we have to wait for exactly one memory block to online.
- */
- if (vm->device_block_size > memory_block_size_bytes()) {
- dev_err(&vm->vdev->dev,
- "The block size is not supported (too big).\n");
- return -EINVAL;
- }
+ /* Determine the nid for the device based on the lowest address. */
+ if (vm->nid == NUMA_NO_NODE)
+ vm->nid = memory_add_physaddr_to_nid(vm->addr);
/* bad device setup - warn only */
if (!IS_ALIGNED(vm->addr, memory_block_size_bytes()))
@@ -1681,23 +2410,57 @@ static int virtio_mem_init(struct virtio_mem *vm)
"Some memory is not addressable. This can make some memory unusable.\n");
/*
- * Calculate the subblock size:
- * - At least MAX_ORDER - 1 / pageblock_order.
- * - At least the device block size.
- * In the worst case, a single subblock per memory block.
+ * We want subblocks to span at least MAX_ORDER_NR_PAGES and
+ * pageblock_nr_pages pages. This:
+ * - Simplifies our page onlining code (virtio_mem_online_page_cb)
+ * and fake page onlining code (virtio_mem_fake_online).
+ * - Is required for now for alloc_contig_range() to work reliably -
+ * it doesn't properly handle smaller granularity on ZONE_NORMAL.
*/
- vm->subblock_size = PAGE_SIZE * 1ul << max_t(uint32_t, MAX_ORDER - 1,
- pageblock_order);
- vm->subblock_size = max_t(uint64_t, vm->device_block_size,
- vm->subblock_size);
- vm->nb_sb_per_mb = memory_block_size_bytes() / vm->subblock_size;
-
- /* Round up to the next full memory block */
- vm->first_mb_id = virtio_mem_phys_to_mb_id(vm->addr - 1 +
- memory_block_size_bytes());
- vm->next_mb_id = vm->first_mb_id;
- vm->last_mb_id = virtio_mem_phys_to_mb_id(vm->addr +
- vm->region_size) - 1;
+ sb_size = max_t(uint64_t, MAX_ORDER_NR_PAGES,
+ pageblock_nr_pages) * PAGE_SIZE;
+ sb_size = max_t(uint64_t, vm->device_block_size, sb_size);
+
+ if (sb_size < memory_block_size_bytes() && !force_bbm) {
+ /* SBM: At least two subblocks per Linux memory block. */
+ vm->in_sbm = true;
+ vm->sbm.sb_size = sb_size;
+ vm->sbm.sbs_per_mb = memory_block_size_bytes() /
+ vm->sbm.sb_size;
+
+ /* Round up to the next full memory block */
+ addr = vm->addr + memory_block_size_bytes() - 1;
+ vm->sbm.first_mb_id = virtio_mem_phys_to_mb_id(addr);
+ vm->sbm.next_mb_id = vm->sbm.first_mb_id;
+ } else {
+ /* BBM: At least one Linux memory block. */
+ vm->bbm.bb_size = max_t(uint64_t, vm->device_block_size,
+ memory_block_size_bytes());
+
+ if (bbm_block_size) {
+ if (!is_power_of_2(bbm_block_size)) {
+ dev_warn(&vm->vdev->dev,
+ "bbm_block_size is not a power of 2");
+ } else if (bbm_block_size < vm->bbm.bb_size) {
+ dev_warn(&vm->vdev->dev,
+ "bbm_block_size is too small");
+ } else {
+ vm->bbm.bb_size = bbm_block_size;
+ }
+ }
+
+ /* Round up to the next aligned big block */
+ addr = vm->addr + vm->bbm.bb_size - 1;
+ vm->bbm.first_bb_id = virtio_mem_phys_to_bb_id(vm, addr);
+ vm->bbm.next_bb_id = vm->bbm.first_bb_id;
+ }
+
+ /* Prepare the offline threshold - make sure we can add two blocks. */
+ vm->offline_threshold = max_t(uint64_t, 2 * memory_block_size_bytes(),
+ VIRTIO_MEM_DEFAULT_OFFLINE_THRESHOLD);
+ /* In BBM, we also want at least two big blocks. */
+ vm->offline_threshold = max_t(uint64_t, 2 * vm->bbm.bb_size,
+ vm->offline_threshold);
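+ /*
+ * Worked example (assuming typical x86-64 defaults): with 4 KiB pages,
+ * MAX_ORDER_NR_PAGES = 1024 and pageblock_nr_pages = 512, sb_size is
+ * 4 MiB. A 2 MiB device block with 128 MiB Linux memory blocks then
+ * selects SBM with sbs_per_mb = 32; a 1 GiB device block instead
+ * forces BBM with bb_size = 1 GiB and an offline threshold of 2 GiB.
+ */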
dev_info(&vm->vdev->dev, "start address: 0x%llx", vm->addr);
dev_info(&vm->vdev->dev, "region size: 0x%llx", vm->region_size);
@@ -1705,9 +2468,13 @@ static int virtio_mem_init(struct virtio_mem *vm)
(unsigned long long)vm->device_block_size);
dev_info(&vm->vdev->dev, "memory block size: 0x%lx",
memory_block_size_bytes());
- dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
- (unsigned long long)vm->subblock_size);
- if (vm->nid != NUMA_NO_NODE)
+ if (vm->in_sbm)
+ dev_info(&vm->vdev->dev, "subblock size: 0x%llx",
+ (unsigned long long)vm->sbm.sb_size);
+ else
+ dev_info(&vm->vdev->dev, "big block size: 0x%llx",
+ (unsigned long long)vm->bbm.bb_size);
+ if (vm->nid != NUMA_NO_NODE && IS_ENABLED(CONFIG_NUMA))
dev_info(&vm->vdev->dev, "nid: %d", vm->nid);
return 0;
@@ -1753,6 +2520,20 @@ static void virtio_mem_delete_resource(struct virtio_mem *vm)
vm->parent_resource = NULL;
}
+static int virtio_mem_range_has_system_ram(struct resource *res, void *arg)
+{
+ return 1;
+}
+
+static bool virtio_mem_has_memory_added(struct virtio_mem *vm)
+{
+ const unsigned long flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+
+ return walk_iomem_res_desc(IORES_DESC_NONE, flags, vm->addr,
+ vm->addr + vm->region_size, NULL,
+ virtio_mem_range_has_system_ram) == 1;
+}
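+/*
+ * Usage note (assumption based on walk_iomem_res_desc() semantics): the
+ * walk stops once the callback returns non-zero, so this helper returns
+ * true iff any busy System RAM resource intersects the device's
+ * physical range, i.e., some of its memory is still added to Linux.
+ */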
+
static int virtio_mem_probe(struct virtio_device *vdev)
{
struct virtio_mem *vm;
@@ -1849,21 +2630,24 @@ static void virtio_mem_remove(struct virtio_device *vdev)
cancel_work_sync(&vm->wq);
hrtimer_cancel(&vm->retry_timer);
- /*
- * After we unregistered our callbacks, user space can online partially
- * plugged offline blocks. Make sure to remove them.
- */
- virtio_mem_for_each_mb_state(vm, mb_id,
- VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL) {
- rc = virtio_mem_mb_remove(vm, mb_id);
- BUG_ON(rc);
- virtio_mem_mb_set_state(vm, mb_id, VIRTIO_MEM_MB_STATE_UNUSED);
+ if (vm->in_sbm) {
+ /*
+ * After we unregistered our callbacks, user space can online
+ * partially plugged offline blocks. Make sure to remove them.
+ */
+ virtio_mem_sbm_for_each_mb(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_OFFLINE_PARTIAL) {
+ rc = virtio_mem_sbm_remove_mb(vm, mb_id);
+ BUG_ON(rc);
+ virtio_mem_sbm_set_mb_state(vm, mb_id,
+ VIRTIO_MEM_SBM_MB_UNUSED);
+ }
+ /*
+ * After we unregistered our callbacks, user space can no longer
+ * offline partially plugged online memory blocks. No need to
+ * worry about them.
+ */
}
- /*
- * After we unregistered our callbacks, user space can no longer
- * offline partially plugged online memory blocks. No need to worry
- * about them.
- */
/* unregister callbacks */
unregister_virtio_mem_device(vm);
@@ -1874,10 +2658,7 @@ static void virtio_mem_remove(struct virtio_device *vdev)
* the system. And there is no way to stop the driver/device from going
* away. Warn at least.
*/
- if (vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_OFFLINE_PARTIAL] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE] ||
- vm->nb_mb_state[VIRTIO_MEM_MB_STATE_ONLINE_PARTIAL]) {
+ if (virtio_mem_has_memory_added(vm)) {
dev_warn(&vdev->dev, "device still has system memory added\n");
} else {
virtio_mem_delete_resource(vm);
@@ -1885,8 +2666,12 @@ static void virtio_mem_remove(struct virtio_device *vdev)
}
/* remove all tracking data - no locking needed */
- vfree(vm->mb_state);
- vfree(vm->sb_bitmap);
+ if (vm->in_sbm) {
+ vfree(vm->sbm.mb_states);
+ vfree(vm->sbm.sb_states);
+ } else {
+ vfree(vm->bbm.bb_states);
+ }
/* reset the device and cleanup the queues */
vdev->config->reset(vdev);
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index becc77697960..71e16b53e9c1 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -1608,7 +1608,6 @@ static struct virtqueue *vring_create_virtqueue_packed(
vq->num_added = 0;
vq->packed_ring = true;
vq->use_dma_api = vring_use_dma_api(vdev);
- list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
@@ -1669,6 +1668,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
cpu_to_le16(vq->packed.event_flags_shadow);
}
+ list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
err_desc_extra:
@@ -1676,9 +1676,9 @@ err_desc_extra:
err_desc_state:
kfree(vq);
err_vq:
- vring_free_queue(vdev, event_size_in_bytes, device, ring_dma_addr);
+ vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
- vring_free_queue(vdev, event_size_in_bytes, driver, ring_dma_addr);
+ vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
@@ -2085,7 +2085,6 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
vq->last_used_idx = 0;
vq->num_added = 0;
vq->use_dma_api = vring_use_dma_api(vdev);
- list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
vq->last_add_time_valid = false;
@@ -2127,6 +2126,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
memset(vq->split.desc_state, 0, vring.num *
sizeof(struct vring_desc_state_split));
+ list_add_tail(&vq->vq.list, &vdev->vqs);
return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index f22e37337030..7ff941e71b79 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -386,6 +386,7 @@ config ARM_SBSA_WATCHDOG
config ARMADA_37XX_WATCHDOG
tristate "Armada 37xx watchdog"
depends on ARCH_MVEBU || COMPILE_TEST
+ depends on HAS_IOMEM
select MFD_SYSCON
select WATCHDOG_CORE
help
@@ -631,7 +632,7 @@ config SUNXI_WATCHDOG
config COH901327_WATCHDOG
bool "ST-Ericsson COH 901 327 watchdog"
- depends on ARCH_U300 || (ARM && COMPILE_TEST)
+ depends on ARCH_U300 || (ARM && COMMON_CLK && COMPILE_TEST)
default y if MACH_U300
select WATCHDOG_CORE
help
@@ -789,6 +790,7 @@ config MOXART_WDT
config SIRFSOC_WATCHDOG
tristate "SiRFSOC watchdog"
+ depends on HAS_IOMEM
depends on ARCH_SIRF || COMPILE_TEST
select WATCHDOG_CORE
default y
@@ -1696,16 +1698,6 @@ config WDT_MTX1
Hardware driver for the MTX-1 boards. This is a watchdog timer that
will reboot the machine after a 100 seconds timer expired.
-config PNX833X_WDT
- tristate "PNX833x Hardware Watchdog"
- depends on SOC_PNX8335
- depends on BROKEN
- help
- Hardware driver for the PNX833x's watchdog. This is a
- watchdog timer that will reboot the machine after a programmable
- timer has expired and no process has written to /dev/watchdog during
- that time.
-
config SIBYTE_WDOG
tristate "Sibyte SoC hardware watchdog"
depends on CPU_SB1 || (MIPS && COMPILE_TEST)
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 071a2e50be98..5c74ee19d441 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -161,7 +161,6 @@ obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
obj-$(CONFIG_INDYDOG) += indydog.o
obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
obj-$(CONFIG_WDT_MTX1) += mtx-1_wdt.o
-obj-$(CONFIG_PNX833X_WDT) += pnx833x_wdt.o
obj-$(CONFIG_SIBYTE_WDOG) += sb_wdog.o
obj-$(CONFIG_AR7_WDT) += ar7_wdt.o
obj-$(CONFIG_TXX9_WDT) += txx9wdt.o
diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c
index 83418924e30a..0b699c783d57 100644
--- a/drivers/watchdog/geodewdt.c
+++ b/drivers/watchdog/geodewdt.c
@@ -150,8 +150,6 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd,
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident,
sizeof(ident)) ? -EFAULT : 0;
- break;
-
case WDIOC_GETSTATUS:
case WDIOC_GETBOOTSTATUS:
return put_user(0, p);
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 7d34bcf1c45b..cbd1498ff015 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -21,8 +21,9 @@
#include <linux/types.h>
#include <linux/watchdog.h>
#include <asm/nmi.h>
+#include <linux/crash_dump.h>
-#define HPWDT_VERSION "2.0.3"
+#define HPWDT_VERSION "2.0.4"
#define SECS_TO_TICKS(secs) ((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks) ((ticks) * 128 / 1000)
#define HPWDT_MAX_TICKS 65535
@@ -334,6 +335,11 @@ static int hpwdt_init_one(struct pci_dev *dev,
watchdog_set_nowayout(&hpwdt_dev, nowayout);
watchdog_init_timeout(&hpwdt_dev, soft_margin, NULL);
+ if (is_kdump_kernel()) {
+ pretimeout = 0;
+ kdumptimeout = 0;
+ }
+
if (pretimeout && hpwdt_dev.timeout <= PRETIMEOUT_SEC) {
dev_warn(&dev->dev, "timeout <= pretimeout. Setting pretimeout to zero\n");
pretimeout = 0;
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index a370a185a41c..bf31d7b67a69 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -40,8 +40,6 @@
* Includes, defines, variables, module parameters, ...
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
#define DRV_VERSION "1.11"
@@ -279,7 +277,7 @@ static int iTCO_wdt_start(struct watchdog_device *wd_dev)
/* disable chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false)) {
spin_unlock(&p->io_lock);
- pr_err("failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
+ dev_err(wd_dev->parent, "failed to reset NO_REBOOT flag, reboot disabled by hardware/BIOS\n");
return -EIO;
}
@@ -510,7 +508,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
/* Check chipset's NO_REBOOT bit */
if (p->update_no_reboot_bit(p->no_reboot_priv, false) &&
iTCO_vendor_check_noreboot_on()) {
- pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
+ dev_info(dev, "unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
return -ENODEV; /* Cannot reset NO_REBOOT bit */
}
@@ -530,12 +528,12 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if (!devm_request_region(dev, p->tco_res->start,
resource_size(p->tco_res),
pdev->name)) {
- pr_err("I/O address 0x%04llx already in use, device disabled\n",
+ dev_err(dev, "I/O address 0x%04llx already in use, device disabled\n",
(u64)TCOBASE(p));
return -EBUSY;
}
- pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
+ dev_info(dev, "Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
pdata->name, pdata->version, (u64)TCOBASE(p));
/* Clear out the (probably old) status */
@@ -558,7 +556,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
break;
}
- p->wddev.info = &ident,
+ p->wddev.info = &ident,
p->wddev.ops = &iTCO_wdt_ops,
p->wddev.bootstatus = 0;
p->wddev.timeout = WATCHDOG_TIMEOUT;
@@ -575,7 +573,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
if not reset to the default */
if (iTCO_wdt_set_timeout(&p->wddev, heartbeat)) {
iTCO_wdt_set_timeout(&p->wddev, WATCHDOG_TIMEOUT);
- pr_info("timeout value out of range, using %d\n",
+ dev_info(dev, "timeout value out of range, using %d\n",
WATCHDOG_TIMEOUT);
}
@@ -583,11 +581,11 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
watchdog_stop_on_unregister(&p->wddev);
ret = devm_watchdog_register_device(dev, &p->wddev);
if (ret != 0) {
- pr_err("cannot register watchdog device (err=%d)\n", ret);
+ dev_err(dev, "cannot register watchdog device (err=%d)\n", ret);
return ret;
}
- pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
+ dev_info(dev, "initialized. heartbeat=%d sec (nowayout=%d)\n",
heartbeat, nowayout);
return 0;
@@ -651,21 +649,7 @@ static struct platform_driver iTCO_wdt_driver = {
},
};
-static int __init iTCO_wdt_init_module(void)
-{
- pr_info("Intel TCO WatchDog Timer Driver v%s\n", DRV_VERSION);
-
- return platform_driver_register(&iTCO_wdt_driver);
-}
-
-static void __exit iTCO_wdt_cleanup_module(void)
-{
- platform_driver_unregister(&iTCO_wdt_driver);
- pr_info("Watchdog Module Unloaded\n");
-}
-
-module_init(iTCO_wdt_init_module);
-module_exit(iTCO_wdt_cleanup_module);
+module_platform_driver(iTCO_wdt_driver);
MODULE_AUTHOR("Wim Van Sebroeck <wim@iguana.be>");
MODULE_DESCRIPTION("Intel TCO WatchDog Timer Driver");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index 3fc457bc16db..2f7ded32e878 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -175,8 +175,8 @@ static int mpc8xxx_wdt_probe(struct platform_device *ofdev)
spin_lock_init(&ddata->lock);
- ddata->wdd.info = &mpc8xxx_wdt_info,
- ddata->wdd.ops = &mpc8xxx_wdt_ops,
+ ddata->wdd.info = &mpc8xxx_wdt_info;
+ ddata->wdd.ops = &mpc8xxx_wdt_ops;
ddata->wdd.timeout = WATCHDOG_TIMEOUT;
watchdog_init_timeout(&ddata->wdd, timeout, dev);
diff --git a/drivers/watchdog/pnx833x_wdt.c b/drivers/watchdog/pnx833x_wdt.c
deleted file mode 100644
index 4097d076aab8..000000000000
--- a/drivers/watchdog/pnx833x_wdt.c
+++ /dev/null
@@ -1,277 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * PNX833x Hardware Watchdog Driver
- * Copyright 2008 NXP Semiconductors
- * Daniel Laird <daniel.j.laird@nxp.com>
- * Andre McCurdy <andre.mccurdy@nxp.com>
- *
- * Heavily based upon - IndyDog 0.3
- * A Hardware Watchdog Device for SGI IP22
- *
- * (c) Copyright 2002 Guido Guenther <agx@sigxcpu.org>, All Rights Reserved.
- *
- * based on softdog.c by Alan Cox <alan@redhat.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/miscdevice.h>
-#include <linux/watchdog.h>
-#include <linux/notifier.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <asm/mach-pnx833x/pnx833x.h>
-
-#define WATCHDOG_TIMEOUT 30 /* 30 sec Maximum timeout */
-#define WATCHDOG_COUNT_FREQUENCY 68000000U /* Watchdog counts at 68MHZ. */
-#define PNX_WATCHDOG_TIMEOUT (WATCHDOG_TIMEOUT * WATCHDOG_COUNT_FREQUENCY)
-#define PNX_TIMEOUT_VALUE 2040000000U
-
-/** CONFIG block */
-#define PNX833X_CONFIG (0x07000U)
-#define PNX833X_CONFIG_CPU_WATCHDOG (0x54)
-#define PNX833X_CONFIG_CPU_WATCHDOG_COMPARE (0x58)
-#define PNX833X_CONFIG_CPU_COUNTERS_CONTROL (0x1c)
-
-/** RESET block */
-#define PNX833X_RESET (0x08000U)
-#define PNX833X_RESET_CONFIG (0x08)
-
-static int pnx833x_wdt_alive;
-
-/* Set default timeout in MHZ.*/
-static int pnx833x_wdt_timeout = PNX_WATCHDOG_TIMEOUT;
-module_param(pnx833x_wdt_timeout, int, 0);
-MODULE_PARM_DESC(timeout, "Watchdog timeout in Mhz. (68Mhz clock), default="
- __MODULE_STRING(PNX_TIMEOUT_VALUE) "(30 seconds).");
-
-static bool nowayout = WATCHDOG_NOWAYOUT;
-module_param(nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
- __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-
-#define START_DEFAULT 1
-static int start_enabled = START_DEFAULT;
-module_param(start_enabled, int, 0);
-MODULE_PARM_DESC(start_enabled, "Watchdog is started on module insertion "
- "(default=" __MODULE_STRING(START_DEFAULT) ")");
-
-static void pnx833x_wdt_start(void)
-{
- /* Enable watchdog causing reset. */
- PNX833X_REG(PNX833X_RESET + PNX833X_RESET_CONFIG) |= 0x1;
- /* Set timeout.*/
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
- /* Enable watchdog. */
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_COUNTERS_CONTROL) |= 0x1;
-
- pr_info("Started watchdog timer\n");
-}
-
-static void pnx833x_wdt_stop(void)
-{
- /* Disable watchdog causing reset. */
- PNX833X_REG(PNX833X_RESET + PNX833X_CONFIG) &= 0xFFFFFFFE;
- /* Disable watchdog.*/
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_COUNTERS_CONTROL) &= 0xFFFFFFFE;
-
- pr_info("Stopped watchdog timer\n");
-}
-
-static void pnx833x_wdt_ping(void)
-{
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = pnx833x_wdt_timeout;
-}
-
-/*
- * Allow only one person to hold it open
- */
-static int pnx833x_wdt_open(struct inode *inode, struct file *file)
-{
- if (test_and_set_bit(0, &pnx833x_wdt_alive))
- return -EBUSY;
-
- if (nowayout)
- __module_get(THIS_MODULE);
-
- /* Activate timer */
- if (!start_enabled)
- pnx833x_wdt_start();
-
- pnx833x_wdt_ping();
-
- pr_info("Started watchdog timer\n");
-
- return stream_open(inode, file);
-}
-
-static int pnx833x_wdt_release(struct inode *inode, struct file *file)
-{
- /* Shut off the timer.
- * Lock it in if it's a module and we defined ...NOWAYOUT */
- if (!nowayout)
- pnx833x_wdt_stop(); /* Turn the WDT off */
-
- clear_bit(0, &pnx833x_wdt_alive);
- return 0;
-}
-
-static ssize_t pnx833x_wdt_write(struct file *file, const char *data, size_t len, loff_t *ppos)
-{
- /* Refresh the timer. */
- if (len)
- pnx833x_wdt_ping();
-
- return len;
-}
-
-static long pnx833x_wdt_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int options, new_timeout = 0;
- uint32_t timeout, timeout_left = 0;
-
- static const struct watchdog_info ident = {
- .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT,
- .firmware_version = 0,
- .identity = "Hardware Watchdog for PNX833x",
- };
-
- switch (cmd) {
- default:
- return -ENOTTY;
-
- case WDIOC_GETSUPPORT:
- if (copy_to_user((struct watchdog_info *)arg,
- &ident, sizeof(ident)))
- return -EFAULT;
- return 0;
-
- case WDIOC_GETSTATUS:
- case WDIOC_GETBOOTSTATUS:
- return put_user(0, (int *)arg);
-
- case WDIOC_SETOPTIONS:
- if (get_user(options, (int *)arg))
- return -EFAULT;
-
- if (options & WDIOS_DISABLECARD)
- pnx833x_wdt_stop();
-
- if (options & WDIOS_ENABLECARD)
- pnx833x_wdt_start();
-
- return 0;
-
- case WDIOC_KEEPALIVE:
- pnx833x_wdt_ping();
- return 0;
-
- case WDIOC_SETTIMEOUT:
- {
- if (get_user(new_timeout, (int *)arg))
- return -EFAULT;
-
- pnx833x_wdt_timeout = new_timeout;
- PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE) = new_timeout;
- return put_user(new_timeout, (int *)arg);
- }
-
- case WDIOC_GETTIMEOUT:
- timeout = PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG_COMPARE);
- return put_user(timeout, (int *)arg);
-
- case WDIOC_GETTIMELEFT:
- timeout_left = PNX833X_REG(PNX833X_CONFIG +
- PNX833X_CONFIG_CPU_WATCHDOG);
- return put_user(timeout_left, (int *)arg);
-
- }
-}
-
-static int pnx833x_wdt_notify_sys(struct notifier_block *this,
- unsigned long code, void *unused)
-{
- if (code == SYS_DOWN || code == SYS_HALT)
- pnx833x_wdt_stop(); /* Turn the WDT off */
-
- return NOTIFY_DONE;
-}
-
-static const struct file_operations pnx833x_wdt_fops = {
- .owner = THIS_MODULE,
- .llseek = no_llseek,
- .write = pnx833x_wdt_write,
- .unlocked_ioctl = pnx833x_wdt_ioctl,
- .compat_ioctl = compat_ptr_ioctl,
- .open = pnx833x_wdt_open,
- .release = pnx833x_wdt_release,
-};
-
-static struct miscdevice pnx833x_wdt_miscdev = {
- .minor = WATCHDOG_MINOR,
- .name = "watchdog",
- .fops = &pnx833x_wdt_fops,
-};
-
-static struct notifier_block pnx833x_wdt_notifier = {
- .notifier_call = pnx833x_wdt_notify_sys,
-};
-
-static int __init watchdog_init(void)
-{
- int ret, cause;
-
- /* Lets check the reason for the reset.*/
- cause = PNX833X_REG(PNX833X_RESET);
- /*If bit 31 is set then watchdog was cause of reset.*/
- if (cause & 0x80000000) {
- pr_info("The system was previously reset due to the watchdog firing - please investigate...\n");
- }
-
- ret = register_reboot_notifier(&pnx833x_wdt_notifier);
- if (ret) {
- pr_err("cannot register reboot notifier (err=%d)\n", ret);
- return ret;
- }
-
- ret = misc_register(&pnx833x_wdt_miscdev);
- if (ret) {
- pr_err("cannot register miscdev on minor=%d (err=%d)\n",
- WATCHDOG_MINOR, ret);
- unregister_reboot_notifier(&pnx833x_wdt_notifier);
- return ret;
- }
-
- pr_info("Hardware Watchdog Timer for PNX833x: Version 0.1\n");
-
- if (start_enabled)
- pnx833x_wdt_start();
-
- return 0;
-}
-
-static void __exit watchdog_exit(void)
-{
- misc_deregister(&pnx833x_wdt_miscdev);
- unregister_reboot_notifier(&pnx833x_wdt_notifier);
-}
-
-module_init(watchdog_init);
-module_exit(watchdog_exit);
-
-MODULE_AUTHOR("Daniel Laird/Andre McCurdy");
-MODULE_DESCRIPTION("Hardware Watchdog Device for PNX833x");
-MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
index ab7465d186fd..7cf0f2ec649b 100644
--- a/drivers/watchdog/qcom-wdt.c
+++ b/drivers/watchdog/qcom-wdt.c
@@ -148,10 +148,17 @@ static int qcom_wdt_restart(struct watchdog_device *wdd, unsigned long action,
*/
wmb();
- msleep(150);
+ mdelay(150);
return 0;
}
+static int qcom_wdt_is_running(struct watchdog_device *wdd)
+{
+ struct qcom_wdt *wdt = to_qcom_wdt(wdd);
+
+ return (readl(wdt_addr(wdt, WDT_EN)) & QCOM_WDT_ENABLE);
+}
+
static const struct watchdog_ops qcom_wdt_ops = {
.start = qcom_wdt_start,
.stop = qcom_wdt_stop,
@@ -294,6 +301,17 @@ static int qcom_wdt_probe(struct platform_device *pdev)
wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U);
watchdog_init_timeout(&wdt->wdd, 0, dev);
+ /*
+ * If the WDT is already running, call WDT start, which will stop the
+ * WDT, set our timeouts (the bootloader may have used different ones)
+ * and set the running bit to tell the WDT subsystem to ping the WDT.
+ */
+ if (qcom_wdt_is_running(&wdt->wdd)) {
+ qcom_wdt_start(&wdt->wdd);
+ set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
+ }
+
ret = devm_watchdog_register_device(dev, &wdt->wdd);
if (ret)
return ret;
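
Editorial note on the qcom hunks: the msleep(150) to mdelay(150) switch matters because restart handlers can run late in reboot with interrupts disabled, where sleeping is not allowed, so a busy wait is the only safe delay there. The probe hunk is an instance of the common bootloader-handoff pattern: if firmware left the watchdog ticking, the driver must reprogram it and tell the core before registration. A minimal sketch of that pattern, with hypothetical my_wdt_*() helpers standing in for the real callbacks:

#include <linux/watchdog.h>

static bool my_wdt_is_running(struct watchdog_device *wdd);	/* hypothetical */
static int my_wdt_start(struct watchdog_device *wdd);		/* hypothetical */

static int my_wdt_register(struct device *dev, struct watchdog_device *wdd)
{
	/*
	 * Restart with our own timeout (the bootloader may have
	 * programmed a different one) and mark the watchdog as live so
	 * the core pings it until user space opens /dev/watchdog.
	 */
	if (my_wdt_is_running(wdd)) {
		my_wdt_start(wdd);
		set_bit(WDOG_HW_RUNNING, &wdd->status);
	}

	return devm_watchdog_register_device(dev, wdd);
}
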
diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c
index 836319cbaca9..359302f71f7e 100644
--- a/drivers/watchdog/rti_wdt.c
+++ b/drivers/watchdog/rti_wdt.c
@@ -227,8 +227,10 @@ static int rti_wdt_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
- if (ret)
+ if (ret) {
+ pm_runtime_put_noidle(dev);
return dev_err_probe(dev, ret, "runtime pm failed\n");
+ }
platform_set_drvdata(pdev, wdt);
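
For context on the rti_wdt fix: pm_runtime_get_sync() raises the device usage count even when it fails, so the error path must drop that reference, which pm_runtime_put_noidle() does without triggering an idle transition. Note that pm_runtime_get_sync() can also return 1 when the device was already active, so sketches of this idiom usually test ret < 0 rather than ret; newer kernels provide pm_runtime_resume_and_get(), which folds the put into the failure path. A hedged sketch of the full shape:

#include <linux/pm_runtime.h>

	pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* get_sync bumped the usage count even on failure; undo it. */
		pm_runtime_put_noidle(dev);
		return dev_err_probe(dev, ret, "runtime pm failed\n");
	}
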
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index 04483d6453d6..13db71e16583 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -78,7 +78,7 @@ static int fitpc2_wdt_open(struct inode *inode, struct file *file)
return stream_open(inode, file);
}
-static ssize_t fitpc2_wdt_write(struct file *file, const char *data,
+static ssize_t fitpc2_wdt_write(struct file *file, const char __user *data,
size_t len, loff_t *ppos)
{
size_t i;
@@ -125,16 +125,16 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case WDIOC_GETSUPPORT:
- ret = copy_to_user((struct watchdog_info *)arg, &ident,
+ ret = copy_to_user((struct watchdog_info __user *)arg, &ident,
sizeof(ident)) ? -EFAULT : 0;
break;
case WDIOC_GETSTATUS:
- ret = put_user(0, (int *)arg);
+ ret = put_user(0, (int __user *)arg);
break;
case WDIOC_GETBOOTSTATUS:
- ret = put_user(0, (int *)arg);
+ ret = put_user(0, (int __user *)arg);
break;
case WDIOC_KEEPALIVE:
@@ -143,7 +143,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
break;
case WDIOC_SETTIMEOUT:
- ret = get_user(time, (int *)arg);
+ ret = get_user(time, (int __user *)arg);
if (ret)
break;
@@ -157,7 +157,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd,
fallthrough;
case WDIOC_GETTIMEOUT:
- ret = put_user(margin, (int *)arg);
+ ret = put_user(margin, (int __user *)arg);
break;
}
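
The sbc_fitpc2 hunks are pure annotation: tagging user pointers with __user changes no generated code but lets sparse flag any direct dereference of a user-space address. A minimal sketch of the convention, using a hypothetical my_wdt_ioctl() handler:

#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static long my_wdt_ioctl(struct file *file, unsigned int cmd,
			 unsigned long arg)
{
	int __user *p = (int __user *)arg;	/* cast once, annotated */
	int timeout;

	switch (cmd) {
	case WDIOC_SETTIMEOUT:
		if (get_user(timeout, p))	/* copies from user space */
			return -EFAULT;
		/* ... program the hardware ... */
		return put_user(timeout, p);
	default:
		return -ENOTTY;
	}
}
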
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index 190d26e2e75f..958dc32a708f 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -291,6 +291,7 @@ sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id)
set_bit(WDOG_HW_RUNNING, &wdt->wdd.status);
}
+ watchdog_stop_on_reboot(&wdt->wdd);
ret = watchdog_register_device(&wdt->wdd);
if (ret)
goto err;
diff --git a/drivers/watchdog/sprd_wdt.c b/drivers/watchdog/sprd_wdt.c
index 65cb55f3916f..4e689b6ff141 100644
--- a/drivers/watchdog/sprd_wdt.c
+++ b/drivers/watchdog/sprd_wdt.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/clk.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -53,7 +54,7 @@
#define SPRD_WDT_CNT_HIGH_SHIFT 16
#define SPRD_WDT_LOW_VALUE_MASK GENMASK(15, 0)
-#define SPRD_WDT_LOAD_TIMEOUT 1000
+#define SPRD_WDT_LOAD_TIMEOUT 11
struct sprd_wdt {
void __iomem *base;
@@ -108,6 +109,23 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
u32 tmr_step = timeout * SPRD_WDT_CNT_STEP;
u32 prtmr_step = pretimeout * SPRD_WDT_CNT_STEP;
+ /*
+ * Check the busy bit to make sure the previous load operation has
+ * completed. According to the specification, the busy bit is set
+ * after a new load operation and stays set for 2 or 3 RTC clock
+ * cycles (about 60us to 92us).
+ */
+ do {
+ val = readl_relaxed(wdt->base + SPRD_WDT_INT_RAW);
+ if (!(val & SPRD_WDT_LD_BUSY_BIT))
+ break;
+
+ usleep_range(10, 100);
+ } while (delay_cnt++ < SPRD_WDT_LOAD_TIMEOUT);
+
+ if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
+ return -EBUSY;
+
sprd_wdt_unlock(wdt->base);
writel_relaxed((tmr_step >> SPRD_WDT_CNT_HIGH_SHIFT) &
SPRD_WDT_LOW_VALUE_MASK, wdt->base + SPRD_WDT_LOAD_HIGH);
@@ -120,20 +138,6 @@ static int sprd_wdt_load_value(struct sprd_wdt *wdt, u32 timeout,
wdt->base + SPRD_WDT_IRQ_LOAD_LOW);
sprd_wdt_lock(wdt->base);
- /*
- * Waiting the load value operation done,
- * it needs two or three RTC clock cycles.
- */
- do {
- val = readl_relaxed(wdt->base + SPRD_WDT_INT_RAW);
- if (!(val & SPRD_WDT_LD_BUSY_BIT))
- break;
-
- cpu_relax();
- } while (delay_cnt++ < SPRD_WDT_LOAD_TIMEOUT);
-
- if (delay_cnt >= SPRD_WDT_LOAD_TIMEOUT)
- return -EBUSY;
return 0;
}
@@ -345,15 +349,10 @@ static int __maybe_unused sprd_wdt_pm_resume(struct device *dev)
if (ret)
return ret;
- if (watchdog_active(&wdt->wdd)) {
+ if (watchdog_active(&wdt->wdd))
ret = sprd_wdt_start(&wdt->wdd);
- if (ret) {
- sprd_wdt_disable(wdt);
- return ret;
- }
- }
- return 0;
+ return ret;
}
static const struct dev_pm_ops sprd_wdt_pm_ops = {
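
Two things change in sprd_wdt_load_value(): the busy-bit wait moves ahead of the register writes (so a still-pending previous load is caught before, not after, the new one), and the cpu_relax() spin becomes usleep_range(10, 100), which is why SPRD_WDT_LOAD_TIMEOUT can drop from 1000 to 11; eleven sleeps of up to 100us comfortably cover the documented 60us to 92us busy window. The same bounded sleep-poll can be written more compactly with the iopoll helpers; a sketch assuming the driver's register layout:

#include <linux/iopoll.h>

	u32 val;
	int ret;

	/* Sleep-poll the busy bit for up to ~1.1 ms before loading. */
	ret = readl_relaxed_poll_timeout(wdt->base + SPRD_WDT_INT_RAW,
					 val,
					 !(val & SPRD_WDT_LD_BUSY_BIT),
					 100, 1100);
	if (ret)
		return -EBUSY;
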
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
index 25188d6bbe15..a3436c296c97 100644
--- a/drivers/watchdog/stm32_iwdg.c
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -162,18 +162,15 @@ static int stm32_iwdg_clk_init(struct platform_device *pdev,
u32 ret;
wdt->clk_lsi = devm_clk_get(dev, "lsi");
- if (IS_ERR(wdt->clk_lsi)) {
- dev_err(dev, "Unable to get lsi clock\n");
- return PTR_ERR(wdt->clk_lsi);
- }
+ if (IS_ERR(wdt->clk_lsi))
+ return dev_err_probe(dev, PTR_ERR(wdt->clk_lsi), "Unable to get lsi clock\n");
/* optional peripheral clock */
if (wdt->data->has_pclk) {
wdt->clk_pclk = devm_clk_get(dev, "pclk");
- if (IS_ERR(wdt->clk_pclk)) {
- dev_err(dev, "Unable to get pclk clock\n");
- return PTR_ERR(wdt->clk_pclk);
- }
+ if (IS_ERR(wdt->clk_pclk))
+ return dev_err_probe(dev, PTR_ERR(wdt->clk_pclk),
+ "Unable to get pclk clock\n");
ret = clk_prepare_enable(wdt->clk_pclk);
if (ret) {
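
The stm32_iwdg conversion is the standard dev_err_probe() cleanup: it returns the error passed to it, logs only when that error is not -EPROBE_DEFER, and records the deferral reason so it appears in /sys/kernel/debug/devices_deferred. The general shape it replaces four lines with:

	clk = devm_clk_get(dev, "name");
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "Unable to get clock\n");
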
diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c
index 423844757812..0e9a99559609 100644
--- a/drivers/watchdog/watchdog_core.c
+++ b/drivers/watchdog/watchdog_core.c
@@ -267,15 +267,19 @@ static int __watchdog_register_device(struct watchdog_device *wdd)
}
if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) {
- wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
-
- ret = register_reboot_notifier(&wdd->reboot_nb);
- if (ret) {
- pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
- wdd->id, ret);
- watchdog_dev_unregister(wdd);
- ida_simple_remove(&watchdog_ida, id);
- return ret;
+ if (!wdd->ops->stop) {
+ pr_warn("watchdog%d: stop_on_reboot not supported\n", wdd->id);
+ } else {
+ wdd->reboot_nb.notifier_call = watchdog_reboot_notifier;
+
+ ret = register_reboot_notifier(&wdd->reboot_nb);
+ if (ret) {
+ pr_err("watchdog%d: Cannot register reboot notifier (%d)\n",
+ wdd->id, ret);
+ watchdog_dev_unregister(wdd);
+ ida_simple_remove(&watchdog_ida, id);
+ return ret;
+ }
}
}
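
This core change pairs with the sp805 hunk above: watchdog_stop_on_reboot() only sets WDOG_STOP_ON_REBOOT, and it is the core that registers a reboot notifier invoking ops->stop at shutdown. A driver may legitimately lack a stop callback (hardware that cannot be stopped is handled via WDOG_HW_RUNNING instead), so the core now warns and skips the notifier rather than registering one whose stop would fail. Driver-side usage, sketched for a typical probe:

	/* Opt in; sets WDOG_STOP_ON_REBOOT, the core does the rest. */
	watchdog_stop_on_reboot(&wdt->wdd);

	ret = devm_watchdog_register_device(dev, &wdt->wdd);
	if (ret)
		return ret;
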
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index 3065dd670a18..cec7917790e5 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -34,9 +34,9 @@ struct wdat_instruction {
* @period: How long is one watchdog period in ms
* @stopped_in_sleep: Is this watchdog stopped by the firmware in S1-S5
* @stopped: Was the watchdog stopped by the driver in suspend
- * @actions: An array of instruction lists indexed by an action number from
- * the WDAT table. There can be %NULL entries for not implemented
- * actions.
+ * @instructions: An array of instruction lists indexed by an action number from
+ * the WDAT table. There can be %NULL entries for not implemented
+ * actions.
*/
struct wdat_wdt {
struct platform_device *pdev;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index babdca808861..c3621b9f4012 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -21,7 +21,7 @@ obj-$(CONFIG_XEN_GNTDEV) += xen-gntdev.o
obj-$(CONFIG_XEN_GRANT_DEV_ALLOC) += xen-gntalloc.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
-obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
+obj-$(CONFIG_XEN_PVHVM_GUEST) += platform-pci.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_MCE_LOG) += mcelog.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 6038c4c35db5..a8030332a191 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -95,7 +95,8 @@ struct irq_info {
struct list_head list;
struct list_head eoi_list;
short refcnt;
- short spurious_cnt;
+ u8 spurious_cnt;
+ u8 is_accounted;
enum xen_irq_type type; /* type */
unsigned irq;
evtchn_port_t evtchn; /* event channel */
@@ -161,6 +162,9 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
+/* Event channel distribution data */
+static atomic_t channels_on_cpu[NR_CPUS];
+
static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
@@ -257,6 +261,32 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
+/* Per CPU channel accounting */
+static void channels_on_cpu_dec(struct irq_info *info)
+{
+ if (!info->is_accounted)
+ return;
+
+ info->is_accounted = 0;
+
+ if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
+ return;
+
+ WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
+}
+
+static void channels_on_cpu_inc(struct irq_info *info)
+{
+ if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
+ return;
+
+ if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
+ INT_MAX)))
+ return;
+
+ info->is_accounted = 1;
+}
+
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
unsigned irq,
@@ -339,6 +369,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
{
set_evtchn_to_irq(info->evtchn, -1);
info->evtchn = 0;
+ channels_on_cpu_dec(info);
}
/*
@@ -433,18 +464,25 @@ static bool pirq_needs_eoi_flag(unsigned irq)
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+ bool force_affinity)
{
int irq = get_evtchn_to_irq(evtchn);
struct irq_info *info = info_for_irq(irq);
BUG_ON(irq == -1);
-#ifdef CONFIG_SMP
- cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
-#endif
+
+ if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
+ cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
+ cpumask_copy(irq_get_effective_affinity_mask(irq),
+ cpumask_of(cpu));
+ }
+
xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
+ channels_on_cpu_dec(info);
info->cpu = cpu;
+ channels_on_cpu_inc(info);
}
/**
@@ -523,8 +561,10 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
return;
if (spurious) {
- if ((1 << info->spurious_cnt) < (HZ << 2))
- info->spurious_cnt++;
+ if ((1 << info->spurious_cnt) < (HZ << 2)) {
+ if (info->spurious_cnt != 0xFF)
+ info->spurious_cnt++;
+ }
if (info->spurious_cnt > 1) {
delay = 1 << (info->spurious_cnt - 2);
if (delay > HZ)
@@ -615,11 +655,6 @@ static void xen_irq_init(unsigned irq)
{
struct irq_info *info;
-#ifdef CONFIG_SMP
- /* By default all event channels notify CPU#0. */
- cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
-#endif
-
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
panic("Unable to allocate metadata for IRQ%d\n", irq);
@@ -628,6 +663,11 @@ static void xen_irq_init(unsigned irq)
info->refcnt = -1;
set_info_for_irq(irq, info);
+ /*
+ * Interrupt affinity setting can be immediate. No point
+ * in delaying it until an interrupt is handled.
+ */
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
INIT_LIST_HEAD(&info->eoi_list);
list_add_tail(&info->list, &xen_irq_list_head);
@@ -739,18 +779,7 @@ static void eoi_pirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- if (unlikely(irqd_is_setaffinity_pending(data)) &&
- likely(!irqd_irq_disabled(data))) {
- int masked = test_and_set_mask(evtchn);
-
- clear_evtchn(evtchn);
-
- irq_move_masked_irq(data);
-
- if (!masked)
- unmask_evtchn(evtchn);
- } else
- clear_evtchn(evtchn);
+ clear_evtchn(evtchn);
if (pirq_needs_eoi(data->irq)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -794,7 +823,7 @@ static unsigned int __startup_pirq(unsigned int irq)
goto err;
info->evtchn = evtchn;
- bind_evtchn_to_cpu(evtchn, 0);
+ bind_evtchn_to_cpu(evtchn, 0, false);
rc = xen_evtchn_port_setup(evtchn);
if (rc)
@@ -1113,8 +1142,14 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
irq = ret;
goto out;
}
- /* New interdomain events are bound to VCPU 0. */
- bind_evtchn_to_cpu(evtchn, 0);
+ /*
+ * New interdomain events are initially bound to vCPU 0. This is
+ * required to set up the event channel in the first place, and it
+ * also matters for UP guests: the affinity setter is never invoked
+ * on them, so nothing else would bind the channel.
+ */
+ bind_evtchn_to_cpu(evtchn, 0, false);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
@@ -1132,12 +1167,6 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
-int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
-{
- return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
-}
-EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
-
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
@@ -1168,7 +1197,11 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
irq = ret;
goto out;
}
- bind_evtchn_to_cpu(evtchn, cpu);
+ /*
+ * Force the affinity mask to the target CPU so /proc shows the
+ * correct target.
+ */
+ bind_evtchn_to_cpu(evtchn, cpu, true);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_IPI);
@@ -1281,7 +1314,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
goto out;
}
- bind_evtchn_to_cpu(evtchn, cpu);
+ /*
+ * Force the affinity mask for percpu interrupts so /proc shows
+ * the correct target.
+ */
+ bind_evtchn_to_cpu(evtchn, cpu, percpu);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_VIRQ);
@@ -1646,9 +1683,7 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
mutex_unlock(&irq_mapping_update_lock);
- bind_evtchn_to_cpu(evtchn, info->cpu);
- /* This will be deferred until interrupt is processed */
- irq_set_affinity(irq, cpumask_of(info->cpu));
+ bind_evtchn_to_cpu(evtchn, info->cpu, false);
/* Unmask the event channel. */
enable_irq(irq);
@@ -1682,7 +1717,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
* it, but don't do the xenlinux-level rebind in that case.
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
- bind_evtchn_to_cpu(evtchn, tcpu);
+ bind_evtchn_to_cpu(evtchn, tcpu, false);
if (!masked)
unmask_evtchn(evtchn);
@@ -1690,27 +1725,47 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
return 0;
}
+/*
+ * Find the CPU within the @dest mask that has the fewest channels
+ * assigned. This is not precise, as the per-CPU counts can be
+ * modified concurrently.
+ */
+static unsigned int select_target_cpu(const struct cpumask *dest)
+{
+ unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
+
+ for_each_cpu_and(cpu, dest, cpu_online_mask) {
+ unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
+
+ if (curch < minch) {
+ minch = curch;
+ best_cpu = cpu;
+ }
+ }
+
+ /*
+ * Catch the unlikely case that dest contains no online CPUs. The
+ * fallback call with cpu_online_mask cannot recurse further, as at
+ * least one CPU is always online.
+ */
+ if (best_cpu == UINT_MAX)
+ return select_target_cpu(cpu_online_mask);
+
+ return best_cpu;
+}
+
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
bool force)
{
- unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
- int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
+ unsigned int tcpu = select_target_cpu(dest);
+ int ret;
+ ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
if (!ret)
irq_data_update_effective_affinity(data, cpumask_of(tcpu));
return ret;
}
-/* To be called with desc->lock held. */
-int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
-{
- struct irq_data *d = irq_desc_get_irq_data(desc);
-
- return set_affinity_irq(d, cpumask_of(tcpu), false);
-}
-EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
-
static void enable_dynirq(struct irq_data *data)
{
evtchn_port_t evtchn = evtchn_from_irq(data->irq);
@@ -1734,18 +1789,7 @@ static void ack_dynirq(struct irq_data *data)
if (!VALID_EVTCHN(evtchn))
return;
- if (unlikely(irqd_is_setaffinity_pending(data)) &&
- likely(!irqd_irq_disabled(data))) {
- int masked = test_and_set_mask(evtchn);
-
- clear_evtchn(evtchn);
-
- irq_move_masked_irq(data);
-
- if (!masked)
- unmask_evtchn(evtchn);
- } else
- clear_evtchn(evtchn);
+ clear_evtchn(evtchn);
}
static void mask_ack_dynirq(struct irq_data *data)
@@ -1830,7 +1874,8 @@ static void restore_cpu_virqs(unsigned int cpu)
/* Record the new mapping. */
(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
- bind_evtchn_to_cpu(evtchn, cpu);
+ /* The affinity mask is still valid */
+ bind_evtchn_to_cpu(evtchn, cpu, false);
}
}
@@ -1855,7 +1900,8 @@ static void restore_cpu_ipis(unsigned int cpu)
/* Record the new mapping. */
(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
- bind_evtchn_to_cpu(evtchn, cpu);
+ /* The affinity mask is still valid */
+ bind_evtchn_to_cpu(evtchn, cpu, false);
}
}
@@ -1938,8 +1984,12 @@ void xen_irq_resume(void)
xen_evtchn_resume();
/* No IRQ <-> event-channel mappings. */
- list_for_each_entry(info, &xen_irq_list_head, list)
- info->evtchn = 0; /* zap event-channel binding */
+ list_for_each_entry(info, &xen_irq_list_head, list) {
+ /* Zap event-channel binding */
+ info->evtchn = 0;
+ /* Adjust accounting */
+ channels_on_cpu_dec(info);
+ }
clear_evtchn_to_irq_all();
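
Taken together, the events_base.c hunks replace the old first-online-CPU placement with a least-loaded pick backed by per-CPU atomic counters; the counters are deliberately racy (reads are unlocked) because an approximate spread is sufficient, and channels_on_cpu_inc()/_dec() clamp at INT_MAX and 0 via atomic_add_unless() so an unbalanced pairing warns rather than wraps. Stripped of the IRQ plumbing, the selection amounts to this sketch (pick_least_loaded() and counts[] are illustrative names, not kernel API):

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

static atomic_t counts[NR_CPUS];	/* channels bound per CPU */

static unsigned int pick_least_loaded(const struct cpumask *dest)
{
	unsigned int cpu, best = UINT_MAX, min = UINT_MAX;

	/* Racy reads are fine; an approximate spread is the goal. */
	for_each_cpu_and(cpu, dest, cpu_online_mask) {
		unsigned int n = atomic_read(&counts[cpu]);

		if (n < min) {
			min = n;
			best = cpu;
		}
	}

	/* If dest had no online CPU, fall back to any online one. */
	return best != UINT_MAX ? best : cpumask_first(cpu_online_mask);
}
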
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 5dc016d68f83..a7a85719a8c8 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -421,36 +421,6 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
del_evtchn(u, evtchn);
}
-static DEFINE_PER_CPU(int, bind_last_selected_cpu);
-
-static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
-{
- unsigned int selected_cpu, irq;
- struct irq_desc *desc;
- unsigned long flags;
-
- irq = irq_from_evtchn(evtchn);
- desc = irq_to_desc(irq);
-
- if (!desc)
- return;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- selected_cpu = this_cpu_read(bind_last_selected_cpu);
- selected_cpu = cpumask_next_and(selected_cpu,
- desc->irq_common_data.affinity, cpu_online_mask);
-
- if (unlikely(selected_cpu >= nr_cpu_ids))
- selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
- cpu_online_mask);
-
- this_cpu_write(bind_last_selected_cpu, selected_cpu);
-
- /* unmask expects irqs to be disabled */
- xen_set_affinity_evtchn(desc, selected_cpu);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
static long evtchn_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
@@ -508,10 +478,8 @@ static long evtchn_ioctl(struct file *file,
break;
rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
- if (rc == 0) {
+ if (rc == 0)
rc = bind_interdomain.local_port;
- evtchn_bind_interdom_next_vcpu(rc);
- }
break;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index cd046684e0d1..374d36de7f5a 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -179,6 +179,7 @@ static int poweroff_nb(struct notifier_block *cb, unsigned long code, void *unus
case SYS_HALT:
case SYS_POWER_OFF:
shutting_down = SHUTDOWN_POWEROFF;
+ break;
default:
break;
}